/*
 *  PowerPC floating point and SPE emulation helpers for QEMU.
 *
 *  Copyright (c) 2003-2007 Jocelyn Mayer
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"
#include "cpu.h"
#include "exec/helper-proto.h"
#include "exec/exec-all.h"
#include "internal.h"
#include "fpu/softfloat.h"
static inline float128 float128_snan_to_qnan(float128 x)
{
    float128 r;

    r.high = x.high | 0x0000800000000000;
    r.low = x.low;
    return r;
}
#define float64_snan_to_qnan(x) ((x) | 0x0008000000000000ULL)
#define float32_snan_to_qnan(x) ((x) | 0x00400000)
#define float16_snan_to_qnan(x) ((x) | 0x0200)
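
/*
 * In each IEEE binary format the most-significant fraction bit
 * distinguishes quiet NaNs from signaling ones; OR-ing that bit in, as
 * the helper and macros above do, is what turns an sNaN payload into
 * the corresponding qNaN.
 */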
static inline bool fp_exceptions_enabled(CPUPPCState *env)
{
#ifdef CONFIG_USER_ONLY
    return true;
#else
    return (env->msr & ((1U << MSR_FE0) | (1U << MSR_FE1))) != 0;
#endif
}
/*****************************************************************************/
/* Floating point operations helpers */

/*
 * This is the non-arithmetic conversion that happens e.g. on loads.
 * In the Power ISA pseudocode, this is called DOUBLE.
 */
uint64_t helper_todouble(uint32_t arg)
{
    uint32_t abs_arg = arg & 0x7fffffff;
    uint64_t ret;

    if (likely(abs_arg >= 0x00800000)) {
        /* Normalized operand, or Inf, or NaN.  */
        ret  = (uint64_t)extract32(arg, 30, 2) << 62;
        ret |= ((extract32(arg, 30, 1) ^ 1) * (uint64_t)7) << 59;
        ret |= (uint64_t)extract32(arg, 0, 30) << 29;
    } else {
        /* Zero or Denormalized operand.  */
        ret = (uint64_t)extract32(arg, 31, 1) << 63;
        if (unlikely(abs_arg != 0)) {
            /* Denormalized operand.  */
            int shift = clz32(abs_arg) - 9;
            int exp = -126 - shift + 1023;
            ret |= (uint64_t)exp << 52;
            /* Widen before shifting: shift + 29 can exceed 31.  */
            ret |= (uint64_t)abs_arg << (shift + 29);
        }
    }
    return ret;
}
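
/*
 * Worked example: 1.0f is 0x3f800000.  The normalized path moves the
 * sign bit and exponent MSB (bits 31:30, here 0b00) to bits 63:62,
 * fills bits 61:59 with the complement of the exponent MSB (here
 * 0b111), and shifts the remaining 30 bits up by 29, giving
 * 0x3ff0000000000000, i.e. 1.0 as a float64.  The complement trick
 * rebiases the exponent from 127 to 1023, since the difference, 896,
 * is 0b1110000000.
 */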
/*
 * This is the non-arithmetic conversion that happens e.g. on stores.
 * In the Power ISA pseudocode, this is called SINGLE.
 */
uint32_t helper_tosingle(uint64_t arg)
{
    int exp = extract64(arg, 52, 11);
    uint32_t ret;

    if (likely(exp > 896)) {
        /* No denormalization required (includes Inf, NaN).  */
        ret  = extract64(arg, 62, 2) << 30;
        ret |= extract64(arg, 29, 30);
    } else {
        /* Zero or Denormal result.  If the exponent is in bounds for
         * a single-precision denormal result, extract the proper bits.
         * If the input is not zero, and the exponent is out of bounds,
         * then the result is undefined; this underflows to zero.
         */
        ret = extract64(arg, 63, 1) << 31;
        if (unlikely(exp >= 874)) {
            /* Denormal result.  */
            ret |= ((1ULL << 52) | extract64(arg, 0, 52)) >> (896 + 30 - exp);
        }
    }
    return ret;
}
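
/*
 * The magic numbers: 896 is the bias difference (1023 - 127), so a
 * biased double exponent above 896 maps onto a normal single-precision
 * number (or Inf or NaN); 874 is the biased double exponent of 2**-149,
 * the smallest single-precision denormal, so anything below that
 * underflows to zero.
 */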
static inline int ppc_float32_get_unbiased_exp(float32 f)
{
    return ((f >> 23) & 0xFF) - 127;
}

static inline int ppc_float64_get_unbiased_exp(float64 f)
{
    return ((f >> 52) & 0x7FF) - 1023;
}
#define COMPUTE_FPRF(tp)                                       \
void helper_compute_fprf_##tp(CPUPPCState *env, tp arg)        \
{                                                              \
    int isneg = tp##_is_neg(arg);                              \
    int fprf;                                                  \
                                                               \
    if (unlikely(tp##_is_any_nan(arg))) {                      \
        if (tp##_is_signaling_nan(arg, &env->fp_status)) {     \
            /* Signaling NaN: flags are undefined */           \
            fprf = 0x00;                                       \
        } else {                                               \
            /* Quiet NaN */                                    \
            fprf = 0x11;                                       \
        }                                                      \
    } else if (unlikely(tp##_is_infinity(arg))) {              \
        /* +/- infinity */                                     \
        fprf = isneg ? 0x09 : 0x05;                            \
    } else if (tp##_is_zero(arg)) {                            \
        /* +/- zero */                                         \
        fprf = isneg ? 0x12 : 0x02;                            \
    } else {                                                   \
        if (tp##_is_zero_or_denormal(arg)) {                   \
            /* Denormalized numbers */                         \
            fprf = 0x10;                                       \
        } else {                                               \
            /* Normalized numbers */                           \
            fprf = 0x00;                                       \
        }                                                      \
        fprf |= isneg ? 0x08 : 0x04;                           \
    }                                                          \
    /* We update FPSCR_FPRF */                                 \
    env->fpscr &= ~(0x1F << FPSCR_FPRF);                       \
    env->fpscr |= fprf << FPSCR_FPRF;                          \
}

COMPUTE_FPRF(float16)
COMPUTE_FPRF(float32)
COMPUTE_FPRF(float64)
COMPUTE_FPRF(float128)
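
/*
 * FPRF is the 5-bit C:FL:FG:FE:FU field.  The values set above are the
 * Power ISA result-class codes: 0x11 qNaN, 0x09 -Inf, 0x08 -normal,
 * 0x18 -denormal, 0x12 -zero, 0x02 +zero, 0x14 +denormal, 0x04 +normal,
 * 0x05 +Inf.  For a signaling NaN the flags are architecturally
 * undefined, so 0x00 is as good a choice as any.
 */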
/* Floating-point invalid operations exception */
static inline __attribute__((__always_inline__))
uint64_t float_invalid_op_excp(CPUPPCState *env, int op, int set_fpcc)
{
    CPUState *cs = CPU(ppc_env_get_cpu(env));
    uint64_t ret = 0;
    int ve;

    ve = fpscr_ve;
    switch (op) {
    case POWERPC_EXCP_FP_VXSNAN:
        env->fpscr |= 1 << FPSCR_VXSNAN;
        break;
    case POWERPC_EXCP_FP_VXSOFT:
        env->fpscr |= 1 << FPSCR_VXSOFT;
        break;
    case POWERPC_EXCP_FP_VXISI:
        /* Magnitude subtraction of infinities */
        env->fpscr |= 1 << FPSCR_VXISI;
        goto update_arith;
    case POWERPC_EXCP_FP_VXIDI:
        /* Division of infinity by infinity */
        env->fpscr |= 1 << FPSCR_VXIDI;
        goto update_arith;
    case POWERPC_EXCP_FP_VXZDZ:
        /* Division of zero by zero */
        env->fpscr |= 1 << FPSCR_VXZDZ;
        goto update_arith;
    case POWERPC_EXCP_FP_VXIMZ:
        /* Multiplication of zero by infinity */
        env->fpscr |= 1 << FPSCR_VXIMZ;
        goto update_arith;
    case POWERPC_EXCP_FP_VXVC:
        /* Ordered comparison of NaN */
        env->fpscr |= 1 << FPSCR_VXVC;
        if (set_fpcc) {
            env->fpscr &= ~(0xF << FPSCR_FPCC);
            env->fpscr |= 0x11 << FPSCR_FPCC;
        }
        /* We must update the target FPR before raising the exception */
        if (ve != 0) {
            cs->exception_index = POWERPC_EXCP_PROGRAM;
            env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_VXVC;
            /* Update the floating-point enabled exception summary */
            env->fpscr |= 1 << FPSCR_FEX;
            /* Exception is deferred */
        }
        break;
    case POWERPC_EXCP_FP_VXSQRT:
        /* Square root of a negative number */
        env->fpscr |= 1 << FPSCR_VXSQRT;
    update_arith:
        env->fpscr &= ~((1 << FPSCR_FR) | (1 << FPSCR_FI));
        if (ve == 0) {
            /* Set the result to quiet NaN */
            ret = 0x7FF8000000000000ULL;
            if (set_fpcc) {
                env->fpscr &= ~(0xF << FPSCR_FPCC);
                env->fpscr |= 0x11 << FPSCR_FPCC;
            }
        }
        break;
    case POWERPC_EXCP_FP_VXCVI:
        /* Invalid conversion */
        env->fpscr |= 1 << FPSCR_VXCVI;
        env->fpscr &= ~((1 << FPSCR_FR) | (1 << FPSCR_FI));
        if (ve == 0) {
            /* Set the result to quiet NaN */
            ret = 0x7FF8000000000000ULL;
            if (set_fpcc) {
                env->fpscr &= ~(0xF << FPSCR_FPCC);
                env->fpscr |= 0x11 << FPSCR_FPCC;
            }
        }
        break;
    }
    /* Update the floating-point invalid operation summary */
    env->fpscr |= 1 << FPSCR_VX;
    /* Update the floating-point exception summary */
    env->fpscr |= 1 << FPSCR_FX;
    if (ve != 0) {
        /* Update the floating-point enabled exception summary */
        env->fpscr |= 1 << FPSCR_FEX;
        if (fp_exceptions_enabled(env)) {
            /* GETPC() works here because this is inline */
            raise_exception_err_ra(env, POWERPC_EXCP_PROGRAM,
                                   POWERPC_EXCP_FP | op, GETPC());
        }
    }
    return ret;
}
static inline void float_zero_divide_excp(CPUPPCState *env, uintptr_t raddr)
{
    env->fpscr |= 1 << FPSCR_ZX;
    env->fpscr &= ~((1 << FPSCR_FR) | (1 << FPSCR_FI));
    /* Update the floating-point exception summary */
    env->fpscr |= 1 << FPSCR_FX;
    if (fpscr_ze != 0) {
        /* Update the floating-point enabled exception summary */
        env->fpscr |= 1 << FPSCR_FEX;
        if (fp_exceptions_enabled(env)) {
            raise_exception_err_ra(env, POWERPC_EXCP_PROGRAM,
                                   POWERPC_EXCP_FP | POWERPC_EXCP_FP_ZX,
                                   raddr);
        }
    }
}
static inline void float_overflow_excp(CPUPPCState *env)
{
    CPUState *cs = CPU(ppc_env_get_cpu(env));

    env->fpscr |= 1 << FPSCR_OX;
    /* Update the floating-point exception summary */
    env->fpscr |= 1 << FPSCR_FX;
    if (fpscr_oe != 0) {
        /* XXX: should adjust the result */
        /* Update the floating-point enabled exception summary */
        env->fpscr |= 1 << FPSCR_FEX;
        /* We must update the target FPR before raising the exception */
        cs->exception_index = POWERPC_EXCP_PROGRAM;
        env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_OX;
    } else {
        env->fpscr |= 1 << FPSCR_XX;
        env->fpscr |= 1 << FPSCR_FI;
    }
}
static inline void float_underflow_excp(CPUPPCState *env)
{
    CPUState *cs = CPU(ppc_env_get_cpu(env));

    env->fpscr |= 1 << FPSCR_UX;
    /* Update the floating-point exception summary */
    env->fpscr |= 1 << FPSCR_FX;
    if (fpscr_ue != 0) {
        /* XXX: should adjust the result */
        /* Update the floating-point enabled exception summary */
        env->fpscr |= 1 << FPSCR_FEX;
        /* We must update the target FPR before raising the exception */
        cs->exception_index = POWERPC_EXCP_PROGRAM;
        env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_UX;
    }
}
static inline void float_inexact_excp(CPUPPCState *env)
{
    CPUState *cs = CPU(ppc_env_get_cpu(env));

    env->fpscr |= 1 << FPSCR_FI;
    env->fpscr |= 1 << FPSCR_XX;
    /* Update the floating-point exception summary */
    env->fpscr |= 1 << FPSCR_FX;
    if (fpscr_xe != 0) {
        /* Update the floating-point enabled exception summary */
        env->fpscr |= 1 << FPSCR_FEX;
        /* We must update the target FPR before raising the exception */
        cs->exception_index = POWERPC_EXCP_PROGRAM;
        env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_XX;
    }
}
static inline void fpscr_set_rounding_mode(CPUPPCState *env)
{
    int rnd_type;

    /* Set rounding mode */
    switch (fpscr_rn) {
    case 0:
        /* Best approximation (round to nearest) */
        rnd_type = float_round_nearest_even;
        break;
    case 1:
        /* Smaller magnitude (round toward zero) */
        rnd_type = float_round_to_zero;
        break;
    case 2:
        /* Round toward +infinity */
        rnd_type = float_round_up;
        break;
    default:
    case 3:
        /* Round toward -infinity */
        rnd_type = float_round_down;
        break;
    }
    set_float_rounding_mode(rnd_type, &env->fp_status);
}
void helper_fpscr_clrbit(CPUPPCState *env, uint32_t bit)
{
    int prev;

    prev = (env->fpscr >> bit) & 1;
    env->fpscr &= ~(1 << bit);
    if (prev == 1) {
        switch (bit) {
        case FPSCR_RN1:
        case FPSCR_RN:
            fpscr_set_rounding_mode(env);
            break;
        case FPSCR_VXSNAN:
        case FPSCR_VXISI:
        case FPSCR_VXIDI:
        case FPSCR_VXZDZ:
        case FPSCR_VXIMZ:
        case FPSCR_VXVC:
        case FPSCR_VXSOFT:
        case FPSCR_VXSQRT:
        case FPSCR_VXCVI:
            if (!fpscr_ix) {
                /* Set VX bit to zero */
                env->fpscr &= ~(1 << FPSCR_VX);
            }
            break;
        case FPSCR_OX:
        case FPSCR_UX:
        case FPSCR_ZX:
        case FPSCR_XX:
        case FPSCR_VE:
        case FPSCR_OE:
        case FPSCR_UE:
        case FPSCR_ZE:
        case FPSCR_XE:
            if (!fpscr_eex) {
                /* Clear the FEX bit */
                env->fpscr &= ~(1 << FPSCR_FEX);
            }
            break;
        default:
            break;
        }
    }
}
void helper_fpscr_setbit(CPUPPCState *env, uint32_t bit)
{
    CPUState *cs = CPU(ppc_env_get_cpu(env));
    int prev;

    prev = (env->fpscr >> bit) & 1;
    env->fpscr |= 1 << bit;
    if (prev == 0) {
        switch (bit) {
        case FPSCR_VX:
            env->fpscr |= 1 << FPSCR_FX;
            if (fpscr_ve) {
                goto raise_ve;
            }
            break;
        case FPSCR_OX:
            env->fpscr |= 1 << FPSCR_FX;
            if (fpscr_oe) {
                goto raise_oe;
            }
            break;
        case FPSCR_UX:
            env->fpscr |= 1 << FPSCR_FX;
            if (fpscr_ue) {
                goto raise_ue;
            }
            break;
        case FPSCR_ZX:
            env->fpscr |= 1 << FPSCR_FX;
            if (fpscr_ze) {
                goto raise_ze;
            }
            break;
        case FPSCR_XX:
            env->fpscr |= 1 << FPSCR_FX;
            if (fpscr_xe) {
                goto raise_xe;
            }
            break;
        case FPSCR_VXSNAN:
        case FPSCR_VXISI:
        case FPSCR_VXIDI:
        case FPSCR_VXZDZ:
        case FPSCR_VXIMZ:
        case FPSCR_VXVC:
        case FPSCR_VXSOFT:
        case FPSCR_VXSQRT:
        case FPSCR_VXCVI:
            env->fpscr |= 1 << FPSCR_VX;
            env->fpscr |= 1 << FPSCR_FX;
            if (fpscr_ve != 0) {
                goto raise_ve;
            }
            break;
        case FPSCR_VE:
            if (fpscr_vx != 0) {
            raise_ve:
                env->error_code = POWERPC_EXCP_FP;
                if (fpscr_vxsnan) {
                    env->error_code |= POWERPC_EXCP_FP_VXSNAN;
                }
                if (fpscr_vxisi) {
                    env->error_code |= POWERPC_EXCP_FP_VXISI;
                }
                if (fpscr_vxidi) {
                    env->error_code |= POWERPC_EXCP_FP_VXIDI;
                }
                if (fpscr_vxzdz) {
                    env->error_code |= POWERPC_EXCP_FP_VXZDZ;
                }
                if (fpscr_vximz) {
                    env->error_code |= POWERPC_EXCP_FP_VXIMZ;
                }
                if (fpscr_vxvc) {
                    env->error_code |= POWERPC_EXCP_FP_VXVC;
                }
                if (fpscr_vxsoft) {
                    env->error_code |= POWERPC_EXCP_FP_VXSOFT;
                }
                if (fpscr_vxsqrt) {
                    env->error_code |= POWERPC_EXCP_FP_VXSQRT;
                }
                if (fpscr_vxcvi) {
                    env->error_code |= POWERPC_EXCP_FP_VXCVI;
                }
                goto raise_excp;
            }
            break;
        case FPSCR_OE:
            if (fpscr_ox != 0) {
            raise_oe:
                env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_OX;
                goto raise_excp;
            }
            break;
        case FPSCR_UE:
            if (fpscr_ux != 0) {
            raise_ue:
                env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_UX;
                goto raise_excp;
            }
            break;
        case FPSCR_ZE:
            if (fpscr_zx != 0) {
            raise_ze:
                env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_ZX;
                goto raise_excp;
            }
            break;
        case FPSCR_XE:
            if (fpscr_xx != 0) {
            raise_xe:
                env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_XX;
                goto raise_excp;
            }
            break;
        case FPSCR_RN1:
        case FPSCR_RN:
            fpscr_set_rounding_mode(env);
            break;
        default:
            break;
        raise_excp:
            /* Update the floating-point enabled exception summary */
            env->fpscr |= 1 << FPSCR_FEX;
            /* We have to update Rc1 before raising the exception */
            cs->exception_index = POWERPC_EXCP_PROGRAM;
            break;
        }
    }
}
void helper_store_fpscr(CPUPPCState *env, uint64_t arg, uint32_t mask)
{
    CPUState *cs = CPU(ppc_env_get_cpu(env));
    target_ulong prev, new;
    int i;

    prev = env->fpscr;
    new = (target_ulong)arg;
    new &= ~0x60000000LL;
    new |= prev & 0x60000000LL;
    for (i = 0; i < sizeof(target_ulong) * 2; i++) {
        if (mask & (1 << i)) {
            env->fpscr &= ~(0xFLL << (4 * i));
            env->fpscr |= new & (0xFLL << (4 * i));
        }
    }
    /* Update VX and FEX */
    if (fpscr_ix != 0) {
        env->fpscr |= 1 << FPSCR_VX;
    } else {
        env->fpscr &= ~(1 << FPSCR_VX);
    }
    if ((fpscr_ex & fpscr_eex) != 0) {
        env->fpscr |= 1 << FPSCR_FEX;
        cs->exception_index = POWERPC_EXCP_PROGRAM;
        /* XXX: we should compute it properly */
        env->error_code = POWERPC_EXCP_FP;
    } else {
        env->fpscr &= ~(1 << FPSCR_FEX);
    }
    fpscr_set_rounding_mode(env);
}
void store_fpscr(CPUPPCState *env, uint64_t arg, uint32_t mask)
{
    helper_store_fpscr(env, arg, mask);
}
static void do_float_check_status(CPUPPCState *env, uintptr_t raddr)
{
    CPUState *cs = CPU(ppc_env_get_cpu(env));
    int status = get_float_exception_flags(&env->fp_status);
    bool inexact_happened = false;

    if (status & float_flag_overflow) {
        float_overflow_excp(env);
    } else if (status & float_flag_underflow) {
        float_underflow_excp(env);
    } else if (status & float_flag_inexact) {
        float_inexact_excp(env);
        inexact_happened = true;
    }

    /* if the inexact flag was not set */
    if (inexact_happened == false) {
        env->fpscr &= ~(1 << FPSCR_FI); /* clear the FPSCR[FI] bit */
    }

    if (cs->exception_index == POWERPC_EXCP_PROGRAM &&
        (env->error_code & POWERPC_EXCP_FP)) {
        /* Deferred floating-point exception after target FPR update */
        if (fp_exceptions_enabled(env)) {
            raise_exception_err_ra(env, cs->exception_index,
                                   env->error_code, raddr);
        }
    }
}
static inline __attribute__((__always_inline__))
void float_check_status(CPUPPCState *env)
{
    /* GETPC() works here because this is inline */
    do_float_check_status(env, GETPC());
}
void helper_float_check_status(CPUPPCState *env)
{
    do_float_check_status(env, GETPC());
}
void helper_reset_fpstatus(CPUPPCState *env)
{
    set_float_exception_flags(0, &env->fp_status);
}
/* fadd - fadd. */
float64 helper_fadd(CPUPPCState *env, float64 arg1, float64 arg2)
{
    float64 ret = float64_add(arg1, arg2, &env->fp_status);
    int status = get_float_exception_flags(&env->fp_status);

    if (unlikely(status & float_flag_invalid)) {
        if (float64_is_infinity(arg1) && float64_is_infinity(arg2)) {
            /* Magnitude subtraction of infinities */
            float_invalid_op_excp(env, POWERPC_EXCP_FP_VXISI, 1);
        } else if (float64_is_signaling_nan(arg1, &env->fp_status) ||
                   float64_is_signaling_nan(arg2, &env->fp_status)) {
            /* sNaN addition */
            float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 1);
        }
    }

    return ret;
}
/* fsub - fsub. */
float64 helper_fsub(CPUPPCState *env, float64 arg1, float64 arg2)
{
    float64 ret = float64_sub(arg1, arg2, &env->fp_status);
    int status = get_float_exception_flags(&env->fp_status);

    if (unlikely(status & float_flag_invalid)) {
        if (float64_is_infinity(arg1) && float64_is_infinity(arg2)) {
            /* Magnitude subtraction of infinities */
            float_invalid_op_excp(env, POWERPC_EXCP_FP_VXISI, 1);
        } else if (float64_is_signaling_nan(arg1, &env->fp_status) ||
                   float64_is_signaling_nan(arg2, &env->fp_status)) {
            /* sNaN subtraction */
            float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 1);
        }
    }

    return ret;
}
/* fmul - fmul. */
float64 helper_fmul(CPUPPCState *env, float64 arg1, float64 arg2)
{
    float64 ret = float64_mul(arg1, arg2, &env->fp_status);
    int status = get_float_exception_flags(&env->fp_status);

    if (unlikely(status & float_flag_invalid)) {
        if ((float64_is_infinity(arg1) && float64_is_zero(arg2)) ||
            (float64_is_zero(arg1) && float64_is_infinity(arg2))) {
            /* Multiplication of zero by infinity */
            float_invalid_op_excp(env, POWERPC_EXCP_FP_VXIMZ, 1);
        } else if (float64_is_signaling_nan(arg1, &env->fp_status) ||
                   float64_is_signaling_nan(arg2, &env->fp_status)) {
            /* sNaN multiplication */
            float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 1);
        }
    }

    return ret;
}
/* fdiv - fdiv. */
float64 helper_fdiv(CPUPPCState *env, float64 arg1, float64 arg2)
{
    float64 ret = float64_div(arg1, arg2, &env->fp_status);
    int status = get_float_exception_flags(&env->fp_status);

    if (unlikely(status)) {
        if (status & float_flag_invalid) {
            /* Determine what kind of invalid operation was seen.  */
            if (float64_is_infinity(arg1) && float64_is_infinity(arg2)) {
                /* Division of infinity by infinity */
                float_invalid_op_excp(env, POWERPC_EXCP_FP_VXIDI, 1);
            } else if (float64_is_zero(arg1) && float64_is_zero(arg2)) {
                /* Division of zero by zero */
                float_invalid_op_excp(env, POWERPC_EXCP_FP_VXZDZ, 1);
            } else if (float64_is_signaling_nan(arg1, &env->fp_status) ||
                       float64_is_signaling_nan(arg2, &env->fp_status)) {
                /* sNaN division */
                float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 1);
            }
        }
        if (status & float_flag_divbyzero) {
            float_zero_divide_excp(env, GETPC());
        }
    }

    return ret;
}
#define FPU_FCTI(op, cvt, nanval)                                      \
uint64_t helper_##op(CPUPPCState *env, uint64_t arg)                   \
{                                                                      \
    CPU_DoubleU farg;                                                  \
                                                                       \
    farg.ll = arg;                                                     \
    farg.ll = float64_to_##cvt(farg.d, &env->fp_status);               \
                                                                       \
    if (unlikely(env->fp_status.float_exception_flags)) {              \
        if (float64_is_any_nan(arg)) {                                 \
            float_invalid_op_excp(env, POWERPC_EXCP_FP_VXCVI, 1);      \
            if (float64_is_signaling_nan(arg, &env->fp_status)) {      \
                float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 1); \
            }                                                          \
            farg.ll = nanval;                                          \
        } else if (env->fp_status.float_exception_flags &              \
                   float_flag_invalid) {                               \
            float_invalid_op_excp(env, POWERPC_EXCP_FP_VXCVI, 1);      \
        }                                                              \
        float_check_status(env);                                       \
    }                                                                  \
    return farg.ll;                                                    \
}

FPU_FCTI(fctiw, int32, 0x80000000U)
FPU_FCTI(fctiwz, int32_round_to_zero, 0x80000000U)
FPU_FCTI(fctiwu, uint32, 0x00000000U)
FPU_FCTI(fctiwuz, uint32_round_to_zero, 0x00000000U)
FPU_FCTI(fctid, int64, 0x8000000000000000ULL)
FPU_FCTI(fctidz, int64_round_to_zero, 0x8000000000000000ULL)
FPU_FCTI(fctidu, uint64, 0x0000000000000000ULL)
FPU_FCTI(fctiduz, uint64_round_to_zero, 0x0000000000000000ULL)
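
/*
 * nanval is the value substituted when the source is a NaN: the most
 * negative integer for the signed conversions, zero for the unsigned
 * ones, which is what the Power ISA specifies for the fcti* family.
 */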
#define FPU_FCFI(op, cvtr, is_single)                      \
uint64_t helper_##op(CPUPPCState *env, uint64_t arg)       \
{                                                          \
    CPU_DoubleU farg;                                      \
                                                           \
    if (is_single) {                                       \
        float32 tmp = cvtr(arg, &env->fp_status);          \
        farg.d = float32_to_float64(tmp, &env->fp_status); \
    } else {                                               \
        farg.d = cvtr(arg, &env->fp_status);               \
    }                                                      \
    float_check_status(env);                               \
    return farg.ll;                                        \
}

FPU_FCFI(fcfid, int64_to_float64, 0)
FPU_FCFI(fcfids, int64_to_float32, 1)
FPU_FCFI(fcfidu, uint64_to_float64, 0)
FPU_FCFI(fcfidus, uint64_to_float32, 1)
static inline uint64_t do_fri(CPUPPCState *env, uint64_t arg,
                              int rounding_mode)
{
    CPU_DoubleU farg;

    farg.ll = arg;

    if (unlikely(float64_is_signaling_nan(farg.d, &env->fp_status))) {
        /* sNaN round */
        float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 1);
        farg.ll = arg | 0x0008000000000000ULL;
    } else {
        int inexact = get_float_exception_flags(&env->fp_status) &
                      float_flag_inexact;
        set_float_rounding_mode(rounding_mode, &env->fp_status);
        farg.ll = float64_round_to_int(farg.d, &env->fp_status);
        /* Restore rounding mode from FPSCR */
        fpscr_set_rounding_mode(env);

        /* fri* does not set FPSCR[XX] */
        if (!inexact) {
            env->fp_status.float_exception_flags &= ~float_flag_inexact;
        }
    }
    float_check_status(env);
    return farg.ll;
}

uint64_t helper_frin(CPUPPCState *env, uint64_t arg)
{
    return do_fri(env, arg, float_round_ties_away);
}

uint64_t helper_friz(CPUPPCState *env, uint64_t arg)
{
    return do_fri(env, arg, float_round_to_zero);
}

uint64_t helper_frip(CPUPPCState *env, uint64_t arg)
{
    return do_fri(env, arg, float_round_up);
}

uint64_t helper_frim(CPUPPCState *env, uint64_t arg)
{
    return do_fri(env, arg, float_round_down);
}
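
/*
 * Note that frin uses float_round_ties_away: the Power ISA defines frin
 * as round-to-nearest with ties away from zero, which is not the IEEE
 * default nearest-even.  Note also the inexact dance in do_fri: fri*
 * must not set FPSCR[XX], so a freshly raised inexact flag is cleared
 * again unless it was already pending before the rounding.
 */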
#define FPU_MADDSUB_UPDATE(NAME, TP)                                \
static void NAME(CPUPPCState *env, TP arg1, TP arg2, TP arg3,       \
                 unsigned int madd_flags)                           \
{                                                                   \
    if (TP##_is_signaling_nan(arg1, &env->fp_status) ||             \
        TP##_is_signaling_nan(arg2, &env->fp_status) ||             \
        TP##_is_signaling_nan(arg3, &env->fp_status)) {             \
        /* sNaN operation */                                        \
        float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 1);      \
    }                                                               \
    if ((TP##_is_infinity(arg1) && TP##_is_zero(arg2)) ||           \
        (TP##_is_zero(arg1) && TP##_is_infinity(arg2))) {           \
        /* Multiplication of zero by infinity */                    \
        float_invalid_op_excp(env, POWERPC_EXCP_FP_VXIMZ, 1);       \
    }                                                               \
    if ((TP##_is_infinity(arg1) || TP##_is_infinity(arg2)) &&       \
        TP##_is_infinity(arg3)) {                                   \
        uint8_t aSign, bSign, cSign;                                \
                                                                    \
        aSign = TP##_is_neg(arg1);                                  \
        bSign = TP##_is_neg(arg2);                                  \
        cSign = TP##_is_neg(arg3);                                  \
        if (madd_flags & float_muladd_negate_c) {                   \
            cSign ^= 1;                                             \
        }                                                           \
        if (aSign ^ bSign ^ cSign) {                                \
            float_invalid_op_excp(env, POWERPC_EXCP_FP_VXISI, 1);   \
        }                                                           \
    }                                                               \
}
FPU_MADDSUB_UPDATE(float32_maddsub_update_excp, float32)
FPU_MADDSUB_UPDATE(float64_maddsub_update_excp, float64)
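
/*
 * The sign test above is the usual trick: aSign ^ bSign is the sign of
 * the product a*b, so after folding in a negated addend, a set result
 * means the (infinite) product and the infinite addend have opposite
 * signs -- i.e. the muladd is a magnitude subtraction of infinities,
 * which must raise VXISI.
 */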
#define FPU_FMADD(op, madd_flags)                                \
uint64_t helper_##op(CPUPPCState *env, uint64_t arg1,            \
                     uint64_t arg2, uint64_t arg3)               \
{                                                                \
    uint32_t flags;                                              \
    float64 ret = float64_muladd(arg1, arg2, arg3, madd_flags,   \
                                 &env->fp_status);               \
    flags = get_float_exception_flags(&env->fp_status);          \
    if (flags) {                                                 \
        if (flags & float_flag_invalid) {                        \
            float64_maddsub_update_excp(env, arg1, arg2, arg3,   \
                                        madd_flags);             \
        }                                                        \
        float_check_status(env);                                 \
    }                                                            \
    return ret;                                                  \
}

#define MADD_FLGS 0
#define MSUB_FLGS float_muladd_negate_c
#define NMADD_FLGS float_muladd_negate_result
#define NMSUB_FLGS (float_muladd_negate_c | float_muladd_negate_result)

FPU_FMADD(fmadd, MADD_FLGS)
FPU_FMADD(fnmadd, NMADD_FLGS)
FPU_FMADD(fmsub, MSUB_FLGS)
FPU_FMADD(fnmsub, NMSUB_FLGS)
/* frsp - frsp. */
uint64_t helper_frsp(CPUPPCState *env, uint64_t arg)
{
    CPU_DoubleU farg;
    float32 f32;

    farg.ll = arg;

    if (unlikely(float64_is_signaling_nan(farg.d, &env->fp_status))) {
        /* sNaN round to single */
        float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 1);
    }
    f32 = float64_to_float32(farg.d, &env->fp_status);
    farg.d = float32_to_float64(f32, &env->fp_status);

    return farg.ll;
}
/* fsqrt - fsqrt. */
float64 helper_fsqrt(CPUPPCState *env, float64 arg)
{
    float64 ret = float64_sqrt(arg, &env->fp_status);
    int status = get_float_exception_flags(&env->fp_status);

    if (unlikely(status & float_flag_invalid)) {
        if (unlikely(float64_is_any_nan(arg))) {
            if (unlikely(float64_is_signaling_nan(arg, &env->fp_status))) {
                /* sNaN square root */
                float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 1);
            }
        } else {
            /* Square root of a negative nonzero number */
            float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSQRT, 1);
        }
    }

    return ret;
}
/* fre - fre. */
float64 helper_fre(CPUPPCState *env, float64 arg)
{
    /* "Estimate" the reciprocal with actual division.  */
    float64 ret = float64_div(float64_one, arg, &env->fp_status);
    int status = get_float_exception_flags(&env->fp_status);

    if (unlikely(status)) {
        if (status & float_flag_invalid) {
            if (float64_is_signaling_nan(arg, &env->fp_status)) {
                /* sNaN reciprocal */
                float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 1);
            }
        }
        if (status & float_flag_divbyzero) {
            float_zero_divide_excp(env, GETPC());
            /* For FPSCR.ZE == 0, the result is 1/2.  */
            ret = float64_set_sign(float64_half, float64_is_neg(arg));
        }
    }

    return ret;
}
/* fres - fres. */
uint64_t helper_fres(CPUPPCState *env, uint64_t arg)
{
    CPU_DoubleU farg;
    float32 f32;

    farg.ll = arg;

    if (unlikely(float64_is_signaling_nan(farg.d, &env->fp_status))) {
        /* sNaN reciprocal */
        float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 1);
    }
    farg.d = float64_div(float64_one, farg.d, &env->fp_status);
    f32 = float64_to_float32(farg.d, &env->fp_status);
    farg.d = float32_to_float64(f32, &env->fp_status);

    return farg.ll;
}
/* frsqrte - frsqrte. */
float64 helper_frsqrte(CPUPPCState *env, float64 arg)
{
    /* "Estimate" the reciprocal with actual division.  */
    float64 rets = float64_sqrt(arg, &env->fp_status);
    float64 retd = float64_div(float64_one, rets, &env->fp_status);
    int status = get_float_exception_flags(&env->fp_status);

    if (unlikely(status)) {
        if (status & float_flag_invalid) {
            if (float64_is_signaling_nan(arg, &env->fp_status)) {
                /* sNaN reciprocal */
                float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 1);
            } else {
                /* Square root of a negative nonzero number */
                float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSQRT, 1);
            }
        }
        if (status & float_flag_divbyzero) {
            /* Reciprocal of (square root of) zero.  */
            float_zero_divide_excp(env, GETPC());
        }
    }

    return retd;
}
/* fsel - fsel. */
uint64_t helper_fsel(CPUPPCState *env, uint64_t arg1, uint64_t arg2,
                     uint64_t arg3)
{
    CPU_DoubleU farg1;

    farg1.ll = arg1;

    if ((!float64_is_neg(farg1.d) || float64_is_zero(farg1.d)) &&
        !float64_is_any_nan(farg1.d)) {
        return arg2;
    } else {
        return arg3;
    }
}
uint32_t helper_ftdiv(uint64_t fra, uint64_t frb)
{
    int fe_flag = 0;
    int fg_flag = 0;

    if (unlikely(float64_is_infinity(fra) ||
                 float64_is_infinity(frb) ||
                 float64_is_zero(frb))) {
        fe_flag = 1;
        fg_flag = 1;
    } else {
        int e_a = ppc_float64_get_unbiased_exp(fra);
        int e_b = ppc_float64_get_unbiased_exp(frb);

        if (unlikely(float64_is_any_nan(fra) ||
                     float64_is_any_nan(frb))) {
            fe_flag = 1;
        } else if ((e_b <= -1022) || (e_b >= 1021)) {
            fe_flag = 1;
        } else if (!float64_is_zero(fra) &&
                   (((e_a - e_b) >= 1023) ||
                    ((e_a - e_b) <= -1021) ||
                    (e_a <= -970))) {
            fe_flag = 1;
        }

        if (unlikely(float64_is_zero_or_denormal(frb))) {
            /* XB is not zero because of the above check and */
            /* so must be denormalized.                      */
            fg_flag = 1;
        }
    }

    return 0x8 | (fg_flag ? 4 : 0) | (fe_flag ? 2 : 0);
}
uint32_t helper_ftsqrt(uint64_t frb)
{
    int fe_flag = 0;
    int fg_flag = 0;

    if (unlikely(float64_is_infinity(frb) || float64_is_zero(frb))) {
        fe_flag = 1;
        fg_flag = 1;
    } else {
        int e_b = ppc_float64_get_unbiased_exp(frb);

        if (unlikely(float64_is_any_nan(frb))) {
            fe_flag = 1;
        } else if (unlikely(float64_is_zero(frb))) {
            fe_flag = 1;
        } else if (unlikely(float64_is_neg(frb))) {
            fe_flag = 1;
        } else if (!float64_is_zero(frb) && (e_b <= (-1022 + 52))) {
            fe_flag = 1;
        }

        if (unlikely(float64_is_zero_or_denormal(frb))) {
            /* XB is not zero because of the above check and */
            /* therefore must be denormalized.               */
            fg_flag = 1;
        }
    }

    return 0x8 | (fg_flag ? 4 : 0) | (fe_flag ? 2 : 0);
}
void helper_fcmpu(CPUPPCState *env, uint64_t arg1, uint64_t arg2,
                  uint32_t crfD)
{
    CPU_DoubleU farg1, farg2;
    uint32_t ret = 0;

    farg1.ll = arg1;
    farg2.ll = arg2;

    if (unlikely(float64_is_any_nan(farg1.d) ||
                 float64_is_any_nan(farg2.d))) {
        ret = 0x01UL;
    } else if (float64_lt(farg1.d, farg2.d, &env->fp_status)) {
        ret = 0x08UL;
    } else if (!float64_le(farg1.d, farg2.d, &env->fp_status)) {
        ret = 0x04UL;
    } else {
        ret = 0x02UL;
    }

    env->fpscr &= ~(0x0F << FPSCR_FPRF);
    env->fpscr |= ret << FPSCR_FPRF;
    env->crf[crfD] = ret;
    if (unlikely(ret == 0x01UL
                 && (float64_is_signaling_nan(farg1.d, &env->fp_status) ||
                     float64_is_signaling_nan(farg2.d, &env->fp_status)))) {
        /* sNaN comparison */
        float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 1);
    }
}
void helper_fcmpo(CPUPPCState *env, uint64_t arg1, uint64_t arg2,
                  uint32_t crfD)
{
    CPU_DoubleU farg1, farg2;
    uint32_t ret = 0;

    farg1.ll = arg1;
    farg2.ll = arg2;

    if (unlikely(float64_is_any_nan(farg1.d) ||
                 float64_is_any_nan(farg2.d))) {
        ret = 0x01UL;
    } else if (float64_lt(farg1.d, farg2.d, &env->fp_status)) {
        ret = 0x08UL;
    } else if (!float64_le(farg1.d, farg2.d, &env->fp_status)) {
        ret = 0x04UL;
    } else {
        ret = 0x02UL;
    }

    env->fpscr &= ~(0x0F << FPSCR_FPRF);
    env->fpscr |= ret << FPSCR_FPRF;
    env->crf[crfD] = ret;
    if (unlikely(ret == 0x01UL)) {
        if (float64_is_signaling_nan(farg1.d, &env->fp_status) ||
            float64_is_signaling_nan(farg2.d, &env->fp_status)) {
            /* sNaN comparison */
            float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN |
                                  POWERPC_EXCP_FP_VXVC, 1);
        } else {
            /* qNaN comparison */
            float_invalid_op_excp(env, POWERPC_EXCP_FP_VXVC, 1);
        }
    }
}
/* Single-precision floating-point conversions */
static inline uint32_t efscfsi(CPUPPCState *env, uint32_t val)
{
    CPU_FloatU u;

    u.f = int32_to_float32(val, &env->vec_status);

    return u.l;
}

static inline uint32_t efscfui(CPUPPCState *env, uint32_t val)
{
    CPU_FloatU u;

    u.f = uint32_to_float32(val, &env->vec_status);

    return u.l;
}

static inline int32_t efsctsi(CPUPPCState *env, uint32_t val)
{
    CPU_FloatU u;

    u.l = val;
    /* NaNs are not treated the way IEEE 754 specifies */
    if (unlikely(float32_is_quiet_nan(u.f, &env->vec_status))) {
        return 0;
    }

    return float32_to_int32(u.f, &env->vec_status);
}

static inline uint32_t efsctui(CPUPPCState *env, uint32_t val)
{
    CPU_FloatU u;

    u.l = val;
    /* NaNs are not treated the way IEEE 754 specifies */
    if (unlikely(float32_is_quiet_nan(u.f, &env->vec_status))) {
        return 0;
    }

    return float32_to_uint32(u.f, &env->vec_status);
}

static inline uint32_t efsctsiz(CPUPPCState *env, uint32_t val)
{
    CPU_FloatU u;

    u.l = val;
    /* NaNs are not treated the way IEEE 754 specifies */
    if (unlikely(float32_is_quiet_nan(u.f, &env->vec_status))) {
        return 0;
    }

    return float32_to_int32_round_to_zero(u.f, &env->vec_status);
}

static inline uint32_t efsctuiz(CPUPPCState *env, uint32_t val)
{
    CPU_FloatU u;

    u.l = val;
    /* NaNs are not treated the way IEEE 754 specifies */
    if (unlikely(float32_is_quiet_nan(u.f, &env->vec_status))) {
        return 0;
    }

    return float32_to_uint32_round_to_zero(u.f, &env->vec_status);
}

static inline uint32_t efscfsf(CPUPPCState *env, uint32_t val)
{
    CPU_FloatU u;
    float32 tmp;

    u.f = int32_to_float32(val, &env->vec_status);
    tmp = int64_to_float32(1ULL << 32, &env->vec_status);
    u.f = float32_div(u.f, tmp, &env->vec_status);

    return u.l;
}

static inline uint32_t efscfuf(CPUPPCState *env, uint32_t val)
{
    CPU_FloatU u;
    float32 tmp;

    u.f = uint32_to_float32(val, &env->vec_status);
    tmp = uint64_to_float32(1ULL << 32, &env->vec_status);
    u.f = float32_div(u.f, tmp, &env->vec_status);

    return u.l;
}

static inline uint32_t efsctsf(CPUPPCState *env, uint32_t val)
{
    CPU_FloatU u;
    float32 tmp;

    u.l = val;
    /* NaNs are not treated the way IEEE 754 specifies */
    if (unlikely(float32_is_quiet_nan(u.f, &env->vec_status))) {
        return 0;
    }
    tmp = uint64_to_float32(1ULL << 32, &env->vec_status);
    u.f = float32_mul(u.f, tmp, &env->vec_status);

    return float32_to_int32(u.f, &env->vec_status);
}

static inline uint32_t efsctuf(CPUPPCState *env, uint32_t val)
{
    CPU_FloatU u;
    float32 tmp;

    u.l = val;
    /* NaNs are not treated the way IEEE 754 specifies */
    if (unlikely(float32_is_quiet_nan(u.f, &env->vec_status))) {
        return 0;
    }
    tmp = uint64_to_float32(1ULL << 32, &env->vec_status);
    u.f = float32_mul(u.f, tmp, &env->vec_status);

    return float32_to_uint32(u.f, &env->vec_status);
}
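
/*
 * The *sf/*uf variants convert to and from the SPE fractional formats,
 * which treat the 32-bit operand as a fixed-point fraction with the
 * binary point at the top of the word -- hence the scaling by 2**32 on
 * either side of the float conversion.
 */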
#define HELPER_SPE_SINGLE_CONV(name)                        \
    uint32_t helper_e##name(CPUPPCState *env, uint32_t val) \
    {                                                       \
        return e##name(env, val);                           \
    }
/* efscfsi */
HELPER_SPE_SINGLE_CONV(fscfsi);
/* efscfui */
HELPER_SPE_SINGLE_CONV(fscfui);
/* efscfuf */
HELPER_SPE_SINGLE_CONV(fscfuf);
/* efscfsf */
HELPER_SPE_SINGLE_CONV(fscfsf);
/* efsctsi */
HELPER_SPE_SINGLE_CONV(fsctsi);
/* efsctui */
HELPER_SPE_SINGLE_CONV(fsctui);
/* efsctsiz */
HELPER_SPE_SINGLE_CONV(fsctsiz);
/* efsctuiz */
HELPER_SPE_SINGLE_CONV(fsctuiz);
/* efsctsf */
HELPER_SPE_SINGLE_CONV(fsctsf);
/* efsctuf */
HELPER_SPE_SINGLE_CONV(fsctuf);
#define HELPER_SPE_VECTOR_CONV(name)                            \
    uint64_t helper_ev##name(CPUPPCState *env, uint64_t val)    \
    {                                                           \
        return ((uint64_t)e##name(env, val >> 32) << 32) |      \
               (uint64_t)e##name(env, val);                     \
    }
/* evfscfsi */
HELPER_SPE_VECTOR_CONV(fscfsi);
/* evfscfui */
HELPER_SPE_VECTOR_CONV(fscfui);
/* evfscfuf */
HELPER_SPE_VECTOR_CONV(fscfuf);
/* evfscfsf */
HELPER_SPE_VECTOR_CONV(fscfsf);
/* evfsctsi */
HELPER_SPE_VECTOR_CONV(fsctsi);
/* evfsctui */
HELPER_SPE_VECTOR_CONV(fsctui);
/* evfsctsiz */
HELPER_SPE_VECTOR_CONV(fsctsiz);
/* evfsctuiz */
HELPER_SPE_VECTOR_CONV(fsctuiz);
/* evfsctsf */
HELPER_SPE_VECTOR_CONV(fsctsf);
/* evfsctuf */
HELPER_SPE_VECTOR_CONV(fsctuf);
/* Single-precision floating-point arithmetic */
static inline uint32_t efsadd(CPUPPCState *env, uint32_t op1, uint32_t op2)
{
    CPU_FloatU u1, u2;

    u1.l = op1;
    u2.l = op2;
    u1.f = float32_add(u1.f, u2.f, &env->vec_status);
    return u1.l;
}

static inline uint32_t efssub(CPUPPCState *env, uint32_t op1, uint32_t op2)
{
    CPU_FloatU u1, u2;

    u1.l = op1;
    u2.l = op2;
    u1.f = float32_sub(u1.f, u2.f, &env->vec_status);
    return u1.l;
}

static inline uint32_t efsmul(CPUPPCState *env, uint32_t op1, uint32_t op2)
{
    CPU_FloatU u1, u2;

    u1.l = op1;
    u2.l = op2;
    u1.f = float32_mul(u1.f, u2.f, &env->vec_status);
    return u1.l;
}

static inline uint32_t efsdiv(CPUPPCState *env, uint32_t op1, uint32_t op2)
{
    CPU_FloatU u1, u2;

    u1.l = op1;
    u2.l = op2;
    u1.f = float32_div(u1.f, u2.f, &env->vec_status);
    return u1.l;
}
#define HELPER_SPE_SINGLE_ARITH(name)                                     \
    uint32_t helper_e##name(CPUPPCState *env, uint32_t op1, uint32_t op2) \
    {                                                                     \
        return e##name(env, op1, op2);                                    \
    }
/* efsadd */
HELPER_SPE_SINGLE_ARITH(fsadd);
/* efssub */
HELPER_SPE_SINGLE_ARITH(fssub);
/* efsmul */
HELPER_SPE_SINGLE_ARITH(fsmul);
/* efsdiv */
HELPER_SPE_SINGLE_ARITH(fsdiv);

#define HELPER_SPE_VECTOR_ARITH(name)                                      \
    uint64_t helper_ev##name(CPUPPCState *env, uint64_t op1, uint64_t op2) \
    {                                                                      \
        return ((uint64_t)e##name(env, op1 >> 32, op2 >> 32) << 32) |      \
               (uint64_t)e##name(env, op1, op2);                           \
    }
/* evfsadd */
HELPER_SPE_VECTOR_ARITH(fsadd);
/* evfssub */
HELPER_SPE_VECTOR_ARITH(fssub);
/* evfsmul */
HELPER_SPE_VECTOR_ARITH(fsmul);
/* evfsdiv */
HELPER_SPE_VECTOR_ARITH(fsdiv);
/* Single-precision floating-point comparisons */
static inline uint32_t efscmplt(CPUPPCState *env, uint32_t op1, uint32_t op2)
{
    CPU_FloatU u1, u2;

    u1.l = op1;
    u2.l = op2;
    return float32_lt(u1.f, u2.f, &env->vec_status) ? 4 : 0;
}

static inline uint32_t efscmpgt(CPUPPCState *env, uint32_t op1, uint32_t op2)
{
    CPU_FloatU u1, u2;

    u1.l = op1;
    u2.l = op2;
    return float32_le(u1.f, u2.f, &env->vec_status) ? 0 : 4;
}

static inline uint32_t efscmpeq(CPUPPCState *env, uint32_t op1, uint32_t op2)
{
    CPU_FloatU u1, u2;

    u1.l = op1;
    u2.l = op2;
    return float32_eq(u1.f, u2.f, &env->vec_status) ? 4 : 0;
}

static inline uint32_t efststlt(CPUPPCState *env, uint32_t op1, uint32_t op2)
{
    /* XXX: TODO: ignore special values (NaN, infinities, ...) */
    return efscmplt(env, op1, op2);
}

static inline uint32_t efststgt(CPUPPCState *env, uint32_t op1, uint32_t op2)
{
    /* XXX: TODO: ignore special values (NaN, infinities, ...) */
    return efscmpgt(env, op1, op2);
}

static inline uint32_t efststeq(CPUPPCState *env, uint32_t op1, uint32_t op2)
{
    /* XXX: TODO: ignore special values (NaN, infinities, ...) */
    return efscmpeq(env, op1, op2);
}

#define HELPER_SINGLE_SPE_CMP(name)                                       \
    uint32_t helper_e##name(CPUPPCState *env, uint32_t op1, uint32_t op2) \
    {                                                                     \
        return e##name(env, op1, op2);                                    \
    }
/* efststlt */
HELPER_SINGLE_SPE_CMP(fststlt);
/* efststgt */
HELPER_SINGLE_SPE_CMP(fststgt);
/* efststeq */
HELPER_SINGLE_SPE_CMP(fststeq);
/* efscmplt */
HELPER_SINGLE_SPE_CMP(fscmplt);
/* efscmpgt */
HELPER_SINGLE_SPE_CMP(fscmpgt);
/* efscmpeq */
HELPER_SINGLE_SPE_CMP(fscmpeq);
static inline uint32_t evcmp_merge(int t0, int t1)
{
    return (t0 << 3) | (t1 << 2) | ((t0 | t1) << 1) | (t0 & t1);
}
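
/*
 * The merged value becomes a CR field: bit 3 reflects the high element,
 * bit 2 the low element, bit 1 "either lane", and bit 0 "both lanes",
 * which is how the SPE vector compares summarize their two per-lane
 * results.
 */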
#define HELPER_VECTOR_SPE_CMP(name)                                        \
    uint32_t helper_ev##name(CPUPPCState *env, uint64_t op1, uint64_t op2) \
    {                                                                      \
        return evcmp_merge(e##name(env, op1 >> 32, op2 >> 32),             \
                           e##name(env, op1, op2));                        \
    }
/* evfststlt */
HELPER_VECTOR_SPE_CMP(fststlt);
/* evfststgt */
HELPER_VECTOR_SPE_CMP(fststgt);
/* evfststeq */
HELPER_VECTOR_SPE_CMP(fststeq);
/* evfscmplt */
HELPER_VECTOR_SPE_CMP(fscmplt);
/* evfscmpgt */
HELPER_VECTOR_SPE_CMP(fscmpgt);
/* evfscmpeq */
HELPER_VECTOR_SPE_CMP(fscmpeq);
/* Double-precision floating-point conversions */
uint64_t helper_efdcfsi(CPUPPCState *env, uint32_t val)
{
    CPU_DoubleU u;

    u.d = int32_to_float64(val, &env->vec_status);

    return u.ll;
}

uint64_t helper_efdcfsid(CPUPPCState *env, uint64_t val)
{
    CPU_DoubleU u;

    u.d = int64_to_float64(val, &env->vec_status);

    return u.ll;
}

uint64_t helper_efdcfui(CPUPPCState *env, uint32_t val)
{
    CPU_DoubleU u;

    u.d = uint32_to_float64(val, &env->vec_status);

    return u.ll;
}

uint64_t helper_efdcfuid(CPUPPCState *env, uint64_t val)
{
    CPU_DoubleU u;

    u.d = uint64_to_float64(val, &env->vec_status);

    return u.ll;
}

uint32_t helper_efdctsi(CPUPPCState *env, uint64_t val)
{
    CPU_DoubleU u;

    u.ll = val;
    /* NaNs are not treated the way IEEE 754 specifies */
    if (unlikely(float64_is_any_nan(u.d))) {
        return 0;
    }

    return float64_to_int32(u.d, &env->vec_status);
}

uint32_t helper_efdctui(CPUPPCState *env, uint64_t val)
{
    CPU_DoubleU u;

    u.ll = val;
    /* NaNs are not treated the way IEEE 754 specifies */
    if (unlikely(float64_is_any_nan(u.d))) {
        return 0;
    }

    return float64_to_uint32(u.d, &env->vec_status);
}

uint32_t helper_efdctsiz(CPUPPCState *env, uint64_t val)
{
    CPU_DoubleU u;

    u.ll = val;
    /* NaNs are not treated the way IEEE 754 specifies */
    if (unlikely(float64_is_any_nan(u.d))) {
        return 0;
    }

    return float64_to_int32_round_to_zero(u.d, &env->vec_status);
}

uint64_t helper_efdctsidz(CPUPPCState *env, uint64_t val)
{
    CPU_DoubleU u;

    u.ll = val;
    /* NaNs are not treated the way IEEE 754 specifies */
    if (unlikely(float64_is_any_nan(u.d))) {
        return 0;
    }

    return float64_to_int64_round_to_zero(u.d, &env->vec_status);
}

uint32_t helper_efdctuiz(CPUPPCState *env, uint64_t val)
{
    CPU_DoubleU u;

    u.ll = val;
    /* NaNs are not treated the way IEEE 754 specifies */
    if (unlikely(float64_is_any_nan(u.d))) {
        return 0;
    }

    return float64_to_uint32_round_to_zero(u.d, &env->vec_status);
}

uint64_t helper_efdctuidz(CPUPPCState *env, uint64_t val)
{
    CPU_DoubleU u;

    u.ll = val;
    /* NaNs are not treated the way IEEE 754 specifies */
    if (unlikely(float64_is_any_nan(u.d))) {
        return 0;
    }

    return float64_to_uint64_round_to_zero(u.d, &env->vec_status);
}

uint64_t helper_efdcfsf(CPUPPCState *env, uint32_t val)
{
    CPU_DoubleU u;
    float64 tmp;

    u.d = int32_to_float64(val, &env->vec_status);
    tmp = int64_to_float64(1ULL << 32, &env->vec_status);
    u.d = float64_div(u.d, tmp, &env->vec_status);

    return u.ll;
}

uint64_t helper_efdcfuf(CPUPPCState *env, uint32_t val)
{
    CPU_DoubleU u;
    float64 tmp;

    u.d = uint32_to_float64(val, &env->vec_status);
    tmp = int64_to_float64(1ULL << 32, &env->vec_status);
    u.d = float64_div(u.d, tmp, &env->vec_status);

    return u.ll;
}

uint32_t helper_efdctsf(CPUPPCState *env, uint64_t val)
{
    CPU_DoubleU u;
    float64 tmp;

    u.ll = val;
    /* NaNs are not treated the way IEEE 754 specifies */
    if (unlikely(float64_is_any_nan(u.d))) {
        return 0;
    }
    tmp = uint64_to_float64(1ULL << 32, &env->vec_status);
    u.d = float64_mul(u.d, tmp, &env->vec_status);

    return float64_to_int32(u.d, &env->vec_status);
}

uint32_t helper_efdctuf(CPUPPCState *env, uint64_t val)
{
    CPU_DoubleU u;
    float64 tmp;

    u.ll = val;
    /* NaNs are not treated the way IEEE 754 specifies */
    if (unlikely(float64_is_any_nan(u.d))) {
        return 0;
    }
    tmp = uint64_to_float64(1ULL << 32, &env->vec_status);
    u.d = float64_mul(u.d, tmp, &env->vec_status);

    return float64_to_uint32(u.d, &env->vec_status);
}

uint32_t helper_efscfd(CPUPPCState *env, uint64_t val)
{
    CPU_DoubleU u1;
    CPU_FloatU u2;

    u1.ll = val;
    u2.f = float64_to_float32(u1.d, &env->vec_status);

    return u2.l;
}

uint64_t helper_efdcfs(CPUPPCState *env, uint32_t val)
{
    CPU_DoubleU u2;
    CPU_FloatU u1;

    u1.l = val;
    u2.d = float32_to_float64(u1.f, &env->vec_status);

    return u2.ll;
}
/* Double-precision floating-point arithmetic */
uint64_t helper_efdadd(CPUPPCState *env, uint64_t op1, uint64_t op2)
{
    CPU_DoubleU u1, u2;

    u1.ll = op1;
    u2.ll = op2;
    u1.d = float64_add(u1.d, u2.d, &env->vec_status);
    return u1.ll;
}

uint64_t helper_efdsub(CPUPPCState *env, uint64_t op1, uint64_t op2)
{
    CPU_DoubleU u1, u2;

    u1.ll = op1;
    u2.ll = op2;
    u1.d = float64_sub(u1.d, u2.d, &env->vec_status);
    return u1.ll;
}

uint64_t helper_efdmul(CPUPPCState *env, uint64_t op1, uint64_t op2)
{
    CPU_DoubleU u1, u2;

    u1.ll = op1;
    u2.ll = op2;
    u1.d = float64_mul(u1.d, u2.d, &env->vec_status);
    return u1.ll;
}

uint64_t helper_efddiv(CPUPPCState *env, uint64_t op1, uint64_t op2)
{
    CPU_DoubleU u1, u2;

    u1.ll = op1;
    u2.ll = op2;
    u1.d = float64_div(u1.d, u2.d, &env->vec_status);
    return u1.ll;
}
/* Double-precision floating-point helpers */
uint32_t helper_efdtstlt(CPUPPCState *env, uint64_t op1, uint64_t op2)
{
    CPU_DoubleU u1, u2;

    u1.ll = op1;
    u2.ll = op2;
    return float64_lt(u1.d, u2.d, &env->vec_status) ? 4 : 0;
}

uint32_t helper_efdtstgt(CPUPPCState *env, uint64_t op1, uint64_t op2)
{
    CPU_DoubleU u1, u2;

    u1.ll = op1;
    u2.ll = op2;
    return float64_le(u1.d, u2.d, &env->vec_status) ? 0 : 4;
}

uint32_t helper_efdtsteq(CPUPPCState *env, uint64_t op1, uint64_t op2)
{
    CPU_DoubleU u1, u2;

    u1.ll = op1;
    u2.ll = op2;
    return float64_eq_quiet(u1.d, u2.d, &env->vec_status) ? 4 : 0;
}

uint32_t helper_efdcmplt(CPUPPCState *env, uint64_t op1, uint64_t op2)
{
    /* XXX: TODO: test special values (NaN, infinities, ...) */
    return helper_efdtstlt(env, op1, op2);
}

uint32_t helper_efdcmpgt(CPUPPCState *env, uint64_t op1, uint64_t op2)
{
    /* XXX: TODO: test special values (NaN, infinities, ...) */
    return helper_efdtstgt(env, op1, op2);
}

uint32_t helper_efdcmpeq(CPUPPCState *env, uint64_t op1, uint64_t op2)
{
    /* XXX: TODO: test special values (NaN, infinities, ...) */
    return helper_efdtsteq(env, op1, op2);
}
#define float64_to_float64(x, env) x
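
/*
 * Identity stub, presumably so the type-generic VSX macros can expand
 * tp##_to_float64 uniformly even when tp is already float64.
 */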
/* VSX_ADD_SUB - VSX floating point add/subtract
 *   name  - instruction mnemonic
 *   op    - operation (add or sub)
 *   nels  - number of elements (1, 2 or 4)
 *   tp    - type (float32 or float64)
 *   fld   - vsr_t field (VsrD(*) or VsrW(*))
 *   sfprf - set FPRF
 *   r2sp  - round intermediate result to single precision
 */
#define VSX_ADD_SUB(name, op, nels, tp, fld, sfprf, r2sp)                    \
void helper_##name(CPUPPCState *env, uint32_t opcode)                        \
{                                                                            \
    ppc_vsr_t xt, xa, xb;                                                    \
    int i;                                                                   \
                                                                             \
    getVSR(xA(opcode), &xa, env);                                            \
    getVSR(xB(opcode), &xb, env);                                            \
    getVSR(xT(opcode), &xt, env);                                            \
    helper_reset_fpstatus(env);                                              \
                                                                             \
    for (i = 0; i < nels; i++) {                                             \
        float_status tstat = env->fp_status;                                 \
        set_float_exception_flags(0, &tstat);                                \
        xt.fld = tp##_##op(xa.fld, xb.fld, &tstat);                          \
        env->fp_status.float_exception_flags |= tstat.float_exception_flags; \
                                                                             \
        if (unlikely(tstat.float_exception_flags & float_flag_invalid)) {    \
            if (tp##_is_infinity(xa.fld) && tp##_is_infinity(xb.fld)) {      \
                float_invalid_op_excp(env, POWERPC_EXCP_FP_VXISI, sfprf);    \
            } else if (tp##_is_signaling_nan(xa.fld, &tstat) ||              \
                       tp##_is_signaling_nan(xb.fld, &tstat)) {              \
                float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, sfprf);   \
            }                                                                \
        }                                                                    \
                                                                             \
        if (r2sp) {                                                          \
            xt.fld = helper_frsp(env, xt.fld);                               \
        }                                                                    \
                                                                             \
        if (sfprf) {                                                         \
            helper_compute_fprf_float64(env, xt.fld);                        \
        }                                                                    \
    }                                                                        \
    putVSR(xT(opcode), &xt, env);                                            \
    float_check_status(env);                                                 \
}
VSX_ADD_SUB(xsadddp, add, 1, float64, VsrD(0), 1, 0)
VSX_ADD_SUB(xsaddsp, add, 1, float64, VsrD(0), 1, 1)
VSX_ADD_SUB(xvadddp, add, 2, float64, VsrD(i), 0, 0)
VSX_ADD_SUB(xvaddsp, add, 4, float32, VsrW(i), 0, 0)
VSX_ADD_SUB(xssubdp, sub, 1, float64, VsrD(0), 1, 0)
VSX_ADD_SUB(xssubsp, sub, 1, float64, VsrD(0), 1, 1)
VSX_ADD_SUB(xvsubdp, sub, 2, float64, VsrD(i), 0, 0)
VSX_ADD_SUB(xvsubsp, sub, 4, float32, VsrW(i), 0, 0)
void helper_xsaddqp(CPUPPCState *env, uint32_t opcode)
{
    ppc_vsr_t xt, xa, xb;
    float_status tstat;

    getVSR(rA(opcode) + 32, &xa, env);
    getVSR(rB(opcode) + 32, &xb, env);
    getVSR(rD(opcode) + 32, &xt, env);
    helper_reset_fpstatus(env);

    tstat = env->fp_status;
    if (unlikely(Rc(opcode) != 0)) {
        tstat.float_rounding_mode = float_round_to_odd;
    }

    set_float_exception_flags(0, &tstat);
    xt.f128 = float128_add(xa.f128, xb.f128, &tstat);
    env->fp_status.float_exception_flags |= tstat.float_exception_flags;

    if (unlikely(tstat.float_exception_flags & float_flag_invalid)) {
        if (float128_is_infinity(xa.f128) && float128_is_infinity(xb.f128)) {
            /* Magnitude subtraction of infinities */
            float_invalid_op_excp(env, POWERPC_EXCP_FP_VXISI, 1);
        } else if (float128_is_signaling_nan(xa.f128, &tstat) ||
                   float128_is_signaling_nan(xb.f128, &tstat)) {
            float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 1);
        }
    }

    helper_compute_fprf_float128(env, xt.f128);

    putVSR(rD(opcode) + 32, &xt, env);
    float_check_status(env);
}
/* VSX_MUL - VSX floating point multiply
 *   op    - instruction mnemonic
 *   nels  - number of elements (1, 2 or 4)
 *   tp    - type (float32 or float64)
 *   fld   - vsr_t field (VsrD(*) or VsrW(*))
 *   sfprf - set FPRF
 *   r2sp  - round intermediate result to single precision
 */
#define VSX_MUL(op, nels, tp, fld, sfprf, r2sp)                              \
void helper_##op(CPUPPCState *env, uint32_t opcode)                          \
{                                                                            \
    ppc_vsr_t xt, xa, xb;                                                    \
    int i;                                                                   \
                                                                             \
    getVSR(xA(opcode), &xa, env);                                            \
    getVSR(xB(opcode), &xb, env);                                            \
    getVSR(xT(opcode), &xt, env);                                            \
    helper_reset_fpstatus(env);                                              \
                                                                             \
    for (i = 0; i < nels; i++) {                                             \
        float_status tstat = env->fp_status;                                 \
        set_float_exception_flags(0, &tstat);                                \
        xt.fld = tp##_mul(xa.fld, xb.fld, &tstat);                           \
        env->fp_status.float_exception_flags |= tstat.float_exception_flags; \
                                                                             \
        if (unlikely(tstat.float_exception_flags & float_flag_invalid)) {    \
            if ((tp##_is_infinity(xa.fld) && tp##_is_zero(xb.fld)) ||        \
                (tp##_is_infinity(xb.fld) && tp##_is_zero(xa.fld))) {        \
                float_invalid_op_excp(env, POWERPC_EXCP_FP_VXIMZ, sfprf);    \
            } else if (tp##_is_signaling_nan(xa.fld, &tstat) ||              \
                       tp##_is_signaling_nan(xb.fld, &tstat)) {              \
                float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, sfprf);   \
            }                                                                \
        }                                                                    \
                                                                             \
        if (r2sp) {                                                          \
            xt.fld = helper_frsp(env, xt.fld);                               \
        }                                                                    \
                                                                             \
        if (sfprf) {                                                         \
            helper_compute_fprf_float64(env, xt.fld);                        \
        }                                                                    \
    }                                                                        \
                                                                             \
    putVSR(xT(opcode), &xt, env);                                            \
    float_check_status(env);                                                 \
}
VSX_MUL(xsmuldp, 1, float64, VsrD(0), 1, 0)
VSX_MUL(xsmulsp, 1, float64, VsrD(0), 1, 1)
VSX_MUL(xvmuldp, 2, float64, VsrD(i), 0, 0)
VSX_MUL(xvmulsp, 4, float32, VsrW(i), 0, 0)
void helper_xsmulqp(CPUPPCState *env, uint32_t opcode)
{
    ppc_vsr_t xt, xa, xb;
    float_status tstat;

    getVSR(rA(opcode) + 32, &xa, env);
    getVSR(rB(opcode) + 32, &xb, env);
    getVSR(rD(opcode) + 32, &xt, env);

    helper_reset_fpstatus(env);
    tstat = env->fp_status;
    if (unlikely(Rc(opcode) != 0)) {
        tstat.float_rounding_mode = float_round_to_odd;
    }

    set_float_exception_flags(0, &tstat);
    xt.f128 = float128_mul(xa.f128, xb.f128, &tstat);
    env->fp_status.float_exception_flags |= tstat.float_exception_flags;

    if (unlikely(tstat.float_exception_flags & float_flag_invalid)) {
        if ((float128_is_infinity(xa.f128) && float128_is_zero(xb.f128)) ||
            (float128_is_infinity(xb.f128) && float128_is_zero(xa.f128))) {
            /* Multiplication of zero by infinity */
            float_invalid_op_excp(env, POWERPC_EXCP_FP_VXIMZ, 1);
        } else if (float128_is_signaling_nan(xa.f128, &tstat) ||
                   float128_is_signaling_nan(xb.f128, &tstat)) {
            float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 1);
        }
    }
    helper_compute_fprf_float128(env, xt.f128);

    putVSR(rD(opcode) + 32, &xt, env);
    float_check_status(env);
}
/* VSX_DIV - VSX floating point divide
 *   op    - instruction mnemonic
 *   nels  - number of elements (1, 2 or 4)
 *   tp    - type (float32 or float64)
 *   fld   - vsr_t field (VsrD(*) or VsrW(*))
 *   sfprf - set FPRF
 *   r2sp  - round intermediate result to single precision
 */
#define VSX_DIV(op, nels, tp, fld, sfprf, r2sp)                              \
void helper_##op(CPUPPCState *env, uint32_t opcode)                          \
{                                                                            \
    ppc_vsr_t xt, xa, xb;                                                    \
    int i;                                                                   \
                                                                             \
    getVSR(xA(opcode), &xa, env);                                            \
    getVSR(xB(opcode), &xb, env);                                            \
    getVSR(xT(opcode), &xt, env);                                            \
    helper_reset_fpstatus(env);                                              \
                                                                             \
    for (i = 0; i < nels; i++) {                                             \
        float_status tstat = env->fp_status;                                 \
        set_float_exception_flags(0, &tstat);                                \
        xt.fld = tp##_div(xa.fld, xb.fld, &tstat);                           \
        env->fp_status.float_exception_flags |= tstat.float_exception_flags; \
                                                                             \
        if (unlikely(tstat.float_exception_flags & float_flag_invalid)) {    \
            if (tp##_is_infinity(xa.fld) && tp##_is_infinity(xb.fld)) {      \
                float_invalid_op_excp(env, POWERPC_EXCP_FP_VXIDI, sfprf);    \
            } else if (tp##_is_zero(xa.fld) &&                               \
                       tp##_is_zero(xb.fld)) {                               \
                float_invalid_op_excp(env, POWERPC_EXCP_FP_VXZDZ, sfprf);    \
            } else if (tp##_is_signaling_nan(xa.fld, &tstat) ||              \
                       tp##_is_signaling_nan(xb.fld, &tstat)) {              \
                float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, sfprf);   \
            }                                                                \
        }                                                                    \
        if (unlikely(tstat.float_exception_flags & float_flag_divbyzero)) {  \
            float_zero_divide_excp(env, GETPC());                            \
        }                                                                    \
                                                                             \
        if (r2sp) {                                                          \
            xt.fld = helper_frsp(env, xt.fld);                               \
        }                                                                    \
                                                                             \
        if (sfprf) {                                                         \
            helper_compute_fprf_float64(env, xt.fld);                        \
        }                                                                    \
    }                                                                        \
                                                                             \
    putVSR(xT(opcode), &xt, env);                                            \
    float_check_status(env);                                                 \
}
VSX_DIV(xsdivdp, 1, float64, VsrD(0), 1, 0)
VSX_DIV(xsdivsp, 1, float64, VsrD(0), 1, 1)
VSX_DIV(xvdivdp, 2, float64, VsrD(i), 0, 0)
VSX_DIV(xvdivsp, 4, float32, VsrW(i), 0, 0)
void helper_xsdivqp(CPUPPCState *env, uint32_t opcode)
{
    ppc_vsr_t xt, xa, xb;
    float_status tstat;

    getVSR(rA(opcode) + 32, &xa, env);
    getVSR(rB(opcode) + 32, &xb, env);
    getVSR(rD(opcode) + 32, &xt, env);

    helper_reset_fpstatus(env);
    tstat = env->fp_status;
    if (unlikely(Rc(opcode) != 0)) {
        tstat.float_rounding_mode = float_round_to_odd;
    }

    set_float_exception_flags(0, &tstat);
    xt.f128 = float128_div(xa.f128, xb.f128, &tstat);
    env->fp_status.float_exception_flags |= tstat.float_exception_flags;

    if (unlikely(tstat.float_exception_flags & float_flag_invalid)) {
        if (float128_is_infinity(xa.f128) && float128_is_infinity(xb.f128)) {
            /* Division of infinity by infinity */
            float_invalid_op_excp(env, POWERPC_EXCP_FP_VXIDI, 1);
        } else if (float128_is_zero(xa.f128) &&
                   float128_is_zero(xb.f128)) {
            /* Division of zero by zero */
            float_invalid_op_excp(env, POWERPC_EXCP_FP_VXZDZ, 1);
        } else if (float128_is_signaling_nan(xa.f128, &tstat) ||
                   float128_is_signaling_nan(xb.f128, &tstat)) {
            float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 1);
        }
    }
    if (unlikely(tstat.float_exception_flags & float_flag_divbyzero)) {
        float_zero_divide_excp(env, GETPC());
    }

    helper_compute_fprf_float128(env, xt.f128);
    putVSR(rD(opcode) + 32, &xt, env);
    float_check_status(env);
}
/* VSX_RE - VSX floating point reciprocal estimate
 *   op    - instruction mnemonic
 *   nels  - number of elements (1, 2 or 4)
 *   tp    - type (float32 or float64)
 *   fld   - vsr_t field (VsrD(*) or VsrW(*))
 *   sfprf - set FPRF
 *   r2sp  - round intermediate result to single precision
 */
#define VSX_RE(op, nels, tp, fld, sfprf, r2sp)                          \
void helper_##op(CPUPPCState *env, uint32_t opcode)                     \
{                                                                       \
    ppc_vsr_t xt, xb;                                                   \
    int i;                                                              \
                                                                        \
    getVSR(xB(opcode), &xb, env);                                       \
    getVSR(xT(opcode), &xt, env);                                       \
    helper_reset_fpstatus(env);                                         \
                                                                        \
    for (i = 0; i < nels; i++) {                                        \
        if (unlikely(tp##_is_signaling_nan(xb.fld, &env->fp_status))) { \
            float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, sfprf);  \
        }                                                               \
        xt.fld = tp##_div(tp##_one, xb.fld, &env->fp_status);           \
                                                                        \
        if (r2sp) {                                                     \
            xt.fld = helper_frsp(env, xt.fld);                          \
        }                                                               \
                                                                        \
        if (sfprf) {                                                    \
            helper_compute_fprf_float64(env, xt.fld);                   \
        }                                                               \
    }                                                                   \
                                                                        \
    putVSR(xT(opcode), &xt, env);                                       \
    float_check_status(env);                                            \
}

VSX_RE(xsredp, 1, float64, VsrD(0), 1, 0)
VSX_RE(xsresp, 1, float64, VsrD(0), 1, 1)
VSX_RE(xvredp, 2, float64, VsrD(i), 0, 0)
VSX_RE(xvresp, 4, float32, VsrW(i), 0, 0)
/* VSX_SQRT - VSX floating point square root
 *   op    - instruction mnemonic
 *   nels  - number of elements (1, 2 or 4)
 *   tp    - type (float32 or float64)
 *   fld   - vsr_t field (VsrD(*) or VsrW(*))
 *   sfprf - set FPRF
 *   r2sp  - round intermediate result to single precision
 */
#define VSX_SQRT(op, nels, tp, fld, sfprf, r2sp)                             \
void helper_##op(CPUPPCState *env, uint32_t opcode)                          \
{                                                                            \
    ppc_vsr_t xt, xb;                                                        \
    int i;                                                                   \
                                                                             \
    getVSR(xB(opcode), &xb, env);                                            \
    getVSR(xT(opcode), &xt, env);                                            \
    helper_reset_fpstatus(env);                                              \
                                                                             \
    for (i = 0; i < nels; i++) {                                             \
        float_status tstat = env->fp_status;                                 \
        set_float_exception_flags(0, &tstat);                                \
        xt.fld = tp##_sqrt(xb.fld, &tstat);                                  \
        env->fp_status.float_exception_flags |= tstat.float_exception_flags; \
                                                                             \
        if (unlikely(tstat.float_exception_flags & float_flag_invalid)) {    \
            if (tp##_is_neg(xb.fld) && !tp##_is_zero(xb.fld)) {              \
                float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSQRT, sfprf);   \
            } else if (tp##_is_signaling_nan(xb.fld, &tstat)) {              \
                float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, sfprf);   \
            }                                                                \
        }                                                                    \
                                                                             \
        if (r2sp) {                                                          \
            xt.fld = helper_frsp(env, xt.fld);                               \
        }                                                                    \
                                                                             \
        if (sfprf) {                                                         \
            helper_compute_fprf_float64(env, xt.fld);                        \
        }                                                                    \
    }                                                                        \
                                                                             \
    putVSR(xT(opcode), &xt, env);                                            \
    float_check_status(env);                                                 \
}

VSX_SQRT(xssqrtdp, 1, float64, VsrD(0), 1, 0)
VSX_SQRT(xssqrtsp, 1, float64, VsrD(0), 1, 1)
VSX_SQRT(xvsqrtdp, 2, float64, VsrD(i), 0, 0)
VSX_SQRT(xvsqrtsp, 4, float32, VsrW(i), 0, 0)
/* VSX_RSQRTE - VSX floating point reciprocal square root estimate
 *   op    - instruction mnemonic
 *   nels  - number of elements (1, 2 or 4)
 *   tp    - type (float32 or float64)
 *   fld   - vsr_t field (VsrD(*) or VsrW(*))
 *   sfprf - set FPRF
 *   r2sp  - round intermediate result to single precision
 */
#define VSX_RSQRTE(op, nels, tp, fld, sfprf, r2sp)                           \
void helper_##op(CPUPPCState *env, uint32_t opcode)                          \
{                                                                            \
    ppc_vsr_t xt, xb;                                                        \
    int i;                                                                   \
                                                                             \
    getVSR(xB(opcode), &xb, env);                                            \
    getVSR(xT(opcode), &xt, env);                                            \
    helper_reset_fpstatus(env);                                              \
                                                                             \
    for (i = 0; i < nels; i++) {                                             \
        float_status tstat = env->fp_status;                                 \
        set_float_exception_flags(0, &tstat);                                \
        xt.fld = tp##_sqrt(xb.fld, &tstat);                                  \
        xt.fld = tp##_div(tp##_one, xt.fld, &tstat);                         \
        env->fp_status.float_exception_flags |= tstat.float_exception_flags; \
                                                                             \
        if (unlikely(tstat.float_exception_flags & float_flag_invalid)) {    \
            if (tp##_is_neg(xb.fld) && !tp##_is_zero(xb.fld)) {              \
                float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSQRT, sfprf);   \
            } else if (tp##_is_signaling_nan(xb.fld, &tstat)) {              \
                float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, sfprf);   \
            }                                                                \
        }                                                                    \
                                                                             \
        if (r2sp) {                                                          \
            xt.fld = helper_frsp(env, xt.fld);                               \
        }                                                                    \
                                                                             \
        if (sfprf) {                                                         \
            helper_compute_fprf_float64(env, xt.fld);                        \
        }                                                                    \
    }                                                                        \
                                                                             \
    putVSR(xT(opcode), &xt, env);                                            \
    float_check_status(env);                                                 \
}

VSX_RSQRTE(xsrsqrtedp, 1, float64, VsrD(0), 1, 0)
VSX_RSQRTE(xsrsqrtesp, 1, float64, VsrD(0), 1, 1)
VSX_RSQRTE(xvrsqrtedp, 2, float64, VsrD(i), 0, 0)
VSX_RSQRTE(xvrsqrtesp, 4, float32, VsrW(i), 0, 0)
/* VSX_TDIV - VSX floating point test for divide
 *   op    - instruction mnemonic
 *   nels  - number of elements (1, 2 or 4)
 *   tp    - type (float32 or float64)
 *   fld   - vsr_t field (VsrD(*) or VsrW(*))
 *   emin  - minimum unbiased exponent
 *   emax  - maximum unbiased exponent
 *   nbits - number of fraction bits
 */
#define VSX_TDIV(op, nels, tp, fld, emin, emax, nbits)                  \
void helper_##op(CPUPPCState *env, uint32_t opcode)                     \
{                                                                       \
    ppc_vsr_t xa, xb;                                                   \
    int i;                                                              \
    int fe_flag = 0;                                                    \
    int fg_flag = 0;                                                    \
                                                                        \
    getVSR(xA(opcode), &xa, env);                                       \
    getVSR(xB(opcode), &xb, env);                                       \
                                                                        \
    for (i = 0; i < nels; i++) {                                        \
        if (unlikely(tp##_is_infinity(xa.fld) ||                        \
                     tp##_is_infinity(xb.fld) ||                        \
                     tp##_is_zero(xb.fld))) {                           \
            fe_flag = 1;                                                \
            fg_flag = 1;                                                \
        } else {                                                        \
            int e_a = ppc_##tp##_get_unbiased_exp(xa.fld);              \
            int e_b = ppc_##tp##_get_unbiased_exp(xb.fld);              \
                                                                        \
            if (unlikely(tp##_is_any_nan(xa.fld) ||                     \
                         tp##_is_any_nan(xb.fld))) {                    \
                fe_flag = 1;                                            \
            } else if ((e_b <= emin) || (e_b >= (emax - 2))) {          \
                fe_flag = 1;                                            \
            } else if (!tp##_is_zero(xa.fld) &&                         \
                       (((e_a - e_b) >= emax) ||                        \
                        ((e_a - e_b) <= (emin + 1)) ||                  \
                        (e_a <= (emin + nbits)))) {                     \
                fe_flag = 1;                                            \
            }                                                           \
                                                                        \
            if (unlikely(tp##_is_zero_or_denormal(xb.fld))) {           \
                /* XB is not zero because of the above check and */     \
                /* so must be denormalized.                      */     \
                fg_flag = 1;                                            \
            }                                                           \
        }                                                               \
    }                                                                   \
                                                                        \
    env->crf[BF(opcode)] = 0x8 | (fg_flag ? 4 : 0) | (fe_flag ? 2 : 0); \
}

VSX_TDIV(xstdivdp, 1, float64, VsrD(0), -1022, 1023, 52)
VSX_TDIV(xvtdivdp, 2, float64, VsrD(i), -1022, 1023, 52)
VSX_TDIV(xvtdivsp, 4, float32, VsrW(i), -126, 127, 23)
/* VSX_TSQRT - VSX floating point test for square root
 *   op    - instruction mnemonic
 *   nels  - number of elements (1, 2 or 4)
 *   tp    - type (float32 or float64)
 *   fld   - vsr_t field (VsrD(*) or VsrW(*))
 *   emin  - minimum unbiased exponent
 *   nbits - number of fraction bits
 */
#define VSX_TSQRT(op, nels, tp, fld, emin, nbits)                       \
void helper_##op(CPUPPCState *env, uint32_t opcode)                     \
{                                                                       \
    ppc_vsr_t xa, xb;                                                   \
    int i;                                                              \
    int fe_flag = 0;                                                    \
    int fg_flag = 0;                                                    \
                                                                        \
    getVSR(xA(opcode), &xa, env);                                       \
    getVSR(xB(opcode), &xb, env);                                       \
                                                                        \
    for (i = 0; i < nels; i++) {                                        \
        if (unlikely(tp##_is_infinity(xb.fld) ||                        \
                     tp##_is_zero(xb.fld))) {                           \
            fe_flag = 1;                                                \
            fg_flag = 1;                                                \
        } else {                                                        \
            int e_b = ppc_##tp##_get_unbiased_exp(xb.fld);              \
                                                                        \
            if (unlikely(tp##_is_any_nan(xb.fld))) {                    \
                fe_flag = 1;                                            \
            } else if (unlikely(tp##_is_zero(xb.fld))) {                \
                fe_flag = 1;                                            \
            } else if (unlikely(tp##_is_neg(xb.fld))) {                 \
                fe_flag = 1;                                            \
            } else if (!tp##_is_zero(xb.fld) &&                         \
                       (e_b <= (emin + nbits))) {                       \
                fe_flag = 1;                                            \
            }                                                           \
                                                                        \
            if (unlikely(tp##_is_zero_or_denormal(xb.fld))) {           \
                /* XB is not zero because of the above check and */     \
                /* therefore must be denormalized.               */     \
                fg_flag = 1;                                            \
            }                                                           \
        }                                                               \
    }                                                                   \
                                                                        \
    env->crf[BF(opcode)] = 0x8 | (fg_flag ? 4 : 0) | (fe_flag ? 2 : 0); \
}

VSX_TSQRT(xstsqrtdp, 1, float64, VsrD(0), -1022, 52)
VSX_TSQRT(xvtsqrtdp, 2, float64, VsrD(i), -1022, 52)
VSX_TSQRT(xvtsqrtsp, 4, float32, VsrW(i), -126, 23)
/* VSX_MADD - VSX floating point multiply/add variations
 *   op       - instruction mnemonic
 *   nels     - number of elements (1, 2 or 4)
 *   tp       - type (float32 or float64)
 *   fld      - vsr_t field (VsrD(*) or VsrW(*))
 *   maddflgs - flags for the float*muladd routine that control the
 *              various forms (madd, msub, nmadd, nmsub)
 *   afrm     - A form (1=A, 0=M)
 *   sfprf    - set FPRF
 */
#define VSX_MADD(op, nels, tp, fld, maddflgs, afrm, sfprf, r2sp)              \
void helper_##op(CPUPPCState *env, uint32_t opcode)                           \
{                                                                             \
    ppc_vsr_t xt_in, xa, xb, xt_out;                                          \
    ppc_vsr_t *b, *c;                                                         \
    int i;                                                                    \
                                                                              \
    if (afrm) { /* AxB + T */                                                 \
        b = &xb;                                                              \
        c = &xt_in;                                                           \
    } else { /* AxT + B */                                                    \
        b = &xt_in;                                                           \
        c = &xb;                                                              \
    }                                                                         \
                                                                              \
    getVSR(xA(opcode), &xa, env);                                             \
    getVSR(xB(opcode), &xb, env);                                             \
    getVSR(xT(opcode), &xt_in, env);                                          \
                                                                              \
    xt_out = xt_in;                                                           \
                                                                              \
    helper_reset_fpstatus(env);                                               \
                                                                              \
    for (i = 0; i < nels; i++) {                                              \
        float_status tstat = env->fp_status;                                  \
        set_float_exception_flags(0, &tstat);                                 \
        if (r2sp && (tstat.float_rounding_mode == float_round_nearest_even)) {\
            /* Avoid double rounding errors by rounding the intermediate */   \
            /* result to odd.                                            */   \
            set_float_rounding_mode(float_round_to_zero, &tstat);             \
            xt_out.fld = tp##_muladd(xa.fld, b->fld, c->fld,                  \
                                     maddflgs, &tstat);                       \
            xt_out.fld |= (get_float_exception_flags(&tstat) &                \
                           float_flag_inexact) != 0;                          \
        } else {                                                              \
            xt_out.fld = tp##_muladd(xa.fld, b->fld, c->fld,                  \
                                     maddflgs, &tstat);                       \
        }                                                                     \
        env->fp_status.float_exception_flags |= tstat.float_exception_flags;  \
                                                                              \
        if (unlikely(tstat.float_exception_flags & float_flag_invalid)) {     \
            tp##_maddsub_update_excp(env, xa.fld, b->fld, c->fld, maddflgs);  \
        }                                                                     \
                                                                              \
        if (r2sp) {                                                           \
            xt_out.fld = helper_frsp(env, xt_out.fld);                        \
        }                                                                     \
                                                                              \
        if (sfprf) {                                                          \
            helper_compute_fprf_float64(env, xt_out.fld);                     \
        }                                                                     \
    }                                                                         \
    putVSR(xT(opcode), &xt_out, env);                                         \
    float_check_status(env);                                                  \
}
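
/*
 * The r2sp path above implements round-to-odd: the wide muladd is done
 * in round-to-zero and the inexact flag is OR-ed into the result LSB,
 * so the subsequent rounding to single precision in helper_frsp cannot
 * suffer a double-rounding error.
 */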
VSX_MADD(xsmaddadp, 1, float64, VsrD(0), MADD_FLGS, 1, 1, 0)
VSX_MADD(xsmaddmdp, 1, float64, VsrD(0), MADD_FLGS, 0, 1, 0)
VSX_MADD(xsmsubadp, 1, float64, VsrD(0), MSUB_FLGS, 1, 1, 0)
VSX_MADD(xsmsubmdp, 1, float64, VsrD(0), MSUB_FLGS, 0, 1, 0)
VSX_MADD(xsnmaddadp, 1, float64, VsrD(0), NMADD_FLGS, 1, 1, 0)
VSX_MADD(xsnmaddmdp, 1, float64, VsrD(0), NMADD_FLGS, 0, 1, 0)
VSX_MADD(xsnmsubadp, 1, float64, VsrD(0), NMSUB_FLGS, 1, 1, 0)
VSX_MADD(xsnmsubmdp, 1, float64, VsrD(0), NMSUB_FLGS, 0, 1, 0)

VSX_MADD(xsmaddasp, 1, float64, VsrD(0), MADD_FLGS, 1, 1, 1)
VSX_MADD(xsmaddmsp, 1, float64, VsrD(0), MADD_FLGS, 0, 1, 1)
VSX_MADD(xsmsubasp, 1, float64, VsrD(0), MSUB_FLGS, 1, 1, 1)
VSX_MADD(xsmsubmsp, 1, float64, VsrD(0), MSUB_FLGS, 0, 1, 1)
VSX_MADD(xsnmaddasp, 1, float64, VsrD(0), NMADD_FLGS, 1, 1, 1)
VSX_MADD(xsnmaddmsp, 1, float64, VsrD(0), NMADD_FLGS, 0, 1, 1)
VSX_MADD(xsnmsubasp, 1, float64, VsrD(0), NMSUB_FLGS, 1, 1, 1)
VSX_MADD(xsnmsubmsp, 1, float64, VsrD(0), NMSUB_FLGS, 0, 1, 1)
VSX_MADD(xvmaddadp, 2, float64, VsrD(i), MADD_FLGS, 1, 0, 0)
VSX_MADD(xvmaddmdp, 2, float64, VsrD(i), MADD_FLGS, 0, 0, 0)
VSX_MADD(xvmsubadp, 2, float64, VsrD(i), MSUB_FLGS, 1, 0, 0)
VSX_MADD(xvmsubmdp, 2, float64, VsrD(i), MSUB_FLGS, 0, 0, 0)
VSX_MADD(xvnmaddadp, 2, float64, VsrD(i), NMADD_FLGS, 1, 0, 0)
VSX_MADD(xvnmaddmdp, 2, float64, VsrD(i), NMADD_FLGS, 0, 0, 0)
VSX_MADD(xvnmsubadp, 2, float64, VsrD(i), NMSUB_FLGS, 1, 0, 0)
VSX_MADD(xvnmsubmdp, 2, float64, VsrD(i), NMSUB_FLGS, 0, 0, 0)

VSX_MADD(xvmaddasp, 4, float32, VsrW(i), MADD_FLGS, 1, 0, 0)
VSX_MADD(xvmaddmsp, 4, float32, VsrW(i), MADD_FLGS, 0, 0, 0)
VSX_MADD(xvmsubasp, 4, float32, VsrW(i), MSUB_FLGS, 1, 0, 0)
VSX_MADD(xvmsubmsp, 4, float32, VsrW(i), MSUB_FLGS, 0, 0, 0)
VSX_MADD(xvnmaddasp, 4, float32, VsrW(i), NMADD_FLGS, 1, 0, 0)
VSX_MADD(xvnmaddmsp, 4, float32, VsrW(i), NMADD_FLGS, 0, 0, 0)
VSX_MADD(xvnmsubasp, 4, float32, VsrW(i), NMSUB_FLGS, 1, 0, 0)
VSX_MADD(xvnmsubmsp, 4, float32, VsrW(i), NMSUB_FLGS, 0, 0, 0)
/* VSX_SCALAR_CMP_DP - VSX scalar floating point compare double precision
 * op - instruction mnemonic
 * cmp - comparison operation
 * exp - expected result of comparison
 * svxvc - set VXVC bit
 */
#define VSX_SCALAR_CMP_DP(op, cmp, exp, svxvc)                                \
void helper_##op(CPUPPCState *env, uint32_t opcode)                           \
{                                                                             \
    ppc_vsr_t xt, xa, xb;                                                     \
    bool vxsnan_flag = false, vxvc_flag = false, vex_flag = false;            \
                                                                              \
    getVSR(xA(opcode), &xa, env);                                             \
    getVSR(xB(opcode), &xb, env);                                             \
    getVSR(xT(opcode), &xt, env);                                             \
                                                                              \
    if (float64_is_signaling_nan(xa.VsrD(0), &env->fp_status) ||              \
        float64_is_signaling_nan(xb.VsrD(0), &env->fp_status)) {              \
        vxsnan_flag = true;                                                   \
        if (fpscr_ve == 0 && svxvc) {                                         \
            vxvc_flag = true;                                                 \
        }                                                                     \
    } else if (svxvc) {                                                       \
        vxvc_flag = float64_is_quiet_nan(xa.VsrD(0), &env->fp_status) ||      \
            float64_is_quiet_nan(xb.VsrD(0), &env->fp_status);                \
    }                                                                         \
    if (vxsnan_flag) {                                                        \
        float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 0);                \
    }                                                                         \
    if (vxvc_flag) {                                                          \
        float_invalid_op_excp(env, POWERPC_EXCP_FP_VXVC, 0);                  \
    }                                                                         \
    vex_flag = fpscr_ve && (vxvc_flag || vxsnan_flag);                        \
                                                                              \
    if (!vex_flag) {                                                          \
        if (float64_##cmp(xb.VsrD(0), xa.VsrD(0), &env->fp_status) == exp) {  \
            xt.VsrD(0) = -1;                                                  \
            xt.VsrD(1) = 0;                                                   \
        } else {                                                              \
            xt.VsrD(0) = 0;                                                   \
            xt.VsrD(1) = 0;                                                   \
        }                                                                     \
    }                                                                         \
    putVSR(xT(opcode), &xt, env);                                             \
    helper_float_check_status(env);                                           \
}
VSX_SCALAR_CMP_DP(xscmpeqdp, eq, 1, 0)
VSX_SCALAR_CMP_DP(xscmpgedp, le, 1, 1)
VSX_SCALAR_CMP_DP(xscmpgtdp, lt, 1, 1)
VSX_SCALAR_CMP_DP(xscmpnedp, eq, 0, 0)
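
/*
 * The comparison operands are passed to float64_##cmp() swapped (xb
 * first, xa second), so xscmpgedp tests "xa >= xb" as "xb <= xa" and
 * xscmpgtdp tests "xa > xb" as "xb < xa"; xscmpnedp reuses "eq" with
 * an expected result of 0.
 */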
void helper_xscmpexpdp(CPUPPCState *env, uint32_t opcode)
{
    ppc_vsr_t xa, xb;
    int64_t exp_a, exp_b;
    uint32_t cc;

    getVSR(xA(opcode), &xa, env);
    getVSR(xB(opcode), &xb, env);

    exp_a = extract64(xa.VsrD(0), 52, 11);
    exp_b = extract64(xb.VsrD(0), 52, 11);

    if (unlikely(float64_is_any_nan(xa.VsrD(0)) ||
                 float64_is_any_nan(xb.VsrD(0)))) {
        cc = CRF_SO;
    } else {
        if (exp_a < exp_b) {
            cc = CRF_LT;
        } else if (exp_a > exp_b) {
            cc = CRF_GT;
        } else {
            cc = CRF_EQ;
        }
    }

    env->fpscr &= ~(0x0F << FPSCR_FPRF);
    env->fpscr |= cc << FPSCR_FPRF;
    env->crf[BF(opcode)] = cc;

    helper_float_check_status(env);
}
void helper_xscmpexpqp(CPUPPCState *env, uint32_t opcode)
{
    ppc_vsr_t xa, xb;
    int64_t exp_a, exp_b;
    uint32_t cc;

    getVSR(rA(opcode) + 32, &xa, env);
    getVSR(rB(opcode) + 32, &xb, env);

    exp_a = extract64(xa.VsrD(0), 48, 15);
    exp_b = extract64(xb.VsrD(0), 48, 15);

    if (unlikely(float128_is_any_nan(xa.f128) ||
                 float128_is_any_nan(xb.f128))) {
        cc = CRF_SO;
    } else {
        if (exp_a < exp_b) {
            cc = CRF_LT;
        } else if (exp_a > exp_b) {
            cc = CRF_GT;
        } else {
            cc = CRF_EQ;
        }
    }

    env->fpscr &= ~(0x0F << FPSCR_FPRF);
    env->fpscr |= cc << FPSCR_FPRF;
    env->crf[BF(opcode)] = cc;

    helper_float_check_status(env);
}
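
/*
 * Both exponent compares look only at the biased exponent field and
 * ignore sign and significand; any NaN operand reports "unordered"
 * via CRF_SO instead of an ordering.
 */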
#define VSX_SCALAR_CMP(op, ordered)                                      \
void helper_##op(CPUPPCState *env, uint32_t opcode)                      \
{                                                                        \
    ppc_vsr_t xa, xb;                                                    \
    uint32_t cc = 0;                                                     \
    bool vxsnan_flag = false, vxvc_flag = false;                         \
                                                                         \
    helper_reset_fpstatus(env);                                          \
    getVSR(xA(opcode), &xa, env);                                        \
    getVSR(xB(opcode), &xb, env);                                        \
                                                                         \
    if (float64_is_signaling_nan(xa.VsrD(0), &env->fp_status) ||         \
        float64_is_signaling_nan(xb.VsrD(0), &env->fp_status)) {         \
        vxsnan_flag = true;                                              \
        cc = CRF_SO;                                                     \
        if (fpscr_ve == 0 && ordered) {                                  \
            vxvc_flag = true;                                            \
        }                                                                \
    } else if (float64_is_quiet_nan(xa.VsrD(0), &env->fp_status) ||      \
               float64_is_quiet_nan(xb.VsrD(0), &env->fp_status)) {      \
        cc = CRF_SO;                                                     \
        if (ordered) {                                                   \
            vxvc_flag = true;                                            \
        }                                                                \
    }                                                                    \
    if (vxsnan_flag) {                                                   \
        float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 0);           \
    }                                                                    \
    if (vxvc_flag) {                                                     \
        float_invalid_op_excp(env, POWERPC_EXCP_FP_VXVC, 0);             \
    }                                                                    \
                                                                         \
    if (float64_lt(xa.VsrD(0), xb.VsrD(0), &env->fp_status)) {           \
        cc |= CRF_LT;                                                    \
    } else if (!float64_le(xa.VsrD(0), xb.VsrD(0), &env->fp_status)) {   \
        cc |= CRF_GT;                                                    \
    } else {                                                             \
        cc |= CRF_EQ;                                                    \
    }                                                                    \
    env->fpscr &= ~(0x0F << FPSCR_FPRF);                                 \
    env->fpscr |= cc << FPSCR_FPRF;                                      \
    env->crf[BF(opcode)] = cc;                                           \
    float_check_status(env);                                             \
}

VSX_SCALAR_CMP(xscmpodp, 1)
VSX_SCALAR_CMP(xscmpudp, 0)
#define VSX_SCALAR_CMPQ(op, ordered)                                     \
void helper_##op(CPUPPCState *env, uint32_t opcode)                      \
{                                                                        \
    ppc_vsr_t xa, xb;                                                    \
    uint32_t cc = 0;                                                     \
    bool vxsnan_flag = false, vxvc_flag = false;                         \
                                                                         \
    helper_reset_fpstatus(env);                                          \
    getVSR(rA(opcode) + 32, &xa, env);                                   \
    getVSR(rB(opcode) + 32, &xb, env);                                   \
                                                                         \
    if (float128_is_signaling_nan(xa.f128, &env->fp_status) ||           \
        float128_is_signaling_nan(xb.f128, &env->fp_status)) {           \
        vxsnan_flag = true;                                              \
        cc = CRF_SO;                                                     \
        if (fpscr_ve == 0 && ordered) {                                  \
            vxvc_flag = true;                                            \
        }                                                                \
    } else if (float128_is_quiet_nan(xa.f128, &env->fp_status) ||        \
               float128_is_quiet_nan(xb.f128, &env->fp_status)) {        \
        cc = CRF_SO;                                                     \
        if (ordered) {                                                   \
            vxvc_flag = true;                                            \
        }                                                                \
    }                                                                    \
    if (vxsnan_flag) {                                                   \
        float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 0);           \
    }                                                                    \
    if (vxvc_flag) {                                                     \
        float_invalid_op_excp(env, POWERPC_EXCP_FP_VXVC, 0);             \
    }                                                                    \
                                                                         \
    if (float128_lt(xa.f128, xb.f128, &env->fp_status)) {                \
        cc |= CRF_LT;                                                    \
    } else if (!float128_le(xa.f128, xb.f128, &env->fp_status)) {        \
        cc |= CRF_GT;                                                    \
    } else {                                                             \
        cc |= CRF_EQ;                                                    \
    }                                                                    \
    env->fpscr &= ~(0x0F << FPSCR_FPRF);                                 \
    env->fpscr |= cc << FPSCR_FPRF;                                      \
    env->crf[BF(opcode)] = cc;                                           \
    float_check_status(env);                                             \
}

VSX_SCALAR_CMPQ(xscmpoqp, 1)
VSX_SCALAR_CMPQ(xscmpuqp, 0)
/* VSX_MAX_MIN - VSX floating point maximum/minimum
 * name - instruction mnemonic
 * op - operation (max or min)
 * nels - number of elements (1, 2 or 4)
 * tp - type (float32 or float64)
 * fld - vsr_t field (VsrD(*) or VsrW(*))
 */
#define VSX_MAX_MIN(name, op, nels, tp, fld)                                  \
void helper_##name(CPUPPCState *env, uint32_t opcode)                         \
{                                                                             \
    ppc_vsr_t xt, xa, xb;                                                     \
    int i;                                                                    \
                                                                              \
    getVSR(xA(opcode), &xa, env);                                             \
    getVSR(xB(opcode), &xb, env);                                             \
    getVSR(xT(opcode), &xt, env);                                             \
                                                                              \
    for (i = 0; i < nels; i++) {                                              \
        xt.fld = tp##_##op(xa.fld, xb.fld, &env->fp_status);                  \
        if (unlikely(tp##_is_signaling_nan(xa.fld, &env->fp_status) ||        \
                     tp##_is_signaling_nan(xb.fld, &env->fp_status))) {       \
            float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 0);            \
        }                                                                     \
    }                                                                         \
                                                                              \
    putVSR(xT(opcode), &xt, env);                                             \
    float_check_status(env);                                                  \
}
VSX_MAX_MIN(xsmaxdp, maxnum, 1, float64, VsrD(0))
VSX_MAX_MIN(xvmaxdp, maxnum, 2, float64, VsrD(i))
VSX_MAX_MIN(xvmaxsp, maxnum, 4, float32, VsrW(i))
VSX_MAX_MIN(xsmindp, minnum, 1, float64, VsrD(0))
VSX_MAX_MIN(xvmindp, minnum, 2, float64, VsrD(i))
VSX_MAX_MIN(xvminsp, minnum, 4, float32, VsrW(i))
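
/*
 * maxnum/minnum follow the IEEE 754-2008 maxNum/minNum rules as
 * implemented by softfloat: when exactly one operand is a quiet NaN
 * the other, numeric operand is returned, and only a signaling NaN
 * raises VXSNAN here.
 */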
#define VSX_MAX_MINC(name, max)                                               \
void helper_##name(CPUPPCState *env, uint32_t opcode)                         \
{                                                                             \
    ppc_vsr_t xt, xa, xb;                                                     \
    bool vxsnan_flag = false, vex_flag = false;                               \
                                                                              \
    getVSR(rA(opcode) + 32, &xa, env);                                        \
    getVSR(rB(opcode) + 32, &xb, env);                                        \
    getVSR(rD(opcode) + 32, &xt, env);                                        \
                                                                              \
    if (unlikely(float64_is_any_nan(xa.VsrD(0)) ||                            \
                 float64_is_any_nan(xb.VsrD(0)))) {                           \
        if (float64_is_signaling_nan(xa.VsrD(0), &env->fp_status) ||          \
            float64_is_signaling_nan(xb.VsrD(0), &env->fp_status)) {          \
            vxsnan_flag = true;                                               \
        }                                                                     \
        xt.VsrD(0) = xb.VsrD(0);                                              \
    } else if ((max &&                                                        \
               !float64_lt(xa.VsrD(0), xb.VsrD(0), &env->fp_status)) ||       \
               (!max &&                                                       \
               float64_lt(xa.VsrD(0), xb.VsrD(0), &env->fp_status))) {        \
        xt.VsrD(0) = xa.VsrD(0);                                              \
    } else {                                                                  \
        xt.VsrD(0) = xb.VsrD(0);                                              \
    }                                                                         \
                                                                              \
    vex_flag = fpscr_ve & vxsnan_flag;                                        \
    if (vxsnan_flag) {                                                        \
        float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 0);                \
    }                                                                         \
    if (!vex_flag) {                                                          \
        putVSR(rD(opcode) + 32, &xt, env);                                    \
    }                                                                         \
}                                                                             \

VSX_MAX_MINC(xsmaxcdp, 1);
VSX_MAX_MINC(xsmincdp, 0);
#define VSX_MAX_MINJ(name, max)                                               \
void helper_##name(CPUPPCState *env, uint32_t opcode)                         \
{                                                                             \
    ppc_vsr_t xt, xa, xb;                                                     \
    bool vxsnan_flag = false, vex_flag = false;                               \
                                                                              \
    getVSR(rA(opcode) + 32, &xa, env);                                        \
    getVSR(rB(opcode) + 32, &xb, env);                                        \
    getVSR(rD(opcode) + 32, &xt, env);                                        \
                                                                              \
    if (unlikely(float64_is_any_nan(xa.VsrD(0)))) {                           \
        if (float64_is_signaling_nan(xa.VsrD(0), &env->fp_status)) {          \
            vxsnan_flag = true;                                               \
        }                                                                     \
        xt.VsrD(0) = xa.VsrD(0);                                              \
    } else if (unlikely(float64_is_any_nan(xb.VsrD(0)))) {                    \
        if (float64_is_signaling_nan(xb.VsrD(0), &env->fp_status)) {          \
            vxsnan_flag = true;                                               \
        }                                                                     \
        xt.VsrD(0) = xb.VsrD(0);                                              \
    } else if (float64_is_zero(xa.VsrD(0)) && float64_is_zero(xb.VsrD(0))) {  \
        if (max) {                                                            \
            if (!float64_is_neg(xa.VsrD(0)) || !float64_is_neg(xb.VsrD(0))) { \
                xt.VsrD(0) = 0ULL;                                            \
            } else {                                                          \
                xt.VsrD(0) = 0x8000000000000000ULL;                           \
            }                                                                 \
        } else {                                                              \
            if (float64_is_neg(xa.VsrD(0)) || float64_is_neg(xb.VsrD(0))) {   \
                xt.VsrD(0) = 0x8000000000000000ULL;                           \
            } else {                                                          \
                xt.VsrD(0) = 0ULL;                                            \
            }                                                                 \
        }                                                                     \
    } else if ((max &&                                                        \
               !float64_lt(xa.VsrD(0), xb.VsrD(0), &env->fp_status)) ||       \
               (!max &&                                                       \
               float64_lt(xa.VsrD(0), xb.VsrD(0), &env->fp_status))) {        \
        xt.VsrD(0) = xa.VsrD(0);                                              \
    } else {                                                                  \
        xt.VsrD(0) = xb.VsrD(0);                                              \
    }                                                                         \
                                                                              \
    vex_flag = fpscr_ve & vxsnan_flag;                                        \
    if (vxsnan_flag) {                                                        \
        float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 0);                \
    }                                                                         \
    if (!vex_flag) {                                                          \
        putVSR(rD(opcode) + 32, &xt, env);                                    \
    }                                                                         \
}                                                                             \

VSX_MAX_MINJ(xsmaxjdp, 1);
VSX_MAX_MINJ(xsminjdp, 0);
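
/*
 * The "C" forms above use simplified C-style semantics: any NaN
 * operand makes the result xb, matching "a op b ? a : b". The "J"
 * forms instead propagate a NaN operand and order the zeroes, so
 * -0.0 compares less than +0.0.
 */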
/* VSX_CMP - VSX floating point compare
 * op - instruction mnemonic
 * nels - number of elements (1, 2 or 4)
 * tp - type (float32 or float64)
 * fld - vsr_t field (VsrD(*) or VsrW(*))
 * cmp - comparison operation
 * svxvc - set VXVC bit
 * exp - expected result of comparison
 */
#define VSX_CMP(op, nels, tp, fld, cmp, svxvc, exp)                       \
void helper_##op(CPUPPCState *env, uint32_t opcode)                       \
{                                                                         \
    ppc_vsr_t xt, xa, xb;                                                 \
    int i;                                                                \
    int all_true = 1;                                                     \
    int all_false = 1;                                                    \
                                                                          \
    getVSR(xA(opcode), &xa, env);                                         \
    getVSR(xB(opcode), &xb, env);                                         \
    getVSR(xT(opcode), &xt, env);                                         \
                                                                          \
    for (i = 0; i < nels; i++) {                                          \
        if (unlikely(tp##_is_any_nan(xa.fld) ||                           \
                     tp##_is_any_nan(xb.fld))) {                          \
            if (tp##_is_signaling_nan(xa.fld, &env->fp_status) ||         \
                tp##_is_signaling_nan(xb.fld, &env->fp_status)) {         \
                float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 0);    \
            }                                                             \
            if (svxvc) {                                                  \
                float_invalid_op_excp(env, POWERPC_EXCP_FP_VXVC, 0);      \
            }                                                             \
            xt.fld = 0;                                                   \
            all_true = 0;                                                 \
        } else {                                                          \
            if (tp##_##cmp(xb.fld, xa.fld, &env->fp_status) == exp) {     \
                xt.fld = -1;                                              \
                all_false = 0;                                            \
            } else {                                                      \
                xt.fld = 0;                                               \
                all_true = 0;                                             \
            }                                                             \
        }                                                                 \
    }                                                                     \
    putVSR(xT(opcode), &xt, env);                                         \
    if ((opcode >> (31 - 21)) & 1) {                                      \
        env->crf[6] = (all_true ? 0x8 : 0) | (all_false ? 0x2 : 0);       \
    }                                                                     \
    float_check_status(env);                                              \
}
VSX_CMP(xvcmpeqdp, 2, float64, VsrD(i), eq, 0, 1)
VSX_CMP(xvcmpgedp, 2, float64, VsrD(i), le, 1, 1)
VSX_CMP(xvcmpgtdp, 2, float64, VsrD(i), lt, 1, 1)
VSX_CMP(xvcmpnedp, 2, float64, VsrD(i), eq, 0, 0)
VSX_CMP(xvcmpeqsp, 4, float32, VsrW(i), eq, 0, 1)
VSX_CMP(xvcmpgesp, 4, float32, VsrW(i), le, 1, 1)
VSX_CMP(xvcmpgtsp, 4, float32, VsrW(i), lt, 1, 1)
VSX_CMP(xvcmpnesp, 4, float32, VsrW(i), eq, 0, 0)
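
/*
 * Bit 21 (IBM numbering) of the opcode is the Rc bit of the xvcmp*
 * forms; when it is set, CR field 6 summarises the element results:
 * 0x8 if the predicate held in all lanes, 0x2 if it held in none.
 */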
/* VSX_CVT_FP_TO_FP - VSX floating point/floating point conversion
 * op - instruction mnemonic
 * nels - number of elements (1, 2 or 4)
 * stp - source type (float32 or float64)
 * ttp - target type (float32 or float64)
 * sfld - source vsr_t field
 * tfld - target vsr_t field (f32 or f64)
 * sfprf - set FPRF
 */
#define VSX_CVT_FP_TO_FP(op, nels, stp, ttp, sfld, tfld, sfprf)    \
void helper_##op(CPUPPCState *env, uint32_t opcode)                \
{                                                                  \
    ppc_vsr_t xt, xb;                                              \
    int i;                                                         \
                                                                   \
    getVSR(xB(opcode), &xb, env);                                  \
    getVSR(xT(opcode), &xt, env);                                  \
                                                                   \
    for (i = 0; i < nels; i++) {                                   \
        xt.tfld = stp##_to_##ttp(xb.sfld, &env->fp_status);        \
        if (unlikely(stp##_is_signaling_nan(xb.sfld,               \
                                            &env->fp_status))) {  \
            float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 0); \
            xt.tfld = ttp##_snan_to_qnan(xt.tfld);                 \
        }                                                          \
        if (sfprf) {                                               \
            helper_compute_fprf_##ttp(env, xt.tfld);               \
        }                                                          \
    }                                                              \
                                                                   \
    putVSR(xT(opcode), &xt, env);                                  \
    float_check_status(env);                                       \
}
VSX_CVT_FP_TO_FP(xscvdpsp, 1, float64, float32, VsrD(0), VsrW(0), 1)
VSX_CVT_FP_TO_FP(xscvspdp, 1, float32, float64, VsrW(0), VsrD(0), 1)
VSX_CVT_FP_TO_FP(xvcvdpsp, 2, float64, float32, VsrD(i), VsrW(2*i), 0)
VSX_CVT_FP_TO_FP(xvcvspdp, 2, float32, float64, VsrW(2*i), VsrD(i), 0)
/* VSX_CVT_FP_TO_FP_VECTOR - VSX floating point/floating point conversion
 * op - instruction mnemonic
 * nels - number of elements (1, 2 or 4)
 * stp - source type (float32 or float64)
 * ttp - target type (float32 or float64)
 * sfld - source vsr_t field
 * tfld - target vsr_t field (f32 or f64)
 * sfprf - set FPRF
 */
#define VSX_CVT_FP_TO_FP_VECTOR(op, nels, stp, ttp, sfld, tfld, sfprf) \
void helper_##op(CPUPPCState *env, uint32_t opcode)                    \
{                                                                      \
    ppc_vsr_t xt, xb;                                                  \
    int i;                                                             \
                                                                       \
    getVSR(rB(opcode) + 32, &xb, env);                                 \
    getVSR(rD(opcode) + 32, &xt, env);                                 \
                                                                       \
    for (i = 0; i < nels; i++) {                                       \
        xt.tfld = stp##_to_##ttp(xb.sfld, &env->fp_status);            \
        if (unlikely(stp##_is_signaling_nan(xb.sfld,                   \
                                            &env->fp_status))) {       \
            float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 0);     \
            xt.tfld = ttp##_snan_to_qnan(xt.tfld);                     \
        }                                                              \
        if (sfprf) {                                                   \
            helper_compute_fprf_##ttp(env, xt.tfld);                   \
        }                                                              \
    }                                                                  \
                                                                       \
    putVSR(rD(opcode) + 32, &xt, env);                                 \
    float_check_status(env);                                           \
}
VSX_CVT_FP_TO_FP_VECTOR(xscvdpqp, 1, float64, float128, VsrD(0), f128, 1)
/* VSX_CVT_FP_TO_FP_HP - VSX floating point/floating point conversion
 * involving one half precision value
 * op - instruction mnemonic
 * nels - number of elements (1, 2 or 4)
 * stp - source type
 * ttp - target type
 * sfld - source vsr_t field
 * tfld - target vsr_t field
 * sfprf - set FPRF
 */
#define VSX_CVT_FP_TO_FP_HP(op, nels, stp, ttp, sfld, tfld, sfprf) \
void helper_##op(CPUPPCState *env, uint32_t opcode)                \
{                                                                  \
    ppc_vsr_t xt, xb;                                              \
    int i;                                                         \
                                                                   \
    getVSR(xB(opcode), &xb, env);                                  \
    memset(&xt, 0, sizeof(xt));                                    \
                                                                   \
    for (i = 0; i < nels; i++) {                                   \
        xt.tfld = stp##_to_##ttp(xb.sfld, 1, &env->fp_status);     \
        if (unlikely(stp##_is_signaling_nan(xb.sfld,               \
                                            &env->fp_status))) {  \
            float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 0); \
            xt.tfld = ttp##_snan_to_qnan(xt.tfld);                 \
        }                                                          \
        if (sfprf) {                                               \
            helper_compute_fprf_##ttp(env, xt.tfld);               \
        }                                                          \
    }                                                              \
                                                                   \
    putVSR(xT(opcode), &xt, env);                                  \
    float_check_status(env);                                       \
}
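
/*
 * The extra "1" argument to the float16 conversions is softfloat's
 * ieee flag, selecting IEEE 754-2008 half-precision semantics (with
 * Inf/NaN encodings) rather than the alternative format.
 */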
VSX_CVT_FP_TO_FP_HP(xscvdphp, 1, float64, float16, VsrD(0), VsrH(3), 1)
VSX_CVT_FP_TO_FP_HP(xscvhpdp, 1, float16, float64, VsrH(3), VsrD(0), 1)
VSX_CVT_FP_TO_FP_HP(xvcvsphp, 4, float32, float16, VsrW(i), VsrH(2 * i + 1), 0)
VSX_CVT_FP_TO_FP_HP(xvcvhpsp, 4, float16, float32, VsrH(2 * i + 1), VsrW(i), 0)
/*
 * xscvqpdp isn't using VSX_CVT_FP_TO_FP() because xscvqpdpo will be
 * added to this later.
 */
void helper_xscvqpdp(CPUPPCState *env, uint32_t opcode)
{
    ppc_vsr_t xt, xb;
    float_status tstat;

    getVSR(rB(opcode) + 32, &xb, env);
    memset(&xt, 0, sizeof(xt));

    tstat = env->fp_status;
    if (unlikely(Rc(opcode) != 0)) {
        tstat.float_rounding_mode = float_round_to_odd;
    }

    xt.VsrD(0) = float128_to_float64(xb.f128, &tstat);
    env->fp_status.float_exception_flags |= tstat.float_exception_flags;
    if (unlikely(float128_is_signaling_nan(xb.f128, &tstat))) {
        float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 0);
        xt.VsrD(0) = float64_snan_to_qnan(xt.VsrD(0));
    }
    helper_compute_fprf_float64(env, xt.VsrD(0));

    putVSR(rD(opcode) + 32, &xt, env);
    float_check_status(env);
}
uint64_t helper_xscvdpspn(CPUPPCState *env, uint64_t xb)
{
    float_status tstat = env->fp_status;
    set_float_exception_flags(0, &tstat);

    return (uint64_t)float64_to_float32(xb, &tstat) << 32;
}

uint64_t helper_xscvspdpn(CPUPPCState *env, uint64_t xb)
{
    float_status tstat = env->fp_status;
    set_float_exception_flags(0, &tstat);

    return float32_to_float64(xb >> 32, &tstat);
}
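
/*
 * The "n" (non-signalling) conversions above run on a scratch
 * float_status and never merge the flags back, so they change neither
 * the FPSCR status bits nor FPRF; this matches the xscvdpspn/xscvspdpn
 * definition of a conversion that raises no exceptions.
 */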
/* VSX_CVT_FP_TO_INT - VSX floating point to integer conversion
 * op - instruction mnemonic
 * nels - number of elements (1, 2 or 4)
 * stp - source type (float32 or float64)
 * ttp - target type (int32, uint32, int64 or uint64)
 * sfld - source vsr_t field
 * tfld - target vsr_t field
 * rnan - resulting NaN
 */
#define VSX_CVT_FP_TO_INT(op, nels, stp, ttp, sfld, tfld, rnan)              \
void helper_##op(CPUPPCState *env, uint32_t opcode)                          \
{                                                                            \
    ppc_vsr_t xt, xb;                                                        \
    int i;                                                                   \
                                                                             \
    getVSR(xB(opcode), &xb, env);                                            \
    getVSR(xT(opcode), &xt, env);                                            \
                                                                             \
    for (i = 0; i < nels; i++) {                                             \
        if (unlikely(stp##_is_any_nan(xb.sfld))) {                           \
            if (stp##_is_signaling_nan(xb.sfld, &env->fp_status)) {          \
                float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 0);       \
            }                                                                \
            float_invalid_op_excp(env, POWERPC_EXCP_FP_VXCVI, 0);            \
            xt.tfld = rnan;                                                  \
        } else {                                                             \
            xt.tfld = stp##_to_##ttp##_round_to_zero(xb.sfld,                \
                          &env->fp_status);                                  \
            if (env->fp_status.float_exception_flags & float_flag_invalid) { \
                float_invalid_op_excp(env, POWERPC_EXCP_FP_VXCVI, 0);        \
            }                                                                \
        }                                                                    \
    }                                                                        \
                                                                             \
    putVSR(xT(opcode), &xt, env);                                            \
    float_check_status(env);                                                 \
}
VSX_CVT_FP_TO_INT(xscvdpsxds, 1, float64, int64, VsrD(0), VsrD(0), \
                  0x8000000000000000ULL)
VSX_CVT_FP_TO_INT(xscvdpsxws, 1, float64, int32, VsrD(0), VsrW(1), \
                  0x80000000U)
VSX_CVT_FP_TO_INT(xscvdpuxds, 1, float64, uint64, VsrD(0), VsrD(0), 0ULL)
VSX_CVT_FP_TO_INT(xscvdpuxws, 1, float64, uint32, VsrD(0), VsrW(1), 0U)
VSX_CVT_FP_TO_INT(xvcvdpsxds, 2, float64, int64, VsrD(i), VsrD(i), \
                  0x8000000000000000ULL)
VSX_CVT_FP_TO_INT(xvcvdpsxws, 2, float64, int32, VsrD(i), VsrW(2*i), \
                  0x80000000U)
VSX_CVT_FP_TO_INT(xvcvdpuxds, 2, float64, uint64, VsrD(i), VsrD(i), 0ULL)
VSX_CVT_FP_TO_INT(xvcvdpuxws, 2, float64, uint32, VsrD(i), VsrW(2*i), 0U)
VSX_CVT_FP_TO_INT(xvcvspsxds, 2, float32, int64, VsrW(2*i), VsrD(i), \
                  0x8000000000000000ULL)
VSX_CVT_FP_TO_INT(xvcvspsxws, 4, float32, int32, VsrW(i), VsrW(i), 0x80000000U)
VSX_CVT_FP_TO_INT(xvcvspuxds, 2, float32, uint64, VsrW(2*i), VsrD(i), 0ULL)
VSX_CVT_FP_TO_INT(xvcvspuxws, 4, float32, uint32, VsrW(i), VsrW(i), 0U)
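
/*
 * All of these are "z" (truncating) conversions, hence the
 * *_round_to_zero softfloat routines regardless of the FPSCR rounding
 * mode; rnan is the value delivered for NaN inputs (minimum integer
 * for the signed targets, zero for the unsigned ones).
 */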
/* VSX_CVT_FP_TO_INT_VECTOR - VSX floating point to integer conversion
 * op - instruction mnemonic
 * stp - source type (float32 or float64)
 * ttp - target type (int32, uint32, int64 or uint64)
 * sfld - source vsr_t field
 * tfld - target vsr_t field
 * rnan - resulting NaN
 */
#define VSX_CVT_FP_TO_INT_VECTOR(op, stp, ttp, sfld, tfld, rnan)             \
void helper_##op(CPUPPCState *env, uint32_t opcode)                          \
{                                                                            \
    ppc_vsr_t xt, xb;                                                        \
                                                                             \
    getVSR(rB(opcode) + 32, &xb, env);                                       \
    memset(&xt, 0, sizeof(xt));                                              \
                                                                             \
    if (unlikely(stp##_is_any_nan(xb.sfld))) {                               \
        if (stp##_is_signaling_nan(xb.sfld, &env->fp_status)) {              \
            float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 0);           \
        }                                                                    \
        float_invalid_op_excp(env, POWERPC_EXCP_FP_VXCVI, 0);                \
        xt.tfld = rnan;                                                      \
    } else {                                                                 \
        xt.tfld = stp##_to_##ttp##_round_to_zero(xb.sfld,                    \
                      &env->fp_status);                                      \
        if (env->fp_status.float_exception_flags & float_flag_invalid) {     \
            float_invalid_op_excp(env, POWERPC_EXCP_FP_VXCVI, 0);            \
        }                                                                    \
    }                                                                        \
                                                                             \
    putVSR(rD(opcode) + 32, &xt, env);                                       \
    float_check_status(env);                                                 \
}
VSX_CVT_FP_TO_INT_VECTOR(xscvqpsdz, float128, int64, f128, VsrD(0), \
                         0x8000000000000000ULL)
VSX_CVT_FP_TO_INT_VECTOR(xscvqpswz, float128, int32, f128, VsrD(0), \
                         0xffffffff80000000ULL)
VSX_CVT_FP_TO_INT_VECTOR(xscvqpudz, float128, uint64, f128, VsrD(0), 0x0ULL)
VSX_CVT_FP_TO_INT_VECTOR(xscvqpuwz, float128, uint32, f128, VsrD(0), 0x0ULL)
/* VSX_CVT_INT_TO_FP - VSX integer to floating point conversion
 * op - instruction mnemonic
 * nels - number of elements (1, 2 or 4)
 * stp - source type (int32, uint32, int64 or uint64)
 * ttp - target type (float32 or float64)
 * sfld - source vsr_t field
 * tfld - target vsr_t field
 * sfprf - set FPRF
 * r2sp - round intermediate double-precision result to single precision
 */
#define VSX_CVT_INT_TO_FP(op, nels, stp, ttp, sfld, tfld, sfprf, r2sp)  \
void helper_##op(CPUPPCState *env, uint32_t opcode)                     \
{                                                                       \
    ppc_vsr_t xt, xb;                                                   \
    int i;                                                              \
                                                                        \
    getVSR(xB(opcode), &xb, env);                                       \
    getVSR(xT(opcode), &xt, env);                                       \
                                                                        \
    for (i = 0; i < nels; i++) {                                        \
        xt.tfld = stp##_to_##ttp(xb.sfld, &env->fp_status);             \
        if (r2sp) {                                                     \
            xt.tfld = helper_frsp(env, xt.tfld);                        \
        }                                                               \
        if (sfprf) {                                                    \
            helper_compute_fprf_float64(env, xt.tfld);                  \
        }                                                               \
    }                                                                   \
                                                                        \
    putVSR(xT(opcode), &xt, env);                                       \
    float_check_status(env);                                            \
}
VSX_CVT_INT_TO_FP(xscvsxddp, 1, int64, float64, VsrD(0), VsrD(0), 1, 0)
VSX_CVT_INT_TO_FP(xscvuxddp, 1, uint64, float64, VsrD(0), VsrD(0), 1, 0)
VSX_CVT_INT_TO_FP(xscvsxdsp, 1, int64, float64, VsrD(0), VsrD(0), 1, 1)
VSX_CVT_INT_TO_FP(xscvuxdsp, 1, uint64, float64, VsrD(0), VsrD(0), 1, 1)
VSX_CVT_INT_TO_FP(xvcvsxddp, 2, int64, float64, VsrD(i), VsrD(i), 0, 0)
VSX_CVT_INT_TO_FP(xvcvuxddp, 2, uint64, float64, VsrD(i), VsrD(i), 0, 0)
VSX_CVT_INT_TO_FP(xvcvsxwdp, 2, int32, float64, VsrW(2*i), VsrD(i), 0, 0)
VSX_CVT_INT_TO_FP(xvcvuxwdp, 2, uint64, float64, VsrW(2*i), VsrD(i), 0, 0)
VSX_CVT_INT_TO_FP(xvcvsxdsp, 2, int64, float32, VsrD(i), VsrW(2*i), 0, 0)
VSX_CVT_INT_TO_FP(xvcvuxdsp, 2, uint64, float32, VsrD(i), VsrW(2*i), 0, 0)
VSX_CVT_INT_TO_FP(xvcvsxwsp, 4, int32, float32, VsrW(i), VsrW(i), 0, 0)
VSX_CVT_INT_TO_FP(xvcvuxwsp, 4, uint32, float32, VsrW(i), VsrW(i), 0, 0)
/* VSX_CVT_INT_TO_FP_VECTOR - VSX integer to floating point conversion
 * op - instruction mnemonic
 * stp - source type (int32, uint32, int64 or uint64)
 * ttp - target type (float32 or float64)
 * sfld - source vsr_t field
 * tfld - target vsr_t field
 */
#define VSX_CVT_INT_TO_FP_VECTOR(op, stp, ttp, sfld, tfld)              \
void helper_##op(CPUPPCState *env, uint32_t opcode)                     \
{                                                                       \
    ppc_vsr_t xt, xb;                                                   \
                                                                        \
    getVSR(rB(opcode) + 32, &xb, env);                                  \
    getVSR(rD(opcode) + 32, &xt, env);                                  \
                                                                        \
    xt.tfld = stp##_to_##ttp(xb.sfld, &env->fp_status);                 \
    helper_compute_fprf_##ttp(env, xt.tfld);                            \
                                                                        \
    putVSR(rD(opcode) + 32, &xt, env);                                  \
    float_check_status(env);                                            \
}
VSX_CVT_INT_TO_FP_VECTOR(xscvsdqp, int64, float128, VsrD(0), f128)
VSX_CVT_INT_TO_FP_VECTOR(xscvudqp, uint64, float128, VsrD(0), f128)
/* For "use current rounding mode", define a value that will not be one of
 * the existing rounding mode enums.
 */
#define FLOAT_ROUND_CURRENT (float_round_nearest_even + float_round_down + \
                             float_round_up + float_round_to_zero)
/* VSX_ROUND - VSX floating point round
 * op - instruction mnemonic
 * nels - number of elements (1, 2 or 4)
 * tp - type (float32 or float64)
 * fld - vsr_t field (VsrD(*) or VsrW(*))
 * rmode - rounding mode
 * sfprf - set FPRF
 */
#define VSX_ROUND(op, nels, tp, fld, rmode, sfprf)                     \
void helper_##op(CPUPPCState *env, uint32_t opcode)                    \
{                                                                      \
    ppc_vsr_t xt, xb;                                                  \
    int i;                                                             \
    getVSR(xB(opcode), &xb, env);                                      \
    getVSR(xT(opcode), &xt, env);                                      \
                                                                       \
    if (rmode != FLOAT_ROUND_CURRENT) {                                \
        set_float_rounding_mode(rmode, &env->fp_status);               \
    }                                                                  \
                                                                       \
    for (i = 0; i < nels; i++) {                                       \
        if (unlikely(tp##_is_signaling_nan(xb.fld,                     \
                                           &env->fp_status))) {        \
            float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 0);     \
            xt.fld = tp##_snan_to_qnan(xb.fld);                        \
        } else {                                                       \
            xt.fld = tp##_round_to_int(xb.fld, &env->fp_status);       \
        }                                                              \
        if (sfprf) {                                                   \
            helper_compute_fprf_float64(env, xt.fld);                  \
        }                                                              \
    }                                                                  \
                                                                       \
    /* If this is not a "use current rounding mode" instruction,       \
     * then inhibit setting of the XX bit and restore rounding         \
     * mode from FPSCR */                                              \
    if (rmode != FLOAT_ROUND_CURRENT) {                                \
        fpscr_set_rounding_mode(env);                                  \
        env->fp_status.float_exception_flags &= ~float_flag_inexact;   \
    }                                                                  \
                                                                       \
    putVSR(xT(opcode), &xt, env);                                      \
    float_check_status(env);                                           \
}
VSX_ROUND(xsrdpi, 1, float64, VsrD(0), float_round_ties_away, 1)
VSX_ROUND(xsrdpic, 1, float64, VsrD(0), FLOAT_ROUND_CURRENT, 1)
VSX_ROUND(xsrdpim, 1, float64, VsrD(0), float_round_down, 1)
VSX_ROUND(xsrdpip, 1, float64, VsrD(0), float_round_up, 1)
VSX_ROUND(xsrdpiz, 1, float64, VsrD(0), float_round_to_zero, 1)

VSX_ROUND(xvrdpi, 2, float64, VsrD(i), float_round_ties_away, 0)
VSX_ROUND(xvrdpic, 2, float64, VsrD(i), FLOAT_ROUND_CURRENT, 0)
VSX_ROUND(xvrdpim, 2, float64, VsrD(i), float_round_down, 0)
VSX_ROUND(xvrdpip, 2, float64, VsrD(i), float_round_up, 0)
VSX_ROUND(xvrdpiz, 2, float64, VsrD(i), float_round_to_zero, 0)

VSX_ROUND(xvrspi, 4, float32, VsrW(i), float_round_ties_away, 0)
VSX_ROUND(xvrspic, 4, float32, VsrW(i), FLOAT_ROUND_CURRENT, 0)
VSX_ROUND(xvrspim, 4, float32, VsrW(i), float_round_down, 0)
VSX_ROUND(xvrspip, 4, float32, VsrW(i), float_round_up, 0)
VSX_ROUND(xvrspiz, 4, float32, VsrW(i), float_round_to_zero, 0)
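
/*
 * xsrdpi/xvrdpi/xvrspi implement the ISA's "round to nearest away"
 * (ties away from zero), hence float_round_ties_away rather than the
 * default nearest-even; the "c" forms round in the current FPSCR mode.
 */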
uint64_t helper_xsrsp(CPUPPCState *env, uint64_t xb)
{
    helper_reset_fpstatus(env);

    uint64_t xt = helper_frsp(env, xb);

    helper_compute_fprf_float64(env, xt);
    float_check_status(env);
    return xt;
}
#define VSX_XXPERM(op, indexed)                                       \
void helper_##op(CPUPPCState *env, uint32_t opcode)                   \
{                                                                     \
    ppc_vsr_t xt, xa, pcv, xto;                                       \
    int i, idx;                                                       \
                                                                      \
    getVSR(xA(opcode), &xa, env);                                     \
    getVSR(xT(opcode), &xt, env);                                     \
    getVSR(xB(opcode), &pcv, env);                                    \
                                                                      \
    for (i = 0; i < 16; i++) {                                        \
        idx = pcv.VsrB(i) & 0x1F;                                     \
        if (indexed) {                                                \
            idx = 31 - idx;                                           \
        }                                                             \
        xto.VsrB(i) = (idx <= 15) ? xa.VsrB(idx) : xt.VsrB(idx - 16); \
    }                                                                 \
    putVSR(xT(opcode), &xto, env);                                    \
}

VSX_XXPERM(xxperm, 0)
VSX_XXPERM(xxpermr, 1)
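
/*
 * xxpermr (the right-indexed form) interprets the permute control
 * vector indices from the other end (idx = 31 - idx above), giving
 * the byte-index-reversed variant of xxperm.
 */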
void helper_xvxsigsp(CPUPPCState *env, uint32_t opcode)
{
    ppc_vsr_t xt, xb;
    uint32_t exp, i, fraction;

    getVSR(xB(opcode), &xb, env);
    memset(&xt, 0, sizeof(xt));

    for (i = 0; i < 4; i++) {
        exp = (xb.VsrW(i) >> 23) & 0xFF;
        fraction = xb.VsrW(i) & 0x7FFFFF;
        if (exp != 0 && exp != 255) {
            xt.VsrW(i) = fraction | 0x00800000;
        } else {
            xt.VsrW(i) = fraction;
        }
    }
    putVSR(xT(opcode), &xt, env);
}
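
/*
 * xvxsigsp extracts the 24-bit significand of each word: normal
 * numbers get the implicit leading 1 ORed in, while zeros, denormals,
 * infinities and NaNs (exp 0 or 255) keep only the raw fraction bits.
 */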
/* VSX_TEST_DC - VSX floating point test data class
 * op - instruction mnemonic
 * nels - number of elements (1, 2 or 4)
 * xbn - VSR register number
 * tp - type (float32 or float64)
 * fld - vsr_t field (VsrD(*) or VsrW(*))
 * tfld - target vsr_t field (VsrD(*) or VsrW(*))
 * fld_max - target field max
 * scrf - set result in CR and FPCC
 */
#define VSX_TEST_DC(op, nels, xbn, tp, fld, tfld, fld_max, scrf)  \
void helper_##op(CPUPPCState *env, uint32_t opcode)               \
{                                                                 \
    ppc_vsr_t xt, xb;                                             \
    uint32_t i, sign, dcmx;                                       \
    uint32_t cc, match = 0;                                       \
                                                                  \
    getVSR(xbn, &xb, env);                                        \
    if (!scrf) {                                                  \
        memset(&xt, 0, sizeof(xt));                               \
        dcmx = DCMX_XV(opcode);                                   \
    } else {                                                      \
        dcmx = DCMX(opcode);                                      \
    }                                                             \
                                                                  \
    for (i = 0; i < nels; i++) {                                  \
        sign = tp##_is_neg(xb.fld);                               \
        if (tp##_is_any_nan(xb.fld)) {                            \
            match = extract32(dcmx, 6, 1);                        \
        } else if (tp##_is_infinity(xb.fld)) {                    \
            match = extract32(dcmx, 4 + !sign, 1);                \
        } else if (tp##_is_zero(xb.fld)) {                        \
            match = extract32(dcmx, 2 + !sign, 1);                \
        } else if (tp##_is_zero_or_denormal(xb.fld)) {            \
            match = extract32(dcmx, 0 + !sign, 1);                \
        }                                                         \
                                                                  \
        if (scrf) {                                               \
            cc = sign << CRF_LT_BIT | match << CRF_EQ_BIT;        \
            env->fpscr &= ~(0x0F << FPSCR_FPRF);                  \
            env->fpscr |= cc << FPSCR_FPRF;                       \
            env->crf[BF(opcode)] = cc;                            \
        } else {                                                  \
            xt.tfld = match ? fld_max : 0;                        \
        }                                                         \
        match = 0;                                                \
    }                                                             \
    if (!scrf) {                                                  \
        putVSR(xT(opcode), &xt, env);                             \
    }                                                             \
}
VSX_TEST_DC(xvtstdcdp, 2, xB(opcode), float64, VsrD(i), VsrD(i), UINT64_MAX, 0)
VSX_TEST_DC(xvtstdcsp, 4, xB(opcode), float32, VsrW(i), VsrW(i), UINT32_MAX, 0)
VSX_TEST_DC(xststdcdp, 1, xB(opcode), float64, VsrD(0), VsrD(0), 0, 1)
VSX_TEST_DC(xststdcqp, 1, (rB(opcode) + 32), float128, f128, VsrD(0), 0, 1)
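
/*
 * DCMX bit assignments, as decoded above: bit 6 - any NaN; bits 5/4 -
 * +/-infinity; bits 3/2 - +/-zero; bits 1/0 - +/-denormal. xststdcqp
 * reads its operand from VSR rB+32, i.e. from a vector register.
 */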
void helper_xststdcsp(CPUPPCState *env, uint32_t opcode)
{
    ppc_vsr_t xb;
    uint32_t dcmx, sign, exp;
    uint32_t cc, match = 0, not_sp = 0;

    getVSR(xB(opcode), &xb, env);
    dcmx = DCMX(opcode);
    exp = (xb.VsrD(0) >> 52) & 0x7FF;

    sign = float64_is_neg(xb.VsrD(0));
    if (float64_is_any_nan(xb.VsrD(0))) {
        match = extract32(dcmx, 6, 1);
    } else if (float64_is_infinity(xb.VsrD(0))) {
        match = extract32(dcmx, 4 + !sign, 1);
    } else if (float64_is_zero(xb.VsrD(0))) {
        match = extract32(dcmx, 2 + !sign, 1);
    } else if (float64_is_zero_or_denormal(xb.VsrD(0)) ||
               (exp > 0 && exp < 0x381)) {
        match = extract32(dcmx, 0 + !sign, 1);
    }

    not_sp = !float64_eq(xb.VsrD(0),
                         float32_to_float64(
                             float64_to_float32(xb.VsrD(0), &env->fp_status),
                             &env->fp_status), &env->fp_status);

    cc = sign << CRF_LT_BIT | match << CRF_EQ_BIT | not_sp << CRF_SO_BIT;
    env->fpscr &= ~(0x0F << FPSCR_FPRF);
    env->fpscr |= cc << FPSCR_FPRF;
    env->crf[BF(opcode)] = cc;
}
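
/*
 * not_sp is determined by a round trip through single precision: if
 * converting to float32 and back does not compare equal to the
 * original, the value is not representable in single precision and
 * CRF_SO is set. The exp < 0x381 test additionally catches values
 * that would be denormal in single precision (0x381 is the double
 * bias of the single-precision minimum normal exponent, -126 + 1023).
 */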
void helper_xsrqpi(CPUPPCState *env, uint32_t opcode)
{
    ppc_vsr_t xb;
    ppc_vsr_t xt;
    uint8_t r = Rrm(opcode);
    uint8_t ex = Rc(opcode);
    uint8_t rmc = RMC(opcode);
    uint8_t rmode = 0;
    float_status tstat;

    getVSR(rB(opcode) + 32, &xb, env);
    memset(&xt, 0, sizeof(xt));
    helper_reset_fpstatus(env);

    if (r == 0 && rmc == 0) {
        rmode = float_round_ties_away;
    } else if (r == 0 && rmc == 0x3) {
        rmode = fpscr_rn;
    } else if (r == 1) {
        switch (rmc) {
        case 0:
            rmode = float_round_nearest_even;
            break;
        case 1:
            rmode = float_round_to_zero;
            break;
        case 2:
            rmode = float_round_up;
            break;
        case 3:
            rmode = float_round_down;
            break;
        default:
            abort();
        }
    }

    tstat = env->fp_status;
    set_float_exception_flags(0, &tstat);
    set_float_rounding_mode(rmode, &tstat);
    xt.f128 = float128_round_to_int(xb.f128, &tstat);
    env->fp_status.float_exception_flags |= tstat.float_exception_flags;

    if (unlikely(tstat.float_exception_flags & float_flag_invalid)) {
        if (float128_is_signaling_nan(xb.f128, &tstat)) {
            float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 0);
            xt.f128 = float128_snan_to_qnan(xt.f128);
        }
    }

    if (ex == 0 && (tstat.float_exception_flags & float_flag_inexact)) {
        env->fp_status.float_exception_flags &= ~float_flag_inexact;
    }

    helper_compute_fprf_float128(env, xt.f128);
    float_check_status(env);
    putVSR(rD(opcode) + 32, &xt, env);
}
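
/*
 * The R/RMC fields select the rounding mode: R=0 RMC=0 is "round to
 * nearest away", R=0 RMC=3 uses the current FPSCR mode, and R=1 maps
 * RMC 0..3 onto the four fixed IEEE modes. With EX=0 (xsrqpi) the
 * inexact flag is suppressed; the EX=1 form (xsrqpix) keeps it.
 */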
void helper_xsrqpxp(CPUPPCState *env, uint32_t opcode)
{
    ppc_vsr_t xb;
    ppc_vsr_t xt;
    uint8_t r = Rrm(opcode);
    uint8_t rmc = RMC(opcode);
    uint8_t rmode = 0;
    floatx80 round_res;
    float_status tstat;

    getVSR(rB(opcode) + 32, &xb, env);
    memset(&xt, 0, sizeof(xt));
    helper_reset_fpstatus(env);

    if (r == 0 && rmc == 0) {
        rmode = float_round_ties_away;
    } else if (r == 0 && rmc == 0x3) {
        rmode = fpscr_rn;
    } else if (r == 1) {
        switch (rmc) {
        case 0:
            rmode = float_round_nearest_even;
            break;
        case 1:
            rmode = float_round_to_zero;
            break;
        case 2:
            rmode = float_round_up;
            break;
        case 3:
            rmode = float_round_down;
            break;
        default:
            abort();
        }
    }

    tstat = env->fp_status;
    set_float_exception_flags(0, &tstat);
    set_float_rounding_mode(rmode, &tstat);
    round_res = float128_to_floatx80(xb.f128, &tstat);
    xt.f128 = floatx80_to_float128(round_res, &tstat);
    env->fp_status.float_exception_flags |= tstat.float_exception_flags;

    if (unlikely(tstat.float_exception_flags & float_flag_invalid)) {
        if (float128_is_signaling_nan(xb.f128, &tstat)) {
            float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 0);
            xt.f128 = float128_snan_to_qnan(xt.f128);
        }
    }

    helper_compute_fprf_float128(env, xt.f128);
    putVSR(rD(opcode) + 32, &xt, env);
    float_check_status(env);
}
void helper_xssqrtqp(CPUPPCState *env, uint32_t opcode)
{
    ppc_vsr_t xb;
    ppc_vsr_t xt;
    float_status tstat;

    getVSR(rB(opcode) + 32, &xb, env);
    memset(&xt, 0, sizeof(xt));
    helper_reset_fpstatus(env);

    tstat = env->fp_status;
    if (unlikely(Rc(opcode) != 0)) {
        tstat.float_rounding_mode = float_round_to_odd;
    }

    set_float_exception_flags(0, &tstat);
    xt.f128 = float128_sqrt(xb.f128, &tstat);
    env->fp_status.float_exception_flags |= tstat.float_exception_flags;

    if (unlikely(tstat.float_exception_flags & float_flag_invalid)) {
        if (float128_is_signaling_nan(xb.f128, &tstat)) {
            float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 1);
            xt.f128 = float128_snan_to_qnan(xb.f128);
        } else if (float128_is_quiet_nan(xb.f128, &tstat)) {
            xt.f128 = xb.f128;
        } else if (float128_is_neg(xb.f128) && !float128_is_zero(xb.f128)) {
            float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSQRT, 1);
            xt.f128 = float128_default_nan(&env->fp_status);
        }
    }

    helper_compute_fprf_float128(env, xt.f128);
    putVSR(rD(opcode) + 32, &xt, env);
    float_check_status(env);
}
void helper_xssubqp(CPUPPCState *env, uint32_t opcode)
{
    ppc_vsr_t xt, xa, xb;
    float_status tstat;

    getVSR(rA(opcode) + 32, &xa, env);
    getVSR(rB(opcode) + 32, &xb, env);
    getVSR(rD(opcode) + 32, &xt, env);
    helper_reset_fpstatus(env);

    tstat = env->fp_status;
    if (unlikely(Rc(opcode) != 0)) {
        tstat.float_rounding_mode = float_round_to_odd;
    }

    set_float_exception_flags(0, &tstat);
    xt.f128 = float128_sub(xa.f128, xb.f128, &tstat);
    env->fp_status.float_exception_flags |= tstat.float_exception_flags;

    if (unlikely(tstat.float_exception_flags & float_flag_invalid)) {
        if (float128_is_infinity(xa.f128) && float128_is_infinity(xb.f128)) {
            float_invalid_op_excp(env, POWERPC_EXCP_FP_VXISI, 1);
        } else if (float128_is_signaling_nan(xa.f128, &tstat) ||
                   float128_is_signaling_nan(xb.f128, &tstat)) {
            float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 1);
        }
    }

    helper_compute_fprf_float128(env, xt.f128);
    putVSR(rD(opcode) + 32, &xt, env);
    float_check_status(env);
}
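
/*
 * For the quad-precision helpers above, a non-zero Rc field selects
 * the "o" (round-to-odd) instruction forms such as xssubqpo,
 * implemented by forcing float_round_to_odd on the scratch status.
 */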