/*
 *  PowerPC floating point and SPE emulation helpers for QEMU.
 *
 *  Copyright (c) 2003-2007 Jocelyn Mayer
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"
#include "cpu.h"
#include "exec/helper-proto.h"
#include "exec/exec-all.h"
#include "internal.h"
#include "fpu/softfloat.h"
static inline float128 float128_snan_to_qnan(float128 x)
{
    float128 r;

    r.high = x.high | 0x0000800000000000;
    r.low = x.low;
    return r;
}
#define float64_snan_to_qnan(x) ((x) | 0x0008000000000000ULL)
#define float32_snan_to_qnan(x) ((x) | 0x00400000)
#define float16_snan_to_qnan(x) ((x) | 0x0200)
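
/*
 * Illustrative example (not from the original source): quieting the
 * float32 sNaN 0x7f800001 with float32_snan_to_qnan() sets the
 * most-significant fraction bit, yielding the qNaN 0x7fc00001.
 */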
static inline bool fp_exceptions_enabled(CPUPPCState *env)
{
#ifdef CONFIG_USER_ONLY
    return true;
#else
    return (env->msr & ((1U << MSR_FE0) | (1U << MSR_FE1))) != 0;
#endif
}
/*****************************************************************************/
/* Floating point operations helpers */

/*
 * This is the non-arithmetic conversion that happens e.g. on loads.
 * In the Power ISA pseudocode, this is called DOUBLE.
 */
uint64_t helper_todouble(uint32_t arg)
{
    uint32_t abs_arg = arg & 0x7fffffff;
    uint64_t ret;

    if (likely(abs_arg >= 0x00800000)) {
        if (unlikely(extract32(arg, 23, 8) == 0xff)) {
            /* Inf or NAN.  */
            ret  = (uint64_t)extract32(arg, 31, 1) << 63;
            ret |= (uint64_t)0x7ff << 52;
            ret |= (uint64_t)extract32(arg, 0, 23) << 29;
        } else {
            /* Normalized operand.  */
            ret  = (uint64_t)extract32(arg, 30, 2) << 62;
            ret |= ((extract32(arg, 30, 1) ^ 1) * (uint64_t)7) << 59;
            ret |= (uint64_t)extract32(arg, 0, 30) << 29;
        }
    } else {
        /* Zero or Denormalized operand.  */
        ret = (uint64_t)extract32(arg, 31, 1) << 63;
        if (unlikely(abs_arg != 0)) {
            /* Denormalized operand.  */
            int shift = clz32(abs_arg) - 9;
            int exp = -126 - shift + 1023;
            ret |= (uint64_t)exp << 52;
            /* Widen before shifting: shift + 29 can exceed 31.  */
            ret |= (uint64_t)abs_arg << (shift + 29);
        }
    }
    return ret;
}
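
/*
 * Worked example (illustrative): helper_todouble(0x3f800000), the
 * single-precision encoding of 1.0, returns 0x3ff0000000000000, the
 * double-precision encoding of 1.0 -- the sign and fraction bits are
 * copied and the exponent is re-biased from 127 to 1023.
 */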
/*
 * This is the non-arithmetic conversion that happens e.g. on stores.
 * In the Power ISA pseudocode, this is called SINGLE.
 */
uint32_t helper_tosingle(uint64_t arg)
{
    int exp = extract64(arg, 52, 11);
    uint32_t ret;

    if (likely(exp > 896)) {
        /* No denormalization required (includes Inf, NaN).  */
        ret  = extract64(arg, 62, 2) << 30;
        ret |= extract64(arg, 29, 30);
    } else {
        /*
         * Zero or Denormal result.  If the exponent is in bounds for
         * a single-precision denormal result, extract the proper
         * bits.  If the input is not zero, and the exponent is out of
         * bounds, then the result is undefined; this underflows to
         * zero.
         */
        ret = extract64(arg, 63, 1) << 31;
        if (unlikely(exp >= 874)) {
            /* Denormal result.  */
            ret |= ((1ULL << 52) | extract64(arg, 0, 52)) >> (896 + 30 - exp);
        }
    }
    return ret;
}
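
/*
 * Worked example (illustrative): helper_tosingle(0x3ff0000000000000),
 * double-precision 1.0, returns 0x3f800000, the single-precision
 * encoding of 1.0; this is the inverse of the DOUBLE conversion above.
 */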
static inline int ppc_float32_get_unbiased_exp(float32 f)
{
    return ((f >> 23) & 0xFF) - 127;
}

static inline int ppc_float64_get_unbiased_exp(float64 f)
{
    return ((f >> 52) & 0x7FF) - 1023;
}
/* Classify a floating-point number.  */
enum {
    is_normal   = 1,
    is_zero     = 2,
    is_denormal = 4,
    is_inf      = 8,
    is_qnan     = 16,
    is_snan     = 32,
    is_neg      = 64,
};

#define COMPUTE_CLASS(tp)                                      \
static int tp##_classify(tp arg)                               \
{                                                              \
    int ret = tp##_is_neg(arg) * is_neg;                       \
    if (unlikely(tp##_is_any_nan(arg))) {                      \
        float_status dummy = { };  /* snan_bit_is_one = 0 */   \
        ret |= (tp##_is_signaling_nan(arg, &dummy)             \
                ? is_snan : is_qnan);                          \
    } else if (unlikely(tp##_is_infinity(arg))) {              \
        ret |= is_inf;                                         \
    } else if (tp##_is_zero(arg)) {                            \
        ret |= is_zero;                                        \
    } else if (tp##_is_zero_or_denormal(arg)) {                \
        ret |= is_denormal;                                    \
    } else {                                                   \
        ret |= is_normal;                                      \
    }                                                          \
    return ret;                                                \
}

COMPUTE_CLASS(float16)
COMPUTE_CLASS(float32)
COMPUTE_CLASS(float64)
COMPUTE_CLASS(float128)
static void set_fprf_from_class(CPUPPCState *env, int class)
{
    static const uint8_t fprf[6][2] = {
        { 0x04, 0x08 }, /* normalized */
        { 0x02, 0x12 }, /* zero */
        { 0x14, 0x18 }, /* denormalized */
        { 0x05, 0x09 }, /* infinity */
        { 0x11, 0x11 }, /* qnan */
        { 0x00, 0x00 }, /* snan -- flags are undefined */
    };
    bool isneg = class & is_neg;

    env->fpscr &= ~(0x1F << FPSCR_FPRF);
    env->fpscr |= fprf[ctz32(class)][isneg] << FPSCR_FPRF;
}
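
/*
 * Illustrative examples (derived from the table above): a positive
 * normalized number sets FPRF to 0x04, a negative zero sets it to
 * 0x12, and a quiet NaN sets it to 0x11 regardless of sign.
 */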
#define COMPUTE_FPRF(tp)                                \
void helper_compute_fprf_##tp(CPUPPCState *env, tp arg) \
{                                                       \
    set_fprf_from_class(env, tp##_classify(arg));       \
}

COMPUTE_FPRF(float16)
COMPUTE_FPRF(float32)
COMPUTE_FPRF(float64)
COMPUTE_FPRF(float128)
/* Floating-point invalid operations exception */
static void finish_invalid_op_excp(CPUPPCState *env, int op, uintptr_t retaddr)
{
    /* Update the floating-point invalid operation summary */
    env->fpscr |= 1 << FPSCR_VX;
    /* Update the floating-point exception summary */
    env->fpscr |= 1 << FPSCR_FX;
    if (fpscr_ve != 0) {
        /* Update the floating-point enabled exception summary */
        env->fpscr |= 1 << FPSCR_FEX;
        if (fp_exceptions_enabled(env)) {
            raise_exception_err_ra(env, POWERPC_EXCP_PROGRAM,
                                   POWERPC_EXCP_FP | op, retaddr);
        }
    }
}
static void finish_invalid_op_arith(CPUPPCState *env, int op,
                                    bool set_fpcc, uintptr_t retaddr)
{
    env->fpscr &= ~((1 << FPSCR_FR) | (1 << FPSCR_FI));
    if (fpscr_ve == 0) {
        if (set_fpcc) {
            env->fpscr &= ~(0xF << FPSCR_FPCC);
            env->fpscr |= 0x11 << FPSCR_FPCC;
        }
    }
    finish_invalid_op_excp(env, op, retaddr);
}
static void float_invalid_op_vxsnan(CPUPPCState *env, uintptr_t retaddr)
{
    env->fpscr |= 1 << FPSCR_VXSNAN;
    finish_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, retaddr);
}
/* Magnitude subtraction of infinities */
static void float_invalid_op_vxisi(CPUPPCState *env, bool set_fpcc,
                                   uintptr_t retaddr)
{
    env->fpscr |= 1 << FPSCR_VXISI;
    finish_invalid_op_arith(env, POWERPC_EXCP_FP_VXISI, set_fpcc, retaddr);
}

/* Division of infinity by infinity */
static void float_invalid_op_vxidi(CPUPPCState *env, bool set_fpcc,
                                   uintptr_t retaddr)
{
    env->fpscr |= 1 << FPSCR_VXIDI;
    finish_invalid_op_arith(env, POWERPC_EXCP_FP_VXIDI, set_fpcc, retaddr);
}

/* Division of zero by zero */
static void float_invalid_op_vxzdz(CPUPPCState *env, bool set_fpcc,
                                   uintptr_t retaddr)
{
    env->fpscr |= 1 << FPSCR_VXZDZ;
    finish_invalid_op_arith(env, POWERPC_EXCP_FP_VXZDZ, set_fpcc, retaddr);
}

/* Multiplication of zero by infinity */
static void float_invalid_op_vximz(CPUPPCState *env, bool set_fpcc,
                                   uintptr_t retaddr)
{
    env->fpscr |= 1 << FPSCR_VXIMZ;
    finish_invalid_op_arith(env, POWERPC_EXCP_FP_VXIMZ, set_fpcc, retaddr);
}

/* Square root of a negative number */
static void float_invalid_op_vxsqrt(CPUPPCState *env, bool set_fpcc,
                                    uintptr_t retaddr)
{
    env->fpscr |= 1 << FPSCR_VXSQRT;
    finish_invalid_op_arith(env, POWERPC_EXCP_FP_VXSQRT, set_fpcc, retaddr);
}
/* Ordered comparison of NaN */
static void float_invalid_op_vxvc(CPUPPCState *env, bool set_fpcc,
                                  uintptr_t retaddr)
{
    env->fpscr |= 1 << FPSCR_VXVC;
    if (set_fpcc) {
        env->fpscr &= ~(0xF << FPSCR_FPCC);
        env->fpscr |= 0x11 << FPSCR_FPCC;
    }
    /* Update the floating-point invalid operation summary */
    env->fpscr |= 1 << FPSCR_VX;
    /* Update the floating-point exception summary */
    env->fpscr |= 1 << FPSCR_FX;
    /* We must update the target FPR before raising the exception */
    if (fpscr_ve != 0) {
        CPUState *cs = env_cpu(env);

        cs->exception_index = POWERPC_EXCP_PROGRAM;
        env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_VXVC;
        /* Update the floating-point enabled exception summary */
        env->fpscr |= 1 << FPSCR_FEX;
        /* Exception is deferred */
    }
}
/* Invalid conversion */
static void float_invalid_op_vxcvi(CPUPPCState *env, bool set_fpcc,
                                   uintptr_t retaddr)
{
    env->fpscr |= 1 << FPSCR_VXCVI;
    env->fpscr &= ~((1 << FPSCR_FR) | (1 << FPSCR_FI));
    if (fpscr_ve == 0) {
        if (set_fpcc) {
            env->fpscr &= ~(0xF << FPSCR_FPCC);
            env->fpscr |= 0x11 << FPSCR_FPCC;
        }
    }
    finish_invalid_op_excp(env, POWERPC_EXCP_FP_VXCVI, retaddr);
}
static inline void float_zero_divide_excp(CPUPPCState *env, uintptr_t raddr)
{
    env->fpscr |= 1 << FPSCR_ZX;
    env->fpscr &= ~((1 << FPSCR_FR) | (1 << FPSCR_FI));
    /* Update the floating-point exception summary */
    env->fpscr |= 1 << FPSCR_FX;
    if (fpscr_ze != 0) {
        /* Update the floating-point enabled exception summary */
        env->fpscr |= 1 << FPSCR_FEX;
        if (fp_exceptions_enabled(env)) {
            raise_exception_err_ra(env, POWERPC_EXCP_PROGRAM,
                                   POWERPC_EXCP_FP | POWERPC_EXCP_FP_ZX,
                                   raddr);
        }
    }
}
static inline void float_overflow_excp(CPUPPCState *env)
{
    CPUState *cs = env_cpu(env);

    env->fpscr |= 1 << FPSCR_OX;
    /* Update the floating-point exception summary */
    env->fpscr |= 1 << FPSCR_FX;
    if (fpscr_oe != 0) {
        /* XXX: should adjust the result */
        /* Update the floating-point enabled exception summary */
        env->fpscr |= 1 << FPSCR_FEX;
        /* We must update the target FPR before raising the exception */
        cs->exception_index = POWERPC_EXCP_PROGRAM;
        env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_OX;
    } else {
        env->fpscr |= 1 << FPSCR_XX;
        env->fpscr |= 1 << FPSCR_FI;
    }
}
static inline void float_underflow_excp(CPUPPCState *env)
{
    CPUState *cs = env_cpu(env);

    env->fpscr |= 1 << FPSCR_UX;
    /* Update the floating-point exception summary */
    env->fpscr |= 1 << FPSCR_FX;
    if (fpscr_ue != 0) {
        /* XXX: should adjust the result */
        /* Update the floating-point enabled exception summary */
        env->fpscr |= 1 << FPSCR_FEX;
        /* We must update the target FPR before raising the exception */
        cs->exception_index = POWERPC_EXCP_PROGRAM;
        env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_UX;
    }
}
static inline void float_inexact_excp(CPUPPCState *env)
{
    CPUState *cs = env_cpu(env);

    env->fpscr |= 1 << FPSCR_FI;
    env->fpscr |= 1 << FPSCR_XX;
    /* Update the floating-point exception summary */
    env->fpscr |= 1 << FPSCR_FX;
    if (fpscr_xe != 0) {
        /* Update the floating-point enabled exception summary */
        env->fpscr |= 1 << FPSCR_FEX;
        /* We must update the target FPR before raising the exception */
        cs->exception_index = POWERPC_EXCP_PROGRAM;
        env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_XX;
    }
}
static inline void fpscr_set_rounding_mode(CPUPPCState *env)
{
    int rnd_type;

    /* Set rounding mode */
    switch (fpscr_rn) {
    case 0:
        /* Best approximation (round to nearest) */
        rnd_type = float_round_nearest_even;
        break;
    case 1:
        /* Smaller magnitude (round toward zero) */
        rnd_type = float_round_to_zero;
        break;
    case 2:
        /* Round toward +infinity */
        rnd_type = float_round_up;
        break;
    default:
    case 3:
        /* Round toward -infinity */
        rnd_type = float_round_down;
        break;
    }
    set_float_rounding_mode(rnd_type, &env->fp_status);
}
void helper_fpscr_clrbit(CPUPPCState *env, uint32_t bit)
{
    int prev;

    prev = (env->fpscr >> bit) & 1;
    env->fpscr &= ~(1 << bit);
    if (prev == 1) {
        switch (bit) {
        case FPSCR_RN1:
        case FPSCR_RN:
            fpscr_set_rounding_mode(env);
            break;
        case FPSCR_VXSNAN:
        case FPSCR_VXISI:
        case FPSCR_VXIDI:
        case FPSCR_VXZDZ:
        case FPSCR_VXIMZ:
        case FPSCR_VXVC:
        case FPSCR_VXSOFT:
        case FPSCR_VXSQRT:
        case FPSCR_VXCVI:
            if (!fpscr_ix) {
                /* Set VX bit to zero */
                env->fpscr &= ~(1 << FPSCR_VX);
            }
            break;
        case FPSCR_OX:
        case FPSCR_UX:
        case FPSCR_ZX:
        case FPSCR_XX:
        case FPSCR_VE:
        case FPSCR_OE:
        case FPSCR_UE:
        case FPSCR_ZE:
        case FPSCR_XE:
            if (!fpscr_eex) {
                /* Clear the FEX bit */
                env->fpscr &= ~(1 << FPSCR_FEX);
            }
            break;
        default:
            break;
        }
    }
}
void helper_fpscr_setbit(CPUPPCState *env, uint32_t bit)
{
    CPUState *cs = env_cpu(env);
    int prev;

    prev = (env->fpscr >> bit) & 1;
    env->fpscr |= 1 << bit;
498 env
->fpscr
|= 1 << FPSCR_VX
;
507 env
->error_code
= POWERPC_EXCP_FP
;
509 env
->error_code
|= POWERPC_EXCP_FP_VXSNAN
;
512 env
->error_code
|= POWERPC_EXCP_FP_VXISI
;
515 env
->error_code
|= POWERPC_EXCP_FP_VXIDI
;
518 env
->error_code
|= POWERPC_EXCP_FP_VXZDZ
;
521 env
->error_code
|= POWERPC_EXCP_FP_VXIMZ
;
524 env
->error_code
|= POWERPC_EXCP_FP_VXVC
;
527 env
->error_code
|= POWERPC_EXCP_FP_VXSOFT
;
530 env
->error_code
|= POWERPC_EXCP_FP_VXSQRT
;
533 env
->error_code
|= POWERPC_EXCP_FP_VXCVI
;
541 env
->error_code
= POWERPC_EXCP_FP
| POWERPC_EXCP_FP_OX
;
548 env
->error_code
= POWERPC_EXCP_FP
| POWERPC_EXCP_FP_UX
;
555 env
->error_code
= POWERPC_EXCP_FP
| POWERPC_EXCP_FP_ZX
;
562 env
->error_code
= POWERPC_EXCP_FP
| POWERPC_EXCP_FP_XX
;
568 fpscr_set_rounding_mode(env
);
573 /* Update the floating-point enabled exception summary */
574 env
->fpscr
|= 1 << FPSCR_FEX
;
575 /* We have to update Rc1 before raising the exception */
576 cs
->exception_index
= POWERPC_EXCP_PROGRAM
;
void helper_store_fpscr(CPUPPCState *env, uint64_t arg, uint32_t mask)
{
    CPUState *cs = env_cpu(env);
    target_ulong prev, new;
    int i;

    prev = env->fpscr;
    new = (target_ulong)arg;
    new &= ~0x60000000LL;
    new |= prev & 0x60000000LL;
    for (i = 0; i < sizeof(target_ulong) * 2; i++) {
        if (mask & (1 << i)) {
            env->fpscr &= ~(0xFLL << (4 * i));
            env->fpscr |= new & (0xFLL << (4 * i));
        }
    }
    /* Update VX and FEX */
    if (fpscr_ix != 0) {
        env->fpscr |= 1 << FPSCR_VX;
    } else {
        env->fpscr &= ~(1 << FPSCR_VX);
    }
    if ((fpscr_ex & fpscr_eex) != 0) {
        env->fpscr |= 1 << FPSCR_FEX;
        cs->exception_index = POWERPC_EXCP_PROGRAM;
        /* XXX: we should compute it properly */
        env->error_code = POWERPC_EXCP_FP;
    } else {
        env->fpscr &= ~(1 << FPSCR_FEX);
    }
    fpscr_set_rounding_mode(env);
}

void store_fpscr(CPUPPCState *env, uint64_t arg, uint32_t mask)
{
    helper_store_fpscr(env, arg, mask);
}
static void do_float_check_status(CPUPPCState *env, uintptr_t raddr)
{
    CPUState *cs = env_cpu(env);
    int status = get_float_exception_flags(&env->fp_status);
    bool inexact_happened = false;

    if (status & float_flag_overflow) {
        float_overflow_excp(env);
    } else if (status & float_flag_underflow) {
        float_underflow_excp(env);
    } else if (status & float_flag_inexact) {
        float_inexact_excp(env);
        inexact_happened = true;
    }

    /* if the inexact flag was not set */
    if (inexact_happened == false) {
        env->fpscr &= ~(1 << FPSCR_FI); /* clear the FPSCR[FI] bit */
    }

    if (cs->exception_index == POWERPC_EXCP_PROGRAM &&
        (env->error_code & POWERPC_EXCP_FP)) {
        /* Deferred floating-point exception after target FPR update */
        if (fp_exceptions_enabled(env)) {
            raise_exception_err_ra(env, cs->exception_index,
                                   env->error_code, raddr);
        }
    }
}

void helper_float_check_status(CPUPPCState *env)
{
    do_float_check_status(env, GETPC());
}
void helper_reset_fpstatus(CPUPPCState *env)
{
    set_float_exception_flags(0, &env->fp_status);
}
static void float_invalid_op_addsub(CPUPPCState *env, bool set_fpcc,
                                    uintptr_t retaddr, int classes)
{
    if ((classes & ~is_neg) == is_inf) {
        /* Magnitude subtraction of infinities */
        float_invalid_op_vxisi(env, set_fpcc, retaddr);
    } else if (classes & is_snan) {
        float_invalid_op_vxsnan(env, retaddr);
    }
}
/* fadd - fadd. */
float64 helper_fadd(CPUPPCState *env, float64 arg1, float64 arg2)
{
    float64 ret = float64_add(arg1, arg2, &env->fp_status);
    int status = get_float_exception_flags(&env->fp_status);

    if (unlikely(status & float_flag_invalid)) {
        float_invalid_op_addsub(env, 1, GETPC(),
                                float64_classify(arg1) |
                                float64_classify(arg2));
    }

    return ret;
}
/* fsub - fsub. */
float64 helper_fsub(CPUPPCState *env, float64 arg1, float64 arg2)
{
    float64 ret = float64_sub(arg1, arg2, &env->fp_status);
    int status = get_float_exception_flags(&env->fp_status);

    if (unlikely(status & float_flag_invalid)) {
        float_invalid_op_addsub(env, 1, GETPC(),
                                float64_classify(arg1) |
                                float64_classify(arg2));
    }

    return ret;
}
static void float_invalid_op_mul(CPUPPCState *env, bool set_fprc,
                                 uintptr_t retaddr, int classes)
{
    if ((classes & (is_zero | is_inf)) == (is_zero | is_inf)) {
        /* Multiplication of zero by infinity */
        float_invalid_op_vximz(env, set_fprc, retaddr);
    } else if (classes & is_snan) {
        float_invalid_op_vxsnan(env, retaddr);
    }
}
/* fmul - fmul. */
float64 helper_fmul(CPUPPCState *env, float64 arg1, float64 arg2)
{
    float64 ret = float64_mul(arg1, arg2, &env->fp_status);
    int status = get_float_exception_flags(&env->fp_status);

    if (unlikely(status & float_flag_invalid)) {
        float_invalid_op_mul(env, 1, GETPC(),
                             float64_classify(arg1) |
                             float64_classify(arg2));
    }

    return ret;
}
static void float_invalid_op_div(CPUPPCState *env, bool set_fprc,
                                 uintptr_t retaddr, int classes)
{
    classes &= ~is_neg;
    if (classes == is_inf) {
        /* Division of infinity by infinity */
        float_invalid_op_vxidi(env, set_fprc, retaddr);
    } else if (classes == is_zero) {
        /* Division of zero by zero */
        float_invalid_op_vxzdz(env, set_fprc, retaddr);
    } else if (classes & is_snan) {
        float_invalid_op_vxsnan(env, retaddr);
    }
}
/* fdiv - fdiv. */
float64 helper_fdiv(CPUPPCState *env, float64 arg1, float64 arg2)
{
    float64 ret = float64_div(arg1, arg2, &env->fp_status);
    int status = get_float_exception_flags(&env->fp_status);

    if (unlikely(status)) {
        if (status & float_flag_invalid) {
            float_invalid_op_div(env, 1, GETPC(),
                                 float64_classify(arg1) |
                                 float64_classify(arg2));
        }
        if (status & float_flag_divbyzero) {
            float_zero_divide_excp(env, GETPC());
        }
    }

    return ret;
}
static void float_invalid_cvt(CPUPPCState *env, bool set_fprc,
                              uintptr_t retaddr, int class1)
{
    float_invalid_op_vxcvi(env, set_fprc, retaddr);
    if (class1 & is_snan) {
        float_invalid_op_vxsnan(env, retaddr);
    }
}
#define FPU_FCTI(op, cvt, nanval)                                      \
uint64_t helper_##op(CPUPPCState *env, float64 arg)                    \
{                                                                      \
    uint64_t ret = float64_to_##cvt(arg, &env->fp_status);             \
    int status = get_float_exception_flags(&env->fp_status);           \
                                                                       \
    if (unlikely(status)) {                                            \
        if (status & float_flag_invalid) {                             \
            float_invalid_cvt(env, 1, GETPC(), float64_classify(arg)); \
            ret = nanval;                                              \
        }                                                              \
        do_float_check_status(env, GETPC());                           \
    }                                                                  \
    return ret;                                                        \
}

FPU_FCTI(fctiw, int32, 0x80000000U)
FPU_FCTI(fctiwz, int32_round_to_zero, 0x80000000U)
FPU_FCTI(fctiwu, uint32, 0x00000000U)
FPU_FCTI(fctiwuz, uint32_round_to_zero, 0x00000000U)
FPU_FCTI(fctid, int64, 0x8000000000000000ULL)
FPU_FCTI(fctidz, int64_round_to_zero, 0x8000000000000000ULL)
FPU_FCTI(fctidu, uint64, 0x0000000000000000ULL)
FPU_FCTI(fctiduz, uint64_round_to_zero, 0x0000000000000000ULL)
#define FPU_FCFI(op, cvtr, is_single)                      \
uint64_t helper_##op(CPUPPCState *env, uint64_t arg)       \
{                                                          \
    CPU_DoubleU farg;                                      \
                                                           \
    if (is_single) {                                       \
        float32 tmp = cvtr(arg, &env->fp_status);          \
        farg.d = float32_to_float64(tmp, &env->fp_status); \
    } else {                                               \
        farg.d = cvtr(arg, &env->fp_status);               \
    }                                                      \
    do_float_check_status(env, GETPC());                   \
    return farg.ll;                                        \
}

FPU_FCFI(fcfid, int64_to_float64, 0)
FPU_FCFI(fcfids, int64_to_float32, 1)
FPU_FCFI(fcfidu, uint64_to_float64, 0)
FPU_FCFI(fcfidus, uint64_to_float32, 1)
static inline uint64_t do_fri(CPUPPCState *env, uint64_t arg,
                              int rounding_mode)
{
    CPU_DoubleU farg;

    farg.ll = arg;

    if (unlikely(float64_is_signaling_nan(farg.d, &env->fp_status))) {
        /* sNaN round */
        float_invalid_op_vxsnan(env, GETPC());
        farg.ll = arg | 0x0008000000000000ULL;
    } else {
        int inexact = get_float_exception_flags(&env->fp_status) &
                      float_flag_inexact;
        set_float_rounding_mode(rounding_mode, &env->fp_status);
        farg.ll = float64_round_to_int(farg.d, &env->fp_status);
        /* Restore rounding mode from FPSCR */
        fpscr_set_rounding_mode(env);

        /* fri* does not set FPSCR[XX] */
        if (!inexact) {
            env->fp_status.float_exception_flags &= ~float_flag_inexact;
        }
    }
    do_float_check_status(env, GETPC());
    return farg.ll;
}
uint64_t helper_frin(CPUPPCState *env, uint64_t arg)
{
    return do_fri(env, arg, float_round_ties_away);
}

uint64_t helper_friz(CPUPPCState *env, uint64_t arg)
{
    return do_fri(env, arg, float_round_to_zero);
}

uint64_t helper_frip(CPUPPCState *env, uint64_t arg)
{
    return do_fri(env, arg, float_round_up);
}

uint64_t helper_frim(CPUPPCState *env, uint64_t arg)
{
    return do_fri(env, arg, float_round_down);
}
#define FPU_MADDSUB_UPDATE(NAME, TP)                                    \
static void NAME(CPUPPCState *env, TP arg1, TP arg2, TP arg3,           \
                 unsigned int madd_flags, uintptr_t retaddr)            \
{                                                                       \
    if (TP##_is_signaling_nan(arg1, &env->fp_status) ||                 \
        TP##_is_signaling_nan(arg2, &env->fp_status) ||                 \
        TP##_is_signaling_nan(arg3, &env->fp_status)) {                 \
        /* sNaN operation */                                            \
        float_invalid_op_vxsnan(env, retaddr);                          \
    }                                                                   \
    if ((TP##_is_infinity(arg1) && TP##_is_zero(arg2)) ||               \
        (TP##_is_zero(arg1) && TP##_is_infinity(arg2))) {               \
        /* Multiplication of zero by infinity */                        \
        float_invalid_op_vximz(env, 1, retaddr);                        \
    }                                                                   \
    if ((TP##_is_infinity(arg1) || TP##_is_infinity(arg2)) &&           \
        TP##_is_infinity(arg3)) {                                       \
        uint8_t aSign, bSign, cSign;                                    \
                                                                        \
        aSign = TP##_is_neg(arg1);                                      \
        bSign = TP##_is_neg(arg2);                                      \
        cSign = TP##_is_neg(arg3);                                      \
        if (madd_flags & float_muladd_negate_c) {                       \
            cSign ^= 1;                                                 \
        }                                                               \
        if (aSign ^ bSign ^ cSign) {                                    \
            float_invalid_op_vxisi(env, 1, retaddr);                    \
        }                                                               \
    }                                                                   \
}
FPU_MADDSUB_UPDATE(float32_maddsub_update_excp, float32)
FPU_MADDSUB_UPDATE(float64_maddsub_update_excp, float64)
#define FPU_FMADD(op, madd_flags)                                    \
uint64_t helper_##op(CPUPPCState *env, uint64_t arg1,                \
                     uint64_t arg2, uint64_t arg3)                   \
{                                                                    \
    uint32_t flags;                                                  \
    float64 ret = float64_muladd(arg1, arg2, arg3, madd_flags,       \
                                 &env->fp_status);                   \
    flags = get_float_exception_flags(&env->fp_status);              \
    if (flags) {                                                     \
        if (flags & float_flag_invalid) {                            \
            float64_maddsub_update_excp(env, arg1, arg2, arg3,       \
                                        madd_flags, GETPC());        \
        }                                                            \
        do_float_check_status(env, GETPC());                         \
    }                                                                \
    return ret;                                                      \
}

#define MADD_FLGS 0
#define MSUB_FLGS float_muladd_negate_c
#define NMADD_FLGS float_muladd_negate_result
#define NMSUB_FLGS (float_muladd_negate_c | float_muladd_negate_result)

FPU_FMADD(fmadd, MADD_FLGS)
FPU_FMADD(fnmadd, NMADD_FLGS)
FPU_FMADD(fmsub, MSUB_FLGS)
FPU_FMADD(fnmsub, NMSUB_FLGS)
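
/*
 * Reading the flag combinations above (illustrative): fmadd computes
 * (arg1 * arg2) + arg3; fmsub negates arg3 first, giving
 * (arg1 * arg2) - arg3; fnmadd and fnmsub additionally negate the
 * final result, giving -((arg1 * arg2) + arg3) and
 * -((arg1 * arg2) - arg3) respectively.
 */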
/* frsp - frsp. */
uint64_t helper_frsp(CPUPPCState *env, uint64_t arg)
{
    CPU_DoubleU farg;
    float32 f32;

    farg.ll = arg;

    if (unlikely(float64_is_signaling_nan(farg.d, &env->fp_status))) {
        float_invalid_op_vxsnan(env, GETPC());
    }
    f32 = float64_to_float32(farg.d, &env->fp_status);
    farg.d = float32_to_float64(f32, &env->fp_status);

    return farg.ll;
}
/* fsqrt - fsqrt. */
float64 helper_fsqrt(CPUPPCState *env, float64 arg)
{
    float64 ret = float64_sqrt(arg, &env->fp_status);
    int status = get_float_exception_flags(&env->fp_status);

    if (unlikely(status & float_flag_invalid)) {
        if (unlikely(float64_is_any_nan(arg))) {
            if (unlikely(float64_is_signaling_nan(arg, &env->fp_status))) {
                /* sNaN square root */
                float_invalid_op_vxsnan(env, GETPC());
            }
        } else {
            /* Square root of a negative nonzero number */
            float_invalid_op_vxsqrt(env, 1, GETPC());
        }
    }

    return ret;
}
/* fre - fre. */
float64 helper_fre(CPUPPCState *env, float64 arg)
{
    /* "Estimate" the reciprocal with actual division. */
    float64 ret = float64_div(float64_one, arg, &env->fp_status);
    int status = get_float_exception_flags(&env->fp_status);

    if (unlikely(status)) {
        if (status & float_flag_invalid) {
            if (float64_is_signaling_nan(arg, &env->fp_status)) {
                /* sNaN reciprocal */
                float_invalid_op_vxsnan(env, GETPC());
            }
        }
        if (status & float_flag_divbyzero) {
            float_zero_divide_excp(env, GETPC());
            /* For FPSCR.ZE == 0, the result is 1/2. */
            ret = float64_set_sign(float64_half, float64_is_neg(arg));
        }
    }

    return ret;
}
/* fres - fres. */
uint64_t helper_fres(CPUPPCState *env, uint64_t arg)
{
    CPU_DoubleU farg;
    float32 f32;

    farg.ll = arg;

    if (unlikely(float64_is_signaling_nan(farg.d, &env->fp_status))) {
        /* sNaN reciprocal */
        float_invalid_op_vxsnan(env, GETPC());
    }
    farg.d = float64_div(float64_one, farg.d, &env->fp_status);
    f32 = float64_to_float32(farg.d, &env->fp_status);
    farg.d = float32_to_float64(f32, &env->fp_status);

    return farg.ll;
}
/* frsqrte - frsqrte. */
float64 helper_frsqrte(CPUPPCState *env, float64 arg)
{
    /* "Estimate" the reciprocal with actual division. */
    float64 rets = float64_sqrt(arg, &env->fp_status);
    float64 retd = float64_div(float64_one, rets, &env->fp_status);
    int status = get_float_exception_flags(&env->fp_status);

    if (unlikely(status)) {
        if (status & float_flag_invalid) {
            if (float64_is_signaling_nan(arg, &env->fp_status)) {
                /* sNaN reciprocal */
                float_invalid_op_vxsnan(env, GETPC());
            } else {
                /* Square root of a negative nonzero number */
                float_invalid_op_vxsqrt(env, 1, GETPC());
            }
        }
        if (status & float_flag_divbyzero) {
            /* Reciprocal of (square root of) zero. */
            float_zero_divide_excp(env, GETPC());
        }
    }

    return retd;
}
/* fsel - fsel. */
uint64_t helper_fsel(CPUPPCState *env, uint64_t arg1, uint64_t arg2,
                     uint64_t arg3)
{
    CPU_DoubleU farg1;

    farg1.ll = arg1;

    if ((!float64_is_neg(farg1.d) || float64_is_zero(farg1.d)) &&
        !float64_is_any_nan(farg1.d)) {
        return arg2;
    } else {
        return arg3;
    }
}
uint32_t helper_ftdiv(uint64_t fra, uint64_t frb)
{
    int fe_flag = 0;
    int fg_flag = 0;

    if (unlikely(float64_is_infinity(fra) ||
                 float64_is_infinity(frb) ||
                 float64_is_zero(frb))) {
        fe_flag = 1;
        fg_flag = 1;
    } else {
        int e_a = ppc_float64_get_unbiased_exp(fra);
        int e_b = ppc_float64_get_unbiased_exp(frb);

        if (unlikely(float64_is_any_nan(fra) ||
                     float64_is_any_nan(frb))) {
            fe_flag = 1;
        } else if ((e_b <= -1022) || (e_b >= 1021)) {
            fe_flag = 1;
        } else if (!float64_is_zero(fra) &&
                   (((e_a - e_b) >= 1023) ||
                    ((e_a - e_b) <= -1021) ||
                    (e_a <= -970))) {
            fe_flag = 1;
        }

        if (unlikely(float64_is_zero_or_denormal(frb))) {
            /* XB is not zero because of the above check and */
            /* so must be denormalized.                      */
            fg_flag = 1;
        }
    }

    return 0x8 | (fg_flag ? 4 : 0) | (fe_flag ? 2 : 0);
}
uint32_t helper_ftsqrt(uint64_t frb)
{
    int fe_flag = 0;
    int fg_flag = 0;

    if (unlikely(float64_is_infinity(frb) || float64_is_zero(frb))) {
        fe_flag = 1;
        fg_flag = 1;
    } else {
        int e_b = ppc_float64_get_unbiased_exp(frb);

        if (unlikely(float64_is_any_nan(frb))) {
            fe_flag = 1;
        } else if (unlikely(float64_is_zero(frb))) {
            fe_flag = 1;
        } else if (unlikely(float64_is_neg(frb))) {
            fe_flag = 1;
        } else if (!float64_is_zero(frb) && (e_b <= (-1022 + 52))) {
            fe_flag = 1;
        }

        if (unlikely(float64_is_zero_or_denormal(frb))) {
            /* XB is not zero because of the above check and */
            /* therefore must be denormalized.               */
            fg_flag = 1;
        }
    }

    return 0x8 | (fg_flag ? 4 : 0) | (fe_flag ? 2 : 0);
}
void helper_fcmpu(CPUPPCState *env, uint64_t arg1, uint64_t arg2,
                  uint32_t crfD)
{
    CPU_DoubleU farg1, farg2;
    uint32_t ret = 0;

    farg1.ll = arg1;
    farg2.ll = arg2;

    if (unlikely(float64_is_any_nan(farg1.d) ||
                 float64_is_any_nan(farg2.d))) {
        ret = 0x01UL;
    } else if (float64_lt(farg1.d, farg2.d, &env->fp_status)) {
        ret = 0x08UL;
    } else if (!float64_le(farg1.d, farg2.d, &env->fp_status)) {
        ret = 0x04UL;
    } else {
        ret = 0x02UL;
    }

    env->fpscr &= ~(0x0F << FPSCR_FPRF);
    env->fpscr |= ret << FPSCR_FPRF;
    env->crf[crfD] = ret;
    if (unlikely(ret == 0x01UL
                 && (float64_is_signaling_nan(farg1.d, &env->fp_status) ||
                     float64_is_signaling_nan(farg2.d, &env->fp_status)))) {
        /* sNaN comparison */
        float_invalid_op_vxsnan(env, GETPC());
    }
}
void helper_fcmpo(CPUPPCState *env, uint64_t arg1, uint64_t arg2,
                  uint32_t crfD)
{
    CPU_DoubleU farg1, farg2;
    uint32_t ret = 0;

    farg1.ll = arg1;
    farg2.ll = arg2;

    if (unlikely(float64_is_any_nan(farg1.d) ||
                 float64_is_any_nan(farg2.d))) {
        ret = 0x01UL;
    } else if (float64_lt(farg1.d, farg2.d, &env->fp_status)) {
        ret = 0x08UL;
    } else if (!float64_le(farg1.d, farg2.d, &env->fp_status)) {
        ret = 0x04UL;
    } else {
        ret = 0x02UL;
    }

    env->fpscr &= ~(0x0F << FPSCR_FPRF);
    env->fpscr |= ret << FPSCR_FPRF;
    env->crf[crfD] = ret;
    if (unlikely(ret == 0x01UL)) {
        float_invalid_op_vxvc(env, 1, GETPC());
        if (float64_is_signaling_nan(farg1.d, &env->fp_status) ||
            float64_is_signaling_nan(farg2.d, &env->fp_status)) {
            /* sNaN comparison */
            float_invalid_op_vxsnan(env, GETPC());
        }
    }
}
1180 /* Single-precision floating-point conversions */
1181 static inline uint32_t efscfsi(CPUPPCState
*env
, uint32_t val
)
1185 u
.f
= int32_to_float32(val
, &env
->vec_status
);
1190 static inline uint32_t efscfui(CPUPPCState
*env
, uint32_t val
)
1194 u
.f
= uint32_to_float32(val
, &env
->vec_status
);
1199 static inline int32_t efsctsi(CPUPPCState
*env
, uint32_t val
)
    /* NaNs are not treated the same way IEEE 754 does */
1205 if (unlikely(float32_is_quiet_nan(u
.f
, &env
->vec_status
))) {
1209 return float32_to_int32(u
.f
, &env
->vec_status
);
1212 static inline uint32_t efsctui(CPUPPCState
*env
, uint32_t val
)
    /* NaNs are not treated the same way IEEE 754 does */
1218 if (unlikely(float32_is_quiet_nan(u
.f
, &env
->vec_status
))) {
1222 return float32_to_uint32(u
.f
, &env
->vec_status
);
1225 static inline uint32_t efsctsiz(CPUPPCState
*env
, uint32_t val
)
    /* NaNs are not treated the same way IEEE 754 does */
1231 if (unlikely(float32_is_quiet_nan(u
.f
, &env
->vec_status
))) {
1235 return float32_to_int32_round_to_zero(u
.f
, &env
->vec_status
);
1238 static inline uint32_t efsctuiz(CPUPPCState
*env
, uint32_t val
)
    /* NaNs are not treated the same way IEEE 754 does */
1244 if (unlikely(float32_is_quiet_nan(u
.f
, &env
->vec_status
))) {
1248 return float32_to_uint32_round_to_zero(u
.f
, &env
->vec_status
);
1251 static inline uint32_t efscfsf(CPUPPCState
*env
, uint32_t val
)
1256 u
.f
= int32_to_float32(val
, &env
->vec_status
);
1257 tmp
= int64_to_float32(1ULL << 32, &env
->vec_status
);
1258 u
.f
= float32_div(u
.f
, tmp
, &env
->vec_status
);
1263 static inline uint32_t efscfuf(CPUPPCState
*env
, uint32_t val
)
1268 u
.f
= uint32_to_float32(val
, &env
->vec_status
);
1269 tmp
= uint64_to_float32(1ULL << 32, &env
->vec_status
);
1270 u
.f
= float32_div(u
.f
, tmp
, &env
->vec_status
);
1275 static inline uint32_t efsctsf(CPUPPCState
*env
, uint32_t val
)
    /* NaNs are not treated the same way IEEE 754 does */
1282 if (unlikely(float32_is_quiet_nan(u
.f
, &env
->vec_status
))) {
1285 tmp
= uint64_to_float32(1ULL << 32, &env
->vec_status
);
1286 u
.f
= float32_mul(u
.f
, tmp
, &env
->vec_status
);
1288 return float32_to_int32(u
.f
, &env
->vec_status
);
1291 static inline uint32_t efsctuf(CPUPPCState
*env
, uint32_t val
)
    /* NaNs are not treated the same way IEEE 754 does */
1298 if (unlikely(float32_is_quiet_nan(u
.f
, &env
->vec_status
))) {
1301 tmp
= uint64_to_float32(1ULL << 32, &env
->vec_status
);
1302 u
.f
= float32_mul(u
.f
, tmp
, &env
->vec_status
);
1304 return float32_to_uint32(u
.f
, &env
->vec_status
);
1307 #define HELPER_SPE_SINGLE_CONV(name) \
1308 uint32_t helper_e##name(CPUPPCState *env, uint32_t val) \
1310 return e##name(env, val); \
1313 HELPER_SPE_SINGLE_CONV(fscfsi
);
1315 HELPER_SPE_SINGLE_CONV(fscfui
);
1317 HELPER_SPE_SINGLE_CONV(fscfuf
);
1319 HELPER_SPE_SINGLE_CONV(fscfsf
);
1321 HELPER_SPE_SINGLE_CONV(fsctsi
);
1323 HELPER_SPE_SINGLE_CONV(fsctui
);
1325 HELPER_SPE_SINGLE_CONV(fsctsiz
);
1327 HELPER_SPE_SINGLE_CONV(fsctuiz
);
1329 HELPER_SPE_SINGLE_CONV(fsctsf
);
1331 HELPER_SPE_SINGLE_CONV(fsctuf
);
1333 #define HELPER_SPE_VECTOR_CONV(name) \
1334 uint64_t helper_ev##name(CPUPPCState *env, uint64_t val) \
1336 return ((uint64_t)e##name(env, val >> 32) << 32) | \
1337 (uint64_t)e##name(env, val); \
1340 HELPER_SPE_VECTOR_CONV(fscfsi
);
1342 HELPER_SPE_VECTOR_CONV(fscfui
);
1344 HELPER_SPE_VECTOR_CONV(fscfuf
);
1346 HELPER_SPE_VECTOR_CONV(fscfsf
);
1348 HELPER_SPE_VECTOR_CONV(fsctsi
);
1350 HELPER_SPE_VECTOR_CONV(fsctui
);
1352 HELPER_SPE_VECTOR_CONV(fsctsiz
);
1354 HELPER_SPE_VECTOR_CONV(fsctuiz
);
1356 HELPER_SPE_VECTOR_CONV(fsctsf
);
1358 HELPER_SPE_VECTOR_CONV(fsctuf
);
1360 /* Single-precision floating-point arithmetic */
1361 static inline uint32_t efsadd(CPUPPCState
*env
, uint32_t op1
, uint32_t op2
)
1367 u1
.f
= float32_add(u1
.f
, u2
.f
, &env
->vec_status
);
1371 static inline uint32_t efssub(CPUPPCState
*env
, uint32_t op1
, uint32_t op2
)
1377 u1
.f
= float32_sub(u1
.f
, u2
.f
, &env
->vec_status
);
1381 static inline uint32_t efsmul(CPUPPCState
*env
, uint32_t op1
, uint32_t op2
)
1387 u1
.f
= float32_mul(u1
.f
, u2
.f
, &env
->vec_status
);
1391 static inline uint32_t efsdiv(CPUPPCState
*env
, uint32_t op1
, uint32_t op2
)
1397 u1
.f
= float32_div(u1
.f
, u2
.f
, &env
->vec_status
);
1401 #define HELPER_SPE_SINGLE_ARITH(name) \
1402 uint32_t helper_e##name(CPUPPCState *env, uint32_t op1, uint32_t op2) \
1404 return e##name(env, op1, op2); \
1407 HELPER_SPE_SINGLE_ARITH(fsadd
);
1409 HELPER_SPE_SINGLE_ARITH(fssub
);
1411 HELPER_SPE_SINGLE_ARITH(fsmul
);
1413 HELPER_SPE_SINGLE_ARITH(fsdiv
);
1415 #define HELPER_SPE_VECTOR_ARITH(name) \
1416 uint64_t helper_ev##name(CPUPPCState *env, uint64_t op1, uint64_t op2) \
1418 return ((uint64_t)e##name(env, op1 >> 32, op2 >> 32) << 32) | \
1419 (uint64_t)e##name(env, op1, op2); \
1422 HELPER_SPE_VECTOR_ARITH(fsadd
);
1424 HELPER_SPE_VECTOR_ARITH(fssub
);
1426 HELPER_SPE_VECTOR_ARITH(fsmul
);
1428 HELPER_SPE_VECTOR_ARITH(fsdiv
);
1430 /* Single-precision floating-point comparisons */
1431 static inline uint32_t efscmplt(CPUPPCState
*env
, uint32_t op1
, uint32_t op2
)
1437 return float32_lt(u1
.f
, u2
.f
, &env
->vec_status
) ? 4 : 0;
1440 static inline uint32_t efscmpgt(CPUPPCState
*env
, uint32_t op1
, uint32_t op2
)
1446 return float32_le(u1
.f
, u2
.f
, &env
->vec_status
) ? 0 : 4;
1449 static inline uint32_t efscmpeq(CPUPPCState
*env
, uint32_t op1
, uint32_t op2
)
1455 return float32_eq(u1
.f
, u2
.f
, &env
->vec_status
) ? 4 : 0;
1458 static inline uint32_t efststlt(CPUPPCState
*env
, uint32_t op1
, uint32_t op2
)
    /* XXX: TODO: ignore special values (NaN, infinities, ...) */
1461 return efscmplt(env
, op1
, op2
);
1464 static inline uint32_t efststgt(CPUPPCState
*env
, uint32_t op1
, uint32_t op2
)
    /* XXX: TODO: ignore special values (NaN, infinities, ...) */
1467 return efscmpgt(env
, op1
, op2
);
1470 static inline uint32_t efststeq(CPUPPCState
*env
, uint32_t op1
, uint32_t op2
)
    /* XXX: TODO: ignore special values (NaN, infinities, ...) */
1473 return efscmpeq(env
, op1
, op2
);
1476 #define HELPER_SINGLE_SPE_CMP(name) \
1477 uint32_t helper_e##name(CPUPPCState *env, uint32_t op1, uint32_t op2) \
1479 return e##name(env, op1, op2); \
1482 HELPER_SINGLE_SPE_CMP(fststlt
);
1484 HELPER_SINGLE_SPE_CMP(fststgt
);
1486 HELPER_SINGLE_SPE_CMP(fststeq
);
1488 HELPER_SINGLE_SPE_CMP(fscmplt
);
1490 HELPER_SINGLE_SPE_CMP(fscmpgt
);
1492 HELPER_SINGLE_SPE_CMP(fscmpeq
);
static inline uint32_t evcmp_merge(int t0, int t1)
{
    return (t0 << 3) | (t1 << 2) | ((t0 | t1) << 1) | (t0 & t1);
}
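
/*
 * Illustrative example: evcmp_merge(1, 0) returns 0xA (0b1010): the
 * top bit reflects the high-element result, the next the low-element
 * result, then the OR and the AND of the two results.
 */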
1499 #define HELPER_VECTOR_SPE_CMP(name) \
1500 uint32_t helper_ev##name(CPUPPCState *env, uint64_t op1, uint64_t op2) \
1502 return evcmp_merge(e##name(env, op1 >> 32, op2 >> 32), \
1503 e##name(env, op1, op2)); \
1506 HELPER_VECTOR_SPE_CMP(fststlt
);
1508 HELPER_VECTOR_SPE_CMP(fststgt
);
1510 HELPER_VECTOR_SPE_CMP(fststeq
);
1512 HELPER_VECTOR_SPE_CMP(fscmplt
);
1514 HELPER_VECTOR_SPE_CMP(fscmpgt
);
1516 HELPER_VECTOR_SPE_CMP(fscmpeq
);
1518 /* Double-precision floating-point conversion */
1519 uint64_t helper_efdcfsi(CPUPPCState
*env
, uint32_t val
)
1523 u
.d
= int32_to_float64(val
, &env
->vec_status
);
1528 uint64_t helper_efdcfsid(CPUPPCState
*env
, uint64_t val
)
1532 u
.d
= int64_to_float64(val
, &env
->vec_status
);
1537 uint64_t helper_efdcfui(CPUPPCState
*env
, uint32_t val
)
1541 u
.d
= uint32_to_float64(val
, &env
->vec_status
);
1546 uint64_t helper_efdcfuid(CPUPPCState
*env
, uint64_t val
)
1550 u
.d
= uint64_to_float64(val
, &env
->vec_status
);
1555 uint32_t helper_efdctsi(CPUPPCState
*env
, uint64_t val
)
    /* NaNs are not treated the same way IEEE 754 does */
1561 if (unlikely(float64_is_any_nan(u
.d
))) {
1565 return float64_to_int32(u
.d
, &env
->vec_status
);
1568 uint32_t helper_efdctui(CPUPPCState
*env
, uint64_t val
)
    /* NaNs are not treated the same way IEEE 754 does */
1574 if (unlikely(float64_is_any_nan(u
.d
))) {
1578 return float64_to_uint32(u
.d
, &env
->vec_status
);
1581 uint32_t helper_efdctsiz(CPUPPCState
*env
, uint64_t val
)
    /* NaNs are not treated the same way IEEE 754 does */
1587 if (unlikely(float64_is_any_nan(u
.d
))) {
1591 return float64_to_int32_round_to_zero(u
.d
, &env
->vec_status
);
1594 uint64_t helper_efdctsidz(CPUPPCState
*env
, uint64_t val
)
    /* NaNs are not treated the same way IEEE 754 does */
1600 if (unlikely(float64_is_any_nan(u
.d
))) {
1604 return float64_to_int64_round_to_zero(u
.d
, &env
->vec_status
);
1607 uint32_t helper_efdctuiz(CPUPPCState
*env
, uint64_t val
)
    /* NaNs are not treated the same way IEEE 754 does */
1613 if (unlikely(float64_is_any_nan(u
.d
))) {
1617 return float64_to_uint32_round_to_zero(u
.d
, &env
->vec_status
);
1620 uint64_t helper_efdctuidz(CPUPPCState
*env
, uint64_t val
)
    /* NaNs are not treated the same way IEEE 754 does */
1626 if (unlikely(float64_is_any_nan(u
.d
))) {
1630 return float64_to_uint64_round_to_zero(u
.d
, &env
->vec_status
);
1633 uint64_t helper_efdcfsf(CPUPPCState
*env
, uint32_t val
)
1638 u
.d
= int32_to_float64(val
, &env
->vec_status
);
1639 tmp
= int64_to_float64(1ULL << 32, &env
->vec_status
);
1640 u
.d
= float64_div(u
.d
, tmp
, &env
->vec_status
);
1645 uint64_t helper_efdcfuf(CPUPPCState
*env
, uint32_t val
)
1650 u
.d
= uint32_to_float64(val
, &env
->vec_status
);
1651 tmp
= int64_to_float64(1ULL << 32, &env
->vec_status
);
1652 u
.d
= float64_div(u
.d
, tmp
, &env
->vec_status
);
1657 uint32_t helper_efdctsf(CPUPPCState
*env
, uint64_t val
)
    /* NaNs are not treated the same way IEEE 754 does */
1664 if (unlikely(float64_is_any_nan(u
.d
))) {
1667 tmp
= uint64_to_float64(1ULL << 32, &env
->vec_status
);
1668 u
.d
= float64_mul(u
.d
, tmp
, &env
->vec_status
);
1670 return float64_to_int32(u
.d
, &env
->vec_status
);
1673 uint32_t helper_efdctuf(CPUPPCState
*env
, uint64_t val
)
    /* NaNs are not treated the same way IEEE 754 does */
1680 if (unlikely(float64_is_any_nan(u
.d
))) {
1683 tmp
= uint64_to_float64(1ULL << 32, &env
->vec_status
);
1684 u
.d
= float64_mul(u
.d
, tmp
, &env
->vec_status
);
1686 return float64_to_uint32(u
.d
, &env
->vec_status
);
1689 uint32_t helper_efscfd(CPUPPCState
*env
, uint64_t val
)
1695 u2
.f
= float64_to_float32(u1
.d
, &env
->vec_status
);
1700 uint64_t helper_efdcfs(CPUPPCState
*env
, uint32_t val
)
1706 u2
.d
= float32_to_float64(u1
.f
, &env
->vec_status
);
/* Double-precision floating-point arithmetic */
1712 uint64_t helper_efdadd(CPUPPCState
*env
, uint64_t op1
, uint64_t op2
)
1718 u1
.d
= float64_add(u1
.d
, u2
.d
, &env
->vec_status
);
1722 uint64_t helper_efdsub(CPUPPCState
*env
, uint64_t op1
, uint64_t op2
)
1728 u1
.d
= float64_sub(u1
.d
, u2
.d
, &env
->vec_status
);
1732 uint64_t helper_efdmul(CPUPPCState
*env
, uint64_t op1
, uint64_t op2
)
1738 u1
.d
= float64_mul(u1
.d
, u2
.d
, &env
->vec_status
);
1742 uint64_t helper_efddiv(CPUPPCState
*env
, uint64_t op1
, uint64_t op2
)
1748 u1
.d
= float64_div(u1
.d
, u2
.d
, &env
->vec_status
);
1752 /* Double precision floating point helpers */
1753 uint32_t helper_efdtstlt(CPUPPCState
*env
, uint64_t op1
, uint64_t op2
)
1759 return float64_lt(u1
.d
, u2
.d
, &env
->vec_status
) ? 4 : 0;
1762 uint32_t helper_efdtstgt(CPUPPCState
*env
, uint64_t op1
, uint64_t op2
)
1768 return float64_le(u1
.d
, u2
.d
, &env
->vec_status
) ? 0 : 4;
1771 uint32_t helper_efdtsteq(CPUPPCState
*env
, uint64_t op1
, uint64_t op2
)
1777 return float64_eq_quiet(u1
.d
, u2
.d
, &env
->vec_status
) ? 4 : 0;
1780 uint32_t helper_efdcmplt(CPUPPCState
*env
, uint64_t op1
, uint64_t op2
)
    /* XXX: TODO: test special values (NaN, infinities, ...) */
1783 return helper_efdtstlt(env
, op1
, op2
);
1786 uint32_t helper_efdcmpgt(CPUPPCState
*env
, uint64_t op1
, uint64_t op2
)
    /* XXX: TODO: test special values (NaN, infinities, ...) */
1789 return helper_efdtstgt(env
, op1
, op2
);
1792 uint32_t helper_efdcmpeq(CPUPPCState
*env
, uint64_t op1
, uint64_t op2
)
    /* XXX: TODO: test special values (NaN, infinities, ...) */
1795 return helper_efdtsteq(env
, op1
, op2
);
#define float64_to_float64(x, env) x
/*
 * VSX_ADD_SUB - VSX floating point add/subtract
 *   name  - instruction mnemonic
 *   op    - operation (add or sub)
 *   nels  - number of elements (1, 2 or 4)
 *   tp    - type (float32 or float64)
 *   fld   - vsr_t field (VsrD(*) or VsrW(*))
 *   sfprf - set FPRF
 */
1810 #define VSX_ADD_SUB(name, op, nels, tp, fld, sfprf, r2sp) \
1811 void helper_##name(CPUPPCState *env, ppc_vsr_t *xt, \
1812 ppc_vsr_t *xa, ppc_vsr_t *xb) \
1814 ppc_vsr_t t = *xt; \
1817 helper_reset_fpstatus(env); \
1819 for (i = 0; i < nels; i++) { \
1820 float_status tstat = env->fp_status; \
1821 set_float_exception_flags(0, &tstat); \
1822 t.fld = tp##_##op(xa->fld, xb->fld, &tstat); \
1823 env->fp_status.float_exception_flags |= tstat.float_exception_flags; \
1825 if (unlikely(tstat.float_exception_flags & float_flag_invalid)) { \
1826 float_invalid_op_addsub(env, sfprf, GETPC(), \
1827 tp##_classify(xa->fld) | \
1828 tp##_classify(xb->fld)); \
1832 t.fld = helper_frsp(env, t.fld); \
1836 helper_compute_fprf_float64(env, t.fld); \
1840 do_float_check_status(env, GETPC()); \
VSX_ADD_SUB(xsadddp, add, 1, float64, VsrD(0), 1, 0)
VSX_ADD_SUB(xsaddsp, add, 1, float64, VsrD(0), 1, 1)
VSX_ADD_SUB(xvadddp, add, 2, float64, VsrD(i), 0, 0)
VSX_ADD_SUB(xvaddsp, add, 4, float32, VsrW(i), 0, 0)
VSX_ADD_SUB(xssubdp, sub, 1, float64, VsrD(0), 1, 0)
VSX_ADD_SUB(xssubsp, sub, 1, float64, VsrD(0), 1, 1)
VSX_ADD_SUB(xvsubdp, sub, 2, float64, VsrD(i), 0, 0)
VSX_ADD_SUB(xvsubsp, sub, 4, float32, VsrW(i), 0, 0)
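
/*
 * For instance (illustrative reading of the arguments above),
 * VSX_ADD_SUB(xsadddp, add, 1, float64, VsrD(0), 1, 0) generates
 * helper_xsadddp(), a single-element double-precision add that updates
 * FPSCR[FPRF], while VSX_ADD_SUB(xvaddsp, add, 4, float32, VsrW(i), 0, 0)
 * generates helper_xvaddsp(), a four-element single-precision vector add
 * that does not.
 */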
1852 void helper_xsaddqp(CPUPPCState
*env
, uint32_t opcode
,
1853 ppc_vsr_t
*xt
, ppc_vsr_t
*xa
, ppc_vsr_t
*xb
)
1858 helper_reset_fpstatus(env
);
1860 tstat
= env
->fp_status
;
1861 if (unlikely(Rc(opcode
) != 0)) {
1862 tstat
.float_rounding_mode
= float_round_to_odd
;
1865 set_float_exception_flags(0, &tstat
);
1866 t
.f128
= float128_add(xa
->f128
, xb
->f128
, &tstat
);
1867 env
->fp_status
.float_exception_flags
|= tstat
.float_exception_flags
;
1869 if (unlikely(tstat
.float_exception_flags
& float_flag_invalid
)) {
1870 float_invalid_op_addsub(env
, 1, GETPC(),
1871 float128_classify(xa
->f128
) |
1872 float128_classify(xb
->f128
));
1875 helper_compute_fprf_float128(env
, t
.f128
);
1878 do_float_check_status(env
, GETPC());
1882 * VSX_MUL - VSX floating point multiply
1883 * op - instruction mnemonic
1884 * nels - number of elements (1, 2 or 4)
1885 * tp - type (float32 or float64)
1886 * fld - vsr_t field (VsrD(*) or VsrW(*))
1889 #define VSX_MUL(op, nels, tp, fld, sfprf, r2sp) \
1890 void helper_##op(CPUPPCState *env, ppc_vsr_t *xt, \
1891 ppc_vsr_t *xa, ppc_vsr_t *xb) \
1893 ppc_vsr_t t = *xt; \
1896 helper_reset_fpstatus(env); \
1898 for (i = 0; i < nels; i++) { \
1899 float_status tstat = env->fp_status; \
1900 set_float_exception_flags(0, &tstat); \
1901 t.fld = tp##_mul(xa->fld, xb->fld, &tstat); \
1902 env->fp_status.float_exception_flags |= tstat.float_exception_flags; \
1904 if (unlikely(tstat.float_exception_flags & float_flag_invalid)) { \
1905 float_invalid_op_mul(env, sfprf, GETPC(), \
1906 tp##_classify(xa->fld) | \
1907 tp##_classify(xb->fld)); \
1911 t.fld = helper_frsp(env, t.fld); \
1915 helper_compute_fprf_float64(env, t.fld); \
1920 do_float_check_status(env, GETPC()); \
1923 VSX_MUL(xsmuldp
, 1, float64
, VsrD(0), 1, 0)
1924 VSX_MUL(xsmulsp
, 1, float64
, VsrD(0), 1, 1)
1925 VSX_MUL(xvmuldp
, 2, float64
, VsrD(i
), 0, 0)
1926 VSX_MUL(xvmulsp
, 4, float32
, VsrW(i
), 0, 0)
1928 void helper_xsmulqp(CPUPPCState
*env
, uint32_t opcode
,
1929 ppc_vsr_t
*xt
, ppc_vsr_t
*xa
, ppc_vsr_t
*xb
)
1934 helper_reset_fpstatus(env
);
1935 tstat
= env
->fp_status
;
1936 if (unlikely(Rc(opcode
) != 0)) {
1937 tstat
.float_rounding_mode
= float_round_to_odd
;
1940 set_float_exception_flags(0, &tstat
);
1941 t
.f128
= float128_mul(xa
->f128
, xb
->f128
, &tstat
);
1942 env
->fp_status
.float_exception_flags
|= tstat
.float_exception_flags
;
1944 if (unlikely(tstat
.float_exception_flags
& float_flag_invalid
)) {
1945 float_invalid_op_mul(env
, 1, GETPC(),
1946 float128_classify(xa
->f128
) |
1947 float128_classify(xb
->f128
));
1949 helper_compute_fprf_float128(env
, t
.f128
);
1952 do_float_check_status(env
, GETPC());
1956 * VSX_DIV - VSX floating point divide
1957 * op - instruction mnemonic
1958 * nels - number of elements (1, 2 or 4)
1959 * tp - type (float32 or float64)
1960 * fld - vsr_t field (VsrD(*) or VsrW(*))
1963 #define VSX_DIV(op, nels, tp, fld, sfprf, r2sp) \
1964 void helper_##op(CPUPPCState *env, ppc_vsr_t *xt, \
1965 ppc_vsr_t *xa, ppc_vsr_t *xb) \
1967 ppc_vsr_t t = *xt; \
1970 helper_reset_fpstatus(env); \
1972 for (i = 0; i < nels; i++) { \
1973 float_status tstat = env->fp_status; \
1974 set_float_exception_flags(0, &tstat); \
1975 t.fld = tp##_div(xa->fld, xb->fld, &tstat); \
1976 env->fp_status.float_exception_flags |= tstat.float_exception_flags; \
1978 if (unlikely(tstat.float_exception_flags & float_flag_invalid)) { \
1979 float_invalid_op_div(env, sfprf, GETPC(), \
1980 tp##_classify(xa->fld) | \
1981 tp##_classify(xb->fld)); \
1983 if (unlikely(tstat.float_exception_flags & float_flag_divbyzero)) { \
1984 float_zero_divide_excp(env, GETPC()); \
1988 t.fld = helper_frsp(env, t.fld); \
1992 helper_compute_fprf_float64(env, t.fld); \
1997 do_float_check_status(env, GETPC()); \
2000 VSX_DIV(xsdivdp
, 1, float64
, VsrD(0), 1, 0)
2001 VSX_DIV(xsdivsp
, 1, float64
, VsrD(0), 1, 1)
2002 VSX_DIV(xvdivdp
, 2, float64
, VsrD(i
), 0, 0)
2003 VSX_DIV(xvdivsp
, 4, float32
, VsrW(i
), 0, 0)
2005 void helper_xsdivqp(CPUPPCState
*env
, uint32_t opcode
,
2006 ppc_vsr_t
*xt
, ppc_vsr_t
*xa
, ppc_vsr_t
*xb
)
2011 helper_reset_fpstatus(env
);
2012 tstat
= env
->fp_status
;
2013 if (unlikely(Rc(opcode
) != 0)) {
2014 tstat
.float_rounding_mode
= float_round_to_odd
;
2017 set_float_exception_flags(0, &tstat
);
2018 t
.f128
= float128_div(xa
->f128
, xb
->f128
, &tstat
);
2019 env
->fp_status
.float_exception_flags
|= tstat
.float_exception_flags
;
2021 if (unlikely(tstat
.float_exception_flags
& float_flag_invalid
)) {
2022 float_invalid_op_div(env
, 1, GETPC(),
2023 float128_classify(xa
->f128
) |
2024 float128_classify(xb
->f128
));
2026 if (unlikely(tstat
.float_exception_flags
& float_flag_divbyzero
)) {
2027 float_zero_divide_excp(env
, GETPC());
2030 helper_compute_fprf_float128(env
, t
.f128
);
2032 do_float_check_status(env
, GETPC());
2036 * VSX_RE - VSX floating point reciprocal estimate
2037 * op - instruction mnemonic
2038 * nels - number of elements (1, 2 or 4)
2039 * tp - type (float32 or float64)
2040 * fld - vsr_t field (VsrD(*) or VsrW(*))
2043 #define VSX_RE(op, nels, tp, fld, sfprf, r2sp) \
2044 void helper_##op(CPUPPCState *env, ppc_vsr_t *xt, ppc_vsr_t *xb) \
2046 ppc_vsr_t t = *xt; \
2049 helper_reset_fpstatus(env); \
2051 for (i = 0; i < nels; i++) { \
2052 if (unlikely(tp##_is_signaling_nan(xb->fld, &env->fp_status))) { \
2053 float_invalid_op_vxsnan(env, GETPC()); \
2055 t.fld = tp##_div(tp##_one, xb->fld, &env->fp_status); \
2058 t.fld = helper_frsp(env, t.fld); \
2062 helper_compute_fprf_float64(env, t.fld); \
2067 do_float_check_status(env, GETPC()); \
2070 VSX_RE(xsredp
, 1, float64
, VsrD(0), 1, 0)
2071 VSX_RE(xsresp
, 1, float64
, VsrD(0), 1, 1)
2072 VSX_RE(xvredp
, 2, float64
, VsrD(i
), 0, 0)
2073 VSX_RE(xvresp
, 4, float32
, VsrW(i
), 0, 0)
2076 * VSX_SQRT - VSX floating point square root
2077 * op - instruction mnemonic
2078 * nels - number of elements (1, 2 or 4)
2079 * tp - type (float32 or float64)
2080 * fld - vsr_t field (VsrD(*) or VsrW(*))
2083 #define VSX_SQRT(op, nels, tp, fld, sfprf, r2sp) \
2084 void helper_##op(CPUPPCState *env, ppc_vsr_t *xt, ppc_vsr_t *xb) \
2086 ppc_vsr_t t = *xt; \
2089 helper_reset_fpstatus(env); \
2091 for (i = 0; i < nels; i++) { \
2092 float_status tstat = env->fp_status; \
2093 set_float_exception_flags(0, &tstat); \
2094 t.fld = tp##_sqrt(xb->fld, &tstat); \
2095 env->fp_status.float_exception_flags |= tstat.float_exception_flags; \
2097 if (unlikely(tstat.float_exception_flags & float_flag_invalid)) { \
2098 if (tp##_is_neg(xb->fld) && !tp##_is_zero(xb->fld)) { \
2099 float_invalid_op_vxsqrt(env, sfprf, GETPC()); \
2100 } else if (tp##_is_signaling_nan(xb->fld, &tstat)) { \
2101 float_invalid_op_vxsnan(env, GETPC()); \
2106 t.fld = helper_frsp(env, t.fld); \
2110 helper_compute_fprf_float64(env, t.fld); \
2115 do_float_check_status(env, GETPC()); \
2118 VSX_SQRT(xssqrtdp
, 1, float64
, VsrD(0), 1, 0)
2119 VSX_SQRT(xssqrtsp
, 1, float64
, VsrD(0), 1, 1)
2120 VSX_SQRT(xvsqrtdp
, 2, float64
, VsrD(i
), 0, 0)
2121 VSX_SQRT(xvsqrtsp
, 4, float32
, VsrW(i
), 0, 0)
/*
 * VSX_RSQRTE - VSX floating point reciprocal square root estimate
 *   op    - instruction mnemonic
 *   nels  - number of elements (1, 2 or 4)
 *   tp    - type (float32 or float64)
 *   fld   - vsr_t field (VsrD(*) or VsrW(*))
 *   sfprf - set FPRF
 */
2131 #define VSX_RSQRTE(op, nels, tp, fld, sfprf, r2sp) \
2132 void helper_##op(CPUPPCState *env, ppc_vsr_t *xt, ppc_vsr_t *xb) \
2134 ppc_vsr_t t = *xt; \
2137 helper_reset_fpstatus(env); \
2139 for (i = 0; i < nels; i++) { \
2140 float_status tstat = env->fp_status; \
2141 set_float_exception_flags(0, &tstat); \
2142 t.fld = tp##_sqrt(xb->fld, &tstat); \
2143 t.fld = tp##_div(tp##_one, t.fld, &tstat); \
2144 env->fp_status.float_exception_flags |= tstat.float_exception_flags; \
2146 if (unlikely(tstat.float_exception_flags & float_flag_invalid)) { \
2147 if (tp##_is_neg(xb->fld) && !tp##_is_zero(xb->fld)) { \
2148 float_invalid_op_vxsqrt(env, sfprf, GETPC()); \
2149 } else if (tp##_is_signaling_nan(xb->fld, &tstat)) { \
2150 float_invalid_op_vxsnan(env, GETPC()); \
2155 t.fld = helper_frsp(env, t.fld); \
2159 helper_compute_fprf_float64(env, t.fld); \
2164 do_float_check_status(env, GETPC()); \
2167 VSX_RSQRTE(xsrsqrtedp
, 1, float64
, VsrD(0), 1, 0)
2168 VSX_RSQRTE(xsrsqrtesp
, 1, float64
, VsrD(0), 1, 1)
2169 VSX_RSQRTE(xvrsqrtedp
, 2, float64
, VsrD(i
), 0, 0)
2170 VSX_RSQRTE(xvrsqrtesp
, 4, float32
, VsrW(i
), 0, 0)
2173 * VSX_TDIV - VSX floating point test for divide
2174 * op - instruction mnemonic
2175 * nels - number of elements (1, 2 or 4)
2176 * tp - type (float32 or float64)
2177 * fld - vsr_t field (VsrD(*) or VsrW(*))
2178 * emin - minimum unbiased exponent
2179 * emax - maximum unbiased exponent
2180 * nbits - number of fraction bits
2182 #define VSX_TDIV(op, nels, tp, fld, emin, emax, nbits) \
2183 void helper_##op(CPUPPCState *env, uint32_t opcode, \
2184 ppc_vsr_t *xa, ppc_vsr_t *xb) \
2190 for (i = 0; i < nels; i++) { \
2191 if (unlikely(tp##_is_infinity(xa->fld) || \
2192 tp##_is_infinity(xb->fld) || \
2193 tp##_is_zero(xb->fld))) { \
2197 int e_a = ppc_##tp##_get_unbiased_exp(xa->fld); \
2198 int e_b = ppc_##tp##_get_unbiased_exp(xb->fld); \
2200 if (unlikely(tp##_is_any_nan(xa->fld) || \
2201 tp##_is_any_nan(xb->fld))) { \
2203 } else if ((e_b <= emin) || (e_b >= (emax - 2))) { \
2205 } else if (!tp##_is_zero(xa->fld) && \
2206 (((e_a - e_b) >= emax) || \
2207 ((e_a - e_b) <= (emin + 1)) || \
2208 (e_a <= (emin + nbits)))) { \
2212 if (unlikely(tp##_is_zero_or_denormal(xb->fld))) { \
2214 * XB is not zero because of the above check and so \
2215 * must be denormalized. \
2222 env->crf[BF(opcode)] = 0x8 | (fg_flag ? 4 : 0) | (fe_flag ? 2 : 0); \
2225 VSX_TDIV(xstdivdp
, 1, float64
, VsrD(0), -1022, 1023, 52)
2226 VSX_TDIV(xvtdivdp
, 2, float64
, VsrD(i
), -1022, 1023, 52)
2227 VSX_TDIV(xvtdivsp
, 4, float32
, VsrW(i
), -126, 127, 23)
2230 * VSX_TSQRT - VSX floating point test for square root
2231 * op - instruction mnemonic
2232 * nels - number of elements (1, 2 or 4)
2233 * tp - type (float32 or float64)
2234 * fld - vsr_t field (VsrD(*) or VsrW(*))
2235 * emin - minimum unbiased exponent
2236 * emax - maximum unbiased exponent
2237 * nbits - number of fraction bits
2239 #define VSX_TSQRT(op, nels, tp, fld, emin, nbits) \
2240 void helper_##op(CPUPPCState *env, uint32_t opcode, ppc_vsr_t *xb) \
2246 for (i = 0; i < nels; i++) { \
2247 if (unlikely(tp##_is_infinity(xb->fld) || \
2248 tp##_is_zero(xb->fld))) { \
2252 int e_b = ppc_##tp##_get_unbiased_exp(xb->fld); \
2254 if (unlikely(tp##_is_any_nan(xb->fld))) { \
2256 } else if (unlikely(tp##_is_zero(xb->fld))) { \
2258 } else if (unlikely(tp##_is_neg(xb->fld))) { \
2260 } else if (!tp##_is_zero(xb->fld) && \
2261 (e_b <= (emin + nbits))) { \
2265 if (unlikely(tp##_is_zero_or_denormal(xb->fld))) { \
2267 * XB is not zero because of the above check and \
2268 * therefore must be denormalized. \
2275 env->crf[BF(opcode)] = 0x8 | (fg_flag ? 4 : 0) | (fe_flag ? 2 : 0); \
2278 VSX_TSQRT(xstsqrtdp
, 1, float64
, VsrD(0), -1022, 52)
2279 VSX_TSQRT(xvtsqrtdp
, 2, float64
, VsrD(i
), -1022, 52)
2280 VSX_TSQRT(xvtsqrtsp
, 4, float32
, VsrW(i
), -126, 23)
/*
 * VSX_MADD - VSX floating point multiply/add variations
 *   op    - instruction mnemonic
 *   nels  - number of elements (1, 2 or 4)
 *   tp    - type (float32 or float64)
 *   fld   - vsr_t field (VsrD(*) or VsrW(*))
 *   maddflgs - flags for the float*muladd routine that control the
 *           various forms (madd, msub, nmadd, nmsub)
 *   sfprf - set FPRF
 */
2292 #define VSX_MADD(op, nels, tp, fld, maddflgs, sfprf, r2sp) \
2293 void helper_##op(CPUPPCState *env, ppc_vsr_t *xt, \
2294 ppc_vsr_t *xa, ppc_vsr_t *b, ppc_vsr_t *c) \
2296 ppc_vsr_t t = *xt; \
2299 helper_reset_fpstatus(env); \
2301 for (i = 0; i < nels; i++) { \
2302 float_status tstat = env->fp_status; \
2303 set_float_exception_flags(0, &tstat); \
2304 if (r2sp && (tstat.float_rounding_mode == float_round_nearest_even)) {\
2306 * Avoid double rounding errors by rounding the intermediate \
2309 set_float_rounding_mode(float_round_to_zero, &tstat); \
2310 t.fld = tp##_muladd(xa->fld, b->fld, c->fld, \
2311 maddflgs, &tstat); \
2312 t.fld |= (get_float_exception_flags(&tstat) & \
2313 float_flag_inexact) != 0; \
2315 t.fld = tp##_muladd(xa->fld, b->fld, c->fld, \
2316 maddflgs, &tstat); \
2318 env->fp_status.float_exception_flags |= tstat.float_exception_flags; \
2320 if (unlikely(tstat.float_exception_flags & float_flag_invalid)) { \
2321 tp##_maddsub_update_excp(env, xa->fld, b->fld, \
2322 c->fld, maddflgs, GETPC()); \
2326 t.fld = helper_frsp(env, t.fld); \
2330 helper_compute_fprf_float64(env, t.fld); \
2334 do_float_check_status(env, GETPC()); \
2337 VSX_MADD(xsmadddp
, 1, float64
, VsrD(0), MADD_FLGS
, 1, 0)
2338 VSX_MADD(xsmsubdp
, 1, float64
, VsrD(0), MSUB_FLGS
, 1, 0)
2339 VSX_MADD(xsnmadddp
, 1, float64
, VsrD(0), NMADD_FLGS
, 1, 0)
2340 VSX_MADD(xsnmsubdp
, 1, float64
, VsrD(0), NMSUB_FLGS
, 1, 0)
2341 VSX_MADD(xsmaddsp
, 1, float64
, VsrD(0), MADD_FLGS
, 1, 1)
2342 VSX_MADD(xsmsubsp
, 1, float64
, VsrD(0), MSUB_FLGS
, 1, 1)
2343 VSX_MADD(xsnmaddsp
, 1, float64
, VsrD(0), NMADD_FLGS
, 1, 1)
2344 VSX_MADD(xsnmsubsp
, 1, float64
, VsrD(0), NMSUB_FLGS
, 1, 1)
2346 VSX_MADD(xvmadddp
, 2, float64
, VsrD(i
), MADD_FLGS
, 0, 0)
2347 VSX_MADD(xvmsubdp
, 2, float64
, VsrD(i
), MSUB_FLGS
, 0, 0)
2348 VSX_MADD(xvnmadddp
, 2, float64
, VsrD(i
), NMADD_FLGS
, 0, 0)
2349 VSX_MADD(xvnmsubdp
, 2, float64
, VsrD(i
), NMSUB_FLGS
, 0, 0)
2351 VSX_MADD(xvmaddsp
, 4, float32
, VsrW(i
), MADD_FLGS
, 0, 0)
2352 VSX_MADD(xvmsubsp
, 4, float32
, VsrW(i
), MSUB_FLGS
, 0, 0)
2353 VSX_MADD(xvnmaddsp
, 4, float32
, VsrW(i
), NMADD_FLGS
, 0, 0)
2354 VSX_MADD(xvnmsubsp
, 4, float32
, VsrW(i
), NMSUB_FLGS
, 0, 0)
2357 * VSX_SCALAR_CMP_DP - VSX scalar floating point compare double precision
2358 * op - instruction mnemonic
2359 * cmp - comparison operation
2360 * exp - expected result of comparison
2361 * svxvc - set VXVC bit
2363 #define VSX_SCALAR_CMP_DP(op, cmp, exp, svxvc) \
2364 void helper_##op(CPUPPCState *env, ppc_vsr_t *xt, \
2365 ppc_vsr_t *xa, ppc_vsr_t *xb) \
2367 ppc_vsr_t t = *xt; \
2368 bool vxsnan_flag = false, vxvc_flag = false, vex_flag = false; \
2370 if (float64_is_signaling_nan(xa->VsrD(0), &env->fp_status) || \
2371 float64_is_signaling_nan(xb->VsrD(0), &env->fp_status)) { \
2372 vxsnan_flag = true; \
2373 if (fpscr_ve == 0 && svxvc) { \
2376 } else if (svxvc) { \
2377 vxvc_flag = float64_is_quiet_nan(xa->VsrD(0), &env->fp_status) || \
2378 float64_is_quiet_nan(xb->VsrD(0), &env->fp_status); \
2380 if (vxsnan_flag) { \
2381 float_invalid_op_vxsnan(env, GETPC()); \
2384 float_invalid_op_vxvc(env, 0, GETPC()); \
2386 vex_flag = fpscr_ve && (vxvc_flag || vxsnan_flag); \
2389 if (float64_##cmp(xb->VsrD(0), xa->VsrD(0), \
2390 &env->fp_status) == exp) { \
2399 do_float_check_status(env, GETPC()); \
2402 VSX_SCALAR_CMP_DP(xscmpeqdp
, eq
, 1, 0)
2403 VSX_SCALAR_CMP_DP(xscmpgedp
, le
, 1, 1)
2404 VSX_SCALAR_CMP_DP(xscmpgtdp
, lt
, 1, 1)
2405 VSX_SCALAR_CMP_DP(xscmpnedp
, eq
, 0, 0)
2407 void helper_xscmpexpdp(CPUPPCState
*env
, uint32_t opcode
,
2408 ppc_vsr_t
*xa
, ppc_vsr_t
*xb
)
2410 int64_t exp_a
, exp_b
;
2413 exp_a
= extract64(xa
->VsrD(0), 52, 11);
2414 exp_b
= extract64(xb
->VsrD(0), 52, 11);
2416 if (unlikely(float64_is_any_nan(xa
->VsrD(0)) ||
2417 float64_is_any_nan(xb
->VsrD(0)))) {
2420 if (exp_a
< exp_b
) {
2422 } else if (exp_a
> exp_b
) {
2429 env
->fpscr
&= ~(0x0F << FPSCR_FPRF
);
2430 env
->fpscr
|= cc
<< FPSCR_FPRF
;
2431 env
->crf
[BF(opcode
)] = cc
;
2433 do_float_check_status(env
, GETPC());
2436 void helper_xscmpexpqp(CPUPPCState
*env
, uint32_t opcode
,
2437 ppc_vsr_t
*xa
, ppc_vsr_t
*xb
)
2439 int64_t exp_a
, exp_b
;
2442 exp_a
= extract64(xa
->VsrD(0), 48, 15);
2443 exp_b
= extract64(xb
->VsrD(0), 48, 15);
2445 if (unlikely(float128_is_any_nan(xa
->f128
) ||
2446 float128_is_any_nan(xb
->f128
))) {
2449 if (exp_a
< exp_b
) {
2451 } else if (exp_a
> exp_b
) {
2458 env
->fpscr
&= ~(0x0F << FPSCR_FPRF
);
2459 env
->fpscr
|= cc
<< FPSCR_FPRF
;
2460 env
->crf
[BF(opcode
)] = cc
;
2462 do_float_check_status(env
, GETPC());
#define VSX_SCALAR_CMP(op, ordered) \
void helper_##op(CPUPPCState *env, uint32_t opcode, \
                 ppc_vsr_t *xa, ppc_vsr_t *xb) \
{ \
    uint32_t cc = 0; \
    bool vxsnan_flag = false, vxvc_flag = false; \
 \
    helper_reset_fpstatus(env); \
 \
    if (float64_is_signaling_nan(xa->VsrD(0), &env->fp_status) || \
        float64_is_signaling_nan(xb->VsrD(0), &env->fp_status)) { \
        vxsnan_flag = true; \
        cc = CRF_SO; \
        if (fpscr_ve == 0 && ordered) { \
            vxvc_flag = true; \
        } \
    } else if (float64_is_quiet_nan(xa->VsrD(0), &env->fp_status) || \
               float64_is_quiet_nan(xb->VsrD(0), &env->fp_status)) { \
        cc = CRF_SO; \
        if (ordered) { \
            vxvc_flag = true; \
        } \
    } \
    if (vxsnan_flag) { \
        float_invalid_op_vxsnan(env, GETPC()); \
    } \
    if (vxvc_flag) { \
        float_invalid_op_vxvc(env, 0, GETPC()); \
    } \
 \
    if (float64_lt(xa->VsrD(0), xb->VsrD(0), &env->fp_status)) { \
        cc |= CRF_LT; \
    } else if (!float64_le(xa->VsrD(0), xb->VsrD(0), &env->fp_status)) { \
        cc |= CRF_GT; \
    } else { \
        cc |= CRF_EQ; \
    } \
 \
    env->fpscr &= ~(0x0F << FPSCR_FPRF); \
    env->fpscr |= cc << FPSCR_FPRF; \
    env->crf[BF(opcode)] = cc; \
 \
    do_float_check_status(env, GETPC()); \
}

VSX_SCALAR_CMP(xscmpodp, 1)
VSX_SCALAR_CMP(xscmpudp, 0)
#define VSX_SCALAR_CMPQ(op, ordered) \
void helper_##op(CPUPPCState *env, uint32_t opcode, \
                 ppc_vsr_t *xa, ppc_vsr_t *xb) \
{ \
    uint32_t cc = 0; \
    bool vxsnan_flag = false, vxvc_flag = false; \
 \
    helper_reset_fpstatus(env); \
 \
    if (float128_is_signaling_nan(xa->f128, &env->fp_status) || \
        float128_is_signaling_nan(xb->f128, &env->fp_status)) { \
        vxsnan_flag = true; \
        cc = CRF_SO; \
        if (fpscr_ve == 0 && ordered) { \
            vxvc_flag = true; \
        } \
    } else if (float128_is_quiet_nan(xa->f128, &env->fp_status) || \
               float128_is_quiet_nan(xb->f128, &env->fp_status)) { \
        cc = CRF_SO; \
        if (ordered) { \
            vxvc_flag = true; \
        } \
    } \
    if (vxsnan_flag) { \
        float_invalid_op_vxsnan(env, GETPC()); \
    } \
    if (vxvc_flag) { \
        float_invalid_op_vxvc(env, 0, GETPC()); \
    } \
 \
    if (float128_lt(xa->f128, xb->f128, &env->fp_status)) { \
        cc |= CRF_LT; \
    } else if (!float128_le(xa->f128, xb->f128, &env->fp_status)) { \
        cc |= CRF_GT; \
    } else { \
        cc |= CRF_EQ; \
    } \
 \
    env->fpscr &= ~(0x0F << FPSCR_FPRF); \
    env->fpscr |= cc << FPSCR_FPRF; \
    env->crf[BF(opcode)] = cc; \
 \
    do_float_check_status(env, GETPC()); \
}

VSX_SCALAR_CMPQ(xscmpoqp, 1)
VSX_SCALAR_CMPQ(xscmpuqp, 0)
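/*
 * Note (illustrative): the ordered and unordered forms differ only in
 * VXVC handling.  A quiet NaN operand makes xscmpoqp raise VXVC but
 * leaves VXVC clear for xscmpuqp; a signaling NaN raises VXSNAN in
 * either case (plus VXVC for the ordered form when VE is clear).
 */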
/*
 * VSX_MAX_MIN - VSX floating point maximum/minimum
 *   name  - instruction mnemonic
 *   op    - operation (max or min)
 *   nels  - number of elements (1, 2 or 4)
 *   tp    - type (float32 or float64)
 *   fld   - vsr_t field (VsrD(*) or VsrW(*))
 */
#define VSX_MAX_MIN(name, op, nels, tp, fld) \
void helper_##name(CPUPPCState *env, ppc_vsr_t *xt, \
                   ppc_vsr_t *xa, ppc_vsr_t *xb) \
{ \
    ppc_vsr_t t = *xt; \
    int i; \
 \
    for (i = 0; i < nels; i++) { \
        t.fld = tp##_##op(xa->fld, xb->fld, &env->fp_status); \
        if (unlikely(tp##_is_signaling_nan(xa->fld, &env->fp_status) || \
                     tp##_is_signaling_nan(xb->fld, &env->fp_status))) { \
            float_invalid_op_vxsnan(env, GETPC()); \
        } \
    } \
 \
    *xt = t; \
    do_float_check_status(env, GETPC()); \
}
VSX_MAX_MIN(xsmaxdp, maxnum, 1, float64, VsrD(0))
VSX_MAX_MIN(xvmaxdp, maxnum, 2, float64, VsrD(i))
VSX_MAX_MIN(xvmaxsp, maxnum, 4, float32, VsrW(i))
VSX_MAX_MIN(xsmindp, minnum, 1, float64, VsrD(0))
VSX_MAX_MIN(xvmindp, minnum, 2, float64, VsrD(i))
VSX_MAX_MIN(xvminsp, minnum, 4, float32, VsrW(i))
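/*
 * Worked example (illustrative): maxnum/minnum follow the IEEE 754
 * number-favouring semantics, so xsmaxdp(QNaN, 3.0) returns 3.0 rather
 * than the NaN; a signaling NaN input additionally raises VXSNAN via
 * the check in the loop above.
 */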
#define VSX_MAX_MINC(name, max) \
void helper_##name(CPUPPCState *env, uint32_t opcode, \
                   ppc_vsr_t *xt, ppc_vsr_t *xa, ppc_vsr_t *xb) \
{ \
    ppc_vsr_t t = *xt; \
    bool vxsnan_flag = false, vex_flag = false; \
 \
    if (unlikely(float64_is_any_nan(xa->VsrD(0)) || \
                 float64_is_any_nan(xb->VsrD(0)))) { \
        if (float64_is_signaling_nan(xa->VsrD(0), &env->fp_status) || \
            float64_is_signaling_nan(xb->VsrD(0), &env->fp_status)) { \
            vxsnan_flag = true; \
        } \
        t.VsrD(0) = xb->VsrD(0); \
    } else if ((max && \
               !float64_lt(xa->VsrD(0), xb->VsrD(0), &env->fp_status)) || \
               (!max && \
               float64_lt(xa->VsrD(0), xb->VsrD(0), &env->fp_status))) { \
        t.VsrD(0) = xa->VsrD(0); \
    } else { \
        t.VsrD(0) = xb->VsrD(0); \
    } \
 \
    vex_flag = fpscr_ve & vxsnan_flag; \
    if (vxsnan_flag) { \
        float_invalid_op_vxsnan(env, GETPC()); \
    } \
    if (!vex_flag) { \
        *xt = t; \
    } \
}

VSX_MAX_MINC(xsmaxcdp, 1);
VSX_MAX_MINC(xsmincdp, 0);
#define VSX_MAX_MINJ(name, max) \
void helper_##name(CPUPPCState *env, uint32_t opcode, \
                   ppc_vsr_t *xt, ppc_vsr_t *xa, ppc_vsr_t *xb) \
{ \
    ppc_vsr_t t = *xt; \
    bool vxsnan_flag = false, vex_flag = false; \
 \
    if (unlikely(float64_is_any_nan(xa->VsrD(0)))) { \
        if (float64_is_signaling_nan(xa->VsrD(0), &env->fp_status)) { \
            vxsnan_flag = true; \
        } \
        t.VsrD(0) = xa->VsrD(0); \
    } else if (unlikely(float64_is_any_nan(xb->VsrD(0)))) { \
        if (float64_is_signaling_nan(xb->VsrD(0), &env->fp_status)) { \
            vxsnan_flag = true; \
        } \
        t.VsrD(0) = xb->VsrD(0); \
    } else if (float64_is_zero(xa->VsrD(0)) && \
               float64_is_zero(xb->VsrD(0))) { \
        if (max) { \
            if (!float64_is_neg(xa->VsrD(0)) || \
                !float64_is_neg(xb->VsrD(0))) { \
                t.VsrD(0) = 0ULL; \
            } else { \
                t.VsrD(0) = 0x8000000000000000ULL; \
            } \
        } else { \
            if (float64_is_neg(xa->VsrD(0)) || \
                float64_is_neg(xb->VsrD(0))) { \
                t.VsrD(0) = 0x8000000000000000ULL; \
            } else { \
                t.VsrD(0) = 0ULL; \
            } \
        } \
    } else if ((max && \
               !float64_lt(xa->VsrD(0), xb->VsrD(0), &env->fp_status)) || \
               (!max && \
               float64_lt(xa->VsrD(0), xb->VsrD(0), &env->fp_status))) { \
        t.VsrD(0) = xa->VsrD(0); \
    } else { \
        t.VsrD(0) = xb->VsrD(0); \
    } \
 \
    vex_flag = fpscr_ve & vxsnan_flag; \
    if (vxsnan_flag) { \
        float_invalid_op_vxsnan(env, GETPC()); \
    } \
    if (!vex_flag) { \
        *xt = t; \
    } \
}

VSX_MAX_MINJ(xsmaxjdp, 1);
VSX_MAX_MINJ(xsminjdp, 0);
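/*
 * Worked example (illustrative): the explicit zero handling above gives
 * xsmaxjdp(+0.0, -0.0) = +0.0 and xsminjdp(+0.0, -0.0) = -0.0
 * (0x8000000000000000ULL), a case plain float64_lt() cannot decide
 * because -0.0 compares equal to +0.0.
 */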
/*
 * VSX_CMP - VSX floating point compare
 *   op    - instruction mnemonic
 *   nels  - number of elements (1, 2 or 4)
 *   tp    - type (float32 or float64)
 *   fld   - vsr_t field (VsrD(*) or VsrW(*))
 *   cmp   - comparison operation
 *   svxvc - set VXVC bit
 *   exp   - expected result of comparison
 */
#define VSX_CMP(op, nels, tp, fld, cmp, svxvc, exp) \
uint32_t helper_##op(CPUPPCState *env, ppc_vsr_t *xt, \
                     ppc_vsr_t *xa, ppc_vsr_t *xb) \
{ \
    ppc_vsr_t t = *xt; \
    uint32_t crf6 = 0; \
    int i; \
    int all_true = 1; \
    int all_false = 1; \
 \
    for (i = 0; i < nels; i++) { \
        if (unlikely(tp##_is_any_nan(xa->fld) || \
                     tp##_is_any_nan(xb->fld))) { \
            if (tp##_is_signaling_nan(xa->fld, &env->fp_status) || \
                tp##_is_signaling_nan(xb->fld, &env->fp_status)) { \
                float_invalid_op_vxsnan(env, GETPC()); \
            } \
            if (svxvc) { \
                float_invalid_op_vxvc(env, 0, GETPC()); \
            } \
            t.fld = 0; \
            all_true = 0; \
        } else { \
            if (tp##_##cmp(xb->fld, xa->fld, &env->fp_status) == exp) { \
                t.fld = -1; \
                all_false = 0; \
            } else { \
                t.fld = 0; \
                all_true = 0; \
            } \
        } \
    } \
 \
    *xt = t; \
    crf6 = (all_true ? 0x8 : 0) | (all_false ? 0x2 : 0); \
    return crf6; \
}

VSX_CMP(xvcmpeqdp, 2, float64, VsrD(i), eq, 0, 1)
VSX_CMP(xvcmpgedp, 2, float64, VsrD(i), le, 1, 1)
VSX_CMP(xvcmpgtdp, 2, float64, VsrD(i), lt, 1, 1)
VSX_CMP(xvcmpnedp, 2, float64, VsrD(i), eq, 0, 0)
VSX_CMP(xvcmpeqsp, 4, float32, VsrW(i), eq, 0, 1)
VSX_CMP(xvcmpgesp, 4, float32, VsrW(i), le, 1, 1)
VSX_CMP(xvcmpgtsp, 4, float32, VsrW(i), lt, 1, 1)
VSX_CMP(xvcmpnesp, 4, float32, VsrW(i), eq, 0, 0)
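/*
 * Worked example (illustrative): for xvcmpeqdp with xa = {1.0, 2.0} and
 * xb = {1.0, 3.0}, element 0 matches and element 1 does not, so neither
 * all_true nor all_false holds and the returned crf6 is 0; equal inputs
 * would give 0x8 (all true), fully mismatched inputs 0x2 (all false).
 */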
/*
 * VSX_CVT_FP_TO_FP - VSX floating point/floating point conversion
 *   op    - instruction mnemonic
 *   nels  - number of elements (1, 2 or 4)
 *   stp   - source type (float32 or float64)
 *   ttp   - target type (float32 or float64)
 *   sfld  - source vsr_t field
 *   tfld  - target vsr_t field (f32 or f64)
 *   sfprf - set FPRF
 */
#define VSX_CVT_FP_TO_FP(op, nels, stp, ttp, sfld, tfld, sfprf) \
void helper_##op(CPUPPCState *env, ppc_vsr_t *xt, ppc_vsr_t *xb) \
{ \
    ppc_vsr_t t = *xt; \
    int i; \
 \
    for (i = 0; i < nels; i++) { \
        t.tfld = stp##_to_##ttp(xb->sfld, &env->fp_status); \
        if (unlikely(stp##_is_signaling_nan(xb->sfld, \
                                            &env->fp_status))) { \
            float_invalid_op_vxsnan(env, GETPC()); \
            t.tfld = ttp##_snan_to_qnan(t.tfld); \
        } \
        if (sfprf) { \
            helper_compute_fprf_##ttp(env, t.tfld); \
        } \
    } \
 \
    *xt = t; \
    do_float_check_status(env, GETPC()); \
}
VSX_CVT_FP_TO_FP(xscvdpsp, 1, float64, float32, VsrD(0), VsrW(0), 1)
VSX_CVT_FP_TO_FP(xscvspdp, 1, float32, float64, VsrW(0), VsrD(0), 1)
VSX_CVT_FP_TO_FP(xvcvdpsp, 2, float64, float32, VsrD(i), VsrW(2 * i), 0)
VSX_CVT_FP_TO_FP(xvcvspdp, 2, float32, float64, VsrW(2 * i), VsrD(i), 0)
/*
 * VSX_CVT_FP_TO_FP_VECTOR - VSX floating point/floating point conversion
 *   op    - instruction mnemonic
 *   nels  - number of elements (1, 2 or 4)
 *   stp   - source type (float32 or float64)
 *   ttp   - target type (float32 or float64)
 *   sfld  - source vsr_t field
 *   tfld  - target vsr_t field (f32 or f64)
 *   sfprf - set FPRF
 */
#define VSX_CVT_FP_TO_FP_VECTOR(op, nels, stp, ttp, sfld, tfld, sfprf) \
void helper_##op(CPUPPCState *env, uint32_t opcode, \
                 ppc_vsr_t *xt, ppc_vsr_t *xb) \
{ \
    ppc_vsr_t t = *xt; \
    int i; \
 \
    for (i = 0; i < nels; i++) { \
        t.tfld = stp##_to_##ttp(xb->sfld, &env->fp_status); \
        if (unlikely(stp##_is_signaling_nan(xb->sfld, \
                                            &env->fp_status))) { \
            float_invalid_op_vxsnan(env, GETPC()); \
            t.tfld = ttp##_snan_to_qnan(t.tfld); \
        } \
        if (sfprf) { \
            helper_compute_fprf_##ttp(env, t.tfld); \
        } \
    } \
 \
    *xt = t; \
    do_float_check_status(env, GETPC()); \
}
VSX_CVT_FP_TO_FP_VECTOR(xscvdpqp, 1, float64, float128, VsrD(0), f128, 1)
/*
 * VSX_CVT_FP_TO_FP_HP - VSX floating point/floating point conversion
 *                       involving one half precision value
 *   op    - instruction mnemonic
 *   nels  - number of elements (1, 2 or 4)
 *   stp   - source type
 *   ttp   - target type
 *   sfld  - source vsr_t field
 *   tfld  - target vsr_t field
 *   sfprf - set FPRF
 */
#define VSX_CVT_FP_TO_FP_HP(op, nels, stp, ttp, sfld, tfld, sfprf) \
void helper_##op(CPUPPCState *env, ppc_vsr_t *xt, ppc_vsr_t *xb) \
{ \
    ppc_vsr_t t = { }; \
    int i; \
 \
    for (i = 0; i < nels; i++) { \
        t.tfld = stp##_to_##ttp(xb->sfld, 1, &env->fp_status); \
        if (unlikely(stp##_is_signaling_nan(xb->sfld, \
                                            &env->fp_status))) { \
            float_invalid_op_vxsnan(env, GETPC()); \
            t.tfld = ttp##_snan_to_qnan(t.tfld); \
        } \
        if (sfprf) { \
            helper_compute_fprf_##ttp(env, t.tfld); \
        } \
    } \
 \
    *xt = t; \
    do_float_check_status(env, GETPC()); \
}
VSX_CVT_FP_TO_FP_HP(xscvdphp, 1, float64, float16, VsrD(0), VsrH(3), 1)
VSX_CVT_FP_TO_FP_HP(xscvhpdp, 1, float16, float64, VsrH(3), VsrD(0), 1)
VSX_CVT_FP_TO_FP_HP(xvcvsphp, 4, float32, float16, VsrW(i), VsrH(2 * i + 1), 0)
VSX_CVT_FP_TO_FP_HP(xvcvhpsp, 4, float16, float32, VsrH(2 * i + 1), VsrW(i), 0)
/*
 * xscvqpdp isn't using VSX_CVT_FP_TO_FP() because xscvqpdpo will be
 * added to this later.
 */
void helper_xscvqpdp(CPUPPCState *env, uint32_t opcode,
                     ppc_vsr_t *xt, ppc_vsr_t *xb)
{
    ppc_vsr_t t = { };
    float_status tstat;

    tstat = env->fp_status;
    if (unlikely(Rc(opcode) != 0)) {
        tstat.float_rounding_mode = float_round_to_odd;
    }

    t.VsrD(0) = float128_to_float64(xb->f128, &tstat);
    env->fp_status.float_exception_flags |= tstat.float_exception_flags;
    if (unlikely(float128_is_signaling_nan(xb->f128, &tstat))) {
        float_invalid_op_vxsnan(env, GETPC());
        t.VsrD(0) = float64_snan_to_qnan(t.VsrD(0));
    }
    helper_compute_fprf_float64(env, t.VsrD(0));

    *xt = t;
    do_float_check_status(env, GETPC());
}
uint64_t helper_xscvdpspn(CPUPPCState *env, uint64_t xb)
{
    uint64_t result;

    float_status tstat = env->fp_status;
    set_float_exception_flags(0, &tstat);

    result = (uint64_t)float64_to_float32(xb, &tstat);
    /* hardware replicates result to both words of the doubleword result.  */
    return (result << 32) | result;
}
uint64_t helper_xscvspdpn(CPUPPCState *env, uint64_t xb)
{
    float_status tstat = env->fp_status;
    set_float_exception_flags(0, &tstat);

    return float32_to_float64(xb >> 32, &tstat);
}
/*
 * VSX_CVT_FP_TO_INT - VSX floating point to integer conversion
 *   op    - instruction mnemonic
 *   nels  - number of elements (1, 2 or 4)
 *   stp   - source type (float32 or float64)
 *   ttp   - target type (int32, uint32, int64 or uint64)
 *   sfld  - source vsr_t field
 *   tfld  - target vsr_t field
 *   rnan  - resulting NaN
 */
#define VSX_CVT_FP_TO_INT(op, nels, stp, ttp, sfld, tfld, rnan) \
void helper_##op(CPUPPCState *env, ppc_vsr_t *xt, ppc_vsr_t *xb) \
{ \
    int all_flags = env->fp_status.float_exception_flags, flags; \
    ppc_vsr_t t = *xt; \
    int i; \
 \
    for (i = 0; i < nels; i++) { \
        env->fp_status.float_exception_flags = 0; \
        t.tfld = stp##_to_##ttp##_round_to_zero(xb->sfld, &env->fp_status); \
        flags = env->fp_status.float_exception_flags; \
        if (unlikely(flags & float_flag_invalid)) { \
            float_invalid_cvt(env, 0, GETPC(), stp##_classify(xb->sfld)); \
            t.tfld = rnan; \
        } \
        all_flags |= flags; \
    } \
 \
    *xt = t; \
    env->fp_status.float_exception_flags = all_flags; \
    do_float_check_status(env, GETPC()); \
}
VSX_CVT_FP_TO_INT(xscvdpsxds, 1, float64, int64, VsrD(0), VsrD(0), \
                  0x8000000000000000ULL)
VSX_CVT_FP_TO_INT(xscvdpsxws, 1, float64, int32, VsrD(0), VsrW(1), \
                  0x80000000U)
VSX_CVT_FP_TO_INT(xscvdpuxds, 1, float64, uint64, VsrD(0), VsrD(0), 0ULL)
VSX_CVT_FP_TO_INT(xscvdpuxws, 1, float64, uint32, VsrD(0), VsrW(1), 0U)
VSX_CVT_FP_TO_INT(xvcvdpsxds, 2, float64, int64, VsrD(i), VsrD(i), \
                  0x8000000000000000ULL)
VSX_CVT_FP_TO_INT(xvcvdpsxws, 2, float64, int32, VsrD(i), VsrW(2 * i), \
                  0x80000000U)
VSX_CVT_FP_TO_INT(xvcvdpuxds, 2, float64, uint64, VsrD(i), VsrD(i), 0ULL)
VSX_CVT_FP_TO_INT(xvcvdpuxws, 2, float64, uint32, VsrD(i), VsrW(2 * i), 0U)
VSX_CVT_FP_TO_INT(xvcvspsxds, 2, float32, int64, VsrW(2 * i), VsrD(i), \
                  0x8000000000000000ULL)
VSX_CVT_FP_TO_INT(xvcvspsxws, 4, float32, int32, VsrW(i), VsrW(i), 0x80000000U)
VSX_CVT_FP_TO_INT(xvcvspuxds, 2, float32, uint64, VsrW(2 * i), VsrD(i), 0ULL)
VSX_CVT_FP_TO_INT(xvcvspuxws, 4, float32, uint32, VsrW(i), VsrW(i), 0U)
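/*
 * Worked example (illustrative): converting a NaN with xscvdpsxds
 * raises VXCVI and stores the rnan pattern 0x8000000000000000
 * (INT64_MIN); for the unsigned conversions rnan is 0, so e.g.
 * xscvdpuxds(NaN) yields 0.
 */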
/*
 * VSX_CVT_FP_TO_INT_VECTOR - VSX floating point to integer conversion
 *   op    - instruction mnemonic
 *   stp   - source type (float32 or float64)
 *   ttp   - target type (int32, uint32, int64 or uint64)
 *   sfld  - source vsr_t field
 *   tfld  - target vsr_t field
 *   rnan  - resulting NaN
 */
#define VSX_CVT_FP_TO_INT_VECTOR(op, stp, ttp, sfld, tfld, rnan) \
void helper_##op(CPUPPCState *env, uint32_t opcode, \
                 ppc_vsr_t *xt, ppc_vsr_t *xb) \
{ \
    ppc_vsr_t t = { }; \
 \
    t.tfld = stp##_to_##ttp##_round_to_zero(xb->sfld, &env->fp_status); \
    if (env->fp_status.float_exception_flags & float_flag_invalid) { \
        float_invalid_cvt(env, 0, GETPC(), stp##_classify(xb->sfld)); \
        t.tfld = rnan; \
    } \
 \
    *xt = t; \
    do_float_check_status(env, GETPC()); \
}
VSX_CVT_FP_TO_INT_VECTOR(xscvqpsdz, float128, int64, f128, VsrD(0), \
                         0x8000000000000000ULL)
VSX_CVT_FP_TO_INT_VECTOR(xscvqpswz, float128, int32, f128, VsrD(0), \
                         0xffffffff80000000ULL)
VSX_CVT_FP_TO_INT_VECTOR(xscvqpudz, float128, uint64, f128, VsrD(0), 0x0ULL)
VSX_CVT_FP_TO_INT_VECTOR(xscvqpuwz, float128, uint32, f128, VsrD(0), 0x0ULL)
/*
 * VSX_CVT_INT_TO_FP - VSX integer to floating point conversion
 *   op    - instruction mnemonic
 *   nels  - number of elements (1, 2 or 4)
 *   stp   - source type (int32, uint32, int64 or uint64)
 *   ttp   - target type (float32 or float64)
 *   sfld  - source vsr_t field
 *   tfld  - target vsr_t field
 *   jdef  - definition of the j index (i or 2*i)
 *   sfprf - set FPRF
 */
#define VSX_CVT_INT_TO_FP(op, nels, stp, ttp, sfld, tfld, sfprf, r2sp) \
void helper_##op(CPUPPCState *env, ppc_vsr_t *xt, ppc_vsr_t *xb) \
{ \
    ppc_vsr_t t = *xt; \
    int i; \
 \
    for (i = 0; i < nels; i++) { \
        t.tfld = stp##_to_##ttp(xb->sfld, &env->fp_status); \
        if (r2sp) { \
            t.tfld = helper_frsp(env, t.tfld); \
        } \
        if (sfprf) { \
            helper_compute_fprf_float64(env, t.tfld); \
        } \
    } \
 \
    *xt = t; \
    do_float_check_status(env, GETPC()); \
}

VSX_CVT_INT_TO_FP(xscvsxddp, 1, int64, float64, VsrD(0), VsrD(0), 1, 0)
VSX_CVT_INT_TO_FP(xscvuxddp, 1, uint64, float64, VsrD(0), VsrD(0), 1, 0)
VSX_CVT_INT_TO_FP(xscvsxdsp, 1, int64, float64, VsrD(0), VsrD(0), 1, 1)
VSX_CVT_INT_TO_FP(xscvuxdsp, 1, uint64, float64, VsrD(0), VsrD(0), 1, 1)
VSX_CVT_INT_TO_FP(xvcvsxddp, 2, int64, float64, VsrD(i), VsrD(i), 0, 0)
VSX_CVT_INT_TO_FP(xvcvuxddp, 2, uint64, float64, VsrD(i), VsrD(i), 0, 0)
VSX_CVT_INT_TO_FP(xvcvsxwdp, 2, int32, float64, VsrW(2 * i), VsrD(i), 0, 0)
VSX_CVT_INT_TO_FP(xvcvuxwdp, 2, uint64, float64, VsrW(2 * i), VsrD(i), 0, 0)
VSX_CVT_INT_TO_FP(xvcvsxdsp, 2, int64, float32, VsrD(i), VsrW(2 * i), 0, 0)
VSX_CVT_INT_TO_FP(xvcvuxdsp, 2, uint64, float32, VsrD(i), VsrW(2 * i), 0, 0)
VSX_CVT_INT_TO_FP(xvcvsxwsp, 4, int32, float32, VsrW(i), VsrW(i), 0, 0)
VSX_CVT_INT_TO_FP(xvcvuxwsp, 4, uint32, float32, VsrW(i), VsrW(i), 0, 0)
/*
 * VSX_CVT_INT_TO_FP_VECTOR - VSX integer to floating point conversion
 *   op    - instruction mnemonic
 *   stp   - source type (int32, uint32, int64 or uint64)
 *   ttp   - target type (float32 or float64)
 *   sfld  - source vsr_t field
 *   tfld  - target vsr_t field
 */
#define VSX_CVT_INT_TO_FP_VECTOR(op, stp, ttp, sfld, tfld) \
void helper_##op(CPUPPCState *env, uint32_t opcode, \
                 ppc_vsr_t *xt, ppc_vsr_t *xb) \
{ \
    ppc_vsr_t t = *xt; \
 \
    t.tfld = stp##_to_##ttp(xb->sfld, &env->fp_status); \
    helper_compute_fprf_##ttp(env, t.tfld); \
 \
    *xt = t; \
    do_float_check_status(env, GETPC()); \
}

VSX_CVT_INT_TO_FP_VECTOR(xscvsdqp, int64, float128, VsrD(0), f128)
VSX_CVT_INT_TO_FP_VECTOR(xscvudqp, uint64, float128, VsrD(0), f128)
/*
 * For "use current rounding mode", define a value that will not be
 * one of the existing rounding mode enums.
 */
#define FLOAT_ROUND_CURRENT (float_round_nearest_even + float_round_down + \
                             float_round_up + float_round_to_zero)
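/*
 * Sanity check (illustrative): with the enum values currently defined
 * in fpu/softfloat-types.h (nearest_even = 0, down = 1, up = 2,
 * to_zero = 3), FLOAT_ROUND_CURRENT evaluates to 6, which collides with
 * none of the rounding modes, including float_round_ties_away (4) and
 * float_round_to_odd (5).
 */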
/*
 * VSX_ROUND - VSX floating point round
 *   op    - instruction mnemonic
 *   nels  - number of elements (1, 2 or 4)
 *   tp    - type (float32 or float64)
 *   fld   - vsr_t field (VsrD(*) or VsrW(*))
 *   rmode - rounding mode
 *   sfprf - set FPRF
 */
#define VSX_ROUND(op, nels, tp, fld, rmode, sfprf) \
void helper_##op(CPUPPCState *env, ppc_vsr_t *xt, ppc_vsr_t *xb) \
{ \
    ppc_vsr_t t = *xt; \
    int i; \
 \
    if (rmode != FLOAT_ROUND_CURRENT) { \
        set_float_rounding_mode(rmode, &env->fp_status); \
    } \
 \
    for (i = 0; i < nels; i++) { \
        if (unlikely(tp##_is_signaling_nan(xb->fld, \
                                           &env->fp_status))) { \
            float_invalid_op_vxsnan(env, GETPC()); \
            t.fld = tp##_snan_to_qnan(xb->fld); \
        } else { \
            t.fld = tp##_round_to_int(xb->fld, &env->fp_status); \
        } \
        if (sfprf) { \
            helper_compute_fprf_float64(env, t.fld); \
        } \
    } \
 \
    /* \
     * If this is not a "use current rounding mode" instruction, \
     * then inhibit setting of the XX bit and restore rounding \
     * mode from FPSCR. \
     */ \
    if (rmode != FLOAT_ROUND_CURRENT) { \
        fpscr_set_rounding_mode(env); \
        env->fp_status.float_exception_flags &= ~float_flag_inexact; \
    } \
 \
    *xt = t; \
    do_float_check_status(env, GETPC()); \
}
VSX_ROUND(xsrdpi, 1, float64, VsrD(0), float_round_ties_away, 1)
VSX_ROUND(xsrdpic, 1, float64, VsrD(0), FLOAT_ROUND_CURRENT, 1)
VSX_ROUND(xsrdpim, 1, float64, VsrD(0), float_round_down, 1)
VSX_ROUND(xsrdpip, 1, float64, VsrD(0), float_round_up, 1)
VSX_ROUND(xsrdpiz, 1, float64, VsrD(0), float_round_to_zero, 1)

VSX_ROUND(xvrdpi, 2, float64, VsrD(i), float_round_ties_away, 0)
VSX_ROUND(xvrdpic, 2, float64, VsrD(i), FLOAT_ROUND_CURRENT, 0)
VSX_ROUND(xvrdpim, 2, float64, VsrD(i), float_round_down, 0)
VSX_ROUND(xvrdpip, 2, float64, VsrD(i), float_round_up, 0)
VSX_ROUND(xvrdpiz, 2, float64, VsrD(i), float_round_to_zero, 0)

VSX_ROUND(xvrspi, 4, float32, VsrW(i), float_round_ties_away, 0)
VSX_ROUND(xvrspic, 4, float32, VsrW(i), FLOAT_ROUND_CURRENT, 0)
VSX_ROUND(xvrspim, 4, float32, VsrW(i), float_round_down, 0)
VSX_ROUND(xvrspip, 4, float32, VsrW(i), float_round_up, 0)
VSX_ROUND(xvrspiz, 4, float32, VsrW(i), float_round_to_zero, 0)
uint64_t helper_xsrsp(CPUPPCState *env, uint64_t xb)
{
    helper_reset_fpstatus(env);

    uint64_t xt = helper_frsp(env, xb);

    helper_compute_fprf_float64(env, xt);
    do_float_check_status(env, GETPC());
    return xt;
}
#define VSX_XXPERM(op, indexed) \
void helper_##op(CPUPPCState *env, ppc_vsr_t *xt, \
                 ppc_vsr_t *xa, ppc_vsr_t *pcv) \
{ \
    ppc_vsr_t t = *xt; \
    int i, idx; \
 \
    for (i = 0; i < 16; i++) { \
        idx = pcv->VsrB(i) & 0x1F; \
        if (indexed) { \
            idx = 31 - idx; \
        } \
        t.VsrB(i) = (idx <= 15) ? xa->VsrB(idx) \
                                : xt->VsrB(idx - 16); \
    } \
 \
    *xt = t; \
}

VSX_XXPERM(xxperm, 0)
VSX_XXPERM(xxpermr, 1)
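/*
 * Worked example (illustrative): xxperm treats xa:xt as a 32-byte
 * source, so a control byte of 0x03 in pcv selects xa->VsrB(3) and
 * 0x13 (19) selects xt->VsrB(3).  xxpermr (indexed = 1) first maps the
 * control through 31 - idx, giving the right-indexed byte order.
 */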
void helper_xvxsigsp(CPUPPCState *env, ppc_vsr_t *xt, ppc_vsr_t *xb)
{
    ppc_vsr_t t = { };
    uint32_t exp, i, fraction;

    for (i = 0; i < 4; i++) {
        exp = (xb->VsrW(i) >> 23) & 0xFF;
        fraction = xb->VsrW(i) & 0x7FFFFF;
        if (exp != 0 && exp != 255) {
            t.VsrW(i) = fraction | 0x00800000;
        } else {
            t.VsrW(i) = fraction;
        }
    }

    *xt = t;
}
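/*
 * Worked example (illustrative): 1.0f is 0x3F800000, i.e. exp = 0x7F
 * and fraction = 0, so the extracted significand is 0x00800000 (the
 * implicit integer bit made explicit); zeros and denormals (exp == 0)
 * and infinities/NaNs (exp == 255) return the bare fraction instead.
 */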
/*
 * VSX_TEST_DC - VSX floating point test data class
 *   op      - instruction mnemonic
 *   nels    - number of elements (1, 2 or 4)
 *   xbn     - VSR register number
 *   tp      - type (float32 or float64)
 *   fld     - vsr_t field (VsrD(*) or VsrW(*))
 *   tfld    - target vsr_t field (VsrD(*) or VsrW(*))
 *   fld_max - target field max
 *   scrf    - set result in CR and FPCC
 */
#define VSX_TEST_DC(op, nels, xbn, tp, fld, tfld, fld_max, scrf) \
void helper_##op(CPUPPCState *env, uint32_t opcode) \
{ \
    ppc_vsr_t *xt = &env->vsr[xT(opcode)]; \
    ppc_vsr_t *xb = &env->vsr[xbn]; \
    ppc_vsr_t t = { }; \
    uint32_t i, sign, dcmx; \
    uint32_t cc, match = 0; \
 \
    if (!scrf) { \
        dcmx = DCMX_XV(opcode); \
    } else { \
        t = *xt; \
        dcmx = DCMX(opcode); \
    } \
 \
    for (i = 0; i < nels; i++) { \
        sign = tp##_is_neg(xb->fld); \
        if (tp##_is_any_nan(xb->fld)) { \
            match = extract32(dcmx, 6, 1); \
        } else if (tp##_is_infinity(xb->fld)) { \
            match = extract32(dcmx, 4 + !sign, 1); \
        } else if (tp##_is_zero(xb->fld)) { \
            match = extract32(dcmx, 2 + !sign, 1); \
        } else if (tp##_is_zero_or_denormal(xb->fld)) { \
            match = extract32(dcmx, 0 + !sign, 1); \
        } \
 \
        if (scrf) { \
            cc = sign << CRF_LT_BIT | match << CRF_EQ_BIT; \
            env->fpscr &= ~(0x0F << FPSCR_FPRF); \
            env->fpscr |= cc << FPSCR_FPRF; \
            env->crf[BF(opcode)] = cc; \
        } else { \
            t.tfld = match ? fld_max : 0; \
        } \
        match = 0; \
    } \
    if (!scrf) { \
        *xt = t; \
    } \
}
VSX_TEST_DC(xvtstdcdp, 2, xB(opcode), float64, VsrD(i), VsrD(i), UINT64_MAX, 0)
VSX_TEST_DC(xvtstdcsp, 4, xB(opcode), float32, VsrW(i), VsrW(i), UINT32_MAX, 0)
VSX_TEST_DC(xststdcdp, 1, xB(opcode), float64, VsrD(0), VsrD(0), 0, 1)
VSX_TEST_DC(xststdcqp, 1, (rB(opcode) + 32), float128, f128, VsrD(0), 0, 1)
void helper_xststdcsp(CPUPPCState *env, uint32_t opcode, ppc_vsr_t *xb)
{
    uint32_t dcmx, sign, exp;
    uint32_t cc, match = 0, not_sp = 0;

    dcmx = DCMX(opcode);
    exp = (xb->VsrD(0) >> 52) & 0x7FF;

    sign = float64_is_neg(xb->VsrD(0));
    if (float64_is_any_nan(xb->VsrD(0))) {
        match = extract32(dcmx, 6, 1);
    } else if (float64_is_infinity(xb->VsrD(0))) {
        match = extract32(dcmx, 4 + !sign, 1);
    } else if (float64_is_zero(xb->VsrD(0))) {
        match = extract32(dcmx, 2 + !sign, 1);
    } else if (float64_is_zero_or_denormal(xb->VsrD(0)) ||
               (exp > 0 && exp < 0x381)) {
        match = extract32(dcmx, 0 + !sign, 1);
    }

    not_sp = !float64_eq(xb->VsrD(0),
                         float32_to_float64(
                             float64_to_float32(xb->VsrD(0), &env->fp_status),
                             &env->fp_status), &env->fp_status);

    cc = sign << CRF_LT_BIT | match << CRF_EQ_BIT | not_sp << CRF_SO_BIT;
    env->fpscr &= ~(0x0F << FPSCR_FPRF);
    env->fpscr |= cc << FPSCR_FPRF;
    env->crf[BF(opcode)] = cc;
}
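/*
 * Worked example (illustrative): the not_sp test round-trips the
 * operand through single precision, so 1.5 (exactly representable as a
 * float32) leaves not_sp = 0, while 1e300 (out of float32 range) sets
 * not_sp = 1 and hence CRF_SO in the returned condition code.
 */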
void helper_xsrqpi(CPUPPCState *env, uint32_t opcode,
                   ppc_vsr_t *xt, ppc_vsr_t *xb)
{
    ppc_vsr_t t = { };
    uint8_t r = Rrm(opcode);
    uint8_t ex = Rc(opcode);
    uint8_t rmc = RMC(opcode);
    uint8_t rmode = 0;
    float_status tstat;

    helper_reset_fpstatus(env);

    if (r == 0 && rmc == 0) {
        rmode = float_round_ties_away;
    } else if (r == 0 && rmc == 0x3) {
        rmode = fpscr_rn;
    } else if (r == 1) {
        switch (rmc) {
        case 0:
            rmode = float_round_nearest_even;
            break;
        case 1:
            rmode = float_round_to_zero;
            break;
        case 2:
            rmode = float_round_up;
            break;
        case 3:
            rmode = float_round_down;
            break;
        default:
            abort();
        }
    }

    tstat = env->fp_status;
    set_float_exception_flags(0, &tstat);
    set_float_rounding_mode(rmode, &tstat);
    t.f128 = float128_round_to_int(xb->f128, &tstat);
    env->fp_status.float_exception_flags |= tstat.float_exception_flags;

    if (unlikely(tstat.float_exception_flags & float_flag_invalid)) {
        if (float128_is_signaling_nan(xb->f128, &tstat)) {
            float_invalid_op_vxsnan(env, GETPC());
            t.f128 = float128_snan_to_qnan(t.f128);
        }
    }

    if (ex == 0 && (tstat.float_exception_flags & float_flag_inexact)) {
        env->fp_status.float_exception_flags &= ~float_flag_inexact;
    }

    helper_compute_fprf_float128(env, t.f128);
    do_float_check_status(env, GETPC());
    *xt = t;
}
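/*
 * For reference, the R/RMC decode above selects:
 *   R = 0, RMC = 0    -> round to nearest, ties away from zero
 *   R = 0, RMC = 3    -> use the current FPSCR[RN] mode
 *   R = 1, RMC = 0..3 -> nearest-even, toward zero, toward +inf,
 *                        toward -inf, respectively
 */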
void helper_xsrqpxp(CPUPPCState *env, uint32_t opcode,
                    ppc_vsr_t *xt, ppc_vsr_t *xb)
{
    ppc_vsr_t t = { };
    uint8_t r = Rrm(opcode);
    uint8_t rmc = RMC(opcode);
    uint8_t rmode = 0;
    floatx80 round_res;
    float_status tstat;

    helper_reset_fpstatus(env);

    if (r == 0 && rmc == 0) {
        rmode = float_round_ties_away;
    } else if (r == 0 && rmc == 0x3) {
        rmode = fpscr_rn;
    } else if (r == 1) {
        switch (rmc) {
        case 0:
            rmode = float_round_nearest_even;
            break;
        case 1:
            rmode = float_round_to_zero;
            break;
        case 2:
            rmode = float_round_up;
            break;
        case 3:
            rmode = float_round_down;
            break;
        default:
            abort();
        }
    }

    tstat = env->fp_status;
    set_float_exception_flags(0, &tstat);
    set_float_rounding_mode(rmode, &tstat);
    round_res = float128_to_floatx80(xb->f128, &tstat);
    t.f128 = floatx80_to_float128(round_res, &tstat);
    env->fp_status.float_exception_flags |= tstat.float_exception_flags;

    if (unlikely(tstat.float_exception_flags & float_flag_invalid)) {
        if (float128_is_signaling_nan(xb->f128, &tstat)) {
            float_invalid_op_vxsnan(env, GETPC());
            t.f128 = float128_snan_to_qnan(t.f128);
        }
    }

    helper_compute_fprf_float128(env, t.f128);
    *xt = t;
    do_float_check_status(env, GETPC());
}
void helper_xssqrtqp(CPUPPCState *env, uint32_t opcode,
                     ppc_vsr_t *xt, ppc_vsr_t *xb)
{
    ppc_vsr_t t = { };
    float_status tstat;

    helper_reset_fpstatus(env);

    tstat = env->fp_status;
    if (unlikely(Rc(opcode) != 0)) {
        tstat.float_rounding_mode = float_round_to_odd;
    }

    set_float_exception_flags(0, &tstat);
    t.f128 = float128_sqrt(xb->f128, &tstat);
    env->fp_status.float_exception_flags |= tstat.float_exception_flags;

    if (unlikely(tstat.float_exception_flags & float_flag_invalid)) {
        if (float128_is_signaling_nan(xb->f128, &tstat)) {
            float_invalid_op_vxsnan(env, GETPC());
            t.f128 = float128_snan_to_qnan(xb->f128);
        } else if (float128_is_quiet_nan(xb->f128, &tstat)) {
            t.f128 = xb->f128;
        } else if (float128_is_neg(xb->f128) && !float128_is_zero(xb->f128)) {
            float_invalid_op_vxsqrt(env, 1, GETPC());
            t.f128 = float128_default_nan(&env->fp_status);
        }
    }

    helper_compute_fprf_float128(env, t.f128);
    *xt = t;
    do_float_check_status(env, GETPC());
}
void helper_xssubqp(CPUPPCState *env, uint32_t opcode,
                    ppc_vsr_t *xt, ppc_vsr_t *xa, ppc_vsr_t *xb)
{
    ppc_vsr_t t = *xt;
    float_status tstat;

    helper_reset_fpstatus(env);

    tstat = env->fp_status;
    if (unlikely(Rc(opcode) != 0)) {
        tstat.float_rounding_mode = float_round_to_odd;
    }

    set_float_exception_flags(0, &tstat);
    t.f128 = float128_sub(xa->f128, xb->f128, &tstat);
    env->fp_status.float_exception_flags |= tstat.float_exception_flags;

    if (unlikely(tstat.float_exception_flags & float_flag_invalid)) {
        float_invalid_op_addsub(env, 1, GETPC(),
                                float128_classify(xa->f128) |
                                float128_classify(xb->f128));
    }

    helper_compute_fprf_float128(env, t.f128);
    *xt = t;
    do_float_check_status(env, GETPC());
}