/*
 * PowerPC floating point and SPE emulation helpers for QEMU.
 *
 * Copyright (c) 2003-2007 Jocelyn Mayer
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "cpu.h"
#include "exec/helper-proto.h"
#include "exec/exec-all.h"
#include "internal.h"
#include "fpu/softfloat.h"

static inline float128 float128_snan_to_qnan(float128 x)
{
    float128 r;

    r.high = x.high | 0x0000800000000000;
    r.low = x.low;
    return r;
}

#define float64_snan_to_qnan(x) ((x) | 0x0008000000000000ULL)
#define float32_snan_to_qnan(x) ((x) | 0x00400000)
#define float16_snan_to_qnan(x) ((x) | 0x0200)
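
/*
 * Editorial note: a signaling NaN is quieted by setting the
 * most-significant fraction bit; e.g. the float32 sNaN 0x7f800001
 * becomes the qNaN 0x7fc00001 after OR'ing in 0x00400000.
 */
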
static inline bool fp_exceptions_enabled(CPUPPCState *env)
{
#ifdef CONFIG_USER_ONLY
    return true;
#else
    return (env->msr & ((1U << MSR_FE0) | (1U << MSR_FE1))) != 0;
#endif
}

/*****************************************************************************/
/* Floating point operations helpers */

/*
 * This is the non-arithmetic conversion that happens e.g. on loads.
 * In the Power ISA pseudocode, this is called DOUBLE.
 */
uint64_t helper_todouble(uint32_t arg)
{
    uint32_t abs_arg = arg & 0x7fffffff;
    uint64_t ret;

    if (likely(abs_arg >= 0x00800000)) {
        if (unlikely(extract32(arg, 23, 8) == 0xff)) {
            /* Inf or NaN. */
            ret  = (uint64_t)extract32(arg, 31, 1) << 63;
            ret |= (uint64_t)0x7ff << 52;
            ret |= (uint64_t)extract32(arg, 0, 23) << 29;
        } else {
            /* Normalized operand. */
            ret  = (uint64_t)extract32(arg, 30, 2) << 62;
            ret |= ((extract32(arg, 30, 1) ^ 1) * (uint64_t)7) << 59;
            ret |= (uint64_t)extract32(arg, 0, 30) << 29;
        }
    } else {
        /* Zero or Denormalized operand. */
        ret = (uint64_t)extract32(arg, 31, 1) << 63;
        if (unlikely(abs_arg != 0)) {
            /*
             * Denormalized operand.
             * Shift fraction so that the msb is in the implicit bit position.
             * Thus, shift is in the range [1:23].
             */
            int shift = clz32(abs_arg) - 8;
            /*
             * The first 3 terms compute the float64 exponent.  We then bias
             * this result by -1 so that we can swallow the implicit bit below.
             */
            int exp = -126 - shift + 1023 - 1;

            ret |= (uint64_t)exp << 52;
            ret += (uint64_t)abs_arg << (52 - 23 + shift);
        }
    }
    return ret;
}
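
/*
 * Worked example (editorial, not in the original): float32 1.0 is
 * 0x3f800000.  Bit 30 is 0, so 0b111 is OR'ed into bits 61:59, and the
 * low 30 bits shift up by 29; the result is 0x3ff0000000000000, which
 * is float64 1.0 as expected.
 */
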
/*
 * This is the non-arithmetic conversion that happens e.g. on stores.
 * In the Power ISA pseudocode, this is called SINGLE.
 */
uint32_t helper_tosingle(uint64_t arg)
{
    int exp = extract64(arg, 52, 11);
    uint32_t ret;

    if (likely(exp > 896)) {
        /* No denormalization required (includes Inf, NaN). */
        ret  = extract64(arg, 62, 2) << 30;
        ret |= extract64(arg, 29, 30);
    } else {
        /*
         * Zero or Denormal result.  If the exponent is in bounds for
         * a single-precision denormal result, extract the proper
         * bits.  If the input is not zero, and the exponent is out of
         * bounds, then the result is undefined; this underflows to
         * zero.
         */
        ret = extract64(arg, 63, 1) << 31;
        if (unlikely(exp >= 874)) {
            /* Denormal result. */
            ret |= ((1ULL << 52) | extract64(arg, 0, 52)) >> (896 + 30 - exp);
        }
    }
    return ret;
}
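
/*
 * Editorial note: for a float64 that originated from helper_todouble()
 * this is the exact bit-level inverse; no rounding is performed, which
 * matches the Power ISA SINGLE pseudocode used on stores.
 */
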
static inline int ppc_float32_get_unbiased_exp(float32 f)
{
    return ((f >> 23) & 0xFF) - 127;
}

static inline int ppc_float64_get_unbiased_exp(float64 f)
{
    return ((f >> 52) & 0x7FF) - 1023;
}

/* Classify a floating-point number. */
enum {
    is_normal   = 1,
    is_zero     = 2,
    is_denormal = 4,
    is_inf      = 8,
    is_qnan     = 16,
    is_snan     = 32,
    is_neg      = 64,
};

#define COMPUTE_CLASS(tp)                                      \
static int tp##_classify(tp arg)                               \
{                                                              \
    int ret = tp##_is_neg(arg) * is_neg;                       \
    if (unlikely(tp##_is_any_nan(arg))) {                      \
        float_status dummy = { };  /* snan_bit_is_one = 0 */   \
        ret |= (tp##_is_signaling_nan(arg, &dummy)             \
                ? is_snan : is_qnan);                          \
    } else if (unlikely(tp##_is_infinity(arg))) {              \
        ret |= is_inf;                                         \
    } else if (tp##_is_zero(arg)) {                            \
        ret |= is_zero;                                        \
    } else if (tp##_is_zero_or_denormal(arg)) {                \
        ret |= is_denormal;                                    \
    } else {                                                   \
        ret |= is_normal;                                      \
    }                                                          \
    return ret;                                                \
}

COMPUTE_CLASS(float16)
COMPUTE_CLASS(float32)
COMPUTE_CLASS(float64)
COMPUTE_CLASS(float128)

static void set_fprf_from_class(CPUPPCState *env, int class)
{
    static const uint8_t fprf[6][2] = {
        { 0x04, 0x08 },  /* normalized */
        { 0x02, 0x12 },  /* zero */
        { 0x14, 0x18 },  /* denormalized */
        { 0x05, 0x09 },  /* infinity */
        { 0x11, 0x11 },  /* qnan */
        { 0x00, 0x00 },  /* snan -- flags are undefined */
    };
    bool isneg = class & is_neg;

    env->fpscr &= ~FP_FPRF;
    env->fpscr |= fprf[ctz32(class)][isneg] << FPSCR_FPRF;
}
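
/*
 * Editorial note: ctz32(class) turns the class bit (is_normal = 1,
 * is_zero = 2, ...) into a row index, and the sign bit picks the
 * column; the table entries are the 5-bit Power ISA FPRF encodings
 * (C followed by FPCC).
 */
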
#define COMPUTE_FPRF(tp)                                \
void helper_compute_fprf_##tp(CPUPPCState *env, tp arg) \
{                                                       \
    set_fprf_from_class(env, tp##_classify(arg));       \
}

COMPUTE_FPRF(float16)
COMPUTE_FPRF(float32)
COMPUTE_FPRF(float64)
COMPUTE_FPRF(float128)

/* Floating-point invalid operations exception */
static void finish_invalid_op_excp(CPUPPCState *env, int op, uintptr_t retaddr)
{
    /* Update the floating-point invalid operation summary */
    env->fpscr |= FP_VX;
    /* Update the floating-point exception summary */
    env->fpscr |= FP_FX;
    if (fpscr_ve != 0) {
        /* Update the floating-point enabled exception summary */
        env->fpscr |= FP_FEX;
        if (fp_exceptions_enabled(env)) {
            raise_exception_err_ra(env, POWERPC_EXCP_PROGRAM,
                                   POWERPC_EXCP_FP | op, retaddr);
        }
    }
}

static void finish_invalid_op_arith(CPUPPCState *env, int op,
                                    bool set_fpcc, uintptr_t retaddr)
{
    env->fpscr &= ~(FP_FR | FP_FI);
    if (fpscr_ve == 0) {
        if (set_fpcc) {
            env->fpscr &= ~FP_FPCC;
            env->fpscr |= (FP_C | FP_FU);
        }
    }
    finish_invalid_op_excp(env, op, retaddr);
}

static void float_invalid_op_vxsnan(CPUPPCState *env, uintptr_t retaddr)
{
    env->fpscr |= FP_VXSNAN;
    finish_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, retaddr);
}

/* Magnitude subtraction of infinities */
static void float_invalid_op_vxisi(CPUPPCState *env, bool set_fpcc,
                                   uintptr_t retaddr)
{
    env->fpscr |= FP_VXISI;
    finish_invalid_op_arith(env, POWERPC_EXCP_FP_VXISI, set_fpcc, retaddr);
}

/* Division of infinity by infinity */
static void float_invalid_op_vxidi(CPUPPCState *env, bool set_fpcc,
                                   uintptr_t retaddr)
{
    env->fpscr |= FP_VXIDI;
    finish_invalid_op_arith(env, POWERPC_EXCP_FP_VXIDI, set_fpcc, retaddr);
}

/* Division of zero by zero */
static void float_invalid_op_vxzdz(CPUPPCState *env, bool set_fpcc,
                                   uintptr_t retaddr)
{
    env->fpscr |= FP_VXZDZ;
    finish_invalid_op_arith(env, POWERPC_EXCP_FP_VXZDZ, set_fpcc, retaddr);
}

/* Multiplication of zero by infinity */
static void float_invalid_op_vximz(CPUPPCState *env, bool set_fpcc,
                                   uintptr_t retaddr)
{
    env->fpscr |= FP_VXIMZ;
    finish_invalid_op_arith(env, POWERPC_EXCP_FP_VXIMZ, set_fpcc, retaddr);
}

/* Square root of a negative number */
static void float_invalid_op_vxsqrt(CPUPPCState *env, bool set_fpcc,
                                    uintptr_t retaddr)
{
    env->fpscr |= FP_VXSQRT;
    finish_invalid_op_arith(env, POWERPC_EXCP_FP_VXSQRT, set_fpcc, retaddr);
}

/* Ordered comparison of NaN */
static void float_invalid_op_vxvc(CPUPPCState *env, bool set_fpcc,
                                  uintptr_t retaddr)
{
    env->fpscr |= FP_VXVC;
    if (set_fpcc) {
        env->fpscr &= ~FP_FPCC;
        env->fpscr |= (FP_C | FP_FU);
    }
    /* Update the floating-point invalid operation summary */
    env->fpscr |= FP_VX;
    /* Update the floating-point exception summary */
    env->fpscr |= FP_FX;
    /* We must update the target FPR before raising the exception */
    if (fpscr_ve != 0) {
        CPUState *cs = env_cpu(env);

        cs->exception_index = POWERPC_EXCP_PROGRAM;
        env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_VXVC;
        /* Update the floating-point enabled exception summary */
        env->fpscr |= FP_FEX;
        /* Exception is deferred */
    }
}

/* Invalid conversion */
static void float_invalid_op_vxcvi(CPUPPCState *env, bool set_fpcc,
                                   uintptr_t retaddr)
{
    env->fpscr |= FP_VXCVI;
    env->fpscr &= ~(FP_FR | FP_FI);
    if (fpscr_ve == 0) {
        if (set_fpcc) {
            env->fpscr &= ~FP_FPCC;
            env->fpscr |= (FP_C | FP_FU);
        }
    }
    finish_invalid_op_excp(env, POWERPC_EXCP_FP_VXCVI, retaddr);
}

static inline void float_zero_divide_excp(CPUPPCState *env, uintptr_t raddr)
{
    env->fpscr |= FP_ZX;
    env->fpscr &= ~(FP_FR | FP_FI);
    /* Update the floating-point exception summary */
    env->fpscr |= FP_FX;
    if (fpscr_ze != 0) {
        /* Update the floating-point enabled exception summary */
        env->fpscr |= FP_FEX;
        if (fp_exceptions_enabled(env)) {
            raise_exception_err_ra(env, POWERPC_EXCP_PROGRAM,
                                   POWERPC_EXCP_FP | POWERPC_EXCP_FP_ZX,
                                   raddr);
        }
    }
}

static inline void float_overflow_excp(CPUPPCState *env)
{
    CPUState *cs = env_cpu(env);

    env->fpscr |= FP_OX;
    /* Update the floating-point exception summary */
    env->fpscr |= FP_FX;
    if (fpscr_oe != 0) {
        /* XXX: should adjust the result */
        /* Update the floating-point enabled exception summary */
        env->fpscr |= FP_FEX;
        /* We must update the target FPR before raising the exception */
        cs->exception_index = POWERPC_EXCP_PROGRAM;
        env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_OX;
    } else {
        env->fpscr |= FP_XX;
        env->fpscr |= FP_FI;
    }
}

static inline void float_underflow_excp(CPUPPCState *env)
{
    CPUState *cs = env_cpu(env);

    env->fpscr |= FP_UX;
    /* Update the floating-point exception summary */
    env->fpscr |= FP_FX;
    if (fpscr_ue != 0) {
        /* XXX: should adjust the result */
        /* Update the floating-point enabled exception summary */
        env->fpscr |= FP_FEX;
        /* We must update the target FPR before raising the exception */
        cs->exception_index = POWERPC_EXCP_PROGRAM;
        env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_UX;
    }
}

static inline void float_inexact_excp(CPUPPCState *env)
{
    CPUState *cs = env_cpu(env);

    env->fpscr |= FP_FI;
    env->fpscr |= FP_XX;
    /* Update the floating-point exception summary */
    env->fpscr |= FP_FX;
    if (fpscr_xe != 0) {
        /* Update the floating-point enabled exception summary */
        env->fpscr |= FP_FEX;
        /* We must update the target FPR before raising the exception */
        cs->exception_index = POWERPC_EXCP_PROGRAM;
        env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_XX;
    }
}

void helper_fpscr_clrbit(CPUPPCState *env, uint32_t bit)
{
    uint32_t mask = 1u << bit;

    if (env->fpscr & mask) {
        ppc_store_fpscr(env, env->fpscr & ~(target_ulong)mask);
    }
}

void helper_fpscr_setbit(CPUPPCState *env, uint32_t bit)
{
    uint32_t mask = 1u << bit;

    if (!(env->fpscr & mask)) {
        ppc_store_fpscr(env, env->fpscr | mask);
    }
}

void helper_store_fpscr(CPUPPCState *env, uint64_t val, uint32_t nibbles)
{
    target_ulong mask = 0;
    int i;

    /* TODO: push this extension back to translation time */
    for (i = 0; i < sizeof(target_ulong) * 2; i++) {
        if (nibbles & (1 << i)) {
            mask |= (target_ulong) 0xf << (4 * i);
        }
    }
    val = (val & mask) | (env->fpscr & ~mask);
    ppc_store_fpscr(env, val);
}
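
/*
 * Editorial example: with nibbles = 0x3 the loop builds mask = 0xff,
 * so only the low eight bits of FPSCR are replaced by val and all
 * other bits keep their previous values, as mtfsf's field mask
 * semantics require.
 */
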
void helper_fpscr_check_status(CPUPPCState *env)
{
    CPUState *cs = env_cpu(env);
    target_ulong fpscr = env->fpscr;
    int error = 0;

    if ((fpscr & FP_OX) && (fpscr & FP_OE)) {
        error = POWERPC_EXCP_FP_OX;
    } else if ((fpscr & FP_UX) && (fpscr & FP_UE)) {
        error = POWERPC_EXCP_FP_UX;
    } else if ((fpscr & FP_XX) && (fpscr & FP_XE)) {
        error = POWERPC_EXCP_FP_XX;
    } else if ((fpscr & FP_ZX) && (fpscr & FP_ZE)) {
        error = POWERPC_EXCP_FP_ZX;
    } else if (fpscr & FP_VE) {
        if (fpscr & FP_VXSOFT) {
            error = POWERPC_EXCP_FP_VXSOFT;
        } else if (fpscr & FP_VXSNAN) {
            error = POWERPC_EXCP_FP_VXSNAN;
        } else if (fpscr & FP_VXISI) {
            error = POWERPC_EXCP_FP_VXISI;
        } else if (fpscr & FP_VXIDI) {
            error = POWERPC_EXCP_FP_VXIDI;
        } else if (fpscr & FP_VXZDZ) {
            error = POWERPC_EXCP_FP_VXZDZ;
        } else if (fpscr & FP_VXIMZ) {
            error = POWERPC_EXCP_FP_VXIMZ;
        } else if (fpscr & FP_VXVC) {
            error = POWERPC_EXCP_FP_VXVC;
        } else if (fpscr & FP_VXSQRT) {
            error = POWERPC_EXCP_FP_VXSQRT;
        } else if (fpscr & FP_VXCVI) {
            error = POWERPC_EXCP_FP_VXCVI;
        } else {
            return;
        }
    } else {
        return;
    }
    cs->exception_index = POWERPC_EXCP_PROGRAM;
    env->error_code = error | POWERPC_EXCP_FP;
    /* Deferred floating-point exception after target FPSCR update */
    if (fp_exceptions_enabled(env)) {
        raise_exception_err_ra(env, cs->exception_index,
                               env->error_code, GETPC());
    }
}

static void do_float_check_status(CPUPPCState *env, uintptr_t raddr)
{
    CPUState *cs = env_cpu(env);
    int status = get_float_exception_flags(&env->fp_status);

    if (status & float_flag_overflow) {
        float_overflow_excp(env);
    } else if (status & float_flag_underflow) {
        float_underflow_excp(env);
    }
    if (status & float_flag_inexact) {
        float_inexact_excp(env);
    } else {
        env->fpscr &= ~FP_FI; /* clear the FPSCR[FI] bit */
    }

    if (cs->exception_index == POWERPC_EXCP_PROGRAM &&
        (env->error_code & POWERPC_EXCP_FP)) {
        /* Deferred floating-point exception after target FPR update */
        if (fp_exceptions_enabled(env)) {
            raise_exception_err_ra(env, cs->exception_index,
                                   env->error_code, raddr);
        }
    }
}

void helper_float_check_status(CPUPPCState *env)
{
    do_float_check_status(env, GETPC());
}

void helper_reset_fpstatus(CPUPPCState *env)
{
    set_float_exception_flags(0, &env->fp_status);
}

static void float_invalid_op_addsub(CPUPPCState *env, int flags,
                                    bool set_fpcc, uintptr_t retaddr)
{
    if (flags & float_flag_invalid_isi) {
        float_invalid_op_vxisi(env, set_fpcc, retaddr);
    } else if (flags & float_flag_invalid_snan) {
        float_invalid_op_vxsnan(env, retaddr);
    }
}

float64 helper_fadd(CPUPPCState *env, float64 arg1, float64 arg2)
{
    float64 ret = float64_add(arg1, arg2, &env->fp_status);
    int flags = get_float_exception_flags(&env->fp_status);

    if (unlikely(flags & float_flag_invalid)) {
        float_invalid_op_addsub(env, flags, 1, GETPC());
    }

    return ret;
}

float64 helper_fsub(CPUPPCState *env, float64 arg1, float64 arg2)
{
    float64 ret = float64_sub(arg1, arg2, &env->fp_status);
    int flags = get_float_exception_flags(&env->fp_status);

    if (unlikely(flags & float_flag_invalid)) {
        float_invalid_op_addsub(env, flags, 1, GETPC());
    }

    return ret;
}

static void float_invalid_op_mul(CPUPPCState *env, int flags,
                                 bool set_fprc, uintptr_t retaddr)
{
    if (flags & float_flag_invalid_imz) {
        float_invalid_op_vximz(env, set_fprc, retaddr);
    } else if (flags & float_flag_invalid_snan) {
        float_invalid_op_vxsnan(env, retaddr);
    }
}

float64 helper_fmul(CPUPPCState *env, float64 arg1, float64 arg2)
{
    float64 ret = float64_mul(arg1, arg2, &env->fp_status);
    int flags = get_float_exception_flags(&env->fp_status);

    if (unlikely(flags & float_flag_invalid)) {
        float_invalid_op_mul(env, flags, 1, GETPC());
    }

    return ret;
}

static void float_invalid_op_div(CPUPPCState *env, bool set_fprc,
                                 uintptr_t retaddr, int classes)
{
    classes &= ~is_neg;
    if (classes == is_inf) {
        /* Division of infinity by infinity */
        float_invalid_op_vxidi(env, set_fprc, retaddr);
    } else if (classes == is_zero) {
        /* Division of zero by zero */
        float_invalid_op_vxzdz(env, set_fprc, retaddr);
    } else if (classes & is_snan) {
        float_invalid_op_vxsnan(env, retaddr);
    }
}

float64 helper_fdiv(CPUPPCState *env, float64 arg1, float64 arg2)
{
    float64 ret = float64_div(arg1, arg2, &env->fp_status);
    int status = get_float_exception_flags(&env->fp_status);

    if (unlikely(status)) {
        if (status & float_flag_invalid) {
            float_invalid_op_div(env, 1, GETPC(),
                                 float64_classify(arg1) |
                                 float64_classify(arg2));
        }
        if (status & float_flag_divbyzero) {
            float_zero_divide_excp(env, GETPC());
        }
    }

    return ret;
}

static void float_invalid_cvt(CPUPPCState *env, bool set_fprc,
                              uintptr_t retaddr, int class1)
{
    float_invalid_op_vxcvi(env, set_fprc, retaddr);
    if (class1 & is_snan) {
        float_invalid_op_vxsnan(env, retaddr);
    }
}

#define FPU_FCTI(op, cvt, nanval)                                      \
uint64_t helper_##op(CPUPPCState *env, float64 arg)                    \
{                                                                      \
    uint64_t ret = float64_to_##cvt(arg, &env->fp_status);             \
    int status = get_float_exception_flags(&env->fp_status);           \
                                                                       \
    if (unlikely(status)) {                                            \
        if (status & float_flag_invalid) {                             \
            float_invalid_cvt(env, 1, GETPC(), float64_classify(arg)); \
            ret = nanval;                                              \
        }                                                              \
        do_float_check_status(env, GETPC());                           \
    }                                                                  \
    return ret;                                                        \
}

FPU_FCTI(fctiw, int32, 0x80000000U)
FPU_FCTI(fctiwz, int32_round_to_zero, 0x80000000U)
FPU_FCTI(fctiwu, uint32, 0x00000000U)
FPU_FCTI(fctiwuz, uint32_round_to_zero, 0x00000000U)
FPU_FCTI(fctid, int64, 0x8000000000000000ULL)
FPU_FCTI(fctidz, int64_round_to_zero, 0x8000000000000000ULL)
FPU_FCTI(fctidu, uint64, 0x0000000000000000ULL)
FPU_FCTI(fctiduz, uint64_round_to_zero, 0x0000000000000000ULL)
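
/*
 * Editorial note: each FPU_FCTI() invocation above generates one
 * helper; e.g. helper_fctiw() converts with float64_to_int32() and
 * substitutes its nanval argument (0x80000000) when softfloat reports
 * an invalid conversion.
 */
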
#define FPU_FCFI(op, cvtr, is_single)                      \
uint64_t helper_##op(CPUPPCState *env, uint64_t arg)       \
{                                                          \
    CPU_DoubleU farg;                                      \
                                                           \
    if (is_single) {                                       \
        float32 tmp = cvtr(arg, &env->fp_status);          \
        farg.d = float32_to_float64(tmp, &env->fp_status); \
    } else {                                               \
        farg.d = cvtr(arg, &env->fp_status);               \
    }                                                      \
    do_float_check_status(env, GETPC());                   \
    return farg.ll;                                        \
}

FPU_FCFI(fcfid, int64_to_float64, 0)
FPU_FCFI(fcfids, int64_to_float32, 1)
FPU_FCFI(fcfidu, uint64_to_float64, 0)
FPU_FCFI(fcfidus, uint64_to_float32, 1)

static inline uint64_t do_fri(CPUPPCState *env, uint64_t arg,
                              FloatRoundMode rounding_mode)
{
    CPU_DoubleU farg;
    FloatRoundMode old_rounding_mode = get_float_rounding_mode(&env->fp_status);

    farg.ll = arg;

    if (unlikely(float64_is_signaling_nan(farg.d, &env->fp_status))) {
        /* sNaN round */
        float_invalid_op_vxsnan(env, GETPC());
        farg.ll = arg | 0x0008000000000000ULL;
    } else {
        int inexact = get_float_exception_flags(&env->fp_status) &
                      float_flag_inexact;
        set_float_rounding_mode(rounding_mode, &env->fp_status);
        farg.ll = float64_round_to_int(farg.d, &env->fp_status);
        set_float_rounding_mode(old_rounding_mode, &env->fp_status);

        /* fri* does not set FPSCR[XX] */
        if (!inexact) {
            env->fp_status.float_exception_flags &= ~float_flag_inexact;
        }
    }
    do_float_check_status(env, GETPC());
    return farg.ll;
}

uint64_t helper_frin(CPUPPCState *env, uint64_t arg)
{
    return do_fri(env, arg, float_round_ties_away);
}

uint64_t helper_friz(CPUPPCState *env, uint64_t arg)
{
    return do_fri(env, arg, float_round_to_zero);
}

uint64_t helper_frip(CPUPPCState *env, uint64_t arg)
{
    return do_fri(env, arg, float_round_up);
}

uint64_t helper_frim(CPUPPCState *env, uint64_t arg)
{
    return do_fri(env, arg, float_round_down);
}

#define FPU_MADDSUB_UPDATE(NAME, TP)                                    \
static void NAME(CPUPPCState *env, TP arg1, TP arg2, TP arg3,           \
                 unsigned int madd_flags, uintptr_t retaddr)            \
{                                                                       \
    if (TP##_is_signaling_nan(arg1, &env->fp_status) ||                 \
        TP##_is_signaling_nan(arg2, &env->fp_status) ||                 \
        TP##_is_signaling_nan(arg3, &env->fp_status)) {                 \
        /* sNaN operation */                                            \
        float_invalid_op_vxsnan(env, retaddr);                          \
    }                                                                   \
    if ((TP##_is_infinity(arg1) && TP##_is_zero(arg2)) ||               \
        (TP##_is_zero(arg1) && TP##_is_infinity(arg2))) {               \
        /* Multiplication of zero by infinity */                        \
        float_invalid_op_vximz(env, 1, retaddr);                        \
    }                                                                   \
    if ((TP##_is_infinity(arg1) || TP##_is_infinity(arg2)) &&           \
        TP##_is_infinity(arg3)) {                                       \
        uint8_t aSign, bSign, cSign;                                    \
                                                                        \
        aSign = TP##_is_neg(arg1);                                      \
        bSign = TP##_is_neg(arg2);                                      \
        cSign = TP##_is_neg(arg3);                                      \
        if (madd_flags & float_muladd_negate_c) {                       \
            cSign ^= 1;                                                 \
        }                                                               \
        if (aSign ^ bSign ^ cSign) {                                    \
            float_invalid_op_vxisi(env, 1, retaddr);                    \
        }                                                               \
    }                                                                   \
}

FPU_MADDSUB_UPDATE(float32_maddsub_update_excp, float32)
FPU_MADDSUB_UPDATE(float64_maddsub_update_excp, float64)

#define FPU_FMADD(op, madd_flags)                                    \
uint64_t helper_##op(CPUPPCState *env, uint64_t arg1,                \
                     uint64_t arg2, uint64_t arg3)                   \
{                                                                    \
    uint32_t flags;                                                  \
    float64 ret = float64_muladd(arg1, arg2, arg3, madd_flags,       \
                                 &env->fp_status);                   \
    flags = get_float_exception_flags(&env->fp_status);              \
    if (flags) {                                                     \
        if (flags & float_flag_invalid) {                            \
            float64_maddsub_update_excp(env, arg1, arg2, arg3,       \
                                        madd_flags, GETPC());        \
        }                                                            \
        do_float_check_status(env, GETPC());                         \
    }                                                                \
    return ret;                                                      \
}

#define MADD_FLGS 0
#define MSUB_FLGS float_muladd_negate_c
#define NMADD_FLGS float_muladd_negate_result
#define NMSUB_FLGS (float_muladd_negate_c | float_muladd_negate_result)

FPU_FMADD(fmadd, MADD_FLGS)
FPU_FMADD(fnmadd, NMADD_FLGS)
FPU_FMADD(fmsub, MSUB_FLGS)
FPU_FMADD(fnmsub, NMSUB_FLGS)
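
/*
 * Editorial note: the flag sets above map each mnemonic onto a single
 * fused float64_muladd() call: fmsub computes a*b - c by negating c,
 * and the "n" forms negate the final result, so fnmsub is -(a*b - c).
 */
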
uint64_t helper_frsp(CPUPPCState *env, uint64_t arg)
{
    CPU_DoubleU farg;
    float32 f32;

    farg.ll = arg;

    if (unlikely(float64_is_signaling_nan(farg.d, &env->fp_status))) {
        float_invalid_op_vxsnan(env, GETPC());
    }
    f32 = float64_to_float32(farg.d, &env->fp_status);
    farg.d = float32_to_float64(f32, &env->fp_status);

    return farg.ll;
}

float64 helper_fsqrt(CPUPPCState *env, float64 arg)
{
    float64 ret = float64_sqrt(arg, &env->fp_status);
    int status = get_float_exception_flags(&env->fp_status);

    if (unlikely(status & float_flag_invalid)) {
        if (unlikely(float64_is_any_nan(arg))) {
            if (unlikely(float64_is_signaling_nan(arg, &env->fp_status))) {
                /* sNaN square root */
                float_invalid_op_vxsnan(env, GETPC());
            }
        } else {
            /* Square root of a negative nonzero number */
            float_invalid_op_vxsqrt(env, 1, GETPC());
        }
    }

    return ret;
}

float64 helper_fre(CPUPPCState *env, float64 arg)
{
    /* "Estimate" the reciprocal with actual division. */
    float64 ret = float64_div(float64_one, arg, &env->fp_status);
    int status = get_float_exception_flags(&env->fp_status);

    if (unlikely(status)) {
        if (status & float_flag_invalid) {
            if (float64_is_signaling_nan(arg, &env->fp_status)) {
                /* sNaN reciprocal */
                float_invalid_op_vxsnan(env, GETPC());
            }
        }
        if (status & float_flag_divbyzero) {
            float_zero_divide_excp(env, GETPC());
            /* For FPSCR.ZE == 0, the result is 1/2. */
            ret = float64_set_sign(float64_half, float64_is_neg(arg));
        }
    }

    return ret;
}

uint64_t helper_fres(CPUPPCState *env, uint64_t arg)
{
    CPU_DoubleU farg;
    float32 f32;

    farg.ll = arg;

    if (unlikely(float64_is_signaling_nan(farg.d, &env->fp_status))) {
        /* sNaN reciprocal */
        float_invalid_op_vxsnan(env, GETPC());
    }
    farg.d = float64_div(float64_one, farg.d, &env->fp_status);
    f32 = float64_to_float32(farg.d, &env->fp_status);
    farg.d = float32_to_float64(f32, &env->fp_status);

    return farg.ll;
}

/* frsqrte - frsqrte. */
float64 helper_frsqrte(CPUPPCState *env, float64 arg)
{
    /* "Estimate" the reciprocal with actual division. */
    float64 rets = float64_sqrt(arg, &env->fp_status);
    float64 retd = float64_div(float64_one, rets, &env->fp_status);
    int status = get_float_exception_flags(&env->fp_status);

    if (unlikely(status)) {
        if (status & float_flag_invalid) {
            if (float64_is_signaling_nan(arg, &env->fp_status)) {
                /* sNaN reciprocal */
                float_invalid_op_vxsnan(env, GETPC());
            } else {
                /* Square root of a negative nonzero number */
                float_invalid_op_vxsqrt(env, 1, GETPC());
            }
        }
        if (status & float_flag_divbyzero) {
            /* Reciprocal of (square root of) zero. */
            float_zero_divide_excp(env, GETPC());
        }
    }

    return retd;
}

uint64_t helper_fsel(CPUPPCState *env, uint64_t arg1, uint64_t arg2,
                     uint64_t arg3)
{
    CPU_DoubleU farg1;

    farg1.ll = arg1;

    if ((!float64_is_neg(farg1.d) || float64_is_zero(farg1.d)) &&
        !float64_is_any_nan(farg1.d)) {
        return arg2;
    } else {
        return arg3;
    }
}

uint32_t helper_ftdiv(uint64_t fra, uint64_t frb)
{
    int fe_flag = 0;
    int fg_flag = 0;

    if (unlikely(float64_is_infinity(fra) ||
                 float64_is_infinity(frb) ||
                 float64_is_zero(frb))) {
        fe_flag = 1;
        fg_flag = 1;
    } else {
        int e_a = ppc_float64_get_unbiased_exp(fra);
        int e_b = ppc_float64_get_unbiased_exp(frb);

        if (unlikely(float64_is_any_nan(fra) ||
                     float64_is_any_nan(frb))) {
            fe_flag = 1;
        } else if ((e_b <= -1022) || (e_b >= 1021)) {
            fe_flag = 1;
        } else if (!float64_is_zero(fra) &&
                   (((e_a - e_b) >= 1023) ||
                    ((e_a - e_b) <= -1021) ||
                    (e_a <= -970))) {
            fe_flag = 1;
        }

        if (unlikely(float64_is_zero_or_denormal(frb))) {
            /* XB is not zero because of the above check and */
            /* so must be denormalized. */
            fg_flag = 1;
        }
    }

    return 0x8 | (fg_flag ? 4 : 0) | (fe_flag ? 2 : 0);
}
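
/*
 * Editorial note: the return value is a 4-bit CR field with bit 3
 * (0x8) always set, fg_flag in bit 2 and fe_flag in bit 1;
 * helper_ftsqrt() below packs its flags the same way.
 */
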
uint32_t helper_ftsqrt(uint64_t frb)
{
    int fe_flag = 0;
    int fg_flag = 0;

    if (unlikely(float64_is_infinity(frb) || float64_is_zero(frb))) {
        fe_flag = 1;
        fg_flag = 1;
    } else {
        int e_b = ppc_float64_get_unbiased_exp(frb);

        if (unlikely(float64_is_any_nan(frb))) {
            fe_flag = 1;
        } else if (unlikely(float64_is_zero(frb))) {
            fe_flag = 1;
        } else if (unlikely(float64_is_neg(frb))) {
            fe_flag = 1;
        } else if (!float64_is_zero(frb) && (e_b <= (-1022 + 52))) {
            fe_flag = 1;
        }

        if (unlikely(float64_is_zero_or_denormal(frb))) {
            /* XB is not zero because of the above check and */
            /* therefore must be denormalized. */
            fg_flag = 1;
        }
    }

    return 0x8 | (fg_flag ? 4 : 0) | (fe_flag ? 2 : 0);
}

void helper_fcmpu(CPUPPCState *env, uint64_t arg1, uint64_t arg2,
                  uint32_t crfD)
{
    CPU_DoubleU farg1, farg2;
    uint32_t ret = 0;

    farg1.ll = arg1;
    farg2.ll = arg2;

    if (unlikely(float64_is_any_nan(farg1.d) ||
                 float64_is_any_nan(farg2.d))) {
        ret = 0x01UL;
    } else if (float64_lt(farg1.d, farg2.d, &env->fp_status)) {
        ret = 0x08UL;
    } else if (!float64_le(farg1.d, farg2.d, &env->fp_status)) {
        ret = 0x04UL;
    } else {
        ret = 0x02UL;
    }

    env->fpscr &= ~FP_FPCC;
    env->fpscr |= ret << FPSCR_FPCC;
    env->crf[crfD] = ret;
    if (unlikely(ret == 0x01UL
                 && (float64_is_signaling_nan(farg1.d, &env->fp_status) ||
                     float64_is_signaling_nan(farg2.d, &env->fp_status)))) {
        /* sNaN comparison */
        float_invalid_op_vxsnan(env, GETPC());
    }
}

void helper_fcmpo(CPUPPCState *env, uint64_t arg1, uint64_t arg2,
                  uint32_t crfD)
{
    CPU_DoubleU farg1, farg2;
    uint32_t ret = 0;

    farg1.ll = arg1;
    farg2.ll = arg2;

    if (unlikely(float64_is_any_nan(farg1.d) ||
                 float64_is_any_nan(farg2.d))) {
        ret = 0x01UL;
    } else if (float64_lt(farg1.d, farg2.d, &env->fp_status)) {
        ret = 0x08UL;
    } else if (!float64_le(farg1.d, farg2.d, &env->fp_status)) {
        ret = 0x04UL;
    } else {
        ret = 0x02UL;
    }

    env->fpscr &= ~FP_FPCC;
    env->fpscr |= ret << FPSCR_FPCC;
    env->crf[crfD] = (uint32_t) ret;
    if (unlikely(ret == 0x01UL)) {
        float_invalid_op_vxvc(env, 1, GETPC());
        if (float64_is_signaling_nan(farg1.d, &env->fp_status) ||
            float64_is_signaling_nan(farg2.d, &env->fp_status)) {
            /* sNaN comparison */
            float_invalid_op_vxsnan(env, GETPC());
        }
    }
}

/* Single-precision floating-point conversions */
static inline uint32_t efscfsi(CPUPPCState *env, uint32_t val)
{
    CPU_FloatU u;

    u.f = int32_to_float32(val, &env->vec_status);

    return u.l;
}

static inline uint32_t efscfui(CPUPPCState *env, uint32_t val)
{
    CPU_FloatU u;

    u.f = uint32_to_float32(val, &env->vec_status);

    return u.l;
}

static inline int32_t efsctsi(CPUPPCState *env, uint32_t val)
{
    CPU_FloatU u;

    u.l = val;
    /* NaNs are not treated the same way IEEE 754 does */
    if (unlikely(float32_is_quiet_nan(u.f, &env->vec_status))) {
        return 0;
    }

    return float32_to_int32(u.f, &env->vec_status);
}

static inline uint32_t efsctui(CPUPPCState *env, uint32_t val)
{
    CPU_FloatU u;

    u.l = val;
    /* NaNs are not treated the same way IEEE 754 does */
    if (unlikely(float32_is_quiet_nan(u.f, &env->vec_status))) {
        return 0;
    }

    return float32_to_uint32(u.f, &env->vec_status);
}

static inline uint32_t efsctsiz(CPUPPCState *env, uint32_t val)
{
    CPU_FloatU u;

    u.l = val;
    /* NaNs are not treated the same way IEEE 754 does */
    if (unlikely(float32_is_quiet_nan(u.f, &env->vec_status))) {
        return 0;
    }

    return float32_to_int32_round_to_zero(u.f, &env->vec_status);
}

static inline uint32_t efsctuiz(CPUPPCState *env, uint32_t val)
{
    CPU_FloatU u;

    u.l = val;
    /* NaNs are not treated the same way IEEE 754 does */
    if (unlikely(float32_is_quiet_nan(u.f, &env->vec_status))) {
        return 0;
    }

    return float32_to_uint32_round_to_zero(u.f, &env->vec_status);
}

static inline uint32_t efscfsf(CPUPPCState *env, uint32_t val)
{
    CPU_FloatU u;
    float32 tmp;

    u.f = int32_to_float32(val, &env->vec_status);
    tmp = int64_to_float32(1ULL << 32, &env->vec_status);
    u.f = float32_div(u.f, tmp, &env->vec_status);

    return u.l;
}

static inline uint32_t efscfuf(CPUPPCState *env, uint32_t val)
{
    CPU_FloatU u;
    float32 tmp;

    u.f = uint32_to_float32(val, &env->vec_status);
    tmp = uint64_to_float32(1ULL << 32, &env->vec_status);
    u.f = float32_div(u.f, tmp, &env->vec_status);

    return u.l;
}

static inline uint32_t efsctsf(CPUPPCState *env, uint32_t val)
{
    CPU_FloatU u;
    float32 tmp;

    u.l = val;
    /* NaNs are not treated the same way IEEE 754 does */
    if (unlikely(float32_is_quiet_nan(u.f, &env->vec_status))) {
        return 0;
    }
    tmp = uint64_to_float32(1ULL << 32, &env->vec_status);
    u.f = float32_mul(u.f, tmp, &env->vec_status);

    return float32_to_int32(u.f, &env->vec_status);
}

static inline uint32_t efsctuf(CPUPPCState *env, uint32_t val)
{
    CPU_FloatU u;
    float32 tmp;

    u.l = val;
    /* NaNs are not treated the same way IEEE 754 does */
    if (unlikely(float32_is_quiet_nan(u.f, &env->vec_status))) {
        return 0;
    }
    tmp = uint64_to_float32(1ULL << 32, &env->vec_status);
    u.f = float32_mul(u.f, tmp, &env->vec_status);

    return float32_to_uint32(u.f, &env->vec_status);
}

#define HELPER_SPE_SINGLE_CONV(name)                        \
uint32_t helper_e##name(CPUPPCState *env, uint32_t val)     \
{                                                           \
    return e##name(env, val);                               \
}

HELPER_SPE_SINGLE_CONV(fscfsi);
HELPER_SPE_SINGLE_CONV(fscfui);
HELPER_SPE_SINGLE_CONV(fscfuf);
HELPER_SPE_SINGLE_CONV(fscfsf);
HELPER_SPE_SINGLE_CONV(fsctsi);
HELPER_SPE_SINGLE_CONV(fsctui);
HELPER_SPE_SINGLE_CONV(fsctsiz);
HELPER_SPE_SINGLE_CONV(fsctuiz);
HELPER_SPE_SINGLE_CONV(fsctsf);
HELPER_SPE_SINGLE_CONV(fsctuf);

#define HELPER_SPE_VECTOR_CONV(name)                        \
uint64_t helper_ev##name(CPUPPCState *env, uint64_t val)    \
{                                                           \
    return ((uint64_t)e##name(env, val >> 32) << 32) |      \
           (uint64_t)e##name(env, val);                     \
}

HELPER_SPE_VECTOR_CONV(fscfsi);
HELPER_SPE_VECTOR_CONV(fscfui);
HELPER_SPE_VECTOR_CONV(fscfuf);
HELPER_SPE_VECTOR_CONV(fscfsf);
HELPER_SPE_VECTOR_CONV(fsctsi);
HELPER_SPE_VECTOR_CONV(fsctui);
HELPER_SPE_VECTOR_CONV(fsctsiz);
HELPER_SPE_VECTOR_CONV(fsctuiz);
HELPER_SPE_VECTOR_CONV(fsctsf);
HELPER_SPE_VECTOR_CONV(fsctuf);

/* Single-precision floating-point arithmetic */
static inline uint32_t efsadd(CPUPPCState *env, uint32_t op1, uint32_t op2)
{
    CPU_FloatU u1, u2;

    u1.l = op1;
    u2.l = op2;
    u1.f = float32_add(u1.f, u2.f, &env->vec_status);
    return u1.l;
}

static inline uint32_t efssub(CPUPPCState *env, uint32_t op1, uint32_t op2)
{
    CPU_FloatU u1, u2;

    u1.l = op1;
    u2.l = op2;
    u1.f = float32_sub(u1.f, u2.f, &env->vec_status);
    return u1.l;
}

static inline uint32_t efsmul(CPUPPCState *env, uint32_t op1, uint32_t op2)
{
    CPU_FloatU u1, u2;

    u1.l = op1;
    u2.l = op2;
    u1.f = float32_mul(u1.f, u2.f, &env->vec_status);
    return u1.l;
}

static inline uint32_t efsdiv(CPUPPCState *env, uint32_t op1, uint32_t op2)
{
    CPU_FloatU u1, u2;

    u1.l = op1;
    u2.l = op2;
    u1.f = float32_div(u1.f, u2.f, &env->vec_status);
    return u1.l;
}

#define HELPER_SPE_SINGLE_ARITH(name)                                   \
uint32_t helper_e##name(CPUPPCState *env, uint32_t op1, uint32_t op2)   \
{                                                                       \
    return e##name(env, op1, op2);                                      \
}

HELPER_SPE_SINGLE_ARITH(fsadd);
HELPER_SPE_SINGLE_ARITH(fssub);
HELPER_SPE_SINGLE_ARITH(fsmul);
HELPER_SPE_SINGLE_ARITH(fsdiv);

#define HELPER_SPE_VECTOR_ARITH(name)                                   \
uint64_t helper_ev##name(CPUPPCState *env, uint64_t op1, uint64_t op2)  \
{                                                                       \
    return ((uint64_t)e##name(env, op1 >> 32, op2 >> 32) << 32) |       \
           (uint64_t)e##name(env, op1, op2);                            \
}

HELPER_SPE_VECTOR_ARITH(fsadd);
HELPER_SPE_VECTOR_ARITH(fssub);
HELPER_SPE_VECTOR_ARITH(fsmul);
HELPER_SPE_VECTOR_ARITH(fsdiv);

/* Single-precision floating-point comparisons */
static inline uint32_t efscmplt(CPUPPCState *env, uint32_t op1, uint32_t op2)
{
    CPU_FloatU u1, u2;

    u1.l = op1;
    u2.l = op2;
    return float32_lt(u1.f, u2.f, &env->vec_status) ? 4 : 0;
}

static inline uint32_t efscmpgt(CPUPPCState *env, uint32_t op1, uint32_t op2)
{
    CPU_FloatU u1, u2;

    u1.l = op1;
    u2.l = op2;
    return float32_le(u1.f, u2.f, &env->vec_status) ? 0 : 4;
}

static inline uint32_t efscmpeq(CPUPPCState *env, uint32_t op1, uint32_t op2)
{
    CPU_FloatU u1, u2;

    u1.l = op1;
    u2.l = op2;
    return float32_eq(u1.f, u2.f, &env->vec_status) ? 4 : 0;
}

static inline uint32_t efststlt(CPUPPCState *env, uint32_t op1, uint32_t op2)
{
    /* XXX: TODO: ignore special values (NaN, infinities, ...) */
    return efscmplt(env, op1, op2);
}

static inline uint32_t efststgt(CPUPPCState *env, uint32_t op1, uint32_t op2)
{
    /* XXX: TODO: ignore special values (NaN, infinities, ...) */
    return efscmpgt(env, op1, op2);
}

static inline uint32_t efststeq(CPUPPCState *env, uint32_t op1, uint32_t op2)
{
    /* XXX: TODO: ignore special values (NaN, infinities, ...) */
    return efscmpeq(env, op1, op2);
}

#define HELPER_SINGLE_SPE_CMP(name)                                     \
uint32_t helper_e##name(CPUPPCState *env, uint32_t op1, uint32_t op2)   \
{                                                                       \
    return e##name(env, op1, op2);                                      \
}

HELPER_SINGLE_SPE_CMP(fststlt);
HELPER_SINGLE_SPE_CMP(fststgt);
HELPER_SINGLE_SPE_CMP(fststeq);
HELPER_SINGLE_SPE_CMP(fscmplt);
HELPER_SINGLE_SPE_CMP(fscmpgt);
HELPER_SINGLE_SPE_CMP(fscmpeq);

static inline uint32_t evcmp_merge(int t0, int t1)
{
    return (t0 << 3) | (t1 << 2) | ((t0 | t1) << 1) | (t0 & t1);
}
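
/*
 * Editorial example: evcmp_merge(1, 0) yields 0b1010 -- bit 3 is the
 * high element's result, bit 2 the low element's, bit 1 their OR and
 * bit 0 their AND.
 */
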
#define HELPER_VECTOR_SPE_CMP(name)                                       \
uint32_t helper_ev##name(CPUPPCState *env, uint64_t op1, uint64_t op2)    \
{                                                                         \
    return evcmp_merge(e##name(env, op1 >> 32, op2 >> 32),                \
                       e##name(env, op1, op2));                           \
}

HELPER_VECTOR_SPE_CMP(fststlt);
HELPER_VECTOR_SPE_CMP(fststgt);
HELPER_VECTOR_SPE_CMP(fststeq);
HELPER_VECTOR_SPE_CMP(fscmplt);
HELPER_VECTOR_SPE_CMP(fscmpgt);
HELPER_VECTOR_SPE_CMP(fscmpeq);

/* Double-precision floating-point conversion */
uint64_t helper_efdcfsi(CPUPPCState *env, uint32_t val)
{
    CPU_DoubleU u;

    u.d = int32_to_float64(val, &env->vec_status);

    return u.ll;
}

uint64_t helper_efdcfsid(CPUPPCState *env, uint64_t val)
{
    CPU_DoubleU u;

    u.d = int64_to_float64(val, &env->vec_status);

    return u.ll;
}

uint64_t helper_efdcfui(CPUPPCState *env, uint32_t val)
{
    CPU_DoubleU u;

    u.d = uint32_to_float64(val, &env->vec_status);

    return u.ll;
}

uint64_t helper_efdcfuid(CPUPPCState *env, uint64_t val)
{
    CPU_DoubleU u;

    u.d = uint64_to_float64(val, &env->vec_status);

    return u.ll;
}

uint32_t helper_efdctsi(CPUPPCState *env, uint64_t val)
{
    CPU_DoubleU u;

    u.ll = val;
    /* NaNs are not treated the same way IEEE 754 does */
    if (unlikely(float64_is_any_nan(u.d))) {
        return 0;
    }

    return float64_to_int32(u.d, &env->vec_status);
}

uint32_t helper_efdctui(CPUPPCState *env, uint64_t val)
{
    CPU_DoubleU u;

    u.ll = val;
    /* NaNs are not treated the same way IEEE 754 does */
    if (unlikely(float64_is_any_nan(u.d))) {
        return 0;
    }

    return float64_to_uint32(u.d, &env->vec_status);
}

uint32_t helper_efdctsiz(CPUPPCState *env, uint64_t val)
{
    CPU_DoubleU u;

    u.ll = val;
    /* NaNs are not treated the same way IEEE 754 does */
    if (unlikely(float64_is_any_nan(u.d))) {
        return 0;
    }

    return float64_to_int32_round_to_zero(u.d, &env->vec_status);
}

uint64_t helper_efdctsidz(CPUPPCState *env, uint64_t val)
{
    CPU_DoubleU u;

    u.ll = val;
    /* NaNs are not treated the same way IEEE 754 does */
    if (unlikely(float64_is_any_nan(u.d))) {
        return 0;
    }

    return float64_to_int64_round_to_zero(u.d, &env->vec_status);
}

uint32_t helper_efdctuiz(CPUPPCState *env, uint64_t val)
{
    CPU_DoubleU u;

    u.ll = val;
    /* NaNs are not treated the same way IEEE 754 does */
    if (unlikely(float64_is_any_nan(u.d))) {
        return 0;
    }

    return float64_to_uint32_round_to_zero(u.d, &env->vec_status);
}

uint64_t helper_efdctuidz(CPUPPCState *env, uint64_t val)
{
    CPU_DoubleU u;

    u.ll = val;
    /* NaNs are not treated the same way IEEE 754 does */
    if (unlikely(float64_is_any_nan(u.d))) {
        return 0;
    }

    return float64_to_uint64_round_to_zero(u.d, &env->vec_status);
}

uint64_t helper_efdcfsf(CPUPPCState *env, uint32_t val)
{
    CPU_DoubleU u;
    float64 tmp;

    u.d = int32_to_float64(val, &env->vec_status);
    tmp = int64_to_float64(1ULL << 32, &env->vec_status);
    u.d = float64_div(u.d, tmp, &env->vec_status);

    return u.ll;
}

uint64_t helper_efdcfuf(CPUPPCState *env, uint32_t val)
{
    CPU_DoubleU u;
    float64 tmp;

    u.d = uint32_to_float64(val, &env->vec_status);
    tmp = int64_to_float64(1ULL << 32, &env->vec_status);
    u.d = float64_div(u.d, tmp, &env->vec_status);

    return u.ll;
}

uint32_t helper_efdctsf(CPUPPCState *env, uint64_t val)
{
    CPU_DoubleU u;
    float64 tmp;

    u.ll = val;
    /* NaNs are not treated the same way IEEE 754 does */
    if (unlikely(float64_is_any_nan(u.d))) {
        return 0;
    }
    tmp = uint64_to_float64(1ULL << 32, &env->vec_status);
    u.d = float64_mul(u.d, tmp, &env->vec_status);

    return float64_to_int32(u.d, &env->vec_status);
}

uint32_t helper_efdctuf(CPUPPCState *env, uint64_t val)
{
    CPU_DoubleU u;
    float64 tmp;

    u.ll = val;
    /* NaNs are not treated the same way IEEE 754 does */
    if (unlikely(float64_is_any_nan(u.d))) {
        return 0;
    }
    tmp = uint64_to_float64(1ULL << 32, &env->vec_status);
    u.d = float64_mul(u.d, tmp, &env->vec_status);

    return float64_to_uint32(u.d, &env->vec_status);
}

uint32_t helper_efscfd(CPUPPCState *env, uint64_t val)
{
    CPU_DoubleU u1;
    CPU_FloatU u2;

    u1.ll = val;
    u2.f = float64_to_float32(u1.d, &env->vec_status);

    return u2.l;
}

uint64_t helper_efdcfs(CPUPPCState *env, uint32_t val)
{
    CPU_DoubleU u2;
    CPU_FloatU u1;

    u1.l = val;
    u2.d = float32_to_float64(u1.f, &env->vec_status);

    return u2.ll;
}

/* Double-precision floating-point arithmetic */
uint64_t helper_efdadd(CPUPPCState *env, uint64_t op1, uint64_t op2)
{
    CPU_DoubleU u1, u2;

    u1.ll = op1;
    u2.ll = op2;
    u1.d = float64_add(u1.d, u2.d, &env->vec_status);
    return u1.ll;
}

uint64_t helper_efdsub(CPUPPCState *env, uint64_t op1, uint64_t op2)
{
    CPU_DoubleU u1, u2;

    u1.ll = op1;
    u2.ll = op2;
    u1.d = float64_sub(u1.d, u2.d, &env->vec_status);
    return u1.ll;
}

uint64_t helper_efdmul(CPUPPCState *env, uint64_t op1, uint64_t op2)
{
    CPU_DoubleU u1, u2;

    u1.ll = op1;
    u2.ll = op2;
    u1.d = float64_mul(u1.d, u2.d, &env->vec_status);
    return u1.ll;
}

uint64_t helper_efddiv(CPUPPCState *env, uint64_t op1, uint64_t op2)
{
    CPU_DoubleU u1, u2;

    u1.ll = op1;
    u2.ll = op2;
    u1.d = float64_div(u1.d, u2.d, &env->vec_status);
    return u1.ll;
}

/* Double-precision floating-point helpers */
uint32_t helper_efdtstlt(CPUPPCState *env, uint64_t op1, uint64_t op2)
{
    CPU_DoubleU u1, u2;

    u1.ll = op1;
    u2.ll = op2;
    return float64_lt(u1.d, u2.d, &env->vec_status) ? 4 : 0;
}

uint32_t helper_efdtstgt(CPUPPCState *env, uint64_t op1, uint64_t op2)
{
    CPU_DoubleU u1, u2;

    u1.ll = op1;
    u2.ll = op2;
    return float64_le(u1.d, u2.d, &env->vec_status) ? 0 : 4;
}

uint32_t helper_efdtsteq(CPUPPCState *env, uint64_t op1, uint64_t op2)
{
    CPU_DoubleU u1, u2;

    u1.ll = op1;
    u2.ll = op2;
    return float64_eq_quiet(u1.d, u2.d, &env->vec_status) ? 4 : 0;
}

uint32_t helper_efdcmplt(CPUPPCState *env, uint64_t op1, uint64_t op2)
{
    /* XXX: TODO: test special values (NaN, infinities, ...) */
    return helper_efdtstlt(env, op1, op2);
}

uint32_t helper_efdcmpgt(CPUPPCState *env, uint64_t op1, uint64_t op2)
{
    /* XXX: TODO: test special values (NaN, infinities, ...) */
    return helper_efdtstgt(env, op1, op2);
}

uint32_t helper_efdcmpeq(CPUPPCState *env, uint64_t op1, uint64_t op2)
{
    /* XXX: TODO: test special values (NaN, infinities, ...) */
    return helper_efdtsteq(env, op1, op2);
}

#define float64_to_float64(x, env) x
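
/*
 * Editorial note: this identity macro lets macro-generated code call
 * float64_to_float64() with the same shape as the real float32 and
 * float128 conversions; for doubles it compiles away entirely.
 */
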
/*
 * VSX_ADD_SUB - VSX floating point add/subtract
 *   name  - instruction mnemonic
 *   op    - operation (add or sub)
 *   nels  - number of elements (1, 2 or 4)
 *   tp    - type (float32 or float64)
 *   fld   - vsr_t field (VsrD(*) or VsrW(*))
 *   sfprf - set FPRF
 */
#define VSX_ADD_SUB(name, op, nels, tp, fld, sfprf, r2sp)                    \
void helper_##name(CPUPPCState *env, ppc_vsr_t *xt,                          \
                   ppc_vsr_t *xa, ppc_vsr_t *xb)                             \
{                                                                            \
    ppc_vsr_t t = *xt;                                                       \
    int i;                                                                   \
                                                                             \
    helper_reset_fpstatus(env);                                              \
                                                                             \
    for (i = 0; i < nels; i++) {                                             \
        float_status tstat = env->fp_status;                                 \
        set_float_exception_flags(0, &tstat);                                \
        t.fld = tp##_##op(xa->fld, xb->fld, &tstat);                         \
        env->fp_status.float_exception_flags |= tstat.float_exception_flags; \
                                                                             \
        if (unlikely(tstat.float_exception_flags & float_flag_invalid)) {    \
            float_invalid_op_addsub(env, tstat.float_exception_flags,        \
                                    sfprf, GETPC());                         \
        }                                                                    \
                                                                             \
        if (r2sp) {                                                          \
            t.fld = helper_frsp(env, t.fld);                                 \
        }                                                                    \
                                                                             \
        if (sfprf) {                                                         \
            helper_compute_fprf_float64(env, t.fld);                         \
        }                                                                    \
    }                                                                        \
    *xt = t;                                                                 \
    do_float_check_status(env, GETPC());                                     \
}

VSX_ADD_SUB(xsadddp, add, 1, float64, VsrD(0), 1, 0)
VSX_ADD_SUB(xsaddsp, add, 1, float64, VsrD(0), 1, 1)
VSX_ADD_SUB(xvadddp, add, 2, float64, VsrD(i), 0, 0)
VSX_ADD_SUB(xvaddsp, add, 4, float32, VsrW(i), 0, 0)
VSX_ADD_SUB(xssubdp, sub, 1, float64, VsrD(0), 1, 0)
VSX_ADD_SUB(xssubsp, sub, 1, float64, VsrD(0), 1, 1)
VSX_ADD_SUB(xvsubdp, sub, 2, float64, VsrD(i), 0, 0)
VSX_ADD_SUB(xvsubsp, sub, 4, float32, VsrW(i), 0, 0)
void helper_xsaddqp(CPUPPCState *env, uint32_t opcode,
                    ppc_vsr_t *xt, ppc_vsr_t *xa, ppc_vsr_t *xb)
{
    ppc_vsr_t t = *xt;
    float_status tstat;

    helper_reset_fpstatus(env);

    tstat = env->fp_status;
    if (unlikely(Rc(opcode) != 0)) {
        tstat.float_rounding_mode = float_round_to_odd;
    }

    set_float_exception_flags(0, &tstat);
    t.f128 = float128_add(xa->f128, xb->f128, &tstat);
    env->fp_status.float_exception_flags |= tstat.float_exception_flags;

    if (unlikely(tstat.float_exception_flags & float_flag_invalid)) {
        float_invalid_op_addsub(env, tstat.float_exception_flags, 1, GETPC());
    }

    helper_compute_fprf_float128(env, t.f128);

    *xt = t;
    do_float_check_status(env, GETPC());
}

/*
 * VSX_MUL - VSX floating point multiply
 *   op    - instruction mnemonic
 *   nels  - number of elements (1, 2 or 4)
 *   tp    - type (float32 or float64)
 *   fld   - vsr_t field (VsrD(*) or VsrW(*))
 *   sfprf - set FPRF
 */
#define VSX_MUL(op, nels, tp, fld, sfprf, r2sp)                              \
void helper_##op(CPUPPCState *env, ppc_vsr_t *xt,                           \
                 ppc_vsr_t *xa, ppc_vsr_t *xb)                               \
{                                                                            \
    ppc_vsr_t t = *xt;                                                       \
    int i;                                                                   \
                                                                             \
    helper_reset_fpstatus(env);                                              \
                                                                             \
    for (i = 0; i < nels; i++) {                                             \
        float_status tstat = env->fp_status;                                 \
        set_float_exception_flags(0, &tstat);                                \
        t.fld = tp##_mul(xa->fld, xb->fld, &tstat);                          \
        env->fp_status.float_exception_flags |= tstat.float_exception_flags; \
                                                                             \
        if (unlikely(tstat.float_exception_flags & float_flag_invalid)) {    \
            float_invalid_op_mul(env, tstat.float_exception_flags,           \
                                 sfprf, GETPC());                            \
        }                                                                    \
                                                                             \
        if (r2sp) {                                                          \
            t.fld = helper_frsp(env, t.fld);                                 \
        }                                                                    \
                                                                             \
        if (sfprf) {                                                         \
            helper_compute_fprf_float64(env, t.fld);                         \
        }                                                                    \
    }                                                                        \
                                                                             \
    *xt = t;                                                                 \
    do_float_check_status(env, GETPC());                                     \
}

VSX_MUL(xsmuldp, 1, float64, VsrD(0), 1, 0)
VSX_MUL(xsmulsp, 1, float64, VsrD(0), 1, 1)
VSX_MUL(xvmuldp, 2, float64, VsrD(i), 0, 0)
VSX_MUL(xvmulsp, 4, float32, VsrW(i), 0, 0)

void helper_xsmulqp(CPUPPCState *env, uint32_t opcode,
                    ppc_vsr_t *xt, ppc_vsr_t *xa, ppc_vsr_t *xb)
{
    ppc_vsr_t t = *xt;
    float_status tstat;

    helper_reset_fpstatus(env);
    tstat = env->fp_status;
    if (unlikely(Rc(opcode) != 0)) {
        tstat.float_rounding_mode = float_round_to_odd;
    }

    set_float_exception_flags(0, &tstat);
    t.f128 = float128_mul(xa->f128, xb->f128, &tstat);
    env->fp_status.float_exception_flags |= tstat.float_exception_flags;

    if (unlikely(tstat.float_exception_flags & float_flag_invalid)) {
        float_invalid_op_mul(env, tstat.float_exception_flags, 1, GETPC());
    }
    helper_compute_fprf_float128(env, t.f128);

    *xt = t;
    do_float_check_status(env, GETPC());
}

/*
 * VSX_DIV - VSX floating point divide
 *   op    - instruction mnemonic
 *   nels  - number of elements (1, 2 or 4)
 *   tp    - type (float32 or float64)
 *   fld   - vsr_t field (VsrD(*) or VsrW(*))
 *   sfprf - set FPRF
 */
#define VSX_DIV(op, nels, tp, fld, sfprf, r2sp)                              \
void helper_##op(CPUPPCState *env, ppc_vsr_t *xt,                           \
                 ppc_vsr_t *xa, ppc_vsr_t *xb)                               \
{                                                                            \
    ppc_vsr_t t = *xt;                                                       \
    int i;                                                                   \
                                                                             \
    helper_reset_fpstatus(env);                                              \
                                                                             \
    for (i = 0; i < nels; i++) {                                             \
        float_status tstat = env->fp_status;                                 \
        set_float_exception_flags(0, &tstat);                                \
        t.fld = tp##_div(xa->fld, xb->fld, &tstat);                          \
        env->fp_status.float_exception_flags |= tstat.float_exception_flags; \
                                                                             \
        if (unlikely(tstat.float_exception_flags & float_flag_invalid)) {    \
            float_invalid_op_div(env, sfprf, GETPC(),                        \
                                 tp##_classify(xa->fld) |                    \
                                 tp##_classify(xb->fld));                    \
        }                                                                    \
        if (unlikely(tstat.float_exception_flags & float_flag_divbyzero)) {  \
            float_zero_divide_excp(env, GETPC());                            \
        }                                                                    \
                                                                             \
        if (r2sp) {                                                          \
            t.fld = helper_frsp(env, t.fld);                                 \
        }                                                                    \
                                                                             \
        if (sfprf) {                                                         \
            helper_compute_fprf_float64(env, t.fld);                         \
        }                                                                    \
    }                                                                        \
                                                                             \
    *xt = t;                                                                 \
    do_float_check_status(env, GETPC());                                     \
}

VSX_DIV(xsdivdp, 1, float64, VsrD(0), 1, 0)
VSX_DIV(xsdivsp, 1, float64, VsrD(0), 1, 1)
VSX_DIV(xvdivdp, 2, float64, VsrD(i), 0, 0)
VSX_DIV(xvdivsp, 4, float32, VsrW(i), 0, 0)

void helper_xsdivqp(CPUPPCState *env, uint32_t opcode,
                    ppc_vsr_t *xt, ppc_vsr_t *xa, ppc_vsr_t *xb)
{
    ppc_vsr_t t = *xt;
    float_status tstat;

    helper_reset_fpstatus(env);
    tstat = env->fp_status;
    if (unlikely(Rc(opcode) != 0)) {
        tstat.float_rounding_mode = float_round_to_odd;
    }

    set_float_exception_flags(0, &tstat);
    t.f128 = float128_div(xa->f128, xb->f128, &tstat);
    env->fp_status.float_exception_flags |= tstat.float_exception_flags;

    if (unlikely(tstat.float_exception_flags & float_flag_invalid)) {
        float_invalid_op_div(env, 1, GETPC(),
                             float128_classify(xa->f128) |
                             float128_classify(xb->f128));
    }
    if (unlikely(tstat.float_exception_flags & float_flag_divbyzero)) {
        float_zero_divide_excp(env, GETPC());
    }

    helper_compute_fprf_float128(env, t.f128);
    *xt = t;
    do_float_check_status(env, GETPC());
}

/*
 * VSX_RE - VSX floating point reciprocal estimate
 *   op    - instruction mnemonic
 *   nels  - number of elements (1, 2 or 4)
 *   tp    - type (float32 or float64)
 *   fld   - vsr_t field (VsrD(*) or VsrW(*))
 *   sfprf - set FPRF
 */
#define VSX_RE(op, nels, tp, fld, sfprf, r2sp)                               \
void helper_##op(CPUPPCState *env, ppc_vsr_t *xt, ppc_vsr_t *xb)            \
{                                                                            \
    ppc_vsr_t t = *xt;                                                       \
    int i;                                                                   \
                                                                             \
    helper_reset_fpstatus(env);                                              \
                                                                             \
    for (i = 0; i < nels; i++) {                                             \
        if (unlikely(tp##_is_signaling_nan(xb->fld, &env->fp_status))) {     \
            float_invalid_op_vxsnan(env, GETPC());                           \
        }                                                                    \
        t.fld = tp##_div(tp##_one, xb->fld, &env->fp_status);                \
                                                                             \
        if (r2sp) {                                                          \
            t.fld = helper_frsp(env, t.fld);                                 \
        }                                                                    \
                                                                             \
        if (sfprf) {                                                         \
            helper_compute_fprf_float64(env, t.fld);                         \
        }                                                                    \
    }                                                                        \
                                                                             \
    *xt = t;                                                                 \
    do_float_check_status(env, GETPC());                                     \
}

VSX_RE(xsredp, 1, float64, VsrD(0), 1, 0)
VSX_RE(xsresp, 1, float64, VsrD(0), 1, 1)
VSX_RE(xvredp, 2, float64, VsrD(i), 0, 0)
VSX_RE(xvresp, 4, float32, VsrW(i), 0, 0)

/*
 * VSX_SQRT - VSX floating point square root
 *   op    - instruction mnemonic
 *   nels  - number of elements (1, 2 or 4)
 *   tp    - type (float32 or float64)
 *   fld   - vsr_t field (VsrD(*) or VsrW(*))
 *   sfprf - set FPRF
 */
#define VSX_SQRT(op, nels, tp, fld, sfprf, r2sp)                             \
void helper_##op(CPUPPCState *env, ppc_vsr_t *xt, ppc_vsr_t *xb)            \
{                                                                            \
    ppc_vsr_t t = *xt;                                                       \
    int i;                                                                   \
                                                                             \
    helper_reset_fpstatus(env);                                              \
                                                                             \
    for (i = 0; i < nels; i++) {                                             \
        float_status tstat = env->fp_status;                                 \
        set_float_exception_flags(0, &tstat);                                \
        t.fld = tp##_sqrt(xb->fld, &tstat);                                  \
        env->fp_status.float_exception_flags |= tstat.float_exception_flags; \
                                                                             \
        if (unlikely(tstat.float_exception_flags & float_flag_invalid)) {    \
            if (tp##_is_neg(xb->fld) && !tp##_is_zero(xb->fld)) {            \
                float_invalid_op_vxsqrt(env, sfprf, GETPC());                \
            } else if (tp##_is_signaling_nan(xb->fld, &tstat)) {             \
                float_invalid_op_vxsnan(env, GETPC());                       \
            }                                                                \
        }                                                                    \
                                                                             \
        if (r2sp) {                                                          \
            t.fld = helper_frsp(env, t.fld);                                 \
        }                                                                    \
                                                                             \
        if (sfprf) {                                                         \
            helper_compute_fprf_float64(env, t.fld);                         \
        }                                                                    \
    }                                                                        \
                                                                             \
    *xt = t;                                                                 \
    do_float_check_status(env, GETPC());                                     \
}

VSX_SQRT(xssqrtdp, 1, float64, VsrD(0), 1, 0)
VSX_SQRT(xssqrtsp, 1, float64, VsrD(0), 1, 1)
VSX_SQRT(xvsqrtdp, 2, float64, VsrD(i), 0, 0)
VSX_SQRT(xvsqrtsp, 4, float32, VsrW(i), 0, 0)

/*
 * VSX_RSQRTE - VSX floating point reciprocal square root estimate
 *   op    - instruction mnemonic
 *   nels  - number of elements (1, 2 or 4)
 *   tp    - type (float32 or float64)
 *   fld   - vsr_t field (VsrD(*) or VsrW(*))
 *   sfprf - set FPRF
 */
#define VSX_RSQRTE(op, nels, tp, fld, sfprf, r2sp)                           \
void helper_##op(CPUPPCState *env, ppc_vsr_t *xt, ppc_vsr_t *xb)            \
{                                                                            \
    ppc_vsr_t t = *xt;                                                       \
    int i;                                                                   \
                                                                             \
    helper_reset_fpstatus(env);                                              \
                                                                             \
    for (i = 0; i < nels; i++) {                                             \
        float_status tstat = env->fp_status;                                 \
        set_float_exception_flags(0, &tstat);                                \
        t.fld = tp##_sqrt(xb->fld, &tstat);                                  \
        t.fld = tp##_div(tp##_one, t.fld, &tstat);                           \
        env->fp_status.float_exception_flags |= tstat.float_exception_flags; \
                                                                             \
        if (unlikely(tstat.float_exception_flags & float_flag_invalid)) {    \
            if (tp##_is_neg(xb->fld) && !tp##_is_zero(xb->fld)) {            \
                float_invalid_op_vxsqrt(env, sfprf, GETPC());                \
            } else if (tp##_is_signaling_nan(xb->fld, &tstat)) {             \
                float_invalid_op_vxsnan(env, GETPC());                       \
            }                                                                \
        }                                                                    \
                                                                             \
        if (r2sp) {                                                          \
            t.fld = helper_frsp(env, t.fld);                                 \
        }                                                                    \
                                                                             \
        if (sfprf) {                                                         \
            helper_compute_fprf_float64(env, t.fld);                         \
        }                                                                    \
    }                                                                        \
                                                                             \
    *xt = t;                                                                 \
    do_float_check_status(env, GETPC());                                     \
}

VSX_RSQRTE(xsrsqrtedp, 1, float64, VsrD(0), 1, 0)
VSX_RSQRTE(xsrsqrtesp, 1, float64, VsrD(0), 1, 1)
VSX_RSQRTE(xvrsqrtedp, 2, float64, VsrD(i), 0, 0)
VSX_RSQRTE(xvrsqrtesp, 4, float32, VsrW(i), 0, 0)

/*
 * VSX_TDIV - VSX floating point test for divide
 *   op    - instruction mnemonic
 *   nels  - number of elements (1, 2 or 4)
 *   tp    - type (float32 or float64)
 *   fld   - vsr_t field (VsrD(*) or VsrW(*))
 *   emin  - minimum unbiased exponent
 *   emax  - maximum unbiased exponent
 *   nbits - number of fraction bits
 */
#define VSX_TDIV(op, nels, tp, fld, emin, emax, nbits)                  \
void helper_##op(CPUPPCState *env, uint32_t opcode,                     \
                 ppc_vsr_t *xa, ppc_vsr_t *xb)                          \
{                                                                       \
    int i;                                                              \
    int fe_flag = 0;                                                    \
    int fg_flag = 0;                                                    \
                                                                        \
    for (i = 0; i < nels; i++) {                                        \
        if (unlikely(tp##_is_infinity(xa->fld) ||                       \
                     tp##_is_infinity(xb->fld) ||                       \
                     tp##_is_zero(xb->fld))) {                          \
            fe_flag = 1;                                                \
            fg_flag = 1;                                                \
        } else {                                                        \
            int e_a = ppc_##tp##_get_unbiased_exp(xa->fld);             \
            int e_b = ppc_##tp##_get_unbiased_exp(xb->fld);             \
                                                                        \
            if (unlikely(tp##_is_any_nan(xa->fld) ||                    \
                         tp##_is_any_nan(xb->fld))) {                   \
                fe_flag = 1;                                            \
            } else if ((e_b <= emin) || (e_b >= (emax - 2))) {          \
                fe_flag = 1;                                            \
            } else if (!tp##_is_zero(xa->fld) &&                        \
                       (((e_a - e_b) >= emax) ||                        \
                        ((e_a - e_b) <= (emin + 1)) ||                  \
                        (e_a <= (emin + nbits)))) {                     \
                fe_flag = 1;                                            \
            }                                                           \
                                                                        \
            if (unlikely(tp##_is_zero_or_denormal(xb->fld))) {          \
                /*                                                      \
                 * XB is not zero because of the above check and so     \
                 * must be denormalized.                                \
                 */                                                     \
                fg_flag = 1;                                            \
            }                                                           \
        }                                                               \
    }                                                                   \
                                                                        \
    env->crf[BF(opcode)] = 0x8 | (fg_flag ? 4 : 0) | (fe_flag ? 2 : 0); \
}

VSX_TDIV(xstdivdp, 1, float64, VsrD(0), -1022, 1023, 52)
VSX_TDIV(xvtdivdp, 2, float64, VsrD(i), -1022, 1023, 52)
VSX_TDIV(xvtdivsp, 4, float32, VsrW(i), -126, 127, 23)

/*
 * VSX_TSQRT - VSX floating point test for square root
 *   op    - instruction mnemonic
 *   nels  - number of elements (1, 2 or 4)
 *   tp    - type (float32 or float64)
 *   fld   - vsr_t field (VsrD(*) or VsrW(*))
 *   emin  - minimum unbiased exponent
 *   nbits - number of fraction bits
 */
#define VSX_TSQRT(op, nels, tp, fld, emin, nbits)                       \
void helper_##op(CPUPPCState *env, uint32_t opcode, ppc_vsr_t *xb)      \
{                                                                       \
    int i;                                                              \
    int fe_flag = 0;                                                    \
    int fg_flag = 0;                                                    \
                                                                        \
    for (i = 0; i < nels; i++) {                                        \
        if (unlikely(tp##_is_infinity(xb->fld) ||                       \
                     tp##_is_zero(xb->fld))) {                          \
            fe_flag = 1;                                                \
            fg_flag = 1;                                                \
        } else {                                                        \
            int e_b = ppc_##tp##_get_unbiased_exp(xb->fld);             \
                                                                        \
            if (unlikely(tp##_is_any_nan(xb->fld))) {                   \
                fe_flag = 1;                                            \
            } else if (unlikely(tp##_is_zero(xb->fld))) {               \
                fe_flag = 1;                                            \
            } else if (unlikely(tp##_is_neg(xb->fld))) {                \
                fe_flag = 1;                                            \
            } else if (!tp##_is_zero(xb->fld) &&                        \
                       (e_b <= (emin + nbits))) {                       \
                fe_flag = 1;                                            \
            }                                                           \
                                                                        \
            if (unlikely(tp##_is_zero_or_denormal(xb->fld))) {          \
                /*                                                      \
                 * XB is not zero because of the above check and        \
                 * therefore must be denormalized.                      \
                 */                                                     \
                fg_flag = 1;                                            \
            }                                                           \
        }                                                               \
    }                                                                   \
                                                                        \
    env->crf[BF(opcode)] = 0x8 | (fg_flag ? 4 : 0) | (fe_flag ? 2 : 0); \
}

VSX_TSQRT(xstsqrtdp, 1, float64, VsrD(0), -1022, 52)
VSX_TSQRT(xvtsqrtdp, 2, float64, VsrD(i), -1022, 52)
VSX_TSQRT(xvtsqrtsp, 4, float32, VsrW(i), -126, 23)

/*
 * VSX_MADD - VSX floating point multiply/add variations
 *   op       - instruction mnemonic
 *   nels     - number of elements (1, 2 or 4)
 *   tp       - type (float32 or float64)
 *   fld      - vsr_t field (VsrD(*) or VsrW(*))
 *   maddflgs - flags for the float*muladd routine that control the
 *              various forms (madd, msub, nmadd, nmsub)
 *   sfprf    - set FPRF
 */
#define VSX_MADD(op, nels, tp, fld, maddflgs, sfprf, r2sp)                    \
void helper_##op(CPUPPCState *env, ppc_vsr_t *xt,                             \
                 ppc_vsr_t *xa, ppc_vsr_t *b, ppc_vsr_t *c)                   \
{                                                                             \
    ppc_vsr_t t = *xt;                                                        \
    int i;                                                                    \
                                                                              \
    helper_reset_fpstatus(env);                                               \
                                                                              \
    for (i = 0; i < nels; i++) {                                              \
        float_status tstat = env->fp_status;                                  \
        set_float_exception_flags(0, &tstat);                                 \
        if (r2sp && (tstat.float_rounding_mode == float_round_nearest_even)) {\
            /*                                                                \
             * Avoid double rounding errors by rounding the intermediate      \
             * result to odd.                                                 \
             */                                                               \
            set_float_rounding_mode(float_round_to_zero, &tstat);             \
            t.fld = tp##_muladd(xa->fld, b->fld, c->fld,                      \
                                maddflgs, &tstat);                            \
            t.fld |= (get_float_exception_flags(&tstat) &                     \
                      float_flag_inexact) != 0;                               \
        } else {                                                              \
            t.fld = tp##_muladd(xa->fld, b->fld, c->fld,                      \
                                maddflgs, &tstat);                            \
        }                                                                     \
        env->fp_status.float_exception_flags |= tstat.float_exception_flags;  \
                                                                              \
        if (unlikely(tstat.float_exception_flags & float_flag_invalid)) {     \
            tp##_maddsub_update_excp(env, xa->fld, b->fld,                    \
                                     c->fld, maddflgs, GETPC());              \
        }                                                                     \
                                                                              \
        if (r2sp) {                                                           \
            t.fld = helper_frsp(env, t.fld);                                  \
        }                                                                     \
                                                                              \
        if (sfprf) {                                                          \
            helper_compute_fprf_float64(env, t.fld);                          \
        }                                                                     \
    }                                                                         \
    *xt = t;                                                                  \
    do_float_check_status(env, GETPC());                                      \
}

VSX_MADD(xsmadddp, 1, float64, VsrD(0), MADD_FLGS, 1, 0)
VSX_MADD(xsmsubdp, 1, float64, VsrD(0), MSUB_FLGS, 1, 0)
VSX_MADD(xsnmadddp, 1, float64, VsrD(0), NMADD_FLGS, 1, 0)
VSX_MADD(xsnmsubdp, 1, float64, VsrD(0), NMSUB_FLGS, 1, 0)
VSX_MADD(xsmaddsp, 1, float64, VsrD(0), MADD_FLGS, 1, 1)
VSX_MADD(xsmsubsp, 1, float64, VsrD(0), MSUB_FLGS, 1, 1)
VSX_MADD(xsnmaddsp, 1, float64, VsrD(0), NMADD_FLGS, 1, 1)
VSX_MADD(xsnmsubsp, 1, float64, VsrD(0), NMSUB_FLGS, 1, 1)

VSX_MADD(xvmadddp, 2, float64, VsrD(i), MADD_FLGS, 0, 0)
VSX_MADD(xvmsubdp, 2, float64, VsrD(i), MSUB_FLGS, 0, 0)
VSX_MADD(xvnmadddp, 2, float64, VsrD(i), NMADD_FLGS, 0, 0)
VSX_MADD(xvnmsubdp, 2, float64, VsrD(i), NMSUB_FLGS, 0, 0)

VSX_MADD(xvmaddsp, 4, float32, VsrW(i), MADD_FLGS, 0, 0)
VSX_MADD(xvmsubsp, 4, float32, VsrW(i), MSUB_FLGS, 0, 0)
VSX_MADD(xvnmaddsp, 4, float32, VsrW(i), NMADD_FLGS, 0, 0)
VSX_MADD(xvnmsubsp, 4, float32, VsrW(i), NMSUB_FLGS, 0, 0)
/*
 * VSX_SCALAR_CMP_DP - VSX scalar floating point compare double precision
 *   op    - instruction mnemonic
 *   cmp   - comparison operation
 *   exp   - expected result of comparison
 *   svxvc - set VXVC bit
 */
#define VSX_SCALAR_CMP_DP(op, cmp, exp, svxvc)                                \
void helper_##op(CPUPPCState *env, ppc_vsr_t *xt,                             \
                 ppc_vsr_t *xa, ppc_vsr_t *xb)                                \
{                                                                             \
    ppc_vsr_t t = *xt;                                                        \
    bool vxsnan_flag = false, vxvc_flag = false, vex_flag = false;            \
                                                                              \
    if (float64_is_signaling_nan(xa->VsrD(0), &env->fp_status) ||             \
        float64_is_signaling_nan(xb->VsrD(0), &env->fp_status)) {             \
        vxsnan_flag = true;                                                   \
        if (fpscr_ve == 0 && svxvc) {                                         \
            vxvc_flag = true;                                                 \
        }                                                                     \
    } else if (svxvc) {                                                       \
        vxvc_flag = float64_is_quiet_nan(xa->VsrD(0), &env->fp_status) ||     \
                    float64_is_quiet_nan(xb->VsrD(0), &env->fp_status);       \
    }                                                                         \
    if (vxsnan_flag) {                                                        \
        float_invalid_op_vxsnan(env, GETPC());                                \
    }                                                                         \
    if (vxvc_flag) {                                                          \
        float_invalid_op_vxvc(env, 0, GETPC());                               \
    }                                                                         \
    vex_flag = fpscr_ve && (vxvc_flag || vxsnan_flag);                        \
                                                                              \
    if (!vex_flag) {                                                          \
        if (float64_##cmp(xb->VsrD(0), xa->VsrD(0),                           \
                          &env->fp_status) == exp) {                          \
            t.VsrD(0) = -1;                                                   \
            t.VsrD(1) = 0;                                                    \
        } else {                                                              \
            t.VsrD(0) = 0;                                                    \
            t.VsrD(1) = 0;                                                    \
        }                                                                     \
        *xt = t;                                                              \
    }                                                                         \
    do_float_check_status(env, GETPC());                                      \
}

VSX_SCALAR_CMP_DP(xscmpeqdp, eq, 1, 0)
VSX_SCALAR_CMP_DP(xscmpgedp, le, 1, 1)
VSX_SCALAR_CMP_DP(xscmpgtdp, lt, 1, 1)
VSX_SCALAR_CMP_DP(xscmpnedp, eq, 0, 0)

void helper_xscmpexpdp(CPUPPCState *env, uint32_t opcode,
                       ppc_vsr_t *xa, ppc_vsr_t *xb)
{
    int64_t exp_a, exp_b;
    uint32_t cc;

    exp_a = extract64(xa->VsrD(0), 52, 11);
    exp_b = extract64(xb->VsrD(0), 52, 11);

    if (unlikely(float64_is_any_nan(xa->VsrD(0)) ||
                 float64_is_any_nan(xb->VsrD(0)))) {
        cc = CRF_SO;
    } else {
        if (exp_a < exp_b) {
            cc = CRF_LT;
        } else if (exp_a > exp_b) {
            cc = CRF_GT;
        } else {
            cc = CRF_EQ;
        }
    }

    env->fpscr &= ~FP_FPCC;
    env->fpscr |= cc << FPSCR_FPCC;
    env->crf[BF(opcode)] = cc;

    do_float_check_status(env, GETPC());
}

void helper_xscmpexpqp(CPUPPCState *env, uint32_t opcode,
                       ppc_vsr_t *xa, ppc_vsr_t *xb)
{
    int64_t exp_a, exp_b;
    uint32_t cc;

    exp_a = extract64(xa->VsrD(0), 48, 15);
    exp_b = extract64(xb->VsrD(0), 48, 15);

    if (unlikely(float128_is_any_nan(xa->f128) ||
                 float128_is_any_nan(xb->f128))) {
        cc = CRF_SO;
    } else {
        if (exp_a < exp_b) {
            cc = CRF_LT;
        } else if (exp_a > exp_b) {
            cc = CRF_GT;
        } else {
            cc = CRF_EQ;
        }
    }

    env->fpscr &= ~FP_FPCC;
    env->fpscr |= cc << FPSCR_FPCC;
    env->crf[BF(opcode)] = cc;

    do_float_check_status(env, GETPC());
}

static inline void do_scalar_cmp(CPUPPCState *env, ppc_vsr_t *xa, ppc_vsr_t *xb,
                                 int crf_idx, bool ordered)
{
    uint32_t cc;
    bool vxsnan_flag = false, vxvc_flag = false;

    helper_reset_fpstatus(env);

    switch (float64_compare(xa->VsrD(0), xb->VsrD(0), &env->fp_status)) {
    case float_relation_less:
        cc = CRF_LT;
        break;
    case float_relation_equal:
        cc = CRF_EQ;
        break;
    case float_relation_greater:
        cc = CRF_GT;
        break;
    case float_relation_unordered:
        cc = CRF_SO;

        if (float64_is_signaling_nan(xa->VsrD(0), &env->fp_status) ||
            float64_is_signaling_nan(xb->VsrD(0), &env->fp_status)) {
            vxsnan_flag = true;
            if (fpscr_ve == 0 && ordered) {
                vxvc_flag = true;
            }
        } else if (float64_is_quiet_nan(xa->VsrD(0), &env->fp_status) ||
                   float64_is_quiet_nan(xb->VsrD(0), &env->fp_status)) {
            if (ordered) {
                vxvc_flag = true;
            }
        }

        break;
    default:
        g_assert_not_reached();
    }

    env->fpscr &= ~FP_FPCC;
    env->fpscr |= cc << FPSCR_FPCC;
    env->crf[crf_idx] = cc;

    if (vxsnan_flag) {
        float_invalid_op_vxsnan(env, GETPC());
    }
    if (vxvc_flag) {
        float_invalid_op_vxvc(env, 0, GETPC());
    }

    do_float_check_status(env, GETPC());
}

void helper_xscmpodp(CPUPPCState *env, uint32_t opcode, ppc_vsr_t *xa,
                     ppc_vsr_t *xb)
{
    do_scalar_cmp(env, xa, xb, BF(opcode), true);
}

void helper_xscmpudp(CPUPPCState *env, uint32_t opcode, ppc_vsr_t *xa,
                     ppc_vsr_t *xb)
{
    do_scalar_cmp(env, xa, xb, BF(opcode), false);
}

static inline void do_scalar_cmpq(CPUPPCState *env, ppc_vsr_t *xa,
                                  ppc_vsr_t *xb, int crf_idx, bool ordered)
{
    uint32_t cc;
    bool vxsnan_flag = false, vxvc_flag = false;

    helper_reset_fpstatus(env);

    switch (float128_compare(xa->f128, xb->f128, &env->fp_status)) {
    case float_relation_less:
        cc = CRF_LT;
        break;
    case float_relation_equal:
        cc = CRF_EQ;
        break;
    case float_relation_greater:
        cc = CRF_GT;
        break;
    case float_relation_unordered:
        cc = CRF_SO;

        if (float128_is_signaling_nan(xa->f128, &env->fp_status) ||
            float128_is_signaling_nan(xb->f128, &env->fp_status)) {
            vxsnan_flag = true;
            if (fpscr_ve == 0 && ordered) {
                vxvc_flag = true;
            }
        } else if (float128_is_quiet_nan(xa->f128, &env->fp_status) ||
                   float128_is_quiet_nan(xb->f128, &env->fp_status)) {
            if (ordered) {
                vxvc_flag = true;
            }
        }

        break;
    default:
        g_assert_not_reached();
    }

    env->fpscr &= ~FP_FPCC;
    env->fpscr |= cc << FPSCR_FPCC;
    env->crf[crf_idx] = cc;

    if (vxsnan_flag) {
        float_invalid_op_vxsnan(env, GETPC());
    }
    if (vxvc_flag) {
        float_invalid_op_vxvc(env, 0, GETPC());
    }

    do_float_check_status(env, GETPC());
}

2410 void helper_xscmpoqp(CPUPPCState
*env
, uint32_t opcode
, ppc_vsr_t
*xa
,
2413 do_scalar_cmpq(env
, xa
, xb
, BF(opcode
), true);
2416 void helper_xscmpuqp(CPUPPCState
*env
, uint32_t opcode
, ppc_vsr_t
*xa
,
2419 do_scalar_cmpq(env
, xa
, xb
, BF(opcode
), false);
/*
 * VSX_MAX_MIN - VSX floating point maximum/minimum
 *   name  - instruction mnemonic
 *   op    - operation (max or min)
 *   nels  - number of elements (1, 2 or 4)
 *   tp    - type (float32 or float64)
 *   fld   - vsr_t field (VsrD(*) or VsrW(*))
 */
#define VSX_MAX_MIN(name, op, nels, tp, fld)                                  \
void helper_##name(CPUPPCState *env, ppc_vsr_t *xt,                          \
                   ppc_vsr_t *xa, ppc_vsr_t *xb)                              \
{                                                                             \
    ppc_vsr_t t = *xt;                                                        \
    int i;                                                                    \
                                                                              \
    for (i = 0; i < nels; i++) {                                              \
        t.fld = tp##_##op(xa->fld, xb->fld, &env->fp_status);                 \
        if (unlikely(tp##_is_signaling_nan(xa->fld, &env->fp_status) ||       \
                     tp##_is_signaling_nan(xb->fld, &env->fp_status))) {      \
            float_invalid_op_vxsnan(env, GETPC());                            \
        }                                                                     \
    }                                                                         \
                                                                              \
    *xt = t;                                                                  \
    do_float_check_status(env, GETPC());                                      \
}
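/*
 * float64_maxnum/minnum implement the IEEE 754-2008 maxNum/minNum
 * semantics: when exactly one operand is a quiet NaN, the other
 * (numeric) operand is returned.  A signalling NaN still sets VXSNAN
 * via the check above.
 */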
VSX_MAX_MIN(xsmaxdp, maxnum, 1, float64, VsrD(0))
VSX_MAX_MIN(xvmaxdp, maxnum, 2, float64, VsrD(i))
VSX_MAX_MIN(xvmaxsp, maxnum, 4, float32, VsrW(i))
VSX_MAX_MIN(xsmindp, minnum, 1, float64, VsrD(0))
VSX_MAX_MIN(xvmindp, minnum, 2, float64, VsrD(i))
VSX_MAX_MIN(xvminsp, minnum, 4, float32, VsrW(i))
#define VSX_MAX_MINC(name, max)                                               \
void helper_##name(CPUPPCState *env, uint32_t opcode,                         \
                   ppc_vsr_t *xt, ppc_vsr_t *xa, ppc_vsr_t *xb)               \
{                                                                             \
    ppc_vsr_t t = *xt;                                                        \
    bool vxsnan_flag = false, vex_flag = false;                               \
                                                                              \
    if (unlikely(float64_is_any_nan(xa->VsrD(0)) ||                           \
                 float64_is_any_nan(xb->VsrD(0)))) {                          \
        if (float64_is_signaling_nan(xa->VsrD(0), &env->fp_status) ||         \
            float64_is_signaling_nan(xb->VsrD(0), &env->fp_status)) {         \
            vxsnan_flag = true;                                               \
        }                                                                     \
        t.VsrD(0) = xb->VsrD(0);                                              \
    } else if ((max &&                                                        \
               !float64_lt(xa->VsrD(0), xb->VsrD(0), &env->fp_status)) ||     \
               (!max &&                                                       \
               float64_lt(xa->VsrD(0), xb->VsrD(0), &env->fp_status))) {      \
        t.VsrD(0) = xa->VsrD(0);                                              \
    } else {                                                                  \
        t.VsrD(0) = xb->VsrD(0);                                              \
    }                                                                         \
                                                                              \
    vex_flag = fpscr_ve & vxsnan_flag;                                        \
    if (vxsnan_flag) {                                                        \
        float_invalid_op_vxsnan(env, GETPC());                                \
    }                                                                         \
    if (!vex_flag) {                                                          \
        *xt = t;                                                              \
    }                                                                         \
}                                                                             \

VSX_MAX_MINC(xsmaxcdp, 1);
VSX_MAX_MINC(xsmincdp, 0);
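/*
 * The "C" variants above simply return xb whenever either operand is a
 * NaN.  The "J" variants below instead propagate the NaN operand itself
 * (preferring xa) and special-case signed zeros so that
 * max(-0.0, +0.0) is +0.0 and min(-0.0, +0.0) is -0.0.
 */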
#define VSX_MAX_MINJ(name, max)                                               \
void helper_##name(CPUPPCState *env, uint32_t opcode,                         \
                   ppc_vsr_t *xt, ppc_vsr_t *xa, ppc_vsr_t *xb)               \
{                                                                             \
    ppc_vsr_t t = *xt;                                                        \
    bool vxsnan_flag = false, vex_flag = false;                               \
                                                                              \
    if (unlikely(float64_is_any_nan(xa->VsrD(0)))) {                          \
        if (float64_is_signaling_nan(xa->VsrD(0), &env->fp_status)) {         \
            vxsnan_flag = true;                                               \
        }                                                                     \
        t.VsrD(0) = xa->VsrD(0);                                              \
    } else if (unlikely(float64_is_any_nan(xb->VsrD(0)))) {                   \
        if (float64_is_signaling_nan(xb->VsrD(0), &env->fp_status)) {         \
            vxsnan_flag = true;                                               \
        }                                                                     \
        t.VsrD(0) = xb->VsrD(0);                                              \
    } else if (float64_is_zero(xa->VsrD(0)) &&                                \
               float64_is_zero(xb->VsrD(0))) {                                \
        if (max) {                                                            \
            if (!float64_is_neg(xa->VsrD(0)) ||                               \
                !float64_is_neg(xb->VsrD(0))) {                               \
                t.VsrD(0) = 0ULL;                                             \
            } else {                                                          \
                t.VsrD(0) = 0x8000000000000000ULL;                            \
            }                                                                 \
        } else {                                                              \
            if (float64_is_neg(xa->VsrD(0)) ||                                \
                float64_is_neg(xb->VsrD(0))) {                                \
                t.VsrD(0) = 0x8000000000000000ULL;                            \
            } else {                                                          \
                t.VsrD(0) = 0ULL;                                             \
            }                                                                 \
        }                                                                     \
    } else if ((max &&                                                        \
               !float64_lt(xa->VsrD(0), xb->VsrD(0), &env->fp_status)) ||     \
               (!max &&                                                       \
               float64_lt(xa->VsrD(0), xb->VsrD(0), &env->fp_status))) {      \
        t.VsrD(0) = xa->VsrD(0);                                              \
    } else {                                                                  \
        t.VsrD(0) = xb->VsrD(0);                                              \
    }                                                                         \
                                                                              \
    vex_flag = fpscr_ve & vxsnan_flag;                                        \
    if (vxsnan_flag) {                                                        \
        float_invalid_op_vxsnan(env, GETPC());                                \
    }                                                                         \
    if (!vex_flag) {                                                          \
        *xt = t;                                                              \
    }                                                                         \
}                                                                             \

VSX_MAX_MINJ(xsmaxjdp, 1);
VSX_MAX_MINJ(xsminjdp, 0);
/*
 * VSX_CMP - VSX floating point compare
 *   op    - instruction mnemonic
 *   nels  - number of elements (1, 2 or 4)
 *   tp    - type (float32 or float64)
 *   fld   - vsr_t field (VsrD(*) or VsrW(*))
 *   cmp   - comparison operation
 *   svxvc - set VXVC bit
 *   exp   - expected result of comparison
 */
#define VSX_CMP(op, nels, tp, fld, cmp, svxvc, exp)                       \
uint32_t helper_##op(CPUPPCState *env, ppc_vsr_t *xt,                     \
                     ppc_vsr_t *xa, ppc_vsr_t *xb)                        \
{                                                                         \
    ppc_vsr_t t = *xt;                                                    \
    uint32_t crf6 = 0;                                                    \
    int i;                                                                \
    int all_true = 1;                                                     \
    int all_false = 1;                                                    \
                                                                          \
    for (i = 0; i < nels; i++) {                                          \
        if (unlikely(tp##_is_any_nan(xa->fld) ||                          \
                     tp##_is_any_nan(xb->fld))) {                         \
            if (tp##_is_signaling_nan(xa->fld, &env->fp_status) ||        \
                tp##_is_signaling_nan(xb->fld, &env->fp_status)) {        \
                float_invalid_op_vxsnan(env, GETPC());                    \
            }                                                             \
            if (svxvc) {                                                  \
                float_invalid_op_vxvc(env, 0, GETPC());                   \
            }                                                             \
            t.fld = 0;                                                    \
            all_true = 0;                                                 \
        } else {                                                          \
            if (tp##_##cmp(xb->fld, xa->fld, &env->fp_status) == exp) {   \
                t.fld = -1;                                               \
                all_false = 0;                                            \
            } else {                                                      \
                t.fld = 0;                                                \
                all_true = 0;                                             \
            }                                                             \
        }                                                                 \
    }                                                                     \
                                                                          \
    *xt = t;                                                              \
    crf6 = (all_true ? 0x8 : 0) | (all_false ? 0x2 : 0);                  \
    return crf6;                                                          \
}
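/*
 * As with the scalar compares, the predicate is evaluated as
 * tp##_##cmp(xb, xa).  The returned crf6 summarises all elements:
 * 0x8 when the predicate held for every element, 0x2 when it held for
 * none, 0 otherwise.
 */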
VSX_CMP(xvcmpeqdp, 2, float64, VsrD(i), eq, 0, 1)
VSX_CMP(xvcmpgedp, 2, float64, VsrD(i), le, 1, 1)
VSX_CMP(xvcmpgtdp, 2, float64, VsrD(i), lt, 1, 1)
VSX_CMP(xvcmpnedp, 2, float64, VsrD(i), eq, 0, 0)
VSX_CMP(xvcmpeqsp, 4, float32, VsrW(i), eq, 0, 1)
VSX_CMP(xvcmpgesp, 4, float32, VsrW(i), le, 1, 1)
VSX_CMP(xvcmpgtsp, 4, float32, VsrW(i), lt, 1, 1)
VSX_CMP(xvcmpnesp, 4, float32, VsrW(i), eq, 0, 0)
/*
 * VSX_CVT_FP_TO_FP - VSX floating point/floating point conversion
 *   op    - instruction mnemonic
 *   nels  - number of elements (1, 2 or 4)
 *   stp   - source type (float32 or float64)
 *   ttp   - target type (float32 or float64)
 *   sfld  - source vsr_t field
 *   tfld  - target vsr_t field (f32 or f64)
 *   sfprf - set FPRF
 */
#define VSX_CVT_FP_TO_FP(op, nels, stp, ttp, sfld, tfld, sfprf)    \
void helper_##op(CPUPPCState *env, ppc_vsr_t *xt, ppc_vsr_t *xb)   \
{                                                                  \
    ppc_vsr_t t = *xt;                                             \
    int i;                                                         \
                                                                   \
    for (i = 0; i < nels; i++) {                                   \
        t.tfld = stp##_to_##ttp(xb->sfld, &env->fp_status);        \
        if (unlikely(stp##_is_signaling_nan(xb->sfld,              \
                                            &env->fp_status))) {   \
            float_invalid_op_vxsnan(env, GETPC());                 \
            t.tfld = ttp##_snan_to_qnan(t.tfld);                   \
        }                                                          \
        if (sfprf) {                                               \
            helper_compute_fprf_##ttp(env, t.tfld);                \
        }                                                          \
    }                                                              \
                                                                   \
    *xt = t;                                                       \
    do_float_check_status(env, GETPC());                           \
}
VSX_CVT_FP_TO_FP(xscvdpsp, 1, float64, float32, VsrD(0), VsrW(0), 1)
VSX_CVT_FP_TO_FP(xscvspdp, 1, float32, float64, VsrW(0), VsrD(0), 1)
VSX_CVT_FP_TO_FP(xvcvdpsp, 2, float64, float32, VsrD(i), VsrW(2 * i), 0)
VSX_CVT_FP_TO_FP(xvcvspdp, 2, float32, float64, VsrW(2 * i), VsrD(i), 0)
/*
 * VSX_CVT_FP_TO_FP_VECTOR - VSX floating point/floating point conversion
 *   op    - instruction mnemonic
 *   nels  - number of elements (1, 2 or 4)
 *   stp   - source type (float32 or float64)
 *   ttp   - target type (float64 or float128)
 *   sfld  - source vsr_t field
 *   tfld  - target vsr_t field (f64 or f128)
 *   sfprf - set FPRF
 */
#define VSX_CVT_FP_TO_FP_VECTOR(op, nels, stp, ttp, sfld, tfld, sfprf)    \
void helper_##op(CPUPPCState *env, uint32_t opcode,                       \
                 ppc_vsr_t *xt, ppc_vsr_t *xb)                            \
{                                                                         \
    ppc_vsr_t t = *xt;                                                    \
    int i;                                                                \
                                                                          \
    for (i = 0; i < nels; i++) {                                          \
        t.tfld = stp##_to_##ttp(xb->sfld, &env->fp_status);               \
        if (unlikely(stp##_is_signaling_nan(xb->sfld,                     \
                                            &env->fp_status))) {          \
            float_invalid_op_vxsnan(env, GETPC());                        \
            t.tfld = ttp##_snan_to_qnan(t.tfld);                          \
        }                                                                 \
        if (sfprf) {                                                      \
            helper_compute_fprf_##ttp(env, t.tfld);                       \
        }                                                                 \
    }                                                                     \
                                                                          \
    *xt = t;                                                              \
    do_float_check_status(env, GETPC());                                  \
}

VSX_CVT_FP_TO_FP_VECTOR(xscvdpqp, 1, float64, float128, VsrD(0), f128, 1)
/*
 * VSX_CVT_FP_TO_FP_HP - VSX floating point/floating point conversion
 *                       involving one half precision value
 *   op    - instruction mnemonic
 *   nels  - number of elements (1, 2 or 4)
 *   stp   - source type
 *   ttp   - target type
 *   sfld  - source vsr_t field
 *   tfld  - target vsr_t field
 *   sfprf - set FPRF
 */
#define VSX_CVT_FP_TO_FP_HP(op, nels, stp, ttp, sfld, tfld, sfprf) \
void helper_##op(CPUPPCState *env, ppc_vsr_t *xt, ppc_vsr_t *xb)   \
{                                                                  \
    ppc_vsr_t t = { };                                             \
    int i;                                                         \
                                                                   \
    for (i = 0; i < nels; i++) {                                   \
        t.tfld = stp##_to_##ttp(xb->sfld, 1, &env->fp_status);     \
        if (unlikely(stp##_is_signaling_nan(xb->sfld,              \
                                            &env->fp_status))) {   \
            float_invalid_op_vxsnan(env, GETPC());                 \
            t.tfld = ttp##_snan_to_qnan(t.tfld);                   \
        }                                                          \
        if (sfprf) {                                               \
            helper_compute_fprf_##ttp(env, t.tfld);                \
        }                                                          \
    }                                                              \
                                                                   \
    *xt = t;                                                       \
    do_float_check_status(env, GETPC());                           \
}

VSX_CVT_FP_TO_FP_HP(xscvdphp, 1, float64, float16, VsrD(0), VsrH(3), 1)
VSX_CVT_FP_TO_FP_HP(xscvhpdp, 1, float16, float64, VsrH(3), VsrD(0), 1)
VSX_CVT_FP_TO_FP_HP(xvcvsphp, 4, float32, float16, VsrW(i), VsrH(2 * i + 1), 0)
VSX_CVT_FP_TO_FP_HP(xvcvhpsp, 4, float16, float32, VsrH(2 * i + 1), VsrW(i), 0)
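/*
 * The half-precision values handled above live in the odd 16-bit
 * halfwords (VsrH(2 * i + 1)) of each word element, and the extra "1"
 * argument to the float16 conversion routines selects IEEE
 * half-precision format rather than the alternative (non-IEEE)
 * encoding.
 */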
/*
 * xscvqpdp isn't using VSX_CVT_FP_TO_FP() because xscvqpdpo will be
 * added to this later.
 */
void helper_xscvqpdp(CPUPPCState *env, uint32_t opcode,
                     ppc_vsr_t *xt, ppc_vsr_t *xb)
{
    ppc_vsr_t t = { };
    float_status tstat;

    tstat = env->fp_status;
    if (unlikely(Rc(opcode) != 0)) {
        tstat.float_rounding_mode = float_round_to_odd;
    }

    t.VsrD(0) = float128_to_float64(xb->f128, &tstat);
    env->fp_status.float_exception_flags |= tstat.float_exception_flags;
    if (unlikely(float128_is_signaling_nan(xb->f128, &tstat))) {
        float_invalid_op_vxsnan(env, GETPC());
        t.VsrD(0) = float64_snan_to_qnan(t.VsrD(0));
    }
    helper_compute_fprf_float64(env, t.VsrD(0));

    *xt = t;
    do_float_check_status(env, GETPC());
}
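/*
 * In the DP-biased exponent terms used below, 897 corresponds to the
 * single-precision minimum normal exponent (-126 + 1023), so operands
 * with exp < 897 are tiny for single precision and must be
 * denormalized by hand.
 */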
uint64_t helper_xscvdpspn(CPUPPCState *env, uint64_t xb)
{
    uint64_t result, sign, exp, frac;

    float_status tstat = env->fp_status;
    set_float_exception_flags(0, &tstat);

    sign = extract64(xb, 63, 1);
    exp = extract64(xb, 52, 11);
    frac = extract64(xb, 0, 52) | 0x10000000000000ULL;

    if (unlikely(exp == 0 && extract64(frac, 0, 52) != 0)) {
        /* DP denormal operand. */
        /* Exponent override to DP min exp. */
        exp = 1;
        /* Implicit bit override to 0. */
        frac = deposit64(frac, 53, 1, 0);
    }

    if (unlikely(exp < 897 && frac != 0)) {
        /* SP tiny operand. */
        if (897 - exp > 63) {
            frac = 0;
        } else {
            /* Denormalize until exp = SP min exp. */
            frac >>= (897 - exp);
        }
        /* Exponent override to SP min exp - 1. */
        exp = 896;
    }

    result = sign << 31;
    result |= extract64(exp, 10, 1) << 30;
    result |= extract64(exp, 0, 7) << 23;
    result |= extract64(frac, 29, 23);

    /* hardware replicates result to both words of the doubleword result. */
    return (result << 32) | result;
}
uint64_t helper_xscvspdpn(CPUPPCState *env, uint64_t xb)
{
    float_status tstat = env->fp_status;
    set_float_exception_flags(0, &tstat);

    return float32_to_float64(xb >> 32, &tstat);
}
/*
 * VSX_CVT_FP_TO_INT - VSX floating point to integer conversion
 *   op    - instruction mnemonic
 *   nels  - number of elements (1, 2 or 4)
 *   stp   - source type (float32 or float64)
 *   ttp   - target type (int32, uint32, int64 or uint64)
 *   sfld  - source vsr_t field
 *   tfld  - target vsr_t field
 *   rnan  - resulting NaN
 */
#define VSX_CVT_FP_TO_INT(op, nels, stp, ttp, sfld, tfld, rnan)              \
void helper_##op(CPUPPCState *env, ppc_vsr_t *xt, ppc_vsr_t *xb)             \
{                                                                            \
    int all_flags = env->fp_status.float_exception_flags, flags;             \
    ppc_vsr_t t = *xt;                                                       \
    int i;                                                                   \
                                                                             \
    for (i = 0; i < nels; i++) {                                             \
        env->fp_status.float_exception_flags = 0;                            \
        t.tfld = stp##_to_##ttp##_round_to_zero(xb->sfld, &env->fp_status);  \
        flags = env->fp_status.float_exception_flags;                        \
        if (unlikely(flags & float_flag_invalid)) {                          \
            float_invalid_cvt(env, 0, GETPC(), stp##_classify(xb->sfld));    \
            t.tfld = rnan;                                                   \
        }                                                                    \
        all_flags |= flags;                                                  \
    }                                                                        \
                                                                             \
    *xt = t;                                                                 \
    env->fp_status.float_exception_flags = all_flags;                       \
    do_float_check_status(env, GETPC());                                     \
}
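/*
 * The exception flags are cleared before each element's conversion so
 * that an invalid conversion can be detected and its result patched to
 * rnan per element; all_flags then re-accumulates the flags for the
 * whole vector before status is checked once at the end.
 */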
VSX_CVT_FP_TO_INT(xscvdpsxds, 1, float64, int64, VsrD(0), VsrD(0), \
                  0x8000000000000000ULL)
VSX_CVT_FP_TO_INT(xscvdpsxws, 1, float64, int32, VsrD(0), VsrW(1), \
                  0x80000000U)
VSX_CVT_FP_TO_INT(xscvdpuxds, 1, float64, uint64, VsrD(0), VsrD(0), 0ULL)
VSX_CVT_FP_TO_INT(xscvdpuxws, 1, float64, uint32, VsrD(0), VsrW(1), 0U)
VSX_CVT_FP_TO_INT(xvcvdpsxds, 2, float64, int64, VsrD(i), VsrD(i), \
                  0x8000000000000000ULL)
VSX_CVT_FP_TO_INT(xvcvdpsxws, 2, float64, int32, VsrD(i), VsrW(2 * i), \
                  0x80000000U)
VSX_CVT_FP_TO_INT(xvcvdpuxds, 2, float64, uint64, VsrD(i), VsrD(i), 0ULL)
VSX_CVT_FP_TO_INT(xvcvdpuxws, 2, float64, uint32, VsrD(i), VsrW(2 * i), 0U)
VSX_CVT_FP_TO_INT(xvcvspsxds, 2, float32, int64, VsrW(2 * i), VsrD(i), \
                  0x8000000000000000ULL)
VSX_CVT_FP_TO_INT(xvcvspsxws, 4, float32, int32, VsrW(i), VsrW(i), 0x80000000U)
VSX_CVT_FP_TO_INT(xvcvspuxds, 2, float32, uint64, VsrW(2 * i), VsrD(i), 0ULL)
VSX_CVT_FP_TO_INT(xvcvspuxws, 4, float32, uint32, VsrW(i), VsrW(i), 0U)
/*
 * VSX_CVT_FP_TO_INT_VECTOR - VSX floating point to integer conversion
 *   op    - instruction mnemonic
 *   stp   - source type (float32 or float64)
 *   ttp   - target type (int32, uint32, int64 or uint64)
 *   sfld  - source vsr_t field
 *   tfld  - target vsr_t field
 *   rnan  - resulting NaN
 */
#define VSX_CVT_FP_TO_INT_VECTOR(op, stp, ttp, sfld, tfld, rnan)             \
void helper_##op(CPUPPCState *env, uint32_t opcode,                          \
                 ppc_vsr_t *xt, ppc_vsr_t *xb)                               \
{                                                                            \
    ppc_vsr_t t = { };                                                       \
                                                                             \
    t.tfld = stp##_to_##ttp##_round_to_zero(xb->sfld, &env->fp_status);      \
    if (env->fp_status.float_exception_flags & float_flag_invalid) {         \
        float_invalid_cvt(env, 0, GETPC(), stp##_classify(xb->sfld));        \
        t.tfld = rnan;                                                       \
    }                                                                        \
                                                                             \
    *xt = t;                                                                 \
    do_float_check_status(env, GETPC());                                     \
}
VSX_CVT_FP_TO_INT_VECTOR(xscvqpsdz, float128, int64, f128, VsrD(0),          \
                         0x8000000000000000ULL)

VSX_CVT_FP_TO_INT_VECTOR(xscvqpswz, float128, int32, f128, VsrD(0),          \
                         0xffffffff80000000ULL)
VSX_CVT_FP_TO_INT_VECTOR(xscvqpudz, float128, uint64, f128, VsrD(0), 0x0ULL)
VSX_CVT_FP_TO_INT_VECTOR(xscvqpuwz, float128, uint32, f128, VsrD(0), 0x0ULL)
/*
 * VSX_CVT_INT_TO_FP - VSX integer to floating point conversion
 *   op    - instruction mnemonic
 *   nels  - number of elements (1, 2 or 4)
 *   stp   - source type (int32, uint32, int64 or uint64)
 *   ttp   - target type (float32 or float64)
 *   sfld  - source vsr_t field
 *   tfld  - target vsr_t field
 *   jdef  - definition of the j index (i or 2*i)
 *   sfprf - set FPRF
 */
#define VSX_CVT_INT_TO_FP(op, nels, stp, ttp, sfld, tfld, sfprf, r2sp)  \
void helper_##op(CPUPPCState *env, ppc_vsr_t *xt, ppc_vsr_t *xb)        \
{                                                                       \
    ppc_vsr_t t = *xt;                                                  \
    int i;                                                              \
                                                                        \
    for (i = 0; i < nels; i++) {                                        \
        t.tfld = stp##_to_##ttp(xb->sfld, &env->fp_status);             \
        if (r2sp) {                                                     \
            t.tfld = helper_frsp(env, t.tfld);                          \
        }                                                               \
        if (sfprf) {                                                    \
            helper_compute_fprf_float64(env, t.tfld);                   \
        }                                                               \
    }                                                                   \
                                                                        \
    *xt = t;                                                            \
    do_float_check_status(env, GETPC());                                \
}
VSX_CVT_INT_TO_FP(xscvsxddp, 1, int64, float64, VsrD(0), VsrD(0), 1, 0)
VSX_CVT_INT_TO_FP(xscvuxddp, 1, uint64, float64, VsrD(0), VsrD(0), 1, 0)
VSX_CVT_INT_TO_FP(xscvsxdsp, 1, int64, float64, VsrD(0), VsrD(0), 1, 1)
VSX_CVT_INT_TO_FP(xscvuxdsp, 1, uint64, float64, VsrD(0), VsrD(0), 1, 1)
VSX_CVT_INT_TO_FP(xvcvsxddp, 2, int64, float64, VsrD(i), VsrD(i), 0, 0)
VSX_CVT_INT_TO_FP(xvcvuxddp, 2, uint64, float64, VsrD(i), VsrD(i), 0, 0)
VSX_CVT_INT_TO_FP(xvcvsxwdp, 2, int32, float64, VsrW(2 * i), VsrD(i), 0, 0)
VSX_CVT_INT_TO_FP(xvcvuxwdp, 2, uint64, float64, VsrW(2 * i), VsrD(i), 0, 0)
VSX_CVT_INT_TO_FP(xvcvsxdsp, 2, int64, float32, VsrD(i), VsrW(2 * i), 0, 0)
VSX_CVT_INT_TO_FP(xvcvuxdsp, 2, uint64, float32, VsrD(i), VsrW(2 * i), 0, 0)
VSX_CVT_INT_TO_FP(xvcvsxwsp, 4, int32, float32, VsrW(i), VsrW(i), 0, 0)
VSX_CVT_INT_TO_FP(xvcvuxwsp, 4, uint32, float32, VsrW(i), VsrW(i), 0, 0)
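/*
 * The r2sp forms above convert to double precision first and then
 * round to single precision with helper_frsp.  For 64-bit integer
 * sources this rounds twice, which in rare corner cases can differ
 * from a single direct rounding to float32.
 */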
/*
 * VSX_CVT_INT_TO_FP_VECTOR - VSX integer to floating point conversion
 *   op    - instruction mnemonic
 *   stp   - source type (int32, uint32, int64 or uint64)
 *   ttp   - target type (float32, float64 or float128)
 *   sfld  - source vsr_t field
 *   tfld  - target vsr_t field
 */
#define VSX_CVT_INT_TO_FP_VECTOR(op, stp, ttp, sfld, tfld)              \
void helper_##op(CPUPPCState *env, uint32_t opcode,                     \
                 ppc_vsr_t *xt, ppc_vsr_t *xb)                          \
{                                                                       \
    ppc_vsr_t t = *xt;                                                  \
                                                                        \
    t.tfld = stp##_to_##ttp(xb->sfld, &env->fp_status);                 \
    helper_compute_fprf_##ttp(env, t.tfld);                             \
                                                                        \
    *xt = t;                                                            \
    do_float_check_status(env, GETPC());                                \
}

VSX_CVT_INT_TO_FP_VECTOR(xscvsdqp, int64, float128, VsrD(0), f128)
VSX_CVT_INT_TO_FP_VECTOR(xscvudqp, uint64, float128, VsrD(0), f128)
2941 * For "use current rounding mode", define a value that will not be
2942 * one of the existing rounding model enums.
2944 #define FLOAT_ROUND_CURRENT (float_round_nearest_even + float_round_down + \
2945 float_round_up + float_round_to_zero)
/*
 * VSX_ROUND - VSX floating point round
 *   op    - instruction mnemonic
 *   nels  - number of elements (1, 2 or 4)
 *   tp    - type (float32 or float64)
 *   fld   - vsr_t field (VsrD(*) or VsrW(*))
 *   rmode - rounding mode
 *   sfprf - set FPRF
 */
#define VSX_ROUND(op, nels, tp, fld, rmode, sfprf)                     \
void helper_##op(CPUPPCState *env, ppc_vsr_t *xt, ppc_vsr_t *xb)       \
{                                                                      \
    ppc_vsr_t t = *xt;                                                 \
    int i;                                                             \
    FloatRoundMode curr_rounding_mode;                                 \
                                                                       \
    if (rmode != FLOAT_ROUND_CURRENT) {                                \
        curr_rounding_mode = get_float_rounding_mode(&env->fp_status); \
        set_float_rounding_mode(rmode, &env->fp_status);               \
    }                                                                  \
                                                                       \
    for (i = 0; i < nels; i++) {                                       \
        if (unlikely(tp##_is_signaling_nan(xb->fld,                    \
                                           &env->fp_status))) {        \
            float_invalid_op_vxsnan(env, GETPC());                     \
            t.fld = tp##_snan_to_qnan(xb->fld);                        \
        } else {                                                       \
            t.fld = tp##_round_to_int(xb->fld, &env->fp_status);       \
        }                                                              \
        if (sfprf) {                                                   \
            helper_compute_fprf_float64(env, t.fld);                   \
        }                                                              \
    }                                                                  \
                                                                       \
    /*                                                                 \
     * If this is not a "use current rounding mode" instruction,       \
     * then inhibit setting of the XX bit and restore rounding         \
     * mode from FPSCR                                                 \
     */                                                                \
    if (rmode != FLOAT_ROUND_CURRENT) {                                \
        set_float_rounding_mode(curr_rounding_mode, &env->fp_status);  \
        env->fp_status.float_exception_flags &= ~float_flag_inexact;   \
    }                                                                  \
                                                                       \
    *xt = t;                                                           \
    do_float_check_status(env, GETPC());                               \
}
VSX_ROUND(xsrdpi, 1, float64, VsrD(0), float_round_ties_away, 1)
VSX_ROUND(xsrdpic, 1, float64, VsrD(0), FLOAT_ROUND_CURRENT, 1)
VSX_ROUND(xsrdpim, 1, float64, VsrD(0), float_round_down, 1)
VSX_ROUND(xsrdpip, 1, float64, VsrD(0), float_round_up, 1)
VSX_ROUND(xsrdpiz, 1, float64, VsrD(0), float_round_to_zero, 1)

VSX_ROUND(xvrdpi, 2, float64, VsrD(i), float_round_ties_away, 0)
VSX_ROUND(xvrdpic, 2, float64, VsrD(i), FLOAT_ROUND_CURRENT, 0)
VSX_ROUND(xvrdpim, 2, float64, VsrD(i), float_round_down, 0)
VSX_ROUND(xvrdpip, 2, float64, VsrD(i), float_round_up, 0)
VSX_ROUND(xvrdpiz, 2, float64, VsrD(i), float_round_to_zero, 0)

VSX_ROUND(xvrspi, 4, float32, VsrW(i), float_round_ties_away, 0)
VSX_ROUND(xvrspic, 4, float32, VsrW(i), FLOAT_ROUND_CURRENT, 0)
VSX_ROUND(xvrspim, 4, float32, VsrW(i), float_round_down, 0)
VSX_ROUND(xvrspip, 4, float32, VsrW(i), float_round_up, 0)
VSX_ROUND(xvrspiz, 4, float32, VsrW(i), float_round_to_zero, 0)
uint64_t helper_xsrsp(CPUPPCState *env, uint64_t xb)
{
    helper_reset_fpstatus(env);

    uint64_t xt = helper_frsp(env, xb);

    helper_compute_fprf_float64(env, xt);
    do_float_check_status(env, GETPC());
    return xt;
}
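/*
 * XXPERM treats xa:xt as a 32-byte source vector: permute-control
 * indexes 0-15 select bytes of xa and 16-31 select bytes of xt.  The
 * indexed (xxpermr) form complements the index (31 - idx) to permute
 * from the other end.
 */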
#define VSX_XXPERM(op, indexed)                                       \
void helper_##op(CPUPPCState *env, ppc_vsr_t *xt,                     \
                 ppc_vsr_t *xa, ppc_vsr_t *pcv)                       \
{                                                                     \
    ppc_vsr_t t = *xt;                                                \
    int i, idx;                                                       \
                                                                      \
    for (i = 0; i < 16; i++) {                                        \
        idx = pcv->VsrB(i) & 0x1F;                                    \
        if (indexed) {                                                \
            idx = 31 - idx;                                           \
        }                                                             \
        t.VsrB(i) = (idx <= 15) ? xa->VsrB(idx)                       \
                                : xt->VsrB(idx - 16);                 \
    }                                                                 \
    *xt = t;                                                          \
}

VSX_XXPERM(xxperm, 0)
VSX_XXPERM(xxpermr, 1)
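/*
 * xvxsigsp extracts the 23-bit significand of each single-precision
 * element, ORing in the implicit integer bit only for normal numbers
 * (exponent neither 0 nor 255).
 */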
void helper_xvxsigsp(CPUPPCState *env, ppc_vsr_t *xt, ppc_vsr_t *xb)
{
    ppc_vsr_t t = { };
    uint32_t exp, i, fraction;

    for (i = 0; i < 4; i++) {
        exp = (xb->VsrW(i) >> 23) & 0xFF;
        fraction = xb->VsrW(i) & 0x7FFFFF;
        if (exp != 0 && exp != 255) {
            t.VsrW(i) = fraction | 0x00800000;
        } else {
            t.VsrW(i) = fraction;
        }
    }
    *xt = t;
}
/*
 * VSX_TEST_DC - VSX floating point test data class
 *   op      - instruction mnemonic
 *   nels    - number of elements (1, 2 or 4)
 *   xbn     - VSR register number
 *   tp      - type (float32 or float64)
 *   fld     - vsr_t field (VsrD(*) or VsrW(*))
 *   tfld    - target vsr_t field (VsrD(*) or VsrW(*))
 *   fld_max - target field max
 *   scrf    - set result in CR and FPCC
 */
#define VSX_TEST_DC(op, nels, xbn, tp, fld, tfld, fld_max, scrf)  \
void helper_##op(CPUPPCState *env, uint32_t opcode)         \
{                                                           \
    ppc_vsr_t *xt = &env->vsr[xT(opcode)];                  \
    ppc_vsr_t *xb = &env->vsr[xbn];                         \
    ppc_vsr_t t = { };                                      \
    uint32_t i, sign, dcmx;                                 \
    uint32_t cc, match = 0;                                 \
                                                            \
    if (!scrf) {                                            \
        dcmx = DCMX_XV(opcode);                             \
    } else {                                                \
        t = *xt;                                            \
        dcmx = DCMX(opcode);                                \
    }                                                       \
                                                            \
    for (i = 0; i < nels; i++) {                            \
        sign = tp##_is_neg(xb->fld);                        \
        if (tp##_is_any_nan(xb->fld)) {                     \
            match = extract32(dcmx, 6, 1);                  \
        } else if (tp##_is_infinity(xb->fld)) {             \
            match = extract32(dcmx, 4 + !sign, 1);          \
        } else if (tp##_is_zero(xb->fld)) {                 \
            match = extract32(dcmx, 2 + !sign, 1);          \
        } else if (tp##_is_zero_or_denormal(xb->fld)) {     \
            match = extract32(dcmx, 0 + !sign, 1);          \
        }                                                   \
                                                            \
        if (scrf) {                                         \
            cc = sign << CRF_LT_BIT | match << CRF_EQ_BIT;  \
            env->fpscr &= ~FP_FPCC;                         \
            env->fpscr |= cc << FPSCR_FPCC;                 \
            env->crf[BF(opcode)] = cc;                      \
        } else {                                            \
            t.tfld = match ? fld_max : 0;                   \
        }                                                   \
        match = 0;                                          \
    }                                                       \
    if (!scrf) {                                            \
        *xt = t;                                            \
    }                                                       \
}
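/*
 * DCMX bit assignments used above: bit 6 matches any NaN, bits 5/4
 * match +/- infinity, bits 3/2 match +/- zero, and bits 1/0 match
 * +/- denormal.
 */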
VSX_TEST_DC(xvtstdcdp, 2, xB(opcode), float64, VsrD(i), VsrD(i), UINT64_MAX, 0)
VSX_TEST_DC(xvtstdcsp, 4, xB(opcode), float32, VsrW(i), VsrW(i), UINT32_MAX, 0)
VSX_TEST_DC(xststdcdp, 1, xB(opcode), float64, VsrD(0), VsrD(0), 0, 1)
VSX_TEST_DC(xststdcqp, 1, (rB(opcode) + 32), float128, f128, VsrD(0), 0, 1)
void helper_xststdcsp(CPUPPCState *env, uint32_t opcode, ppc_vsr_t *xb)
{
    uint32_t dcmx, sign, exp;
    uint32_t cc, match = 0, not_sp = 0;

    dcmx = DCMX(opcode);
    exp = (xb->VsrD(0) >> 52) & 0x7FF;

    sign = float64_is_neg(xb->VsrD(0));
    if (float64_is_any_nan(xb->VsrD(0))) {
        match = extract32(dcmx, 6, 1);
    } else if (float64_is_infinity(xb->VsrD(0))) {
        match = extract32(dcmx, 4 + !sign, 1);
    } else if (float64_is_zero(xb->VsrD(0))) {
        match = extract32(dcmx, 2 + !sign, 1);
    } else if (float64_is_zero_or_denormal(xb->VsrD(0)) ||
               (exp > 0 && exp < 0x381)) {
        match = extract32(dcmx, 0 + !sign, 1);
    }

    not_sp = !float64_eq(xb->VsrD(0),
                         float32_to_float64(
                             float64_to_float32(xb->VsrD(0), &env->fp_status),
                             &env->fp_status), &env->fp_status);

    cc = sign << CRF_LT_BIT | match << CRF_EQ_BIT | not_sp << CRF_SO_BIT;
    env->fpscr &= ~FP_FPCC;
    env->fpscr |= cc << FPSCR_FPCC;
    env->crf[BF(opcode)] = cc;
}
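/*
 * For xsrqpi, R and RMC select the rounding mode: R=0/RMC=0 is
 * round-to-nearest-away, R=0/RMC=3 uses the current FPSCR rounding
 * mode, and R=1 picks one of the four IEEE modes directly from RMC.
 */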
void helper_xsrqpi(CPUPPCState *env, uint32_t opcode,
                   ppc_vsr_t *xt, ppc_vsr_t *xb)
{
    ppc_vsr_t t = { };
    uint8_t r = Rrm(opcode);
    uint8_t ex = Rc(opcode);
    uint8_t rmc = RMC(opcode);
    uint8_t rmode = 0;
    float_status tstat;

    helper_reset_fpstatus(env);

    if (r == 0 && rmc == 0) {
        rmode = float_round_ties_away;
    } else if (r == 0 && rmc == 0x3) {
        rmode = fpscr_rn;
    } else if (r == 1) {
        switch (rmc) {
        case 0:
            rmode = float_round_nearest_even;
            break;
        case 1:
            rmode = float_round_to_zero;
            break;
        case 2:
            rmode = float_round_up;
            break;
        case 3:
            rmode = float_round_down;
            break;
        default:
            abort();
        }
    }

    tstat = env->fp_status;
    set_float_exception_flags(0, &tstat);
    set_float_rounding_mode(rmode, &tstat);
    t.f128 = float128_round_to_int(xb->f128, &tstat);
    env->fp_status.float_exception_flags |= tstat.float_exception_flags;

    if (unlikely(tstat.float_exception_flags & float_flag_invalid)) {
        if (float128_is_signaling_nan(xb->f128, &tstat)) {
            float_invalid_op_vxsnan(env, GETPC());
            t.f128 = float128_snan_to_qnan(t.f128);
        }
    }

    if (ex == 0 && (tstat.float_exception_flags & float_flag_inexact)) {
        env->fp_status.float_exception_flags &= ~float_flag_inexact;
    }

    helper_compute_fprf_float128(env, t.f128);
    do_float_check_status(env, GETPC());
    *xt = t;
}
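/*
 * xsrqpxp rounds the quad-precision operand to double-extended (80-bit)
 * precision by converting to floatx80 and back, picking up the
 * requested rounding mode in tstat on the way.
 */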
void helper_xsrqpxp(CPUPPCState *env, uint32_t opcode,
                    ppc_vsr_t *xt, ppc_vsr_t *xb)
{
    ppc_vsr_t t = { };
    uint8_t r = Rrm(opcode);
    uint8_t rmc = RMC(opcode);
    uint8_t rmode = 0;
    floatx80 round_res;
    float_status tstat;

    helper_reset_fpstatus(env);

    if (r == 0 && rmc == 0) {
        rmode = float_round_ties_away;
    } else if (r == 0 && rmc == 0x3) {
        rmode = fpscr_rn;
    } else if (r == 1) {
        switch (rmc) {
        case 0:
            rmode = float_round_nearest_even;
            break;
        case 1:
            rmode = float_round_to_zero;
            break;
        case 2:
            rmode = float_round_up;
            break;
        case 3:
            rmode = float_round_down;
            break;
        default:
            abort();
        }
    }

    tstat = env->fp_status;
    set_float_exception_flags(0, &tstat);
    set_float_rounding_mode(rmode, &tstat);
    round_res = float128_to_floatx80(xb->f128, &tstat);
    t.f128 = floatx80_to_float128(round_res, &tstat);
    env->fp_status.float_exception_flags |= tstat.float_exception_flags;

    if (unlikely(tstat.float_exception_flags & float_flag_invalid)) {
        if (float128_is_signaling_nan(xb->f128, &tstat)) {
            float_invalid_op_vxsnan(env, GETPC());
            t.f128 = float128_snan_to_qnan(t.f128);
        }
    }

    helper_compute_fprf_float128(env, t.f128);
    *xt = t;
    do_float_check_status(env, GETPC());
}
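/*
 * For the quad-precision helpers below, a nonzero Rc field selects the
 * "o" (round-to-odd) form of the instruction, implemented by forcing
 * float_round_to_odd in the temporary float_status.
 */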
void helper_xssqrtqp(CPUPPCState *env, uint32_t opcode,
                     ppc_vsr_t *xt, ppc_vsr_t *xb)
{
    ppc_vsr_t t = { };
    float_status tstat;

    helper_reset_fpstatus(env);

    tstat = env->fp_status;
    if (unlikely(Rc(opcode) != 0)) {
        tstat.float_rounding_mode = float_round_to_odd;
    }

    set_float_exception_flags(0, &tstat);
    t.f128 = float128_sqrt(xb->f128, &tstat);
    env->fp_status.float_exception_flags |= tstat.float_exception_flags;

    if (unlikely(tstat.float_exception_flags & float_flag_invalid)) {
        if (float128_is_signaling_nan(xb->f128, &tstat)) {
            float_invalid_op_vxsnan(env, GETPC());
            t.f128 = float128_snan_to_qnan(xb->f128);
        } else if (float128_is_quiet_nan(xb->f128, &tstat)) {
            t.f128 = xb->f128;
        } else if (float128_is_neg(xb->f128) && !float128_is_zero(xb->f128)) {
            float_invalid_op_vxsqrt(env, 1, GETPC());
            t.f128 = float128_default_nan(&env->fp_status);
        }
    }

    helper_compute_fprf_float128(env, t.f128);
    *xt = t;
    do_float_check_status(env, GETPC());
}
void helper_xssubqp(CPUPPCState *env, uint32_t opcode,
                    ppc_vsr_t *xt, ppc_vsr_t *xa, ppc_vsr_t *xb)
{
    ppc_vsr_t t = *xt;
    float_status tstat;

    helper_reset_fpstatus(env);

    tstat = env->fp_status;
    if (unlikely(Rc(opcode) != 0)) {
        tstat.float_rounding_mode = float_round_to_odd;
    }

    set_float_exception_flags(0, &tstat);
    t.f128 = float128_sub(xa->f128, xb->f128, &tstat);
    env->fp_status.float_exception_flags |= tstat.float_exception_flags;

    if (unlikely(tstat.float_exception_flags & float_flag_invalid)) {
        float_invalid_op_addsub(env, tstat.float_exception_flags, 1, GETPC());
    }

    helper_compute_fprf_float128(env, t.f128);
    *xt = t;
    do_float_check_status(env, GETPC());
}