/*
 *  PowerPC floating point and SPE emulation helpers for QEMU.
 *
 *  Copyright (c) 2003-2007 Jocelyn Mayer
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"
#include "cpu.h"
#include "exec/helper-proto.h"
#include "exec/exec-all.h"
#include "internal.h"
#include "fpu/softfloat.h"
static inline float128 float128_snan_to_qnan(float128 x)
{
    float128 r;

    r.high = x.high | 0x0000800000000000;
    r.low = x.low;
    return r;
}

#define float64_snan_to_qnan(x) ((x) | 0x0008000000000000ULL)
#define float32_snan_to_qnan(x) ((x) | 0x00400000)
#define float16_snan_to_qnan(x) ((x) | 0x0200)
static inline bool fp_exceptions_enabled(CPUPPCState *env)
{
#ifdef CONFIG_USER_ONLY
    return true;
#else
    return (env->msr & ((1U << MSR_FE0) | (1U << MSR_FE1))) != 0;
#endif
}
/*****************************************************************************/
/* Floating point operations helpers */

/*
 * This is the non-arithmetic conversion that happens e.g. on loads.
 * In the Power ISA pseudocode, this is called DOUBLE.
 */
uint64_t helper_todouble(uint32_t arg)
{
    uint32_t abs_arg = arg & 0x7fffffff;
    uint64_t ret;

    if (likely(abs_arg >= 0x00800000)) {
        if (unlikely(extract32(arg, 23, 8) == 0xff)) {
            /* Inf or NaN. */
            ret  = (uint64_t)extract32(arg, 31, 1) << 63;
            ret |= (uint64_t)0x7ff << 52;
            ret |= (uint64_t)extract32(arg, 0, 23) << 29;
        } else {
            /* Normalized operand. */
            ret  = (uint64_t)extract32(arg, 30, 2) << 62;
            ret |= ((extract32(arg, 30, 1) ^ 1) * (uint64_t)7) << 59;
            ret |= (uint64_t)extract32(arg, 0, 30) << 29;
        }
    } else {
        /* Zero or Denormalized operand. */
        ret = (uint64_t)extract32(arg, 31, 1) << 63;
        if (unlikely(abs_arg != 0)) {
            /*
             * Denormalized operand.
             * Shift fraction so that the msb is in the implicit bit position.
             * Thus, shift is in the range [1:23].
             */
            int shift = clz32(abs_arg) - 8;
            /*
             * The first 3 terms compute the float64 exponent.  We then bias
             * this result by -1 so that we can swallow the implicit bit below.
             */
            int exp = -126 - shift + 1023 - 1;

            ret |= (uint64_t)exp << 52;
            ret += (uint64_t)abs_arg << (52 - 23 + shift);
        }
    }
    return ret;
}
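/*
 * Editor's illustrative sketch (not part of the original helper set): a
 * hypothetical sanity check of the DOUBLE conversion above, assuming the
 * usual IEEE-754 encodings (1.0f == 0x3f800000, 1.0 == 0x3ff0000000000000).
 * Kept compiled out so it cannot affect the build.
 */
#if 0
static void helper_todouble_example(void)
{
    /* A normalized single-precision value widens to the equivalent double. */
    g_assert(helper_todouble(0x3f800000u) == 0x3ff0000000000000ull);
    /* The sign of zero is preserved. */
    g_assert(helper_todouble(0x80000000u) == 0x8000000000000000ull);
}
#endif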
/*
 * This is the non-arithmetic conversion that happens e.g. on stores.
 * In the Power ISA pseudocode, this is called SINGLE.
 */
uint32_t helper_tosingle(uint64_t arg)
{
    int exp = extract64(arg, 52, 11);
    uint32_t ret;

    if (likely(exp > 896)) {
        /* No denormalization required (includes Inf, NaN). */
        ret  = extract64(arg, 62, 2) << 30;
        ret |= extract64(arg, 29, 30);
    } else {
        /*
         * Zero or Denormal result.  If the exponent is in bounds for
         * a single-precision denormal result, extract the proper
         * bits.  If the input is not zero, and the exponent is out of
         * bounds, then the result is undefined; this underflows to
         * zero anyway.
         */
        ret = extract64(arg, 63, 1) << 31;
        if (unlikely(exp >= 874)) {
            /* Denormal result. */
            ret |= ((1ULL << 52) | extract64(arg, 0, 52)) >> (896 + 30 - exp);
        }
    }
    return ret;
}
static inline int ppc_float32_get_unbiased_exp(float32 f)
{
    return ((f >> 23) & 0xFF) - 127;
}

static inline int ppc_float64_get_unbiased_exp(float64 f)
{
    return ((f >> 52) & 0x7FF) - 1023;
}
/* Classify a floating-point number. */
enum {
    is_normal   = 1,
    is_zero     = 2,
    is_denormal = 4,
    is_inf      = 8,
    is_qnan     = 16,
    is_snan     = 32,
    is_neg      = 64,
};

#define COMPUTE_CLASS(tp)                                      \
static int tp##_classify(tp arg)                               \
{                                                              \
    int ret = tp##_is_neg(arg) * is_neg;                       \
    if (unlikely(tp##_is_any_nan(arg))) {                      \
        float_status dummy = { };  /* snan_bit_is_one = 0 */   \
        ret |= (tp##_is_signaling_nan(arg, &dummy)             \
                ? is_snan : is_qnan);                          \
    } else if (unlikely(tp##_is_infinity(arg))) {              \
        ret |= is_inf;                                         \
    } else if (tp##_is_zero(arg)) {                            \
        ret |= is_zero;                                        \
    } else if (tp##_is_zero_or_denormal(arg)) {                \
        ret |= is_denormal;                                    \
    } else {                                                   \
        ret |= is_normal;                                      \
    }                                                          \
    return ret;                                                \
}

COMPUTE_CLASS(float16)
COMPUTE_CLASS(float32)
COMPUTE_CLASS(float64)
COMPUTE_CLASS(float128)
static void set_fprf_from_class(CPUPPCState *env, int class)
{
    static const uint8_t fprf[6][2] = {
        { 0x04, 0x08 },  /* normalized */
        { 0x02, 0x12 },  /* zero */
        { 0x14, 0x18 },  /* denormalized */
        { 0x05, 0x09 },  /* infinity */
        { 0x11, 0x11 },  /* qnan */
        { 0x00, 0x00 },  /* snan -- flags are undefined */
    };
    bool isneg = class & is_neg;

    env->fpscr &= ~FP_FPRF;
    env->fpscr |= fprf[ctz32(class)][isneg] << FPSCR_FPRF;
}
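/*
 * Worked example: a positive zero classifies as is_zero (bit 1), so
 * ctz32(class) == 1 and fprf[1][0] == 0x02 is placed in FPSCR[FPRF]
 * ("+ zero"); negative zero additionally has is_neg set and selects
 * fprf[1][1] == 0x12 ("- zero").
 */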
#define COMPUTE_FPRF(tp)                                \
void helper_compute_fprf_##tp(CPUPPCState *env, tp arg) \
{                                                       \
    set_fprf_from_class(env, tp##_classify(arg));       \
}

COMPUTE_FPRF(float16)
COMPUTE_FPRF(float32)
COMPUTE_FPRF(float64)
COMPUTE_FPRF(float128)
/* Floating-point invalid operations exception */
static void finish_invalid_op_excp(CPUPPCState *env, int op, uintptr_t retaddr)
{
    /* Update the floating-point invalid operation summary */
    env->fpscr |= FP_VX;
    /* Update the floating-point exception summary */
    env->fpscr |= FP_FX;
    if (fpscr_ve != 0) {
        /* Update the floating-point enabled exception summary */
        env->fpscr |= FP_FEX;
        if (fp_exceptions_enabled(env)) {
            raise_exception_err_ra(env, POWERPC_EXCP_PROGRAM,
                                   POWERPC_EXCP_FP | op, retaddr);
        }
    }
}

static void finish_invalid_op_arith(CPUPPCState *env, int op,
                                    bool set_fpcc, uintptr_t retaddr)
{
    env->fpscr &= ~(FP_FR | FP_FI);
    if (fpscr_ve == 0) {
        if (set_fpcc) {
            env->fpscr &= ~FP_FPCC;
            env->fpscr |= (FP_C | FP_FU);
        }
    }
    finish_invalid_op_excp(env, op, retaddr);
}
static void float_invalid_op_vxsnan(CPUPPCState *env, uintptr_t retaddr)
{
    env->fpscr |= FP_VXSNAN;
    finish_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, retaddr);
}
/* Magnitude subtraction of infinities */
static void float_invalid_op_vxisi(CPUPPCState *env, bool set_fpcc,
                                   uintptr_t retaddr)
{
    env->fpscr |= FP_VXISI;
    finish_invalid_op_arith(env, POWERPC_EXCP_FP_VXISI, set_fpcc, retaddr);
}

/* Division of infinity by infinity */
static void float_invalid_op_vxidi(CPUPPCState *env, bool set_fpcc,
                                   uintptr_t retaddr)
{
    env->fpscr |= FP_VXIDI;
    finish_invalid_op_arith(env, POWERPC_EXCP_FP_VXIDI, set_fpcc, retaddr);
}

/* Division of zero by zero */
static void float_invalid_op_vxzdz(CPUPPCState *env, bool set_fpcc,
                                   uintptr_t retaddr)
{
    env->fpscr |= FP_VXZDZ;
    finish_invalid_op_arith(env, POWERPC_EXCP_FP_VXZDZ, set_fpcc, retaddr);
}

/* Multiplication of zero by infinity */
static void float_invalid_op_vximz(CPUPPCState *env, bool set_fpcc,
                                   uintptr_t retaddr)
{
    env->fpscr |= FP_VXIMZ;
    finish_invalid_op_arith(env, POWERPC_EXCP_FP_VXIMZ, set_fpcc, retaddr);
}

/* Square root of a negative number */
static void float_invalid_op_vxsqrt(CPUPPCState *env, bool set_fpcc,
                                    uintptr_t retaddr)
{
    env->fpscr |= FP_VXSQRT;
    finish_invalid_op_arith(env, POWERPC_EXCP_FP_VXSQRT, set_fpcc, retaddr);
}
/* Ordered comparison of NaN */
static void float_invalid_op_vxvc(CPUPPCState *env, bool set_fpcc,
                                  uintptr_t retaddr)
{
    env->fpscr |= FP_VXVC;
    if (set_fpcc) {
        env->fpscr &= ~FP_FPCC;
        env->fpscr |= (FP_C | FP_FU);
    }
    /* Update the floating-point invalid operation summary */
    env->fpscr |= FP_VX;
    /* Update the floating-point exception summary */
    env->fpscr |= FP_FX;
    /* We must update the target FPR before raising the exception */
    if (fpscr_ve != 0) {
        CPUState *cs = env_cpu(env);

        cs->exception_index = POWERPC_EXCP_PROGRAM;
        env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_VXVC;
        /* Update the floating-point enabled exception summary */
        env->fpscr |= FP_FEX;
        /* Exception is deferred */
    }
}
/* Invalid conversion */
static void float_invalid_op_vxcvi(CPUPPCState *env, bool set_fpcc,
                                   uintptr_t retaddr)
{
    env->fpscr |= FP_VXCVI;
    env->fpscr &= ~(FP_FR | FP_FI);
    if (fpscr_ve == 0) {
        if (set_fpcc) {
            env->fpscr &= ~FP_FPCC;
            env->fpscr |= (FP_C | FP_FU);
        }
    }
    finish_invalid_op_excp(env, POWERPC_EXCP_FP_VXCVI, retaddr);
}
static inline void float_zero_divide_excp(CPUPPCState *env, uintptr_t raddr)
{
    env->fpscr |= FP_ZX;
    env->fpscr &= ~(FP_FR | FP_FI);
    /* Update the floating-point exception summary */
    env->fpscr |= FP_FX;
    if (fpscr_ze != 0) {
        /* Update the floating-point enabled exception summary */
        env->fpscr |= FP_FEX;
        if (fp_exceptions_enabled(env)) {
            raise_exception_err_ra(env, POWERPC_EXCP_PROGRAM,
                                   POWERPC_EXCP_FP | POWERPC_EXCP_FP_ZX,
                                   raddr);
        }
    }
}
static inline void float_overflow_excp(CPUPPCState *env)
{
    CPUState *cs = env_cpu(env);

    env->fpscr |= FP_OX;
    /* Update the floating-point exception summary */
    env->fpscr |= FP_FX;
    if (fpscr_oe != 0) {
        /* XXX: should adjust the result */
        /* Update the floating-point enabled exception summary */
        env->fpscr |= FP_FEX;
        /* We must update the target FPR before raising the exception */
        cs->exception_index = POWERPC_EXCP_PROGRAM;
        env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_OX;
    } else {
        env->fpscr |= FP_XX;
        env->fpscr |= FP_FI;
    }
}
static inline void float_underflow_excp(CPUPPCState *env)
{
    CPUState *cs = env_cpu(env);

    env->fpscr |= FP_UX;
    /* Update the floating-point exception summary */
    env->fpscr |= FP_FX;
    if (fpscr_ue != 0) {
        /* XXX: should adjust the result */
        /* Update the floating-point enabled exception summary */
        env->fpscr |= FP_FEX;
        /* We must update the target FPR before raising the exception */
        cs->exception_index = POWERPC_EXCP_PROGRAM;
        env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_UX;
    }
}
static inline void float_inexact_excp(CPUPPCState *env)
{
    CPUState *cs = env_cpu(env);

    env->fpscr |= FP_FI;
    env->fpscr |= FP_XX;
    /* Update the floating-point exception summary */
    env->fpscr |= FP_FX;
    if (fpscr_xe != 0) {
        /* Update the floating-point enabled exception summary */
        env->fpscr |= FP_FEX;
        /* We must update the target FPR before raising the exception */
        cs->exception_index = POWERPC_EXCP_PROGRAM;
        env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_XX;
    }
}
void helper_fpscr_clrbit(CPUPPCState *env, uint32_t bit)
{
    uint32_t mask = 1u << bit;
    if (env->fpscr & mask) {
        ppc_store_fpscr(env, env->fpscr & ~(target_ulong)mask);
    }
}

void helper_fpscr_setbit(CPUPPCState *env, uint32_t bit)
{
    uint32_t mask = 1u << bit;
    if (!(env->fpscr & mask)) {
        ppc_store_fpscr(env, env->fpscr | mask);
    }
}
void helper_store_fpscr(CPUPPCState *env, uint64_t val, uint32_t nibbles)
{
    target_ulong mask = 0;
    int i;

    /* TODO: push this extension back to translation time */
    for (i = 0; i < sizeof(target_ulong) * 2; i++) {
        if (nibbles & (1 << i)) {
            mask |= (target_ulong) 0xf << (4 * i);
        }
    }
    val = (val & mask) | (env->fpscr & ~mask);
    ppc_store_fpscr(env, val);
}
void helper_fpscr_check_status(CPUPPCState *env)
{
    CPUState *cs = env_cpu(env);
    target_ulong fpscr = env->fpscr;
    int error = 0;

    if ((fpscr & FP_OX) && (fpscr & FP_OE)) {
        error = POWERPC_EXCP_FP_OX;
    } else if ((fpscr & FP_UX) && (fpscr & FP_UE)) {
        error = POWERPC_EXCP_FP_UX;
    } else if ((fpscr & FP_XX) && (fpscr & FP_XE)) {
        error = POWERPC_EXCP_FP_XX;
    } else if ((fpscr & FP_ZX) && (fpscr & FP_ZE)) {
        error = POWERPC_EXCP_FP_ZX;
    } else if (fpscr & FP_VE) {
        if (fpscr & FP_VXSOFT) {
            error = POWERPC_EXCP_FP_VXSOFT;
        } else if (fpscr & FP_VXSNAN) {
            error = POWERPC_EXCP_FP_VXSNAN;
        } else if (fpscr & FP_VXISI) {
            error = POWERPC_EXCP_FP_VXISI;
        } else if (fpscr & FP_VXIDI) {
            error = POWERPC_EXCP_FP_VXIDI;
        } else if (fpscr & FP_VXZDZ) {
            error = POWERPC_EXCP_FP_VXZDZ;
        } else if (fpscr & FP_VXIMZ) {
            error = POWERPC_EXCP_FP_VXIMZ;
        } else if (fpscr & FP_VXVC) {
            error = POWERPC_EXCP_FP_VXVC;
        } else if (fpscr & FP_VXSQRT) {
            error = POWERPC_EXCP_FP_VXSQRT;
        } else if (fpscr & FP_VXCVI) {
            error = POWERPC_EXCP_FP_VXCVI;
        } else {
            return;
        }
    } else {
        return;
    }
    cs->exception_index = POWERPC_EXCP_PROGRAM;
    env->error_code = error | POWERPC_EXCP_FP;
    /* Deferred floating-point exception after target FPSCR update */
    if (fp_exceptions_enabled(env)) {
        raise_exception_err_ra(env, cs->exception_index,
                               env->error_code, GETPC());
    }
}
static void do_float_check_status(CPUPPCState *env, uintptr_t raddr)
{
    CPUState *cs = env_cpu(env);
    int status = get_float_exception_flags(&env->fp_status);

    if (status & float_flag_overflow) {
        float_overflow_excp(env);
    } else if (status & float_flag_underflow) {
        float_underflow_excp(env);
    }
    if (status & float_flag_inexact) {
        float_inexact_excp(env);
    } else {
        env->fpscr &= ~FP_FI; /* clear the FPSCR[FI] bit */
    }

    if (cs->exception_index == POWERPC_EXCP_PROGRAM &&
        (env->error_code & POWERPC_EXCP_FP)) {
        /* Deferred floating-point exception after target FPR update */
        if (fp_exceptions_enabled(env)) {
            raise_exception_err_ra(env, cs->exception_index,
                                   env->error_code, raddr);
        }
    }
}

void helper_float_check_status(CPUPPCState *env)
{
    do_float_check_status(env, GETPC());
}
void helper_reset_fpstatus(CPUPPCState *env)
{
    set_float_exception_flags(0, &env->fp_status);
}
static void float_invalid_op_addsub(CPUPPCState *env, int flags,
                                    bool set_fpcc, uintptr_t retaddr)
{
    if (flags & float_flag_invalid_isi) {
        float_invalid_op_vxisi(env, set_fpcc, retaddr);
    } else if (flags & float_flag_invalid_snan) {
        float_invalid_op_vxsnan(env, retaddr);
    }
}
float64 helper_fadd(CPUPPCState *env, float64 arg1, float64 arg2)
{
    float64 ret = float64_add(arg1, arg2, &env->fp_status);
    int flags = get_float_exception_flags(&env->fp_status);

    if (unlikely(flags & float_flag_invalid)) {
        float_invalid_op_addsub(env, flags, 1, GETPC());
    }

    return ret;
}

float64 helper_fsub(CPUPPCState *env, float64 arg1, float64 arg2)
{
    float64 ret = float64_sub(arg1, arg2, &env->fp_status);
    int flags = get_float_exception_flags(&env->fp_status);

    if (unlikely(flags & float_flag_invalid)) {
        float_invalid_op_addsub(env, flags, 1, GETPC());
    }

    return ret;
}
static void float_invalid_op_mul(CPUPPCState *env, int flags,
                                 bool set_fprc, uintptr_t retaddr)
{
    if (flags & float_flag_invalid_imz) {
        float_invalid_op_vximz(env, set_fprc, retaddr);
    } else if (flags & float_flag_invalid_snan) {
        float_invalid_op_vxsnan(env, retaddr);
    }
}
float64 helper_fmul(CPUPPCState *env, float64 arg1, float64 arg2)
{
    float64 ret = float64_mul(arg1, arg2, &env->fp_status);
    int flags = get_float_exception_flags(&env->fp_status);

    if (unlikely(flags & float_flag_invalid)) {
        float_invalid_op_mul(env, flags, 1, GETPC());
    }

    return ret;
}
static void float_invalid_op_div(CPUPPCState *env, int flags,
                                 bool set_fprc, uintptr_t retaddr)
{
    if (flags & float_flag_invalid_idi) {
        float_invalid_op_vxidi(env, set_fprc, retaddr);
    } else if (flags & float_flag_invalid_zdz) {
        float_invalid_op_vxzdz(env, set_fprc, retaddr);
    } else if (flags & float_flag_invalid_snan) {
        float_invalid_op_vxsnan(env, retaddr);
    }
}
float64 helper_fdiv(CPUPPCState *env, float64 arg1, float64 arg2)
{
    float64 ret = float64_div(arg1, arg2, &env->fp_status);
    int flags = get_float_exception_flags(&env->fp_status);

    if (unlikely(flags & float_flag_invalid)) {
        float_invalid_op_div(env, flags, 1, GETPC());
    }
    if (unlikely(flags & float_flag_divbyzero)) {
        float_zero_divide_excp(env, GETPC());
    }

    return ret;
}
static uint64_t float_invalid_cvt(CPUPPCState *env, int flags,
                                  uint64_t ret, uint64_t ret_nan,
                                  bool set_fprc, uintptr_t retaddr)
{
    /*
     * VXCVI is different from most in that it sets two exception bits,
     * VXCVI and VXSNAN for an SNaN input.
     */
    if (flags & float_flag_invalid_snan) {
        env->fpscr |= FP_VXSNAN;
    }
    float_invalid_op_vxcvi(env, set_fprc, retaddr);

    return flags & float_flag_invalid_cvti ? ret : ret_nan;
}
#define FPU_FCTI(op, cvt, nanval)                                      \
uint64_t helper_##op(CPUPPCState *env, float64 arg)                    \
{                                                                      \
    uint64_t ret = float64_to_##cvt(arg, &env->fp_status);             \
    int flags = get_float_exception_flags(&env->fp_status);            \
    if (unlikely(flags & float_flag_invalid)) {                        \
        ret = float_invalid_cvt(env, flags, ret, nanval, 1, GETPC());  \
    }                                                                  \
    return ret;                                                        \
}

FPU_FCTI(fctiw, int32, 0x80000000U)
FPU_FCTI(fctiwz, int32_round_to_zero, 0x80000000U)
FPU_FCTI(fctiwu, uint32, 0x00000000U)
FPU_FCTI(fctiwuz, uint32_round_to_zero, 0x00000000U)
FPU_FCTI(fctid, int64, 0x8000000000000000ULL)
FPU_FCTI(fctidz, int64_round_to_zero, 0x8000000000000000ULL)
FPU_FCTI(fctidu, uint64, 0x0000000000000000ULL)
FPU_FCTI(fctiduz, uint64_round_to_zero, 0x0000000000000000ULL)
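/*
 * For reference, FPU_FCTI(fctiw, int32, 0x80000000U) above expands to
 * roughly:
 *
 *   uint64_t helper_fctiw(CPUPPCState *env, float64 arg)
 *   {
 *       uint64_t ret = float64_to_int32(arg, &env->fp_status);
 *       ...
 *   }
 *
 * i.e. the conversion uses the rounding mode currently programmed into
 * fp_status, and the nanval argument (0x80000000U here) is the result
 * substituted for a NaN source operand.
 */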
#define FPU_FCFI(op, cvtr, is_single)                       \
uint64_t helper_##op(CPUPPCState *env, uint64_t arg)        \
{                                                           \
    CPU_DoubleU farg;                                       \
                                                            \
    if (is_single) {                                        \
        float32 tmp = cvtr(arg, &env->fp_status);           \
        farg.d = float32_to_float64(tmp, &env->fp_status);  \
    } else {                                                \
        farg.d = cvtr(arg, &env->fp_status);                \
    }                                                       \
    do_float_check_status(env, GETPC());                    \
    return farg.ll;                                         \
}

FPU_FCFI(fcfid, int64_to_float64, 0)
FPU_FCFI(fcfids, int64_to_float32, 1)
FPU_FCFI(fcfidu, uint64_to_float64, 0)
FPU_FCFI(fcfidus, uint64_to_float32, 1)
static uint64_t do_fri(CPUPPCState *env, uint64_t arg,
                       FloatRoundMode rounding_mode)
{
    FloatRoundMode old_rounding_mode = get_float_rounding_mode(&env->fp_status);
    int flags;

    set_float_rounding_mode(rounding_mode, &env->fp_status);
    arg = float64_round_to_int(arg, &env->fp_status);
    set_float_rounding_mode(old_rounding_mode, &env->fp_status);

    flags = get_float_exception_flags(&env->fp_status);
    if (flags & float_flag_invalid_snan) {
        float_invalid_op_vxsnan(env, GETPC());
    }

    /* fri* does not set FPSCR[XX] */
    set_float_exception_flags(flags & ~float_flag_inexact, &env->fp_status);
    do_float_check_status(env, GETPC());

    return arg;
}
uint64_t helper_frin(CPUPPCState *env, uint64_t arg)
{
    return do_fri(env, arg, float_round_ties_away);
}

uint64_t helper_friz(CPUPPCState *env, uint64_t arg)
{
    return do_fri(env, arg, float_round_to_zero);
}

uint64_t helper_frip(CPUPPCState *env, uint64_t arg)
{
    return do_fri(env, arg, float_round_up);
}

uint64_t helper_frim(CPUPPCState *env, uint64_t arg)
{
    return do_fri(env, arg, float_round_down);
}
static void float_invalid_op_madd(CPUPPCState *env, int flags,
                                  bool set_fpcc, uintptr_t retaddr)
{
    if (flags & float_flag_invalid_imz) {
        float_invalid_op_vximz(env, set_fpcc, retaddr);
    } else {
        float_invalid_op_addsub(env, flags, set_fpcc, retaddr);
    }
}
static float64 do_fmadd(CPUPPCState *env, float64 a, float64 b,
                        float64 c, int madd_flags, uintptr_t retaddr)
{
    float64 ret = float64_muladd(a, b, c, madd_flags, &env->fp_status);
    int flags = get_float_exception_flags(&env->fp_status);

    if (unlikely(flags & float_flag_invalid)) {
        float_invalid_op_madd(env, flags, 1, retaddr);
    }
    return ret;
}
#define FPU_FMADD(op, madd_flags)                                    \
    uint64_t helper_##op(CPUPPCState *env, uint64_t arg1,            \
                         uint64_t arg2, uint64_t arg3)               \
    { return do_fmadd(env, arg1, arg2, arg3, madd_flags, GETPC()); }

#define MADD_FLGS 0
#define MSUB_FLGS float_muladd_negate_c
#define NMADD_FLGS float_muladd_negate_result
#define NMSUB_FLGS (float_muladd_negate_c | float_muladd_negate_result)

FPU_FMADD(fmadd, MADD_FLGS)
FPU_FMADD(fnmadd, NMADD_FLGS)
FPU_FMADD(fmsub, MSUB_FLGS)
FPU_FMADD(fnmsub, NMSUB_FLGS)
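/*
 * Informal reading of the flag combinations above, following softfloat's
 * float64_muladd() semantics:
 *   MADD_FLGS  (0)                         computes   a * b + c   (fmadd)
 *   MSUB_FLGS  (negate_c)                  computes   a * b - c   (fmsub)
 *   NMADD_FLGS (negate_result)             computes -(a * b + c)  (fnmadd)
 *   NMSUB_FLGS (negate_c | negate_result)  computes -(a * b - c)  (fnmsub)
 */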
static uint64_t do_frsp(CPUPPCState *env, uint64_t arg, uintptr_t retaddr)
{
    float32 f32 = float64_to_float32(arg, &env->fp_status);
    int flags = get_float_exception_flags(&env->fp_status);

    if (unlikely(flags & float_flag_invalid_snan)) {
        float_invalid_op_vxsnan(env, retaddr);
    }
    return helper_todouble(f32);
}

uint64_t helper_frsp(CPUPPCState *env, uint64_t arg)
{
    return do_frsp(env, arg, GETPC());
}
static void float_invalid_op_sqrt(CPUPPCState *env, int flags,
                                  bool set_fpcc, uintptr_t retaddr)
{
    if (unlikely(flags & float_flag_invalid_sqrt)) {
        float_invalid_op_vxsqrt(env, set_fpcc, retaddr);
    } else if (unlikely(flags & float_flag_invalid_snan)) {
        float_invalid_op_vxsnan(env, retaddr);
    }
}
float64 helper_fsqrt(CPUPPCState *env, float64 arg)
{
    float64 ret = float64_sqrt(arg, &env->fp_status);
    int flags = get_float_exception_flags(&env->fp_status);

    if (unlikely(flags & float_flag_invalid)) {
        float_invalid_op_sqrt(env, flags, 1, GETPC());
    }

    return ret;
}
float64 helper_fre(CPUPPCState *env, float64 arg)
{
    /* "Estimate" the reciprocal with actual division. */
    float64 ret = float64_div(float64_one, arg, &env->fp_status);
    int status = get_float_exception_flags(&env->fp_status);

    if (unlikely(status)) {
        if (status & float_flag_invalid) {
            if (float64_is_signaling_nan(arg, &env->fp_status)) {
                /* sNaN reciprocal */
                float_invalid_op_vxsnan(env, GETPC());
            }
        }
        if (status & float_flag_divbyzero) {
            float_zero_divide_excp(env, GETPC());
            /* For FPSCR.ZE == 0, the result is 1/2. */
            ret = float64_set_sign(float64_half, float64_is_neg(arg));
        }
    }

    return ret;
}
uint64_t helper_fres(CPUPPCState *env, uint64_t arg)
{
    CPU_DoubleU farg;
    float32 f32;

    farg.ll = arg;

    if (unlikely(float64_is_signaling_nan(farg.d, &env->fp_status))) {
        /* sNaN reciprocal */
        float_invalid_op_vxsnan(env, GETPC());
    }
    farg.d = float64_div(float64_one, farg.d, &env->fp_status);
    f32 = float64_to_float32(farg.d, &env->fp_status);
    farg.d = float32_to_float64(f32, &env->fp_status);

    return farg.ll;
}
/* frsqrte - frsqrte. */
float64 helper_frsqrte(CPUPPCState *env, float64 arg)
{
    /* "Estimate" the reciprocal with actual division. */
    float64 rets = float64_sqrt(arg, &env->fp_status);
    float64 retd = float64_div(float64_one, rets, &env->fp_status);
    int flags = get_float_exception_flags(&env->fp_status);

    if (unlikely(flags & float_flag_invalid)) {
        float_invalid_op_sqrt(env, flags, 1, GETPC());
    }
    if (unlikely(flags & float_flag_divbyzero)) {
        /* Reciprocal of (square root of) zero. */
        float_zero_divide_excp(env, GETPC());
    }

    return retd;
}
uint64_t helper_fsel(CPUPPCState *env, uint64_t arg1, uint64_t arg2,
                     uint64_t arg3)
{
    CPU_DoubleU farg1;

    farg1.ll = arg1;

    if ((!float64_is_neg(farg1.d) || float64_is_zero(farg1.d)) &&
        !float64_is_any_nan(farg1.d)) {
        return arg2;
    } else {
        return arg3;
    }
}
uint32_t helper_ftdiv(uint64_t fra, uint64_t frb)
{
    int fe_flag = 0;
    int fg_flag = 0;

    if (unlikely(float64_is_infinity(fra) ||
                 float64_is_infinity(frb) ||
                 float64_is_zero(frb))) {
        fe_flag = 1;
        fg_flag = 1;
    } else {
        int e_a = ppc_float64_get_unbiased_exp(fra);
        int e_b = ppc_float64_get_unbiased_exp(frb);

        if (unlikely(float64_is_any_nan(fra) ||
                     float64_is_any_nan(frb))) {
            fe_flag = 1;
        } else if ((e_b <= -1022) || (e_b >= 1021)) {
            fe_flag = 1;
        } else if (!float64_is_zero(fra) &&
                   (((e_a - e_b) >= 1023) ||
                    ((e_a - e_b) <= -1021) ||
                    (e_a <= -970))) {
            fe_flag = 1;
        }

        if (unlikely(float64_is_zero_or_denormal(frb))) {
            /* XB is not zero because of the above check and */
            /* so must be denormalized.                      */
            fg_flag = 1;
        }
    }

    return 0x8 | (fg_flag ? 4 : 0) | (fe_flag ? 2 : 0);
}
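/*
 * Note on the return value: bit 3 of the CR field image is always set,
 * fg_flag lands in bit 2, fe_flag in bit 1, and bit 0 is always clear --
 * exactly the 0x8 | (fg ? 4 : 0) | (fe ? 2 : 0) expression above.
 */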
uint32_t helper_ftsqrt(uint64_t frb)
{
    int fe_flag = 0;
    int fg_flag = 0;

    if (unlikely(float64_is_infinity(frb) || float64_is_zero(frb))) {
        fe_flag = 1;
        fg_flag = 1;
    } else {
        int e_b = ppc_float64_get_unbiased_exp(frb);

        if (unlikely(float64_is_any_nan(frb))) {
            fe_flag = 1;
        } else if (unlikely(float64_is_zero(frb))) {
            fe_flag = 1;
        } else if (unlikely(float64_is_neg(frb))) {
            fe_flag = 1;
        } else if (!float64_is_zero(frb) && (e_b <= (-1022 + 52))) {
            fe_flag = 1;
        }

        if (unlikely(float64_is_zero_or_denormal(frb))) {
            /* XB is not zero because of the above check and */
            /* therefore must be denormalized.               */
            fg_flag = 1;
        }
    }

    return 0x8 | (fg_flag ? 4 : 0) | (fe_flag ? 2 : 0);
}
void helper_fcmpu(CPUPPCState *env, uint64_t arg1, uint64_t arg2,
                  uint32_t crfD)
{
    CPU_DoubleU farg1, farg2;
    uint32_t ret = 0;

    farg1.ll = arg1;
    farg2.ll = arg2;

    if (unlikely(float64_is_any_nan(farg1.d) ||
                 float64_is_any_nan(farg2.d))) {
        ret = 0x01UL;
    } else if (float64_lt(farg1.d, farg2.d, &env->fp_status)) {
        ret = 0x08UL;
    } else if (!float64_le(farg1.d, farg2.d, &env->fp_status)) {
        ret = 0x04UL;
    } else {
        ret = 0x02UL;
    }

    env->fpscr &= ~FP_FPCC;
    env->fpscr |= ret << FPSCR_FPCC;
    env->crf[crfD] = ret;
    if (unlikely(ret == 0x01UL
                 && (float64_is_signaling_nan(farg1.d, &env->fp_status) ||
                     float64_is_signaling_nan(farg2.d, &env->fp_status)))) {
        /* sNaN comparison */
        float_invalid_op_vxsnan(env, GETPC());
    }
}
void helper_fcmpo(CPUPPCState *env, uint64_t arg1, uint64_t arg2,
                  uint32_t crfD)
{
    CPU_DoubleU farg1, farg2;
    uint32_t ret = 0;

    farg1.ll = arg1;
    farg2.ll = arg2;

    if (unlikely(float64_is_any_nan(farg1.d) ||
                 float64_is_any_nan(farg2.d))) {
        ret = 0x01UL;
    } else if (float64_lt(farg1.d, farg2.d, &env->fp_status)) {
        ret = 0x08UL;
    } else if (!float64_le(farg1.d, farg2.d, &env->fp_status)) {
        ret = 0x04UL;
    } else {
        ret = 0x02UL;
    }

    env->fpscr &= ~FP_FPCC;
    env->fpscr |= ret << FPSCR_FPCC;
    env->crf[crfD] = (uint32_t) ret;
    if (unlikely(ret == 0x01UL)) {
        float_invalid_op_vxvc(env, 1, GETPC());
        if (float64_is_signaling_nan(farg1.d, &env->fp_status) ||
            float64_is_signaling_nan(farg2.d, &env->fp_status)) {
            /* sNaN comparison */
            float_invalid_op_vxsnan(env, GETPC());
        }
    }
}
/* Single-precision floating-point conversions */
static inline uint32_t efscfsi(CPUPPCState *env, uint32_t val)
{
    CPU_FloatU u;

    u.f = int32_to_float32(val, &env->vec_status);

    return u.l;
}

static inline uint32_t efscfui(CPUPPCState *env, uint32_t val)
{
    CPU_FloatU u;

    u.f = uint32_to_float32(val, &env->vec_status);

    return u.l;
}

static inline int32_t efsctsi(CPUPPCState *env, uint32_t val)
{
    CPU_FloatU u;

    u.l = val;
    /* NaN are not treated the same way IEEE 754 does */
    if (unlikely(float32_is_quiet_nan(u.f, &env->vec_status))) {
        return 0;
    }

    return float32_to_int32(u.f, &env->vec_status);
}

static inline uint32_t efsctui(CPUPPCState *env, uint32_t val)
{
    CPU_FloatU u;

    u.l = val;
    /* NaN are not treated the same way IEEE 754 does */
    if (unlikely(float32_is_quiet_nan(u.f, &env->vec_status))) {
        return 0;
    }

    return float32_to_uint32(u.f, &env->vec_status);
}

static inline uint32_t efsctsiz(CPUPPCState *env, uint32_t val)
{
    CPU_FloatU u;

    u.l = val;
    /* NaN are not treated the same way IEEE 754 does */
    if (unlikely(float32_is_quiet_nan(u.f, &env->vec_status))) {
        return 0;
    }

    return float32_to_int32_round_to_zero(u.f, &env->vec_status);
}

static inline uint32_t efsctuiz(CPUPPCState *env, uint32_t val)
{
    CPU_FloatU u;

    u.l = val;
    /* NaN are not treated the same way IEEE 754 does */
    if (unlikely(float32_is_quiet_nan(u.f, &env->vec_status))) {
        return 0;
    }

    return float32_to_uint32_round_to_zero(u.f, &env->vec_status);
}

static inline uint32_t efscfsf(CPUPPCState *env, uint32_t val)
{
    CPU_FloatU u;
    float32 tmp;

    u.f = int32_to_float32(val, &env->vec_status);
    tmp = int64_to_float32(1ULL << 32, &env->vec_status);
    u.f = float32_div(u.f, tmp, &env->vec_status);

    return u.l;
}

static inline uint32_t efscfuf(CPUPPCState *env, uint32_t val)
{
    CPU_FloatU u;
    float32 tmp;

    u.f = uint32_to_float32(val, &env->vec_status);
    tmp = uint64_to_float32(1ULL << 32, &env->vec_status);
    u.f = float32_div(u.f, tmp, &env->vec_status);

    return u.l;
}

static inline uint32_t efsctsf(CPUPPCState *env, uint32_t val)
{
    CPU_FloatU u;
    float32 tmp;

    u.l = val;
    /* NaN are not treated the same way IEEE 754 does */
    if (unlikely(float32_is_quiet_nan(u.f, &env->vec_status))) {
        return 0;
    }
    tmp = uint64_to_float32(1ULL << 32, &env->vec_status);
    u.f = float32_mul(u.f, tmp, &env->vec_status);

    return float32_to_int32(u.f, &env->vec_status);
}

static inline uint32_t efsctuf(CPUPPCState *env, uint32_t val)
{
    CPU_FloatU u;
    float32 tmp;

    u.l = val;
    /* NaN are not treated the same way IEEE 754 does */
    if (unlikely(float32_is_quiet_nan(u.f, &env->vec_status))) {
        return 0;
    }
    tmp = uint64_to_float32(1ULL << 32, &env->vec_status);
    u.f = float32_mul(u.f, tmp, &env->vec_status);

    return float32_to_uint32(u.f, &env->vec_status);
}
#define HELPER_SPE_SINGLE_CONV(name)                          \
    uint32_t helper_e##name(CPUPPCState *env, uint32_t val)   \
    {                                                         \
        return e##name(env, val);                             \
    }

HELPER_SPE_SINGLE_CONV(fscfsi);
HELPER_SPE_SINGLE_CONV(fscfui);
HELPER_SPE_SINGLE_CONV(fscfuf);
HELPER_SPE_SINGLE_CONV(fscfsf);
HELPER_SPE_SINGLE_CONV(fsctsi);
HELPER_SPE_SINGLE_CONV(fsctui);
HELPER_SPE_SINGLE_CONV(fsctsiz);
HELPER_SPE_SINGLE_CONV(fsctuiz);
HELPER_SPE_SINGLE_CONV(fsctsf);
HELPER_SPE_SINGLE_CONV(fsctuf);
#define HELPER_SPE_VECTOR_CONV(name)                           \
    uint64_t helper_ev##name(CPUPPCState *env, uint64_t val)   \
    {                                                          \
        return ((uint64_t)e##name(env, val >> 32) << 32) |     \
            (uint64_t)e##name(env, val);                       \
    }

HELPER_SPE_VECTOR_CONV(fscfsi);
HELPER_SPE_VECTOR_CONV(fscfui);
HELPER_SPE_VECTOR_CONV(fscfuf);
HELPER_SPE_VECTOR_CONV(fscfsf);
HELPER_SPE_VECTOR_CONV(fsctsi);
HELPER_SPE_VECTOR_CONV(fsctui);
HELPER_SPE_VECTOR_CONV(fsctsiz);
HELPER_SPE_VECTOR_CONV(fsctuiz);
HELPER_SPE_VECTOR_CONV(fsctsf);
HELPER_SPE_VECTOR_CONV(fsctuf);
/* Single-precision floating-point arithmetic */
static inline uint32_t efsadd(CPUPPCState *env, uint32_t op1, uint32_t op2)
{
    CPU_FloatU u1, u2;

    u1.l = op1;
    u2.l = op2;
    u1.f = float32_add(u1.f, u2.f, &env->vec_status);
    return u1.l;
}

static inline uint32_t efssub(CPUPPCState *env, uint32_t op1, uint32_t op2)
{
    CPU_FloatU u1, u2;

    u1.l = op1;
    u2.l = op2;
    u1.f = float32_sub(u1.f, u2.f, &env->vec_status);
    return u1.l;
}

static inline uint32_t efsmul(CPUPPCState *env, uint32_t op1, uint32_t op2)
{
    CPU_FloatU u1, u2;

    u1.l = op1;
    u2.l = op2;
    u1.f = float32_mul(u1.f, u2.f, &env->vec_status);
    return u1.l;
}

static inline uint32_t efsdiv(CPUPPCState *env, uint32_t op1, uint32_t op2)
{
    CPU_FloatU u1, u2;

    u1.l = op1;
    u2.l = op2;
    u1.f = float32_div(u1.f, u2.f, &env->vec_status);
    return u1.l;
}

#define HELPER_SPE_SINGLE_ARITH(name)                                       \
    uint32_t helper_e##name(CPUPPCState *env, uint32_t op1, uint32_t op2)   \
    {                                                                       \
        return e##name(env, op1, op2);                                      \
    }

HELPER_SPE_SINGLE_ARITH(fsadd);
HELPER_SPE_SINGLE_ARITH(fssub);
HELPER_SPE_SINGLE_ARITH(fsmul);
HELPER_SPE_SINGLE_ARITH(fsdiv);
#define HELPER_SPE_VECTOR_ARITH(name)                                       \
    uint64_t helper_ev##name(CPUPPCState *env, uint64_t op1, uint64_t op2)  \
    {                                                                       \
        return ((uint64_t)e##name(env, op1 >> 32, op2 >> 32) << 32) |       \
            (uint64_t)e##name(env, op1, op2);                               \
    }

HELPER_SPE_VECTOR_ARITH(fsadd);
HELPER_SPE_VECTOR_ARITH(fssub);
HELPER_SPE_VECTOR_ARITH(fsmul);
HELPER_SPE_VECTOR_ARITH(fsdiv);
/* Single-precision floating-point comparisons */
static inline uint32_t efscmplt(CPUPPCState *env, uint32_t op1, uint32_t op2)
{
    CPU_FloatU u1, u2;

    u1.l = op1;
    u2.l = op2;
    return float32_lt(u1.f, u2.f, &env->vec_status) ? 4 : 0;
}

static inline uint32_t efscmpgt(CPUPPCState *env, uint32_t op1, uint32_t op2)
{
    CPU_FloatU u1, u2;

    u1.l = op1;
    u2.l = op2;
    return float32_le(u1.f, u2.f, &env->vec_status) ? 0 : 4;
}

static inline uint32_t efscmpeq(CPUPPCState *env, uint32_t op1, uint32_t op2)
{
    CPU_FloatU u1, u2;

    u1.l = op1;
    u2.l = op2;
    return float32_eq(u1.f, u2.f, &env->vec_status) ? 4 : 0;
}

static inline uint32_t efststlt(CPUPPCState *env, uint32_t op1, uint32_t op2)
{
    /* XXX: TODO: ignore special values (NaN, infinites, ...) */
    return efscmplt(env, op1, op2);
}

static inline uint32_t efststgt(CPUPPCState *env, uint32_t op1, uint32_t op2)
{
    /* XXX: TODO: ignore special values (NaN, infinites, ...) */
    return efscmpgt(env, op1, op2);
}

static inline uint32_t efststeq(CPUPPCState *env, uint32_t op1, uint32_t op2)
{
    /* XXX: TODO: ignore special values (NaN, infinites, ...) */
    return efscmpeq(env, op1, op2);
}

#define HELPER_SINGLE_SPE_CMP(name)                                         \
    uint32_t helper_e##name(CPUPPCState *env, uint32_t op1, uint32_t op2)   \
    {                                                                       \
        return e##name(env, op1, op2);                                      \
    }

HELPER_SINGLE_SPE_CMP(fststlt);
HELPER_SINGLE_SPE_CMP(fststgt);
HELPER_SINGLE_SPE_CMP(fststeq);
HELPER_SINGLE_SPE_CMP(fscmplt);
HELPER_SINGLE_SPE_CMP(fscmpgt);
HELPER_SINGLE_SPE_CMP(fscmpeq);
static inline uint32_t evcmp_merge(int t0, int t1)
{
    return (t0 << 3) | (t1 << 2) | ((t0 | t1) << 1) | (t0 & t1);
}
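/*
 * Worked example: evcmp_merge(1, 0) == 0b1010 -- bit 3 is the high-word
 * result, bit 2 the low-word result, bit 1 their OR and bit 0 their AND.
 */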
#define HELPER_VECTOR_SPE_CMP(name)                                         \
    uint32_t helper_ev##name(CPUPPCState *env, uint64_t op1, uint64_t op2)  \
    {                                                                       \
        return evcmp_merge(e##name(env, op1 >> 32, op2 >> 32),              \
                           e##name(env, op1, op2));                         \
    }

HELPER_VECTOR_SPE_CMP(fststlt);
HELPER_VECTOR_SPE_CMP(fststgt);
HELPER_VECTOR_SPE_CMP(fststeq);
HELPER_VECTOR_SPE_CMP(fscmplt);
HELPER_VECTOR_SPE_CMP(fscmpgt);
HELPER_VECTOR_SPE_CMP(fscmpeq);
/* Double-precision floating-point conversion */
uint64_t helper_efdcfsi(CPUPPCState *env, uint32_t val)
{
    CPU_DoubleU u;

    u.d = int32_to_float64(val, &env->vec_status);

    return u.ll;
}

uint64_t helper_efdcfsid(CPUPPCState *env, uint64_t val)
{
    CPU_DoubleU u;

    u.d = int64_to_float64(val, &env->vec_status);

    return u.ll;
}

uint64_t helper_efdcfui(CPUPPCState *env, uint32_t val)
{
    CPU_DoubleU u;

    u.d = uint32_to_float64(val, &env->vec_status);

    return u.ll;
}

uint64_t helper_efdcfuid(CPUPPCState *env, uint64_t val)
{
    CPU_DoubleU u;

    u.d = uint64_to_float64(val, &env->vec_status);

    return u.ll;
}

uint32_t helper_efdctsi(CPUPPCState *env, uint64_t val)
{
    CPU_DoubleU u;

    u.ll = val;
    /* NaN are not treated the same way IEEE 754 does */
    if (unlikely(float64_is_any_nan(u.d))) {
        return 0;
    }

    return float64_to_int32(u.d, &env->vec_status);
}

uint32_t helper_efdctui(CPUPPCState *env, uint64_t val)
{
    CPU_DoubleU u;

    u.ll = val;
    /* NaN are not treated the same way IEEE 754 does */
    if (unlikely(float64_is_any_nan(u.d))) {
        return 0;
    }

    return float64_to_uint32(u.d, &env->vec_status);
}

uint32_t helper_efdctsiz(CPUPPCState *env, uint64_t val)
{
    CPU_DoubleU u;

    u.ll = val;
    /* NaN are not treated the same way IEEE 754 does */
    if (unlikely(float64_is_any_nan(u.d))) {
        return 0;
    }

    return float64_to_int32_round_to_zero(u.d, &env->vec_status);
}

uint64_t helper_efdctsidz(CPUPPCState *env, uint64_t val)
{
    CPU_DoubleU u;

    u.ll = val;
    /* NaN are not treated the same way IEEE 754 does */
    if (unlikely(float64_is_any_nan(u.d))) {
        return 0;
    }

    return float64_to_int64_round_to_zero(u.d, &env->vec_status);
}

uint32_t helper_efdctuiz(CPUPPCState *env, uint64_t val)
{
    CPU_DoubleU u;

    u.ll = val;
    /* NaN are not treated the same way IEEE 754 does */
    if (unlikely(float64_is_any_nan(u.d))) {
        return 0;
    }

    return float64_to_uint32_round_to_zero(u.d, &env->vec_status);
}

uint64_t helper_efdctuidz(CPUPPCState *env, uint64_t val)
{
    CPU_DoubleU u;

    u.ll = val;
    /* NaN are not treated the same way IEEE 754 does */
    if (unlikely(float64_is_any_nan(u.d))) {
        return 0;
    }

    return float64_to_uint64_round_to_zero(u.d, &env->vec_status);
}

uint64_t helper_efdcfsf(CPUPPCState *env, uint32_t val)
{
    CPU_DoubleU u;
    float64 tmp;

    u.d = int32_to_float64(val, &env->vec_status);
    tmp = int64_to_float64(1ULL << 32, &env->vec_status);
    u.d = float64_div(u.d, tmp, &env->vec_status);

    return u.ll;
}

uint64_t helper_efdcfuf(CPUPPCState *env, uint32_t val)
{
    CPU_DoubleU u;
    float64 tmp;

    u.d = uint32_to_float64(val, &env->vec_status);
    tmp = int64_to_float64(1ULL << 32, &env->vec_status);
    u.d = float64_div(u.d, tmp, &env->vec_status);

    return u.ll;
}

uint32_t helper_efdctsf(CPUPPCState *env, uint64_t val)
{
    CPU_DoubleU u;
    float64 tmp;

    u.ll = val;
    /* NaN are not treated the same way IEEE 754 does */
    if (unlikely(float64_is_any_nan(u.d))) {
        return 0;
    }
    tmp = uint64_to_float64(1ULL << 32, &env->vec_status);
    u.d = float64_mul(u.d, tmp, &env->vec_status);

    return float64_to_int32(u.d, &env->vec_status);
}

uint32_t helper_efdctuf(CPUPPCState *env, uint64_t val)
{
    CPU_DoubleU u;
    float64 tmp;

    u.ll = val;
    /* NaN are not treated the same way IEEE 754 does */
    if (unlikely(float64_is_any_nan(u.d))) {
        return 0;
    }
    tmp = uint64_to_float64(1ULL << 32, &env->vec_status);
    u.d = float64_mul(u.d, tmp, &env->vec_status);

    return float64_to_uint32(u.d, &env->vec_status);
}

uint32_t helper_efscfd(CPUPPCState *env, uint64_t val)
{
    CPU_DoubleU u1;
    CPU_FloatU u2;

    u1.ll = val;
    u2.f = float64_to_float32(u1.d, &env->vec_status);

    return u2.l;
}

uint64_t helper_efdcfs(CPUPPCState *env, uint32_t val)
{
    CPU_DoubleU u2;
    CPU_FloatU u1;

    u1.l = val;
    u2.d = float32_to_float64(u1.f, &env->vec_status);

    return u2.ll;
}
/* Double-precision floating-point arithmetic */
uint64_t helper_efdadd(CPUPPCState *env, uint64_t op1, uint64_t op2)
{
    CPU_DoubleU u1, u2;

    u1.ll = op1;
    u2.ll = op2;
    u1.d = float64_add(u1.d, u2.d, &env->vec_status);
    return u1.ll;
}

uint64_t helper_efdsub(CPUPPCState *env, uint64_t op1, uint64_t op2)
{
    CPU_DoubleU u1, u2;

    u1.ll = op1;
    u2.ll = op2;
    u1.d = float64_sub(u1.d, u2.d, &env->vec_status);
    return u1.ll;
}

uint64_t helper_efdmul(CPUPPCState *env, uint64_t op1, uint64_t op2)
{
    CPU_DoubleU u1, u2;

    u1.ll = op1;
    u2.ll = op2;
    u1.d = float64_mul(u1.d, u2.d, &env->vec_status);
    return u1.ll;
}

uint64_t helper_efddiv(CPUPPCState *env, uint64_t op1, uint64_t op2)
{
    CPU_DoubleU u1, u2;

    u1.ll = op1;
    u2.ll = op2;
    u1.d = float64_div(u1.d, u2.d, &env->vec_status);
    return u1.ll;
}
/* Double-precision floating-point comparisons */
uint32_t helper_efdtstlt(CPUPPCState *env, uint64_t op1, uint64_t op2)
{
    CPU_DoubleU u1, u2;

    u1.ll = op1;
    u2.ll = op2;
    return float64_lt(u1.d, u2.d, &env->vec_status) ? 4 : 0;
}

uint32_t helper_efdtstgt(CPUPPCState *env, uint64_t op1, uint64_t op2)
{
    CPU_DoubleU u1, u2;

    u1.ll = op1;
    u2.ll = op2;
    return float64_le(u1.d, u2.d, &env->vec_status) ? 0 : 4;
}

uint32_t helper_efdtsteq(CPUPPCState *env, uint64_t op1, uint64_t op2)
{
    CPU_DoubleU u1, u2;

    u1.ll = op1;
    u2.ll = op2;
    return float64_eq_quiet(u1.d, u2.d, &env->vec_status) ? 4 : 0;
}

uint32_t helper_efdcmplt(CPUPPCState *env, uint64_t op1, uint64_t op2)
{
    /* XXX: TODO: test special values (NaN, infinites, ...) */
    return helper_efdtstlt(env, op1, op2);
}

uint32_t helper_efdcmpgt(CPUPPCState *env, uint64_t op1, uint64_t op2)
{
    /* XXX: TODO: test special values (NaN, infinites, ...) */
    return helper_efdtstgt(env, op1, op2);
}

uint32_t helper_efdcmpeq(CPUPPCState *env, uint64_t op1, uint64_t op2)
{
    /* XXX: TODO: test special values (NaN, infinites, ...) */
    return helper_efdtsteq(env, op1, op2);
}
#define float64_to_float64(x, env) x
/*
 * VSX_ADD_SUB - VSX floating point add/subtract
 *   name  - instruction mnemonic
 *   op    - operation (add or sub)
 *   nels  - number of elements (1, 2 or 4)
 *   tp    - type (float32 or float64)
 *   fld   - vsr_t field (VsrD(*) or VsrW(*))
 *   sfprf - set FPRF
 */
#define VSX_ADD_SUB(name, op, nels, tp, fld, sfprf, r2sp)                     \
void helper_##name(CPUPPCState *env, ppc_vsr_t *xt,                          \
                   ppc_vsr_t *xa, ppc_vsr_t *xb)                              \
{                                                                             \
    ppc_vsr_t t = *xt;                                                        \
    int i;                                                                    \
                                                                              \
    helper_reset_fpstatus(env);                                               \
                                                                              \
    for (i = 0; i < nels; i++) {                                              \
        float_status tstat = env->fp_status;                                  \
        set_float_exception_flags(0, &tstat);                                 \
        t.fld = tp##_##op(xa->fld, xb->fld, &tstat);                          \
        env->fp_status.float_exception_flags |= tstat.float_exception_flags;  \
                                                                              \
        if (unlikely(tstat.float_exception_flags & float_flag_invalid)) {     \
            float_invalid_op_addsub(env, tstat.float_exception_flags,         \
                                    sfprf, GETPC());                          \
        }                                                                     \
                                                                              \
        if (r2sp) {                                                           \
            t.fld = do_frsp(env, t.fld, GETPC());                             \
        }                                                                     \
                                                                              \
        if (sfprf) {                                                          \
            helper_compute_fprf_float64(env, t.fld);                          \
        }                                                                     \
    }                                                                         \
    *xt = t;                                                                  \
    do_float_check_status(env, GETPC());                                      \
}

VSX_ADD_SUB(xsadddp, add, 1, float64, VsrD(0), 1, 0)
VSX_ADD_SUB(xsaddsp, add, 1, float64, VsrD(0), 1, 1)
VSX_ADD_SUB(xvadddp, add, 2, float64, VsrD(i), 0, 0)
VSX_ADD_SUB(xvaddsp, add, 4, float32, VsrW(i), 0, 0)
VSX_ADD_SUB(xssubdp, sub, 1, float64, VsrD(0), 1, 0)
VSX_ADD_SUB(xssubsp, sub, 1, float64, VsrD(0), 1, 1)
VSX_ADD_SUB(xvsubdp, sub, 2, float64, VsrD(i), 0, 0)
VSX_ADD_SUB(xvsubsp, sub, 4, float32, VsrW(i), 0, 0)
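/*
 * For example, VSX_ADD_SUB(xsadddp, add, 1, float64, VsrD(0), 1, 0) above
 * defines helper_xsadddp(): one float64_add on VsrD(0) that updates FPRF
 * (sfprf == 1) and does not round the result to single precision
 * (r2sp == 0).
 */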
void helper_xsaddqp(CPUPPCState *env, uint32_t opcode,
                    ppc_vsr_t *xt, ppc_vsr_t *xa, ppc_vsr_t *xb)
{
    ppc_vsr_t t = *xt;
    float_status tstat;

    helper_reset_fpstatus(env);

    tstat = env->fp_status;
    if (unlikely(Rc(opcode) != 0)) {
        tstat.float_rounding_mode = float_round_to_odd;
    }

    set_float_exception_flags(0, &tstat);
    t.f128 = float128_add(xa->f128, xb->f128, &tstat);
    env->fp_status.float_exception_flags |= tstat.float_exception_flags;

    if (unlikely(tstat.float_exception_flags & float_flag_invalid)) {
        float_invalid_op_addsub(env, tstat.float_exception_flags, 1, GETPC());
    }

    helper_compute_fprf_float128(env, t.f128);

    *xt = t;
    do_float_check_status(env, GETPC());
}
/*
 * VSX_MUL - VSX floating point multiply
 *   op    - instruction mnemonic
 *   nels  - number of elements (1, 2 or 4)
 *   tp    - type (float32 or float64)
 *   fld   - vsr_t field (VsrD(*) or VsrW(*))
 *   sfprf - set FPRF
 */
#define VSX_MUL(op, nels, tp, fld, sfprf, r2sp)                               \
void helper_##op(CPUPPCState *env, ppc_vsr_t *xt,                            \
                 ppc_vsr_t *xa, ppc_vsr_t *xb)                                \
{                                                                             \
    ppc_vsr_t t = *xt;                                                        \
    int i;                                                                    \
                                                                              \
    helper_reset_fpstatus(env);                                               \
                                                                              \
    for (i = 0; i < nels; i++) {                                              \
        float_status tstat = env->fp_status;                                  \
        set_float_exception_flags(0, &tstat);                                 \
        t.fld = tp##_mul(xa->fld, xb->fld, &tstat);                           \
        env->fp_status.float_exception_flags |= tstat.float_exception_flags;  \
                                                                              \
        if (unlikely(tstat.float_exception_flags & float_flag_invalid)) {     \
            float_invalid_op_mul(env, tstat.float_exception_flags,            \
                                 sfprf, GETPC());                             \
        }                                                                     \
                                                                              \
        if (r2sp) {                                                           \
            t.fld = do_frsp(env, t.fld, GETPC());                             \
        }                                                                     \
                                                                              \
        if (sfprf) {                                                          \
            helper_compute_fprf_float64(env, t.fld);                          \
        }                                                                     \
    }                                                                         \
                                                                              \
    *xt = t;                                                                  \
    do_float_check_status(env, GETPC());                                      \
}

VSX_MUL(xsmuldp, 1, float64, VsrD(0), 1, 0)
VSX_MUL(xsmulsp, 1, float64, VsrD(0), 1, 1)
VSX_MUL(xvmuldp, 2, float64, VsrD(i), 0, 0)
VSX_MUL(xvmulsp, 4, float32, VsrW(i), 0, 0)
void helper_xsmulqp(CPUPPCState *env, uint32_t opcode,
                    ppc_vsr_t *xt, ppc_vsr_t *xa, ppc_vsr_t *xb)
{
    ppc_vsr_t t = *xt;
    float_status tstat;

    helper_reset_fpstatus(env);
    tstat = env->fp_status;
    if (unlikely(Rc(opcode) != 0)) {
        tstat.float_rounding_mode = float_round_to_odd;
    }

    set_float_exception_flags(0, &tstat);
    t.f128 = float128_mul(xa->f128, xb->f128, &tstat);
    env->fp_status.float_exception_flags |= tstat.float_exception_flags;

    if (unlikely(tstat.float_exception_flags & float_flag_invalid)) {
        float_invalid_op_mul(env, tstat.float_exception_flags, 1, GETPC());
    }
    helper_compute_fprf_float128(env, t.f128);

    *xt = t;
    do_float_check_status(env, GETPC());
}
/*
 * VSX_DIV - VSX floating point divide
 *   op    - instruction mnemonic
 *   nels  - number of elements (1, 2 or 4)
 *   tp    - type (float32 or float64)
 *   fld   - vsr_t field (VsrD(*) or VsrW(*))
 *   sfprf - set FPRF
 */
#define VSX_DIV(op, nels, tp, fld, sfprf, r2sp)                               \
void helper_##op(CPUPPCState *env, ppc_vsr_t *xt,                            \
                 ppc_vsr_t *xa, ppc_vsr_t *xb)                                \
{                                                                             \
    ppc_vsr_t t = *xt;                                                        \
    int i;                                                                    \
                                                                              \
    helper_reset_fpstatus(env);                                               \
                                                                              \
    for (i = 0; i < nels; i++) {                                              \
        float_status tstat = env->fp_status;                                  \
        set_float_exception_flags(0, &tstat);                                 \
        t.fld = tp##_div(xa->fld, xb->fld, &tstat);                           \
        env->fp_status.float_exception_flags |= tstat.float_exception_flags;  \
                                                                              \
        if (unlikely(tstat.float_exception_flags & float_flag_invalid)) {     \
            float_invalid_op_div(env, tstat.float_exception_flags,            \
                                 sfprf, GETPC());                             \
        }                                                                     \
        if (unlikely(tstat.float_exception_flags & float_flag_divbyzero)) {   \
            float_zero_divide_excp(env, GETPC());                             \
        }                                                                     \
                                                                              \
        if (r2sp) {                                                           \
            t.fld = do_frsp(env, t.fld, GETPC());                             \
        }                                                                     \
                                                                              \
        if (sfprf) {                                                          \
            helper_compute_fprf_float64(env, t.fld);                          \
        }                                                                     \
    }                                                                         \
                                                                              \
    *xt = t;                                                                  \
    do_float_check_status(env, GETPC());                                      \
}

VSX_DIV(xsdivdp, 1, float64, VsrD(0), 1, 0)
VSX_DIV(xsdivsp, 1, float64, VsrD(0), 1, 1)
VSX_DIV(xvdivdp, 2, float64, VsrD(i), 0, 0)
VSX_DIV(xvdivsp, 4, float32, VsrW(i), 0, 0)
void helper_xsdivqp(CPUPPCState *env, uint32_t opcode,
                    ppc_vsr_t *xt, ppc_vsr_t *xa, ppc_vsr_t *xb)
{
    ppc_vsr_t t = *xt;
    float_status tstat;

    helper_reset_fpstatus(env);
    tstat = env->fp_status;
    if (unlikely(Rc(opcode) != 0)) {
        tstat.float_rounding_mode = float_round_to_odd;
    }

    set_float_exception_flags(0, &tstat);
    t.f128 = float128_div(xa->f128, xb->f128, &tstat);
    env->fp_status.float_exception_flags |= tstat.float_exception_flags;

    if (unlikely(tstat.float_exception_flags & float_flag_invalid)) {
        float_invalid_op_div(env, tstat.float_exception_flags, 1, GETPC());
    }
    if (unlikely(tstat.float_exception_flags & float_flag_divbyzero)) {
        float_zero_divide_excp(env, GETPC());
    }

    helper_compute_fprf_float128(env, t.f128);
    *xt = t;
    do_float_check_status(env, GETPC());
}
/*
 * VSX_RE  - VSX floating point reciprocal estimate
 *   op    - instruction mnemonic
 *   nels  - number of elements (1, 2 or 4)
 *   tp    - type (float32 or float64)
 *   fld   - vsr_t field (VsrD(*) or VsrW(*))
 *   sfprf - set FPRF
 */
#define VSX_RE(op, nels, tp, fld, sfprf, r2sp)                                \
void helper_##op(CPUPPCState *env, ppc_vsr_t *xt, ppc_vsr_t *xb)             \
{                                                                             \
    ppc_vsr_t t = *xt;                                                        \
    int i;                                                                    \
                                                                              \
    helper_reset_fpstatus(env);                                               \
                                                                              \
    for (i = 0; i < nels; i++) {                                              \
        if (unlikely(tp##_is_signaling_nan(xb->fld, &env->fp_status))) {      \
            float_invalid_op_vxsnan(env, GETPC());                            \
        }                                                                     \
        t.fld = tp##_div(tp##_one, xb->fld, &env->fp_status);                 \
                                                                              \
        if (r2sp) {                                                           \
            t.fld = do_frsp(env, t.fld, GETPC());                             \
        }                                                                     \
                                                                              \
        if (sfprf) {                                                          \
            helper_compute_fprf_float64(env, t.fld);                          \
        }                                                                     \
    }                                                                         \
                                                                              \
    *xt = t;                                                                  \
    do_float_check_status(env, GETPC());                                      \
}

VSX_RE(xsredp, 1, float64, VsrD(0), 1, 0)
VSX_RE(xsresp, 1, float64, VsrD(0), 1, 1)
VSX_RE(xvredp, 2, float64, VsrD(i), 0, 0)
VSX_RE(xvresp, 4, float32, VsrW(i), 0, 0)
/*
 * VSX_SQRT - VSX floating point square root
 *   op    - instruction mnemonic
 *   nels  - number of elements (1, 2 or 4)
 *   tp    - type (float32 or float64)
 *   fld   - vsr_t field (VsrD(*) or VsrW(*))
 *   sfprf - set FPRF
 */
#define VSX_SQRT(op, nels, tp, fld, sfprf, r2sp)                              \
void helper_##op(CPUPPCState *env, ppc_vsr_t *xt, ppc_vsr_t *xb)             \
{                                                                             \
    ppc_vsr_t t = *xt;                                                        \
    int i;                                                                    \
                                                                              \
    helper_reset_fpstatus(env);                                               \
                                                                              \
    for (i = 0; i < nels; i++) {                                              \
        float_status tstat = env->fp_status;                                  \
        set_float_exception_flags(0, &tstat);                                 \
        t.fld = tp##_sqrt(xb->fld, &tstat);                                   \
        env->fp_status.float_exception_flags |= tstat.float_exception_flags;  \
                                                                              \
        if (unlikely(tstat.float_exception_flags & float_flag_invalid)) {     \
            float_invalid_op_sqrt(env, tstat.float_exception_flags,           \
                                  sfprf, GETPC());                            \
        }                                                                     \
                                                                              \
        if (r2sp) {                                                           \
            t.fld = do_frsp(env, t.fld, GETPC());                             \
        }                                                                     \
                                                                              \
        if (sfprf) {                                                          \
            helper_compute_fprf_float64(env, t.fld);                          \
        }                                                                     \
    }                                                                         \
                                                                              \
    *xt = t;                                                                  \
    do_float_check_status(env, GETPC());                                      \
}

VSX_SQRT(xssqrtdp, 1, float64, VsrD(0), 1, 0)
VSX_SQRT(xssqrtsp, 1, float64, VsrD(0), 1, 1)
VSX_SQRT(xvsqrtdp, 2, float64, VsrD(i), 0, 0)
VSX_SQRT(xvsqrtsp, 4, float32, VsrW(i), 0, 0)
/*
 * VSX_RSQRTE - VSX floating point reciprocal square root estimate
 *   op    - instruction mnemonic
 *   nels  - number of elements (1, 2 or 4)
 *   tp    - type (float32 or float64)
 *   fld   - vsr_t field (VsrD(*) or VsrW(*))
 *   sfprf - set FPRF
 */
#define VSX_RSQRTE(op, nels, tp, fld, sfprf, r2sp)                            \
void helper_##op(CPUPPCState *env, ppc_vsr_t *xt, ppc_vsr_t *xb)             \
{                                                                             \
    ppc_vsr_t t = *xt;                                                        \
    int i;                                                                    \
                                                                              \
    helper_reset_fpstatus(env);                                               \
                                                                              \
    for (i = 0; i < nels; i++) {                                              \
        float_status tstat = env->fp_status;                                  \
        set_float_exception_flags(0, &tstat);                                 \
        t.fld = tp##_sqrt(xb->fld, &tstat);                                   \
        t.fld = tp##_div(tp##_one, t.fld, &tstat);                            \
        env->fp_status.float_exception_flags |= tstat.float_exception_flags;  \
        if (unlikely(tstat.float_exception_flags & float_flag_invalid)) {     \
            float_invalid_op_sqrt(env, tstat.float_exception_flags,           \
                                  sfprf, GETPC());                            \
        }                                                                     \
                                                                              \
        if (r2sp) {                                                           \
            t.fld = do_frsp(env, t.fld, GETPC());                             \
        }                                                                     \
                                                                              \
        if (sfprf) {                                                          \
            helper_compute_fprf_float64(env, t.fld);                          \
        }                                                                     \
    }                                                                         \
                                                                              \
    *xt = t;                                                                  \
    do_float_check_status(env, GETPC());                                      \
}

VSX_RSQRTE(xsrsqrtedp, 1, float64, VsrD(0), 1, 0)
VSX_RSQRTE(xsrsqrtesp, 1, float64, VsrD(0), 1, 1)
VSX_RSQRTE(xvrsqrtedp, 2, float64, VsrD(i), 0, 0)
VSX_RSQRTE(xvrsqrtesp, 4, float32, VsrW(i), 0, 0)
/*
 * VSX_TDIV - VSX floating point test for divide
 *   op    - instruction mnemonic
 *   nels  - number of elements (1, 2 or 4)
 *   tp    - type (float32 or float64)
 *   fld   - vsr_t field (VsrD(*) or VsrW(*))
 *   emin  - minimum unbiased exponent
 *   emax  - maximum unbiased exponent
 *   nbits - number of fraction bits
 */
#define VSX_TDIV(op, nels, tp, fld, emin, emax, nbits)                  \
void helper_##op(CPUPPCState *env, uint32_t opcode,                    \
                 ppc_vsr_t *xa, ppc_vsr_t *xb)                          \
{                                                                       \
    int i;                                                              \
    int fe_flag = 0;                                                    \
    int fg_flag = 0;                                                    \
                                                                        \
    for (i = 0; i < nels; i++) {                                        \
        if (unlikely(tp##_is_infinity(xa->fld) ||                       \
                     tp##_is_infinity(xb->fld) ||                       \
                     tp##_is_zero(xb->fld))) {                          \
            fe_flag = 1;                                                \
            fg_flag = 1;                                                \
        } else {                                                        \
            int e_a = ppc_##tp##_get_unbiased_exp(xa->fld);             \
            int e_b = ppc_##tp##_get_unbiased_exp(xb->fld);             \
                                                                        \
            if (unlikely(tp##_is_any_nan(xa->fld) ||                    \
                         tp##_is_any_nan(xb->fld))) {                   \
                fe_flag = 1;                                            \
            } else if ((e_b <= emin) || (e_b >= (emax - 2))) {          \
                fe_flag = 1;                                            \
            } else if (!tp##_is_zero(xa->fld) &&                        \
                       (((e_a - e_b) >= emax) ||                        \
                        ((e_a - e_b) <= (emin + 1)) ||                  \
                        (e_a <= (emin + nbits)))) {                     \
                fe_flag = 1;                                            \
            }                                                           \
                                                                        \
            if (unlikely(tp##_is_zero_or_denormal(xb->fld))) {          \
                /*                                                      \
                 * XB is not zero because of the above check and so     \
                 * must be denormalized.                                \
                 */                                                     \
                fg_flag = 1;                                            \
            }                                                           \
        }                                                               \
    }                                                                   \
                                                                        \
    env->crf[BF(opcode)] = 0x8 | (fg_flag ? 4 : 0) | (fe_flag ? 2 : 0); \
}

VSX_TDIV(xstdivdp, 1, float64, VsrD(0), -1022, 1023, 52)
VSX_TDIV(xvtdivdp, 2, float64, VsrD(i), -1022, 1023, 52)
VSX_TDIV(xvtdivsp, 4, float32, VsrW(i), -126, 127, 23)
/*
 * VSX_TSQRT - VSX floating point test for square root
 *   op    - instruction mnemonic
 *   nels  - number of elements (1, 2 or 4)
 *   tp    - type (float32 or float64)
 *   fld   - vsr_t field (VsrD(*) or VsrW(*))
 *   emin  - minimum unbiased exponent
 *   nbits - number of fraction bits
 */
#define VSX_TSQRT(op, nels, tp, fld, emin, nbits)                       \
void helper_##op(CPUPPCState *env, uint32_t opcode, ppc_vsr_t *xb)     \
{                                                                       \
    int i;                                                              \
    int fe_flag = 0;                                                    \
    int fg_flag = 0;                                                    \
                                                                        \
    for (i = 0; i < nels; i++) {                                        \
        if (unlikely(tp##_is_infinity(xb->fld) ||                       \
                     tp##_is_zero(xb->fld))) {                          \
            fe_flag = 1;                                                \
            fg_flag = 1;                                                \
        } else {                                                        \
            int e_b = ppc_##tp##_get_unbiased_exp(xb->fld);             \
                                                                        \
            if (unlikely(tp##_is_any_nan(xb->fld))) {                   \
                fe_flag = 1;                                            \
            } else if (unlikely(tp##_is_zero(xb->fld))) {               \
                fe_flag = 1;                                            \
            } else if (unlikely(tp##_is_neg(xb->fld))) {                \
                fe_flag = 1;                                            \
            } else if (!tp##_is_zero(xb->fld) &&                        \
                       (e_b <= (emin + nbits))) {                       \
                fe_flag = 1;                                            \
            }                                                           \
                                                                        \
            if (unlikely(tp##_is_zero_or_denormal(xb->fld))) {          \
                /*                                                      \
                 * XB is not zero because of the above check and        \
                 * therefore must be denormalized.                      \
                 */                                                     \
                fg_flag = 1;                                            \
            }                                                           \
        }                                                               \
    }                                                                   \
                                                                        \
    env->crf[BF(opcode)] = 0x8 | (fg_flag ? 4 : 0) | (fe_flag ? 2 : 0); \
}

VSX_TSQRT(xstsqrtdp, 1, float64, VsrD(0), -1022, 52)
VSX_TSQRT(xvtsqrtdp, 2, float64, VsrD(i), -1022, 52)
VSX_TSQRT(xvtsqrtsp, 4, float32, VsrW(i), -126, 23)
/*
 * VSX_MADD - VSX floating point multiply/add variations
 *   op    - instruction mnemonic
 *   nels  - number of elements (1, 2 or 4)
 *   tp    - type (float32 or float64)
 *   fld   - vsr_t field (VsrD(*) or VsrW(*))
 *   maddflgs - flags for the float*muladd routine that control the
 *           various forms (madd, msub, nmadd, nmsub)
 *   sfprf - set FPRF
 */
#define VSX_MADD(op, nels, tp, fld, maddflgs, sfprf, r2sp)                    \
void helper_##op(CPUPPCState *env, ppc_vsr_t *xt,                            \
                 ppc_vsr_t *xa, ppc_vsr_t *b, ppc_vsr_t *c)                   \
{                                                                             \
    ppc_vsr_t t = *xt;                                                        \
    int i;                                                                    \
                                                                              \
    helper_reset_fpstatus(env);                                               \
                                                                              \
    for (i = 0; i < nels; i++) {                                              \
        float_status tstat = env->fp_status;                                  \
        set_float_exception_flags(0, &tstat);                                 \
        if (r2sp && (tstat.float_rounding_mode == float_round_nearest_even)) {\
            /*                                                                \
             * Avoid double rounding errors by rounding the intermediate     \
             * result to odd.                                                 \
             */                                                               \
            set_float_rounding_mode(float_round_to_zero, &tstat);             \
            t.fld = tp##_muladd(xa->fld, b->fld, c->fld,                      \
                                maddflgs, &tstat);                            \
            t.fld |= (get_float_exception_flags(&tstat) &                     \
                      float_flag_inexact) != 0;                               \
        } else {                                                              \
            t.fld = tp##_muladd(xa->fld, b->fld, c->fld,                      \
                                maddflgs, &tstat);                            \
        }                                                                     \
        env->fp_status.float_exception_flags |= tstat.float_exception_flags;  \
                                                                              \
        if (unlikely(tstat.float_exception_flags & float_flag_invalid)) {     \
            float_invalid_op_madd(env, tstat.float_exception_flags,           \
                                  sfprf, GETPC());                            \
        }                                                                     \
                                                                              \
        if (r2sp) {                                                           \
            t.fld = do_frsp(env, t.fld, GETPC());                             \
        }                                                                     \
                                                                              \
        if (sfprf) {                                                          \
            helper_compute_fprf_float64(env, t.fld);                          \
        }                                                                     \
    }                                                                         \
    *xt = t;                                                                  \
    do_float_check_status(env, GETPC());                                      \
}

VSX_MADD(xsmadddp, 1, float64, VsrD(0), MADD_FLGS, 1, 0)
VSX_MADD(xsmsubdp, 1, float64, VsrD(0), MSUB_FLGS, 1, 0)
VSX_MADD(xsnmadddp, 1, float64, VsrD(0), NMADD_FLGS, 1, 0)
VSX_MADD(xsnmsubdp, 1, float64, VsrD(0), NMSUB_FLGS, 1, 0)
VSX_MADD(xsmaddsp, 1, float64, VsrD(0), MADD_FLGS, 1, 1)
VSX_MADD(xsmsubsp, 1, float64, VsrD(0), MSUB_FLGS, 1, 1)
VSX_MADD(xsnmaddsp, 1, float64, VsrD(0), NMADD_FLGS, 1, 1)
VSX_MADD(xsnmsubsp, 1, float64, VsrD(0), NMSUB_FLGS, 1, 1)

VSX_MADD(xvmadddp, 2, float64, VsrD(i), MADD_FLGS, 0, 0)
VSX_MADD(xvmsubdp, 2, float64, VsrD(i), MSUB_FLGS, 0, 0)
VSX_MADD(xvnmadddp, 2, float64, VsrD(i), NMADD_FLGS, 0, 0)
VSX_MADD(xvnmsubdp, 2, float64, VsrD(i), NMSUB_FLGS, 0, 0)

VSX_MADD(xvmaddsp, 4, float32, VsrW(i), MADD_FLGS, 0, 0)
VSX_MADD(xvmsubsp, 4, float32, VsrW(i), MSUB_FLGS, 0, 0)
VSX_MADD(xvnmaddsp, 4, float32, VsrW(i), NMADD_FLGS, 0, 0)
VSX_MADD(xvnmsubsp, 4, float32, VsrW(i), NMSUB_FLGS, 0, 0)
/*
 * VSX_SCALAR_CMP_DP - VSX scalar floating point compare double precision
 *   op    - instruction mnemonic
 *   cmp   - comparison operation
 *   exp   - expected result of comparison
 *   svxvc - set VXVC bit
 */
#define VSX_SCALAR_CMP_DP(op, cmp, exp, svxvc)                                \
void helper_##op(CPUPPCState *env, ppc_vsr_t *xt,                            \
                 ppc_vsr_t *xa, ppc_vsr_t *xb)                                \
{                                                                             \
    ppc_vsr_t t = *xt;                                                        \
    bool vxsnan_flag = false, vxvc_flag = false, vex_flag = false;            \
                                                                              \
    if (float64_is_signaling_nan(xa->VsrD(0), &env->fp_status) ||             \
        float64_is_signaling_nan(xb->VsrD(0), &env->fp_status)) {             \
        vxsnan_flag = true;                                                   \
        if (fpscr_ve == 0 && svxvc) {                                         \
            vxvc_flag = true;                                                 \
        }                                                                     \
    } else if (svxvc) {                                                       \
        vxvc_flag = float64_is_quiet_nan(xa->VsrD(0), &env->fp_status) ||     \
            float64_is_quiet_nan(xb->VsrD(0), &env->fp_status);               \
    }                                                                         \
    if (vxsnan_flag) {                                                        \
        float_invalid_op_vxsnan(env, GETPC());                                \
    }                                                                         \
    if (vxvc_flag) {                                                          \
        float_invalid_op_vxvc(env, 0, GETPC());                               \
    }                                                                         \
    vex_flag = fpscr_ve && (vxvc_flag || vxsnan_flag);                        \
                                                                              \
    if (!vex_flag) {                                                          \
        if (float64_##cmp(xb->VsrD(0), xa->VsrD(0),                           \
                          &env->fp_status) == exp) {                          \
            t.VsrD(0) = -1;                                                   \
            t.VsrD(1) = 0;                                                    \
        } else {                                                              \
            t.VsrD(0) = 0;                                                    \
            t.VsrD(1) = 0;                                                    \
        }                                                                     \
    }                                                                         \
    *xt = t;                                                                  \
    do_float_check_status(env, GETPC());                                      \
}

VSX_SCALAR_CMP_DP(xscmpeqdp, eq, 1, 0)
VSX_SCALAR_CMP_DP(xscmpgedp, le, 1, 1)
VSX_SCALAR_CMP_DP(xscmpgtdp, lt, 1, 1)
VSX_SCALAR_CMP_DP(xscmpnedp, eq, 0, 0)
void helper_xscmpexpdp(CPUPPCState *env, uint32_t opcode,
                       ppc_vsr_t *xa, ppc_vsr_t *xb)
{
    int64_t exp_a, exp_b;
    uint32_t cc;

    exp_a = extract64(xa->VsrD(0), 52, 11);
    exp_b = extract64(xb->VsrD(0), 52, 11);

    if (unlikely(float64_is_any_nan(xa->VsrD(0)) ||
                 float64_is_any_nan(xb->VsrD(0)))) {
        cc = CRF_SO;
    } else {
        if (exp_a < exp_b) {
            cc = CRF_LT;
        } else if (exp_a > exp_b) {
            cc = CRF_GT;
        } else {
            cc = CRF_EQ;
        }
    }

    env->fpscr &= ~FP_FPCC;
    env->fpscr |= cc << FPSCR_FPCC;
    env->crf[BF(opcode)] = cc;

    do_float_check_status(env, GETPC());
}
void helper_xscmpexpqp(CPUPPCState *env, uint32_t opcode,
                       ppc_vsr_t *xa, ppc_vsr_t *xb)
{
    int64_t exp_a, exp_b;
    uint32_t cc;

    exp_a = extract64(xa->VsrD(0), 48, 15);
    exp_b = extract64(xb->VsrD(0), 48, 15);

    if (unlikely(float128_is_any_nan(xa->f128) ||
                 float128_is_any_nan(xb->f128))) {
        cc = CRF_SO;
    } else {
        if (exp_a < exp_b) {
            cc = CRF_LT;
        } else if (exp_a > exp_b) {
            cc = CRF_GT;
        } else {
            cc = CRF_EQ;
        }
    }

    env->fpscr &= ~FP_FPCC;
    env->fpscr |= cc << FPSCR_FPCC;
    env->crf[BF(opcode)] = cc;

    do_float_check_status(env, GETPC());
}
static inline void do_scalar_cmp(CPUPPCState *env, ppc_vsr_t *xa, ppc_vsr_t *xb,
                                 int crf_idx, bool ordered)
{
    uint32_t cc;
    bool vxsnan_flag = false, vxvc_flag = false;

    helper_reset_fpstatus(env);

    switch (float64_compare(xa->VsrD(0), xb->VsrD(0), &env->fp_status)) {
    case float_relation_less:
        cc = CRF_LT;
        break;
    case float_relation_equal:
        cc = CRF_EQ;
        break;
    case float_relation_greater:
        cc = CRF_GT;
        break;
    case float_relation_unordered:
        cc = CRF_SO;

        if (float64_is_signaling_nan(xa->VsrD(0), &env->fp_status) ||
            float64_is_signaling_nan(xb->VsrD(0), &env->fp_status)) {
            vxsnan_flag = true;
            if (fpscr_ve == 0 && ordered) {
                vxvc_flag = true;
            }
        } else if (float64_is_quiet_nan(xa->VsrD(0), &env->fp_status) ||
                   float64_is_quiet_nan(xb->VsrD(0), &env->fp_status)) {
            if (ordered) {
                vxvc_flag = true;
            }
        }

        break;
    default:
        g_assert_not_reached();
    }

    env->fpscr &= ~FP_FPCC;
    env->fpscr |= cc << FPSCR_FPCC;
    env->crf[crf_idx] = cc;

    if (vxsnan_flag) {
        float_invalid_op_vxsnan(env, GETPC());
    }
    if (vxvc_flag) {
        float_invalid_op_vxvc(env, 0, GETPC());
    }

    do_float_check_status(env, GETPC());
}
void helper_xscmpodp(CPUPPCState *env, uint32_t opcode, ppc_vsr_t *xa,
                     ppc_vsr_t *xb)
{
    do_scalar_cmp(env, xa, xb, BF(opcode), true);
}

void helper_xscmpudp(CPUPPCState *env, uint32_t opcode, ppc_vsr_t *xa,
                     ppc_vsr_t *xb)
{
    do_scalar_cmp(env, xa, xb, BF(opcode), false);
}
static inline void do_scalar_cmpq(CPUPPCState *env, ppc_vsr_t *xa,
                                  ppc_vsr_t *xb, int crf_idx, bool ordered)
{
    uint32_t cc;
    bool vxsnan_flag = false, vxvc_flag = false;

    helper_reset_fpstatus(env);

    switch (float128_compare(xa->f128, xb->f128, &env->fp_status)) {
    case float_relation_less:
        cc = CRF_LT;
        break;
    case float_relation_equal:
        cc = CRF_EQ;
        break;
    case float_relation_greater:
        cc = CRF_GT;
        break;
    case float_relation_unordered:
        cc = CRF_SO;

        if (float128_is_signaling_nan(xa->f128, &env->fp_status) ||
            float128_is_signaling_nan(xb->f128, &env->fp_status)) {
            vxsnan_flag = true;
            if (fpscr_ve == 0 && ordered) {
                vxvc_flag = true;
            }
        } else if (float128_is_quiet_nan(xa->f128, &env->fp_status) ||
                   float128_is_quiet_nan(xb->f128, &env->fp_status)) {
            if (ordered) {
                vxvc_flag = true;
            }
        }

        break;
    default:
        g_assert_not_reached();
    }

    env->fpscr &= ~FP_FPCC;
    env->fpscr |= cc << FPSCR_FPCC;
    env->crf[crf_idx] = cc;

    if (vxsnan_flag) {
        float_invalid_op_vxsnan(env, GETPC());
    }
    if (vxvc_flag) {
        float_invalid_op_vxvc(env, 0, GETPC());
    }

    do_float_check_status(env, GETPC());
}
*env
, uint32_t opcode
, ppc_vsr_t
*xa
,
2361 do_scalar_cmpq(env
, xa
, xb
, BF(opcode
), true);
2364 void helper_xscmpuqp(CPUPPCState
*env
, uint32_t opcode
, ppc_vsr_t
*xa
,
2367 do_scalar_cmpq(env
, xa
, xb
, BF(opcode
), false);
/*
 * VSX_MAX_MIN - VSX floating point maximum/minimum
 *   name  - instruction mnemonic
 *   op    - operation (max or min)
 *   nels  - number of elements (1, 2 or 4)
 *   tp    - type (float32 or float64)
 *   fld   - vsr_t field (VsrD(*) or VsrW(*))
 */
#define VSX_MAX_MIN(name, op, nels, tp, fld)                                  \
void helper_##name(CPUPPCState *env, ppc_vsr_t *xt,                          \
                   ppc_vsr_t *xa, ppc_vsr_t *xb)                              \
{                                                                             \
    ppc_vsr_t t = *xt;                                                        \
    int i;                                                                    \
                                                                              \
    for (i = 0; i < nels; i++) {                                              \
        t.fld = tp##_##op(xa->fld, xb->fld, &env->fp_status);                 \
        if (unlikely(tp##_is_signaling_nan(xa->fld, &env->fp_status) ||       \
                     tp##_is_signaling_nan(xb->fld, &env->fp_status))) {      \
            float_invalid_op_vxsnan(env, GETPC());                            \
        }                                                                     \
    }                                                                         \
                                                                              \
    *xt = t;                                                                  \
    do_float_check_status(env, GETPC());                                      \
}

VSX_MAX_MIN(xsmaxdp, maxnum, 1, float64, VsrD(0))
VSX_MAX_MIN(xvmaxdp, maxnum, 2, float64, VsrD(i))
VSX_MAX_MIN(xvmaxsp, maxnum, 4, float32, VsrW(i))
VSX_MAX_MIN(xsmindp, minnum, 1, float64, VsrD(0))
VSX_MAX_MIN(xvmindp, minnum, 2, float64, VsrD(i))
VSX_MAX_MIN(xvminsp, minnum, 4, float32, VsrW(i))
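
/*
 * Editorial note (not from the original source): maxnum/minnum follow
 * the IEEE 754-2008 maxNum/minNum rules implemented by softfloat, so a
 * single quiet NaN operand yields the other (numeric) operand; the
 * helper above only adds the VXSNAN reporting for signalling NaNs.
 */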
#define VSX_MAX_MINC(name, max)                                               \
void helper_##name(CPUPPCState *env, uint32_t opcode,                        \
                   ppc_vsr_t *xt, ppc_vsr_t *xa, ppc_vsr_t *xb)               \
{                                                                             \
    ppc_vsr_t t = *xt;                                                        \
    bool vxsnan_flag = false, vex_flag = false;                               \
                                                                              \
    if (unlikely(float64_is_any_nan(xa->VsrD(0)) ||                           \
                 float64_is_any_nan(xb->VsrD(0)))) {                          \
        if (float64_is_signaling_nan(xa->VsrD(0), &env->fp_status) ||         \
            float64_is_signaling_nan(xb->VsrD(0), &env->fp_status)) {         \
            vxsnan_flag = true;                                               \
        }                                                                     \
        t.VsrD(0) = xb->VsrD(0);                                              \
    } else if ((max &&                                                        \
               !float64_lt(xa->VsrD(0), xb->VsrD(0), &env->fp_status)) ||     \
               (!max &&                                                       \
               float64_lt(xa->VsrD(0), xb->VsrD(0), &env->fp_status))) {      \
        t.VsrD(0) = xa->VsrD(0);                                              \
    } else {                                                                  \
        t.VsrD(0) = xb->VsrD(0);                                              \
    }                                                                         \
                                                                              \
    vex_flag = fpscr_ve & vxsnan_flag;                                        \
    if (vxsnan_flag) {                                                        \
        float_invalid_op_vxsnan(env, GETPC());                                \
    }                                                                         \
    if (!vex_flag) {                                                          \
        *xt = t;                                                              \
    }                                                                         \
}

VSX_MAX_MINC(xsmaxcdp, 1);
VSX_MAX_MINC(xsmincdp, 0);
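
/*
 * Editorial note (not from the original source): as the reconstructed
 * code above implements, the "C" (VMX-compatible) variants pick xb
 * whenever either operand is a NaN, i.e. an unordered compare returns
 * the second operand.
 */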
#define VSX_MAX_MINJ(name, max)                                               \
void helper_##name(CPUPPCState *env, uint32_t opcode,                        \
                   ppc_vsr_t *xt, ppc_vsr_t *xa, ppc_vsr_t *xb)               \
{                                                                             \
    ppc_vsr_t t = *xt;                                                        \
    bool vxsnan_flag = false, vex_flag = false;                               \
                                                                              \
    if (unlikely(float64_is_any_nan(xa->VsrD(0)))) {                          \
        if (float64_is_signaling_nan(xa->VsrD(0), &env->fp_status)) {         \
            vxsnan_flag = true;                                               \
        }                                                                     \
        t.VsrD(0) = xa->VsrD(0);                                              \
    } else if (unlikely(float64_is_any_nan(xb->VsrD(0)))) {                   \
        if (float64_is_signaling_nan(xb->VsrD(0), &env->fp_status)) {         \
            vxsnan_flag = true;                                               \
        }                                                                     \
        t.VsrD(0) = xb->VsrD(0);                                              \
    } else if (float64_is_zero(xa->VsrD(0)) &&                                \
               float64_is_zero(xb->VsrD(0))) {                                \
        if (max) {                                                            \
            if (!float64_is_neg(xa->VsrD(0)) ||                               \
                !float64_is_neg(xb->VsrD(0))) {                               \
                t.VsrD(0) = 0ULL;                                             \
            } else {                                                          \
                t.VsrD(0) = 0x8000000000000000ULL;                            \
            }                                                                 \
        } else {                                                              \
            if (float64_is_neg(xa->VsrD(0)) ||                                \
                float64_is_neg(xb->VsrD(0))) {                                \
                t.VsrD(0) = 0x8000000000000000ULL;                            \
            } else {                                                          \
                t.VsrD(0) = 0ULL;                                             \
            }                                                                 \
        }                                                                     \
    } else if ((max &&                                                        \
               !float64_lt(xa->VsrD(0), xb->VsrD(0), &env->fp_status)) ||     \
               (!max &&                                                       \
               float64_lt(xa->VsrD(0), xb->VsrD(0), &env->fp_status))) {      \
        t.VsrD(0) = xa->VsrD(0);                                              \
    } else {                                                                  \
        t.VsrD(0) = xb->VsrD(0);                                              \
    }                                                                         \
                                                                              \
    vex_flag = fpscr_ve & vxsnan_flag;                                        \
    if (vxsnan_flag) {                                                        \
        float_invalid_op_vxsnan(env, GETPC());                                \
    }                                                                         \
    if (!vex_flag) {                                                          \
        *xt = t;                                                              \
    }                                                                         \
}

VSX_MAX_MINJ(xsmaxjdp, 1);
VSX_MAX_MINJ(xsminjdp, 0);
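
/*
 * Editorial note (not from the original source): the "J" variants above
 * propagate a NaN from xa in preference to xb and give signed zeros an
 * explicit ordering (maxj treats +0.0 as greater than -0.0, minj treats
 * -0.0 as less than +0.0), which is what the extra zero-sign branches
 * implement.
 */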
/*
 * VSX_CMP - VSX floating point compare
 *   op    - instruction mnemonic
 *   nels  - number of elements (1, 2 or 4)
 *   tp    - type (float32 or float64)
 *   fld   - vsr_t field (VsrD(*) or VsrW(*))
 *   cmp   - comparison operation
 *   svxvc - set VXVC bit
 *   exp   - expected result of comparison
 */
#define VSX_CMP(op, nels, tp, fld, cmp, svxvc, exp)                       \
uint32_t helper_##op(CPUPPCState *env, ppc_vsr_t *xt,                    \
                     ppc_vsr_t *xa, ppc_vsr_t *xb)                       \
{                                                                         \
    ppc_vsr_t t = *xt;                                                    \
    uint32_t crf6 = 0;                                                    \
    int i;                                                                \
    int all_true = 1;                                                     \
    int all_false = 1;                                                    \
                                                                          \
    for (i = 0; i < nels; i++) {                                          \
        if (unlikely(tp##_is_any_nan(xa->fld) ||                          \
                     tp##_is_any_nan(xb->fld))) {                         \
            if (tp##_is_signaling_nan(xa->fld, &env->fp_status) ||        \
                tp##_is_signaling_nan(xb->fld, &env->fp_status)) {        \
                float_invalid_op_vxsnan(env, GETPC());                    \
            }                                                             \
            if (svxvc) {                                                  \
                float_invalid_op_vxvc(env, 0, GETPC());                   \
            }                                                             \
            t.fld = 0;                                                    \
            all_true = 0;                                                 \
        } else {                                                          \
            if (tp##_##cmp(xb->fld, xa->fld, &env->fp_status) == exp) {   \
                t.fld = -1;                                               \
                all_false = 0;                                            \
            } else {                                                      \
                t.fld = 0;                                                \
                all_true = 0;                                             \
            }                                                             \
        }                                                                 \
    }                                                                     \
                                                                          \
    *xt = t;                                                              \
    crf6 = (all_true ? 0x8 : 0) | (all_false ? 0x2 : 0);                  \
    return crf6;                                                          \
}

VSX_CMP(xvcmpeqdp, 2, float64, VsrD(i), eq, 0, 1)
VSX_CMP(xvcmpgedp, 2, float64, VsrD(i), le, 1, 1)
VSX_CMP(xvcmpgtdp, 2, float64, VsrD(i), lt, 1, 1)
VSX_CMP(xvcmpnedp, 2, float64, VsrD(i), eq, 0, 0)
VSX_CMP(xvcmpeqsp, 4, float32, VsrW(i), eq, 0, 1)
VSX_CMP(xvcmpgesp, 4, float32, VsrW(i), le, 1, 1)
VSX_CMP(xvcmpgtsp, 4, float32, VsrW(i), lt, 1, 1)
VSX_CMP(xvcmpnesp, 4, float32, VsrW(i), eq, 0, 0)
/*
 * VSX_CVT_FP_TO_FP - VSX floating point/floating point conversion
 *   op    - instruction mnemonic
 *   nels  - number of elements (1, 2 or 4)
 *   stp   - source type (float32 or float64)
 *   ttp   - target type (float32 or float64)
 *   sfld  - source vsr_t field
 *   tfld  - target vsr_t field (f32 or f64)
 *   sfprf - set FPRF
 */
#define VSX_CVT_FP_TO_FP(op, nels, stp, ttp, sfld, tfld, sfprf)    \
void helper_##op(CPUPPCState *env, ppc_vsr_t *xt, ppc_vsr_t *xb)   \
{                                                                  \
    ppc_vsr_t t = *xt;                                             \
    int i;                                                         \
                                                                   \
    for (i = 0; i < nels; i++) {                                   \
        t.tfld = stp##_to_##ttp(xb->sfld, &env->fp_status);        \
        if (unlikely(stp##_is_signaling_nan(xb->sfld,              \
                                            &env->fp_status))) {  \
            float_invalid_op_vxsnan(env, GETPC());                 \
            t.tfld = ttp##_snan_to_qnan(t.tfld);                   \
        }                                                          \
        if (sfprf) {                                               \
            helper_compute_fprf_##ttp(env, t.tfld);                \
        }                                                          \
    }                                                              \
                                                                   \
    *xt = t;                                                       \
    do_float_check_status(env, GETPC());                           \
}

VSX_CVT_FP_TO_FP(xscvdpsp, 1, float64, float32, VsrD(0), VsrW(0), 1)
VSX_CVT_FP_TO_FP(xscvspdp, 1, float32, float64, VsrW(0), VsrD(0), 1)
VSX_CVT_FP_TO_FP(xvcvdpsp, 2, float64, float32, VsrD(i), VsrW(2 * i), 0)
VSX_CVT_FP_TO_FP(xvcvspdp, 2, float32, float64, VsrW(2 * i), VsrD(i), 0)
/*
 * VSX_CVT_FP_TO_FP_VECTOR - VSX floating point/floating point conversion
 *   op    - instruction mnemonic
 *   nels  - number of elements (1, 2 or 4)
 *   stp   - source type (float32 or float64)
 *   ttp   - target type (float32 or float64)
 *   sfld  - source vsr_t field
 *   tfld  - target vsr_t field (f32 or f64)
 *   sfprf - set FPRF
 */
#define VSX_CVT_FP_TO_FP_VECTOR(op, nels, stp, ttp, sfld, tfld, sfprf)    \
void helper_##op(CPUPPCState *env, uint32_t opcode,                      \
                 ppc_vsr_t *xt, ppc_vsr_t *xb)                           \
{                                                                        \
    ppc_vsr_t t = *xt;                                                   \
    int i;                                                               \
                                                                         \
    for (i = 0; i < nels; i++) {                                         \
        t.tfld = stp##_to_##ttp(xb->sfld, &env->fp_status);              \
        if (unlikely(stp##_is_signaling_nan(xb->sfld,                    \
                                            &env->fp_status))) {        \
            float_invalid_op_vxsnan(env, GETPC());                       \
            t.tfld = ttp##_snan_to_qnan(t.tfld);                         \
        }                                                                \
        if (sfprf) {                                                     \
            helper_compute_fprf_##ttp(env, t.tfld);                      \
        }                                                                \
    }                                                                    \
                                                                         \
    *xt = t;                                                             \
    do_float_check_status(env, GETPC());                                 \
}

VSX_CVT_FP_TO_FP_VECTOR(xscvdpqp, 1, float64, float128, VsrD(0), f128, 1)
/*
 * VSX_CVT_FP_TO_FP_HP - VSX floating point/floating point conversion
 *                       involving one half precision value
 *   op    - instruction mnemonic
 *   nels  - number of elements (1, 2 or 4)
 *   stp   - source type
 *   ttp   - target type
 *   sfld  - source vsr_t field
 *   tfld  - target vsr_t field
 *   sfprf - set FPRF
 */
#define VSX_CVT_FP_TO_FP_HP(op, nels, stp, ttp, sfld, tfld, sfprf) \
void helper_##op(CPUPPCState *env, ppc_vsr_t *xt, ppc_vsr_t *xb)  \
{                                                                  \
    ppc_vsr_t t = { };                                             \
    int i;                                                         \
                                                                   \
    for (i = 0; i < nels; i++) {                                   \
        t.tfld = stp##_to_##ttp(xb->sfld, 1, &env->fp_status);     \
        if (unlikely(stp##_is_signaling_nan(xb->sfld,              \
                                            &env->fp_status))) {  \
            float_invalid_op_vxsnan(env, GETPC());                 \
            t.tfld = ttp##_snan_to_qnan(t.tfld);                   \
        }                                                          \
        if (sfprf) {                                               \
            helper_compute_fprf_##ttp(env, t.tfld);                \
        }                                                          \
    }                                                              \
                                                                   \
    *xt = t;                                                       \
    do_float_check_status(env, GETPC());                           \
}

VSX_CVT_FP_TO_FP_HP(xscvdphp, 1, float64, float16, VsrD(0), VsrH(3), 1)
VSX_CVT_FP_TO_FP_HP(xscvhpdp, 1, float16, float64, VsrH(3), VsrD(0), 1)
VSX_CVT_FP_TO_FP_HP(xvcvsphp, 4, float32, float16, VsrW(i), VsrH(2 * i + 1), 0)
VSX_CVT_FP_TO_FP_HP(xvcvhpsp, 4, float16, float32, VsrH(2 * i + 1), VsrW(i), 0)
/*
 * xscvqpdp isn't using VSX_CVT_FP_TO_FP() because xscvqpdpo will be
 * added to this later.
 */
void helper_xscvqpdp(CPUPPCState *env, uint32_t opcode,
                     ppc_vsr_t *xt, ppc_vsr_t *xb)
{
    ppc_vsr_t t = { };
    float_status tstat;

    tstat = env->fp_status;
    if (unlikely(Rc(opcode) != 0)) {
        tstat.float_rounding_mode = float_round_to_odd;
    }

    t.VsrD(0) = float128_to_float64(xb->f128, &tstat);
    env->fp_status.float_exception_flags |= tstat.float_exception_flags;
    if (unlikely(float128_is_signaling_nan(xb->f128, &tstat))) {
        float_invalid_op_vxsnan(env, GETPC());
        t.VsrD(0) = float64_snan_to_qnan(t.VsrD(0));
    }
    helper_compute_fprf_float64(env, t.VsrD(0));

    *xt = t;
    do_float_check_status(env, GETPC());
}
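
/*
 * Editorial note (not from the original source): helper_xscvdpspn below
 * performs the non-signalling DP->SP conversion.  The constant 897 is
 * the double-precision biased exponent of the smallest normal single
 * (1023 - 126), so any operand with a smaller exponent must be
 * delivered as a single-precision denormal (or zero) with the biased
 * exponent field forced to 896.
 */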
uint64_t helper_xscvdpspn(CPUPPCState *env, uint64_t xb)
{
    uint64_t result, sign, exp, frac;

    float_status tstat = env->fp_status;
    set_float_exception_flags(0, &tstat);

    sign = extract64(xb, 63, 1);
    exp = extract64(xb, 52, 11);
    frac = extract64(xb, 0, 52) | 0x10000000000000ULL;

    if (unlikely(exp == 0 && extract64(frac, 0, 52) != 0)) {
        /* DP denormal operand. */
        /* Exponent override to DP min exp. */
        exp = 1;
        /* Implicit bit override to 0. */
        frac = deposit64(frac, 53, 1, 0);
    }

    if (unlikely(exp < 897 && frac != 0)) {
        /* SP tiny operand. */
        if (897 - exp > 63) {
            frac = 0;
        } else {
            /* Denormalize until exp = SP min exp. */
            frac >>= (897 - exp);
        }
        /* Exponent override to SP min exp - 1. */
        exp = 896;
    }

    result = sign << 31;
    result |= extract64(exp, 10, 1) << 30;
    result |= extract64(exp, 0, 7) << 23;
    result |= extract64(frac, 29, 23);

    /* hardware replicates result to both words of the doubleword result. */
    return (result << 32) | result;
}
uint64_t helper_xscvspdpn(CPUPPCState *env, uint64_t xb)
{
    float_status tstat = env->fp_status;
    set_float_exception_flags(0, &tstat);

    return float32_to_float64(xb >> 32, &tstat);
}
/*
 * VSX_CVT_FP_TO_INT - VSX floating point to integer conversion
 *   op    - instruction mnemonic
 *   nels  - number of elements (1, 2 or 4)
 *   stp   - source type (float32 or float64)
 *   ttp   - target type (int32, uint32, int64 or uint64)
 *   sfld  - source vsr_t field
 *   tfld  - target vsr_t field
 *   rnan  - resulting NaN
 */
#define VSX_CVT_FP_TO_INT(op, nels, stp, ttp, sfld, tfld, rnan)              \
void helper_##op(CPUPPCState *env, ppc_vsr_t *xt, ppc_vsr_t *xb)            \
{                                                                            \
    int all_flags = env->fp_status.float_exception_flags, flags;             \
    ppc_vsr_t t = *xt;                                                       \
    int i;                                                                   \
                                                                             \
    for (i = 0; i < nels; i++) {                                             \
        env->fp_status.float_exception_flags = 0;                            \
        t.tfld = stp##_to_##ttp##_round_to_zero(xb->sfld, &env->fp_status);  \
        flags = env->fp_status.float_exception_flags;                        \
        if (unlikely(flags & float_flag_invalid)) {                          \
            t.tfld = float_invalid_cvt(env, flags, t.tfld, rnan, 0, GETPC());\
        }                                                                    \
        all_flags |= flags;                                                  \
    }                                                                        \
                                                                             \
    *xt = t;                                                                 \
    env->fp_status.float_exception_flags = all_flags;                        \
    do_float_check_status(env, GETPC());                                     \
}

VSX_CVT_FP_TO_INT(xscvdpsxds, 1, float64, int64, VsrD(0), VsrD(0), \
                  0x8000000000000000ULL)
VSX_CVT_FP_TO_INT(xscvdpsxws, 1, float64, int32, VsrD(0), VsrW(1), \
                  0x80000000U)
VSX_CVT_FP_TO_INT(xscvdpuxds, 1, float64, uint64, VsrD(0), VsrD(0), 0ULL)
VSX_CVT_FP_TO_INT(xscvdpuxws, 1, float64, uint32, VsrD(0), VsrW(1), 0U)
VSX_CVT_FP_TO_INT(xvcvdpsxds, 2, float64, int64, VsrD(i), VsrD(i), \
                  0x8000000000000000ULL)
VSX_CVT_FP_TO_INT(xvcvdpsxws, 2, float64, int32, VsrD(i), VsrW(2 * i), \
                  0x80000000U)
VSX_CVT_FP_TO_INT(xvcvdpuxds, 2, float64, uint64, VsrD(i), VsrD(i), 0ULL)
VSX_CVT_FP_TO_INT(xvcvdpuxws, 2, float64, uint32, VsrD(i), VsrW(2 * i), 0U)
VSX_CVT_FP_TO_INT(xvcvspsxds, 2, float32, int64, VsrW(2 * i), VsrD(i), \
                  0x8000000000000000ULL)
VSX_CVT_FP_TO_INT(xvcvspsxws, 4, float32, int32, VsrW(i), VsrW(i), 0x80000000U)
VSX_CVT_FP_TO_INT(xvcvspuxds, 2, float32, uint64, VsrW(2 * i), VsrD(i), 0ULL)
VSX_CVT_FP_TO_INT(xvcvspuxws, 4, float32, uint32, VsrW(i), VsrW(i), 0U)
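
/*
 * Editorial note (not from the original source): the final macro
 * argument above (rnan) appears to be the substitute result delivered
 * by float_invalid_cvt() when the conversion raises the invalid
 * exception for a NaN source: the minimum signed integer for signed
 * targets and zero for unsigned ones.
 */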
/*
 * VSX_CVT_FP_TO_INT_VECTOR - VSX floating point to integer conversion
 *   op    - instruction mnemonic
 *   stp   - source type (float32 or float64)
 *   ttp   - target type (int32, uint32, int64 or uint64)
 *   sfld  - source vsr_t field
 *   tfld  - target vsr_t field
 *   rnan  - resulting NaN
 */
#define VSX_CVT_FP_TO_INT_VECTOR(op, stp, ttp, sfld, tfld, rnan)             \
void helper_##op(CPUPPCState *env, uint32_t opcode,                         \
                 ppc_vsr_t *xt, ppc_vsr_t *xb)                               \
{                                                                            \
    ppc_vsr_t t = { };                                                       \
    int flags;                                                               \
                                                                             \
    t.tfld = stp##_to_##ttp##_round_to_zero(xb->sfld, &env->fp_status);      \
    flags = get_float_exception_flags(&env->fp_status);                      \
    if (flags & float_flag_invalid) {                                        \
        t.tfld = float_invalid_cvt(env, flags, t.tfld, rnan, 0, GETPC());    \
    }                                                                        \
                                                                             \
    *xt = t;                                                                 \
    do_float_check_status(env, GETPC());                                     \
}

VSX_CVT_FP_TO_INT_VECTOR(xscvqpsdz, float128, int64, f128, VsrD(0),          \
                  0x8000000000000000ULL)

VSX_CVT_FP_TO_INT_VECTOR(xscvqpswz, float128, int32, f128, VsrD(0),          \
                  0xffffffff80000000ULL)
VSX_CVT_FP_TO_INT_VECTOR(xscvqpudz, float128, uint64, f128, VsrD(0), 0x0ULL)
VSX_CVT_FP_TO_INT_VECTOR(xscvqpuwz, float128, uint32, f128, VsrD(0), 0x0ULL)
/*
 * VSX_CVT_INT_TO_FP - VSX integer to floating point conversion
 *   op    - instruction mnemonic
 *   nels  - number of elements (1, 2 or 4)
 *   stp   - source type (int32, uint32, int64 or uint64)
 *   ttp   - target type (float32 or float64)
 *   sfld  - source vsr_t field
 *   tfld  - target vsr_t field
 *   jdef  - definition of the j index (i or 2*i)
 *   sfprf - set FPRF
 */
#define VSX_CVT_INT_TO_FP(op, nels, stp, ttp, sfld, tfld, sfprf, r2sp)  \
void helper_##op(CPUPPCState *env, ppc_vsr_t *xt, ppc_vsr_t *xb)       \
{                                                                       \
    ppc_vsr_t t = *xt;                                                  \
    int i;                                                              \
                                                                        \
    for (i = 0; i < nels; i++) {                                        \
        t.tfld = stp##_to_##ttp(xb->sfld, &env->fp_status);             \
        if (r2sp) {                                                     \
            t.tfld = do_frsp(env, t.tfld, GETPC());                     \
        }                                                               \
        if (sfprf) {                                                    \
            helper_compute_fprf_float64(env, t.tfld);                   \
        }                                                               \
    }                                                                   \
                                                                        \
    *xt = t;                                                            \
    do_float_check_status(env, GETPC());                                \
}

VSX_CVT_INT_TO_FP(xscvsxddp, 1, int64, float64, VsrD(0), VsrD(0), 1, 0)
VSX_CVT_INT_TO_FP(xscvuxddp, 1, uint64, float64, VsrD(0), VsrD(0), 1, 0)
VSX_CVT_INT_TO_FP(xscvsxdsp, 1, int64, float64, VsrD(0), VsrD(0), 1, 1)
VSX_CVT_INT_TO_FP(xscvuxdsp, 1, uint64, float64, VsrD(0), VsrD(0), 1, 1)
VSX_CVT_INT_TO_FP(xvcvsxddp, 2, int64, float64, VsrD(i), VsrD(i), 0, 0)
VSX_CVT_INT_TO_FP(xvcvuxddp, 2, uint64, float64, VsrD(i), VsrD(i), 0, 0)
VSX_CVT_INT_TO_FP(xvcvsxwdp, 2, int32, float64, VsrW(2 * i), VsrD(i), 0, 0)
VSX_CVT_INT_TO_FP(xvcvuxwdp, 2, uint64, float64, VsrW(2 * i), VsrD(i), 0, 0)
VSX_CVT_INT_TO_FP(xvcvsxdsp, 2, int64, float32, VsrD(i), VsrW(2 * i), 0, 0)
VSX_CVT_INT_TO_FP(xvcvuxdsp, 2, uint64, float32, VsrD(i), VsrW(2 * i), 0, 0)
VSX_CVT_INT_TO_FP(xvcvsxwsp, 4, int32, float32, VsrW(i), VsrW(i), 0, 0)
VSX_CVT_INT_TO_FP(xvcvuxwsp, 4, uint32, float32, VsrW(i), VsrW(i), 0, 0)
/*
 * VSX_CVT_INT_TO_FP_VECTOR - VSX integer to floating point conversion
 *   op    - instruction mnemonic
 *   stp   - source type (int32, uint32, int64 or uint64)
 *   ttp   - target type (float32 or float64)
 *   sfld  - source vsr_t field
 *   tfld  - target vsr_t field
 */
#define VSX_CVT_INT_TO_FP_VECTOR(op, stp, ttp, sfld, tfld)          \
void helper_##op(CPUPPCState *env, uint32_t opcode,                 \
                 ppc_vsr_t *xt, ppc_vsr_t *xb)                      \
{                                                                   \
    ppc_vsr_t t = *xt;                                              \
                                                                    \
    t.tfld = stp##_to_##ttp(xb->sfld, &env->fp_status);             \
    helper_compute_fprf_##ttp(env, t.tfld);                         \
                                                                    \
    *xt = t;                                                        \
    do_float_check_status(env, GETPC());                            \
}

VSX_CVT_INT_TO_FP_VECTOR(xscvsdqp, int64, float128, VsrD(0), f128)
VSX_CVT_INT_TO_FP_VECTOR(xscvudqp, uint64, float128, VsrD(0), f128)
2889 * For "use current rounding mode", define a value that will not be
2890 * one of the existing rounding model enums.
2892 #define FLOAT_ROUND_CURRENT (float_round_nearest_even + float_round_down + \
2893 float_round_up + float_round_to_zero)
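
/*
 * Editorial note (not from the original source): FLOAT_ROUND_CURRENT is
 * only ever compared against the compile-time "rmode" argument of
 * VSX_ROUND below and is never passed to set_float_rounding_mode(), so
 * all that matters is that it cannot equal any real FloatRoundMode
 * enumerator.
 */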
/*
 * VSX_ROUND - VSX floating point round
 *   op    - instruction mnemonic
 *   nels  - number of elements (1, 2 or 4)
 *   tp    - type (float32 or float64)
 *   fld   - vsr_t field (VsrD(*) or VsrW(*))
 *   rmode - rounding mode
 *   sfprf - set FPRF
 */
#define VSX_ROUND(op, nels, tp, fld, rmode, sfprf)                     \
void helper_##op(CPUPPCState *env, ppc_vsr_t *xt, ppc_vsr_t *xb)      \
{                                                                      \
    ppc_vsr_t t = *xt;                                                 \
    int i;                                                             \
    FloatRoundMode curr_rounding_mode;                                 \
                                                                       \
    if (rmode != FLOAT_ROUND_CURRENT) {                                \
        curr_rounding_mode = get_float_rounding_mode(&env->fp_status); \
        set_float_rounding_mode(rmode, &env->fp_status);               \
    }                                                                  \
                                                                       \
    for (i = 0; i < nels; i++) {                                       \
        if (unlikely(tp##_is_signaling_nan(xb->fld,                    \
                                           &env->fp_status))) {       \
            float_invalid_op_vxsnan(env, GETPC());                     \
            t.fld = tp##_snan_to_qnan(xb->fld);                        \
        } else {                                                       \
            t.fld = tp##_round_to_int(xb->fld, &env->fp_status);       \
        }                                                              \
        if (sfprf) {                                                   \
            helper_compute_fprf_float64(env, t.fld);                   \
        }                                                              \
    }                                                                  \
                                                                       \
    /*                                                                 \
     * If this is not a "use current rounding mode" instruction,       \
     * then inhibit setting of the XX bit and restore rounding         \
     * mode from FPSCR.                                                \
     */                                                                \
    if (rmode != FLOAT_ROUND_CURRENT) {                                \
        set_float_rounding_mode(curr_rounding_mode, &env->fp_status);  \
        env->fp_status.float_exception_flags &= ~float_flag_inexact;   \
    }                                                                  \
                                                                       \
    *xt = t;                                                           \
    do_float_check_status(env, GETPC());                               \
}

VSX_ROUND(xsrdpi, 1, float64, VsrD(0), float_round_ties_away, 1)
VSX_ROUND(xsrdpic, 1, float64, VsrD(0), FLOAT_ROUND_CURRENT, 1)
VSX_ROUND(xsrdpim, 1, float64, VsrD(0), float_round_down, 1)
VSX_ROUND(xsrdpip, 1, float64, VsrD(0), float_round_up, 1)
VSX_ROUND(xsrdpiz, 1, float64, VsrD(0), float_round_to_zero, 1)

VSX_ROUND(xvrdpi, 2, float64, VsrD(i), float_round_ties_away, 0)
VSX_ROUND(xvrdpic, 2, float64, VsrD(i), FLOAT_ROUND_CURRENT, 0)
VSX_ROUND(xvrdpim, 2, float64, VsrD(i), float_round_down, 0)
VSX_ROUND(xvrdpip, 2, float64, VsrD(i), float_round_up, 0)
VSX_ROUND(xvrdpiz, 2, float64, VsrD(i), float_round_to_zero, 0)

VSX_ROUND(xvrspi, 4, float32, VsrW(i), float_round_ties_away, 0)
VSX_ROUND(xvrspic, 4, float32, VsrW(i), FLOAT_ROUND_CURRENT, 0)
VSX_ROUND(xvrspim, 4, float32, VsrW(i), float_round_down, 0)
VSX_ROUND(xvrspip, 4, float32, VsrW(i), float_round_up, 0)
VSX_ROUND(xvrspiz, 4, float32, VsrW(i), float_round_to_zero, 0)
uint64_t helper_xsrsp(CPUPPCState *env, uint64_t xb)
{
    helper_reset_fpstatus(env);

    uint64_t xt = do_frsp(env, xb, GETPC());

    helper_compute_fprf_float64(env, xt);
    do_float_check_status(env, GETPC());
    return xt;
}
#define VSX_XXPERM(op, indexed)                                       \
void helper_##op(CPUPPCState *env, ppc_vsr_t *xt,                     \
                 ppc_vsr_t *xa, ppc_vsr_t *pcv)                       \
{                                                                     \
    ppc_vsr_t t = *xt;                                                \
    int i, idx;                                                       \
                                                                      \
    for (i = 0; i < 16; i++) {                                        \
        idx = pcv->VsrB(i) & 0x1F;                                    \
        if (indexed) {                                                \
            idx = 31 - idx;                                           \
        }                                                             \
        t.VsrB(i) = (idx <= 15) ? xa->VsrB(idx)                       \
                                : xt->VsrB(idx - 16);                 \
    }                                                                 \
                                                                      \
    *xt = t;                                                          \
}

VSX_XXPERM(xxperm, 0)
VSX_XXPERM(xxpermr, 1)
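
/*
 * Editorial note (not from the original source): helper_xvxsigsp below
 * extracts the 23-bit fraction of each single-precision word and ORs in
 * the implicit integer bit (0x00800000) only for normal numbers, i.e.
 * when the biased exponent is neither 0 (zero/denormal) nor 255
 * (infinity/NaN).
 */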
void helper_xvxsigsp(CPUPPCState *env, ppc_vsr_t *xt, ppc_vsr_t *xb)
{
    ppc_vsr_t t = { };
    uint32_t exp, i, fraction;

    for (i = 0; i < 4; i++) {
        exp = (xb->VsrW(i) >> 23) & 0xFF;
        fraction = xb->VsrW(i) & 0x7FFFFF;
        if (exp != 0 && exp != 255) {
            t.VsrW(i) = fraction | 0x00800000;
        } else {
            t.VsrW(i) = fraction;
        }
    }
    *xt = t;
}
/*
 * VSX_TEST_DC - VSX floating point test data class
 *   op    - instruction mnemonic
 *   nels  - number of elements (1, 2 or 4)
 *   xbn   - VSR register number
 *   tp    - type (float32 or float64)
 *   fld   - vsr_t field (VsrD(*) or VsrW(*))
 *   tfld  - target vsr_t field (VsrD(*) or VsrW(*))
 *   fld_max - target field max
 *   scrf  - set result in CR and FPCC
 */
#define VSX_TEST_DC(op, nels, xbn, tp, fld, tfld, fld_max, scrf)  \
void helper_##op(CPUPPCState *env, uint32_t opcode)               \
{                                                                 \
    ppc_vsr_t *xt = &env->vsr[xT(opcode)];                        \
    ppc_vsr_t *xb = &env->vsr[xbn];                               \
    ppc_vsr_t t = { };                                            \
    uint32_t i, sign, dcmx;                                       \
    uint32_t cc, match = 0;                                       \
                                                                  \
    if (!scrf) {                                                  \
        dcmx = DCMX_XV(opcode);                                   \
    } else {                                                      \
        t = *xt;                                                  \
        dcmx = DCMX(opcode);                                      \
    }                                                             \
                                                                  \
    for (i = 0; i < nels; i++) {                                  \
        sign = tp##_is_neg(xb->fld);                              \
        if (tp##_is_any_nan(xb->fld)) {                           \
            match = extract32(dcmx, 6, 1);                        \
        } else if (tp##_is_infinity(xb->fld)) {                   \
            match = extract32(dcmx, 4 + !sign, 1);                \
        } else if (tp##_is_zero(xb->fld)) {                       \
            match = extract32(dcmx, 2 + !sign, 1);                \
        } else if (tp##_is_zero_or_denormal(xb->fld)) {           \
            match = extract32(dcmx, 0 + !sign, 1);                \
        }                                                         \
                                                                  \
        if (scrf) {                                               \
            cc = sign << CRF_LT_BIT | match << CRF_EQ_BIT;        \
            env->fpscr &= ~FP_FPCC;                               \
            env->fpscr |= cc << FPSCR_FPCC;                       \
            env->crf[BF(opcode)] = cc;                            \
        } else {                                                  \
            t.tfld = match ? fld_max : 0;                         \
        }                                                         \
        match = 0;                                                \
    }                                                             \
    if (!scrf) {                                                  \
        *xt = t;                                                  \
    }                                                             \
}

VSX_TEST_DC(xvtstdcdp, 2, xB(opcode), float64, VsrD(i), VsrD(i), UINT64_MAX, 0)
VSX_TEST_DC(xvtstdcsp, 4, xB(opcode), float32, VsrW(i), VsrW(i), UINT32_MAX, 0)
VSX_TEST_DC(xststdcdp, 1, xB(opcode), float64, VsrD(0), VsrD(0), 0, 1)
VSX_TEST_DC(xststdcqp, 1, (rB(opcode) + 32), float128, f128, VsrD(0), 0, 1)
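
/*
 * Editorial note (not from the original source): as the extract32()
 * calls above encode, the DCMX mask bits select, from bit 6 down to
 * bit 0: NaN, +Infinity, -Infinity, +Zero, -Zero, +Denormal, -Denormal.
 * helper_xststdcsp below adds the checks for operands that are not
 * representable in single precision (biased DP exponent below
 * 0x381 == 897, or an inexact round trip through float32).
 */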
void helper_xststdcsp(CPUPPCState *env, uint32_t opcode, ppc_vsr_t *xb)
{
    uint32_t dcmx, sign, exp;
    uint32_t cc, match = 0, not_sp = 0;

    dcmx = DCMX(opcode);
    exp = (xb->VsrD(0) >> 52) & 0x7FF;

    sign = float64_is_neg(xb->VsrD(0));
    if (float64_is_any_nan(xb->VsrD(0))) {
        match = extract32(dcmx, 6, 1);
    } else if (float64_is_infinity(xb->VsrD(0))) {
        match = extract32(dcmx, 4 + !sign, 1);
    } else if (float64_is_zero(xb->VsrD(0))) {
        match = extract32(dcmx, 2 + !sign, 1);
    } else if (float64_is_zero_or_denormal(xb->VsrD(0)) ||
               (exp > 0 && exp < 0x381)) {
        match = extract32(dcmx, 0 + !sign, 1);
    }

    not_sp = !float64_eq(xb->VsrD(0),
                         float32_to_float64(
                             float64_to_float32(xb->VsrD(0), &env->fp_status),
                             &env->fp_status), &env->fp_status);

    cc = sign << CRF_LT_BIT | match << CRF_EQ_BIT | not_sp << CRF_SO_BIT;
    env->fpscr &= ~FP_FPCC;
    env->fpscr |= cc << FPSCR_FPCC;
    env->crf[BF(opcode)] = cc;
}
void helper_xsrqpi(CPUPPCState *env, uint32_t opcode,
                   ppc_vsr_t *xt, ppc_vsr_t *xb)
{
    ppc_vsr_t t = { };
    uint8_t r = Rrm(opcode);
    uint8_t ex = Rc(opcode);
    uint8_t rmc = RMC(opcode);
    uint8_t rmode = 0;
    float_status tstat;

    helper_reset_fpstatus(env);

    if (r == 0 && rmc == 0) {
        rmode = float_round_ties_away;
    } else if (r == 0 && rmc == 0x3) {
        rmode = fpscr_rn;
    } else if (r == 1) {
        switch (rmc) {
        case 0:
            rmode = float_round_nearest_even;
            break;
        case 1:
            rmode = float_round_to_zero;
            break;
        case 2:
            rmode = float_round_up;
            break;
        case 3:
            rmode = float_round_down;
            break;
        default:
            g_assert_not_reached();
        }
    }

    tstat = env->fp_status;
    set_float_exception_flags(0, &tstat);
    set_float_rounding_mode(rmode, &tstat);
    t.f128 = float128_round_to_int(xb->f128, &tstat);
    env->fp_status.float_exception_flags |= tstat.float_exception_flags;

    if (unlikely(tstat.float_exception_flags & float_flag_invalid)) {
        if (float128_is_signaling_nan(xb->f128, &tstat)) {
            float_invalid_op_vxsnan(env, GETPC());
            t.f128 = float128_snan_to_qnan(t.f128);
        }
    }

    if (ex == 0 && (tstat.float_exception_flags & float_flag_inexact)) {
        env->fp_status.float_exception_flags &= ~float_flag_inexact;
    }

    helper_compute_fprf_float128(env, t.f128);
    do_float_check_status(env, GETPC());
    *xt = t;
}
void helper_xsrqpxp(CPUPPCState *env, uint32_t opcode,
                    ppc_vsr_t *xt, ppc_vsr_t *xb)
{
    ppc_vsr_t t = { };
    uint8_t r = Rrm(opcode);
    uint8_t rmc = RMC(opcode);
    uint8_t rmode = 0;
    floatx80 round_res;
    float_status tstat;

    helper_reset_fpstatus(env);

    if (r == 0 && rmc == 0) {
        rmode = float_round_ties_away;
    } else if (r == 0 && rmc == 0x3) {
        rmode = fpscr_rn;
    } else if (r == 1) {
        switch (rmc) {
        case 0:
            rmode = float_round_nearest_even;
            break;
        case 1:
            rmode = float_round_to_zero;
            break;
        case 2:
            rmode = float_round_up;
            break;
        case 3:
            rmode = float_round_down;
            break;
        default:
            g_assert_not_reached();
        }
    }

    tstat = env->fp_status;
    set_float_exception_flags(0, &tstat);
    set_float_rounding_mode(rmode, &tstat);
    round_res = float128_to_floatx80(xb->f128, &tstat);
    t.f128 = floatx80_to_float128(round_res, &tstat);
    env->fp_status.float_exception_flags |= tstat.float_exception_flags;

    if (unlikely(tstat.float_exception_flags & float_flag_invalid)) {
        if (float128_is_signaling_nan(xb->f128, &tstat)) {
            float_invalid_op_vxsnan(env, GETPC());
            t.f128 = float128_snan_to_qnan(t.f128);
        }
    }

    helper_compute_fprf_float128(env, t.f128);
    *xt = t;
    do_float_check_status(env, GETPC());
}
void helper_xssqrtqp(CPUPPCState *env, uint32_t opcode,
                     ppc_vsr_t *xt, ppc_vsr_t *xb)
{
    ppc_vsr_t t = { };
    float_status tstat;

    helper_reset_fpstatus(env);

    tstat = env->fp_status;
    if (unlikely(Rc(opcode) != 0)) {
        tstat.float_rounding_mode = float_round_to_odd;
    }

    set_float_exception_flags(0, &tstat);
    t.f128 = float128_sqrt(xb->f128, &tstat);
    env->fp_status.float_exception_flags |= tstat.float_exception_flags;

    if (unlikely(tstat.float_exception_flags & float_flag_invalid)) {
        float_invalid_op_sqrt(env, tstat.float_exception_flags, 1, GETPC());
    }

    helper_compute_fprf_float128(env, t.f128);
    *xt = t;
    do_float_check_status(env, GETPC());
}
void helper_xssubqp(CPUPPCState *env, uint32_t opcode,
                    ppc_vsr_t *xt, ppc_vsr_t *xa, ppc_vsr_t *xb)
{
    ppc_vsr_t t = *xt;
    float_status tstat;

    helper_reset_fpstatus(env);

    tstat = env->fp_status;
    if (unlikely(Rc(opcode) != 0)) {
        tstat.float_rounding_mode = float_round_to_odd;
    }

    set_float_exception_flags(0, &tstat);
    t.f128 = float128_sub(xa->f128, xb->f128, &tstat);
    env->fp_status.float_exception_flags |= tstat.float_exception_flags;

    if (unlikely(tstat.float_exception_flags & float_flag_invalid)) {
        float_invalid_op_addsub(env, tstat.float_exception_flags, 1, GETPC());
    }

    helper_compute_fprf_float128(env, t.f128);
    *xt = t;
    do_float_check_status(env, GETPC());
}