/*
 *  PowerPC floating point and SPE emulation helpers for QEMU.
 *
 *  Copyright (c) 2003-2007 Jocelyn Mayer
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"
#include "cpu.h"
#include "exec/helper-proto.h"
#include "exec/exec-all.h"
#include "internal.h"
#include "fpu/softfloat.h"
static inline float128 float128_snan_to_qnan(float128 x)
{
    float128 r;

    r.high = x.high | 0x0000800000000000;
    r.low = x.low;
    return r;
}
#define float64_snan_to_qnan(x) ((x) | 0x0008000000000000ULL)
#define float32_snan_to_qnan(x) ((x) | 0x00400000)
#define float16_snan_to_qnan(x) ((x) | 0x0200)
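/*
 * Each snan_to_qnan form ORs in the most-significant fraction bit,
 * which IEEE 754-2008 defines as the "quiet" bit: bit 111 of the
 * float128 fraction, bit 51 for float64, bit 22 for float32 and
 * bit 9 for float16.
 */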
static inline bool fp_exceptions_enabled(CPUPPCState *env)
{
#ifdef CONFIG_USER_ONLY
    return true;
#else
    return (env->msr & ((1U << MSR_FE0) | (1U << MSR_FE1))) != 0;
#endif
}
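/*
 * In user-only mode, precise FP exceptions are always considered
 * enabled; in system mode, delivery additionally requires one of
 * MSR[FE0]/MSR[FE1] to be set, as tested above.
 */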
/*****************************************************************************/
/* Floating point operations helpers */

/*
 * This is the non-arithmetic conversion that happens e.g. on loads.
 * In the Power ISA pseudocode, this is called DOUBLE.
 */
uint64_t helper_todouble(uint32_t arg)
{
    uint32_t abs_arg = arg & 0x7fffffff;
    uint64_t ret;

    if (likely(abs_arg >= 0x00800000)) {
        if (unlikely(extract32(arg, 23, 8) == 0xff)) {
            /* Inf or NAN.  */
            ret  = (uint64_t)extract32(arg, 31, 1) << 63;
            ret |= (uint64_t)0x7ff << 52;
            ret |= (uint64_t)extract32(arg, 0, 23) << 29;
        } else {
            /* Normalized operand.  */
            ret  = (uint64_t)extract32(arg, 30, 2) << 62;
            ret |= ((extract32(arg, 30, 1) ^ 1) * (uint64_t)7) << 59;
            ret |= (uint64_t)extract32(arg, 0, 30) << 29;
        }
    } else {
        /* Zero or Denormalized operand.  */
        ret = (uint64_t)extract32(arg, 31, 1) << 63;
        if (unlikely(abs_arg != 0)) {
            /*
             * Denormalized operand.
             * Shift fraction so that the msb is in the implicit bit position.
             * Thus, shift is in the range [1:23].
             */
            int shift = clz32(abs_arg) - 8;
            /*
             * The first 3 terms compute the float64 exponent.  We then bias
             * this result by -1 so that we can swallow the implicit bit below.
             */
            int exp = -126 - shift + 1023 - 1;

            ret |= (uint64_t)exp << 52;
            ret += (uint64_t)abs_arg << (52 - 23 + shift);
        }
    }
    return ret;
}
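/*
 * Worked example (illustrative values, not from the ISA text): 1.0f,
 * i.e. 0x3f800000, takes the normalized path and expands to
 * 0x3ff0000000000000, while the smallest float32 denormal 0x00000001
 * (2^-149) takes the denormal path with shift = 23 and exp = 873,
 * yielding 0x36a0000000000000.
 */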
/*
 * This is the non-arithmetic conversion that happens e.g. on stores.
 * In the Power ISA pseudocode, this is called SINGLE.
 */
uint32_t helper_tosingle(uint64_t arg)
{
    int exp = extract64(arg, 52, 11);
    uint32_t ret;

    if (likely(exp > 896)) {
        /* No denormalization required (includes Inf, NaN).  */
        ret  = extract64(arg, 62, 2) << 30;
        ret |= extract64(arg, 29, 30);
    } else {
        /*
         * Zero or Denormal result.  If the exponent is in bounds for
         * a single-precision denormal result, extract the proper
         * bits.  If the input is not zero, and the exponent is out of
         * bounds, then the result is undefined; this underflows to
         * zero.
         */
        ret = extract64(arg, 63, 1) << 31;
        if (unlikely(exp >= 874)) {
            /* Denormal result.  */
            ret |= ((1ULL << 52) | extract64(arg, 0, 52)) >> (896 + 30 - exp);
        }
    }
    return ret;
}
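/*
 * Worked example (illustrative): 0x3ff0000000000000 (1.0) has biased
 * exponent 1023 > 896, so the fast path packs the sign, the low
 * exponent bits and the top of the fraction into 0x3f800000 (1.0f);
 * a non-zero input with biased exponent below 874 (less than 2^-149)
 * underflows to a signed zero.
 */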
static inline int ppc_float32_get_unbiased_exp(float32 f)
{
    return ((f >> 23) & 0xFF) - 127;
}

static inline int ppc_float64_get_unbiased_exp(float64 f)
{
    return ((f >> 52) & 0x7FF) - 1023;
}
/* Classify a floating-point number.  */
enum {
    is_normal   = 1,
    is_zero     = 2,
    is_denormal = 4,
    is_inf      = 8,
    is_qnan     = 16,
    is_snan     = 32,
    is_neg      = 64,
};

#define COMPUTE_CLASS(tp)                                      \
static int tp##_classify(tp arg)                               \
{                                                              \
    int ret = tp##_is_neg(arg) * is_neg;                       \
    if (unlikely(tp##_is_any_nan(arg))) {                      \
        float_status dummy = { };  /* snan_bit_is_one = 0 */   \
        ret |= (tp##_is_signaling_nan(arg, &dummy)             \
                ? is_snan : is_qnan);                          \
    } else if (unlikely(tp##_is_infinity(arg))) {              \
        ret |= is_inf;                                         \
    } else if (tp##_is_zero(arg)) {                            \
        ret |= is_zero;                                        \
    } else if (tp##_is_zero_or_denormal(arg)) {                \
        ret |= is_denormal;                                    \
    } else {                                                   \
        ret |= is_normal;                                      \
    }                                                          \
    return ret;                                                \
}

COMPUTE_CLASS(float16)
COMPUTE_CLASS(float32)
COMPUTE_CLASS(float64)
COMPUTE_CLASS(float128)
static void set_fprf_from_class(CPUPPCState *env, int class)
{
    static const uint8_t fprf[6][2] = {
        { 0x04, 0x08 },  /* normalized */
        { 0x02, 0x12 },  /* zero */
        { 0x14, 0x18 },  /* denormalized */
        { 0x05, 0x09 },  /* infinity */
        { 0x11, 0x11 },  /* qnan */
        { 0x00, 0x00 },  /* snan -- flags are undefined */
    };
    bool isneg = class & is_neg;

    env->fpscr &= ~FP_FPRF;
    env->fpscr |= fprf[ctz32(class)][isneg] << FPSCR_FPRF;
}
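/*
 * The rows are indexed by ctz32() of the class bits and the columns by
 * the sign, yielding the 5-bit Power ISA FPRF encoding C FL FG FE FU:
 * e.g. +normal is 0b00100 (0x04), -zero is 0b10010 (0x12) and QNaN is
 * 0b10001 (0x11).
 */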
#define COMPUTE_FPRF(tp)                                \
void helper_compute_fprf_##tp(CPUPPCState *env, tp arg) \
{                                                       \
    set_fprf_from_class(env, tp##_classify(arg));       \
}

COMPUTE_FPRF(float16)
COMPUTE_FPRF(float32)
COMPUTE_FPRF(float64)
COMPUTE_FPRF(float128)
/* Floating-point invalid operations exception */
static void finish_invalid_op_excp(CPUPPCState *env, int op, uintptr_t retaddr)
{
    /* Update the floating-point invalid operation summary */
    env->fpscr |= FP_VX;
    /* Update the floating-point exception summary */
    env->fpscr |= FP_FX;
    if (env->fpscr & FP_VE) {
        /* Update the floating-point enabled exception summary */
        env->fpscr |= FP_FEX;
        if (fp_exceptions_enabled(env)) {
            raise_exception_err_ra(env, POWERPC_EXCP_PROGRAM,
                                   POWERPC_EXCP_FP | op, retaddr);
        }
    }
}
static void finish_invalid_op_arith(CPUPPCState *env, int op,
                                    bool set_fpcc, uintptr_t retaddr)
{
    env->fpscr &= ~(FP_FR | FP_FI);
    if (!(env->fpscr & FP_VE)) {
        if (set_fpcc) {
            env->fpscr &= ~FP_FPCC;
            env->fpscr |= (FP_C | FP_FU);
        }
    }
    finish_invalid_op_excp(env, op, retaddr);
}
/* Signalling NaN */
static void float_invalid_op_vxsnan(CPUPPCState *env, uintptr_t retaddr)
{
    env->fpscr |= FP_VXSNAN;
    finish_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, retaddr);
}
/* Magnitude subtraction of infinities */
static void float_invalid_op_vxisi(CPUPPCState *env, bool set_fpcc,
                                   uintptr_t retaddr)
{
    env->fpscr |= FP_VXISI;
    finish_invalid_op_arith(env, POWERPC_EXCP_FP_VXISI, set_fpcc, retaddr);
}
/* Division of infinity by infinity */
static void float_invalid_op_vxidi(CPUPPCState *env, bool set_fpcc,
                                   uintptr_t retaddr)
{
    env->fpscr |= FP_VXIDI;
    finish_invalid_op_arith(env, POWERPC_EXCP_FP_VXIDI, set_fpcc, retaddr);
}
/* Division of zero by zero */
static void float_invalid_op_vxzdz(CPUPPCState *env, bool set_fpcc,
                                   uintptr_t retaddr)
{
    env->fpscr |= FP_VXZDZ;
    finish_invalid_op_arith(env, POWERPC_EXCP_FP_VXZDZ, set_fpcc, retaddr);
}
/* Multiplication of zero by infinity */
static void float_invalid_op_vximz(CPUPPCState *env, bool set_fpcc,
                                   uintptr_t retaddr)
{
    env->fpscr |= FP_VXIMZ;
    finish_invalid_op_arith(env, POWERPC_EXCP_FP_VXIMZ, set_fpcc, retaddr);
}
/* Square root of a negative number */
static void float_invalid_op_vxsqrt(CPUPPCState *env, bool set_fpcc,
                                    uintptr_t retaddr)
{
    env->fpscr |= FP_VXSQRT;
    finish_invalid_op_arith(env, POWERPC_EXCP_FP_VXSQRT, set_fpcc, retaddr);
}
/* Ordered comparison of NaN */
static void float_invalid_op_vxvc(CPUPPCState *env, bool set_fpcc,
                                  uintptr_t retaddr)
{
    env->fpscr |= FP_VXVC;
    if (set_fpcc) {
        env->fpscr &= ~FP_FPCC;
        env->fpscr |= (FP_C | FP_FU);
    }
    /* Update the floating-point invalid operation summary */
    env->fpscr |= FP_VX;
    /* Update the floating-point exception summary */
    env->fpscr |= FP_FX;
    /* We must update the target FPR before raising the exception */
    if (env->fpscr & FP_VE) {
        CPUState *cs = env_cpu(env);

        cs->exception_index = POWERPC_EXCP_PROGRAM;
        env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_VXVC;
        /* Update the floating-point enabled exception summary */
        env->fpscr |= FP_FEX;
        /* Exception is deferred */
    }
}
/* Invalid conversion */
static void float_invalid_op_vxcvi(CPUPPCState *env, bool set_fpcc,
                                   uintptr_t retaddr)
{
    env->fpscr |= FP_VXCVI;
    env->fpscr &= ~(FP_FR | FP_FI);
    if (!(env->fpscr & FP_VE)) {
        if (set_fpcc) {
            env->fpscr &= ~FP_FPCC;
            env->fpscr |= (FP_C | FP_FU);
        }
    }
    finish_invalid_op_excp(env, POWERPC_EXCP_FP_VXCVI, retaddr);
}
static inline void float_zero_divide_excp(CPUPPCState *env, uintptr_t raddr)
{
    env->fpscr |= FP_ZX;
    env->fpscr &= ~(FP_FR | FP_FI);
    /* Update the floating-point exception summary */
    env->fpscr |= FP_FX;
    if (env->fpscr & FP_ZE) {
        /* Update the floating-point enabled exception summary */
        env->fpscr |= FP_FEX;
        if (fp_exceptions_enabled(env)) {
            raise_exception_err_ra(env, POWERPC_EXCP_PROGRAM,
                                   POWERPC_EXCP_FP | POWERPC_EXCP_FP_ZX,
                                   raddr);
        }
    }
}
static inline int float_overflow_excp(CPUPPCState *env)
{
    CPUState *cs = env_cpu(env);

    env->fpscr |= FP_OX;
    /* Update the floating-point exception summary */
    env->fpscr |= FP_FX;

    bool overflow_enabled = !!(env->fpscr & FP_OE);
    if (overflow_enabled) {
        /* XXX: should adjust the result */
        /* Update the floating-point enabled exception summary */
        env->fpscr |= FP_FEX;
        /* We must update the target FPR before raising the exception */
        cs->exception_index = POWERPC_EXCP_PROGRAM;
        env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_OX;
    }

    return overflow_enabled ? 0 : float_flag_inexact;
}
static inline void float_underflow_excp(CPUPPCState *env)
{
    CPUState *cs = env_cpu(env);

    env->fpscr |= FP_UX;
    /* Update the floating-point exception summary */
    env->fpscr |= FP_FX;
    if (env->fpscr & FP_UE) {
        /* XXX: should adjust the result */
        /* Update the floating-point enabled exception summary */
        env->fpscr |= FP_FEX;
        /* We must update the target FPR before raising the exception */
        cs->exception_index = POWERPC_EXCP_PROGRAM;
        env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_UX;
    }
}
static inline void float_inexact_excp(CPUPPCState *env)
{
    CPUState *cs = env_cpu(env);

    env->fpscr |= FP_XX;
    /* Update the floating-point exception summary */
    env->fpscr |= FP_FX;
    if (env->fpscr & FP_XE) {
        /* Update the floating-point enabled exception summary */
        env->fpscr |= FP_FEX;
        /* We must update the target FPR before raising the exception */
        cs->exception_index = POWERPC_EXCP_PROGRAM;
        env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_XX;
    }
}
void helper_fpscr_clrbit(CPUPPCState *env, uint32_t bit)
{
    uint32_t mask = 1u << bit;
    if (env->fpscr & mask) {
        ppc_store_fpscr(env, env->fpscr & ~(target_ulong)mask);
    }
}
void helper_fpscr_setbit(CPUPPCState *env, uint32_t bit)
{
    uint32_t mask = 1u << bit;
    if (!(env->fpscr & mask)) {
        ppc_store_fpscr(env, env->fpscr | mask);
    }
}
void helper_store_fpscr(CPUPPCState *env, uint64_t val, uint32_t nibbles)
{
    target_ulong mask = 0;
    int i;

    /* TODO: push this extension back to translation time */
    for (i = 0; i < sizeof(target_ulong) * 2; i++) {
        if (nibbles & (1 << i)) {
            mask |= (target_ulong) 0xf << (4 * i);
        }
    }
    val = (val & mask) | (env->fpscr & ~mask);
    ppc_store_fpscr(env, val);
}
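/*
 * Illustrative example: nibbles == 0x0003 selects the two low nibbles,
 * so the loop builds mask == 0xff and only FPSCR bits 7:0 are replaced
 * with the corresponding bits of val.
 */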
void helper_fpscr_check_status(CPUPPCState *env)
{
    CPUState *cs = env_cpu(env);
    target_ulong fpscr = env->fpscr;
    int error = 0;

    if ((fpscr & FP_OX) && (fpscr & FP_OE)) {
        error = POWERPC_EXCP_FP_OX;
    } else if ((fpscr & FP_UX) && (fpscr & FP_UE)) {
        error = POWERPC_EXCP_FP_UX;
    } else if ((fpscr & FP_XX) && (fpscr & FP_XE)) {
        error = POWERPC_EXCP_FP_XX;
    } else if ((fpscr & FP_ZX) && (fpscr & FP_ZE)) {
        error = POWERPC_EXCP_FP_ZX;
    } else if (fpscr & FP_VE) {
        if (fpscr & FP_VXSOFT) {
            error = POWERPC_EXCP_FP_VXSOFT;
        } else if (fpscr & FP_VXSNAN) {
            error = POWERPC_EXCP_FP_VXSNAN;
        } else if (fpscr & FP_VXISI) {
            error = POWERPC_EXCP_FP_VXISI;
        } else if (fpscr & FP_VXIDI) {
            error = POWERPC_EXCP_FP_VXIDI;
        } else if (fpscr & FP_VXZDZ) {
            error = POWERPC_EXCP_FP_VXZDZ;
        } else if (fpscr & FP_VXIMZ) {
            error = POWERPC_EXCP_FP_VXIMZ;
        } else if (fpscr & FP_VXVC) {
            error = POWERPC_EXCP_FP_VXVC;
        } else if (fpscr & FP_VXSQRT) {
            error = POWERPC_EXCP_FP_VXSQRT;
        } else if (fpscr & FP_VXCVI) {
            error = POWERPC_EXCP_FP_VXCVI;
        } else {
            return;
        }
    } else {
        return;
    }
    cs->exception_index = POWERPC_EXCP_PROGRAM;
    env->error_code = error | POWERPC_EXCP_FP;
    /* Deferred floating-point exception after target FPSCR update */
    if (fp_exceptions_enabled(env)) {
        raise_exception_err_ra(env, cs->exception_index,
                               env->error_code, GETPC());
    }
}
static void do_float_check_status(CPUPPCState *env, bool change_fi,
                                  uintptr_t raddr)
{
    CPUState *cs = env_cpu(env);
    int status = get_float_exception_flags(&env->fp_status);

    if (status & float_flag_overflow) {
        status |= float_overflow_excp(env);
    } else if (status & float_flag_underflow) {
        float_underflow_excp(env);
    }
    if (status & float_flag_inexact) {
        float_inexact_excp(env);
    }
    if (change_fi) {
        env->fpscr = FIELD_DP64(env->fpscr, FPSCR, FI,
                                !!(status & float_flag_inexact));
    }

    if (cs->exception_index == POWERPC_EXCP_PROGRAM &&
        (env->error_code & POWERPC_EXCP_FP)) {
        /* Deferred floating-point exception after target FPR update */
        if (fp_exceptions_enabled(env)) {
            raise_exception_err_ra(env, cs->exception_index,
                                   env->error_code, raddr);
        }
    }
}
void helper_float_check_status(CPUPPCState *env)
{
    do_float_check_status(env, true, GETPC());
}

void helper_reset_fpstatus(CPUPPCState *env)
{
    set_float_exception_flags(0, &env->fp_status);
}
static void float_invalid_op_addsub(CPUPPCState *env, int flags,
                                    bool set_fpcc, uintptr_t retaddr)
{
    if (flags & float_flag_invalid_isi) {
        float_invalid_op_vxisi(env, set_fpcc, retaddr);
    } else if (flags & float_flag_invalid_snan) {
        float_invalid_op_vxsnan(env, retaddr);
    }
}
/* fadd - fadd. */
float64 helper_fadd(CPUPPCState *env, float64 arg1, float64 arg2)
{
    float64 ret = float64_add(arg1, arg2, &env->fp_status);
    int flags = get_float_exception_flags(&env->fp_status);

    if (unlikely(flags & float_flag_invalid)) {
        float_invalid_op_addsub(env, flags, 1, GETPC());
    }

    return ret;
}

/* fadds - fadds. */
float64 helper_fadds(CPUPPCState *env, float64 arg1, float64 arg2)
{
    float64 ret = float64r32_add(arg1, arg2, &env->fp_status);
    int flags = get_float_exception_flags(&env->fp_status);

    if (unlikely(flags & float_flag_invalid)) {
        float_invalid_op_addsub(env, flags, 1, GETPC());
    }
    return ret;
}

/* fsub - fsub. */
float64 helper_fsub(CPUPPCState *env, float64 arg1, float64 arg2)
{
    float64 ret = float64_sub(arg1, arg2, &env->fp_status);
    int flags = get_float_exception_flags(&env->fp_status);

    if (unlikely(flags & float_flag_invalid)) {
        float_invalid_op_addsub(env, flags, 1, GETPC());
    }

    return ret;
}

/* fsubs - fsubs. */
float64 helper_fsubs(CPUPPCState *env, float64 arg1, float64 arg2)
{
    float64 ret = float64r32_sub(arg1, arg2, &env->fp_status);
    int flags = get_float_exception_flags(&env->fp_status);

    if (unlikely(flags & float_flag_invalid)) {
        float_invalid_op_addsub(env, flags, 1, GETPC());
    }
    return ret;
}
static void float_invalid_op_mul(CPUPPCState *env, int flags,
                                 bool set_fprc, uintptr_t retaddr)
{
    if (flags & float_flag_invalid_imz) {
        float_invalid_op_vximz(env, set_fprc, retaddr);
    } else if (flags & float_flag_invalid_snan) {
        float_invalid_op_vxsnan(env, retaddr);
    }
}
/* fmul - fmul. */
float64 helper_fmul(CPUPPCState *env, float64 arg1, float64 arg2)
{
    float64 ret = float64_mul(arg1, arg2, &env->fp_status);
    int flags = get_float_exception_flags(&env->fp_status);

    if (unlikely(flags & float_flag_invalid)) {
        float_invalid_op_mul(env, flags, 1, GETPC());
    }

    return ret;
}

/* fmuls - fmuls. */
float64 helper_fmuls(CPUPPCState *env, float64 arg1, float64 arg2)
{
    float64 ret = float64r32_mul(arg1, arg2, &env->fp_status);
    int flags = get_float_exception_flags(&env->fp_status);

    if (unlikely(flags & float_flag_invalid)) {
        float_invalid_op_mul(env, flags, 1, GETPC());
    }
    return ret;
}
static void float_invalid_op_div(CPUPPCState *env, int flags,
                                 bool set_fprc, uintptr_t retaddr)
{
    if (flags & float_flag_invalid_idi) {
        float_invalid_op_vxidi(env, set_fprc, retaddr);
    } else if (flags & float_flag_invalid_zdz) {
        float_invalid_op_vxzdz(env, set_fprc, retaddr);
    } else if (flags & float_flag_invalid_snan) {
        float_invalid_op_vxsnan(env, retaddr);
    }
}
/* fdiv - fdiv. */
float64 helper_fdiv(CPUPPCState *env, float64 arg1, float64 arg2)
{
    float64 ret = float64_div(arg1, arg2, &env->fp_status);
    int flags = get_float_exception_flags(&env->fp_status);

    if (unlikely(flags & float_flag_invalid)) {
        float_invalid_op_div(env, flags, 1, GETPC());
    }
    if (unlikely(flags & float_flag_divbyzero)) {
        float_zero_divide_excp(env, GETPC());
    }

    return ret;
}

/* fdivs - fdivs. */
float64 helper_fdivs(CPUPPCState *env, float64 arg1, float64 arg2)
{
    float64 ret = float64r32_div(arg1, arg2, &env->fp_status);
    int flags = get_float_exception_flags(&env->fp_status);

    if (unlikely(flags & float_flag_invalid)) {
        float_invalid_op_div(env, flags, 1, GETPC());
    }
    if (unlikely(flags & float_flag_divbyzero)) {
        float_zero_divide_excp(env, GETPC());
    }

    return ret;
}
static uint64_t float_invalid_cvt(CPUPPCState *env, int flags,
                                  uint64_t ret, uint64_t ret_nan,
                                  bool set_fprc, uintptr_t retaddr)
{
    /*
     * VXCVI is different from most in that it sets two exception bits,
     * VXCVI and VXSNAN for an SNaN input.
     */
    if (flags & float_flag_invalid_snan) {
        env->fpscr |= FP_VXSNAN;
    }
    float_invalid_op_vxcvi(env, set_fprc, retaddr);

    return flags & float_flag_invalid_cvti ? ret : ret_nan;
}
#define FPU_FCTI(op, cvt, nanval)                                      \
uint64_t helper_##op(CPUPPCState *env, float64 arg)                    \
{                                                                      \
    uint64_t ret = float64_to_##cvt(arg, &env->fp_status);             \
    int flags = get_float_exception_flags(&env->fp_status);            \
    if (unlikely(flags & float_flag_invalid)) {                        \
        ret = float_invalid_cvt(env, flags, ret, nanval, 1, GETPC());  \
    }                                                                  \
    return ret;                                                        \
}

FPU_FCTI(fctiw, int32, 0x80000000U)
FPU_FCTI(fctiwz, int32_round_to_zero, 0x80000000U)
FPU_FCTI(fctiwu, uint32, 0x00000000U)
FPU_FCTI(fctiwuz, uint32_round_to_zero, 0x00000000U)
FPU_FCTI(fctid, int64, 0x8000000000000000ULL)
FPU_FCTI(fctidz, int64_round_to_zero, 0x8000000000000000ULL)
FPU_FCTI(fctidu, uint64, 0x0000000000000000ULL)
FPU_FCTI(fctiduz, uint64_round_to_zero, 0x0000000000000000ULL)
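/*
 * Note on nanval: float_invalid_cvt() keeps softfloat's saturated
 * result for a merely out-of-range input, but substitutes nanval when
 * the source is a NaN -- the most negative integer for the signed
 * forms, zero for the unsigned forms.
 */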
#define FPU_FCFI(op, cvtr, is_single)                      \
uint64_t helper_##op(CPUPPCState *env, uint64_t arg)       \
{                                                          \
    CPU_DoubleU farg;                                      \
                                                           \
    if (is_single) {                                       \
        float32 tmp = cvtr(arg, &env->fp_status);          \
        farg.d = float32_to_float64(tmp, &env->fp_status); \
    } else {                                               \
        farg.d = cvtr(arg, &env->fp_status);               \
    }                                                      \
    do_float_check_status(env, true, GETPC());             \
    return farg.ll;                                        \
}

FPU_FCFI(fcfid, int64_to_float64, 0)
FPU_FCFI(fcfids, int64_to_float32, 1)
FPU_FCFI(fcfidu, uint64_to_float64, 0)
FPU_FCFI(fcfidus, uint64_to_float32, 1)
static uint64_t do_fri(CPUPPCState *env, uint64_t arg,
                       FloatRoundMode rounding_mode)
{
    FloatRoundMode old_rounding_mode = get_float_rounding_mode(&env->fp_status);
    int flags;

    set_float_rounding_mode(rounding_mode, &env->fp_status);
    arg = float64_round_to_int(arg, &env->fp_status);
    set_float_rounding_mode(old_rounding_mode, &env->fp_status);

    flags = get_float_exception_flags(&env->fp_status);
    if (flags & float_flag_invalid_snan) {
        float_invalid_op_vxsnan(env, GETPC());
    }

    /* fri* does not set FPSCR[XX] */
    set_float_exception_flags(flags & ~float_flag_inexact, &env->fp_status);
    do_float_check_status(env, true, GETPC());

    return arg;
}
uint64_t helper_frin(CPUPPCState *env, uint64_t arg)
{
    return do_fri(env, arg, float_round_ties_away);
}

uint64_t helper_friz(CPUPPCState *env, uint64_t arg)
{
    return do_fri(env, arg, float_round_to_zero);
}

uint64_t helper_frip(CPUPPCState *env, uint64_t arg)
{
    return do_fri(env, arg, float_round_up);
}

uint64_t helper_frim(CPUPPCState *env, uint64_t arg)
{
    return do_fri(env, arg, float_round_down);
}
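/*
 * frin implements the Power ISA "Round to Nearest Away" semantics
 * (ties round away from zero), hence float_round_ties_away rather
 * than the IEEE default round-to-nearest-even.
 */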
static void float_invalid_op_madd(CPUPPCState *env, int flags,
                                  bool set_fpcc, uintptr_t retaddr)
{
    if (flags & float_flag_invalid_imz) {
        float_invalid_op_vximz(env, set_fpcc, retaddr);
    } else {
        float_invalid_op_addsub(env, flags, set_fpcc, retaddr);
    }
}
static float64 do_fmadd(CPUPPCState *env, float64 a, float64 b,
                        float64 c, int madd_flags, uintptr_t retaddr)
{
    float64 ret = float64_muladd(a, b, c, madd_flags, &env->fp_status);
    int flags = get_float_exception_flags(&env->fp_status);

    if (unlikely(flags & float_flag_invalid)) {
        float_invalid_op_madd(env, flags, 1, retaddr);
    }
    return ret;
}

static uint64_t do_fmadds(CPUPPCState *env, float64 a, float64 b,
                          float64 c, int madd_flags, uintptr_t retaddr)
{
    float64 ret = float64r32_muladd(a, b, c, madd_flags, &env->fp_status);
    int flags = get_float_exception_flags(&env->fp_status);

    if (unlikely(flags & float_flag_invalid)) {
        float_invalid_op_madd(env, flags, 1, retaddr);
    }
    return ret;
}
#define FPU_FMADD(op, madd_flags)                                    \
    uint64_t helper_##op(CPUPPCState *env, uint64_t arg1,            \
                         uint64_t arg2, uint64_t arg3)               \
    { return do_fmadd(env, arg1, arg2, arg3, madd_flags, GETPC()); } \
    uint64_t helper_##op##s(CPUPPCState *env, uint64_t arg1,         \
                            uint64_t arg2, uint64_t arg3)            \
    { return do_fmadds(env, arg1, arg2, arg3, madd_flags, GETPC()); }

#define MADD_FLGS 0
#define MSUB_FLGS float_muladd_negate_c
#define NMADD_FLGS float_muladd_negate_result
#define NMSUB_FLGS (float_muladd_negate_c | float_muladd_negate_result)

FPU_FMADD(fmadd, MADD_FLGS)
FPU_FMADD(fnmadd, NMADD_FLGS)
FPU_FMADD(fmsub, MSUB_FLGS)
FPU_FMADD(fnmsub, NMSUB_FLGS)
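/*
 * In terms of float64_muladd(a, b, c) == a * b + c, the flag sets map
 * onto the four forms as follows: MSUB negates the addend (a * b - c),
 * NMADD negates the final result (-(a * b + c)), and NMSUB combines
 * both (-(a * b - c)).
 */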
/* frsp - frsp. */
static uint64_t do_frsp(CPUPPCState *env, uint64_t arg, uintptr_t retaddr)
{
    float32 f32 = float64_to_float32(arg, &env->fp_status);
    int flags = get_float_exception_flags(&env->fp_status);

    if (unlikely(flags & float_flag_invalid_snan)) {
        float_invalid_op_vxsnan(env, retaddr);
    }
    return helper_todouble(f32);
}

uint64_t helper_frsp(CPUPPCState *env, uint64_t arg)
{
    return do_frsp(env, arg, GETPC());
}
static void float_invalid_op_sqrt(CPUPPCState *env, int flags,
                                  bool set_fpcc, uintptr_t retaddr)
{
    if (unlikely(flags & float_flag_invalid_sqrt)) {
        float_invalid_op_vxsqrt(env, set_fpcc, retaddr);
    } else if (unlikely(flags & float_flag_invalid_snan)) {
        float_invalid_op_vxsnan(env, retaddr);
    }
}

/* fsqrt - fsqrt. */
float64 helper_fsqrt(CPUPPCState *env, float64 arg)
{
    float64 ret = float64_sqrt(arg, &env->fp_status);
    int flags = get_float_exception_flags(&env->fp_status);

    if (unlikely(flags & float_flag_invalid)) {
        float_invalid_op_sqrt(env, flags, 1, GETPC());
    }

    return ret;
}

/* fsqrts - fsqrts. */
float64 helper_fsqrts(CPUPPCState *env, float64 arg)
{
    float64 ret = float64r32_sqrt(arg, &env->fp_status);
    int flags = get_float_exception_flags(&env->fp_status);

    if (unlikely(flags & float_flag_invalid)) {
        float_invalid_op_sqrt(env, flags, 1, GETPC());
    }
    return ret;
}
/* fre - fre. */
float64 helper_fre(CPUPPCState *env, float64 arg)
{
    /* "Estimate" the reciprocal with actual division.  */
    float64 ret = float64_div(float64_one, arg, &env->fp_status);
    int flags = get_float_exception_flags(&env->fp_status);

    if (unlikely(flags & float_flag_invalid_snan)) {
        float_invalid_op_vxsnan(env, GETPC());
    }
    if (unlikely(flags & float_flag_divbyzero)) {
        float_zero_divide_excp(env, GETPC());
        /* For FPSCR.ZE == 0, the result is 1/2.  */
        ret = float64_set_sign(float64_half, float64_is_neg(arg));
    }

    return ret;
}

/* fres - fres. */
uint64_t helper_fres(CPUPPCState *env, uint64_t arg)
{
    /* "Estimate" the reciprocal with actual division.  */
    float64 ret = float64r32_div(float64_one, arg, &env->fp_status);
    int flags = get_float_exception_flags(&env->fp_status);

    if (unlikely(flags & float_flag_invalid_snan)) {
        float_invalid_op_vxsnan(env, GETPC());
    }
    if (unlikely(flags & float_flag_divbyzero)) {
        float_zero_divide_excp(env, GETPC());
        /* For FPSCR.ZE == 0, the result is 1/2.  */
        ret = float64_set_sign(float64_half, float64_is_neg(arg));
    }

    return ret;
}
/* frsqrte - frsqrte. */
float64 helper_frsqrte(CPUPPCState *env, float64 arg)
{
    /* "Estimate" the reciprocal with actual division.  */
    float64 rets = float64_sqrt(arg, &env->fp_status);
    float64 retd = float64_div(float64_one, rets, &env->fp_status);
    int flags = get_float_exception_flags(&env->fp_status);

    if (unlikely(flags & float_flag_invalid)) {
        float_invalid_op_sqrt(env, flags, 1, GETPC());
    }
    if (unlikely(flags & float_flag_divbyzero)) {
        /* Reciprocal of (square root of) zero.  */
        float_zero_divide_excp(env, GETPC());
    }

    return retd;
}

/* frsqrtes - frsqrtes. */
float64 helper_frsqrtes(CPUPPCState *env, float64 arg)
{
    /* "Estimate" the reciprocal with actual division.  */
    float64 rets = float64_sqrt(arg, &env->fp_status);
    float64 retd = float64r32_div(float64_one, rets, &env->fp_status);
    int flags = get_float_exception_flags(&env->fp_status);

    if (unlikely(flags & float_flag_invalid)) {
        float_invalid_op_sqrt(env, flags, 1, GETPC());
    }
    if (unlikely(flags & float_flag_divbyzero)) {
        /* Reciprocal of (square root of) zero.  */
        float_zero_divide_excp(env, GETPC());
    }

    return retd;
}
uint64_t helper_FSEL(uint64_t a, uint64_t b, uint64_t c)
{
    CPU_DoubleU fa;

    fa.ll = a;

    if ((!float64_is_neg(fa.d) || float64_is_zero(fa.d)) &&
        !float64_is_any_nan(fa.d)) {
        return c;
    } else {
        return b;
    }
}
uint32_t helper_ftdiv(uint64_t fra, uint64_t frb)
{
    int fe_flag = 0;
    int fg_flag = 0;

    if (unlikely(float64_is_infinity(fra) ||
                 float64_is_infinity(frb) ||
                 float64_is_zero(frb))) {
        fe_flag = 1;
        fg_flag = 1;
    } else {
        int e_a = ppc_float64_get_unbiased_exp(fra);
        int e_b = ppc_float64_get_unbiased_exp(frb);

        if (unlikely(float64_is_any_nan(fra) ||
                     float64_is_any_nan(frb))) {
            fe_flag = 1;
        } else if ((e_b <= -1022) || (e_b >= 1021)) {
            fe_flag = 1;
        } else if (!float64_is_zero(fra) &&
                   (((e_a - e_b) >= 1023) ||
                    ((e_a - e_b) <= -1021) ||
                    (e_a <= -970))) {
            fe_flag = 1;
        }

        if (unlikely(float64_is_zero_or_denormal(frb))) {
            /* XB is not zero because of the above check and */
            /* so must be denormalized.                      */
            fg_flag = 1;
        }
    }

    return 0x8 | (fg_flag ? 4 : 0) | (fe_flag ? 2 : 0);
}
uint32_t helper_ftsqrt(uint64_t frb)
{
    int fe_flag = 0;
    int fg_flag = 0;

    if (unlikely(float64_is_infinity(frb) || float64_is_zero(frb))) {
        fe_flag = 1;
        fg_flag = 1;
    } else {
        int e_b = ppc_float64_get_unbiased_exp(frb);

        if (unlikely(float64_is_any_nan(frb))) {
            fe_flag = 1;
        } else if (unlikely(float64_is_zero(frb))) {
            fe_flag = 1;
        } else if (unlikely(float64_is_neg(frb))) {
            fe_flag = 1;
        } else if (!float64_is_zero(frb) && (e_b <= (-1022 + 52))) {
            fe_flag = 1;
        }

        if (unlikely(float64_is_zero_or_denormal(frb))) {
            /* XB is not zero because of the above check and */
            /* therefore must be denormalized.               */
            fg_flag = 1;
        }
    }

    return 0x8 | (fg_flag ? 4 : 0) | (fe_flag ? 2 : 0);
}
void helper_fcmpu(CPUPPCState *env, uint64_t arg1, uint64_t arg2,
                  uint32_t crfD)
{
    CPU_DoubleU farg1, farg2;
    uint32_t ret = 0;

    farg1.ll = arg1;
    farg2.ll = arg2;

    if (unlikely(float64_is_any_nan(farg1.d) ||
                 float64_is_any_nan(farg2.d))) {
        ret = 0x01UL;
    } else if (float64_lt(farg1.d, farg2.d, &env->fp_status)) {
        ret = 0x08UL;
    } else if (!float64_le(farg1.d, farg2.d, &env->fp_status)) {
        ret = 0x04UL;
    } else {
        ret = 0x02UL;
    }

    env->fpscr &= ~FP_FPCC;
    env->fpscr |= ret << FPSCR_FPCC;
    env->crf[crfD] = ret;
    if (unlikely(ret == 0x01UL
                 && (float64_is_signaling_nan(farg1.d, &env->fp_status) ||
                     float64_is_signaling_nan(farg2.d, &env->fp_status)))) {
        /* sNaN comparison */
        float_invalid_op_vxsnan(env, GETPC());
    }
}
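/*
 * The 4-bit result written to CR[crfD] and FPCC uses the usual CR
 * encoding: 0x8 = FL (less than), 0x4 = FG (greater than),
 * 0x2 = FE (equal), 0x1 = FU (unordered).
 */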
void helper_fcmpo(CPUPPCState *env, uint64_t arg1, uint64_t arg2,
                  uint32_t crfD)
{
    CPU_DoubleU farg1, farg2;
    uint32_t ret = 0;

    farg1.ll = arg1;
    farg2.ll = arg2;

    if (unlikely(float64_is_any_nan(farg1.d) ||
                 float64_is_any_nan(farg2.d))) {
        ret = 0x01UL;
    } else if (float64_lt(farg1.d, farg2.d, &env->fp_status)) {
        ret = 0x08UL;
    } else if (!float64_le(farg1.d, farg2.d, &env->fp_status)) {
        ret = 0x04UL;
    } else {
        ret = 0x02UL;
    }

    env->fpscr &= ~FP_FPCC;
    env->fpscr |= ret << FPSCR_FPCC;
    env->crf[crfD] = (uint32_t) ret;
    if (unlikely(ret == 0x01UL)) {
        float_invalid_op_vxvc(env, 1, GETPC());
        if (float64_is_signaling_nan(farg1.d, &env->fp_status) ||
            float64_is_signaling_nan(farg2.d, &env->fp_status)) {
            /* sNaN comparison */
            float_invalid_op_vxsnan(env, GETPC());
        }
    }
}
/* Single-precision floating-point conversions */
static inline uint32_t efscfsi(CPUPPCState *env, uint32_t val)
{
    CPU_FloatU u;

    u.f = int32_to_float32(val, &env->vec_status);

    return u.l;
}

static inline uint32_t efscfui(CPUPPCState *env, uint32_t val)
{
    CPU_FloatU u;

    u.f = uint32_to_float32(val, &env->vec_status);

    return u.l;
}

static inline int32_t efsctsi(CPUPPCState *env, uint32_t val)
{
    CPU_FloatU u;

    u.l = val;
    /* NaN are not treated the same way IEEE 754 does */
    if (unlikely(float32_is_quiet_nan(u.f, &env->vec_status))) {
        return 0;
    }

    return float32_to_int32(u.f, &env->vec_status);
}

static inline uint32_t efsctui(CPUPPCState *env, uint32_t val)
{
    CPU_FloatU u;

    u.l = val;
    /* NaN are not treated the same way IEEE 754 does */
    if (unlikely(float32_is_quiet_nan(u.f, &env->vec_status))) {
        return 0;
    }

    return float32_to_uint32(u.f, &env->vec_status);
}

static inline uint32_t efsctsiz(CPUPPCState *env, uint32_t val)
{
    CPU_FloatU u;

    u.l = val;
    /* NaN are not treated the same way IEEE 754 does */
    if (unlikely(float32_is_quiet_nan(u.f, &env->vec_status))) {
        return 0;
    }

    return float32_to_int32_round_to_zero(u.f, &env->vec_status);
}

static inline uint32_t efsctuiz(CPUPPCState *env, uint32_t val)
{
    CPU_FloatU u;

    u.l = val;
    /* NaN are not treated the same way IEEE 754 does */
    if (unlikely(float32_is_quiet_nan(u.f, &env->vec_status))) {
        return 0;
    }

    return float32_to_uint32_round_to_zero(u.f, &env->vec_status);
}

static inline uint32_t efscfsf(CPUPPCState *env, uint32_t val)
{
    CPU_FloatU u;
    float32 tmp;

    u.f = int32_to_float32(val, &env->vec_status);
    tmp = int64_to_float32(1ULL << 32, &env->vec_status);
    u.f = float32_div(u.f, tmp, &env->vec_status);

    return u.l;
}

static inline uint32_t efscfuf(CPUPPCState *env, uint32_t val)
{
    CPU_FloatU u;
    float32 tmp;

    u.f = uint32_to_float32(val, &env->vec_status);
    tmp = uint64_to_float32(1ULL << 32, &env->vec_status);
    u.f = float32_div(u.f, tmp, &env->vec_status);

    return u.l;
}

static inline uint32_t efsctsf(CPUPPCState *env, uint32_t val)
{
    CPU_FloatU u;
    float32 tmp;

    u.l = val;
    /* NaN are not treated the same way IEEE 754 does */
    if (unlikely(float32_is_quiet_nan(u.f, &env->vec_status))) {
        return 0;
    }
    tmp = uint64_to_float32(1ULL << 32, &env->vec_status);
    u.f = float32_mul(u.f, tmp, &env->vec_status);

    return float32_to_int32(u.f, &env->vec_status);
}

static inline uint32_t efsctuf(CPUPPCState *env, uint32_t val)
{
    CPU_FloatU u;
    float32 tmp;

    u.l = val;
    /* NaN are not treated the same way IEEE 754 does */
    if (unlikely(float32_is_quiet_nan(u.f, &env->vec_status))) {
        return 0;
    }
    tmp = uint64_to_float32(1ULL << 32, &env->vec_status);
    u.f = float32_mul(u.f, tmp, &env->vec_status);

    return float32_to_uint32(u.f, &env->vec_status);
}
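/*
 * Worked example (illustrative): the *sf/*uf forms treat the 32-bit
 * operand as a fixed-point fraction scaled by 2^32, so
 * efscfsf(0x40000000) yields 0.25f and efsctuf(0.25f) yields
 * 0x40000000 back.
 */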
#define HELPER_SPE_SINGLE_CONV(name)                              \
    uint32_t helper_e##name(CPUPPCState *env, uint32_t val)       \
    {                                                             \
        return e##name(env, val);                                 \
    }
/* efscfsi */
HELPER_SPE_SINGLE_CONV(fscfsi);
/* efscfui */
HELPER_SPE_SINGLE_CONV(fscfui);
/* efscfuf */
HELPER_SPE_SINGLE_CONV(fscfuf);
/* efscfsf */
HELPER_SPE_SINGLE_CONV(fscfsf);
/* efsctsi */
HELPER_SPE_SINGLE_CONV(fsctsi);
/* efsctui */
HELPER_SPE_SINGLE_CONV(fsctui);
/* efsctsiz */
HELPER_SPE_SINGLE_CONV(fsctsiz);
/* efsctuiz */
HELPER_SPE_SINGLE_CONV(fsctuiz);
/* efsctsf */
HELPER_SPE_SINGLE_CONV(fsctsf);
/* efsctuf */
HELPER_SPE_SINGLE_CONV(fsctuf);
#define HELPER_SPE_VECTOR_CONV(name)                            \
    uint64_t helper_ev##name(CPUPPCState *env, uint64_t val)    \
    {                                                           \
        return ((uint64_t)e##name(env, val >> 32) << 32) |      \
                (uint64_t)e##name(env, val);                    \
    }
/* evfscfsi */
HELPER_SPE_VECTOR_CONV(fscfsi);
/* evfscfui */
HELPER_SPE_VECTOR_CONV(fscfui);
/* evfscfuf */
HELPER_SPE_VECTOR_CONV(fscfuf);
/* evfscfsf */
HELPER_SPE_VECTOR_CONV(fscfsf);
/* evfsctsi */
HELPER_SPE_VECTOR_CONV(fsctsi);
/* evfsctui */
HELPER_SPE_VECTOR_CONV(fsctui);
/* evfsctsiz */
HELPER_SPE_VECTOR_CONV(fsctsiz);
/* evfsctuiz */
HELPER_SPE_VECTOR_CONV(fsctuiz);
/* evfsctsf */
HELPER_SPE_VECTOR_CONV(fsctsf);
/* evfsctuf */
HELPER_SPE_VECTOR_CONV(fsctuf);
/* Single-precision floating-point arithmetic */
static inline uint32_t efsadd(CPUPPCState *env, uint32_t op1, uint32_t op2)
{
    CPU_FloatU u1, u2;

    u1.l = op1;
    u2.l = op2;
    u1.f = float32_add(u1.f, u2.f, &env->vec_status);
    return u1.l;
}

static inline uint32_t efssub(CPUPPCState *env, uint32_t op1, uint32_t op2)
{
    CPU_FloatU u1, u2;

    u1.l = op1;
    u2.l = op2;
    u1.f = float32_sub(u1.f, u2.f, &env->vec_status);
    return u1.l;
}

static inline uint32_t efsmul(CPUPPCState *env, uint32_t op1, uint32_t op2)
{
    CPU_FloatU u1, u2;

    u1.l = op1;
    u2.l = op2;
    u1.f = float32_mul(u1.f, u2.f, &env->vec_status);
    return u1.l;
}

static inline uint32_t efsdiv(CPUPPCState *env, uint32_t op1, uint32_t op2)
{
    CPU_FloatU u1, u2;

    u1.l = op1;
    u2.l = op2;
    u1.f = float32_div(u1.f, u2.f, &env->vec_status);
    return u1.l;
}
#define HELPER_SPE_SINGLE_ARITH(name)                                     \
    uint32_t helper_e##name(CPUPPCState *env, uint32_t op1, uint32_t op2) \
    {                                                                     \
        return e##name(env, op1, op2);                                    \
    }
/* efsadd */
HELPER_SPE_SINGLE_ARITH(fsadd);
/* efssub */
HELPER_SPE_SINGLE_ARITH(fssub);
/* efsmul */
HELPER_SPE_SINGLE_ARITH(fsmul);
/* efsdiv */
HELPER_SPE_SINGLE_ARITH(fsdiv);
#define HELPER_SPE_VECTOR_ARITH(name)                                      \
    uint64_t helper_ev##name(CPUPPCState *env, uint64_t op1, uint64_t op2) \
    {                                                                      \
        return ((uint64_t)e##name(env, op1 >> 32, op2 >> 32) << 32) |      \
                (uint64_t)e##name(env, op1, op2);                          \
    }
/* evfsadd */
HELPER_SPE_VECTOR_ARITH(fsadd);
/* evfssub */
HELPER_SPE_VECTOR_ARITH(fssub);
/* evfsmul */
HELPER_SPE_VECTOR_ARITH(fsmul);
/* evfsdiv */
HELPER_SPE_VECTOR_ARITH(fsdiv);
/* Single-precision floating-point comparisons */
static inline uint32_t efscmplt(CPUPPCState *env, uint32_t op1, uint32_t op2)
{
    CPU_FloatU u1, u2;

    u1.l = op1;
    u2.l = op2;
    return float32_lt(u1.f, u2.f, &env->vec_status) ? 4 : 0;
}

static inline uint32_t efscmpgt(CPUPPCState *env, uint32_t op1, uint32_t op2)
{
    CPU_FloatU u1, u2;

    u1.l = op1;
    u2.l = op2;
    return float32_le(u1.f, u2.f, &env->vec_status) ? 0 : 4;
}

static inline uint32_t efscmpeq(CPUPPCState *env, uint32_t op1, uint32_t op2)
{
    CPU_FloatU u1, u2;

    u1.l = op1;
    u2.l = op2;
    return float32_eq(u1.f, u2.f, &env->vec_status) ? 4 : 0;
}

static inline uint32_t efststlt(CPUPPCState *env, uint32_t op1, uint32_t op2)
{
    /* XXX: TODO: ignore special values (NaN, infinites, ...) */
    return efscmplt(env, op1, op2);
}

static inline uint32_t efststgt(CPUPPCState *env, uint32_t op1, uint32_t op2)
{
    /* XXX: TODO: ignore special values (NaN, infinites, ...) */
    return efscmpgt(env, op1, op2);
}

static inline uint32_t efststeq(CPUPPCState *env, uint32_t op1, uint32_t op2)
{
    /* XXX: TODO: ignore special values (NaN, infinites, ...) */
    return efscmpeq(env, op1, op2);
}
#define HELPER_SINGLE_SPE_CMP(name)                                       \
    uint32_t helper_e##name(CPUPPCState *env, uint32_t op1, uint32_t op2) \
    {                                                                     \
        return e##name(env, op1, op2);                                    \
    }
/* efststlt */
HELPER_SINGLE_SPE_CMP(fststlt);
/* efststgt */
HELPER_SINGLE_SPE_CMP(fststgt);
/* efststeq */
HELPER_SINGLE_SPE_CMP(fststeq);
/* efscmplt */
HELPER_SINGLE_SPE_CMP(fscmplt);
/* efscmpgt */
HELPER_SINGLE_SPE_CMP(fscmpgt);
/* efscmpeq */
HELPER_SINGLE_SPE_CMP(fscmpeq);
static inline uint32_t evcmp_merge(int t0, int t1)
{
    return (t0 << 3) | (t1 << 2) | ((t0 | t1) << 1) | (t0 & t1);
}
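/*
 * Worked example (illustrative): evcmp_merge(1, 0) == 0b1010 -- high
 * element true, low element false, either true, not both true --
 * matching the CR field layout used by the vector compares below.
 */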
#define HELPER_VECTOR_SPE_CMP(name)                                        \
    uint32_t helper_ev##name(CPUPPCState *env, uint64_t op1, uint64_t op2) \
    {                                                                      \
        return evcmp_merge(e##name(env, op1 >> 32, op2 >> 32),             \
                           e##name(env, op1, op2));                        \
    }
/* evfststlt */
HELPER_VECTOR_SPE_CMP(fststlt);
/* evfststgt */
HELPER_VECTOR_SPE_CMP(fststgt);
/* evfststeq */
HELPER_VECTOR_SPE_CMP(fststeq);
/* evfscmplt */
HELPER_VECTOR_SPE_CMP(fscmplt);
/* evfscmpgt */
HELPER_VECTOR_SPE_CMP(fscmpgt);
/* evfscmpeq */
HELPER_VECTOR_SPE_CMP(fscmpeq);
/* Double-precision floating-point conversion */
uint64_t helper_efdcfsi(CPUPPCState *env, uint32_t val)
{
    CPU_DoubleU u;

    u.d = int32_to_float64(val, &env->vec_status);

    return u.ll;
}

uint64_t helper_efdcfsid(CPUPPCState *env, uint64_t val)
{
    CPU_DoubleU u;

    u.d = int64_to_float64(val, &env->vec_status);

    return u.ll;
}

uint64_t helper_efdcfui(CPUPPCState *env, uint32_t val)
{
    CPU_DoubleU u;

    u.d = uint32_to_float64(val, &env->vec_status);

    return u.ll;
}

uint64_t helper_efdcfuid(CPUPPCState *env, uint64_t val)
{
    CPU_DoubleU u;

    u.d = uint64_to_float64(val, &env->vec_status);

    return u.ll;
}

uint32_t helper_efdctsi(CPUPPCState *env, uint64_t val)
{
    CPU_DoubleU u;

    u.ll = val;
    /* NaN are not treated the same way IEEE 754 does */
    if (unlikely(float64_is_any_nan(u.d))) {
        return 0;
    }

    return float64_to_int32(u.d, &env->vec_status);
}

uint32_t helper_efdctui(CPUPPCState *env, uint64_t val)
{
    CPU_DoubleU u;

    u.ll = val;
    /* NaN are not treated the same way IEEE 754 does */
    if (unlikely(float64_is_any_nan(u.d))) {
        return 0;
    }

    return float64_to_uint32(u.d, &env->vec_status);
}

uint32_t helper_efdctsiz(CPUPPCState *env, uint64_t val)
{
    CPU_DoubleU u;

    u.ll = val;
    /* NaN are not treated the same way IEEE 754 does */
    if (unlikely(float64_is_any_nan(u.d))) {
        return 0;
    }

    return float64_to_int32_round_to_zero(u.d, &env->vec_status);
}

uint64_t helper_efdctsidz(CPUPPCState *env, uint64_t val)
{
    CPU_DoubleU u;

    u.ll = val;
    /* NaN are not treated the same way IEEE 754 does */
    if (unlikely(float64_is_any_nan(u.d))) {
        return 0;
    }

    return float64_to_int64_round_to_zero(u.d, &env->vec_status);
}

uint32_t helper_efdctuiz(CPUPPCState *env, uint64_t val)
{
    CPU_DoubleU u;

    u.ll = val;
    /* NaN are not treated the same way IEEE 754 does */
    if (unlikely(float64_is_any_nan(u.d))) {
        return 0;
    }

    return float64_to_uint32_round_to_zero(u.d, &env->vec_status);
}

uint64_t helper_efdctuidz(CPUPPCState *env, uint64_t val)
{
    CPU_DoubleU u;

    u.ll = val;
    /* NaN are not treated the same way IEEE 754 does */
    if (unlikely(float64_is_any_nan(u.d))) {
        return 0;
    }

    return float64_to_uint64_round_to_zero(u.d, &env->vec_status);
}
uint64_t helper_efdcfsf(CPUPPCState *env, uint32_t val)
{
    CPU_DoubleU u;
    float64 tmp;

    u.d = int32_to_float64(val, &env->vec_status);
    tmp = int64_to_float64(1ULL << 32, &env->vec_status);
    u.d = float64_div(u.d, tmp, &env->vec_status);

    return u.ll;
}

uint64_t helper_efdcfuf(CPUPPCState *env, uint32_t val)
{
    CPU_DoubleU u;
    float64 tmp;

    u.d = uint32_to_float64(val, &env->vec_status);
    tmp = int64_to_float64(1ULL << 32, &env->vec_status);
    u.d = float64_div(u.d, tmp, &env->vec_status);

    return u.ll;
}

uint32_t helper_efdctsf(CPUPPCState *env, uint64_t val)
{
    CPU_DoubleU u;
    float64 tmp;

    u.ll = val;
    /* NaN are not treated the same way IEEE 754 does */
    if (unlikely(float64_is_any_nan(u.d))) {
        return 0;
    }
    tmp = uint64_to_float64(1ULL << 32, &env->vec_status);
    u.d = float64_mul(u.d, tmp, &env->vec_status);

    return float64_to_int32(u.d, &env->vec_status);
}

uint32_t helper_efdctuf(CPUPPCState *env, uint64_t val)
{
    CPU_DoubleU u;
    float64 tmp;

    u.ll = val;
    /* NaN are not treated the same way IEEE 754 does */
    if (unlikely(float64_is_any_nan(u.d))) {
        return 0;
    }
    tmp = uint64_to_float64(1ULL << 32, &env->vec_status);
    u.d = float64_mul(u.d, tmp, &env->vec_status);

    return float64_to_uint32(u.d, &env->vec_status);
}
uint32_t helper_efscfd(CPUPPCState *env, uint64_t val)
{
    CPU_DoubleU u1;
    CPU_FloatU u2;

    u1.ll = val;
    u2.f = float64_to_float32(u1.d, &env->vec_status);

    return u2.l;
}

uint64_t helper_efdcfs(CPUPPCState *env, uint32_t val)
{
    CPU_DoubleU u2;
    CPU_FloatU u1;

    u1.l = val;
    u2.d = float32_to_float64(u1.f, &env->vec_status);

    return u2.ll;
}
/* Double-precision floating-point arithmetic */
uint64_t helper_efdadd(CPUPPCState *env, uint64_t op1, uint64_t op2)
{
    CPU_DoubleU u1, u2;

    u1.ll = op1;
    u2.ll = op2;
    u1.d = float64_add(u1.d, u2.d, &env->vec_status);
    return u1.ll;
}

uint64_t helper_efdsub(CPUPPCState *env, uint64_t op1, uint64_t op2)
{
    CPU_DoubleU u1, u2;

    u1.ll = op1;
    u2.ll = op2;
    u1.d = float64_sub(u1.d, u2.d, &env->vec_status);
    return u1.ll;
}

uint64_t helper_efdmul(CPUPPCState *env, uint64_t op1, uint64_t op2)
{
    CPU_DoubleU u1, u2;

    u1.ll = op1;
    u2.ll = op2;
    u1.d = float64_mul(u1.d, u2.d, &env->vec_status);
    return u1.ll;
}

uint64_t helper_efddiv(CPUPPCState *env, uint64_t op1, uint64_t op2)
{
    CPU_DoubleU u1, u2;

    u1.ll = op1;
    u2.ll = op2;
    u1.d = float64_div(u1.d, u2.d, &env->vec_status);
    return u1.ll;
}
/* Double precision floating point helpers */
uint32_t helper_efdtstlt(CPUPPCState *env, uint64_t op1, uint64_t op2)
{
    CPU_DoubleU u1, u2;

    u1.ll = op1;
    u2.ll = op2;
    return float64_lt(u1.d, u2.d, &env->vec_status) ? 4 : 0;
}

uint32_t helper_efdtstgt(CPUPPCState *env, uint64_t op1, uint64_t op2)
{
    CPU_DoubleU u1, u2;

    u1.ll = op1;
    u2.ll = op2;
    return float64_le(u1.d, u2.d, &env->vec_status) ? 0 : 4;
}

uint32_t helper_efdtsteq(CPUPPCState *env, uint64_t op1, uint64_t op2)
{
    CPU_DoubleU u1, u2;

    u1.ll = op1;
    u2.ll = op2;
    return float64_eq_quiet(u1.d, u2.d, &env->vec_status) ? 4 : 0;
}

uint32_t helper_efdcmplt(CPUPPCState *env, uint64_t op1, uint64_t op2)
{
    /* XXX: TODO: test special values (NaN, infinites, ...) */
    return helper_efdtstlt(env, op1, op2);
}

uint32_t helper_efdcmpgt(CPUPPCState *env, uint64_t op1, uint64_t op2)
{
    /* XXX: TODO: test special values (NaN, infinites, ...) */
    return helper_efdtstgt(env, op1, op2);
}

uint32_t helper_efdcmpeq(CPUPPCState *env, uint64_t op1, uint64_t op2)
{
    /* XXX: TODO: test special values (NaN, infinites, ...) */
    return helper_efdtsteq(env, op1, op2);
}
#define float64_to_float64(x, env) x
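/*
 * Note (an inference, not an original comment): this identity macro
 * lets the type-parameterised VSX macros apply a tp##_to_float64
 * conversion uniformly, making it a no-op when tp is already float64.
 */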
/*
 * VSX_ADD_SUB - VSX floating point add/subtract
 *   name  - instruction mnemonic
 *   op    - operation (add or sub)
 *   nels  - number of elements (1, 2 or 4)
 *   tp    - type (float32 or float64)
 *   fld   - vsr_t field (VsrD(*) or VsrW(*))
 *   sfifprf - set FI and FPRF
 */
#define VSX_ADD_SUB(name, op, nels, tp, fld, sfifprf, r2sp)                  \
void helper_##name(CPUPPCState *env, ppc_vsr_t *xt,                          \
                   ppc_vsr_t *xa, ppc_vsr_t *xb)                             \
{                                                                            \
    ppc_vsr_t t = { };                                                       \
    int i;                                                                   \
                                                                             \
    helper_reset_fpstatus(env);                                              \
                                                                             \
    for (i = 0; i < nels; i++) {                                             \
        float_status tstat = env->fp_status;                                 \
        set_float_exception_flags(0, &tstat);                                \
        t.fld = tp##_##op(xa->fld, xb->fld, &tstat);                         \
        env->fp_status.float_exception_flags |= tstat.float_exception_flags; \
                                                                             \
        if (unlikely(tstat.float_exception_flags & float_flag_invalid)) {    \
            float_invalid_op_addsub(env, tstat.float_exception_flags,        \
                                    sfifprf, GETPC());                       \
        }                                                                    \
                                                                             \
        if (r2sp) {                                                          \
            t.fld = do_frsp(env, t.fld, GETPC());                            \
        }                                                                    \
                                                                             \
        if (sfifprf) {                                                       \
            helper_compute_fprf_float64(env, t.fld);                         \
        }                                                                    \
    }                                                                        \
    *xt = t;                                                                 \
    do_float_check_status(env, sfifprf, GETPC());                            \
}

VSX_ADD_SUB(xsadddp, add, 1, float64, VsrD(0), 1, 0)
VSX_ADD_SUB(xsaddsp, add, 1, float64, VsrD(0), 1, 1)
VSX_ADD_SUB(xvadddp, add, 2, float64, VsrD(i), 0, 0)
VSX_ADD_SUB(xvaddsp, add, 4, float32, VsrW(i), 0, 0)
VSX_ADD_SUB(xssubdp, sub, 1, float64, VsrD(0), 1, 0)
VSX_ADD_SUB(xssubsp, sub, 1, float64, VsrD(0), 1, 1)
VSX_ADD_SUB(xvsubdp, sub, 2, float64, VsrD(i), 0, 0)
VSX_ADD_SUB(xvsubsp, sub, 4, float32, VsrW(i), 0, 0)
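/*
 * The per-element pattern above runs each softfloat operation in a
 * scratch float_status (tstat) so one lane's exception flags can be
 * examined in isolation, then ORs them back into the accumulated
 * env->fp_status before the final do_float_check_status().
 */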
void helper_xsaddqp(CPUPPCState *env, uint32_t opcode,
                    ppc_vsr_t *xt, ppc_vsr_t *xa, ppc_vsr_t *xb)
{
    ppc_vsr_t t = *xt;
    float_status tstat;

    helper_reset_fpstatus(env);

    tstat = env->fp_status;
    if (unlikely(Rc(opcode) != 0)) {
        tstat.float_rounding_mode = float_round_to_odd;
    }

    set_float_exception_flags(0, &tstat);
    t.f128 = float128_add(xa->f128, xb->f128, &tstat);
    env->fp_status.float_exception_flags |= tstat.float_exception_flags;

    if (unlikely(tstat.float_exception_flags & float_flag_invalid)) {
        float_invalid_op_addsub(env, tstat.float_exception_flags, 1, GETPC());
    }

    helper_compute_fprf_float128(env, t.f128);

    *xt = t;
    do_float_check_status(env, true, GETPC());
}
/*
 * VSX_MUL - VSX floating point multiply
 *   op    - instruction mnemonic
 *   nels  - number of elements (1, 2 or 4)
 *   tp    - type (float32 or float64)
 *   fld   - vsr_t field (VsrD(*) or VsrW(*))
 *   sfifprf - set FI and FPRF
 */
#define VSX_MUL(op, nels, tp, fld, sfifprf, r2sp)                            \
void helper_##op(CPUPPCState *env, ppc_vsr_t *xt,                            \
                 ppc_vsr_t *xa, ppc_vsr_t *xb)                               \
{                                                                            \
    ppc_vsr_t t = { };                                                       \
    int i;                                                                   \
                                                                             \
    helper_reset_fpstatus(env);                                              \
                                                                             \
    for (i = 0; i < nels; i++) {                                             \
        float_status tstat = env->fp_status;                                 \
        set_float_exception_flags(0, &tstat);                                \
        t.fld = tp##_mul(xa->fld, xb->fld, &tstat);                          \
        env->fp_status.float_exception_flags |= tstat.float_exception_flags; \
                                                                             \
        if (unlikely(tstat.float_exception_flags & float_flag_invalid)) {    \
            float_invalid_op_mul(env, tstat.float_exception_flags,           \
                                 sfifprf, GETPC());                          \
        }                                                                    \
                                                                             \
        if (r2sp) {                                                          \
            t.fld = do_frsp(env, t.fld, GETPC());                            \
        }                                                                    \
                                                                             \
        if (sfifprf) {                                                       \
            helper_compute_fprf_float64(env, t.fld);                         \
        }                                                                    \
    }                                                                        \
                                                                             \
    *xt = t;                                                                 \
    do_float_check_status(env, sfifprf, GETPC());                            \
}

VSX_MUL(xsmuldp, 1, float64, VsrD(0), 1, 0)
VSX_MUL(xsmulsp, 1, float64, VsrD(0), 1, 1)
VSX_MUL(xvmuldp, 2, float64, VsrD(i), 0, 0)
VSX_MUL(xvmulsp, 4, float32, VsrW(i), 0, 0)
void helper_xsmulqp(CPUPPCState *env, uint32_t opcode,
                    ppc_vsr_t *xt, ppc_vsr_t *xa, ppc_vsr_t *xb)
{
    ppc_vsr_t t = *xt;
    float_status tstat;

    helper_reset_fpstatus(env);
    tstat = env->fp_status;
    if (unlikely(Rc(opcode) != 0)) {
        tstat.float_rounding_mode = float_round_to_odd;
    }

    set_float_exception_flags(0, &tstat);
    t.f128 = float128_mul(xa->f128, xb->f128, &tstat);
    env->fp_status.float_exception_flags |= tstat.float_exception_flags;

    if (unlikely(tstat.float_exception_flags & float_flag_invalid)) {
        float_invalid_op_mul(env, tstat.float_exception_flags, 1, GETPC());
    }
    helper_compute_fprf_float128(env, t.f128);

    *xt = t;
    do_float_check_status(env, true, GETPC());
}
/*
 * VSX_DIV - VSX floating point divide
 *   op    - instruction mnemonic
 *   nels  - number of elements (1, 2 or 4)
 *   tp    - type (float32 or float64)
 *   fld   - vsr_t field (VsrD(*) or VsrW(*))
 *   sfifprf - set FI and FPRF
 */
#define VSX_DIV(op, nels, tp, fld, sfifprf, r2sp)                            \
void helper_##op(CPUPPCState *env, ppc_vsr_t *xt,                            \
                 ppc_vsr_t *xa, ppc_vsr_t *xb)                               \
{                                                                            \
    ppc_vsr_t t = { };                                                       \
    int i;                                                                   \
                                                                             \
    helper_reset_fpstatus(env);                                              \
                                                                             \
    for (i = 0; i < nels; i++) {                                             \
        float_status tstat = env->fp_status;                                 \
        set_float_exception_flags(0, &tstat);                                \
        t.fld = tp##_div(xa->fld, xb->fld, &tstat);                          \
        env->fp_status.float_exception_flags |= tstat.float_exception_flags; \
                                                                             \
        if (unlikely(tstat.float_exception_flags & float_flag_invalid)) {    \
            float_invalid_op_div(env, tstat.float_exception_flags,           \
                                 sfifprf, GETPC());                          \
        }                                                                    \
        if (unlikely(tstat.float_exception_flags & float_flag_divbyzero)) {  \
            float_zero_divide_excp(env, GETPC());                            \
        }                                                                    \
                                                                             \
        if (r2sp) {                                                          \
            t.fld = do_frsp(env, t.fld, GETPC());                            \
        }                                                                    \
                                                                             \
        if (sfifprf) {                                                       \
            helper_compute_fprf_float64(env, t.fld);                         \
        }                                                                    \
    }                                                                        \
                                                                             \
    *xt = t;                                                                 \
    do_float_check_status(env, sfifprf, GETPC());                            \
}

VSX_DIV(xsdivdp, 1, float64, VsrD(0), 1, 0)
VSX_DIV(xsdivsp, 1, float64, VsrD(0), 1, 1)
VSX_DIV(xvdivdp, 2, float64, VsrD(i), 0, 0)
VSX_DIV(xvdivsp, 4, float32, VsrW(i), 0, 0)
void helper_xsdivqp(CPUPPCState *env, uint32_t opcode,
                    ppc_vsr_t *xt, ppc_vsr_t *xa, ppc_vsr_t *xb)
{
    ppc_vsr_t t = *xt;
    float_status tstat;

    helper_reset_fpstatus(env);
    tstat = env->fp_status;
    if (unlikely(Rc(opcode) != 0)) {
        tstat.float_rounding_mode = float_round_to_odd;
    }

    set_float_exception_flags(0, &tstat);
    t.f128 = float128_div(xa->f128, xb->f128, &tstat);
    env->fp_status.float_exception_flags |= tstat.float_exception_flags;

    if (unlikely(tstat.float_exception_flags & float_flag_invalid)) {
        float_invalid_op_div(env, tstat.float_exception_flags, 1, GETPC());
    }
    if (unlikely(tstat.float_exception_flags & float_flag_divbyzero)) {
        float_zero_divide_excp(env, GETPC());
    }

    helper_compute_fprf_float128(env, t.f128);
    *xt = t;
    do_float_check_status(env, true, GETPC());
}
/*
 * VSX_RE  - VSX floating point reciprocal estimate
 *   op    - instruction mnemonic
 *   nels  - number of elements (1, 2 or 4)
 *   tp    - type (float32 or float64)
 *   fld   - vsr_t field (VsrD(*) or VsrW(*))
 *   sfifprf - set FI and FPRF
 */
#define VSX_RE(op, nels, tp, fld, sfifprf, r2sp)                          \
void helper_##op(CPUPPCState *env, ppc_vsr_t *xt, ppc_vsr_t *xb)          \
{                                                                         \
    ppc_vsr_t t = { };                                                    \
    int i;                                                                \
                                                                          \
    helper_reset_fpstatus(env);                                           \
                                                                          \
    for (i = 0; i < nels; i++) {                                          \
        if (unlikely(tp##_is_signaling_nan(xb->fld, &env->fp_status))) {  \
            float_invalid_op_vxsnan(env, GETPC());                        \
        }                                                                 \
        t.fld = tp##_div(tp##_one, xb->fld, &env->fp_status);             \
                                                                          \
        if (r2sp) {                                                       \
            t.fld = do_frsp(env, t.fld, GETPC());                         \
        }                                                                 \
                                                                          \
        if (sfifprf) {                                                    \
            helper_compute_fprf_float64(env, t.fld);                      \
        }                                                                 \
    }                                                                     \
                                                                          \
    *xt = t;                                                              \
    do_float_check_status(env, sfifprf, GETPC());                         \
}

VSX_RE(xsredp, 1, float64, VsrD(0), 1, 0)
VSX_RE(xsresp, 1, float64, VsrD(0), 1, 1)
VSX_RE(xvredp, 2, float64, VsrD(i), 0, 0)
VSX_RE(xvresp, 4, float32, VsrW(i), 0, 0)
/*
 * VSX_SQRT - VSX floating point square root
 *   op    - instruction mnemonic
 *   nels  - number of elements (1, 2 or 4)
 *   tp    - type (float32 or float64)
 *   fld   - vsr_t field (VsrD(*) or VsrW(*))
 *   sfifprf - set FI and FPRF
 */
#define VSX_SQRT(op, nels, tp, fld, sfifprf, r2sp)                           \
void helper_##op(CPUPPCState *env, ppc_vsr_t *xt, ppc_vsr_t *xb)             \
{                                                                            \
    ppc_vsr_t t = { };                                                       \
    int i;                                                                   \
                                                                             \
    helper_reset_fpstatus(env);                                              \
                                                                             \
    for (i = 0; i < nels; i++) {                                             \
        float_status tstat = env->fp_status;                                 \
        set_float_exception_flags(0, &tstat);                                \
        t.fld = tp##_sqrt(xb->fld, &tstat);                                  \
        env->fp_status.float_exception_flags |= tstat.float_exception_flags; \
                                                                             \
        if (unlikely(tstat.float_exception_flags & float_flag_invalid)) {    \
            float_invalid_op_sqrt(env, tstat.float_exception_flags,          \
                                  sfifprf, GETPC());                         \
        }                                                                    \
                                                                             \
        if (r2sp) {                                                          \
            t.fld = do_frsp(env, t.fld, GETPC());                            \
        }                                                                    \
                                                                             \
        if (sfifprf) {                                                       \
            helper_compute_fprf_float64(env, t.fld);                         \
        }                                                                    \
    }                                                                        \
                                                                             \
    *xt = t;                                                                 \
    do_float_check_status(env, sfifprf, GETPC());                            \
}

VSX_SQRT(xssqrtdp, 1, float64, VsrD(0), 1, 0)
VSX_SQRT(xssqrtsp, 1, float64, VsrD(0), 1, 1)
VSX_SQRT(xvsqrtdp, 2, float64, VsrD(i), 0, 0)
VSX_SQRT(xvsqrtsp, 4, float32, VsrW(i), 0, 0)
/*
 * VSX_RSQRTE - VSX floating point reciprocal square root estimate
 *   op    - instruction mnemonic
 *   nels  - number of elements (1, 2 or 4)
 *   tp    - type (float32 or float64)
 *   fld   - vsr_t field (VsrD(*) or VsrW(*))
 *   sfifprf - set FI and FPRF
 */
#define VSX_RSQRTE(op, nels, tp, fld, sfifprf, r2sp)                         \
void helper_##op(CPUPPCState *env, ppc_vsr_t *xt, ppc_vsr_t *xb)             \
{                                                                            \
    ppc_vsr_t t = { };                                                       \
    int i;                                                                   \
                                                                             \
    helper_reset_fpstatus(env);                                              \
                                                                             \
    for (i = 0; i < nels; i++) {                                             \
        float_status tstat = env->fp_status;                                 \
        set_float_exception_flags(0, &tstat);                                \
        t.fld = tp##_sqrt(xb->fld, &tstat);                                  \
        t.fld = tp##_div(tp##_one, t.fld, &tstat);                           \
        env->fp_status.float_exception_flags |= tstat.float_exception_flags; \
        if (unlikely(tstat.float_exception_flags & float_flag_invalid)) {    \
            float_invalid_op_sqrt(env, tstat.float_exception_flags,          \
                                  sfifprf, GETPC());                         \
        }                                                                    \
                                                                             \
        if (r2sp) {                                                          \
            t.fld = do_frsp(env, t.fld, GETPC());                            \
        }                                                                    \
                                                                             \
        if (sfifprf) {                                                       \
            helper_compute_fprf_float64(env, t.fld);                         \
        }                                                                    \
    }                                                                        \
                                                                             \
    *xt = t;                                                                 \
    do_float_check_status(env, sfifprf, GETPC());                            \
}

VSX_RSQRTE(xsrsqrtedp, 1, float64, VsrD(0), 1, 0)
VSX_RSQRTE(xsrsqrtesp, 1, float64, VsrD(0), 1, 1)
VSX_RSQRTE(xvrsqrtedp, 2, float64, VsrD(i), 0, 0)
VSX_RSQRTE(xvrsqrtesp, 4, float32, VsrW(i), 0, 0)
/*
 * VSX_TDIV - VSX floating point test for divide
 *   op    - instruction mnemonic
 *   nels  - number of elements (1, 2 or 4)
 *   tp    - type (float32 or float64)
 *   fld   - vsr_t field (VsrD(*) or VsrW(*))
 *   emin  - minimum unbiased exponent
 *   emax  - maximum unbiased exponent
 *   nbits - number of fraction bits
 */
#define VSX_TDIV(op, nels, tp, fld, emin, emax, nbits)                  \
void helper_##op(CPUPPCState *env, uint32_t opcode,                     \
                 ppc_vsr_t *xa, ppc_vsr_t *xb)                          \
{                                                                       \
    int i;                                                              \
    int fe_flag = 0;                                                    \
    int fg_flag = 0;                                                    \
                                                                        \
    for (i = 0; i < nels; i++) {                                        \
        if (unlikely(tp##_is_infinity(xa->fld) ||                       \
                     tp##_is_infinity(xb->fld) ||                       \
                     tp##_is_zero(xb->fld))) {                          \
            fe_flag = 1;                                                \
            fg_flag = 1;                                                \
        } else {                                                        \
            int e_a = ppc_##tp##_get_unbiased_exp(xa->fld);             \
            int e_b = ppc_##tp##_get_unbiased_exp(xb->fld);             \
                                                                        \
            if (unlikely(tp##_is_any_nan(xa->fld) ||                    \
                         tp##_is_any_nan(xb->fld))) {                   \
                fe_flag = 1;                                            \
            } else if ((e_b <= emin) || (e_b >= (emax - 2))) {          \
                fe_flag = 1;                                            \
            } else if (!tp##_is_zero(xa->fld) &&                        \
                       (((e_a - e_b) >= emax) ||                        \
                        ((e_a - e_b) <= (emin + 1)) ||                  \
                        (e_a <= (emin + nbits)))) {                     \
                fe_flag = 1;                                            \
            }                                                           \
                                                                        \
            if (unlikely(tp##_is_zero_or_denormal(xb->fld))) {          \
                /*                                                      \
                 * XB is not zero because of the above check and so     \
                 * must be denormalized.                                \
                 */                                                     \
                fg_flag = 1;                                            \
            }                                                           \
        }                                                               \
    }                                                                   \
                                                                        \
    env->crf[BF(opcode)] = 0x8 | (fg_flag ? 4 : 0) | (fe_flag ? 2 : 0); \
}

VSX_TDIV(xstdivdp, 1, float64, VsrD(0), -1022, 1023, 52)
VSX_TDIV(xvtdivdp, 2, float64, VsrD(i), -1022, 1023, 52)
VSX_TDIV(xvtdivsp, 4, float32, VsrW(i), -126, 127, 23)
/*
 * VSX_TSQRT - VSX floating point test for square root
 *   op    - instruction mnemonic
 *   nels  - number of elements (1, 2 or 4)
 *   tp    - type (float32 or float64)
 *   fld   - vsr_t field (VsrD(*) or VsrW(*))
 *   emin  - minimum unbiased exponent
 *   nbits - number of fraction bits
 */
#define VSX_TSQRT(op, nels, tp, fld, emin, nbits)                       \
void helper_##op(CPUPPCState *env, uint32_t opcode, ppc_vsr_t *xb)      \
{                                                                       \
    int i;                                                              \
    int fe_flag = 0;                                                    \
    int fg_flag = 0;                                                    \
                                                                        \
    for (i = 0; i < nels; i++) {                                        \
        if (unlikely(tp##_is_infinity(xb->fld) ||                       \
                     tp##_is_zero(xb->fld))) {                          \
            fe_flag = 1;                                                \
            fg_flag = 1;                                                \
        } else {                                                        \
            int e_b = ppc_##tp##_get_unbiased_exp(xb->fld);             \
                                                                        \
            if (unlikely(tp##_is_any_nan(xb->fld))) {                   \
                fe_flag = 1;                                            \
            } else if (unlikely(tp##_is_zero(xb->fld))) {               \
                fe_flag = 1;                                            \
            } else if (unlikely(tp##_is_neg(xb->fld))) {                \
                fe_flag = 1;                                            \
            } else if (!tp##_is_zero(xb->fld) &&                        \
                       (e_b <= (emin + nbits))) {                       \
                fe_flag = 1;                                            \
            }                                                           \
                                                                        \
            if (unlikely(tp##_is_zero_or_denormal(xb->fld))) {          \
                /*                                                      \
                 * XB is not zero because of the above check and        \
                 * therefore must be denormalized.                      \
                 */                                                     \
                fg_flag = 1;                                            \
            }                                                           \
        }                                                               \
    }                                                                   \
                                                                        \
    env->crf[BF(opcode)] = 0x8 | (fg_flag ? 4 : 0) | (fe_flag ? 2 : 0); \
}

VSX_TSQRT(xstsqrtdp, 1, float64, VsrD(0), -1022, 52)
VSX_TSQRT(xvtsqrtdp, 2, float64, VsrD(i), -1022, 52)
VSX_TSQRT(xvtsqrtsp, 4, float32, VsrW(i), -126, 23)
/*
 * VSX_MADD - VSX floating point multiply/add variations
 *   op    - instruction mnemonic
 *   nels  - number of elements (1, 2 or 4)
 *   tp    - type (float32 or float64)
 *   fld   - vsr_t field (VsrD(*) or VsrW(*))
 *   maddflgs - flags for the float*muladd routine that control the
 *           various forms (madd, msub, nmadd, nmsub)
 *   sfifprf - set FI and FPRF
 */
#define VSX_MADD(op, nels, tp, fld, maddflgs, sfifprf)                       \
void helper_##op(CPUPPCState *env, ppc_vsr_t *xt,                            \
                 ppc_vsr_t *s1, ppc_vsr_t *s2, ppc_vsr_t *s3)                \
{                                                                            \
    ppc_vsr_t t = *xt;                                                       \
    int i;                                                                   \
                                                                             \
    helper_reset_fpstatus(env);                                              \
                                                                             \
    for (i = 0; i < nels; i++) {                                             \
        float_status tstat = env->fp_status;                                 \
        set_float_exception_flags(0, &tstat);                                \
        t.fld = tp##_muladd(s1->fld, s3->fld, s2->fld, maddflgs, &tstat);    \
        env->fp_status.float_exception_flags |= tstat.float_exception_flags; \
                                                                             \
        if (unlikely(tstat.float_exception_flags & float_flag_invalid)) {    \
            float_invalid_op_madd(env, tstat.float_exception_flags,          \
                                  sfifprf, GETPC());                         \
        }                                                                    \
                                                                             \
        if (sfifprf) {                                                       \
            helper_compute_fprf_float64(env, t.fld);                         \
        }                                                                    \
    }                                                                        \
    *xt = t;                                                                 \
    do_float_check_status(env, sfifprf, GETPC());                            \
}

VSX_MADD(XSMADDDP, 1, float64, VsrD(0), MADD_FLGS, 1)
VSX_MADD(XSMSUBDP, 1, float64, VsrD(0), MSUB_FLGS, 1)
VSX_MADD(XSNMADDDP, 1, float64, VsrD(0), NMADD_FLGS, 1)
VSX_MADD(XSNMSUBDP, 1, float64, VsrD(0), NMSUB_FLGS, 1)
VSX_MADD(XSMADDSP, 1, float64r32, VsrD(0), MADD_FLGS, 1)
VSX_MADD(XSMSUBSP, 1, float64r32, VsrD(0), MSUB_FLGS, 1)
VSX_MADD(XSNMADDSP, 1, float64r32, VsrD(0), NMADD_FLGS, 1)
VSX_MADD(XSNMSUBSP, 1, float64r32, VsrD(0), NMSUB_FLGS, 1)

VSX_MADD(xvmadddp, 2, float64, VsrD(i), MADD_FLGS, 0)
VSX_MADD(xvmsubdp, 2, float64, VsrD(i), MSUB_FLGS, 0)
VSX_MADD(xvnmadddp, 2, float64, VsrD(i), NMADD_FLGS, 0)
VSX_MADD(xvnmsubdp, 2, float64, VsrD(i), NMSUB_FLGS, 0)

VSX_MADD(xvmaddsp, 4, float32, VsrW(i), MADD_FLGS, 0)
VSX_MADD(xvmsubsp, 4, float32, VsrW(i), MSUB_FLGS, 0)
VSX_MADD(xvnmaddsp, 4, float32, VsrW(i), NMADD_FLGS, 0)
VSX_MADD(xvnmsubsp, 4, float32, VsrW(i), NMSUB_FLGS, 0)
/*
 * VSX_MADDQ - VSX floating point quad-precision multiply/add
 *   op    - instruction mnemonic
 *   maddflgs - flags for the float*muladd routine that control the
 *           various forms (madd, msub, nmadd, nmsub)
 *   ro    - round to odd
 */
#define VSX_MADDQ(op, maddflgs, ro)                                           \
void helper_##op(CPUPPCState *env, ppc_vsr_t *xt, ppc_vsr_t *s1, ppc_vsr_t *s2,\
                 ppc_vsr_t *s3)                                               \
{                                                                             \
    ppc_vsr_t t = *xt;                                                        \
                                                                              \
    helper_reset_fpstatus(env);                                               \
                                                                              \
    float_status tstat = env->fp_status;                                      \
    set_float_exception_flags(0, &tstat);                                     \
    if (ro) {                                                                 \
        tstat.float_rounding_mode = float_round_to_odd;                       \
    }                                                                         \
    t.f128 = float128_muladd(s1->f128, s3->f128, s2->f128, maddflgs, &tstat); \
    env->fp_status.float_exception_flags |= tstat.float_exception_flags;      \
                                                                              \
    if (unlikely(tstat.float_exception_flags & float_flag_invalid)) {         \
        float_invalid_op_madd(env, tstat.float_exception_flags,               \
                              false, GETPC());                                \
    }                                                                         \
                                                                              \
    helper_compute_fprf_float128(env, t.f128);                                \
    *xt = t;                                                                  \
    do_float_check_status(env, true, GETPC());                                \
}

VSX_MADDQ(XSMADDQP, MADD_FLGS, 0)
VSX_MADDQ(XSMADDQPO, MADD_FLGS, 1)
VSX_MADDQ(XSMSUBQP, MSUB_FLGS, 0)
VSX_MADDQ(XSMSUBQPO, MSUB_FLGS, 1)
VSX_MADDQ(XSNMADDQP, NMADD_FLGS, 0)
VSX_MADDQ(XSNMADDQPO, NMADD_FLGS, 1)
VSX_MADDQ(XSNMSUBQP, NMSUB_FLGS, 0)
VSX_MADDQ(XSNMSUBQPO, NMSUB_FLGS, 1)
2252 * VSX_SCALAR_CMP - VSX scalar floating point compare
2253 * op - instruction mnemonic
2255 * cmp - comparison operation
2257 * svxvc - set VXVC bit
#define VSX_SCALAR_CMP(op, tp, cmp, fld, svxvc)                               \
void helper_##op(CPUPPCState *env, ppc_vsr_t *xt,                            \
                 ppc_vsr_t *xa, ppc_vsr_t *xb)                                \
{                                                                             \
    int flags;                                                                \
    bool r, vxvc;                                                             \
                                                                              \
    helper_reset_fpstatus(env);                                               \
                                                                              \
    if (svxvc) {                                                              \
        r = tp##_##cmp(xb->fld, xa->fld, &env->fp_status);                    \
    } else {                                                                  \
        r = tp##_##cmp##_quiet(xb->fld, xa->fld, &env->fp_status);            \
    }                                                                         \
                                                                              \
    flags = get_float_exception_flags(&env->fp_status);                       \
    if (unlikely(flags & float_flag_invalid)) {                               \
        vxvc = svxvc;                                                         \
        if (flags & float_flag_invalid_snan) {                                \
            float_invalid_op_vxsnan(env, GETPC());                            \
            vxvc &= !(env->fpscr & FP_VE);                                    \
        }                                                                     \
        if (vxvc) {                                                           \
            float_invalid_op_vxvc(env, 0, GETPC());                           \
        }                                                                     \
    }                                                                         \
                                                                              \
    memset(xt, 0, sizeof(*xt));                                               \
    memset(&xt->fld, -r, sizeof(xt->fld));                                    \
    do_float_check_status(env, false, GETPC());                               \
}
VSX_SCALAR_CMP(XSCMPEQDP, float64, eq, VsrD(0), 0)
VSX_SCALAR_CMP(XSCMPGEDP, float64, le, VsrD(0), 1)
VSX_SCALAR_CMP(XSCMPGTDP, float64, lt, VsrD(0), 1)
VSX_SCALAR_CMP(XSCMPEQQP, float128, eq, f128, 0)
VSX_SCALAR_CMP(XSCMPGEQP, float128, le, f128, 1)
VSX_SCALAR_CMP(XSCMPGTQP, float128, lt, f128, 1)
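/*
 * The comparisons above are written with the operands swapped
 * (tp##_cmp(xb, xa)), so "greater or equal" uses le and "greater than"
 * uses lt: xa >= xb is evaluated as xb <= xa.
 */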
void helper_xscmpexpdp(CPUPPCState *env, uint32_t opcode,
                       ppc_vsr_t *xa, ppc_vsr_t *xb)
{
    int64_t exp_a, exp_b;
    uint32_t cc;

    exp_a = extract64(xa->VsrD(0), 52, 11);
    exp_b = extract64(xb->VsrD(0), 52, 11);

    if (unlikely(float64_is_any_nan(xa->VsrD(0)) ||
                 float64_is_any_nan(xb->VsrD(0)))) {
        cc = CRF_SO;
    } else {
        if (exp_a < exp_b) {
            cc = CRF_LT;
        } else if (exp_a > exp_b) {
            cc = CRF_GT;
        } else {
            cc = CRF_EQ;
        }
    }

    env->fpscr &= ~FP_FPCC;
    env->fpscr |= cc << FPSCR_FPCC;
    env->crf[BF(opcode)] = cc;

    do_float_check_status(env, false, GETPC());
}
void helper_xscmpexpqp(CPUPPCState *env, uint32_t opcode,
                       ppc_vsr_t *xa, ppc_vsr_t *xb)
{
    int64_t exp_a, exp_b;
    uint32_t cc;

    exp_a = extract64(xa->VsrD(0), 48, 15);
    exp_b = extract64(xb->VsrD(0), 48, 15);

    if (unlikely(float128_is_any_nan(xa->f128) ||
                 float128_is_any_nan(xb->f128))) {
        cc = CRF_SO;
    } else {
        if (exp_a < exp_b) {
            cc = CRF_LT;
        } else if (exp_a > exp_b) {
            cc = CRF_GT;
        } else {
            cc = CRF_EQ;
        }
    }

    env->fpscr &= ~FP_FPCC;
    env->fpscr |= cc << FPSCR_FPCC;
    env->crf[BF(opcode)] = cc;

    do_float_check_status(env, false, GETPC());
}
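/*
 * Worked example (illustration only): for a float64 operand the biased
 * exponent sits in bits 52..62, so extract64(x, 52, 11) applied to 1.0
 * (0x3FF0000000000000) yields 0x3FF; for float128 the 15-bit exponent
 * occupies bits 48..62 of the high doubleword, hence the (48, 15) pair.
 */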
static inline void do_scalar_cmp(CPUPPCState *env, ppc_vsr_t *xa, ppc_vsr_t *xb,
                                 int crf_idx, bool ordered)
{
    uint32_t cc;
    bool vxsnan_flag = false, vxvc_flag = false;

    helper_reset_fpstatus(env);

    switch (float64_compare(xa->VsrD(0), xb->VsrD(0), &env->fp_status)) {
    case float_relation_less:
        cc = CRF_LT;
        break;
    case float_relation_equal:
        cc = CRF_EQ;
        break;
    case float_relation_greater:
        cc = CRF_GT;
        break;
    case float_relation_unordered:
        cc = CRF_SO;

        if (float64_is_signaling_nan(xa->VsrD(0), &env->fp_status) ||
            float64_is_signaling_nan(xb->VsrD(0), &env->fp_status)) {
            vxsnan_flag = true;
            if (!(env->fpscr & FP_VE) && ordered) {
                vxvc_flag = true;
            }
        } else if (float64_is_quiet_nan(xa->VsrD(0), &env->fp_status) ||
                   float64_is_quiet_nan(xb->VsrD(0), &env->fp_status)) {
            if (ordered) {
                vxvc_flag = true;
            }
        }

        break;
    default:
        g_assert_not_reached();
    }

    env->fpscr &= ~FP_FPCC;
    env->fpscr |= cc << FPSCR_FPCC;
    env->crf[crf_idx] = cc;

    if (vxsnan_flag) {
        float_invalid_op_vxsnan(env, GETPC());
    }
    if (vxvc_flag) {
        float_invalid_op_vxvc(env, 0, GETPC());
    }

    do_float_check_status(env, false, GETPC());
}
void helper_xscmpodp(CPUPPCState *env, uint32_t opcode, ppc_vsr_t *xa,
                     ppc_vsr_t *xb)
{
    do_scalar_cmp(env, xa, xb, BF(opcode), true);
}

void helper_xscmpudp(CPUPPCState *env, uint32_t opcode, ppc_vsr_t *xa,
                     ppc_vsr_t *xb)
{
    do_scalar_cmp(env, xa, xb, BF(opcode), false);
}
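/*
 * xscmpodp and xscmpudp differ only in the "ordered" flag passed to
 * do_scalar_cmp() above: the ordered form also raises VXVC (always for
 * quiet NaNs, and for signaling NaNs only when VE is clear), while the
 * unordered form reports just VXSNAN for signaling NaNs.
 */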
static inline void do_scalar_cmpq(CPUPPCState *env, ppc_vsr_t *xa,
                                  ppc_vsr_t *xb, int crf_idx, bool ordered)
{
    uint32_t cc;
    bool vxsnan_flag = false, vxvc_flag = false;

    helper_reset_fpstatus(env);

    switch (float128_compare(xa->f128, xb->f128, &env->fp_status)) {
    case float_relation_less:
        cc = CRF_LT;
        break;
    case float_relation_equal:
        cc = CRF_EQ;
        break;
    case float_relation_greater:
        cc = CRF_GT;
        break;
    case float_relation_unordered:
        cc = CRF_SO;

        if (float128_is_signaling_nan(xa->f128, &env->fp_status) ||
            float128_is_signaling_nan(xb->f128, &env->fp_status)) {
            vxsnan_flag = true;
            if (!(env->fpscr & FP_VE) && ordered) {
                vxvc_flag = true;
            }
        } else if (float128_is_quiet_nan(xa->f128, &env->fp_status) ||
                   float128_is_quiet_nan(xb->f128, &env->fp_status)) {
            if (ordered) {
                vxvc_flag = true;
            }
        }

        break;
    default:
        g_assert_not_reached();
    }

    env->fpscr &= ~FP_FPCC;
    env->fpscr |= cc << FPSCR_FPCC;
    env->crf[crf_idx] = cc;

    if (vxsnan_flag) {
        float_invalid_op_vxsnan(env, GETPC());
    }
    if (vxvc_flag) {
        float_invalid_op_vxvc(env, 0, GETPC());
    }

    do_float_check_status(env, false, GETPC());
}
void helper_xscmpoqp(CPUPPCState *env, uint32_t opcode, ppc_vsr_t *xa,
                     ppc_vsr_t *xb)
{
    do_scalar_cmpq(env, xa, xb, BF(opcode), true);
}

void helper_xscmpuqp(CPUPPCState *env, uint32_t opcode, ppc_vsr_t *xa,
                     ppc_vsr_t *xb)
{
    do_scalar_cmpq(env, xa, xb, BF(opcode), false);
}
/*
 * VSX_MAX_MIN - VSX floating point maximum/minimum
 *   name - instruction mnemonic
 *   op - operation (max or min)
 *   nels - number of elements (1, 2 or 4)
 *   tp - type (float32 or float64)
 *   fld - vsr_t field (VsrD(*) or VsrW(*))
 */
#define VSX_MAX_MIN(name, op, nels, tp, fld)                                  \
void helper_##name(CPUPPCState *env, ppc_vsr_t *xt,                          \
                   ppc_vsr_t *xa, ppc_vsr_t *xb)                              \
{                                                                             \
    ppc_vsr_t t = { };                                                        \
    int i;                                                                    \
                                                                              \
    for (i = 0; i < nels; i++) {                                              \
        t.fld = tp##_##op(xa->fld, xb->fld, &env->fp_status);                 \
        if (unlikely(tp##_is_signaling_nan(xa->fld, &env->fp_status) ||       \
                     tp##_is_signaling_nan(xb->fld, &env->fp_status))) {      \
            float_invalid_op_vxsnan(env, GETPC());                            \
        }                                                                     \
    }                                                                         \
                                                                              \
    *xt = t;                                                                  \
    do_float_check_status(env, false, GETPC());                               \
}
VSX_MAX_MIN(xsmaxdp, maxnum, 1, float64, VsrD(0))
VSX_MAX_MIN(xvmaxdp, maxnum, 2, float64, VsrD(i))
VSX_MAX_MIN(xvmaxsp, maxnum, 4, float32, VsrW(i))
VSX_MAX_MIN(xsmindp, minnum, 1, float64, VsrD(0))
VSX_MAX_MIN(xvmindp, minnum, 2, float64, VsrD(i))
VSX_MAX_MIN(xvminsp, minnum, 4, float32, VsrW(i))
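/*
 * maxnum/minnum implement the IEEE 754-2008 maxNum/minNum rules: when
 * exactly one operand is a quiet NaN, the numeric operand is returned.
 * A signaling NaN operand still sets VXSNAN, which the check above
 * reports.
 */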
#define VSX_MAX_MINC(name, max, tp, fld)                                      \
void helper_##name(CPUPPCState *env,                                          \
                   ppc_vsr_t *xt, ppc_vsr_t *xa, ppc_vsr_t *xb)                \
{                                                                              \
    ppc_vsr_t t = { };                                                         \
    bool first;                                                                \
                                                                               \
    helper_reset_fpstatus(env);                                                \
                                                                               \
    if (max) {                                                                 \
        first = tp##_le_quiet(xb->fld, xa->fld, &env->fp_status);              \
    } else {                                                                   \
        first = tp##_lt_quiet(xa->fld, xb->fld, &env->fp_status);              \
    }                                                                          \
                                                                               \
    if (first) {                                                               \
        t.fld = xa->fld;                                                       \
    } else {                                                                   \
        t.fld = xb->fld;                                                       \
        if (env->fp_status.float_exception_flags & float_flag_invalid_snan) {  \
            float_invalid_op_vxsnan(env, GETPC());                             \
        }                                                                      \
    }                                                                          \
                                                                               \
    *xt = t;                                                                   \
}
VSX_MAX_MINC(XSMAXCDP, true, float64, VsrD(0));
VSX_MAX_MINC(XSMINCDP, false, float64, VsrD(0));
VSX_MAX_MINC(XSMAXCQP, true, float128, f128);
VSX_MAX_MINC(XSMINCQP, false, float128, f128);
#define VSX_MAX_MINJ(name, max)                                               \
void helper_##name(CPUPPCState *env,                                          \
                   ppc_vsr_t *xt, ppc_vsr_t *xa, ppc_vsr_t *xb)                \
{                                                                              \
    ppc_vsr_t t = { };                                                         \
    bool vxsnan_flag = false, vex_flag = false;                                \
                                                                               \
    if (unlikely(float64_is_any_nan(xa->VsrD(0)))) {                           \
        if (float64_is_signaling_nan(xa->VsrD(0), &env->fp_status)) {          \
            vxsnan_flag = true;                                                \
        }                                                                      \
        t.VsrD(0) = xa->VsrD(0);                                               \
    } else if (unlikely(float64_is_any_nan(xb->VsrD(0)))) {                    \
        if (float64_is_signaling_nan(xb->VsrD(0), &env->fp_status)) {          \
            vxsnan_flag = true;                                                \
        }                                                                      \
        t.VsrD(0) = xb->VsrD(0);                                               \
    } else if (float64_is_zero(xa->VsrD(0)) &&                                 \
               float64_is_zero(xb->VsrD(0))) {                                 \
        if (max) {                                                             \
            if (!float64_is_neg(xa->VsrD(0)) ||                                \
                !float64_is_neg(xb->VsrD(0))) {                                \
                t.VsrD(0) = 0ULL;                                              \
            } else {                                                           \
                t.VsrD(0) = 0x8000000000000000ULL;                             \
            }                                                                  \
        } else {                                                               \
            if (float64_is_neg(xa->VsrD(0)) ||                                 \
                float64_is_neg(xb->VsrD(0))) {                                 \
                t.VsrD(0) = 0x8000000000000000ULL;                             \
            } else {                                                           \
                t.VsrD(0) = 0ULL;                                              \
            }                                                                  \
        }                                                                      \
    } else if ((max &&                                                         \
               !float64_lt(xa->VsrD(0), xb->VsrD(0), &env->fp_status)) ||      \
               (!max &&                                                        \
               float64_lt(xa->VsrD(0), xb->VsrD(0), &env->fp_status))) {       \
        t.VsrD(0) = xa->VsrD(0);                                               \
    } else {                                                                   \
        t.VsrD(0) = xb->VsrD(0);                                               \
    }                                                                          \
                                                                               \
    vex_flag = (env->fpscr & FP_VE) && vxsnan_flag;                            \
    if (vxsnan_flag) {                                                         \
        float_invalid_op_vxsnan(env, GETPC());                                 \
    }                                                                          \
    if (!vex_flag) {                                                           \
        *xt = t;                                                               \
    }                                                                          \
}
VSX_MAX_MINJ(XSMAXJDP, 1);
VSX_MAX_MINJ(XSMINJDP, 0);
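/*
 * For the zero/zero case above, XSMAXJDP returns +0 if either input is
 * +0 and -0 only when both are negative; XSMINJDP is the mirror image.
 * NaN inputs propagate the chosen operand unchanged (xa preferred)
 * rather than being quieted.
 */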
/*
 * VSX_CMP - VSX floating point compare
 *   op - instruction mnemonic
 *   nels - number of elements (1, 2 or 4)
 *   tp - type (float32 or float64)
 *   fld - vsr_t field (VsrD(*) or VsrW(*))
 *   cmp - comparison operation
 *   svxvc - set VXVC bit
 *   exp - expected result of comparison
 */
#define VSX_CMP(op, nels, tp, fld, cmp, svxvc, exp)                       \
uint32_t helper_##op(CPUPPCState *env, ppc_vsr_t *xt,                     \
                     ppc_vsr_t *xa, ppc_vsr_t *xb)                        \
{                                                                         \
    ppc_vsr_t t = *xt;                                                    \
    uint32_t crf6 = 0;                                                    \
    int i;                                                                \
    int all_true = 1;                                                     \
    int all_false = 1;                                                    \
                                                                          \
    for (i = 0; i < nels; i++) {                                          \
        if (unlikely(tp##_is_any_nan(xa->fld) ||                          \
                     tp##_is_any_nan(xb->fld))) {                         \
            if (tp##_is_signaling_nan(xa->fld, &env->fp_status) ||        \
                tp##_is_signaling_nan(xb->fld, &env->fp_status)) {        \
                float_invalid_op_vxsnan(env, GETPC());                    \
            }                                                             \
            if (svxvc) {                                                  \
                float_invalid_op_vxvc(env, 0, GETPC());                   \
            }                                                             \
            t.fld = 0;                                                    \
            all_true = 0;                                                 \
        } else {                                                          \
            if (tp##_##cmp(xb->fld, xa->fld, &env->fp_status) == exp) {   \
                t.fld = -1;                                               \
                all_false = 0;                                            \
            } else {                                                      \
                t.fld = 0;                                                \
                all_true = 0;                                             \
            }                                                             \
        }                                                                 \
    }                                                                     \
                                                                          \
    *xt = t;                                                              \
    crf6 = (all_true ? 0x8 : 0) | (all_false ? 0x2 : 0);                  \
    return crf6;                                                          \
}
VSX_CMP(xvcmpeqdp, 2, float64, VsrD(i), eq, 0, 1)
VSX_CMP(xvcmpgedp, 2, float64, VsrD(i), le, 1, 1)
VSX_CMP(xvcmpgtdp, 2, float64, VsrD(i), lt, 1, 1)
VSX_CMP(xvcmpnedp, 2, float64, VsrD(i), eq, 0, 0)
VSX_CMP(xvcmpeqsp, 4, float32, VsrW(i), eq, 0, 1)
VSX_CMP(xvcmpgesp, 4, float32, VsrW(i), le, 1, 1)
VSX_CMP(xvcmpgtsp, 4, float32, VsrW(i), lt, 1, 1)
VSX_CMP(xvcmpnesp, 4, float32, VsrW(i), eq, 0, 0)
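/*
 * The crf6 value returned above packs the summary bits for the Rc=1
 * forms: 0x8 means the predicate held in every element, 0x2 that it
 * held in none; mixed results leave both bits clear.
 */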
/*
 * VSX_CVT_FP_TO_FP - VSX floating point/floating point conversion
 *   op - instruction mnemonic
 *   nels - number of elements (1, 2 or 4)
 *   stp - source type (float32 or float64)
 *   ttp - target type (float32 or float64)
 *   sfld - source vsr_t field
 *   tfld - target vsr_t field (f32 or f64)
 *   sfifprf - set FI and FPRF
 */
#define VSX_CVT_FP_TO_FP(op, nels, stp, ttp, sfld, tfld, sfifprf)  \
void helper_##op(CPUPPCState *env, ppc_vsr_t *xt, ppc_vsr_t *xb)   \
{                                                                  \
    ppc_vsr_t t = { };                                             \
    int i;                                                         \
                                                                   \
    for (i = 0; i < nels; i++) {                                   \
        t.tfld = stp##_to_##ttp(xb->sfld, &env->fp_status);        \
        if (unlikely(stp##_is_signaling_nan(xb->sfld,              \
                                            &env->fp_status))) {   \
            float_invalid_op_vxsnan(env, GETPC());                 \
            t.tfld = ttp##_snan_to_qnan(t.tfld);                   \
        }                                                          \
        if (sfifprf) {                                             \
            helper_compute_fprf_##ttp(env, t.tfld);                \
        }                                                          \
    }                                                              \
                                                                   \
    *xt = t;                                                       \
    do_float_check_status(env, sfifprf, GETPC());                  \
}
VSX_CVT_FP_TO_FP(xscvspdp, 1, float32, float64, VsrW(0), VsrD(0), 1)
VSX_CVT_FP_TO_FP(xvcvspdp, 2, float32, float64, VsrW(2 * i), VsrD(i), 0)
#define VSX_CVT_FP_TO_FP2(op, nels, stp, ttp, sfifprf)                \
void helper_##op(CPUPPCState *env, ppc_vsr_t *xt, ppc_vsr_t *xb)      \
{                                                                     \
    ppc_vsr_t t = { };                                                \
    int i;                                                            \
                                                                      \
    for (i = 0; i < nels; i++) {                                      \
        t.VsrW(2 * i) = stp##_to_##ttp(xb->VsrD(i), &env->fp_status); \
        if (unlikely(stp##_is_signaling_nan(xb->VsrD(i),              \
                                            &env->fp_status))) {      \
            float_invalid_op_vxsnan(env, GETPC());                    \
            t.VsrW(2 * i) = ttp##_snan_to_qnan(t.VsrW(2 * i));        \
        }                                                             \
        if (sfifprf) {                                                \
            helper_compute_fprf_##ttp(env, t.VsrW(2 * i));            \
        }                                                             \
        t.VsrW(2 * i + 1) = t.VsrW(2 * i);                            \
    }                                                                 \
                                                                      \
    *xt = t;                                                          \
    do_float_check_status(env, sfifprf, GETPC());                     \
}
VSX_CVT_FP_TO_FP2(xvcvdpsp, 2, float64, float32, 0)
VSX_CVT_FP_TO_FP2(xscvdpsp, 1, float64, float32, 1)
/*
 * VSX_CVT_FP_TO_FP_VECTOR - VSX floating point/floating point conversion
 *   op - instruction mnemonic
 *   nels - number of elements (1, 2 or 4)
 *   stp - source type (float32 or float64)
 *   ttp - target type (float32 or float64)
 *   sfld - source vsr_t field
 *   tfld - target vsr_t field (f32 or f64)
 *   sfprf - set FPRF
 */
#define VSX_CVT_FP_TO_FP_VECTOR(op, nels, stp, ttp, sfld, tfld, sfprf)    \
void helper_##op(CPUPPCState *env, uint32_t opcode,                       \
                 ppc_vsr_t *xt, ppc_vsr_t *xb)                            \
{                                                                         \
    ppc_vsr_t t = *xt;                                                    \
    int i;                                                                \
                                                                          \
    for (i = 0; i < nels; i++) {                                          \
        t.tfld = stp##_to_##ttp(xb->sfld, &env->fp_status);               \
        if (unlikely(stp##_is_signaling_nan(xb->sfld,                     \
                                            &env->fp_status))) {          \
            float_invalid_op_vxsnan(env, GETPC());                        \
            t.tfld = ttp##_snan_to_qnan(t.tfld);                          \
        }                                                                 \
        if (sfprf) {                                                      \
            helper_compute_fprf_##ttp(env, t.tfld);                       \
        }                                                                 \
    }                                                                     \
                                                                          \
    *xt = t;                                                              \
    do_float_check_status(env, true, GETPC());                            \
}
VSX_CVT_FP_TO_FP_VECTOR(xscvdpqp, 1, float64, float128, VsrD(0), f128, 1)
/*
 * VSX_CVT_FP_TO_FP_HP - VSX floating point/floating point conversion
 *                       involving one half precision value
 *   op - instruction mnemonic
 *   nels - number of elements (1, 2 or 4)
 *   stp - source type
 *   ttp - target type
 *   sfld - source vsr_t field
 *   tfld - target vsr_t field
 *   sfifprf - set FI and FPRF
 */
#define VSX_CVT_FP_TO_FP_HP(op, nels, stp, ttp, sfld, tfld, sfifprf) \
void helper_##op(CPUPPCState *env, ppc_vsr_t *xt, ppc_vsr_t *xb)     \
{                                                                    \
    ppc_vsr_t t = { };                                               \
    int i;                                                           \
                                                                     \
    for (i = 0; i < nels; i++) {                                     \
        t.tfld = stp##_to_##ttp(xb->sfld, 1, &env->fp_status);       \
        if (unlikely(stp##_is_signaling_nan(xb->sfld,                \
                                            &env->fp_status))) {     \
            float_invalid_op_vxsnan(env, GETPC());                   \
            t.tfld = ttp##_snan_to_qnan(t.tfld);                     \
        }                                                            \
        if (sfifprf) {                                               \
            helper_compute_fprf_##ttp(env, t.tfld);                  \
        }                                                            \
    }                                                                \
                                                                     \
    *xt = t;                                                         \
    do_float_check_status(env, sfifprf, GETPC());                    \
}
VSX_CVT_FP_TO_FP_HP(xscvdphp, 1, float64, float16, VsrD(0), VsrH(3), 1)
VSX_CVT_FP_TO_FP_HP(xscvhpdp, 1, float16, float64, VsrH(3), VsrD(0), 1)
VSX_CVT_FP_TO_FP_HP(xvcvsphp, 4, float32, float16, VsrW(i), VsrH(2 * i + 1), 0)
VSX_CVT_FP_TO_FP_HP(xvcvhpsp, 4, float16, float32, VsrH(2 * i + 1), VsrW(i), 0)
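/*
 * The extra "1" argument threaded through stp##_to_##ttp above is
 * softfloat's ieee flag, selecting IEEE half-precision (rather than the
 * ARM alternative format) for the float16 conversions. VsrH(2 * i + 1)
 * picks the halfword lane the ISA defines for these conversions.
 */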
void helper_XVCVSPBF16(CPUPPCState *env, ppc_vsr_t *xt, ppc_vsr_t *xb)
{
    ppc_vsr_t t = { };
    int i, status;

    helper_reset_fpstatus(env);

    for (i = 0; i < 4; i++) {
        t.VsrH(2 * i + 1) = float32_to_bfloat16(xb->VsrW(i), &env->fp_status);
    }

    status = get_float_exception_flags(&env->fp_status);
    if (unlikely(status & float_flag_invalid_snan)) {
        float_invalid_op_vxsnan(env, GETPC());
    }

    *xt = t;
    do_float_check_status(env, false, GETPC());
}
void helper_XSCVQPDP(CPUPPCState *env, uint32_t ro, ppc_vsr_t *xt,
                     ppc_vsr_t *xb)
{
    ppc_vsr_t t = { };
    float_status tstat;

    tstat = env->fp_status;
    if (ro != 0) {
        tstat.float_rounding_mode = float_round_to_odd;
    }

    t.VsrD(0) = float128_to_float64(xb->f128, &tstat);
    env->fp_status.float_exception_flags |= tstat.float_exception_flags;
    if (unlikely(float128_is_signaling_nan(xb->f128, &tstat))) {
        float_invalid_op_vxsnan(env, GETPC());
        t.VsrD(0) = float64_snan_to_qnan(t.VsrD(0));
    }
    helper_compute_fprf_float64(env, t.VsrD(0));

    *xt = t;
    do_float_check_status(env, true, GETPC());
}
uint64_t helper_xscvdpspn(CPUPPCState *env, uint64_t xb)
{
    uint64_t result, sign, exp, frac;

    float_status tstat = env->fp_status;
    set_float_exception_flags(0, &tstat);

    sign = extract64(xb, 63, 1);
    exp = extract64(xb, 52, 11);
    frac = extract64(xb, 0, 52) | 0x10000000000000ULL;

    if (unlikely(exp == 0 && extract64(frac, 0, 52) != 0)) {
        /* DP denormal operand. */
        /* Exponent override to DP min exp. */
        exp = 1;
        /* Implicit bit override to 0. */
        frac = deposit64(frac, 53, 1, 0);
    }

    if (unlikely(exp < 897 && frac != 0)) {
        /* SP tiny operand. */
        if (897 - exp > 63) {
            frac = 0;
        } else {
            /* Denormalize until exp = SP min exp. */
            frac >>= (897 - exp);
        }
        /* Exponent override to SP min exp - 1. */
        exp = 896;
    }

    result = sign << 31;
    result |= extract64(exp, 10, 1) << 30;
    result |= extract64(exp, 0, 7) << 23;
    result |= extract64(frac, 29, 23);

    /* hardware replicates result to both words of the doubleword result.  */
    return (result << 32) | result;
}
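/*
 * Arithmetic sanity check (illustration only): the threshold 897 is the
 * double-precision biased exponent of the smallest single-precision
 * normal, since 897 - 1023 = -126. Anything below it is denormalized
 * into the single-precision fraction by the shift above.
 */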
uint64_t helper_XSCVSPDPN(uint64_t xb)
{
    return helper_todouble(xb >> 32);
}
/*
 * VSX_CVT_FP_TO_INT - VSX floating point to integer conversion
 *   op - instruction mnemonic
 *   nels - number of elements (1, 2 or 4)
 *   stp - source type (float32 or float64)
 *   ttp - target type (int32, uint32, int64 or uint64)
 *   sfld - source vsr_t field
 *   tfld - target vsr_t field
 *   sfi - set FI
 *   rnan - resulting NaN
 */
#define VSX_CVT_FP_TO_INT(op, nels, stp, ttp, sfld, tfld, sfi, rnan)          \
void helper_##op(CPUPPCState *env, ppc_vsr_t *xt, ppc_vsr_t *xb)              \
{                                                                             \
    int all_flags = env->fp_status.float_exception_flags, flags;              \
    ppc_vsr_t t = { };                                                        \
    int i;                                                                    \
                                                                              \
    for (i = 0; i < nels; i++) {                                              \
        env->fp_status.float_exception_flags = 0;                             \
        t.tfld = stp##_to_##ttp##_round_to_zero(xb->sfld, &env->fp_status);   \
        flags = env->fp_status.float_exception_flags;                         \
        if (unlikely(flags & float_flag_invalid)) {                           \
            t.tfld = float_invalid_cvt(env, flags, t.tfld, rnan, 0, GETPC());  \
        }                                                                     \
        all_flags |= flags;                                                   \
    }                                                                         \
                                                                              \
    *xt = t;                                                                  \
    env->fp_status.float_exception_flags = all_flags;                         \
    do_float_check_status(env, sfi, GETPC());                                 \
}
VSX_CVT_FP_TO_INT(xscvdpsxds, 1, float64, int64, VsrD(0), VsrD(0), true, \
                  0x8000000000000000ULL)
VSX_CVT_FP_TO_INT(xscvdpuxds, 1, float64, uint64, VsrD(0), VsrD(0), true, 0ULL)
VSX_CVT_FP_TO_INT(xvcvdpsxds, 2, float64, int64, VsrD(i), VsrD(i), false, \
                  0x8000000000000000ULL)
VSX_CVT_FP_TO_INT(xvcvdpuxds, 2, float64, uint64, VsrD(i), VsrD(i), false, \
                  0ULL)
VSX_CVT_FP_TO_INT(xvcvspsxds, 2, float32, int64, VsrW(2 * i), VsrD(i), false, \
                  0x8000000000000000ULL)
VSX_CVT_FP_TO_INT(xvcvspsxws, 4, float32, int32, VsrW(i), VsrW(i), false, \
                  0x80000000U)
VSX_CVT_FP_TO_INT(xvcvspuxds, 2, float32, uint64, VsrW(2 * i), VsrD(i), \
                  false, 0ULL)
VSX_CVT_FP_TO_INT(xvcvspuxws, 4, float32, uint32, VsrW(i), VsrW(i), false, 0U)
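/*
 * Per the "rnan - resulting NaN" note above, the rnan argument is the
 * default result substituted by float_invalid_cvt() when the conversion
 * is invalid because the source was a NaN: the most negative value for
 * signed targets and zero for unsigned targets. Out-of-range numeric
 * sources are already saturated by softfloat.
 */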
#define VSX_CVT_FP_TO_INT128(op, tp, rnan)                                     \
void helper_##op(CPUPPCState *env, ppc_vsr_t *xt, ppc_vsr_t *xb)               \
{                                                                              \
    ppc_vsr_t t;                                                               \
    int flags;                                                                 \
                                                                               \
    helper_reset_fpstatus(env);                                                \
    t.s128 = float128_to_##tp##_round_to_zero(xb->f128, &env->fp_status);      \
    flags = get_float_exception_flags(&env->fp_status);                        \
    if (unlikely(flags & float_flag_invalid)) {                                \
        t.VsrD(0) = float_invalid_cvt(env, flags, t.VsrD(0), rnan, 0, GETPC());\
        t.VsrD(1) = -(t.VsrD(0) & 1);                                          \
    }                                                                          \
                                                                               \
    *xt = t;                                                                   \
    do_float_check_status(env, true, GETPC());                                 \
}
VSX_CVT_FP_TO_INT128(XSCVQPUQZ, uint128, 0)
VSX_CVT_FP_TO_INT128(XSCVQPSQZ, int128, 0x8000000000000000ULL);
/*
 * Likewise, except that the result is duplicated into both subwords.
 * Power ISA v3.1 has Programming Notes for these insns:
 *     Previous versions of the architecture allowed the contents of
 *     word 0 of the result register to be undefined. However, all
 *     processors that support this instruction write the result into
 *     words 0 and 1 (and words 2 and 3) of the result register, as
 *     is required by this version of the architecture.
 */
#define VSX_CVT_FP_TO_INT2(op, nels, stp, ttp, sfi, rnan)                     \
void helper_##op(CPUPPCState *env, ppc_vsr_t *xt, ppc_vsr_t *xb)              \
{                                                                             \
    int all_flags = env->fp_status.float_exception_flags, flags;              \
    ppc_vsr_t t = { };                                                        \
    int i;                                                                    \
                                                                              \
    for (i = 0; i < nels; i++) {                                              \
        env->fp_status.float_exception_flags = 0;                             \
        t.VsrW(2 * i) = stp##_to_##ttp##_round_to_zero(xb->VsrD(i),           \
                                                       &env->fp_status);      \
        flags = env->fp_status.float_exception_flags;                         \
        if (unlikely(flags & float_flag_invalid)) {                           \
            t.VsrW(2 * i) = float_invalid_cvt(env, flags, t.VsrW(2 * i),      \
                                              rnan, 0, GETPC());              \
        }                                                                     \
        t.VsrW(2 * i + 1) = t.VsrW(2 * i);                                    \
        all_flags |= flags;                                                   \
    }                                                                         \
                                                                              \
    *xt = t;                                                                  \
    env->fp_status.float_exception_flags = all_flags;                         \
    do_float_check_status(env, sfi, GETPC());                                 \
}
VSX_CVT_FP_TO_INT2(xscvdpsxws, 1, float64, int32, true, 0x80000000U)
VSX_CVT_FP_TO_INT2(xscvdpuxws, 1, float64, uint32, true, 0U)
VSX_CVT_FP_TO_INT2(xvcvdpsxws, 2, float64, int32, false, 0x80000000U)
VSX_CVT_FP_TO_INT2(xvcvdpuxws, 2, float64, uint32, false, 0U)
/*
 * VSX_CVT_FP_TO_INT_VECTOR - VSX floating point to integer conversion
 *   op - instruction mnemonic
 *   stp - source type (float32 or float64)
 *   ttp - target type (int32, uint32, int64 or uint64)
 *   sfld - source vsr_t field
 *   tfld - target vsr_t field
 *   rnan - resulting NaN
 */
#define VSX_CVT_FP_TO_INT_VECTOR(op, stp, ttp, sfld, tfld, rnan)              \
void helper_##op(CPUPPCState *env, uint32_t opcode,                           \
                 ppc_vsr_t *xt, ppc_vsr_t *xb)                                \
{                                                                             \
    ppc_vsr_t t = { };                                                        \
    int flags;                                                                \
                                                                              \
    t.tfld = stp##_to_##ttp##_round_to_zero(xb->sfld, &env->fp_status);       \
    flags = get_float_exception_flags(&env->fp_status);                       \
    if (flags & float_flag_invalid) {                                         \
        t.tfld = float_invalid_cvt(env, flags, t.tfld, rnan, 0, GETPC());     \
    }                                                                         \
                                                                              \
    *xt = t;                                                                  \
    do_float_check_status(env, true, GETPC());                                \
}
VSX_CVT_FP_TO_INT_VECTOR(xscvqpsdz, float128, int64, f128, VsrD(0), \
                         0x8000000000000000ULL)

VSX_CVT_FP_TO_INT_VECTOR(xscvqpswz, float128, int32, f128, VsrD(0), \
                         0xffffffff80000000ULL)
VSX_CVT_FP_TO_INT_VECTOR(xscvqpudz, float128, uint64, f128, VsrD(0), 0x0ULL)
VSX_CVT_FP_TO_INT_VECTOR(xscvqpuwz, float128, uint32, f128, VsrD(0), 0x0ULL)
/*
 * VSX_CVT_INT_TO_FP - VSX integer to floating point conversion
 *   op - instruction mnemonic
 *   nels - number of elements (1, 2 or 4)
 *   stp - source type (int32, uint32, int64 or uint64)
 *   ttp - target type (float32 or float64)
 *   sfld - source vsr_t field
 *   tfld - target vsr_t field
 *   sfifprf - set FI and FPRF
 *   r2sp - round to single precision
 */
#define VSX_CVT_INT_TO_FP(op, nels, stp, ttp, sfld, tfld, sfifprf, r2sp)\
void helper_##op(CPUPPCState *env, ppc_vsr_t *xt, ppc_vsr_t *xb)        \
{                                                                       \
    ppc_vsr_t t = { };                                                  \
    int i;                                                              \
                                                                        \
    for (i = 0; i < nels; i++) {                                        \
        t.tfld = stp##_to_##ttp(xb->sfld, &env->fp_status);             \
        if (r2sp) {                                                     \
            t.tfld = do_frsp(env, t.tfld, GETPC());                     \
        }                                                               \
        if (sfifprf) {                                                  \
            helper_compute_fprf_float64(env, t.tfld);                   \
        }                                                               \
    }                                                                   \
                                                                        \
    *xt = t;                                                            \
    do_float_check_status(env, sfifprf, GETPC());                       \
}
VSX_CVT_INT_TO_FP(xscvsxddp, 1, int64, float64, VsrD(0), VsrD(0), 1, 0)
VSX_CVT_INT_TO_FP(xscvuxddp, 1, uint64, float64, VsrD(0), VsrD(0), 1, 0)
VSX_CVT_INT_TO_FP(xscvsxdsp, 1, int64, float64, VsrD(0), VsrD(0), 1, 1)
VSX_CVT_INT_TO_FP(xscvuxdsp, 1, uint64, float64, VsrD(0), VsrD(0), 1, 1)
VSX_CVT_INT_TO_FP(xvcvsxddp, 2, int64, float64, VsrD(i), VsrD(i), 0, 0)
VSX_CVT_INT_TO_FP(xvcvuxddp, 2, uint64, float64, VsrD(i), VsrD(i), 0, 0)
VSX_CVT_INT_TO_FP(xvcvsxwdp, 2, int32, float64, VsrW(2 * i), VsrD(i), 0, 0)
VSX_CVT_INT_TO_FP(xvcvuxwdp, 2, uint64, float64, VsrW(2 * i), VsrD(i), 0, 0)
VSX_CVT_INT_TO_FP(xvcvsxwsp, 4, int32, float32, VsrW(i), VsrW(i), 0, 0)
VSX_CVT_INT_TO_FP(xvcvuxwsp, 4, uint32, float32, VsrW(i), VsrW(i), 0, 0)
#define VSX_CVT_INT_TO_FP2(op, stp, ttp)                              \
void helper_##op(CPUPPCState *env, ppc_vsr_t *xt, ppc_vsr_t *xb)      \
{                                                                     \
    ppc_vsr_t t = { };                                                \
    int i;                                                            \
                                                                      \
    for (i = 0; i < 2; i++) {                                         \
        t.VsrW(2 * i) = stp##_to_##ttp(xb->VsrD(i), &env->fp_status); \
        t.VsrW(2 * i + 1) = t.VsrW(2 * i);                            \
    }                                                                 \
                                                                      \
    *xt = t;                                                          \
    do_float_check_status(env, false, GETPC());                       \
}
VSX_CVT_INT_TO_FP2(xvcvsxdsp, int64, float32)
VSX_CVT_INT_TO_FP2(xvcvuxdsp, uint64, float32)
#define VSX_CVT_INT128_TO_FP(op, tp)                             \
void helper_##op(CPUPPCState *env, ppc_vsr_t *xt, ppc_vsr_t *xb) \
{                                                                \
    helper_reset_fpstatus(env);                                  \
    xt->f128 = tp##_to_float128(xb->s128, &env->fp_status);      \
    helper_compute_fprf_float128(env, xt->f128);                 \
    do_float_check_status(env, true, GETPC());                   \
}
VSX_CVT_INT128_TO_FP(XSCVUQQP, uint128);
VSX_CVT_INT128_TO_FP(XSCVSQQP, int128);
/*
 * VSX_CVT_INT_TO_FP_VECTOR - VSX integer to floating point conversion
 *   op - instruction mnemonic
 *   stp - source type (int32, uint32, int64 or uint64)
 *   ttp - target type (float32 or float64)
 *   sfld - source vsr_t field
 *   tfld - target vsr_t field
 */
#define VSX_CVT_INT_TO_FP_VECTOR(op, stp, ttp, sfld, tfld)   \
void helper_##op(CPUPPCState *env, uint32_t opcode,          \
                 ppc_vsr_t *xt, ppc_vsr_t *xb)               \
{                                                            \
    ppc_vsr_t t = *xt;                                       \
                                                             \
    t.tfld = stp##_to_##ttp(xb->sfld, &env->fp_status);      \
    helper_compute_fprf_##ttp(env, t.tfld);                  \
                                                             \
    *xt = t;                                                 \
    do_float_check_status(env, true, GETPC());               \
}
VSX_CVT_INT_TO_FP_VECTOR(xscvsdqp, int64, float128, VsrD(0), f128)
VSX_CVT_INT_TO_FP_VECTOR(xscvudqp, uint64, float128, VsrD(0), f128)
/*
 * For "use current rounding mode", define a value that will not be
 * one of the existing rounding mode enums.
 */
#define FLOAT_ROUND_CURRENT (float_round_nearest_even + float_round_down + \
                             float_round_up + float_round_to_zero)
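/*
 * This works because the four FloatRoundMode enumerators summed here are
 * small distinct values, so their sum cannot equal any single one of the
 * modes VSX_ROUND is instantiated with below (a convention of this file,
 * not a softfloat guarantee).
 */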
/*
 * VSX_ROUND - VSX floating point round
 *   op - instruction mnemonic
 *   nels - number of elements (1, 2 or 4)
 *   tp - type (float32 or float64)
 *   fld - vsr_t field (VsrD(*) or VsrW(*))
 *   rmode - rounding mode
 *   sfifprf - set FI and FPRF
 */
#define VSX_ROUND(op, nels, tp, fld, rmode, sfifprf)                   \
void helper_##op(CPUPPCState *env, ppc_vsr_t *xt, ppc_vsr_t *xb)       \
{                                                                      \
    ppc_vsr_t t = { };                                                 \
    int i;                                                             \
    FloatRoundMode curr_rounding_mode;                                 \
                                                                       \
    if (rmode != FLOAT_ROUND_CURRENT) {                                \
        curr_rounding_mode = get_float_rounding_mode(&env->fp_status); \
        set_float_rounding_mode(rmode, &env->fp_status);               \
    }                                                                  \
                                                                       \
    for (i = 0; i < nels; i++) {                                       \
        if (unlikely(tp##_is_signaling_nan(xb->fld,                    \
                                           &env->fp_status))) {        \
            float_invalid_op_vxsnan(env, GETPC());                     \
            t.fld = tp##_snan_to_qnan(xb->fld);                        \
        } else {                                                       \
            t.fld = tp##_round_to_int(xb->fld, &env->fp_status);       \
        }                                                              \
        if (sfifprf) {                                                 \
            helper_compute_fprf_float64(env, t.fld);                   \
        }                                                              \
    }                                                                  \
                                                                       \
    /*                                                                 \
     * If this is not a "use current rounding mode" instruction,       \
     * then inhibit setting of the XX bit and restore rounding         \
     * mode from FPSCR                                                 \
     */                                                                \
    if (rmode != FLOAT_ROUND_CURRENT) {                                \
        set_float_rounding_mode(curr_rounding_mode, &env->fp_status);  \
        env->fp_status.float_exception_flags &= ~float_flag_inexact;   \
    }                                                                  \
                                                                       \
    *xt = t;                                                           \
    do_float_check_status(env, sfifprf, GETPC());                      \
}
VSX_ROUND(xsrdpi, 1, float64, VsrD(0), float_round_ties_away, 1)
VSX_ROUND(xsrdpic, 1, float64, VsrD(0), FLOAT_ROUND_CURRENT, 1)
VSX_ROUND(xsrdpim, 1, float64, VsrD(0), float_round_down, 1)
VSX_ROUND(xsrdpip, 1, float64, VsrD(0), float_round_up, 1)
VSX_ROUND(xsrdpiz, 1, float64, VsrD(0), float_round_to_zero, 1)

VSX_ROUND(xvrdpi, 2, float64, VsrD(i), float_round_ties_away, 0)
VSX_ROUND(xvrdpic, 2, float64, VsrD(i), FLOAT_ROUND_CURRENT, 0)
VSX_ROUND(xvrdpim, 2, float64, VsrD(i), float_round_down, 0)
VSX_ROUND(xvrdpip, 2, float64, VsrD(i), float_round_up, 0)
VSX_ROUND(xvrdpiz, 2, float64, VsrD(i), float_round_to_zero, 0)

VSX_ROUND(xvrspi, 4, float32, VsrW(i), float_round_ties_away, 0)
VSX_ROUND(xvrspic, 4, float32, VsrW(i), FLOAT_ROUND_CURRENT, 0)
VSX_ROUND(xvrspim, 4, float32, VsrW(i), float_round_down, 0)
VSX_ROUND(xvrspip, 4, float32, VsrW(i), float_round_up, 0)
VSX_ROUND(xvrspiz, 4, float32, VsrW(i), float_round_to_zero, 0)
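/*
 * Note the ties handling: the "round to nearest" forms (xsrdpi, xvrdpi,
 * xvrspi) use float_round_ties_away, matching the ISA's "Round to
 * Nearest Away" definition for these instructions, not ties-to-even.
 */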
uint64_t helper_xsrsp(CPUPPCState *env, uint64_t xb)
{
    helper_reset_fpstatus(env);

    uint64_t xt = do_frsp(env, xb, GETPC());

    helper_compute_fprf_float64(env, xt);
    do_float_check_status(env, true, GETPC());
    return xt;
}
void helper_xvxsigsp(CPUPPCState *env, ppc_vsr_t *xt, ppc_vsr_t *xb)
{
    ppc_vsr_t t = { };
    uint32_t exp, i, fraction;

    for (i = 0; i < 4; i++) {
        exp = (xb->VsrW(i) >> 23) & 0xFF;
        fraction = xb->VsrW(i) & 0x7FFFFF;
        if (exp != 0 && exp != 255) {
            t.VsrW(i) = fraction | 0x00800000;
        } else {
            t.VsrW(i) = fraction;
        }
    }
    *xt = t;
}
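/*
 * Example (illustration only): for 1.0f, xb->VsrW(i) is 0x3F800000, the
 * exponent field is 0x7F (normal), so the stored result is the fraction
 * with the implicit bit made explicit: 0x00800000.
 */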
/*
 * VSX_TEST_DC - VSX floating point test data class
 *   op - instruction mnemonic
 *   nels - number of elements (1, 2 or 4)
 *   xbn - VSR register number
 *   tp - type (float32 or float64)
 *   fld - vsr_t field (VsrD(*) or VsrW(*))
 *   tfld - target vsr_t field (VsrD(*) or VsrW(*))
 *   fld_max - target field max
 *   scrf - set result in CR and FPCC
 */
#define VSX_TEST_DC(op, nels, xbn, tp, fld, tfld, fld_max, scrf)  \
void helper_##op(CPUPPCState *env, uint32_t opcode)         \
{                                                           \
    ppc_vsr_t *xt = &env->vsr[xT(opcode)];                  \
    ppc_vsr_t *xb = &env->vsr[xbn];                         \
    ppc_vsr_t t = { };                                      \
    uint32_t i, sign, dcmx;                                 \
    uint32_t cc, match = 0;                                 \
                                                            \
    if (!scrf) {                                            \
        dcmx = DCMX_XV(opcode);                             \
    } else {                                                \
        t = *xt;                                            \
        dcmx = DCMX(opcode);                                \
    }                                                       \
                                                            \
    for (i = 0; i < nels; i++) {                            \
        sign = tp##_is_neg(xb->fld);                        \
        if (tp##_is_any_nan(xb->fld)) {                     \
            match = extract32(dcmx, 6, 1);                  \
        } else if (tp##_is_infinity(xb->fld)) {             \
            match = extract32(dcmx, 4 + !sign, 1);          \
        } else if (tp##_is_zero(xb->fld)) {                 \
            match = extract32(dcmx, 2 + !sign, 1);          \
        } else if (tp##_is_zero_or_denormal(xb->fld)) {     \
            match = extract32(dcmx, 0 + !sign, 1);          \
        }                                                   \
                                                            \
        if (scrf) {                                         \
            cc = sign << CRF_LT_BIT | match << CRF_EQ_BIT;  \
            env->fpscr &= ~FP_FPCC;                         \
            env->fpscr |= cc << FPSCR_FPCC;                 \
            env->crf[BF(opcode)] = cc;                      \
        } else {                                            \
            t.tfld = match ? fld_max : 0;                   \
        }                                                   \
        match = 0;                                          \
    }                                                       \
    if (!scrf) {                                            \
        *xt = t;                                            \
    }                                                       \
}
VSX_TEST_DC(xvtstdcdp, 2, xB(opcode), float64, VsrD(i), VsrD(i), UINT64_MAX, 0)
VSX_TEST_DC(xvtstdcsp, 4, xB(opcode), float32, VsrW(i), VsrW(i), UINT32_MAX, 0)
VSX_TEST_DC(xststdcdp, 1, xB(opcode), float64, VsrD(0), VsrD(0), 0, 1)
VSX_TEST_DC(xststdcqp, 1, (rB(opcode) + 32), float128, f128, VsrD(0), 0, 1)
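/*
 * DCMX bit layout assumed by the extract32() calls above (bit 0 = LSB):
 * 6 = NaN, 5/4 = +/-Infinity, 3/2 = +/-Zero, 1/0 = +/-Denormal; the
 * "4 + !sign" style indexing selects the positive or negative variant.
 */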
void helper_xststdcsp(CPUPPCState *env, uint32_t opcode, ppc_vsr_t *xb)
{
    uint32_t dcmx, sign, exp;
    uint32_t cc, match = 0, not_sp = 0;
    float64 arg = xb->VsrD(0);
    float64 arg_sp;

    dcmx = DCMX(opcode);
    exp = (arg >> 52) & 0x7FF;
    sign = float64_is_neg(arg);

    if (float64_is_any_nan(arg)) {
        match = extract32(dcmx, 6, 1);
    } else if (float64_is_infinity(arg)) {
        match = extract32(dcmx, 4 + !sign, 1);
    } else if (float64_is_zero(arg)) {
        match = extract32(dcmx, 2 + !sign, 1);
    } else if (float64_is_zero_or_denormal(arg) || (exp > 0 && exp < 0x381)) {
        match = extract32(dcmx, 0 + !sign, 1);
    }

    arg_sp = helper_todouble(helper_tosingle(arg));
    not_sp = arg != arg_sp;

    cc = sign << CRF_LT_BIT | match << CRF_EQ_BIT | not_sp << CRF_SO_BIT;
    env->fpscr &= ~FP_FPCC;
    env->fpscr |= cc << FPSCR_FPCC;
    env->crf[BF(opcode)] = cc;
}
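/*
 * The "exp > 0 && exp < 0x381" clause widens the denormal class test:
 * 0x381 is 897, the smallest double-precision biased exponent that is
 * still a single-precision normal (897 - 1023 = -126), so any smaller
 * normal double counts as denormal for this single-precision test.
 */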
void helper_xsrqpi(CPUPPCState *env, uint32_t opcode,
                   ppc_vsr_t *xt, ppc_vsr_t *xb)
{
    ppc_vsr_t t = { };
    uint8_t r = Rrm(opcode);
    uint8_t ex = Rc(opcode);
    uint8_t rmc = RMC(opcode);
    uint8_t rmode = 0;
    float_status tstat;

    helper_reset_fpstatus(env);

    if (r == 0 && rmc == 0) {
        rmode = float_round_ties_away;
    } else if (r == 0 && rmc == 0x3) {
        rmode = env->fpscr & FP_RN;
    } else if (r == 1) {
        switch (rmc) {
        case 0:
            rmode = float_round_nearest_even;
            break;
        case 1:
            rmode = float_round_to_zero;
            break;
        case 2:
            rmode = float_round_up;
            break;
        case 3:
            rmode = float_round_down;
            break;
        default:
            abort();
        }
    }

    tstat = env->fp_status;
    set_float_exception_flags(0, &tstat);
    set_float_rounding_mode(rmode, &tstat);
    t.f128 = float128_round_to_int(xb->f128, &tstat);
    env->fp_status.float_exception_flags |= tstat.float_exception_flags;

    if (unlikely(tstat.float_exception_flags & float_flag_invalid_snan)) {
        float_invalid_op_vxsnan(env, GETPC());
    }

    if (ex == 0 && (tstat.float_exception_flags & float_flag_inexact)) {
        env->fp_status.float_exception_flags &= ~float_flag_inexact;
    }

    helper_compute_fprf_float128(env, t.f128);
    do_float_check_status(env, true, GETPC());
    *xt = t;
}
void helper_xsrqpxp(CPUPPCState *env, uint32_t opcode,
                    ppc_vsr_t *xt, ppc_vsr_t *xb)
{
    ppc_vsr_t t = { };
    uint8_t r = Rrm(opcode);
    uint8_t rmc = RMC(opcode);
    uint8_t rmode = 0;
    floatx80 round_res;
    float_status tstat;

    helper_reset_fpstatus(env);

    if (r == 0 && rmc == 0) {
        rmode = float_round_ties_away;
    } else if (r == 0 && rmc == 0x3) {
        rmode = env->fpscr & FP_RN;
    } else if (r == 1) {
        switch (rmc) {
        case 0:
            rmode = float_round_nearest_even;
            break;
        case 1:
            rmode = float_round_to_zero;
            break;
        case 2:
            rmode = float_round_up;
            break;
        case 3:
            rmode = float_round_down;
            break;
        default:
            abort();
        }
    }

    tstat = env->fp_status;
    set_float_exception_flags(0, &tstat);
    set_float_rounding_mode(rmode, &tstat);
    round_res = float128_to_floatx80(xb->f128, &tstat);
    t.f128 = floatx80_to_float128(round_res, &tstat);
    env->fp_status.float_exception_flags |= tstat.float_exception_flags;

    if (unlikely(tstat.float_exception_flags & float_flag_invalid_snan)) {
        float_invalid_op_vxsnan(env, GETPC());
        t.f128 = float128_snan_to_qnan(t.f128);
    }

    helper_compute_fprf_float128(env, t.f128);
    *xt = t;
    do_float_check_status(env, true, GETPC());
}
void helper_xssqrtqp(CPUPPCState *env, uint32_t opcode,
                     ppc_vsr_t *xt, ppc_vsr_t *xb)
{
    ppc_vsr_t t = { };
    float_status tstat;

    helper_reset_fpstatus(env);

    tstat = env->fp_status;
    if (unlikely(Rc(opcode) != 0)) {
        tstat.float_rounding_mode = float_round_to_odd;
    }

    set_float_exception_flags(0, &tstat);
    t.f128 = float128_sqrt(xb->f128, &tstat);
    env->fp_status.float_exception_flags |= tstat.float_exception_flags;

    if (unlikely(tstat.float_exception_flags & float_flag_invalid)) {
        float_invalid_op_sqrt(env, tstat.float_exception_flags, 1, GETPC());
    }

    helper_compute_fprf_float128(env, t.f128);
    *xt = t;
    do_float_check_status(env, true, GETPC());
}
void helper_xssubqp(CPUPPCState *env, uint32_t opcode,
                    ppc_vsr_t *xt, ppc_vsr_t *xa, ppc_vsr_t *xb)
{
    ppc_vsr_t t = *xt;
    float_status tstat;

    helper_reset_fpstatus(env);

    tstat = env->fp_status;
    if (unlikely(Rc(opcode) != 0)) {
        tstat.float_rounding_mode = float_round_to_odd;
    }

    set_float_exception_flags(0, &tstat);
    t.f128 = float128_sub(xa->f128, xb->f128, &tstat);
    env->fp_status.float_exception_flags |= tstat.float_exception_flags;

    if (unlikely(tstat.float_exception_flags & float_flag_invalid)) {
        float_invalid_op_addsub(env, tstat.float_exception_flags, 1, GETPC());
    }

    helper_compute_fprf_float128(env, t.f128);
    *xt = t;
    do_float_check_status(env, true, GETPC());
}