2 * PowerPC floating point and SPE emulation helpers for QEMU.
4 * Copyright (c) 2003-2007 Jocelyn Mayer
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
22 /*****************************************************************************/
23 /* Floating point operations helpers */
24 uint64_t helper_float32_to_float64(CPUPPCState
*env
, uint32_t arg
)
30 d
.d
= float32_to_float64(f
.f
, &env
->fp_status
);
34 uint32_t helper_float64_to_float32(CPUPPCState
*env
, uint64_t arg
)
40 f
.f
= float64_to_float32(d
.d
, &env
->fp_status
);
44 static inline int isden(float64 d
)
50 return ((u
.ll
>> 52) & 0x7FF) == 0;
53 uint32_t helper_compute_fprf(CPUPPCState
*env
, uint64_t arg
, uint32_t set_fprf
)
60 isneg
= float64_is_neg(farg
.d
);
61 if (unlikely(float64_is_any_nan(farg
.d
))) {
62 if (float64_is_signaling_nan(farg
.d
)) {
63 /* Signaling NaN: flags are undefined */
69 } else if (unlikely(float64_is_infinity(farg
.d
))) {
77 if (float64_is_zero(farg
.d
)) {
86 /* Denormalized numbers */
89 /* Normalized numbers */
100 /* We update FPSCR_FPRF */
101 env
->fpscr
&= ~(0x1F << FPSCR_FPRF
);
102 env
->fpscr
|= ret
<< FPSCR_FPRF
;
104 /* We just need fpcc to update Rc1 */
108 /* Floating-point invalid operations exception */
109 static inline uint64_t fload_invalid_op_excp(CPUPPCState
*env
, int op
)
116 case POWERPC_EXCP_FP_VXSNAN
:
117 env
->fpscr
|= 1 << FPSCR_VXSNAN
;
119 case POWERPC_EXCP_FP_VXSOFT
:
120 env
->fpscr
|= 1 << FPSCR_VXSOFT
;
122 case POWERPC_EXCP_FP_VXISI
:
123 /* Magnitude subtraction of infinities */
124 env
->fpscr
|= 1 << FPSCR_VXISI
;
126 case POWERPC_EXCP_FP_VXIDI
:
127 /* Division of infinity by infinity */
128 env
->fpscr
|= 1 << FPSCR_VXIDI
;
130 case POWERPC_EXCP_FP_VXZDZ
:
131 /* Division of zero by zero */
132 env
->fpscr
|= 1 << FPSCR_VXZDZ
;
134 case POWERPC_EXCP_FP_VXIMZ
:
135 /* Multiplication of zero by infinity */
136 env
->fpscr
|= 1 << FPSCR_VXIMZ
;
138 case POWERPC_EXCP_FP_VXVC
:
139 /* Ordered comparison of NaN */
140 env
->fpscr
|= 1 << FPSCR_VXVC
;
141 env
->fpscr
&= ~(0xF << FPSCR_FPCC
);
142 env
->fpscr
|= 0x11 << FPSCR_FPCC
;
143 /* We must update the target FPR before raising the exception */
145 env
->exception_index
= POWERPC_EXCP_PROGRAM
;
146 env
->error_code
= POWERPC_EXCP_FP
| POWERPC_EXCP_FP_VXVC
;
147 /* Update the floating-point enabled exception summary */
148 env
->fpscr
|= 1 << FPSCR_FEX
;
149 /* Exception is deferred */
153 case POWERPC_EXCP_FP_VXSQRT
:
154 /* Square root of a negative number */
155 env
->fpscr
|= 1 << FPSCR_VXSQRT
;
157 env
->fpscr
&= ~((1 << FPSCR_FR
) | (1 << FPSCR_FI
));
159 /* Set the result to quiet NaN */
160 ret
= 0x7FF8000000000000ULL
;
161 env
->fpscr
&= ~(0xF << FPSCR_FPCC
);
162 env
->fpscr
|= 0x11 << FPSCR_FPCC
;
165 case POWERPC_EXCP_FP_VXCVI
:
166 /* Invalid conversion */
167 env
->fpscr
|= 1 << FPSCR_VXCVI
;
168 env
->fpscr
&= ~((1 << FPSCR_FR
) | (1 << FPSCR_FI
));
170 /* Set the result to quiet NaN */
171 ret
= 0x7FF8000000000000ULL
;
172 env
->fpscr
&= ~(0xF << FPSCR_FPCC
);
173 env
->fpscr
|= 0x11 << FPSCR_FPCC
;
177 /* Update the floating-point invalid operation summary */
178 env
->fpscr
|= 1 << FPSCR_VX
;
179 /* Update the floating-point exception summary */
180 env
->fpscr
|= 1 << FPSCR_FX
;
182 /* Update the floating-point enabled exception summary */
183 env
->fpscr
|= 1 << FPSCR_FEX
;
184 if (msr_fe0
!= 0 || msr_fe1
!= 0) {
185 helper_raise_exception_err(env
, POWERPC_EXCP_PROGRAM
,
186 POWERPC_EXCP_FP
| op
);
192 static inline void float_zero_divide_excp(CPUPPCState
*env
)
194 env
->fpscr
|= 1 << FPSCR_ZX
;
195 env
->fpscr
&= ~((1 << FPSCR_FR
) | (1 << FPSCR_FI
));
196 /* Update the floating-point exception summary */
197 env
->fpscr
|= 1 << FPSCR_FX
;
199 /* Update the floating-point enabled exception summary */
200 env
->fpscr
|= 1 << FPSCR_FEX
;
201 if (msr_fe0
!= 0 || msr_fe1
!= 0) {
202 helper_raise_exception_err(env
, POWERPC_EXCP_PROGRAM
,
203 POWERPC_EXCP_FP
| POWERPC_EXCP_FP_ZX
);
208 static inline void float_overflow_excp(CPUPPCState
*env
)
210 env
->fpscr
|= 1 << FPSCR_OX
;
211 /* Update the floating-point exception summary */
212 env
->fpscr
|= 1 << FPSCR_FX
;
214 /* XXX: should adjust the result */
215 /* Update the floating-point enabled exception summary */
216 env
->fpscr
|= 1 << FPSCR_FEX
;
217 /* We must update the target FPR before raising the exception */
218 env
->exception_index
= POWERPC_EXCP_PROGRAM
;
219 env
->error_code
= POWERPC_EXCP_FP
| POWERPC_EXCP_FP_OX
;
221 env
->fpscr
|= 1 << FPSCR_XX
;
222 env
->fpscr
|= 1 << FPSCR_FI
;
226 static inline void float_underflow_excp(CPUPPCState
*env
)
228 env
->fpscr
|= 1 << FPSCR_UX
;
229 /* Update the floating-point exception summary */
230 env
->fpscr
|= 1 << FPSCR_FX
;
232 /* XXX: should adjust the result */
233 /* Update the floating-point enabled exception summary */
234 env
->fpscr
|= 1 << FPSCR_FEX
;
235 /* We must update the target FPR before raising the exception */
236 env
->exception_index
= POWERPC_EXCP_PROGRAM
;
237 env
->error_code
= POWERPC_EXCP_FP
| POWERPC_EXCP_FP_UX
;
241 static inline void float_inexact_excp(CPUPPCState
*env
)
243 env
->fpscr
|= 1 << FPSCR_XX
;
244 /* Update the floating-point exception summary */
245 env
->fpscr
|= 1 << FPSCR_FX
;
247 /* Update the floating-point enabled exception summary */
248 env
->fpscr
|= 1 << FPSCR_FEX
;
249 /* We must update the target FPR before raising the exception */
250 env
->exception_index
= POWERPC_EXCP_PROGRAM
;
251 env
->error_code
= POWERPC_EXCP_FP
| POWERPC_EXCP_FP_XX
;
255 static inline void fpscr_set_rounding_mode(CPUPPCState
*env
)
259 /* Set rounding mode */
262 /* Best approximation (round to nearest) */
263 rnd_type
= float_round_nearest_even
;
266 /* Smaller magnitude (round toward zero) */
267 rnd_type
= float_round_to_zero
;
270 /* Round toward +infinite */
271 rnd_type
= float_round_up
;
275 /* Round toward -infinite */
276 rnd_type
= float_round_down
;
279 set_float_rounding_mode(rnd_type
, &env
->fp_status
);
282 void helper_fpscr_clrbit(CPUPPCState
*env
, uint32_t bit
)
286 prev
= (env
->fpscr
>> bit
) & 1;
287 env
->fpscr
&= ~(1 << bit
);
292 fpscr_set_rounding_mode(env
);
300 void helper_fpscr_setbit(CPUPPCState
*env
, uint32_t bit
)
304 prev
= (env
->fpscr
>> bit
) & 1;
305 env
->fpscr
|= 1 << bit
;
309 env
->fpscr
|= 1 << FPSCR_FX
;
315 env
->fpscr
|= 1 << FPSCR_FX
;
321 env
->fpscr
|= 1 << FPSCR_FX
;
327 env
->fpscr
|= 1 << FPSCR_FX
;
333 env
->fpscr
|= 1 << FPSCR_FX
;
347 env
->fpscr
|= 1 << FPSCR_VX
;
348 env
->fpscr
|= 1 << FPSCR_FX
;
356 env
->error_code
= POWERPC_EXCP_FP
;
358 env
->error_code
|= POWERPC_EXCP_FP_VXSNAN
;
361 env
->error_code
|= POWERPC_EXCP_FP_VXISI
;
364 env
->error_code
|= POWERPC_EXCP_FP_VXIDI
;
367 env
->error_code
|= POWERPC_EXCP_FP_VXZDZ
;
370 env
->error_code
|= POWERPC_EXCP_FP_VXIMZ
;
373 env
->error_code
|= POWERPC_EXCP_FP_VXVC
;
376 env
->error_code
|= POWERPC_EXCP_FP_VXSOFT
;
379 env
->error_code
|= POWERPC_EXCP_FP_VXSQRT
;
382 env
->error_code
|= POWERPC_EXCP_FP_VXCVI
;
390 env
->error_code
= POWERPC_EXCP_FP
| POWERPC_EXCP_FP_OX
;
397 env
->error_code
= POWERPC_EXCP_FP
| POWERPC_EXCP_FP_UX
;
404 env
->error_code
= POWERPC_EXCP_FP
| POWERPC_EXCP_FP_ZX
;
411 env
->error_code
= POWERPC_EXCP_FP
| POWERPC_EXCP_FP_XX
;
417 fpscr_set_rounding_mode(env
);
422 /* Update the floating-point enabled exception summary */
423 env
->fpscr
|= 1 << FPSCR_FEX
;
424 /* We have to update Rc1 before raising the exception */
425 env
->exception_index
= POWERPC_EXCP_PROGRAM
;
431 void helper_store_fpscr(CPUPPCState
*env
, uint64_t arg
, uint32_t mask
)
434 * We use only the 32 LSB of the incoming fpr
442 new |= prev
& 0x60000000;
443 for (i
= 0; i
< 8; i
++) {
444 if (mask
& (1 << i
)) {
445 env
->fpscr
&= ~(0xF << (4 * i
));
446 env
->fpscr
|= new & (0xF << (4 * i
));
449 /* Update VX and FEX */
451 env
->fpscr
|= 1 << FPSCR_VX
;
453 env
->fpscr
&= ~(1 << FPSCR_VX
);
455 if ((fpscr_ex
& fpscr_eex
) != 0) {
456 env
->fpscr
|= 1 << FPSCR_FEX
;
457 env
->exception_index
= POWERPC_EXCP_PROGRAM
;
458 /* XXX: we should compute it properly */
459 env
->error_code
= POWERPC_EXCP_FP
;
461 env
->fpscr
&= ~(1 << FPSCR_FEX
);
463 fpscr_set_rounding_mode(env
);
466 void store_fpscr(CPUPPCState
*env
, uint64_t arg
, uint32_t mask
)
468 helper_store_fpscr(env
, arg
, mask
);
471 void helper_float_check_status(CPUPPCState
*env
)
473 if (env
->exception_index
== POWERPC_EXCP_PROGRAM
&&
474 (env
->error_code
& POWERPC_EXCP_FP
)) {
475 /* Deferred floating-point exception after target FPR update */
476 if (msr_fe0
!= 0 || msr_fe1
!= 0) {
477 helper_raise_exception_err(env
, env
->exception_index
,
481 int status
= get_float_exception_flags(&env
->fp_status
);
482 if (status
& float_flag_divbyzero
) {
483 float_zero_divide_excp(env
);
484 } else if (status
& float_flag_overflow
) {
485 float_overflow_excp(env
);
486 } else if (status
& float_flag_underflow
) {
487 float_underflow_excp(env
);
488 } else if (status
& float_flag_inexact
) {
489 float_inexact_excp(env
);
494 void helper_reset_fpstatus(CPUPPCState
*env
)
496 set_float_exception_flags(0, &env
->fp_status
);
500 uint64_t helper_fadd(CPUPPCState
*env
, uint64_t arg1
, uint64_t arg2
)
502 CPU_DoubleU farg1
, farg2
;
507 if (unlikely(float64_is_infinity(farg1
.d
) && float64_is_infinity(farg2
.d
) &&
508 float64_is_neg(farg1
.d
) != float64_is_neg(farg2
.d
))) {
509 /* Magnitude subtraction of infinities */
510 farg1
.ll
= fload_invalid_op_excp(env
, POWERPC_EXCP_FP_VXISI
);
512 if (unlikely(float64_is_signaling_nan(farg1
.d
) ||
513 float64_is_signaling_nan(farg2
.d
))) {
515 fload_invalid_op_excp(env
, POWERPC_EXCP_FP_VXSNAN
);
517 farg1
.d
= float64_add(farg1
.d
, farg2
.d
, &env
->fp_status
);
524 uint64_t helper_fsub(CPUPPCState
*env
, uint64_t arg1
, uint64_t arg2
)
526 CPU_DoubleU farg1
, farg2
;
531 if (unlikely(float64_is_infinity(farg1
.d
) && float64_is_infinity(farg2
.d
) &&
532 float64_is_neg(farg1
.d
) == float64_is_neg(farg2
.d
))) {
533 /* Magnitude subtraction of infinities */
534 farg1
.ll
= fload_invalid_op_excp(env
, POWERPC_EXCP_FP_VXISI
);
536 if (unlikely(float64_is_signaling_nan(farg1
.d
) ||
537 float64_is_signaling_nan(farg2
.d
))) {
538 /* sNaN subtraction */
539 fload_invalid_op_excp(env
, POWERPC_EXCP_FP_VXSNAN
);
541 farg1
.d
= float64_sub(farg1
.d
, farg2
.d
, &env
->fp_status
);
548 uint64_t helper_fmul(CPUPPCState
*env
, uint64_t arg1
, uint64_t arg2
)
550 CPU_DoubleU farg1
, farg2
;
555 if (unlikely((float64_is_infinity(farg1
.d
) && float64_is_zero(farg2
.d
)) ||
556 (float64_is_zero(farg1
.d
) && float64_is_infinity(farg2
.d
)))) {
557 /* Multiplication of zero by infinity */
558 farg1
.ll
= fload_invalid_op_excp(env
, POWERPC_EXCP_FP_VXIMZ
);
560 if (unlikely(float64_is_signaling_nan(farg1
.d
) ||
561 float64_is_signaling_nan(farg2
.d
))) {
562 /* sNaN multiplication */
563 fload_invalid_op_excp(env
, POWERPC_EXCP_FP_VXSNAN
);
565 farg1
.d
= float64_mul(farg1
.d
, farg2
.d
, &env
->fp_status
);
572 uint64_t helper_fdiv(CPUPPCState
*env
, uint64_t arg1
, uint64_t arg2
)
574 CPU_DoubleU farg1
, farg2
;
579 if (unlikely(float64_is_infinity(farg1
.d
) &&
580 float64_is_infinity(farg2
.d
))) {
581 /* Division of infinity by infinity */
582 farg1
.ll
= fload_invalid_op_excp(env
, POWERPC_EXCP_FP_VXIDI
);
583 } else if (unlikely(float64_is_zero(farg1
.d
) && float64_is_zero(farg2
.d
))) {
584 /* Division of zero by zero */
585 farg1
.ll
= fload_invalid_op_excp(env
, POWERPC_EXCP_FP_VXZDZ
);
587 if (unlikely(float64_is_signaling_nan(farg1
.d
) ||
588 float64_is_signaling_nan(farg2
.d
))) {
590 fload_invalid_op_excp(env
, POWERPC_EXCP_FP_VXSNAN
);
592 farg1
.d
= float64_div(farg1
.d
, farg2
.d
, &env
->fp_status
);
599 uint64_t helper_fabs(CPUPPCState
*env
, uint64_t arg
)
604 farg
.d
= float64_abs(farg
.d
);
609 uint64_t helper_fnabs(CPUPPCState
*env
, uint64_t arg
)
614 farg
.d
= float64_abs(farg
.d
);
615 farg
.d
= float64_chs(farg
.d
);
620 uint64_t helper_fneg(CPUPPCState
*env
, uint64_t arg
)
625 farg
.d
= float64_chs(farg
.d
);
630 uint64_t helper_fctiw(CPUPPCState
*env
, uint64_t arg
)
636 if (unlikely(float64_is_signaling_nan(farg
.d
))) {
637 /* sNaN conversion */
638 farg
.ll
= fload_invalid_op_excp(env
, POWERPC_EXCP_FP_VXSNAN
|
639 POWERPC_EXCP_FP_VXCVI
);
640 } else if (unlikely(float64_is_quiet_nan(farg
.d
) ||
641 float64_is_infinity(farg
.d
))) {
642 /* qNan / infinity conversion */
643 farg
.ll
= fload_invalid_op_excp(env
, POWERPC_EXCP_FP_VXCVI
);
645 farg
.ll
= float64_to_int32(farg
.d
, &env
->fp_status
);
646 /* XXX: higher bits are not supposed to be significant.
647 * to make tests easier, return the same as a real PowerPC 750
649 farg
.ll
|= 0xFFF80000ULL
<< 32;
654 /* fctiwz - fctiwz. */
655 uint64_t helper_fctiwz(CPUPPCState
*env
, uint64_t arg
)
661 if (unlikely(float64_is_signaling_nan(farg
.d
))) {
662 /* sNaN conversion */
663 farg
.ll
= fload_invalid_op_excp(env
, POWERPC_EXCP_FP_VXSNAN
|
664 POWERPC_EXCP_FP_VXCVI
);
665 } else if (unlikely(float64_is_quiet_nan(farg
.d
) ||
666 float64_is_infinity(farg
.d
))) {
667 /* qNan / infinity conversion */
668 farg
.ll
= fload_invalid_op_excp(env
, POWERPC_EXCP_FP_VXCVI
);
670 farg
.ll
= float64_to_int32_round_to_zero(farg
.d
, &env
->fp_status
);
671 /* XXX: higher bits are not supposed to be significant.
672 * to make tests easier, return the same as a real PowerPC 750
674 farg
.ll
|= 0xFFF80000ULL
<< 32;
679 #if defined(TARGET_PPC64)
681 uint64_t helper_fcfid(CPUPPCState
*env
, uint64_t arg
)
685 farg
.d
= int64_to_float64(arg
, &env
->fp_status
);
690 uint64_t helper_fctid(CPUPPCState
*env
, uint64_t arg
)
696 if (unlikely(float64_is_signaling_nan(farg
.d
))) {
697 /* sNaN conversion */
698 farg
.ll
= fload_invalid_op_excp(env
, POWERPC_EXCP_FP_VXSNAN
|
699 POWERPC_EXCP_FP_VXCVI
);
700 } else if (unlikely(float64_is_quiet_nan(farg
.d
) ||
701 float64_is_infinity(farg
.d
))) {
702 /* qNan / infinity conversion */
703 farg
.ll
= fload_invalid_op_excp(env
, POWERPC_EXCP_FP_VXCVI
);
705 farg
.ll
= float64_to_int64(farg
.d
, &env
->fp_status
);
710 /* fctidz - fctidz. */
711 uint64_t helper_fctidz(CPUPPCState
*env
, uint64_t arg
)
717 if (unlikely(float64_is_signaling_nan(farg
.d
))) {
718 /* sNaN conversion */
719 farg
.ll
= fload_invalid_op_excp(env
, POWERPC_EXCP_FP_VXSNAN
|
720 POWERPC_EXCP_FP_VXCVI
);
721 } else if (unlikely(float64_is_quiet_nan(farg
.d
) ||
722 float64_is_infinity(farg
.d
))) {
723 /* qNan / infinity conversion */
724 farg
.ll
= fload_invalid_op_excp(env
, POWERPC_EXCP_FP_VXCVI
);
726 farg
.ll
= float64_to_int64_round_to_zero(farg
.d
, &env
->fp_status
);
733 static inline uint64_t do_fri(CPUPPCState
*env
, uint64_t arg
,
740 if (unlikely(float64_is_signaling_nan(farg
.d
))) {
742 farg
.ll
= fload_invalid_op_excp(env
, POWERPC_EXCP_FP_VXSNAN
|
743 POWERPC_EXCP_FP_VXCVI
);
744 } else if (unlikely(float64_is_quiet_nan(farg
.d
) ||
745 float64_is_infinity(farg
.d
))) {
746 /* qNan / infinity round */
747 farg
.ll
= fload_invalid_op_excp(env
, POWERPC_EXCP_FP_VXCVI
);
749 set_float_rounding_mode(rounding_mode
, &env
->fp_status
);
750 farg
.ll
= float64_round_to_int(farg
.d
, &env
->fp_status
);
751 /* Restore rounding mode from FPSCR */
752 fpscr_set_rounding_mode(env
);
757 uint64_t helper_frin(CPUPPCState
*env
, uint64_t arg
)
759 return do_fri(env
, arg
, float_round_nearest_even
);
762 uint64_t helper_friz(CPUPPCState
*env
, uint64_t arg
)
764 return do_fri(env
, arg
, float_round_to_zero
);
767 uint64_t helper_frip(CPUPPCState
*env
, uint64_t arg
)
769 return do_fri(env
, arg
, float_round_up
);
772 uint64_t helper_frim(CPUPPCState
*env
, uint64_t arg
)
774 return do_fri(env
, arg
, float_round_down
);
778 uint64_t helper_fmadd(CPUPPCState
*env
, uint64_t arg1
, uint64_t arg2
,
781 CPU_DoubleU farg1
, farg2
, farg3
;
787 if (unlikely((float64_is_infinity(farg1
.d
) && float64_is_zero(farg2
.d
)) ||
788 (float64_is_zero(farg1
.d
) && float64_is_infinity(farg2
.d
)))) {
789 /* Multiplication of zero by infinity */
790 farg1
.ll
= fload_invalid_op_excp(env
, POWERPC_EXCP_FP_VXIMZ
);
792 if (unlikely(float64_is_signaling_nan(farg1
.d
) ||
793 float64_is_signaling_nan(farg2
.d
) ||
794 float64_is_signaling_nan(farg3
.d
))) {
796 fload_invalid_op_excp(env
, POWERPC_EXCP_FP_VXSNAN
);
798 /* This is the way the PowerPC specification defines it */
799 float128 ft0_128
, ft1_128
;
801 ft0_128
= float64_to_float128(farg1
.d
, &env
->fp_status
);
802 ft1_128
= float64_to_float128(farg2
.d
, &env
->fp_status
);
803 ft0_128
= float128_mul(ft0_128
, ft1_128
, &env
->fp_status
);
804 if (unlikely(float128_is_infinity(ft0_128
) &&
805 float64_is_infinity(farg3
.d
) &&
806 float128_is_neg(ft0_128
) != float64_is_neg(farg3
.d
))) {
807 /* Magnitude subtraction of infinities */
808 farg1
.ll
= fload_invalid_op_excp(env
, POWERPC_EXCP_FP_VXISI
);
810 ft1_128
= float64_to_float128(farg3
.d
, &env
->fp_status
);
811 ft0_128
= float128_add(ft0_128
, ft1_128
, &env
->fp_status
);
812 farg1
.d
= float128_to_float64(ft0_128
, &env
->fp_status
);
820 uint64_t helper_fmsub(CPUPPCState
*env
, uint64_t arg1
, uint64_t arg2
,
823 CPU_DoubleU farg1
, farg2
, farg3
;
829 if (unlikely((float64_is_infinity(farg1
.d
) && float64_is_zero(farg2
.d
)) ||
830 (float64_is_zero(farg1
.d
) &&
831 float64_is_infinity(farg2
.d
)))) {
832 /* Multiplication of zero by infinity */
833 farg1
.ll
= fload_invalid_op_excp(env
, POWERPC_EXCP_FP_VXIMZ
);
835 if (unlikely(float64_is_signaling_nan(farg1
.d
) ||
836 float64_is_signaling_nan(farg2
.d
) ||
837 float64_is_signaling_nan(farg3
.d
))) {
839 fload_invalid_op_excp(env
, POWERPC_EXCP_FP_VXSNAN
);
841 /* This is the way the PowerPC specification defines it */
842 float128 ft0_128
, ft1_128
;
844 ft0_128
= float64_to_float128(farg1
.d
, &env
->fp_status
);
845 ft1_128
= float64_to_float128(farg2
.d
, &env
->fp_status
);
846 ft0_128
= float128_mul(ft0_128
, ft1_128
, &env
->fp_status
);
847 if (unlikely(float128_is_infinity(ft0_128
) &&
848 float64_is_infinity(farg3
.d
) &&
849 float128_is_neg(ft0_128
) == float64_is_neg(farg3
.d
))) {
850 /* Magnitude subtraction of infinities */
851 farg1
.ll
= fload_invalid_op_excp(env
, POWERPC_EXCP_FP_VXISI
);
853 ft1_128
= float64_to_float128(farg3
.d
, &env
->fp_status
);
854 ft0_128
= float128_sub(ft0_128
, ft1_128
, &env
->fp_status
);
855 farg1
.d
= float128_to_float64(ft0_128
, &env
->fp_status
);
861 /* fnmadd - fnmadd. */
862 uint64_t helper_fnmadd(CPUPPCState
*env
, uint64_t arg1
, uint64_t arg2
,
865 CPU_DoubleU farg1
, farg2
, farg3
;
871 if (unlikely((float64_is_infinity(farg1
.d
) && float64_is_zero(farg2
.d
)) ||
872 (float64_is_zero(farg1
.d
) && float64_is_infinity(farg2
.d
)))) {
873 /* Multiplication of zero by infinity */
874 farg1
.ll
= fload_invalid_op_excp(env
, POWERPC_EXCP_FP_VXIMZ
);
876 if (unlikely(float64_is_signaling_nan(farg1
.d
) ||
877 float64_is_signaling_nan(farg2
.d
) ||
878 float64_is_signaling_nan(farg3
.d
))) {
880 fload_invalid_op_excp(env
, POWERPC_EXCP_FP_VXSNAN
);
882 /* This is the way the PowerPC specification defines it */
883 float128 ft0_128
, ft1_128
;
885 ft0_128
= float64_to_float128(farg1
.d
, &env
->fp_status
);
886 ft1_128
= float64_to_float128(farg2
.d
, &env
->fp_status
);
887 ft0_128
= float128_mul(ft0_128
, ft1_128
, &env
->fp_status
);
888 if (unlikely(float128_is_infinity(ft0_128
) &&
889 float64_is_infinity(farg3
.d
) &&
890 float128_is_neg(ft0_128
) != float64_is_neg(farg3
.d
))) {
891 /* Magnitude subtraction of infinities */
892 farg1
.ll
= fload_invalid_op_excp(env
, POWERPC_EXCP_FP_VXISI
);
894 ft1_128
= float64_to_float128(farg3
.d
, &env
->fp_status
);
895 ft0_128
= float128_add(ft0_128
, ft1_128
, &env
->fp_status
);
896 farg1
.d
= float128_to_float64(ft0_128
, &env
->fp_status
);
898 if (likely(!float64_is_any_nan(farg1
.d
))) {
899 farg1
.d
= float64_chs(farg1
.d
);
905 /* fnmsub - fnmsub. */
906 uint64_t helper_fnmsub(CPUPPCState
*env
, uint64_t arg1
, uint64_t arg2
,
909 CPU_DoubleU farg1
, farg2
, farg3
;
915 if (unlikely((float64_is_infinity(farg1
.d
) && float64_is_zero(farg2
.d
)) ||
916 (float64_is_zero(farg1
.d
) &&
917 float64_is_infinity(farg2
.d
)))) {
918 /* Multiplication of zero by infinity */
919 farg1
.ll
= fload_invalid_op_excp(env
, POWERPC_EXCP_FP_VXIMZ
);
921 if (unlikely(float64_is_signaling_nan(farg1
.d
) ||
922 float64_is_signaling_nan(farg2
.d
) ||
923 float64_is_signaling_nan(farg3
.d
))) {
925 fload_invalid_op_excp(env
, POWERPC_EXCP_FP_VXSNAN
);
927 /* This is the way the PowerPC specification defines it */
928 float128 ft0_128
, ft1_128
;
930 ft0_128
= float64_to_float128(farg1
.d
, &env
->fp_status
);
931 ft1_128
= float64_to_float128(farg2
.d
, &env
->fp_status
);
932 ft0_128
= float128_mul(ft0_128
, ft1_128
, &env
->fp_status
);
933 if (unlikely(float128_is_infinity(ft0_128
) &&
934 float64_is_infinity(farg3
.d
) &&
935 float128_is_neg(ft0_128
) == float64_is_neg(farg3
.d
))) {
936 /* Magnitude subtraction of infinities */
937 farg1
.ll
= fload_invalid_op_excp(env
, POWERPC_EXCP_FP_VXISI
);
939 ft1_128
= float64_to_float128(farg3
.d
, &env
->fp_status
);
940 ft0_128
= float128_sub(ft0_128
, ft1_128
, &env
->fp_status
);
941 farg1
.d
= float128_to_float64(ft0_128
, &env
->fp_status
);
943 if (likely(!float64_is_any_nan(farg1
.d
))) {
944 farg1
.d
= float64_chs(farg1
.d
);
951 uint64_t helper_frsp(CPUPPCState
*env
, uint64_t arg
)
958 if (unlikely(float64_is_signaling_nan(farg
.d
))) {
959 /* sNaN rounding to single precision */
960 fload_invalid_op_excp(env
, POWERPC_EXCP_FP_VXSNAN
);
962 f32
= float64_to_float32(farg
.d
, &env
->fp_status
);
963 farg
.d
= float32_to_float64(f32
, &env
->fp_status
);
969 uint64_t helper_fsqrt(CPUPPCState
*env
, uint64_t arg
)
975 if (unlikely(float64_is_neg(farg
.d
) && !float64_is_zero(farg
.d
))) {
976 /* Square root of a negative nonzero number */
977 farg
.ll
= fload_invalid_op_excp(env
, POWERPC_EXCP_FP_VXSQRT
);
979 if (unlikely(float64_is_signaling_nan(farg
.d
))) {
980 /* sNaN square root */
981 fload_invalid_op_excp(env
, POWERPC_EXCP_FP_VXSNAN
);
983 farg
.d
= float64_sqrt(farg
.d
, &env
->fp_status
);
989 uint64_t helper_fre(CPUPPCState
*env
, uint64_t arg
)
995 if (unlikely(float64_is_signaling_nan(farg
.d
))) {
996 /* sNaN reciprocal */
997 fload_invalid_op_excp(env
, POWERPC_EXCP_FP_VXSNAN
);
999 farg
.d
= float64_div(float64_one
, farg
.d
, &env
->fp_status
);
1004 uint64_t helper_fres(CPUPPCState
*env
, uint64_t arg
)
1011 if (unlikely(float64_is_signaling_nan(farg
.d
))) {
1012 /* sNaN reciprocal */
1013 fload_invalid_op_excp(env
, POWERPC_EXCP_FP_VXSNAN
);
1015 farg
.d
= float64_div(float64_one
, farg
.d
, &env
->fp_status
);
1016 f32
= float64_to_float32(farg
.d
, &env
->fp_status
);
1017 farg
.d
= float32_to_float64(f32
, &env
->fp_status
);
1022 /* frsqrte - frsqrte. */
1023 uint64_t helper_frsqrte(CPUPPCState
*env
, uint64_t arg
)
1030 if (unlikely(float64_is_neg(farg
.d
) && !float64_is_zero(farg
.d
))) {
1031 /* Reciprocal square root of a negative nonzero number */
1032 farg
.ll
= fload_invalid_op_excp(env
, POWERPC_EXCP_FP_VXSQRT
);
1034 if (unlikely(float64_is_signaling_nan(farg
.d
))) {
1035 /* sNaN reciprocal square root */
1036 fload_invalid_op_excp(env
, POWERPC_EXCP_FP_VXSNAN
);
1038 farg
.d
= float64_sqrt(farg
.d
, &env
->fp_status
);
1039 farg
.d
= float64_div(float64_one
, farg
.d
, &env
->fp_status
);
1040 f32
= float64_to_float32(farg
.d
, &env
->fp_status
);
1041 farg
.d
= float32_to_float64(f32
, &env
->fp_status
);
1047 uint64_t helper_fsel(CPUPPCState
*env
, uint64_t arg1
, uint64_t arg2
,
1054 if ((!float64_is_neg(farg1
.d
) || float64_is_zero(farg1
.d
)) &&
1055 !float64_is_any_nan(farg1
.d
)) {
1062 void helper_fcmpu(CPUPPCState
*env
, uint64_t arg1
, uint64_t arg2
,
1065 CPU_DoubleU farg1
, farg2
;
1071 if (unlikely(float64_is_any_nan(farg1
.d
) ||
1072 float64_is_any_nan(farg2
.d
))) {
1074 } else if (float64_lt(farg1
.d
, farg2
.d
, &env
->fp_status
)) {
1076 } else if (!float64_le(farg1
.d
, farg2
.d
, &env
->fp_status
)) {
1082 env
->fpscr
&= ~(0x0F << FPSCR_FPRF
);
1083 env
->fpscr
|= ret
<< FPSCR_FPRF
;
1084 env
->crf
[crfD
] = ret
;
1085 if (unlikely(ret
== 0x01UL
1086 && (float64_is_signaling_nan(farg1
.d
) ||
1087 float64_is_signaling_nan(farg2
.d
)))) {
1088 /* sNaN comparison */
1089 fload_invalid_op_excp(env
, POWERPC_EXCP_FP_VXSNAN
);
1093 void helper_fcmpo(CPUPPCState
*env
, uint64_t arg1
, uint64_t arg2
,
1096 CPU_DoubleU farg1
, farg2
;
1102 if (unlikely(float64_is_any_nan(farg1
.d
) ||
1103 float64_is_any_nan(farg2
.d
))) {
1105 } else if (float64_lt(farg1
.d
, farg2
.d
, &env
->fp_status
)) {
1107 } else if (!float64_le(farg1
.d
, farg2
.d
, &env
->fp_status
)) {
1113 env
->fpscr
&= ~(0x0F << FPSCR_FPRF
);
1114 env
->fpscr
|= ret
<< FPSCR_FPRF
;
1115 env
->crf
[crfD
] = ret
;
1116 if (unlikely(ret
== 0x01UL
)) {
1117 if (float64_is_signaling_nan(farg1
.d
) ||
1118 float64_is_signaling_nan(farg2
.d
)) {
1119 /* sNaN comparison */
1120 fload_invalid_op_excp(env
, POWERPC_EXCP_FP_VXSNAN
|
1121 POWERPC_EXCP_FP_VXVC
);
1123 /* qNaN comparison */
1124 fload_invalid_op_excp(env
, POWERPC_EXCP_FP_VXVC
);
1129 /* Single-precision floating-point conversions */
1130 static inline uint32_t efscfsi(CPUPPCState
*env
, uint32_t val
)
1134 u
.f
= int32_to_float32(val
, &env
->vec_status
);
1139 static inline uint32_t efscfui(CPUPPCState
*env
, uint32_t val
)
1143 u
.f
= uint32_to_float32(val
, &env
->vec_status
);
1148 static inline int32_t efsctsi(CPUPPCState
*env
, uint32_t val
)
1153 /* NaN are not treated the same way IEEE 754 does */
1154 if (unlikely(float32_is_quiet_nan(u
.f
))) {
1158 return float32_to_int32(u
.f
, &env
->vec_status
);
1161 static inline uint32_t efsctui(CPUPPCState
*env
, uint32_t val
)
1166 /* NaN are not treated the same way IEEE 754 does */
1167 if (unlikely(float32_is_quiet_nan(u
.f
))) {
1171 return float32_to_uint32(u
.f
, &env
->vec_status
);
1174 static inline uint32_t efsctsiz(CPUPPCState
*env
, uint32_t val
)
1179 /* NaN are not treated the same way IEEE 754 does */
1180 if (unlikely(float32_is_quiet_nan(u
.f
))) {
1184 return float32_to_int32_round_to_zero(u
.f
, &env
->vec_status
);
1187 static inline uint32_t efsctuiz(CPUPPCState
*env
, uint32_t val
)
1192 /* NaN are not treated the same way IEEE 754 does */
1193 if (unlikely(float32_is_quiet_nan(u
.f
))) {
1197 return float32_to_uint32_round_to_zero(u
.f
, &env
->vec_status
);
1200 static inline uint32_t efscfsf(CPUPPCState
*env
, uint32_t val
)
1205 u
.f
= int32_to_float32(val
, &env
->vec_status
);
1206 tmp
= int64_to_float32(1ULL << 32, &env
->vec_status
);
1207 u
.f
= float32_div(u
.f
, tmp
, &env
->vec_status
);
1212 static inline uint32_t efscfuf(CPUPPCState
*env
, uint32_t val
)
1217 u
.f
= uint32_to_float32(val
, &env
->vec_status
);
1218 tmp
= uint64_to_float32(1ULL << 32, &env
->vec_status
);
1219 u
.f
= float32_div(u
.f
, tmp
, &env
->vec_status
);
1224 static inline uint32_t efsctsf(CPUPPCState
*env
, uint32_t val
)
1230 /* NaN are not treated the same way IEEE 754 does */
1231 if (unlikely(float32_is_quiet_nan(u
.f
))) {
1234 tmp
= uint64_to_float32(1ULL << 32, &env
->vec_status
);
1235 u
.f
= float32_mul(u
.f
, tmp
, &env
->vec_status
);
1237 return float32_to_int32(u
.f
, &env
->vec_status
);
1240 static inline uint32_t efsctuf(CPUPPCState
*env
, uint32_t val
)
1246 /* NaN are not treated the same way IEEE 754 does */
1247 if (unlikely(float32_is_quiet_nan(u
.f
))) {
1250 tmp
= uint64_to_float32(1ULL << 32, &env
->vec_status
);
1251 u
.f
= float32_mul(u
.f
, tmp
, &env
->vec_status
);
1253 return float32_to_uint32(u
.f
, &env
->vec_status
);
1256 #define HELPER_SPE_SINGLE_CONV(name) \
1257 uint32_t helper_e##name(CPUPPCState *env, uint32_t val) \
1259 return e##name(env, val); \
1262 HELPER_SPE_SINGLE_CONV(fscfsi
);
1264 HELPER_SPE_SINGLE_CONV(fscfui
);
1266 HELPER_SPE_SINGLE_CONV(fscfuf
);
1268 HELPER_SPE_SINGLE_CONV(fscfsf
);
1270 HELPER_SPE_SINGLE_CONV(fsctsi
);
1272 HELPER_SPE_SINGLE_CONV(fsctui
);
1274 HELPER_SPE_SINGLE_CONV(fsctsiz
);
1276 HELPER_SPE_SINGLE_CONV(fsctuiz
);
1278 HELPER_SPE_SINGLE_CONV(fsctsf
);
1280 HELPER_SPE_SINGLE_CONV(fsctuf
);
1282 #define HELPER_SPE_VECTOR_CONV(name) \
1283 uint64_t helper_ev##name(CPUPPCState *env, uint64_t val) \
1285 return ((uint64_t)e##name(env, val >> 32) << 32) | \
1286 (uint64_t)e##name(env, val); \
1289 HELPER_SPE_VECTOR_CONV(fscfsi
);
1291 HELPER_SPE_VECTOR_CONV(fscfui
);
1293 HELPER_SPE_VECTOR_CONV(fscfuf
);
1295 HELPER_SPE_VECTOR_CONV(fscfsf
);
1297 HELPER_SPE_VECTOR_CONV(fsctsi
);
1299 HELPER_SPE_VECTOR_CONV(fsctui
);
1301 HELPER_SPE_VECTOR_CONV(fsctsiz
);
1303 HELPER_SPE_VECTOR_CONV(fsctuiz
);
1305 HELPER_SPE_VECTOR_CONV(fsctsf
);
1307 HELPER_SPE_VECTOR_CONV(fsctuf
);
1309 /* Single-precision floating-point arithmetic */
1310 static inline uint32_t efsadd(CPUPPCState
*env
, uint32_t op1
, uint32_t op2
)
1316 u1
.f
= float32_add(u1
.f
, u2
.f
, &env
->vec_status
);
1320 static inline uint32_t efssub(CPUPPCState
*env
, uint32_t op1
, uint32_t op2
)
1326 u1
.f
= float32_sub(u1
.f
, u2
.f
, &env
->vec_status
);
1330 static inline uint32_t efsmul(CPUPPCState
*env
, uint32_t op1
, uint32_t op2
)
1336 u1
.f
= float32_mul(u1
.f
, u2
.f
, &env
->vec_status
);
1340 static inline uint32_t efsdiv(CPUPPCState
*env
, uint32_t op1
, uint32_t op2
)
1346 u1
.f
= float32_div(u1
.f
, u2
.f
, &env
->vec_status
);
1350 #define HELPER_SPE_SINGLE_ARITH(name) \
1351 uint32_t helper_e##name(CPUPPCState *env, uint32_t op1, uint32_t op2) \
1353 return e##name(env, op1, op2); \
1356 HELPER_SPE_SINGLE_ARITH(fsadd
);
1358 HELPER_SPE_SINGLE_ARITH(fssub
);
1360 HELPER_SPE_SINGLE_ARITH(fsmul
);
1362 HELPER_SPE_SINGLE_ARITH(fsdiv
);
1364 #define HELPER_SPE_VECTOR_ARITH(name) \
1365 uint64_t helper_ev##name(CPUPPCState *env, uint64_t op1, uint64_t op2) \
1367 return ((uint64_t)e##name(env, op1 >> 32, op2 >> 32) << 32) | \
1368 (uint64_t)e##name(env, op1, op2); \
1371 HELPER_SPE_VECTOR_ARITH(fsadd
);
1373 HELPER_SPE_VECTOR_ARITH(fssub
);
1375 HELPER_SPE_VECTOR_ARITH(fsmul
);
1377 HELPER_SPE_VECTOR_ARITH(fsdiv
);
1379 /* Single-precision floating-point comparisons */
1380 static inline uint32_t efscmplt(CPUPPCState
*env
, uint32_t op1
, uint32_t op2
)
1386 return float32_lt(u1
.f
, u2
.f
, &env
->vec_status
) ? 4 : 0;
1389 static inline uint32_t efscmpgt(CPUPPCState
*env
, uint32_t op1
, uint32_t op2
)
1395 return float32_le(u1
.f
, u2
.f
, &env
->vec_status
) ? 0 : 4;
1398 static inline uint32_t efscmpeq(CPUPPCState
*env
, uint32_t op1
, uint32_t op2
)
1404 return float32_eq(u1
.f
, u2
.f
, &env
->vec_status
) ? 4 : 0;
1407 static inline uint32_t efststlt(CPUPPCState
*env
, uint32_t op1
, uint32_t op2
)
1409 /* XXX: TODO: ignore special values (NaN, infinites, ...) */
1410 return efscmplt(env
, op1
, op2
);
1413 static inline uint32_t efststgt(CPUPPCState
*env
, uint32_t op1
, uint32_t op2
)
1415 /* XXX: TODO: ignore special values (NaN, infinites, ...) */
1416 return efscmpgt(env
, op1
, op2
);
1419 static inline uint32_t efststeq(CPUPPCState
*env
, uint32_t op1
, uint32_t op2
)
1421 /* XXX: TODO: ignore special values (NaN, infinites, ...) */
1422 return efscmpeq(env
, op1
, op2
);
1425 #define HELPER_SINGLE_SPE_CMP(name) \
1426 uint32_t helper_e##name(CPUPPCState *env, uint32_t op1, uint32_t op2) \
1428 return e##name(env, op1, op2) << 2; \
/* Instantiate the scalar SPE compare/test helpers. */
HELPER_SINGLE_SPE_CMP(fststlt);
HELPER_SINGLE_SPE_CMP(fststgt);
HELPER_SINGLE_SPE_CMP(fststeq);
HELPER_SINGLE_SPE_CMP(fscmplt);
HELPER_SINGLE_SPE_CMP(fscmpgt);
HELPER_SINGLE_SPE_CMP(fscmpeq);
/* Merge the two per-element compare results t0 (high element) and t1
 * (low element) into one value: t0 shifted into bit 3, t1 into bit 2,
 * (t0|t1) into bit 1 and (t0&t1) into bit 0. */
static inline uint32_t evcmp_merge(int t0, int t1)
{
    return (t0 << 3) | (t1 << 2) | ((t0 | t1) << 1) | (t0 & t1);
}
/* Generate helper_ev##name: run the scalar compare e##name on the high
 * and low 32-bit halves of each 64-bit operand, then merge the two
 * results with evcmp_merge. */
#define HELPER_VECTOR_SPE_CMP(name)                                          \
    uint32_t helper_ev##name(CPUPPCState *env, uint64_t op1, uint64_t op2)   \
    {                                                                        \
        return evcmp_merge(e##name(env, op1 >> 32, op2 >> 32),               \
                           e##name(env, op1, op2));                          \
    }
/* Instantiate the vector SPE compare/test helpers. */
HELPER_VECTOR_SPE_CMP(fststlt);
HELPER_VECTOR_SPE_CMP(fststgt);
HELPER_VECTOR_SPE_CMP(fststeq);
HELPER_VECTOR_SPE_CMP(fscmplt);
HELPER_VECTOR_SPE_CMP(fscmpgt);
HELPER_VECTOR_SPE_CMP(fscmpeq);
1467 /* Double-precision floating-point conversion */
1468 uint64_t helper_efdcfsi(CPUPPCState
*env
, uint32_t val
)
1472 u
.d
= int32_to_float64(val
, &env
->vec_status
);
1477 uint64_t helper_efdcfsid(CPUPPCState
*env
, uint64_t val
)
1481 u
.d
= int64_to_float64(val
, &env
->vec_status
);
1486 uint64_t helper_efdcfui(CPUPPCState
*env
, uint32_t val
)
1490 u
.d
= uint32_to_float64(val
, &env
->vec_status
);
1495 uint64_t helper_efdcfuid(CPUPPCState
*env
, uint64_t val
)
1499 u
.d
= uint64_to_float64(val
, &env
->vec_status
);
1504 uint32_t helper_efdctsi(CPUPPCState
*env
, uint64_t val
)
1509 /* NaN are not treated the same way IEEE 754 does */
1510 if (unlikely(float64_is_any_nan(u
.d
))) {
1514 return float64_to_int32(u
.d
, &env
->vec_status
);
1517 uint32_t helper_efdctui(CPUPPCState
*env
, uint64_t val
)
1522 /* NaN are not treated the same way IEEE 754 does */
1523 if (unlikely(float64_is_any_nan(u
.d
))) {
1527 return float64_to_uint32(u
.d
, &env
->vec_status
);
1530 uint32_t helper_efdctsiz(CPUPPCState
*env
, uint64_t val
)
1535 /* NaN are not treated the same way IEEE 754 does */
1536 if (unlikely(float64_is_any_nan(u
.d
))) {
1540 return float64_to_int32_round_to_zero(u
.d
, &env
->vec_status
);
1543 uint64_t helper_efdctsidz(CPUPPCState
*env
, uint64_t val
)
1548 /* NaN are not treated the same way IEEE 754 does */
1549 if (unlikely(float64_is_any_nan(u
.d
))) {
1553 return float64_to_int64_round_to_zero(u
.d
, &env
->vec_status
);
1556 uint32_t helper_efdctuiz(CPUPPCState
*env
, uint64_t val
)
1561 /* NaN are not treated the same way IEEE 754 does */
1562 if (unlikely(float64_is_any_nan(u
.d
))) {
1566 return float64_to_uint32_round_to_zero(u
.d
, &env
->vec_status
);
1569 uint64_t helper_efdctuidz(CPUPPCState
*env
, uint64_t val
)
1574 /* NaN are not treated the same way IEEE 754 does */
1575 if (unlikely(float64_is_any_nan(u
.d
))) {
1579 return float64_to_uint64_round_to_zero(u
.d
, &env
->vec_status
);
1582 uint64_t helper_efdcfsf(CPUPPCState
*env
, uint32_t val
)
1587 u
.d
= int32_to_float64(val
, &env
->vec_status
);
1588 tmp
= int64_to_float64(1ULL << 32, &env
->vec_status
);
1589 u
.d
= float64_div(u
.d
, tmp
, &env
->vec_status
);
1594 uint64_t helper_efdcfuf(CPUPPCState
*env
, uint32_t val
)
1599 u
.d
= uint32_to_float64(val
, &env
->vec_status
);
1600 tmp
= int64_to_float64(1ULL << 32, &env
->vec_status
);
1601 u
.d
= float64_div(u
.d
, tmp
, &env
->vec_status
);
1606 uint32_t helper_efdctsf(CPUPPCState
*env
, uint64_t val
)
1612 /* NaN are not treated the same way IEEE 754 does */
1613 if (unlikely(float64_is_any_nan(u
.d
))) {
1616 tmp
= uint64_to_float64(1ULL << 32, &env
->vec_status
);
1617 u
.d
= float64_mul(u
.d
, tmp
, &env
->vec_status
);
1619 return float64_to_int32(u
.d
, &env
->vec_status
);
1622 uint32_t helper_efdctuf(CPUPPCState
*env
, uint64_t val
)
1628 /* NaN are not treated the same way IEEE 754 does */
1629 if (unlikely(float64_is_any_nan(u
.d
))) {
1632 tmp
= uint64_to_float64(1ULL << 32, &env
->vec_status
);
1633 u
.d
= float64_mul(u
.d
, tmp
, &env
->vec_status
);
1635 return float64_to_uint32(u
.d
, &env
->vec_status
);
1638 uint32_t helper_efscfd(CPUPPCState
*env
, uint64_t val
)
1644 u2
.f
= float64_to_float32(u1
.d
, &env
->vec_status
);
1649 uint64_t helper_efdcfs(CPUPPCState
*env
, uint32_t val
)
1655 u2
.d
= float32_to_float64(u1
.f
, &env
->vec_status
);
1660 /* Double precision fixed-point arithmetic */
1661 uint64_t helper_efdadd(CPUPPCState
*env
, uint64_t op1
, uint64_t op2
)
1667 u1
.d
= float64_add(u1
.d
, u2
.d
, &env
->vec_status
);
1671 uint64_t helper_efdsub(CPUPPCState
*env
, uint64_t op1
, uint64_t op2
)
1677 u1
.d
= float64_sub(u1
.d
, u2
.d
, &env
->vec_status
);
1681 uint64_t helper_efdmul(CPUPPCState
*env
, uint64_t op1
, uint64_t op2
)
1687 u1
.d
= float64_mul(u1
.d
, u2
.d
, &env
->vec_status
);
1691 uint64_t helper_efddiv(CPUPPCState
*env
, uint64_t op1
, uint64_t op2
)
1697 u1
.d
= float64_div(u1
.d
, u2
.d
, &env
->vec_status
);
1701 /* Double precision floating point helpers */
1702 uint32_t helper_efdtstlt(CPUPPCState
*env
, uint64_t op1
, uint64_t op2
)
1708 return float64_lt(u1
.d
, u2
.d
, &env
->vec_status
) ? 4 : 0;
1711 uint32_t helper_efdtstgt(CPUPPCState
*env
, uint64_t op1
, uint64_t op2
)
1717 return float64_le(u1
.d
, u2
.d
, &env
->vec_status
) ? 0 : 4;
1720 uint32_t helper_efdtsteq(CPUPPCState
*env
, uint64_t op1
, uint64_t op2
)
1726 return float64_eq_quiet(u1
.d
, u2
.d
, &env
->vec_status
) ? 4 : 0;
1729 uint32_t helper_efdcmplt(CPUPPCState
*env
, uint64_t op1
, uint64_t op2
)
1731 /* XXX: TODO: test special values (NaN, infinites, ...) */
1732 return helper_efdtstlt(env
, op1
, op2
);
1735 uint32_t helper_efdcmpgt(CPUPPCState
*env
, uint64_t op1
, uint64_t op2
)
1737 /* XXX: TODO: test special values (NaN, infinites, ...) */
1738 return helper_efdtstgt(env
, op1
, op2
);
1741 uint32_t helper_efdcmpeq(CPUPPCState
*env
, uint64_t op1
, uint64_t op2
)
1743 /* XXX: TODO: test special values (NaN, infinites, ...) */
1744 return helper_efdtsteq(env
, op1
, op2
);