target/ppc: Remove float_check_status
target/ppc/fpu_helper.c
/*
 *  PowerPC floating point and SPE emulation helpers for QEMU.
 *
 *  Copyright (c) 2003-2007 Jocelyn Mayer
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"
#include "cpu.h"
#include "exec/helper-proto.h"
#include "exec/exec-all.h"
#include "internal.h"
#include "fpu/softfloat.h"

static inline float128 float128_snan_to_qnan(float128 x)
{
    float128 r;

    r.high = x.high | 0x0000800000000000;
    r.low = x.low;
    return r;
}

#define float64_snan_to_qnan(x) ((x) | 0x0008000000000000ULL)
#define float32_snan_to_qnan(x) ((x) | 0x00400000)
#define float16_snan_to_qnan(x) ((x) | 0x0200)

static inline bool fp_exceptions_enabled(CPUPPCState *env)
{
#ifdef CONFIG_USER_ONLY
    return true;
#else
    return (env->msr & ((1U << MSR_FE0) | (1U << MSR_FE1))) != 0;
#endif
}

/*****************************************************************************/
/* Floating point operations helpers */

/*
 * This is the non-arithmetic conversion that happens e.g. on loads.
 * In the Power ISA pseudocode, this is called DOUBLE.
 */
uint64_t helper_todouble(uint32_t arg)
{
    uint32_t abs_arg = arg & 0x7fffffff;
    uint64_t ret;

    if (likely(abs_arg >= 0x00800000)) {
        /* Normalized operand, or Inf, or NaN.  */
        ret = (uint64_t)extract32(arg, 30, 2) << 62;
        ret |= ((extract32(arg, 30, 1) ^ 1) * (uint64_t)7) << 59;
        ret |= (uint64_t)extract32(arg, 0, 30) << 29;
    } else {
        /* Zero or Denormalized operand.  */
        ret = (uint64_t)extract32(arg, 31, 1) << 63;
        if (unlikely(abs_arg != 0)) {
            /*
             * Denormalized operand.  Shift the fraction so that its
             * msb lands in the implicit bit position; shift is thus
             * in the range [1:23].
             */
            int shift = clz32(abs_arg) - 8;
            /*
             * The first three terms compute the float64 exponent.  The
             * result is biased by -1 so that the implicit bit, added
             * below, carries into the exponent.
             */
            int exp = -126 - shift + 1023 - 1;

            ret |= (uint64_t)exp << 52;
            ret += (uint64_t)abs_arg << (shift + 29);
        }
    }
    return ret;
}

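/*
 * Two worked DOUBLE conversions for reference (standard IEEE 754
 * encodings, easy to check by hand): a normal value keeps its fraction
 * and gets a re-biased exponent, e.g. 1.0f (0x3f800000) becomes
 * 0x3ff0000000000000; the smallest denormal 0x00000001 (2^-149) is
 * renormalized to 0x36a0000000000000, i.e. 2^-149 with biased exponent
 * -149 + 1023 = 874 (0x36a) and a zero fraction.
 */
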
/*
 * This is the non-arithmetic conversion that happens e.g. on stores.
 * In the Power ISA pseudocode, this is called SINGLE.
 */
uint32_t helper_tosingle(uint64_t arg)
{
    int exp = extract64(arg, 52, 11);
    uint32_t ret;

    if (likely(exp > 896)) {
        /* No denormalization required (includes Inf, NaN).  */
        ret = extract64(arg, 62, 2) << 30;
        ret |= extract64(arg, 29, 30);
    } else {
        /*
         * Zero or Denormal result.  If the exponent is in bounds for
         * a single-precision denormal result, extract the proper bits.
         * If the input is not zero, and the exponent is out of bounds,
         * then the result is undefined; this underflows to zero.
         */
        ret = extract64(arg, 63, 1) << 31;
        if (unlikely(exp >= 874)) {
            /* Denormal result.  */
            ret |= ((1ULL << 52) | extract64(arg, 0, 52)) >> (896 + 30 - exp);
        }
    }
    return ret;
}

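/*
 * SINGLE is the inverse mapping: 0x3ff0000000000000 (1.0) stores as
 * 0x3f800000, and 0x36a0000000000000 (2^-149) lands in the denormal
 * branch, where shifting (1 << 52) right by 896 + 30 - 874 = 52 yields
 * the single-precision denormal 0x00000001.
 */
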
static inline int ppc_float32_get_unbiased_exp(float32 f)
{
    return ((f >> 23) & 0xFF) - 127;
}

static inline int ppc_float64_get_unbiased_exp(float64 f)
{
    return ((f >> 52) & 0x7FF) - 1023;
}

#define COMPUTE_FPRF(tp)                                       \
void helper_compute_fprf_##tp(CPUPPCState *env, tp arg)        \
{                                                              \
    int isneg;                                                 \
    int fprf;                                                  \
                                                               \
    isneg = tp##_is_neg(arg);                                  \
    if (unlikely(tp##_is_any_nan(arg))) {                      \
        if (tp##_is_signaling_nan(arg, &env->fp_status)) {     \
            /* Signaling NaN: flags are undefined */           \
            fprf = 0x00;                                       \
        } else {                                               \
            /* Quiet NaN */                                    \
            fprf = 0x11;                                       \
        }                                                      \
    } else if (unlikely(tp##_is_infinity(arg))) {              \
        /* +/- infinity */                                     \
        if (isneg) {                                           \
            fprf = 0x09;                                       \
        } else {                                               \
            fprf = 0x05;                                       \
        }                                                      \
    } else {                                                   \
        if (tp##_is_zero(arg)) {                               \
            /* +/- zero */                                     \
            if (isneg) {                                       \
                fprf = 0x12;                                   \
            } else {                                           \
                fprf = 0x02;                                   \
            }                                                  \
        } else {                                               \
            if (tp##_is_zero_or_denormal(arg)) {               \
                /* Denormalized numbers */                     \
                fprf = 0x10;                                   \
            } else {                                           \
                /* Normalized numbers */                       \
                fprf = 0x00;                                   \
            }                                                  \
            if (isneg) {                                       \
                fprf |= 0x08;                                  \
            } else {                                           \
                fprf |= 0x04;                                  \
            }                                                  \
        }                                                      \
    }                                                          \
    /* We update FPSCR_FPRF */                                 \
    env->fpscr &= ~(0x1F << FPSCR_FPRF);                       \
    env->fpscr |= fprf << FPSCR_FPRF;                          \
}

COMPUTE_FPRF(float16)
COMPUTE_FPRF(float32)
COMPUTE_FPRF(float64)
COMPUTE_FPRF(float128)

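/*
 * The resulting 5-bit FPRF values, for reference (C is the leftmost
 * bit, FPCC the remaining four):
 *   0x11 quiet NaN      0x09 -infinity     0x05 +infinity
 *   0x12 -zero          0x02 +zero         0x18 -denormal
 *   0x14 +denormal      0x08 -normal       0x04 +normal
 */
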
/* Floating-point invalid operations exception */
static void finish_invalid_op_excp(CPUPPCState *env, int op, uintptr_t retaddr)
{
    /* Update the floating-point invalid operation summary */
    env->fpscr |= 1 << FPSCR_VX;
    /* Update the floating-point exception summary */
    env->fpscr |= FP_FX;
    if (fpscr_ve != 0) {
        /* Update the floating-point enabled exception summary */
        env->fpscr |= 1 << FPSCR_FEX;
        if (fp_exceptions_enabled(env)) {
            raise_exception_err_ra(env, POWERPC_EXCP_PROGRAM,
                                   POWERPC_EXCP_FP | op, retaddr);
        }
    }
}

static void finish_invalid_op_arith(CPUPPCState *env, int op,
                                    bool set_fpcc, uintptr_t retaddr)
{
    env->fpscr &= ~((1 << FPSCR_FR) | (1 << FPSCR_FI));
    if (fpscr_ve == 0) {
        if (set_fpcc) {
            env->fpscr &= ~(0xF << FPSCR_FPCC);
            env->fpscr |= 0x11 << FPSCR_FPCC;
        }
    }
    finish_invalid_op_excp(env, op, retaddr);
}

/* Signalling NaN */
static void float_invalid_op_vxsnan(CPUPPCState *env, uintptr_t retaddr)
{
    env->fpscr |= 1 << FPSCR_VXSNAN;
    finish_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, retaddr);
}

/* Magnitude subtraction of infinities */
static void float_invalid_op_vxisi(CPUPPCState *env, bool set_fpcc,
                                   uintptr_t retaddr)
{
    env->fpscr |= 1 << FPSCR_VXISI;
    finish_invalid_op_arith(env, POWERPC_EXCP_FP_VXISI, set_fpcc, retaddr);
}

/* Division of infinity by infinity */
static void float_invalid_op_vxidi(CPUPPCState *env, bool set_fpcc,
                                   uintptr_t retaddr)
{
    env->fpscr |= 1 << FPSCR_VXIDI;
    finish_invalid_op_arith(env, POWERPC_EXCP_FP_VXIDI, set_fpcc, retaddr);
}

/* Division of zero by zero */
static void float_invalid_op_vxzdz(CPUPPCState *env, bool set_fpcc,
                                   uintptr_t retaddr)
{
    env->fpscr |= 1 << FPSCR_VXZDZ;
    finish_invalid_op_arith(env, POWERPC_EXCP_FP_VXZDZ, set_fpcc, retaddr);
}

/* Multiplication of zero by infinity */
static void float_invalid_op_vximz(CPUPPCState *env, bool set_fpcc,
                                   uintptr_t retaddr)
{
    env->fpscr |= 1 << FPSCR_VXIMZ;
    finish_invalid_op_arith(env, POWERPC_EXCP_FP_VXIMZ, set_fpcc, retaddr);
}

/* Square root of a negative number */
static void float_invalid_op_vxsqrt(CPUPPCState *env, bool set_fpcc,
                                    uintptr_t retaddr)
{
    env->fpscr |= 1 << FPSCR_VXSQRT;
    finish_invalid_op_arith(env, POWERPC_EXCP_FP_VXSQRT, set_fpcc, retaddr);
}

/* Ordered comparison of NaN */
static void float_invalid_op_vxvc(CPUPPCState *env, bool set_fpcc,
                                  uintptr_t retaddr)
{
    env->fpscr |= 1 << FPSCR_VXVC;
    if (set_fpcc) {
        env->fpscr &= ~(0xF << FPSCR_FPCC);
        env->fpscr |= 0x11 << FPSCR_FPCC;
    }
    /* Update the floating-point invalid operation summary */
    env->fpscr |= 1 << FPSCR_VX;
    /* Update the floating-point exception summary */
    env->fpscr |= FP_FX;
    /* We must update the target FPR before raising the exception */
    if (fpscr_ve != 0) {
        CPUState *cs = CPU(ppc_env_get_cpu(env));

        cs->exception_index = POWERPC_EXCP_PROGRAM;
        env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_VXVC;
        /* Update the floating-point enabled exception summary */
        env->fpscr |= 1 << FPSCR_FEX;
        /* Exception is deferred */
    }
}

/* Invalid conversion */
static void float_invalid_op_vxcvi(CPUPPCState *env, bool set_fpcc,
                                   uintptr_t retaddr)
{
    env->fpscr |= 1 << FPSCR_VXCVI;
    env->fpscr &= ~((1 << FPSCR_FR) | (1 << FPSCR_FI));
    if (fpscr_ve == 0) {
        if (set_fpcc) {
            env->fpscr &= ~(0xF << FPSCR_FPCC);
            env->fpscr |= 0x11 << FPSCR_FPCC;
        }
    }
    finish_invalid_op_excp(env, POWERPC_EXCP_FP_VXCVI, retaddr);
}

static inline void float_zero_divide_excp(CPUPPCState *env, uintptr_t raddr)
{
    env->fpscr |= 1 << FPSCR_ZX;
    env->fpscr &= ~((1 << FPSCR_FR) | (1 << FPSCR_FI));
    /* Update the floating-point exception summary */
    env->fpscr |= FP_FX;
    if (fpscr_ze != 0) {
        /* Update the floating-point enabled exception summary */
        env->fpscr |= 1 << FPSCR_FEX;
        if (fp_exceptions_enabled(env)) {
            raise_exception_err_ra(env, POWERPC_EXCP_PROGRAM,
                                   POWERPC_EXCP_FP | POWERPC_EXCP_FP_ZX,
                                   raddr);
        }
    }
}

static inline void float_overflow_excp(CPUPPCState *env)
{
    CPUState *cs = CPU(ppc_env_get_cpu(env));

    env->fpscr |= 1 << FPSCR_OX;
    /* Update the floating-point exception summary */
    env->fpscr |= FP_FX;
    if (fpscr_oe != 0) {
        /* XXX: should adjust the result */
        /* Update the floating-point enabled exception summary */
        env->fpscr |= 1 << FPSCR_FEX;
        /* We must update the target FPR before raising the exception */
        cs->exception_index = POWERPC_EXCP_PROGRAM;
        env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_OX;
    } else {
        env->fpscr |= 1 << FPSCR_XX;
        env->fpscr |= 1 << FPSCR_FI;
    }
}

static inline void float_underflow_excp(CPUPPCState *env)
{
    CPUState *cs = CPU(ppc_env_get_cpu(env));

    env->fpscr |= 1 << FPSCR_UX;
    /* Update the floating-point exception summary */
    env->fpscr |= FP_FX;
    if (fpscr_ue != 0) {
        /* XXX: should adjust the result */
        /* Update the floating-point enabled exception summary */
        env->fpscr |= 1 << FPSCR_FEX;
        /* We must update the target FPR before raising the exception */
        cs->exception_index = POWERPC_EXCP_PROGRAM;
        env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_UX;
    }
}

static inline void float_inexact_excp(CPUPPCState *env)
{
    CPUState *cs = CPU(ppc_env_get_cpu(env));

    env->fpscr |= 1 << FPSCR_FI;
    env->fpscr |= 1 << FPSCR_XX;
    /* Update the floating-point exception summary */
    env->fpscr |= FP_FX;
    if (fpscr_xe != 0) {
        /* Update the floating-point enabled exception summary */
        env->fpscr |= 1 << FPSCR_FEX;
        /* We must update the target FPR before raising the exception */
        cs->exception_index = POWERPC_EXCP_PROGRAM;
        env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_XX;
    }
}

static inline void fpscr_set_rounding_mode(CPUPPCState *env)
{
    int rnd_type;

    /* Set rounding mode */
    switch (fpscr_rn) {
    case 0:
        /* Best approximation (round to nearest) */
        rnd_type = float_round_nearest_even;
        break;
    case 1:
        /* Smaller magnitude (round toward zero) */
        rnd_type = float_round_to_zero;
        break;
    case 2:
        /* Round toward +infinity */
        rnd_type = float_round_up;
        break;
    default:
    case 3:
        /* Round toward -infinity */
        rnd_type = float_round_down;
        break;
    }
    set_float_rounding_mode(rnd_type, &env->fp_status);
}

void helper_fpscr_clrbit(CPUPPCState *env, uint32_t bit)
{
    int prev;

    prev = (env->fpscr >> bit) & 1;
    env->fpscr &= ~(1 << bit);
    if (prev == 1) {
        switch (bit) {
        case FPSCR_RN1:
        case FPSCR_RN:
            fpscr_set_rounding_mode(env);
            break;
        case FPSCR_VXSNAN:
        case FPSCR_VXISI:
        case FPSCR_VXIDI:
        case FPSCR_VXZDZ:
        case FPSCR_VXIMZ:
        case FPSCR_VXVC:
        case FPSCR_VXSOFT:
        case FPSCR_VXSQRT:
        case FPSCR_VXCVI:
            if (!fpscr_ix) {
                /* Set VX bit to zero */
                env->fpscr &= ~(1 << FPSCR_VX);
            }
            break;
        case FPSCR_OX:
        case FPSCR_UX:
        case FPSCR_ZX:
        case FPSCR_XX:
        case FPSCR_VE:
        case FPSCR_OE:
        case FPSCR_UE:
        case FPSCR_ZE:
        case FPSCR_XE:
            if (!fpscr_eex) {
                /* Clear the FEX bit */
                env->fpscr &= ~(1 << FPSCR_FEX);
            }
            break;
        default:
            break;
        }
    }
}

void helper_fpscr_setbit(CPUPPCState *env, uint32_t bit)
{
    CPUState *cs = CPU(ppc_env_get_cpu(env));
    int prev;

    prev = (env->fpscr >> bit) & 1;
    env->fpscr |= 1 << bit;
    if (prev == 0) {
        switch (bit) {
        case FPSCR_VX:
            env->fpscr |= FP_FX;
            if (fpscr_ve) {
                goto raise_ve;
            }
            break;
        case FPSCR_OX:
            env->fpscr |= FP_FX;
            if (fpscr_oe) {
                goto raise_oe;
            }
            break;
        case FPSCR_UX:
            env->fpscr |= FP_FX;
            if (fpscr_ue) {
                goto raise_ue;
            }
            break;
        case FPSCR_ZX:
            env->fpscr |= FP_FX;
            if (fpscr_ze) {
                goto raise_ze;
            }
            break;
        case FPSCR_XX:
            env->fpscr |= FP_FX;
            if (fpscr_xe) {
                goto raise_xe;
            }
            break;
        case FPSCR_VXSNAN:
        case FPSCR_VXISI:
        case FPSCR_VXIDI:
        case FPSCR_VXZDZ:
        case FPSCR_VXIMZ:
        case FPSCR_VXVC:
        case FPSCR_VXSOFT:
        case FPSCR_VXSQRT:
        case FPSCR_VXCVI:
            env->fpscr |= 1 << FPSCR_VX;
            env->fpscr |= FP_FX;
            if (fpscr_ve != 0) {
                goto raise_ve;
            }
            break;
        case FPSCR_VE:
            if (fpscr_vx != 0) {
            raise_ve:
                env->error_code = POWERPC_EXCP_FP;
                if (fpscr_vxsnan) {
                    env->error_code |= POWERPC_EXCP_FP_VXSNAN;
                }
                if (fpscr_vxisi) {
                    env->error_code |= POWERPC_EXCP_FP_VXISI;
                }
                if (fpscr_vxidi) {
                    env->error_code |= POWERPC_EXCP_FP_VXIDI;
                }
                if (fpscr_vxzdz) {
                    env->error_code |= POWERPC_EXCP_FP_VXZDZ;
                }
                if (fpscr_vximz) {
                    env->error_code |= POWERPC_EXCP_FP_VXIMZ;
                }
                if (fpscr_vxvc) {
                    env->error_code |= POWERPC_EXCP_FP_VXVC;
                }
                if (fpscr_vxsoft) {
                    env->error_code |= POWERPC_EXCP_FP_VXSOFT;
                }
                if (fpscr_vxsqrt) {
                    env->error_code |= POWERPC_EXCP_FP_VXSQRT;
                }
                if (fpscr_vxcvi) {
                    env->error_code |= POWERPC_EXCP_FP_VXCVI;
                }
                goto raise_excp;
            }
            break;
        case FPSCR_OE:
            if (fpscr_ox != 0) {
            raise_oe:
                env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_OX;
                goto raise_excp;
            }
            break;
        case FPSCR_UE:
            if (fpscr_ux != 0) {
            raise_ue:
                env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_UX;
                goto raise_excp;
            }
            break;
        case FPSCR_ZE:
            if (fpscr_zx != 0) {
            raise_ze:
                env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_ZX;
                goto raise_excp;
            }
            break;
        case FPSCR_XE:
            if (fpscr_xx != 0) {
            raise_xe:
                env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_XX;
                goto raise_excp;
            }
            break;
        case FPSCR_RN1:
        case FPSCR_RN:
            fpscr_set_rounding_mode(env);
            break;
        default:
            break;
        raise_excp:
            /* Update the floating-point enabled exception summary */
            env->fpscr |= 1 << FPSCR_FEX;
            /* We have to update Rc1 before raising the exception */
            cs->exception_index = POWERPC_EXCP_PROGRAM;
            break;
        }
    }
}

void helper_store_fpscr(CPUPPCState *env, uint64_t arg, uint32_t mask)
{
    CPUState *cs = CPU(ppc_env_get_cpu(env));
    target_ulong prev, new;
    int i;

    prev = env->fpscr;
    new = (target_ulong)arg;
    new &= ~0x60000000LL;
    new |= prev & 0x60000000LL;
    for (i = 0; i < sizeof(target_ulong) * 2; i++) {
        if (mask & (1 << i)) {
            env->fpscr &= ~(0xFLL << (4 * i));
            env->fpscr |= new & (0xFLL << (4 * i));
        }
    }
    /* Update VX and FEX */
    if (fpscr_ix != 0) {
        env->fpscr |= 1 << FPSCR_VX;
    } else {
        env->fpscr &= ~(1 << FPSCR_VX);
    }
    if ((fpscr_ex & fpscr_eex) != 0) {
        env->fpscr |= 1 << FPSCR_FEX;
        cs->exception_index = POWERPC_EXCP_PROGRAM;
        /* XXX: we should compute it properly */
        env->error_code = POWERPC_EXCP_FP;
    } else {
        env->fpscr &= ~(1 << FPSCR_FEX);
    }
    fpscr_set_rounding_mode(env);
}

void store_fpscr(CPUPPCState *env, uint64_t arg, uint32_t mask)
{
    helper_store_fpscr(env, arg, mask);
}

static void do_float_check_status(CPUPPCState *env, uintptr_t raddr)
{
    CPUState *cs = CPU(ppc_env_get_cpu(env));
    int status = get_float_exception_flags(&env->fp_status);
    bool inexact_happened = false;

    if (status & float_flag_overflow) {
        float_overflow_excp(env);
    } else if (status & float_flag_underflow) {
        float_underflow_excp(env);
    } else if (status & float_flag_inexact) {
        float_inexact_excp(env);
        inexact_happened = true;
    }

    /* if the inexact flag was not set */
    if (inexact_happened == false) {
        env->fpscr &= ~(1 << FPSCR_FI); /* clear the FPSCR[FI] bit */
    }

    if (cs->exception_index == POWERPC_EXCP_PROGRAM &&
        (env->error_code & POWERPC_EXCP_FP)) {
        /* Deferred floating-point exception after target FPR update */
        if (fp_exceptions_enabled(env)) {
            raise_exception_err_ra(env, cs->exception_index,
                                   env->error_code, raddr);
        }
    }
}

void helper_float_check_status(CPUPPCState *env)
{
    do_float_check_status(env, GETPC());
}

void helper_reset_fpstatus(CPUPPCState *env)
{
    set_float_exception_flags(0, &env->fp_status);
}

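/*
 * The arithmetic helpers below share one pattern: the accumulated
 * softfloat flags are cleared with helper_reset_fpstatus() (typically
 * emitted by the translator before the operation, or called by the
 * helper itself), the softfloat operation runs on env->fp_status (or
 * on a local float_status copy), the invalid-operation causes are
 * mapped onto the FPSCR VX* bits, and do_float_check_status() finally
 * folds the remaining flags into FPSCR and delivers any deferred
 * enabled exception.
 */
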
/* fadd - fadd. */
float64 helper_fadd(CPUPPCState *env, float64 arg1, float64 arg2)
{
    float64 ret = float64_add(arg1, arg2, &env->fp_status);
    int status = get_float_exception_flags(&env->fp_status);

    if (unlikely(status & float_flag_invalid)) {
        if (float64_is_infinity(arg1) && float64_is_infinity(arg2)) {
            /* Magnitude subtraction of infinities */
            float_invalid_op_vxisi(env, 1, GETPC());
        } else if (float64_is_signaling_nan(arg1, &env->fp_status) ||
                   float64_is_signaling_nan(arg2, &env->fp_status)) {
            /* sNaN addition */
            float_invalid_op_vxsnan(env, GETPC());
        }
    }

    return ret;
}

/* fsub - fsub. */
float64 helper_fsub(CPUPPCState *env, float64 arg1, float64 arg2)
{
    float64 ret = float64_sub(arg1, arg2, &env->fp_status);
    int status = get_float_exception_flags(&env->fp_status);

    if (unlikely(status & float_flag_invalid)) {
        if (float64_is_infinity(arg1) && float64_is_infinity(arg2)) {
            /* Magnitude subtraction of infinities */
            float_invalid_op_vxisi(env, 1, GETPC());
        } else if (float64_is_signaling_nan(arg1, &env->fp_status) ||
                   float64_is_signaling_nan(arg2, &env->fp_status)) {
            /* sNaN subtraction */
            float_invalid_op_vxsnan(env, GETPC());
        }
    }

    return ret;
}

/* fmul - fmul. */
float64 helper_fmul(CPUPPCState *env, float64 arg1, float64 arg2)
{
    float64 ret = float64_mul(arg1, arg2, &env->fp_status);
    int status = get_float_exception_flags(&env->fp_status);

    if (unlikely(status & float_flag_invalid)) {
        if ((float64_is_infinity(arg1) && float64_is_zero(arg2)) ||
            (float64_is_zero(arg1) && float64_is_infinity(arg2))) {
            /* Multiplication of zero by infinity */
            float_invalid_op_vximz(env, 1, GETPC());
        } else if (float64_is_signaling_nan(arg1, &env->fp_status) ||
                   float64_is_signaling_nan(arg2, &env->fp_status)) {
            /* sNaN multiplication */
            float_invalid_op_vxsnan(env, GETPC());
        }
    }

    return ret;
}

/* fdiv - fdiv. */
float64 helper_fdiv(CPUPPCState *env, float64 arg1, float64 arg2)
{
    float64 ret = float64_div(arg1, arg2, &env->fp_status);
    int status = get_float_exception_flags(&env->fp_status);

    if (unlikely(status)) {
        if (status & float_flag_invalid) {
            /* Determine what kind of invalid operation was seen.  */
            if (float64_is_infinity(arg1) && float64_is_infinity(arg2)) {
                /* Division of infinity by infinity */
                float_invalid_op_vxidi(env, 1, GETPC());
            } else if (float64_is_zero(arg1) && float64_is_zero(arg2)) {
                /* Division of zero by zero */
                float_invalid_op_vxzdz(env, 1, GETPC());
            } else if (float64_is_signaling_nan(arg1, &env->fp_status) ||
                       float64_is_signaling_nan(arg2, &env->fp_status)) {
                /* sNaN division */
                float_invalid_op_vxsnan(env, GETPC());
            }
        }
        if (status & float_flag_divbyzero) {
            float_zero_divide_excp(env, GETPC());
        }
    }

    return ret;
}

#define FPU_FCTI(op, cvt, nanval)                                  \
uint64_t helper_##op(CPUPPCState *env, uint64_t arg)               \
{                                                                  \
    CPU_DoubleU farg;                                              \
                                                                   \
    farg.ll = arg;                                                 \
    farg.ll = float64_to_##cvt(farg.d, &env->fp_status);           \
                                                                   \
    if (unlikely(env->fp_status.float_exception_flags)) {          \
        if (float64_is_any_nan(arg)) {                             \
            float_invalid_op_vxcvi(env, 1, GETPC());               \
            if (float64_is_signaling_nan(arg, &env->fp_status)) {  \
                float_invalid_op_vxsnan(env, GETPC());             \
            }                                                      \
            farg.ll = nanval;                                      \
        } else if (env->fp_status.float_exception_flags &          \
                   float_flag_invalid) {                           \
            float_invalid_op_vxcvi(env, 1, GETPC());               \
        }                                                          \
        do_float_check_status(env, GETPC());                       \
    }                                                              \
    return farg.ll;                                                \
}

FPU_FCTI(fctiw, int32, 0x80000000U)
FPU_FCTI(fctiwz, int32_round_to_zero, 0x80000000U)
FPU_FCTI(fctiwu, uint32, 0x00000000U)
FPU_FCTI(fctiwuz, uint32_round_to_zero, 0x00000000U)
FPU_FCTI(fctid, int64, 0x8000000000000000ULL)
FPU_FCTI(fctidz, int64_round_to_zero, 0x8000000000000000ULL)
FPU_FCTI(fctidu, uint64, 0x0000000000000000ULL)
FPU_FCTI(fctiduz, uint64_round_to_zero, 0x0000000000000000ULL)

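/*
 * When the source of one of the conversions above is any NaN, the
 * result is forced to the macro's nanval argument (0x80000000 for
 * fctiw/fctiwz, 0x8000000000000000 for fctid/fctidz, zero for the
 * unsigned forms); out-of-range non-NaN inputs keep softfloat's own
 * saturated result and only set VXCVI.
 */
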
#define FPU_FCFI(op, cvtr, is_single)                      \
uint64_t helper_##op(CPUPPCState *env, uint64_t arg)       \
{                                                          \
    CPU_DoubleU farg;                                      \
                                                           \
    if (is_single) {                                       \
        float32 tmp = cvtr(arg, &env->fp_status);          \
        farg.d = float32_to_float64(tmp, &env->fp_status); \
    } else {                                               \
        farg.d = cvtr(arg, &env->fp_status);               \
    }                                                      \
    do_float_check_status(env, GETPC());                   \
    return farg.ll;                                        \
}

FPU_FCFI(fcfid, int64_to_float64, 0)
FPU_FCFI(fcfids, int64_to_float32, 1)
FPU_FCFI(fcfidu, uint64_to_float64, 0)
FPU_FCFI(fcfidus, uint64_to_float32, 1)

static inline uint64_t do_fri(CPUPPCState *env, uint64_t arg,
                              int rounding_mode)
{
    CPU_DoubleU farg;

    farg.ll = arg;

    if (unlikely(float64_is_signaling_nan(farg.d, &env->fp_status))) {
        /* sNaN round */
        float_invalid_op_vxsnan(env, GETPC());
        farg.ll = arg | 0x0008000000000000ULL;
    } else {
        int inexact = get_float_exception_flags(&env->fp_status) &
                      float_flag_inexact;
        set_float_rounding_mode(rounding_mode, &env->fp_status);
        farg.ll = float64_round_to_int(farg.d, &env->fp_status);
        /* Restore rounding mode from FPSCR */
        fpscr_set_rounding_mode(env);

        /* fri* does not set FPSCR[XX] */
        if (!inexact) {
            env->fp_status.float_exception_flags &= ~float_flag_inexact;
        }
    }
    do_float_check_status(env, GETPC());
    return farg.ll;
}

uint64_t helper_frin(CPUPPCState *env, uint64_t arg)
{
    return do_fri(env, arg, float_round_ties_away);
}

uint64_t helper_friz(CPUPPCState *env, uint64_t arg)
{
    return do_fri(env, arg, float_round_to_zero);
}

uint64_t helper_frip(CPUPPCState *env, uint64_t arg)
{
    return do_fri(env, arg, float_round_up);
}

uint64_t helper_frim(CPUPPCState *env, uint64_t arg)
{
    return do_fri(env, arg, float_round_down);
}

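/*
 * The four fri* helpers differ only in the transient rounding mode
 * handed to do_fri: frin rounds to nearest (ties away from zero),
 * friz truncates toward zero, frip rounds toward +infinity and frim
 * rounds toward -infinity.
 */
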
#define FPU_MADDSUB_UPDATE(NAME, TP)                                    \
static void NAME(CPUPPCState *env, TP arg1, TP arg2, TP arg3,           \
                 unsigned int madd_flags, uintptr_t retaddr)            \
{                                                                       \
    if (TP##_is_signaling_nan(arg1, &env->fp_status) ||                 \
        TP##_is_signaling_nan(arg2, &env->fp_status) ||                 \
        TP##_is_signaling_nan(arg3, &env->fp_status)) {                 \
        /* sNaN operation */                                            \
        float_invalid_op_vxsnan(env, retaddr);                          \
    }                                                                   \
    if ((TP##_is_infinity(arg1) && TP##_is_zero(arg2)) ||               \
        (TP##_is_zero(arg1) && TP##_is_infinity(arg2))) {               \
        /* Multiplication of zero by infinity */                        \
        float_invalid_op_vximz(env, 1, retaddr);                        \
    }                                                                   \
    if ((TP##_is_infinity(arg1) || TP##_is_infinity(arg2)) &&           \
        TP##_is_infinity(arg3)) {                                       \
        uint8_t aSign, bSign, cSign;                                    \
                                                                        \
        aSign = TP##_is_neg(arg1);                                      \
        bSign = TP##_is_neg(arg2);                                      \
        cSign = TP##_is_neg(arg3);                                      \
        if (madd_flags & float_muladd_negate_c) {                       \
            cSign ^= 1;                                                 \
        }                                                               \
        if (aSign ^ bSign ^ cSign) {                                    \
            float_invalid_op_vxisi(env, 1, retaddr);                    \
        }                                                               \
    }                                                                   \
}
FPU_MADDSUB_UPDATE(float32_maddsub_update_excp, float32)
FPU_MADDSUB_UPDATE(float64_maddsub_update_excp, float64)

#define FPU_FMADD(op, madd_flags)                                       \
uint64_t helper_##op(CPUPPCState *env, uint64_t arg1,                   \
                     uint64_t arg2, uint64_t arg3)                      \
{                                                                       \
    uint32_t flags;                                                     \
    float64 ret = float64_muladd(arg1, arg2, arg3, madd_flags,          \
                                 &env->fp_status);                      \
    flags = get_float_exception_flags(&env->fp_status);                 \
    if (flags) {                                                        \
        if (flags & float_flag_invalid) {                               \
            float64_maddsub_update_excp(env, arg1, arg2, arg3,          \
                                        madd_flags, GETPC());           \
        }                                                               \
        do_float_check_status(env, GETPC());                            \
    }                                                                   \
    return ret;                                                         \
}

#define MADD_FLGS 0
#define MSUB_FLGS float_muladd_negate_c
#define NMADD_FLGS float_muladd_negate_result
#define NMSUB_FLGS (float_muladd_negate_c | float_muladd_negate_result)

FPU_FMADD(fmadd, MADD_FLGS)
FPU_FMADD(fnmadd, NMADD_FLGS)
FPU_FMADD(fmsub, MSUB_FLGS)
FPU_FMADD(fnmsub, NMSUB_FLGS)

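/*
 * With the softfloat muladd flags above, the instantiations compute:
 *   fmadd:  arg1 * arg2 + arg3
 *   fmsub:  arg1 * arg2 - arg3       (negate_c)
 *   fnmadd: -(arg1 * arg2 + arg3)    (negate_result)
 *   fnmsub: -(arg1 * arg2 - arg3)    (negate_c | negate_result)
 */
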
/* frsp - frsp. */
uint64_t helper_frsp(CPUPPCState *env, uint64_t arg)
{
    CPU_DoubleU farg;
    float32 f32;

    farg.ll = arg;

    if (unlikely(float64_is_signaling_nan(farg.d, &env->fp_status))) {
        float_invalid_op_vxsnan(env, GETPC());
    }
    f32 = float64_to_float32(farg.d, &env->fp_status);
    farg.d = float32_to_float64(f32, &env->fp_status);

    return farg.ll;
}

/* fsqrt - fsqrt. */
float64 helper_fsqrt(CPUPPCState *env, float64 arg)
{
    float64 ret = float64_sqrt(arg, &env->fp_status);
    int status = get_float_exception_flags(&env->fp_status);

    if (unlikely(status & float_flag_invalid)) {
        if (unlikely(float64_is_any_nan(arg))) {
            if (unlikely(float64_is_signaling_nan(arg, &env->fp_status))) {
                /* sNaN square root */
                float_invalid_op_vxsnan(env, GETPC());
            }
        } else {
            /* Square root of a negative nonzero number */
            float_invalid_op_vxsqrt(env, 1, GETPC());
        }
    }

    return ret;
}

/* fre - fre. */
float64 helper_fre(CPUPPCState *env, float64 arg)
{
    /* "Estimate" the reciprocal with actual division. */
    float64 ret = float64_div(float64_one, arg, &env->fp_status);
    int status = get_float_exception_flags(&env->fp_status);

    if (unlikely(status)) {
        if (status & float_flag_invalid) {
            if (float64_is_signaling_nan(arg, &env->fp_status)) {
                /* sNaN reciprocal */
                float_invalid_op_vxsnan(env, GETPC());
            }
        }
        if (status & float_flag_divbyzero) {
            float_zero_divide_excp(env, GETPC());
            /* For FPSCR.ZE == 0, the result is 1/2. */
            ret = float64_set_sign(float64_half, float64_is_neg(arg));
        }
    }

    return ret;
}

/* fres - fres. */
uint64_t helper_fres(CPUPPCState *env, uint64_t arg)
{
    CPU_DoubleU farg;
    float32 f32;

    farg.ll = arg;

    if (unlikely(float64_is_signaling_nan(farg.d, &env->fp_status))) {
        /* sNaN reciprocal */
        float_invalid_op_vxsnan(env, GETPC());
    }
    farg.d = float64_div(float64_one, farg.d, &env->fp_status);
    f32 = float64_to_float32(farg.d, &env->fp_status);
    farg.d = float32_to_float64(f32, &env->fp_status);

    return farg.ll;
}

/* frsqrte - frsqrte. */
float64 helper_frsqrte(CPUPPCState *env, float64 arg)
{
    /* "Estimate" the reciprocal square root with actual computation. */
    float64 rets = float64_sqrt(arg, &env->fp_status);
    float64 retd = float64_div(float64_one, rets, &env->fp_status);
    int status = get_float_exception_flags(&env->fp_status);

    if (unlikely(status)) {
        if (status & float_flag_invalid) {
            if (float64_is_signaling_nan(arg, &env->fp_status)) {
                /* sNaN reciprocal */
                float_invalid_op_vxsnan(env, GETPC());
            } else {
                /* Square root of a negative nonzero number */
                float_invalid_op_vxsqrt(env, 1, GETPC());
            }
        }
        if (status & float_flag_divbyzero) {
            /* Reciprocal of (square root of) zero. */
            float_zero_divide_excp(env, GETPC());
        }
    }

    return retd;
}

/* fsel - fsel. */
uint64_t helper_fsel(CPUPPCState *env, uint64_t arg1, uint64_t arg2,
                     uint64_t arg3)
{
    CPU_DoubleU farg1;

    farg1.ll = arg1;

    if ((!float64_is_neg(farg1.d) || float64_is_zero(farg1.d)) &&
        !float64_is_any_nan(farg1.d)) {
        return arg2;
    } else {
        return arg3;
    }
}

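/*
 * In other words: fsel returns arg2 when arg1 is greater than or equal
 * to zero (either sign of zero qualifies) and is not a NaN; it returns
 * arg3 for a negative arg1 or any NaN.
 */
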
uint32_t helper_ftdiv(uint64_t fra, uint64_t frb)
{
    int fe_flag = 0;
    int fg_flag = 0;

    if (unlikely(float64_is_infinity(fra) ||
                 float64_is_infinity(frb) ||
                 float64_is_zero(frb))) {
        fe_flag = 1;
        fg_flag = 1;
    } else {
        int e_a = ppc_float64_get_unbiased_exp(fra);
        int e_b = ppc_float64_get_unbiased_exp(frb);

        if (unlikely(float64_is_any_nan(fra) ||
                     float64_is_any_nan(frb))) {
            fe_flag = 1;
        } else if ((e_b <= -1022) || (e_b >= 1021)) {
            fe_flag = 1;
        } else if (!float64_is_zero(fra) &&
                   (((e_a - e_b) >= 1023) ||
                    ((e_a - e_b) <= -1021) ||
                    (e_a <= -970))) {
            fe_flag = 1;
        }

        if (unlikely(float64_is_zero_or_denormal(frb))) {
            /* XB is not zero because of the above check and */
            /* so must be denormalized. */
            fg_flag = 1;
        }
    }

    return 0x8 | (fg_flag ? 4 : 0) | (fe_flag ? 2 : 0);
}

uint32_t helper_ftsqrt(uint64_t frb)
{
    int fe_flag = 0;
    int fg_flag = 0;

    if (unlikely(float64_is_infinity(frb) || float64_is_zero(frb))) {
        fe_flag = 1;
        fg_flag = 1;
    } else {
        int e_b = ppc_float64_get_unbiased_exp(frb);

        if (unlikely(float64_is_any_nan(frb))) {
            fe_flag = 1;
        } else if (unlikely(float64_is_zero(frb))) {
            fe_flag = 1;
        } else if (unlikely(float64_is_neg(frb))) {
            fe_flag = 1;
        } else if (!float64_is_zero(frb) && (e_b <= (-1022 + 52))) {
            fe_flag = 1;
        }

        if (unlikely(float64_is_zero_or_denormal(frb))) {
            /* XB is not zero because of the above check and */
            /* therefore must be denormalized. */
            fg_flag = 1;
        }
    }

    return 0x8 | (fg_flag ? 4 : 0) | (fe_flag ? 2 : 0);
}

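/*
 * Both ftdiv and ftsqrt pack their CR field result as
 * 0x8 | (fg_flag << 2) | (fe_flag << 1), with bit 3 always set.
 * Roughly, fg_flag marks operands (infinities, zero divisors,
 * denormals) for which an estimate cannot be formed at all, while
 * fe_flag marks exponent ranges where an estimate could be unsafe.
 */
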
void helper_fcmpu(CPUPPCState *env, uint64_t arg1, uint64_t arg2,
                  uint32_t crfD)
{
    CPU_DoubleU farg1, farg2;
    uint32_t ret = 0;

    farg1.ll = arg1;
    farg2.ll = arg2;

    if (unlikely(float64_is_any_nan(farg1.d) ||
                 float64_is_any_nan(farg2.d))) {
        ret = 0x01UL;
    } else if (float64_lt(farg1.d, farg2.d, &env->fp_status)) {
        ret = 0x08UL;
    } else if (!float64_le(farg1.d, farg2.d, &env->fp_status)) {
        ret = 0x04UL;
    } else {
        ret = 0x02UL;
    }

    env->fpscr &= ~(0x0F << FPSCR_FPRF);
    env->fpscr |= ret << FPSCR_FPRF;
    env->crf[crfD] = ret;
    if (unlikely(ret == 0x01UL
                 && (float64_is_signaling_nan(farg1.d, &env->fp_status) ||
                     float64_is_signaling_nan(farg2.d, &env->fp_status)))) {
        /* sNaN comparison */
        float_invalid_op_vxsnan(env, GETPC());
    }
}

void helper_fcmpo(CPUPPCState *env, uint64_t arg1, uint64_t arg2,
                  uint32_t crfD)
{
    CPU_DoubleU farg1, farg2;
    uint32_t ret = 0;

    farg1.ll = arg1;
    farg2.ll = arg2;

    if (unlikely(float64_is_any_nan(farg1.d) ||
                 float64_is_any_nan(farg2.d))) {
        ret = 0x01UL;
    } else if (float64_lt(farg1.d, farg2.d, &env->fp_status)) {
        ret = 0x08UL;
    } else if (!float64_le(farg1.d, farg2.d, &env->fp_status)) {
        ret = 0x04UL;
    } else {
        ret = 0x02UL;
    }

    env->fpscr &= ~(0x0F << FPSCR_FPRF);
    env->fpscr |= ret << FPSCR_FPRF;
    env->crf[crfD] = ret;
    if (unlikely(ret == 0x01UL)) {
        float_invalid_op_vxvc(env, 1, GETPC());
        if (float64_is_signaling_nan(farg1.d, &env->fp_status) ||
            float64_is_signaling_nan(farg2.d, &env->fp_status)) {
            /* sNaN comparison */
            float_invalid_op_vxsnan(env, GETPC());
        }
    }
}

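/*
 * Both comparisons produce the usual 4-bit encoding written to
 * CR[crfD] and FPSCR[FPCC]: 0x8 less than, 0x4 greater than, 0x2
 * equal, 0x1 unordered (at least one NaN).  fcmpu only reports VXSNAN
 * for signaling NaNs; fcmpo additionally sets VXVC whenever the
 * result is unordered.
 */
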
/* Single-precision floating-point conversions */
static inline uint32_t efscfsi(CPUPPCState *env, uint32_t val)
{
    CPU_FloatU u;

    u.f = int32_to_float32(val, &env->vec_status);

    return u.l;
}

static inline uint32_t efscfui(CPUPPCState *env, uint32_t val)
{
    CPU_FloatU u;

    u.f = uint32_to_float32(val, &env->vec_status);

    return u.l;
}

static inline int32_t efsctsi(CPUPPCState *env, uint32_t val)
{
    CPU_FloatU u;

    u.l = val;
    /* NaNs are not treated the same way IEEE 754 does */
    if (unlikely(float32_is_quiet_nan(u.f, &env->vec_status))) {
        return 0;
    }

    return float32_to_int32(u.f, &env->vec_status);
}

static inline uint32_t efsctui(CPUPPCState *env, uint32_t val)
{
    CPU_FloatU u;

    u.l = val;
    /* NaNs are not treated the same way IEEE 754 does */
    if (unlikely(float32_is_quiet_nan(u.f, &env->vec_status))) {
        return 0;
    }

    return float32_to_uint32(u.f, &env->vec_status);
}

static inline uint32_t efsctsiz(CPUPPCState *env, uint32_t val)
{
    CPU_FloatU u;

    u.l = val;
    /* NaNs are not treated the same way IEEE 754 does */
    if (unlikely(float32_is_quiet_nan(u.f, &env->vec_status))) {
        return 0;
    }

    return float32_to_int32_round_to_zero(u.f, &env->vec_status);
}

static inline uint32_t efsctuiz(CPUPPCState *env, uint32_t val)
{
    CPU_FloatU u;

    u.l = val;
    /* NaNs are not treated the same way IEEE 754 does */
    if (unlikely(float32_is_quiet_nan(u.f, &env->vec_status))) {
        return 0;
    }

    return float32_to_uint32_round_to_zero(u.f, &env->vec_status);
}

static inline uint32_t efscfsf(CPUPPCState *env, uint32_t val)
{
    CPU_FloatU u;
    float32 tmp;

    u.f = int32_to_float32(val, &env->vec_status);
    tmp = int64_to_float32(1ULL << 32, &env->vec_status);
    u.f = float32_div(u.f, tmp, &env->vec_status);

    return u.l;
}

static inline uint32_t efscfuf(CPUPPCState *env, uint32_t val)
{
    CPU_FloatU u;
    float32 tmp;

    u.f = uint32_to_float32(val, &env->vec_status);
    tmp = uint64_to_float32(1ULL << 32, &env->vec_status);
    u.f = float32_div(u.f, tmp, &env->vec_status);

    return u.l;
}

static inline uint32_t efsctsf(CPUPPCState *env, uint32_t val)
{
    CPU_FloatU u;
    float32 tmp;

    u.l = val;
    /* NaNs are not treated the same way IEEE 754 does */
    if (unlikely(float32_is_quiet_nan(u.f, &env->vec_status))) {
        return 0;
    }
    tmp = uint64_to_float32(1ULL << 32, &env->vec_status);
    u.f = float32_mul(u.f, tmp, &env->vec_status);

    return float32_to_int32(u.f, &env->vec_status);
}

static inline uint32_t efsctuf(CPUPPCState *env, uint32_t val)
{
    CPU_FloatU u;
    float32 tmp;

    u.l = val;
    /* NaNs are not treated the same way IEEE 754 does */
    if (unlikely(float32_is_quiet_nan(u.f, &env->vec_status))) {
        return 0;
    }
    tmp = uint64_to_float32(1ULL << 32, &env->vec_status);
    u.f = float32_mul(u.f, tmp, &env->vec_status);

    return float32_to_uint32(u.f, &env->vec_status);
}

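/*
 * The *f variants above handle fractional fixed-point operands: the
 * loads (efscfsf/efscfuf) divide the converted integer by 2^32, and
 * the stores (efsctsf/efsctuf) multiply by 2^32 before converting
 * back, so the integer bits are interpreted as a binary fraction
 * scaled by 2^32.
 */
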
#define HELPER_SPE_SINGLE_CONV(name)                              \
    uint32_t helper_e##name(CPUPPCState *env, uint32_t val)       \
    {                                                             \
        return e##name(env, val);                                 \
    }
/* efscfsi */
HELPER_SPE_SINGLE_CONV(fscfsi);
/* efscfui */
HELPER_SPE_SINGLE_CONV(fscfui);
/* efscfuf */
HELPER_SPE_SINGLE_CONV(fscfuf);
/* efscfsf */
HELPER_SPE_SINGLE_CONV(fscfsf);
/* efsctsi */
HELPER_SPE_SINGLE_CONV(fsctsi);
/* efsctui */
HELPER_SPE_SINGLE_CONV(fsctui);
/* efsctsiz */
HELPER_SPE_SINGLE_CONV(fsctsiz);
/* efsctuiz */
HELPER_SPE_SINGLE_CONV(fsctuiz);
/* efsctsf */
HELPER_SPE_SINGLE_CONV(fsctsf);
/* efsctuf */
HELPER_SPE_SINGLE_CONV(fsctuf);

#define HELPER_SPE_VECTOR_CONV(name)                                    \
    uint64_t helper_ev##name(CPUPPCState *env, uint64_t val)            \
    {                                                                   \
        return ((uint64_t)e##name(env, val >> 32) << 32) |              \
                (uint64_t)e##name(env, val);                            \
    }
/* evfscfsi */
HELPER_SPE_VECTOR_CONV(fscfsi);
/* evfscfui */
HELPER_SPE_VECTOR_CONV(fscfui);
/* evfscfuf */
HELPER_SPE_VECTOR_CONV(fscfuf);
/* evfscfsf */
HELPER_SPE_VECTOR_CONV(fscfsf);
/* evfsctsi */
HELPER_SPE_VECTOR_CONV(fsctsi);
/* evfsctui */
HELPER_SPE_VECTOR_CONV(fsctui);
/* evfsctsiz */
HELPER_SPE_VECTOR_CONV(fsctsiz);
/* evfsctuiz */
HELPER_SPE_VECTOR_CONV(fsctuiz);
/* evfsctsf */
HELPER_SPE_VECTOR_CONV(fsctsf);
/* evfsctuf */
HELPER_SPE_VECTOR_CONV(fsctuf);

/* Single-precision floating-point arithmetic */
static inline uint32_t efsadd(CPUPPCState *env, uint32_t op1, uint32_t op2)
{
    CPU_FloatU u1, u2;

    u1.l = op1;
    u2.l = op2;
    u1.f = float32_add(u1.f, u2.f, &env->vec_status);
    return u1.l;
}

static inline uint32_t efssub(CPUPPCState *env, uint32_t op1, uint32_t op2)
{
    CPU_FloatU u1, u2;

    u1.l = op1;
    u2.l = op2;
    u1.f = float32_sub(u1.f, u2.f, &env->vec_status);
    return u1.l;
}

static inline uint32_t efsmul(CPUPPCState *env, uint32_t op1, uint32_t op2)
{
    CPU_FloatU u1, u2;

    u1.l = op1;
    u2.l = op2;
    u1.f = float32_mul(u1.f, u2.f, &env->vec_status);
    return u1.l;
}

static inline uint32_t efsdiv(CPUPPCState *env, uint32_t op1, uint32_t op2)
{
    CPU_FloatU u1, u2;

    u1.l = op1;
    u2.l = op2;
    u1.f = float32_div(u1.f, u2.f, &env->vec_status);
    return u1.l;
}

#define HELPER_SPE_SINGLE_ARITH(name)                                     \
    uint32_t helper_e##name(CPUPPCState *env, uint32_t op1, uint32_t op2) \
    {                                                                     \
        return e##name(env, op1, op2);                                    \
    }
/* efsadd */
HELPER_SPE_SINGLE_ARITH(fsadd);
/* efssub */
HELPER_SPE_SINGLE_ARITH(fssub);
/* efsmul */
HELPER_SPE_SINGLE_ARITH(fsmul);
/* efsdiv */
HELPER_SPE_SINGLE_ARITH(fsdiv);

#define HELPER_SPE_VECTOR_ARITH(name)                                        \
    uint64_t helper_ev##name(CPUPPCState *env, uint64_t op1, uint64_t op2)   \
    {                                                                        \
        return ((uint64_t)e##name(env, op1 >> 32, op2 >> 32) << 32) |        \
                (uint64_t)e##name(env, op1, op2);                            \
    }
/* evfsadd */
HELPER_SPE_VECTOR_ARITH(fsadd);
/* evfssub */
HELPER_SPE_VECTOR_ARITH(fssub);
/* evfsmul */
HELPER_SPE_VECTOR_ARITH(fsmul);
/* evfsdiv */
HELPER_SPE_VECTOR_ARITH(fsdiv);

/* Single-precision floating-point comparisons */
static inline uint32_t efscmplt(CPUPPCState *env, uint32_t op1, uint32_t op2)
{
    CPU_FloatU u1, u2;

    u1.l = op1;
    u2.l = op2;
    return float32_lt(u1.f, u2.f, &env->vec_status) ? 4 : 0;
}

static inline uint32_t efscmpgt(CPUPPCState *env, uint32_t op1, uint32_t op2)
{
    CPU_FloatU u1, u2;

    u1.l = op1;
    u2.l = op2;
    return float32_le(u1.f, u2.f, &env->vec_status) ? 0 : 4;
}

static inline uint32_t efscmpeq(CPUPPCState *env, uint32_t op1, uint32_t op2)
{
    CPU_FloatU u1, u2;

    u1.l = op1;
    u2.l = op2;
    return float32_eq(u1.f, u2.f, &env->vec_status) ? 4 : 0;
}

static inline uint32_t efststlt(CPUPPCState *env, uint32_t op1, uint32_t op2)
{
    /* XXX: TODO: ignore special values (NaN, infinities, ...) */
    return efscmplt(env, op1, op2);
}

static inline uint32_t efststgt(CPUPPCState *env, uint32_t op1, uint32_t op2)
{
    /* XXX: TODO: ignore special values (NaN, infinities, ...) */
    return efscmpgt(env, op1, op2);
}

static inline uint32_t efststeq(CPUPPCState *env, uint32_t op1, uint32_t op2)
{
    /* XXX: TODO: ignore special values (NaN, infinities, ...) */
    return efscmpeq(env, op1, op2);
}

#define HELPER_SINGLE_SPE_CMP(name)                                       \
    uint32_t helper_e##name(CPUPPCState *env, uint32_t op1, uint32_t op2) \
    {                                                                     \
        return e##name(env, op1, op2);                                    \
    }
/* efststlt */
HELPER_SINGLE_SPE_CMP(fststlt);
/* efststgt */
HELPER_SINGLE_SPE_CMP(fststgt);
/* efststeq */
HELPER_SINGLE_SPE_CMP(fststeq);
/* efscmplt */
HELPER_SINGLE_SPE_CMP(fscmplt);
/* efscmpgt */
HELPER_SINGLE_SPE_CMP(fscmpgt);
/* efscmpeq */
HELPER_SINGLE_SPE_CMP(fscmpeq);

static inline uint32_t evcmp_merge(int t0, int t1)
{
    return (t0 << 3) | (t1 << 2) | ((t0 | t1) << 1) | (t0 & t1);
}

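/*
 * evcmp_merge folds two per-element results (each 0 or 1) into one CR
 * field: bit 3 = high element, bit 2 = low element, bit 1 = OR of the
 * two, bit 0 = AND of the two.  E.g. t0 = 1, t1 = 0 merges to 0b1010.
 */
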
#define HELPER_VECTOR_SPE_CMP(name)                                        \
    uint32_t helper_ev##name(CPUPPCState *env, uint64_t op1, uint64_t op2) \
    {                                                                      \
        return evcmp_merge(e##name(env, op1 >> 32, op2 >> 32),             \
                           e##name(env, op1, op2));                        \
    }
/* evfststlt */
HELPER_VECTOR_SPE_CMP(fststlt);
/* evfststgt */
HELPER_VECTOR_SPE_CMP(fststgt);
/* evfststeq */
HELPER_VECTOR_SPE_CMP(fststeq);
/* evfscmplt */
HELPER_VECTOR_SPE_CMP(fscmplt);
/* evfscmpgt */
HELPER_VECTOR_SPE_CMP(fscmpgt);
/* evfscmpeq */
HELPER_VECTOR_SPE_CMP(fscmpeq);

/* Double-precision floating-point conversion */
uint64_t helper_efdcfsi(CPUPPCState *env, uint32_t val)
{
    CPU_DoubleU u;

    u.d = int32_to_float64(val, &env->vec_status);

    return u.ll;
}

uint64_t helper_efdcfsid(CPUPPCState *env, uint64_t val)
{
    CPU_DoubleU u;

    u.d = int64_to_float64(val, &env->vec_status);

    return u.ll;
}

uint64_t helper_efdcfui(CPUPPCState *env, uint32_t val)
{
    CPU_DoubleU u;

    u.d = uint32_to_float64(val, &env->vec_status);

    return u.ll;
}

uint64_t helper_efdcfuid(CPUPPCState *env, uint64_t val)
{
    CPU_DoubleU u;

    u.d = uint64_to_float64(val, &env->vec_status);

    return u.ll;
}

uint32_t helper_efdctsi(CPUPPCState *env, uint64_t val)
{
    CPU_DoubleU u;

    u.ll = val;
    /* NaNs are not treated the same way IEEE 754 does */
    if (unlikely(float64_is_any_nan(u.d))) {
        return 0;
    }

    return float64_to_int32(u.d, &env->vec_status);
}

uint32_t helper_efdctui(CPUPPCState *env, uint64_t val)
{
    CPU_DoubleU u;

    u.ll = val;
    /* NaNs are not treated the same way IEEE 754 does */
    if (unlikely(float64_is_any_nan(u.d))) {
        return 0;
    }

    return float64_to_uint32(u.d, &env->vec_status);
}

uint32_t helper_efdctsiz(CPUPPCState *env, uint64_t val)
{
    CPU_DoubleU u;

    u.ll = val;
    /* NaNs are not treated the same way IEEE 754 does */
    if (unlikely(float64_is_any_nan(u.d))) {
        return 0;
    }

    return float64_to_int32_round_to_zero(u.d, &env->vec_status);
}

uint64_t helper_efdctsidz(CPUPPCState *env, uint64_t val)
{
    CPU_DoubleU u;

    u.ll = val;
    /* NaNs are not treated the same way IEEE 754 does */
    if (unlikely(float64_is_any_nan(u.d))) {
        return 0;
    }

    return float64_to_int64_round_to_zero(u.d, &env->vec_status);
}

uint32_t helper_efdctuiz(CPUPPCState *env, uint64_t val)
{
    CPU_DoubleU u;

    u.ll = val;
    /* NaNs are not treated the same way IEEE 754 does */
    if (unlikely(float64_is_any_nan(u.d))) {
        return 0;
    }

    return float64_to_uint32_round_to_zero(u.d, &env->vec_status);
}

uint64_t helper_efdctuidz(CPUPPCState *env, uint64_t val)
{
    CPU_DoubleU u;

    u.ll = val;
    /* NaNs are not treated the same way IEEE 754 does */
    if (unlikely(float64_is_any_nan(u.d))) {
        return 0;
    }

    return float64_to_uint64_round_to_zero(u.d, &env->vec_status);
}

uint64_t helper_efdcfsf(CPUPPCState *env, uint32_t val)
{
    CPU_DoubleU u;
    float64 tmp;

    u.d = int32_to_float64(val, &env->vec_status);
    tmp = int64_to_float64(1ULL << 32, &env->vec_status);
    u.d = float64_div(u.d, tmp, &env->vec_status);

    return u.ll;
}

uint64_t helper_efdcfuf(CPUPPCState *env, uint32_t val)
{
    CPU_DoubleU u;
    float64 tmp;

    u.d = uint32_to_float64(val, &env->vec_status);
    tmp = int64_to_float64(1ULL << 32, &env->vec_status);
    u.d = float64_div(u.d, tmp, &env->vec_status);

    return u.ll;
}

uint32_t helper_efdctsf(CPUPPCState *env, uint64_t val)
{
    CPU_DoubleU u;
    float64 tmp;

    u.ll = val;
    /* NaNs are not treated the same way IEEE 754 does */
    if (unlikely(float64_is_any_nan(u.d))) {
        return 0;
    }
    tmp = uint64_to_float64(1ULL << 32, &env->vec_status);
    u.d = float64_mul(u.d, tmp, &env->vec_status);

    return float64_to_int32(u.d, &env->vec_status);
}

uint32_t helper_efdctuf(CPUPPCState *env, uint64_t val)
{
    CPU_DoubleU u;
    float64 tmp;

    u.ll = val;
    /* NaNs are not treated the same way IEEE 754 does */
    if (unlikely(float64_is_any_nan(u.d))) {
        return 0;
    }
    tmp = uint64_to_float64(1ULL << 32, &env->vec_status);
    u.d = float64_mul(u.d, tmp, &env->vec_status);

    return float64_to_uint32(u.d, &env->vec_status);
}

uint32_t helper_efscfd(CPUPPCState *env, uint64_t val)
{
    CPU_DoubleU u1;
    CPU_FloatU u2;

    u1.ll = val;
    u2.f = float64_to_float32(u1.d, &env->vec_status);

    return u2.l;
}

uint64_t helper_efdcfs(CPUPPCState *env, uint32_t val)
{
    CPU_DoubleU u2;
    CPU_FloatU u1;

    u1.l = val;
    u2.d = float32_to_float64(u1.f, &env->vec_status);

    return u2.ll;
}

/* Double-precision floating-point arithmetic */
uint64_t helper_efdadd(CPUPPCState *env, uint64_t op1, uint64_t op2)
{
    CPU_DoubleU u1, u2;

    u1.ll = op1;
    u2.ll = op2;
    u1.d = float64_add(u1.d, u2.d, &env->vec_status);
    return u1.ll;
}

uint64_t helper_efdsub(CPUPPCState *env, uint64_t op1, uint64_t op2)
{
    CPU_DoubleU u1, u2;

    u1.ll = op1;
    u2.ll = op2;
    u1.d = float64_sub(u1.d, u2.d, &env->vec_status);
    return u1.ll;
}

uint64_t helper_efdmul(CPUPPCState *env, uint64_t op1, uint64_t op2)
{
    CPU_DoubleU u1, u2;

    u1.ll = op1;
    u2.ll = op2;
    u1.d = float64_mul(u1.d, u2.d, &env->vec_status);
    return u1.ll;
}

uint64_t helper_efddiv(CPUPPCState *env, uint64_t op1, uint64_t op2)
{
    CPU_DoubleU u1, u2;

    u1.ll = op1;
    u2.ll = op2;
    u1.d = float64_div(u1.d, u2.d, &env->vec_status);
    return u1.ll;
}

/* Double-precision floating-point helpers */
uint32_t helper_efdtstlt(CPUPPCState *env, uint64_t op1, uint64_t op2)
{
    CPU_DoubleU u1, u2;

    u1.ll = op1;
    u2.ll = op2;
    return float64_lt(u1.d, u2.d, &env->vec_status) ? 4 : 0;
}

uint32_t helper_efdtstgt(CPUPPCState *env, uint64_t op1, uint64_t op2)
{
    CPU_DoubleU u1, u2;

    u1.ll = op1;
    u2.ll = op2;
    return float64_le(u1.d, u2.d, &env->vec_status) ? 0 : 4;
}

uint32_t helper_efdtsteq(CPUPPCState *env, uint64_t op1, uint64_t op2)
{
    CPU_DoubleU u1, u2;

    u1.ll = op1;
    u2.ll = op2;
    return float64_eq_quiet(u1.d, u2.d, &env->vec_status) ? 4 : 0;
}

uint32_t helper_efdcmplt(CPUPPCState *env, uint64_t op1, uint64_t op2)
{
    /* XXX: TODO: test special values (NaN, infinities, ...) */
    return helper_efdtstlt(env, op1, op2);
}

uint32_t helper_efdcmpgt(CPUPPCState *env, uint64_t op1, uint64_t op2)
{
    /* XXX: TODO: test special values (NaN, infinities, ...) */
    return helper_efdtstgt(env, op1, op2);
}

uint32_t helper_efdcmpeq(CPUPPCState *env, uint64_t op1, uint64_t op2)
{
    /* XXX: TODO: test special values (NaN, infinities, ...) */
    return helper_efdtsteq(env, op1, op2);
}

#define float64_to_float64(x, env) x

/* VSX_ADD_SUB - VSX floating point add/subtract
 *   name  - instruction mnemonic
 *   op    - operation (add or sub)
 *   nels  - number of elements (1, 2 or 4)
 *   tp    - type (float32 or float64)
 *   fld   - vsr_t field (VsrD(*) or VsrW(*))
 *   sfprf - set FPRF
 *   r2sp  - round intermediate double-precision result to single
 */
#define VSX_ADD_SUB(name, op, nels, tp, fld, sfprf, r2sp)                    \
void helper_##name(CPUPPCState *env, uint32_t opcode)                        \
{                                                                            \
    ppc_vsr_t xt, xa, xb;                                                    \
    int i;                                                                   \
                                                                             \
    getVSR(xA(opcode), &xa, env);                                            \
    getVSR(xB(opcode), &xb, env);                                            \
    getVSR(xT(opcode), &xt, env);                                            \
    helper_reset_fpstatus(env);                                              \
                                                                             \
    for (i = 0; i < nels; i++) {                                             \
        float_status tstat = env->fp_status;                                 \
        set_float_exception_flags(0, &tstat);                                \
        xt.fld = tp##_##op(xa.fld, xb.fld, &tstat);                          \
        env->fp_status.float_exception_flags |= tstat.float_exception_flags; \
                                                                             \
        if (unlikely(tstat.float_exception_flags & float_flag_invalid)) {    \
            if (tp##_is_infinity(xa.fld) && tp##_is_infinity(xb.fld)) {      \
                float_invalid_op_vxisi(env, sfprf, GETPC());                 \
            } else if (tp##_is_signaling_nan(xa.fld, &tstat) ||              \
                       tp##_is_signaling_nan(xb.fld, &tstat)) {              \
                float_invalid_op_vxsnan(env, GETPC());                       \
            }                                                                \
        }                                                                    \
                                                                             \
        if (r2sp) {                                                          \
            xt.fld = helper_frsp(env, xt.fld);                               \
        }                                                                    \
                                                                             \
        if (sfprf) {                                                         \
            helper_compute_fprf_float64(env, xt.fld);                        \
        }                                                                    \
    }                                                                        \
                                                                             \
    putVSR(xT(opcode), &xt, env);                                            \
    do_float_check_status(env, GETPC());                                     \
}

VSX_ADD_SUB(xsadddp, add, 1, float64, VsrD(0), 1, 0)
VSX_ADD_SUB(xsaddsp, add, 1, float64, VsrD(0), 1, 1)
VSX_ADD_SUB(xvadddp, add, 2, float64, VsrD(i), 0, 0)
VSX_ADD_SUB(xvaddsp, add, 4, float32, VsrW(i), 0, 0)
VSX_ADD_SUB(xssubdp, sub, 1, float64, VsrD(0), 1, 0)
VSX_ADD_SUB(xssubsp, sub, 1, float64, VsrD(0), 1, 1)
VSX_ADD_SUB(xvsubdp, sub, 2, float64, VsrD(i), 0, 0)
VSX_ADD_SUB(xvsubsp, sub, 4, float32, VsrW(i), 0, 0)

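/*
 * In the instantiations above, the scalar single-precision forms
 * (xsaddsp/xssubsp) pass r2sp = 1: the arithmetic is carried out in
 * double precision and the intermediate result is then rounded to
 * single precision via helper_frsp.  The same convention is used by
 * the remaining VSX macros below.
 */
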
void helper_xsaddqp(CPUPPCState *env, uint32_t opcode)
{
    ppc_vsr_t xt, xa, xb;
    float_status tstat;

    getVSR(rA(opcode) + 32, &xa, env);
    getVSR(rB(opcode) + 32, &xb, env);
    getVSR(rD(opcode) + 32, &xt, env);
    helper_reset_fpstatus(env);

    tstat = env->fp_status;
    if (unlikely(Rc(opcode) != 0)) {
        tstat.float_rounding_mode = float_round_to_odd;
    }

    set_float_exception_flags(0, &tstat);
    xt.f128 = float128_add(xa.f128, xb.f128, &tstat);
    env->fp_status.float_exception_flags |= tstat.float_exception_flags;

    if (unlikely(tstat.float_exception_flags & float_flag_invalid)) {
        if (float128_is_infinity(xa.f128) && float128_is_infinity(xb.f128)) {
            float_invalid_op_vxisi(env, 1, GETPC());
        } else if (float128_is_signaling_nan(xa.f128, &tstat) ||
                   float128_is_signaling_nan(xb.f128, &tstat)) {
            float_invalid_op_vxsnan(env, GETPC());
        }
    }

    helper_compute_fprf_float128(env, xt.f128);

    putVSR(rD(opcode) + 32, &xt, env);
    do_float_check_status(env, GETPC());
}

/* VSX_MUL - VSX floating point multiply
 *   op    - instruction mnemonic
 *   nels  - number of elements (1, 2 or 4)
 *   tp    - type (float32 or float64)
 *   fld   - vsr_t field (VsrD(*) or VsrW(*))
 *   sfprf - set FPRF
 *   r2sp  - round intermediate double-precision result to single
 */
#define VSX_MUL(op, nels, tp, fld, sfprf, r2sp)                              \
void helper_##op(CPUPPCState *env, uint32_t opcode)                          \
{                                                                            \
    ppc_vsr_t xt, xa, xb;                                                    \
    int i;                                                                   \
                                                                             \
    getVSR(xA(opcode), &xa, env);                                            \
    getVSR(xB(opcode), &xb, env);                                            \
    getVSR(xT(opcode), &xt, env);                                            \
    helper_reset_fpstatus(env);                                              \
                                                                             \
    for (i = 0; i < nels; i++) {                                             \
        float_status tstat = env->fp_status;                                 \
        set_float_exception_flags(0, &tstat);                                \
        xt.fld = tp##_mul(xa.fld, xb.fld, &tstat);                           \
        env->fp_status.float_exception_flags |= tstat.float_exception_flags; \
                                                                             \
        if (unlikely(tstat.float_exception_flags & float_flag_invalid)) {    \
            if ((tp##_is_infinity(xa.fld) && tp##_is_zero(xb.fld)) ||        \
                (tp##_is_infinity(xb.fld) && tp##_is_zero(xa.fld))) {        \
                float_invalid_op_vximz(env, sfprf, GETPC());                 \
            } else if (tp##_is_signaling_nan(xa.fld, &tstat) ||              \
                       tp##_is_signaling_nan(xb.fld, &tstat)) {              \
                float_invalid_op_vxsnan(env, GETPC());                       \
            }                                                                \
        }                                                                    \
                                                                             \
        if (r2sp) {                                                          \
            xt.fld = helper_frsp(env, xt.fld);                               \
        }                                                                    \
                                                                             \
        if (sfprf) {                                                         \
            helper_compute_fprf_float64(env, xt.fld);                        \
        }                                                                    \
    }                                                                        \
                                                                             \
    putVSR(xT(opcode), &xt, env);                                            \
    do_float_check_status(env, GETPC());                                     \
}

VSX_MUL(xsmuldp, 1, float64, VsrD(0), 1, 0)
VSX_MUL(xsmulsp, 1, float64, VsrD(0), 1, 1)
VSX_MUL(xvmuldp, 2, float64, VsrD(i), 0, 0)
VSX_MUL(xvmulsp, 4, float32, VsrW(i), 0, 0)

void helper_xsmulqp(CPUPPCState *env, uint32_t opcode)
{
    ppc_vsr_t xt, xa, xb;
    float_status tstat;

    getVSR(rA(opcode) + 32, &xa, env);
    getVSR(rB(opcode) + 32, &xb, env);
    getVSR(rD(opcode) + 32, &xt, env);

    helper_reset_fpstatus(env);
    tstat = env->fp_status;
    if (unlikely(Rc(opcode) != 0)) {
        tstat.float_rounding_mode = float_round_to_odd;
    }

    set_float_exception_flags(0, &tstat);
    xt.f128 = float128_mul(xa.f128, xb.f128, &tstat);
    env->fp_status.float_exception_flags |= tstat.float_exception_flags;

    if (unlikely(tstat.float_exception_flags & float_flag_invalid)) {
        if ((float128_is_infinity(xa.f128) && float128_is_zero(xb.f128)) ||
            (float128_is_infinity(xb.f128) && float128_is_zero(xa.f128))) {
            float_invalid_op_vximz(env, 1, GETPC());
        } else if (float128_is_signaling_nan(xa.f128, &tstat) ||
                   float128_is_signaling_nan(xb.f128, &tstat)) {
            float_invalid_op_vxsnan(env, GETPC());
        }
    }
    helper_compute_fprf_float128(env, xt.f128);

    putVSR(rD(opcode) + 32, &xt, env);
    do_float_check_status(env, GETPC());
}

/* VSX_DIV - VSX floating point divide
 *   op    - instruction mnemonic
 *   nels  - number of elements (1, 2 or 4)
 *   tp    - type (float32 or float64)
 *   fld   - vsr_t field (VsrD(*) or VsrW(*))
 *   sfprf - set FPRF
 *   r2sp  - round intermediate double-precision result to single
 */
#define VSX_DIV(op, nels, tp, fld, sfprf, r2sp)                              \
void helper_##op(CPUPPCState *env, uint32_t opcode)                          \
{                                                                            \
    ppc_vsr_t xt, xa, xb;                                                    \
    int i;                                                                   \
                                                                             \
    getVSR(xA(opcode), &xa, env);                                            \
    getVSR(xB(opcode), &xb, env);                                            \
    getVSR(xT(opcode), &xt, env);                                            \
    helper_reset_fpstatus(env);                                              \
                                                                             \
    for (i = 0; i < nels; i++) {                                             \
        float_status tstat = env->fp_status;                                 \
        set_float_exception_flags(0, &tstat);                                \
        xt.fld = tp##_div(xa.fld, xb.fld, &tstat);                           \
        env->fp_status.float_exception_flags |= tstat.float_exception_flags; \
                                                                             \
        if (unlikely(tstat.float_exception_flags & float_flag_invalid)) {    \
            if (tp##_is_infinity(xa.fld) && tp##_is_infinity(xb.fld)) {      \
                float_invalid_op_vxidi(env, sfprf, GETPC());                 \
            } else if (tp##_is_zero(xa.fld) && tp##_is_zero(xb.fld)) {       \
                float_invalid_op_vxzdz(env, sfprf, GETPC());                 \
            } else if (tp##_is_signaling_nan(xa.fld, &tstat) ||              \
                       tp##_is_signaling_nan(xb.fld, &tstat)) {              \
                float_invalid_op_vxsnan(env, GETPC());                       \
            }                                                                \
        }                                                                    \
        if (unlikely(tstat.float_exception_flags & float_flag_divbyzero)) {  \
            float_zero_divide_excp(env, GETPC());                            \
        }                                                                    \
                                                                             \
        if (r2sp) {                                                          \
            xt.fld = helper_frsp(env, xt.fld);                               \
        }                                                                    \
                                                                             \
        if (sfprf) {                                                         \
            helper_compute_fprf_float64(env, xt.fld);                        \
        }                                                                    \
    }                                                                        \
                                                                             \
    putVSR(xT(opcode), &xt, env);                                            \
    do_float_check_status(env, GETPC());                                     \
}

VSX_DIV(xsdivdp, 1, float64, VsrD(0), 1, 0)
VSX_DIV(xsdivsp, 1, float64, VsrD(0), 1, 1)
VSX_DIV(xvdivdp, 2, float64, VsrD(i), 0, 0)
VSX_DIV(xvdivsp, 4, float32, VsrW(i), 0, 0)

void helper_xsdivqp(CPUPPCState *env, uint32_t opcode)
{
    ppc_vsr_t xt, xa, xb;
    float_status tstat;

    getVSR(rA(opcode) + 32, &xa, env);
    getVSR(rB(opcode) + 32, &xb, env);
    getVSR(rD(opcode) + 32, &xt, env);

    helper_reset_fpstatus(env);
    tstat = env->fp_status;
    if (unlikely(Rc(opcode) != 0)) {
        tstat.float_rounding_mode = float_round_to_odd;
    }

    set_float_exception_flags(0, &tstat);
    xt.f128 = float128_div(xa.f128, xb.f128, &tstat);
    env->fp_status.float_exception_flags |= tstat.float_exception_flags;

    if (unlikely(tstat.float_exception_flags & float_flag_invalid)) {
        if (float128_is_infinity(xa.f128) && float128_is_infinity(xb.f128)) {
            float_invalid_op_vxidi(env, 1, GETPC());
        } else if (float128_is_zero(xa.f128) && float128_is_zero(xb.f128)) {
            float_invalid_op_vxzdz(env, 1, GETPC());
        } else if (float128_is_signaling_nan(xa.f128, &tstat) ||
                   float128_is_signaling_nan(xb.f128, &tstat)) {
            float_invalid_op_vxsnan(env, GETPC());
        }
    }
    if (unlikely(tstat.float_exception_flags & float_flag_divbyzero)) {
        float_zero_divide_excp(env, GETPC());
    }

    helper_compute_fprf_float128(env, xt.f128);
    putVSR(rD(opcode) + 32, &xt, env);
    do_float_check_status(env, GETPC());
}

/* VSX_RE - VSX floating point reciprocal estimate
 *   op    - instruction mnemonic
 *   nels  - number of elements (1, 2 or 4)
 *   tp    - type (float32 or float64)
 *   fld   - vsr_t field (VsrD(*) or VsrW(*))
 *   sfprf - set FPRF
 *   r2sp  - round intermediate double-precision result to single
 */
#define VSX_RE(op, nels, tp, fld, sfprf, r2sp)                               \
void helper_##op(CPUPPCState *env, uint32_t opcode)                          \
{                                                                            \
    ppc_vsr_t xt, xb;                                                        \
    int i;                                                                   \
                                                                             \
    getVSR(xB(opcode), &xb, env);                                            \
    getVSR(xT(opcode), &xt, env);                                            \
    helper_reset_fpstatus(env);                                              \
                                                                             \
    for (i = 0; i < nels; i++) {                                             \
        if (unlikely(tp##_is_signaling_nan(xb.fld, &env->fp_status))) {      \
            float_invalid_op_vxsnan(env, GETPC());                           \
        }                                                                    \
        xt.fld = tp##_div(tp##_one, xb.fld, &env->fp_status);                \
                                                                             \
        if (r2sp) {                                                          \
            xt.fld = helper_frsp(env, xt.fld);                               \
        }                                                                    \
                                                                             \
        if (sfprf) {                                                         \
            helper_compute_fprf_float64(env, xt.fld);                        \
        }                                                                    \
    }                                                                        \
                                                                             \
    putVSR(xT(opcode), &xt, env);                                            \
    do_float_check_status(env, GETPC());                                     \
}

VSX_RE(xsredp, 1, float64, VsrD(0), 1, 0)
VSX_RE(xsresp, 1, float64, VsrD(0), 1, 1)
VSX_RE(xvredp, 2, float64, VsrD(i), 0, 0)
VSX_RE(xvresp, 4, float32, VsrW(i), 0, 0)

/* VSX_SQRT - VSX floating point square root
 *   op    - instruction mnemonic
 *   nels  - number of elements (1, 2 or 4)
 *   tp    - type (float32 or float64)
 *   fld   - vsr_t field (VsrD(*) or VsrW(*))
 *   sfprf - set FPRF
 *   r2sp  - round intermediate double-precision result to single
 */
#define VSX_SQRT(op, nels, tp, fld, sfprf, r2sp)                             \
void helper_##op(CPUPPCState *env, uint32_t opcode)                          \
{                                                                            \
    ppc_vsr_t xt, xb;                                                        \
    int i;                                                                   \
                                                                             \
    getVSR(xB(opcode), &xb, env);                                            \
    getVSR(xT(opcode), &xt, env);                                            \
    helper_reset_fpstatus(env);                                              \
                                                                             \
    for (i = 0; i < nels; i++) {                                             \
        float_status tstat = env->fp_status;                                 \
        set_float_exception_flags(0, &tstat);                                \
        xt.fld = tp##_sqrt(xb.fld, &tstat);                                  \
        env->fp_status.float_exception_flags |= tstat.float_exception_flags; \
                                                                             \
        if (unlikely(tstat.float_exception_flags & float_flag_invalid)) {    \
            if (tp##_is_neg(xb.fld) && !tp##_is_zero(xb.fld)) {              \
                float_invalid_op_vxsqrt(env, sfprf, GETPC());                \
            } else if (tp##_is_signaling_nan(xb.fld, &tstat)) {              \
                float_invalid_op_vxsnan(env, GETPC());                       \
            }                                                                \
        }                                                                    \
                                                                             \
        if (r2sp) {                                                          \
            xt.fld = helper_frsp(env, xt.fld);                               \
        }                                                                    \
                                                                             \
        if (sfprf) {                                                         \
            helper_compute_fprf_float64(env, xt.fld);                        \
        }                                                                    \
    }                                                                        \
                                                                             \
    putVSR(xT(opcode), &xt, env);                                            \
    do_float_check_status(env, GETPC());                                     \
}

VSX_SQRT(xssqrtdp, 1, float64, VsrD(0), 1, 0)
VSX_SQRT(xssqrtsp, 1, float64, VsrD(0), 1, 1)
VSX_SQRT(xvsqrtdp, 2, float64, VsrD(i), 0, 0)
VSX_SQRT(xvsqrtsp, 4, float32, VsrW(i), 0, 0)

/* VSX_RSQRTE - VSX floating point reciprocal square root estimate
 *   op    - instruction mnemonic
 *   nels  - number of elements (1, 2 or 4)
 *   tp    - type (float32 or float64)
 *   fld   - vsr_t field (VsrD(*) or VsrW(*))
 *   sfprf - set FPRF
 *   r2sp  - round the intermediate result to single precision
 */
#define VSX_RSQRTE(op, nels, tp, fld, sfprf, r2sp)                           \
void helper_##op(CPUPPCState *env, uint32_t opcode)                          \
{                                                                            \
    ppc_vsr_t xt, xb;                                                        \
    int i;                                                                   \
                                                                             \
    getVSR(xB(opcode), &xb, env);                                            \
    getVSR(xT(opcode), &xt, env);                                            \
    helper_reset_fpstatus(env);                                              \
                                                                             \
    for (i = 0; i < nels; i++) {                                             \
        float_status tstat = env->fp_status;                                 \
        set_float_exception_flags(0, &tstat);                                \
        xt.fld = tp##_sqrt(xb.fld, &tstat);                                  \
        xt.fld = tp##_div(tp##_one, xt.fld, &tstat);                         \
        env->fp_status.float_exception_flags |= tstat.float_exception_flags; \
                                                                             \
        if (unlikely(tstat.float_exception_flags & float_flag_invalid)) {    \
            if (tp##_is_neg(xb.fld) && !tp##_is_zero(xb.fld)) {              \
                float_invalid_op_vxsqrt(env, sfprf, GETPC());                \
            } else if (tp##_is_signaling_nan(xb.fld, &tstat)) {              \
                float_invalid_op_vxsnan(env, GETPC());                       \
            }                                                                \
        }                                                                    \
                                                                             \
        if (r2sp) {                                                          \
            xt.fld = helper_frsp(env, xt.fld);                               \
        }                                                                    \
                                                                             \
        if (sfprf) {                                                         \
            helper_compute_fprf_float64(env, xt.fld);                        \
        }                                                                    \
    }                                                                        \
                                                                             \
    putVSR(xT(opcode), &xt, env);                                            \
    do_float_check_status(env, GETPC());                                     \
}

VSX_RSQRTE(xsrsqrtedp, 1, float64, VsrD(0), 1, 0)
VSX_RSQRTE(xsrsqrtesp, 1, float64, VsrD(0), 1, 1)
VSX_RSQRTE(xvrsqrtedp, 2, float64, VsrD(i), 0, 0)
VSX_RSQRTE(xvrsqrtesp, 4, float32, VsrW(i), 0, 0)

/* VSX_TDIV - VSX floating point test for divide
 *   op    - instruction mnemonic
 *   nels  - number of elements (1, 2 or 4)
 *   tp    - type (float32 or float64)
 *   fld   - vsr_t field (VsrD(*) or VsrW(*))
 *   emin  - minimum unbiased exponent
 *   emax  - maximum unbiased exponent
 *   nbits - number of fraction bits
 */
#define VSX_TDIV(op, nels, tp, fld, emin, emax, nbits)                       \
void helper_##op(CPUPPCState *env, uint32_t opcode)                          \
{                                                                            \
    ppc_vsr_t xa, xb;                                                        \
    int i;                                                                   \
    int fe_flag = 0;                                                         \
    int fg_flag = 0;                                                         \
                                                                             \
    getVSR(xA(opcode), &xa, env);                                            \
    getVSR(xB(opcode), &xb, env);                                            \
                                                                             \
    for (i = 0; i < nels; i++) {                                             \
        if (unlikely(tp##_is_infinity(xa.fld) ||                             \
                     tp##_is_infinity(xb.fld) ||                             \
                     tp##_is_zero(xb.fld))) {                                \
            fe_flag = 1;                                                     \
            fg_flag = 1;                                                     \
        } else {                                                             \
            int e_a = ppc_##tp##_get_unbiased_exp(xa.fld);                   \
            int e_b = ppc_##tp##_get_unbiased_exp(xb.fld);                   \
                                                                             \
            if (unlikely(tp##_is_any_nan(xa.fld) ||                          \
                         tp##_is_any_nan(xb.fld))) {                         \
                fe_flag = 1;                                                 \
            } else if ((e_b <= emin) || (e_b >= (emax - 2))) {               \
                fe_flag = 1;                                                 \
            } else if (!tp##_is_zero(xa.fld) &&                              \
                       (((e_a - e_b) >= emax) ||                             \
                        ((e_a - e_b) <= (emin + 1)) ||                       \
                        (e_a <= (emin + nbits)))) {                          \
                fe_flag = 1;                                                 \
            }                                                                \
                                                                             \
            if (unlikely(tp##_is_zero_or_denormal(xb.fld))) {                \
                /* XB is not zero because of the above check and so          \
                 * must be denormalized.                                     \
                 */                                                          \
                fg_flag = 1;                                                 \
            }                                                                \
        }                                                                    \
    }                                                                        \
                                                                             \
    env->crf[BF(opcode)] = 0x8 | (fg_flag ? 4 : 0) | (fe_flag ? 2 : 0);      \
}

VSX_TDIV(xstdivdp, 1, float64, VsrD(0), -1022, 1023, 52)
VSX_TDIV(xvtdivdp, 2, float64, VsrD(i), -1022, 1023, 52)
VSX_TDIV(xvtdivsp, 4, float32, VsrW(i), -126, 127, 23)

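/*
 * xstdivdp and the vector variants above do not perform the division;
 * they only set a CR field that software can test before choosing a fast
 * path.  The value written to env->crf[BF(opcode)] is
 * 0x8 | fg << 2 | fe << 1: roughly, FG ("fail general") flags infinities
 * and zero or denormal divisors, while FE ("fail exponent") flags
 * exponent combinations that could overflow, underflow or lose precision.
 */
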
/* VSX_TSQRT - VSX floating point test for square root
 *   op    - instruction mnemonic
 *   nels  - number of elements (1, 2 or 4)
 *   tp    - type (float32 or float64)
 *   fld   - vsr_t field (VsrD(*) or VsrW(*))
 *   emin  - minimum unbiased exponent
 *   nbits - number of fraction bits
 */
#define VSX_TSQRT(op, nels, tp, fld, emin, nbits)                            \
void helper_##op(CPUPPCState *env, uint32_t opcode)                          \
{                                                                            \
    ppc_vsr_t xb;                                                            \
    int i;                                                                   \
    int fe_flag = 0;                                                         \
    int fg_flag = 0;                                                         \
                                                                             \
    getVSR(xB(opcode), &xb, env);                                            \
                                                                             \
    for (i = 0; i < nels; i++) {                                             \
        if (unlikely(tp##_is_infinity(xb.fld) ||                             \
                     tp##_is_zero(xb.fld))) {                                \
            fe_flag = 1;                                                     \
            fg_flag = 1;                                                     \
        } else {                                                             \
            int e_b = ppc_##tp##_get_unbiased_exp(xb.fld);                   \
                                                                             \
            /* XB is neither zero nor infinity here.  */                     \
            if (unlikely(tp##_is_any_nan(xb.fld))) {                         \
                fe_flag = 1;                                                 \
            } else if (unlikely(tp##_is_neg(xb.fld))) {                      \
                fe_flag = 1;                                                 \
            } else if (e_b <= (emin + nbits)) {                              \
                fe_flag = 1;                                                 \
            }                                                                \
                                                                             \
            if (unlikely(tp##_is_zero_or_denormal(xb.fld))) {                \
                /* XB is not zero because of the above check and             \
                 * therefore must be denormalized.                           \
                 */                                                          \
                fg_flag = 1;                                                 \
            }                                                                \
        }                                                                    \
    }                                                                        \
                                                                             \
    env->crf[BF(opcode)] = 0x8 | (fg_flag ? 4 : 0) | (fe_flag ? 2 : 0);      \
}

VSX_TSQRT(xstsqrtdp, 1, float64, VsrD(0), -1022, 52)
VSX_TSQRT(xvtsqrtdp, 2, float64, VsrD(i), -1022, 52)
VSX_TSQRT(xvtsqrtsp, 4, float32, VsrW(i), -126, 23)

/* VSX_MADD - VSX floating point multiply/add variations
 *   op       - instruction mnemonic
 *   nels     - number of elements (1, 2 or 4)
 *   tp       - type (float32 or float64)
 *   fld      - vsr_t field (VsrD(*) or VsrW(*))
 *   maddflgs - flags for the float*muladd routine that control the
 *              various forms (madd, msub, nmadd, nmsub)
 *   afrm     - A form (1=A, 0=M)
 *   sfprf    - set FPRF
 *   r2sp     - round the intermediate result to single precision
 */
#define VSX_MADD(op, nels, tp, fld, maddflgs, afrm, sfprf, r2sp)             \
void helper_##op(CPUPPCState *env, uint32_t opcode)                          \
{                                                                            \
    ppc_vsr_t xt_in, xa, xb, xt_out;                                         \
    ppc_vsr_t *b, *c;                                                        \
    int i;                                                                   \
                                                                             \
    if (afrm) { /* AxB + T */                                                \
        b = &xb;                                                             \
        c = &xt_in;                                                          \
    } else { /* AxT + B */                                                   \
        b = &xt_in;                                                          \
        c = &xb;                                                             \
    }                                                                        \
                                                                             \
    getVSR(xA(opcode), &xa, env);                                            \
    getVSR(xB(opcode), &xb, env);                                            \
    getVSR(xT(opcode), &xt_in, env);                                         \
                                                                             \
    xt_out = xt_in;                                                          \
                                                                             \
    helper_reset_fpstatus(env);                                              \
                                                                             \
    for (i = 0; i < nels; i++) {                                             \
        float_status tstat = env->fp_status;                                 \
        set_float_exception_flags(0, &tstat);                                \
        if (r2sp && (tstat.float_rounding_mode == float_round_nearest_even)) { \
            /* Avoid double rounding errors by rounding the intermediate     \
             * result to odd: round toward zero, then fold the inexact       \
             * flag into the sticky least significant bit.                   \
             */                                                              \
            set_float_rounding_mode(float_round_to_zero, &tstat);            \
            xt_out.fld = tp##_muladd(xa.fld, b->fld, c->fld,                 \
                                     maddflgs, &tstat);                      \
            xt_out.fld |= (get_float_exception_flags(&tstat) &               \
                           float_flag_inexact) != 0;                         \
        } else {                                                             \
            xt_out.fld = tp##_muladd(xa.fld, b->fld, c->fld,                 \
                                     maddflgs, &tstat);                      \
        }                                                                    \
        env->fp_status.float_exception_flags |= tstat.float_exception_flags; \
                                                                             \
        if (unlikely(tstat.float_exception_flags & float_flag_invalid)) {    \
            tp##_maddsub_update_excp(env, xa.fld, b->fld,                    \
                                     c->fld, maddflgs, GETPC());             \
        }                                                                    \
                                                                             \
        if (r2sp) {                                                          \
            xt_out.fld = helper_frsp(env, xt_out.fld);                       \
        }                                                                    \
                                                                             \
        if (sfprf) {                                                         \
            helper_compute_fprf_float64(env, xt_out.fld);                    \
        }                                                                    \
    }                                                                        \
    putVSR(xT(opcode), &xt_out, env);                                        \
    do_float_check_status(env, GETPC());                                     \
}

VSX_MADD(xsmaddadp, 1, float64, VsrD(0), MADD_FLGS, 1, 1, 0)
VSX_MADD(xsmaddmdp, 1, float64, VsrD(0), MADD_FLGS, 0, 1, 0)
VSX_MADD(xsmsubadp, 1, float64, VsrD(0), MSUB_FLGS, 1, 1, 0)
VSX_MADD(xsmsubmdp, 1, float64, VsrD(0), MSUB_FLGS, 0, 1, 0)
VSX_MADD(xsnmaddadp, 1, float64, VsrD(0), NMADD_FLGS, 1, 1, 0)
VSX_MADD(xsnmaddmdp, 1, float64, VsrD(0), NMADD_FLGS, 0, 1, 0)
VSX_MADD(xsnmsubadp, 1, float64, VsrD(0), NMSUB_FLGS, 1, 1, 0)
VSX_MADD(xsnmsubmdp, 1, float64, VsrD(0), NMSUB_FLGS, 0, 1, 0)

VSX_MADD(xsmaddasp, 1, float64, VsrD(0), MADD_FLGS, 1, 1, 1)
VSX_MADD(xsmaddmsp, 1, float64, VsrD(0), MADD_FLGS, 0, 1, 1)
VSX_MADD(xsmsubasp, 1, float64, VsrD(0), MSUB_FLGS, 1, 1, 1)
VSX_MADD(xsmsubmsp, 1, float64, VsrD(0), MSUB_FLGS, 0, 1, 1)
VSX_MADD(xsnmaddasp, 1, float64, VsrD(0), NMADD_FLGS, 1, 1, 1)
VSX_MADD(xsnmaddmsp, 1, float64, VsrD(0), NMADD_FLGS, 0, 1, 1)
VSX_MADD(xsnmsubasp, 1, float64, VsrD(0), NMSUB_FLGS, 1, 1, 1)
VSX_MADD(xsnmsubmsp, 1, float64, VsrD(0), NMSUB_FLGS, 0, 1, 1)

VSX_MADD(xvmaddadp, 2, float64, VsrD(i), MADD_FLGS, 1, 0, 0)
VSX_MADD(xvmaddmdp, 2, float64, VsrD(i), MADD_FLGS, 0, 0, 0)
VSX_MADD(xvmsubadp, 2, float64, VsrD(i), MSUB_FLGS, 1, 0, 0)
VSX_MADD(xvmsubmdp, 2, float64, VsrD(i), MSUB_FLGS, 0, 0, 0)
VSX_MADD(xvnmaddadp, 2, float64, VsrD(i), NMADD_FLGS, 1, 0, 0)
VSX_MADD(xvnmaddmdp, 2, float64, VsrD(i), NMADD_FLGS, 0, 0, 0)
VSX_MADD(xvnmsubadp, 2, float64, VsrD(i), NMSUB_FLGS, 1, 0, 0)
VSX_MADD(xvnmsubmdp, 2, float64, VsrD(i), NMSUB_FLGS, 0, 0, 0)

VSX_MADD(xvmaddasp, 4, float32, VsrW(i), MADD_FLGS, 1, 0, 0)
VSX_MADD(xvmaddmsp, 4, float32, VsrW(i), MADD_FLGS, 0, 0, 0)
VSX_MADD(xvmsubasp, 4, float32, VsrW(i), MSUB_FLGS, 1, 0, 0)
VSX_MADD(xvmsubmsp, 4, float32, VsrW(i), MSUB_FLGS, 0, 0, 0)
VSX_MADD(xvnmaddasp, 4, float32, VsrW(i), NMADD_FLGS, 1, 0, 0)
VSX_MADD(xvnmaddmsp, 4, float32, VsrW(i), NMADD_FLGS, 0, 0, 0)
VSX_MADD(xvnmsubasp, 4, float32, VsrW(i), NMSUB_FLGS, 1, 0, 0)
VSX_MADD(xvnmsubmsp, 4, float32, VsrW(i), NMSUB_FLGS, 0, 0, 0)

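/*
 * Why round to odd above: for the single-precision scalar forms the
 * muladd result is first rounded to float64 and then rounded again to
 * float32 by helper_frsp().  If the first rounding (to nearest-even)
 * lands exactly on a float32 tie point, the second rounding can resolve
 * the tie in the wrong direction.  Rounding the intermediate result to
 * odd keeps the sticky information in the least significant bit, which
 * makes the two-step rounding equivalent to a single correctly rounded
 * operation.
 */
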
/* VSX_SCALAR_CMP_DP - VSX scalar floating point compare double precision
 *   op    - instruction mnemonic
 *   cmp   - comparison operation
 *   exp   - expected result of comparison
 *   svxvc - set VXVC bit
 */
#define VSX_SCALAR_CMP_DP(op, cmp, exp, svxvc)                               \
void helper_##op(CPUPPCState *env, uint32_t opcode)                          \
{                                                                            \
    ppc_vsr_t xt, xa, xb;                                                    \
    bool vxsnan_flag = false, vxvc_flag = false, vex_flag = false;           \
                                                                             \
    getVSR(xA(opcode), &xa, env);                                            \
    getVSR(xB(opcode), &xb, env);                                            \
    getVSR(xT(opcode), &xt, env);                                            \
                                                                             \
    if (float64_is_signaling_nan(xa.VsrD(0), &env->fp_status) ||             \
        float64_is_signaling_nan(xb.VsrD(0), &env->fp_status)) {             \
        vxsnan_flag = true;                                                  \
        if (fpscr_ve == 0 && svxvc) {                                        \
            vxvc_flag = true;                                                \
        }                                                                    \
    } else if (svxvc) {                                                      \
        vxvc_flag = float64_is_quiet_nan(xa.VsrD(0), &env->fp_status) ||     \
                    float64_is_quiet_nan(xb.VsrD(0), &env->fp_status);       \
    }                                                                        \
    if (vxsnan_flag) {                                                       \
        float_invalid_op_vxsnan(env, GETPC());                               \
    }                                                                        \
    if (vxvc_flag) {                                                         \
        float_invalid_op_vxvc(env, 0, GETPC());                              \
    }                                                                        \
    vex_flag = fpscr_ve && (vxvc_flag || vxsnan_flag);                       \
                                                                             \
    if (!vex_flag) {                                                         \
        if (float64_##cmp(xb.VsrD(0), xa.VsrD(0), &env->fp_status) == exp) { \
            xt.VsrD(0) = -1;                                                 \
            xt.VsrD(1) = 0;                                                  \
        } else {                                                             \
            xt.VsrD(0) = 0;                                                  \
            xt.VsrD(1) = 0;                                                  \
        }                                                                    \
    }                                                                        \
    putVSR(xT(opcode), &xt, env);                                            \
    do_float_check_status(env, GETPC());                                     \
}

VSX_SCALAR_CMP_DP(xscmpeqdp, eq, 1, 0)
VSX_SCALAR_CMP_DP(xscmpgedp, le, 1, 1)
VSX_SCALAR_CMP_DP(xscmpgtdp, lt, 1, 1)
VSX_SCALAR_CMP_DP(xscmpnedp, eq, 0, 0)

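/*
 * The operand order is deliberately swapped: "xa >= xb" for xscmpgedp is
 * evaluated as float64_le(xb, xa).  Also note that for xscmpnedp
 * (exp == 0) an unordered comparison satisfies float64_eq(...) == 0, so
 * a NaN operand yields the all-ones "not equal" mask, as the ISA
 * requires.
 */
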
void helper_xscmpexpdp(CPUPPCState *env, uint32_t opcode)
{
    ppc_vsr_t xa, xb;
    int64_t exp_a, exp_b;
    uint32_t cc;

    getVSR(xA(opcode), &xa, env);
    getVSR(xB(opcode), &xb, env);

    exp_a = extract64(xa.VsrD(0), 52, 11);
    exp_b = extract64(xb.VsrD(0), 52, 11);

    if (unlikely(float64_is_any_nan(xa.VsrD(0)) ||
                 float64_is_any_nan(xb.VsrD(0)))) {
        cc = CRF_SO;
    } else {
        if (exp_a < exp_b) {
            cc = CRF_LT;
        } else if (exp_a > exp_b) {
            cc = CRF_GT;
        } else {
            cc = CRF_EQ;
        }
    }

    env->fpscr &= ~(0x0F << FPSCR_FPRF);
    env->fpscr |= cc << FPSCR_FPRF;
    env->crf[BF(opcode)] = cc;

    do_float_check_status(env, GETPC());
}

void helper_xscmpexpqp(CPUPPCState *env, uint32_t opcode)
{
    ppc_vsr_t xa, xb;
    int64_t exp_a, exp_b;
    uint32_t cc;

    getVSR(rA(opcode) + 32, &xa, env);
    getVSR(rB(opcode) + 32, &xb, env);

    exp_a = extract64(xa.VsrD(0), 48, 15);
    exp_b = extract64(xb.VsrD(0), 48, 15);

    if (unlikely(float128_is_any_nan(xa.f128) ||
                 float128_is_any_nan(xb.f128))) {
        cc = CRF_SO;
    } else {
        if (exp_a < exp_b) {
            cc = CRF_LT;
        } else if (exp_a > exp_b) {
            cc = CRF_GT;
        } else {
            cc = CRF_EQ;
        }
    }

    env->fpscr &= ~(0x0F << FPSCR_FPRF);
    env->fpscr |= cc << FPSCR_FPRF;
    env->crf[BF(opcode)] = cc;

    do_float_check_status(env, GETPC());
}

#define VSX_SCALAR_CMP(op, ordered)                                          \
void helper_##op(CPUPPCState *env, uint32_t opcode)                          \
{                                                                            \
    ppc_vsr_t xa, xb;                                                        \
    uint32_t cc = 0;                                                         \
    bool vxsnan_flag = false, vxvc_flag = false;                             \
                                                                             \
    helper_reset_fpstatus(env);                                              \
    getVSR(xA(opcode), &xa, env);                                            \
    getVSR(xB(opcode), &xb, env);                                            \
                                                                             \
    if (float64_is_signaling_nan(xa.VsrD(0), &env->fp_status) ||             \
        float64_is_signaling_nan(xb.VsrD(0), &env->fp_status)) {             \
        vxsnan_flag = true;                                                  \
        cc = CRF_SO;                                                         \
        if (fpscr_ve == 0 && ordered) {                                      \
            vxvc_flag = true;                                                \
        }                                                                    \
    } else if (float64_is_quiet_nan(xa.VsrD(0), &env->fp_status) ||          \
               float64_is_quiet_nan(xb.VsrD(0), &env->fp_status)) {          \
        cc = CRF_SO;                                                         \
        if (ordered) {                                                       \
            vxvc_flag = true;                                                \
        }                                                                    \
    }                                                                        \
    if (vxsnan_flag) {                                                       \
        float_invalid_op_vxsnan(env, GETPC());                               \
    }                                                                        \
    if (vxvc_flag) {                                                         \
        float_invalid_op_vxvc(env, 0, GETPC());                              \
    }                                                                        \
                                                                             \
    /* An unordered result already set CRF_SO above; only compare            \
     * ordered operands, so that a NaN cannot also set CRF_GT.               \
     */                                                                      \
    if (cc == 0) {                                                           \
        if (float64_lt(xa.VsrD(0), xb.VsrD(0), &env->fp_status)) {           \
            cc = CRF_LT;                                                     \
        } else if (!float64_le(xa.VsrD(0), xb.VsrD(0), &env->fp_status)) {   \
            cc = CRF_GT;                                                     \
        } else {                                                             \
            cc = CRF_EQ;                                                     \
        }                                                                    \
    }                                                                        \
                                                                             \
    env->fpscr &= ~(0x0F << FPSCR_FPRF);                                     \
    env->fpscr |= cc << FPSCR_FPRF;                                          \
    env->crf[BF(opcode)] = cc;                                               \
                                                                             \
    do_float_check_status(env, GETPC());                                     \
}

VSX_SCALAR_CMP(xscmpodp, 1)
VSX_SCALAR_CMP(xscmpudp, 0)

#define VSX_SCALAR_CMPQ(op, ordered)                                         \
void helper_##op(CPUPPCState *env, uint32_t opcode)                          \
{                                                                            \
    ppc_vsr_t xa, xb;                                                        \
    uint32_t cc = 0;                                                         \
    bool vxsnan_flag = false, vxvc_flag = false;                             \
                                                                             \
    helper_reset_fpstatus(env);                                              \
    getVSR(rA(opcode) + 32, &xa, env);                                       \
    getVSR(rB(opcode) + 32, &xb, env);                                       \
                                                                             \
    if (float128_is_signaling_nan(xa.f128, &env->fp_status) ||               \
        float128_is_signaling_nan(xb.f128, &env->fp_status)) {               \
        vxsnan_flag = true;                                                  \
        cc = CRF_SO;                                                         \
        if (fpscr_ve == 0 && ordered) {                                      \
            vxvc_flag = true;                                                \
        }                                                                    \
    } else if (float128_is_quiet_nan(xa.f128, &env->fp_status) ||            \
               float128_is_quiet_nan(xb.f128, &env->fp_status)) {            \
        cc = CRF_SO;                                                         \
        if (ordered) {                                                       \
            vxvc_flag = true;                                                \
        }                                                                    \
    }                                                                        \
    if (vxsnan_flag) {                                                       \
        float_invalid_op_vxsnan(env, GETPC());                               \
    }                                                                        \
    if (vxvc_flag) {                                                         \
        float_invalid_op_vxvc(env, 0, GETPC());                              \
    }                                                                        \
                                                                             \
    /* As above, skip the comparison for unordered operands.  */             \
    if (cc == 0) {                                                           \
        if (float128_lt(xa.f128, xb.f128, &env->fp_status)) {                \
            cc = CRF_LT;                                                     \
        } else if (!float128_le(xa.f128, xb.f128, &env->fp_status)) {        \
            cc = CRF_GT;                                                     \
        } else {                                                             \
            cc = CRF_EQ;                                                     \
        }                                                                    \
    }                                                                        \
                                                                             \
    env->fpscr &= ~(0x0F << FPSCR_FPRF);                                     \
    env->fpscr |= cc << FPSCR_FPRF;                                          \
    env->crf[BF(opcode)] = cc;                                               \
                                                                             \
    do_float_check_status(env, GETPC());                                     \
}

VSX_SCALAR_CMPQ(xscmpoqp, 1)
VSX_SCALAR_CMPQ(xscmpuqp, 0)

/* VSX_MAX_MIN - VSX floating point maximum/minimum
 *   name - instruction mnemonic
 *   op   - operation (max or min)
 *   nels - number of elements (1, 2 or 4)
 *   tp   - type (float32 or float64)
 *   fld  - vsr_t field (VsrD(*) or VsrW(*))
 */
#define VSX_MAX_MIN(name, op, nels, tp, fld)                                 \
void helper_##name(CPUPPCState *env, uint32_t opcode)                        \
{                                                                            \
    ppc_vsr_t xt, xa, xb;                                                    \
    int i;                                                                   \
                                                                             \
    getVSR(xA(opcode), &xa, env);                                            \
    getVSR(xB(opcode), &xb, env);                                            \
    getVSR(xT(opcode), &xt, env);                                            \
                                                                             \
    for (i = 0; i < nels; i++) {                                             \
        xt.fld = tp##_##op(xa.fld, xb.fld, &env->fp_status);                 \
        if (unlikely(tp##_is_signaling_nan(xa.fld, &env->fp_status) ||       \
                     tp##_is_signaling_nan(xb.fld, &env->fp_status))) {      \
            float_invalid_op_vxsnan(env, GETPC());                           \
        }                                                                    \
    }                                                                        \
                                                                             \
    putVSR(xT(opcode), &xt, env);                                            \
    do_float_check_status(env, GETPC());                                     \
}

VSX_MAX_MIN(xsmaxdp, maxnum, 1, float64, VsrD(0))
VSX_MAX_MIN(xvmaxdp, maxnum, 2, float64, VsrD(i))
VSX_MAX_MIN(xvmaxsp, maxnum, 4, float32, VsrW(i))
VSX_MAX_MIN(xsmindp, minnum, 1, float64, VsrD(0))
VSX_MAX_MIN(xvmindp, minnum, 2, float64, VsrD(i))
VSX_MAX_MIN(xvminsp, minnum, 4, float32, VsrW(i))

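/*
 * The softfloat maxnum/minnum routines used above follow the IEEE
 * 754-2008 maxNum/minNum semantics: when exactly one operand is a quiet
 * NaN, the numeric operand is returned.  A signaling NaN additionally
 * sets VXSNAN via the explicit check, matching the xsmaxdp/xsmindp
 * descriptions in the ISA.
 */
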
#define VSX_MAX_MINC(name, max)                                              \
void helper_##name(CPUPPCState *env, uint32_t opcode)                        \
{                                                                            \
    ppc_vsr_t xt, xa, xb;                                                    \
    bool vxsnan_flag = false, vex_flag = false;                              \
                                                                             \
    getVSR(rA(opcode) + 32, &xa, env);                                       \
    getVSR(rB(opcode) + 32, &xb, env);                                       \
    getVSR(rD(opcode) + 32, &xt, env);                                       \
                                                                             \
    if (unlikely(float64_is_any_nan(xa.VsrD(0)) ||                           \
                 float64_is_any_nan(xb.VsrD(0)))) {                          \
        if (float64_is_signaling_nan(xa.VsrD(0), &env->fp_status) ||         \
            float64_is_signaling_nan(xb.VsrD(0), &env->fp_status)) {         \
            vxsnan_flag = true;                                              \
        }                                                                    \
        xt.VsrD(0) = xb.VsrD(0);                                             \
    } else if ((max &&                                                       \
               !float64_lt(xa.VsrD(0), xb.VsrD(0), &env->fp_status)) ||      \
               (!max &&                                                      \
               float64_lt(xa.VsrD(0), xb.VsrD(0), &env->fp_status))) {       \
        xt.VsrD(0) = xa.VsrD(0);                                             \
    } else {                                                                 \
        xt.VsrD(0) = xb.VsrD(0);                                             \
    }                                                                        \
                                                                             \
    vex_flag = fpscr_ve & vxsnan_flag;                                       \
    if (vxsnan_flag) {                                                       \
        float_invalid_op_vxsnan(env, GETPC());                               \
    }                                                                        \
    if (!vex_flag) {                                                         \
        putVSR(rD(opcode) + 32, &xt, env);                                   \
    }                                                                        \
}

VSX_MAX_MINC(xsmaxcdp, 1);
VSX_MAX_MINC(xsmincdp, 0);

#define VSX_MAX_MINJ(name, max)                                              \
void helper_##name(CPUPPCState *env, uint32_t opcode)                        \
{                                                                            \
    ppc_vsr_t xt, xa, xb;                                                    \
    bool vxsnan_flag = false, vex_flag = false;                              \
                                                                             \
    getVSR(rA(opcode) + 32, &xa, env);                                       \
    getVSR(rB(opcode) + 32, &xb, env);                                       \
    getVSR(rD(opcode) + 32, &xt, env);                                       \
                                                                             \
    if (unlikely(float64_is_any_nan(xa.VsrD(0)))) {                          \
        if (float64_is_signaling_nan(xa.VsrD(0), &env->fp_status)) {         \
            vxsnan_flag = true;                                              \
        }                                                                    \
        xt.VsrD(0) = xa.VsrD(0);                                             \
    } else if (unlikely(float64_is_any_nan(xb.VsrD(0)))) {                   \
        if (float64_is_signaling_nan(xb.VsrD(0), &env->fp_status)) {         \
            vxsnan_flag = true;                                              \
        }                                                                    \
        xt.VsrD(0) = xb.VsrD(0);                                             \
    } else if (float64_is_zero(xa.VsrD(0)) && float64_is_zero(xb.VsrD(0))) { \
        if (max) {                                                           \
            if (!float64_is_neg(xa.VsrD(0)) || !float64_is_neg(xb.VsrD(0))) { \
                xt.VsrD(0) = 0ULL;                                           \
            } else {                                                         \
                xt.VsrD(0) = 0x8000000000000000ULL;                          \
            }                                                                \
        } else {                                                             \
            if (float64_is_neg(xa.VsrD(0)) || float64_is_neg(xb.VsrD(0))) {  \
                xt.VsrD(0) = 0x8000000000000000ULL;                          \
            } else {                                                         \
                xt.VsrD(0) = 0ULL;                                           \
            }                                                                \
        }                                                                    \
    } else if ((max &&                                                       \
               !float64_lt(xa.VsrD(0), xb.VsrD(0), &env->fp_status)) ||      \
               (!max &&                                                      \
               float64_lt(xa.VsrD(0), xb.VsrD(0), &env->fp_status))) {       \
        xt.VsrD(0) = xa.VsrD(0);                                             \
    } else {                                                                 \
        xt.VsrD(0) = xb.VsrD(0);                                             \
    }                                                                        \
                                                                             \
    vex_flag = fpscr_ve & vxsnan_flag;                                       \
    if (vxsnan_flag) {                                                       \
        float_invalid_op_vxsnan(env, GETPC());                               \
    }                                                                        \
    if (!vex_flag) {                                                         \
        putVSR(rD(opcode) + 32, &xt, env);                                   \
    }                                                                        \
}

VSX_MAX_MINJ(xsmaxjdp, 1);
VSX_MAX_MINJ(xsminjdp, 0);

/* VSX_CMP - VSX floating point compare
 *   op    - instruction mnemonic
 *   nels  - number of elements (1, 2 or 4)
 *   tp    - type (float32 or float64)
 *   fld   - vsr_t field (VsrD(*) or VsrW(*))
 *   cmp   - comparison operation
 *   svxvc - set VXVC bit
 *   exp   - expected result of comparison
 */
#define VSX_CMP(op, nels, tp, fld, cmp, svxvc, exp)                          \
void helper_##op(CPUPPCState *env, uint32_t opcode)                          \
{                                                                            \
    ppc_vsr_t xt, xa, xb;                                                    \
    int i;                                                                   \
    int all_true = 1;                                                        \
    int all_false = 1;                                                       \
                                                                             \
    getVSR(xA(opcode), &xa, env);                                            \
    getVSR(xB(opcode), &xb, env);                                            \
    getVSR(xT(opcode), &xt, env);                                            \
                                                                             \
    for (i = 0; i < nels; i++) {                                             \
        if (unlikely(tp##_is_any_nan(xa.fld) ||                              \
                     tp##_is_any_nan(xb.fld))) {                             \
            if (tp##_is_signaling_nan(xa.fld, &env->fp_status) ||            \
                tp##_is_signaling_nan(xb.fld, &env->fp_status)) {            \
                float_invalid_op_vxsnan(env, GETPC());                       \
            }                                                                \
            if (svxvc) {                                                     \
                float_invalid_op_vxvc(env, 0, GETPC());                      \
            }                                                                \
            xt.fld = 0;                                                      \
            all_true = 0;                                                    \
        } else {                                                             \
            if (tp##_##cmp(xb.fld, xa.fld, &env->fp_status) == exp) {        \
                xt.fld = -1;                                                 \
                all_false = 0;                                               \
            } else {                                                         \
                xt.fld = 0;                                                  \
                all_true = 0;                                                \
            }                                                                \
        }                                                                    \
    }                                                                        \
                                                                             \
    putVSR(xT(opcode), &xt, env);                                            \
    if ((opcode >> (31 - 21)) & 1) {                                         \
        env->crf[6] = (all_true ? 0x8 : 0) | (all_false ? 0x2 : 0);          \
    }                                                                        \
    do_float_check_status(env, GETPC());                                     \
}

VSX_CMP(xvcmpeqdp, 2, float64, VsrD(i), eq, 0, 1)
VSX_CMP(xvcmpgedp, 2, float64, VsrD(i), le, 1, 1)
VSX_CMP(xvcmpgtdp, 2, float64, VsrD(i), lt, 1, 1)
VSX_CMP(xvcmpnedp, 2, float64, VsrD(i), eq, 0, 0)
VSX_CMP(xvcmpeqsp, 4, float32, VsrW(i), eq, 0, 1)
VSX_CMP(xvcmpgesp, 4, float32, VsrW(i), le, 1, 1)
VSX_CMP(xvcmpgtsp, 4, float32, VsrW(i), lt, 1, 1)
VSX_CMP(xvcmpnesp, 4, float32, VsrW(i), eq, 0, 0)

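/*
 * (opcode >> (31 - 21)) & 1 extracts the record (Rc) bit of these
 * vector compares.  For the "." forms, CR6 summarises the element-wise
 * result: 0x8 when the comparison is true for all elements, 0x2 when it
 * is false for all elements, 0x0 otherwise.
 */
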
/* VSX_CVT_FP_TO_FP - VSX floating point/floating point conversion
 *   op    - instruction mnemonic
 *   nels  - number of elements (1, 2 or 4)
 *   stp   - source type (float32 or float64)
 *   ttp   - target type (float32 or float64)
 *   sfld  - source vsr_t field
 *   tfld  - target vsr_t field (f32 or f64)
 *   sfprf - set FPRF
 */
#define VSX_CVT_FP_TO_FP(op, nels, stp, ttp, sfld, tfld, sfprf)              \
void helper_##op(CPUPPCState *env, uint32_t opcode)                          \
{                                                                            \
    ppc_vsr_t xt, xb;                                                        \
    int i;                                                                   \
                                                                             \
    getVSR(xB(opcode), &xb, env);                                            \
    getVSR(xT(opcode), &xt, env);                                            \
                                                                             \
    for (i = 0; i < nels; i++) {                                             \
        xt.tfld = stp##_to_##ttp(xb.sfld, &env->fp_status);                  \
        if (unlikely(stp##_is_signaling_nan(xb.sfld,                         \
                                            &env->fp_status))) {             \
            float_invalid_op_vxsnan(env, GETPC());                           \
            xt.tfld = ttp##_snan_to_qnan(xt.tfld);                           \
        }                                                                    \
        if (sfprf) {                                                         \
            helper_compute_fprf_##ttp(env, xt.tfld);                         \
        }                                                                    \
    }                                                                        \
                                                                             \
    putVSR(xT(opcode), &xt, env);                                            \
    do_float_check_status(env, GETPC());                                     \
}

VSX_CVT_FP_TO_FP(xscvdpsp, 1, float64, float32, VsrD(0), VsrW(0), 1)
VSX_CVT_FP_TO_FP(xscvspdp, 1, float32, float64, VsrW(0), VsrD(0), 1)
VSX_CVT_FP_TO_FP(xvcvdpsp, 2, float64, float32, VsrD(i), VsrW(2 * i), 0)
VSX_CVT_FP_TO_FP(xvcvspdp, 2, float32, float64, VsrW(2 * i), VsrD(i), 0)

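/*
 * The VsrW(2 * i) indexing above mirrors how the hardware spreads and
 * packs elements for the double <-> single vector conversions: the two
 * float32 results of xvcvdpsp land in words 0 and 2 of the target, and
 * xvcvspdp reads its two float32 sources from words 0 and 2.
 */
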
/* VSX_CVT_FP_TO_FP_VECTOR - VSX floating point/floating point conversion
 *   op    - instruction mnemonic
 *   nels  - number of elements (1, 2 or 4)
 *   stp   - source type (float32 or float64)
 *   ttp   - target type (float32 or float64)
 *   sfld  - source vsr_t field
 *   tfld  - target vsr_t field (f32 or f64)
 *   sfprf - set FPRF
 */
#define VSX_CVT_FP_TO_FP_VECTOR(op, nels, stp, ttp, sfld, tfld, sfprf)       \
void helper_##op(CPUPPCState *env, uint32_t opcode)                          \
{                                                                            \
    ppc_vsr_t xt, xb;                                                        \
    int i;                                                                   \
                                                                             \
    getVSR(rB(opcode) + 32, &xb, env);                                       \
    getVSR(rD(opcode) + 32, &xt, env);                                       \
                                                                             \
    for (i = 0; i < nels; i++) {                                             \
        xt.tfld = stp##_to_##ttp(xb.sfld, &env->fp_status);                  \
        if (unlikely(stp##_is_signaling_nan(xb.sfld,                         \
                                            &env->fp_status))) {             \
            float_invalid_op_vxsnan(env, GETPC());                           \
            xt.tfld = ttp##_snan_to_qnan(xt.tfld);                           \
        }                                                                    \
        if (sfprf) {                                                         \
            helper_compute_fprf_##ttp(env, xt.tfld);                         \
        }                                                                    \
    }                                                                        \
                                                                             \
    putVSR(rD(opcode) + 32, &xt, env);                                       \
    do_float_check_status(env, GETPC());                                     \
}

VSX_CVT_FP_TO_FP_VECTOR(xscvdpqp, 1, float64, float128, VsrD(0), f128, 1)

/* VSX_CVT_FP_TO_FP_HP - VSX floating point/floating point conversion
 *                       involving one half precision value
 *   op    - instruction mnemonic
 *   nels  - number of elements (1, 2 or 4)
 *   stp   - source type
 *   ttp   - target type
 *   sfld  - source vsr_t field
 *   tfld  - target vsr_t field
 *   sfprf - set FPRF
 */
#define VSX_CVT_FP_TO_FP_HP(op, nels, stp, ttp, sfld, tfld, sfprf)           \
void helper_##op(CPUPPCState *env, uint32_t opcode)                          \
{                                                                            \
    ppc_vsr_t xt, xb;                                                        \
    int i;                                                                   \
                                                                             \
    getVSR(xB(opcode), &xb, env);                                            \
    memset(&xt, 0, sizeof(xt));                                              \
                                                                             \
    for (i = 0; i < nels; i++) {                                             \
        xt.tfld = stp##_to_##ttp(xb.sfld, 1, &env->fp_status);               \
        if (unlikely(stp##_is_signaling_nan(xb.sfld,                         \
                                            &env->fp_status))) {             \
            float_invalid_op_vxsnan(env, GETPC());                           \
            xt.tfld = ttp##_snan_to_qnan(xt.tfld);                           \
        }                                                                    \
        if (sfprf) {                                                         \
            helper_compute_fprf_##ttp(env, xt.tfld);                         \
        }                                                                    \
    }                                                                        \
                                                                             \
    putVSR(xT(opcode), &xt, env);                                            \
    do_float_check_status(env, GETPC());                                     \
}

VSX_CVT_FP_TO_FP_HP(xscvdphp, 1, float64, float16, VsrD(0), VsrH(3), 1)
VSX_CVT_FP_TO_FP_HP(xscvhpdp, 1, float16, float64, VsrH(3), VsrD(0), 1)
VSX_CVT_FP_TO_FP_HP(xvcvsphp, 4, float32, float16, VsrW(i), VsrH(2 * i + 1), 0)
VSX_CVT_FP_TO_FP_HP(xvcvhpsp, 4, float16, float32, VsrH(2 * i + 1), VsrW(i), 0)

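/*
 * The literal 1 passed as the second argument of the float16 conversion
 * routines above is softfloat's "ieee" flag: it selects the IEEE
 * 754-2008 binary16 interpretation (5-bit exponent, with infinity and
 * NaN support) rather than the alternative half-precision format.
 */
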
/*
 * xscvqpdp isn't using VSX_CVT_FP_TO_FP() because xscvqpdpo will be
 * added to this later.
 */
void helper_xscvqpdp(CPUPPCState *env, uint32_t opcode)
{
    ppc_vsr_t xt, xb;
    float_status tstat;

    getVSR(rB(opcode) + 32, &xb, env);
    memset(&xt, 0, sizeof(xt));

    tstat = env->fp_status;
    if (unlikely(Rc(opcode) != 0)) {
        tstat.float_rounding_mode = float_round_to_odd;
    }

    xt.VsrD(0) = float128_to_float64(xb.f128, &tstat);
    env->fp_status.float_exception_flags |= tstat.float_exception_flags;
    if (unlikely(float128_is_signaling_nan(xb.f128, &tstat))) {
        float_invalid_op_vxsnan(env, GETPC());
        xt.VsrD(0) = float64_snan_to_qnan(xt.VsrD(0));
    }
    helper_compute_fprf_float64(env, xt.VsrD(0));

    putVSR(rD(opcode) + 32, &xt, env);
    do_float_check_status(env, GETPC());
}

uint64_t helper_xscvdpspn(CPUPPCState *env, uint64_t xb)
{
    float_status tstat = env->fp_status;
    set_float_exception_flags(0, &tstat);

    return (uint64_t)float64_to_float32(xb, &tstat) << 32;
}

uint64_t helper_xscvspdpn(CPUPPCState *env, uint64_t xb)
{
    float_status tstat = env->fp_status;
    set_float_exception_flags(0, &tstat);

    return float32_to_float64(xb >> 32, &tstat);
}

/* VSX_CVT_FP_TO_INT - VSX floating point to integer conversion
 *   op    - instruction mnemonic
 *   nels  - number of elements (1, 2 or 4)
 *   stp   - source type (float32 or float64)
 *   ttp   - target type (int32, uint32, int64 or uint64)
 *   sfld  - source vsr_t field
 *   tfld  - target vsr_t field
 *   rnan  - resulting NaN
 */
#define VSX_CVT_FP_TO_INT(op, nels, stp, ttp, sfld, tfld, rnan)              \
void helper_##op(CPUPPCState *env, uint32_t opcode)                          \
{                                                                            \
    ppc_vsr_t xt, xb;                                                        \
    int i;                                                                   \
                                                                             \
    getVSR(xB(opcode), &xb, env);                                            \
    getVSR(xT(opcode), &xt, env);                                            \
                                                                             \
    for (i = 0; i < nels; i++) {                                             \
        if (unlikely(stp##_is_any_nan(xb.sfld))) {                           \
            if (stp##_is_signaling_nan(xb.sfld, &env->fp_status)) {          \
                float_invalid_op_vxsnan(env, GETPC());                       \
            }                                                                \
            float_invalid_op_vxcvi(env, 0, GETPC());                         \
            xt.tfld = rnan;                                                  \
        } else {                                                             \
            xt.tfld = stp##_to_##ttp##_round_to_zero(xb.sfld,                \
                                                     &env->fp_status);       \
            if (env->fp_status.float_exception_flags & float_flag_invalid) { \
                float_invalid_op_vxcvi(env, 0, GETPC());                     \
            }                                                                \
        }                                                                    \
    }                                                                        \
                                                                             \
    putVSR(xT(opcode), &xt, env);                                            \
    do_float_check_status(env, GETPC());                                     \
}

VSX_CVT_FP_TO_INT(xscvdpsxds, 1, float64, int64, VsrD(0), VsrD(0),           \
                  0x8000000000000000ULL)
VSX_CVT_FP_TO_INT(xscvdpsxws, 1, float64, int32, VsrD(0), VsrW(1),           \
                  0x80000000U)
VSX_CVT_FP_TO_INT(xscvdpuxds, 1, float64, uint64, VsrD(0), VsrD(0), 0ULL)
VSX_CVT_FP_TO_INT(xscvdpuxws, 1, float64, uint32, VsrD(0), VsrW(1), 0U)
VSX_CVT_FP_TO_INT(xvcvdpsxds, 2, float64, int64, VsrD(i), VsrD(i),           \
                  0x8000000000000000ULL)
VSX_CVT_FP_TO_INT(xvcvdpsxws, 2, float64, int32, VsrD(i), VsrW(2 * i),       \
                  0x80000000U)
VSX_CVT_FP_TO_INT(xvcvdpuxds, 2, float64, uint64, VsrD(i), VsrD(i), 0ULL)
VSX_CVT_FP_TO_INT(xvcvdpuxws, 2, float64, uint32, VsrD(i), VsrW(2 * i), 0U)
VSX_CVT_FP_TO_INT(xvcvspsxds, 2, float32, int64, VsrW(2 * i), VsrD(i),       \
                  0x8000000000000000ULL)
VSX_CVT_FP_TO_INT(xvcvspsxws, 4, float32, int32, VsrW(i), VsrW(i), 0x80000000U)
VSX_CVT_FP_TO_INT(xvcvspuxds, 2, float32, uint64, VsrW(2 * i), VsrD(i), 0ULL)
VSX_CVT_FP_TO_INT(xvcvspuxws, 4, float32, uint32, VsrW(i), VsrW(i), 0U)

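/*
 * rnan is the result the ISA mandates for a NaN input: the most negative
 * integer for signed targets and zero for unsigned targets.  Ordinary
 * out-of-range inputs are handled by the softfloat *_round_to_zero
 * conversions themselves, which saturate the result and raise
 * float_flag_invalid, turned into VXCVI above.
 */
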
/* VSX_CVT_FP_TO_INT_VECTOR - VSX floating point to integer conversion
 *   op    - instruction mnemonic
 *   stp   - source type (float32 or float64)
 *   ttp   - target type (int32, uint32, int64 or uint64)
 *   sfld  - source vsr_t field
 *   tfld  - target vsr_t field
 *   rnan  - resulting NaN
 */
#define VSX_CVT_FP_TO_INT_VECTOR(op, stp, ttp, sfld, tfld, rnan)             \
void helper_##op(CPUPPCState *env, uint32_t opcode)                          \
{                                                                            \
    ppc_vsr_t xt, xb;                                                        \
                                                                             \
    getVSR(rB(opcode) + 32, &xb, env);                                       \
    memset(&xt, 0, sizeof(xt));                                              \
                                                                             \
    if (unlikely(stp##_is_any_nan(xb.sfld))) {                               \
        if (stp##_is_signaling_nan(xb.sfld, &env->fp_status)) {              \
            float_invalid_op_vxsnan(env, GETPC());                           \
        }                                                                    \
        float_invalid_op_vxcvi(env, 0, GETPC());                             \
        xt.tfld = rnan;                                                      \
    } else {                                                                 \
        xt.tfld = stp##_to_##ttp##_round_to_zero(xb.sfld,                    \
                                                 &env->fp_status);           \
        if (env->fp_status.float_exception_flags & float_flag_invalid) {     \
            float_invalid_op_vxcvi(env, 0, GETPC());                         \
        }                                                                    \
    }                                                                        \
                                                                             \
    putVSR(rD(opcode) + 32, &xt, env);                                       \
    do_float_check_status(env, GETPC());                                     \
}

VSX_CVT_FP_TO_INT_VECTOR(xscvqpsdz, float128, int64, f128, VsrD(0),          \
                         0x8000000000000000ULL)

VSX_CVT_FP_TO_INT_VECTOR(xscvqpswz, float128, int32, f128, VsrD(0),          \
                         0xffffffff80000000ULL)
VSX_CVT_FP_TO_INT_VECTOR(xscvqpudz, float128, uint64, f128, VsrD(0), 0x0ULL)
VSX_CVT_FP_TO_INT_VECTOR(xscvqpuwz, float128, uint32, f128, VsrD(0), 0x0ULL)

/* VSX_CVT_INT_TO_FP - VSX integer to floating point conversion
 *   op    - instruction mnemonic
 *   nels  - number of elements (1, 2 or 4)
 *   stp   - source type (int32, uint32, int64 or uint64)
 *   ttp   - target type (float32 or float64)
 *   sfld  - source vsr_t field
 *   tfld  - target vsr_t field
 *   sfprf - set FPRF
 *   r2sp  - round the intermediate result to single precision
 */
#define VSX_CVT_INT_TO_FP(op, nels, stp, ttp, sfld, tfld, sfprf, r2sp)       \
void helper_##op(CPUPPCState *env, uint32_t opcode)                          \
{                                                                            \
    ppc_vsr_t xt, xb;                                                        \
    int i;                                                                   \
                                                                             \
    getVSR(xB(opcode), &xb, env);                                            \
    getVSR(xT(opcode), &xt, env);                                            \
                                                                             \
    for (i = 0; i < nels; i++) {                                             \
        xt.tfld = stp##_to_##ttp(xb.sfld, &env->fp_status);                  \
        if (r2sp) {                                                          \
            xt.tfld = helper_frsp(env, xt.tfld);                             \
        }                                                                    \
        if (sfprf) {                                                         \
            helper_compute_fprf_float64(env, xt.tfld);                       \
        }                                                                    \
    }                                                                        \
                                                                             \
    putVSR(xT(opcode), &xt, env);                                            \
    do_float_check_status(env, GETPC());                                     \
}

VSX_CVT_INT_TO_FP(xscvsxddp, 1, int64, float64, VsrD(0), VsrD(0), 1, 0)
VSX_CVT_INT_TO_FP(xscvuxddp, 1, uint64, float64, VsrD(0), VsrD(0), 1, 0)
VSX_CVT_INT_TO_FP(xscvsxdsp, 1, int64, float64, VsrD(0), VsrD(0), 1, 1)
VSX_CVT_INT_TO_FP(xscvuxdsp, 1, uint64, float64, VsrD(0), VsrD(0), 1, 1)
VSX_CVT_INT_TO_FP(xvcvsxddp, 2, int64, float64, VsrD(i), VsrD(i), 0, 0)
VSX_CVT_INT_TO_FP(xvcvuxddp, 2, uint64, float64, VsrD(i), VsrD(i), 0, 0)
VSX_CVT_INT_TO_FP(xvcvsxwdp, 2, int32, float64, VsrW(2 * i), VsrD(i), 0, 0)
VSX_CVT_INT_TO_FP(xvcvuxwdp, 2, uint64, float64, VsrW(2 * i), VsrD(i), 0, 0)
VSX_CVT_INT_TO_FP(xvcvsxdsp, 2, int64, float32, VsrD(i), VsrW(2 * i), 0, 0)
VSX_CVT_INT_TO_FP(xvcvuxdsp, 2, uint64, float32, VsrD(i), VsrW(2 * i), 0, 0)
VSX_CVT_INT_TO_FP(xvcvsxwsp, 4, int32, float32, VsrW(i), VsrW(i), 0, 0)
VSX_CVT_INT_TO_FP(xvcvuxwsp, 4, uint32, float32, VsrW(i), VsrW(i), 0, 0)

/* VSX_CVT_INT_TO_FP_VECTOR - VSX integer to floating point conversion
 *   op   - instruction mnemonic
 *   stp  - source type (int32, uint32, int64 or uint64)
 *   ttp  - target type (float128)
 *   sfld - source vsr_t field
 *   tfld - target vsr_t field
 */
#define VSX_CVT_INT_TO_FP_VECTOR(op, stp, ttp, sfld, tfld)                   \
void helper_##op(CPUPPCState *env, uint32_t opcode)                          \
{                                                                            \
    ppc_vsr_t xt, xb;                                                        \
                                                                             \
    getVSR(rB(opcode) + 32, &xb, env);                                       \
    getVSR(rD(opcode) + 32, &xt, env);                                       \
                                                                             \
    xt.tfld = stp##_to_##ttp(xb.sfld, &env->fp_status);                      \
    helper_compute_fprf_##ttp(env, xt.tfld);                                 \
                                                                             \
    putVSR(rD(opcode) + 32, &xt, env);                                       \
    do_float_check_status(env, GETPC());                                     \
}

VSX_CVT_INT_TO_FP_VECTOR(xscvsdqp, int64, float128, VsrD(0), f128)
VSX_CVT_INT_TO_FP_VECTOR(xscvudqp, uint64, float128, VsrD(0), f128)

3129 /* For "use current rounding mode", define a value that will not be one of
3130 * the existing rounding model enums.
3132 #define FLOAT_ROUND_CURRENT (float_round_nearest_even + float_round_down + \
3133 float_round_up + float_round_to_zero)
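/*
 * With the softfloat enum values at the time of writing (0, 1, 2 and 3
 * for the four modes summed above) the sentinel evaluates to 6, which
 * collides with no real rounding mode; if the enum is ever renumbered
 * this definition must be revisited.
 */
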
/* VSX_ROUND - VSX floating point round
 *   op    - instruction mnemonic
 *   nels  - number of elements (1, 2 or 4)
 *   tp    - type (float32 or float64)
 *   fld   - vsr_t field (VsrD(*) or VsrW(*))
 *   rmode - rounding mode
 *   sfprf - set FPRF
 */
#define VSX_ROUND(op, nels, tp, fld, rmode, sfprf)                           \
void helper_##op(CPUPPCState *env, uint32_t opcode)                          \
{                                                                            \
    ppc_vsr_t xt, xb;                                                        \
    int i;                                                                   \
    getVSR(xB(opcode), &xb, env);                                            \
    getVSR(xT(opcode), &xt, env);                                            \
                                                                             \
    if (rmode != FLOAT_ROUND_CURRENT) {                                      \
        set_float_rounding_mode(rmode, &env->fp_status);                     \
    }                                                                        \
                                                                             \
    for (i = 0; i < nels; i++) {                                             \
        if (unlikely(tp##_is_signaling_nan(xb.fld,                           \
                                           &env->fp_status))) {              \
            float_invalid_op_vxsnan(env, GETPC());                           \
            xt.fld = tp##_snan_to_qnan(xb.fld);                              \
        } else {                                                             \
            xt.fld = tp##_round_to_int(xb.fld, &env->fp_status);             \
        }                                                                    \
        if (sfprf) {                                                         \
            helper_compute_fprf_float64(env, xt.fld);                        \
        }                                                                    \
    }                                                                        \
                                                                             \
    /* If this is not a "use current rounding mode" instruction,             \
     * then inhibit setting of the XX bit and restore rounding               \
     * mode from FPSCR.                                                      \
     */                                                                      \
    if (rmode != FLOAT_ROUND_CURRENT) {                                      \
        fpscr_set_rounding_mode(env);                                        \
        env->fp_status.float_exception_flags &= ~float_flag_inexact;         \
    }                                                                        \
                                                                             \
    putVSR(xT(opcode), &xt, env);                                            \
    do_float_check_status(env, GETPC());                                     \
}

VSX_ROUND(xsrdpi, 1, float64, VsrD(0), float_round_ties_away, 1)
VSX_ROUND(xsrdpic, 1, float64, VsrD(0), FLOAT_ROUND_CURRENT, 1)
VSX_ROUND(xsrdpim, 1, float64, VsrD(0), float_round_down, 1)
VSX_ROUND(xsrdpip, 1, float64, VsrD(0), float_round_up, 1)
VSX_ROUND(xsrdpiz, 1, float64, VsrD(0), float_round_to_zero, 1)

VSX_ROUND(xvrdpi, 2, float64, VsrD(i), float_round_ties_away, 0)
VSX_ROUND(xvrdpic, 2, float64, VsrD(i), FLOAT_ROUND_CURRENT, 0)
VSX_ROUND(xvrdpim, 2, float64, VsrD(i), float_round_down, 0)
VSX_ROUND(xvrdpip, 2, float64, VsrD(i), float_round_up, 0)
VSX_ROUND(xvrdpiz, 2, float64, VsrD(i), float_round_to_zero, 0)

VSX_ROUND(xvrspi, 4, float32, VsrW(i), float_round_ties_away, 0)
VSX_ROUND(xvrspic, 4, float32, VsrW(i), FLOAT_ROUND_CURRENT, 0)
VSX_ROUND(xvrspim, 4, float32, VsrW(i), float_round_down, 0)
VSX_ROUND(xvrspip, 4, float32, VsrW(i), float_round_up, 0)
VSX_ROUND(xvrspiz, 4, float32, VsrW(i), float_round_to_zero, 0)

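/*
 * xsrdpi, xvrdpi and xvrspi use float_round_ties_away because the ISA
 * defines these "round to nearest" round-to-integer forms as rounding
 * ties away from zero, unlike the round-to-nearest-even mode that
 * FPSCR[RN] can select for the "use current rounding mode" variants.
 */
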
uint64_t helper_xsrsp(CPUPPCState *env, uint64_t xb)
{
    helper_reset_fpstatus(env);

    uint64_t xt = helper_frsp(env, xb);

    helper_compute_fprf_float64(env, xt);
    do_float_check_status(env, GETPC());
    return xt;
}

#define VSX_XXPERM(op, indexed)                                              \
void helper_##op(CPUPPCState *env, uint32_t opcode)                          \
{                                                                            \
    ppc_vsr_t xt, xa, pcv, xto;                                              \
    int i, idx;                                                              \
                                                                             \
    getVSR(xA(opcode), &xa, env);                                            \
    getVSR(xT(opcode), &xt, env);                                            \
    getVSR(xB(opcode), &pcv, env);                                           \
                                                                             \
    for (i = 0; i < 16; i++) {                                               \
        idx = pcv.VsrB(i) & 0x1F;                                            \
        if (indexed) {                                                       \
            idx = 31 - idx;                                                  \
        }                                                                    \
        xto.VsrB(i) = (idx <= 15) ? xa.VsrB(idx) : xt.VsrB(idx - 16);        \
    }                                                                        \
    putVSR(xT(opcode), &xto, env);                                           \
}

VSX_XXPERM(xxperm, 0)
VSX_XXPERM(xxpermr, 1)

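/*
 * Illustration: with xxperm, a permute-control byte of 0x03 copies byte 3
 * of xA into the corresponding result byte, while 0x13 (19) selects
 * byte 3 of the original xT; only the low five bits of each control byte
 * are significant.  xxpermr applies the same lookup with the index
 * reversed (31 - idx).
 */
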
void helper_xvxsigsp(CPUPPCState *env, uint32_t opcode)
{
    ppc_vsr_t xt, xb;
    uint32_t exp, i, fraction;

    getVSR(xB(opcode), &xb, env);
    memset(&xt, 0, sizeof(xt));

    for (i = 0; i < 4; i++) {
        exp = (xb.VsrW(i) >> 23) & 0xFF;
        fraction = xb.VsrW(i) & 0x7FFFFF;
        if (exp != 0 && exp != 255) {
            xt.VsrW(i) = fraction | 0x00800000;
        } else {
            xt.VsrW(i) = fraction;
        }
    }
    putVSR(xT(opcode), &xt, env);
}

/* VSX_TEST_DC - VSX floating point test data class
 *   op      - instruction mnemonic
 *   nels    - number of elements (1, 2 or 4)
 *   xbn     - VSR register number
 *   tp      - type (float32 or float64)
 *   fld     - vsr_t field (VsrD(*) or VsrW(*))
 *   tfld    - target vsr_t field (VsrD(*) or VsrW(*))
 *   fld_max - target field max
 *   scrf    - set result in CR and FPCC
 */
#define VSX_TEST_DC(op, nels, xbn, tp, fld, tfld, fld_max, scrf)             \
void helper_##op(CPUPPCState *env, uint32_t opcode)                          \
{                                                                            \
    ppc_vsr_t xt, xb;                                                        \
    uint32_t i, sign, dcmx;                                                  \
    uint32_t cc, match = 0;                                                  \
                                                                             \
    getVSR(xbn, &xb, env);                                                   \
    if (!scrf) {                                                             \
        memset(&xt, 0, sizeof(xt));                                          \
        dcmx = DCMX_XV(opcode);                                              \
    } else {                                                                 \
        dcmx = DCMX(opcode);                                                 \
    }                                                                        \
                                                                             \
    for (i = 0; i < nels; i++) {                                             \
        sign = tp##_is_neg(xb.fld);                                          \
        if (tp##_is_any_nan(xb.fld)) {                                       \
            match = extract32(dcmx, 6, 1);                                   \
        } else if (tp##_is_infinity(xb.fld)) {                               \
            match = extract32(dcmx, 4 + !sign, 1);                           \
        } else if (tp##_is_zero(xb.fld)) {                                   \
            match = extract32(dcmx, 2 + !sign, 1);                           \
        } else if (tp##_is_zero_or_denormal(xb.fld)) {                       \
            match = extract32(dcmx, 0 + !sign, 1);                           \
        }                                                                    \
                                                                             \
        if (scrf) {                                                          \
            cc = sign << CRF_LT_BIT | match << CRF_EQ_BIT;                   \
            env->fpscr &= ~(0x0F << FPSCR_FPRF);                             \
            env->fpscr |= cc << FPSCR_FPRF;                                  \
            env->crf[BF(opcode)] = cc;                                       \
        } else {                                                             \
            xt.tfld = match ? fld_max : 0;                                   \
        }                                                                    \
        match = 0;                                                           \
    }                                                                        \
    if (!scrf) {                                                             \
        putVSR(xT(opcode), &xt, env);                                        \
    }                                                                        \
}

VSX_TEST_DC(xvtstdcdp, 2, xB(opcode), float64, VsrD(i), VsrD(i), UINT64_MAX, 0)
VSX_TEST_DC(xvtstdcsp, 4, xB(opcode), float32, VsrW(i), VsrW(i), UINT32_MAX, 0)
VSX_TEST_DC(xststdcdp, 1, xB(opcode), float64, VsrD(0), VsrD(0), 0, 1)
VSX_TEST_DC(xststdcqp, 1, (rB(opcode) + 32), float128, f128, VsrD(0), 0, 1)

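/*
 * DCMX bit assignments as decoded above: bit 6 matches any NaN, bits 5/4
 * match negative/positive infinity, bits 3/2 match negative/positive
 * zero, and bits 1/0 match negative/positive denormal operands.
 */
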
void helper_xststdcsp(CPUPPCState *env, uint32_t opcode)
{
    ppc_vsr_t xb;
    uint32_t dcmx, sign, exp;
    uint32_t cc, match = 0, not_sp = 0;

    getVSR(xB(opcode), &xb, env);
    dcmx = DCMX(opcode);
    exp = (xb.VsrD(0) >> 52) & 0x7FF;

    sign = float64_is_neg(xb.VsrD(0));
    if (float64_is_any_nan(xb.VsrD(0))) {
        match = extract32(dcmx, 6, 1);
    } else if (float64_is_infinity(xb.VsrD(0))) {
        match = extract32(dcmx, 4 + !sign, 1);
    } else if (float64_is_zero(xb.VsrD(0))) {
        match = extract32(dcmx, 2 + !sign, 1);
    } else if (float64_is_zero_or_denormal(xb.VsrD(0)) ||
               (exp > 0 && exp < 0x381)) {
        match = extract32(dcmx, 0 + !sign, 1);
    }

    not_sp = !float64_eq(xb.VsrD(0),
                         float32_to_float64(
                             float64_to_float32(xb.VsrD(0), &env->fp_status),
                             &env->fp_status), &env->fp_status);

    cc = sign << CRF_LT_BIT | match << CRF_EQ_BIT | not_sp << CRF_SO_BIT;
    env->fpscr &= ~(0x0F << FPSCR_FPRF);
    env->fpscr |= cc << FPSCR_FPRF;
    env->crf[BF(opcode)] = cc;
}

void helper_xsrqpi(CPUPPCState *env, uint32_t opcode)
{
    ppc_vsr_t xb;
    ppc_vsr_t xt;
    uint8_t r = Rrm(opcode);
    uint8_t ex = Rc(opcode);
    uint8_t rmc = RMC(opcode);
    uint8_t rmode = 0;
    float_status tstat;

    getVSR(rB(opcode) + 32, &xb, env);
    memset(&xt, 0, sizeof(xt));
    helper_reset_fpstatus(env);

    if (r == 0 && rmc == 0) {
        rmode = float_round_ties_away;
    } else if (r == 0 && rmc == 0x3) {
        rmode = fpscr_rn;
    } else if (r == 1) {
        switch (rmc) {
        case 0:
            rmode = float_round_nearest_even;
            break;
        case 1:
            rmode = float_round_to_zero;
            break;
        case 2:
            rmode = float_round_up;
            break;
        case 3:
            rmode = float_round_down;
            break;
        default:
            abort();
        }
    }

    tstat = env->fp_status;
    set_float_exception_flags(0, &tstat);
    set_float_rounding_mode(rmode, &tstat);
    xt.f128 = float128_round_to_int(xb.f128, &tstat);
    env->fp_status.float_exception_flags |= tstat.float_exception_flags;

    if (unlikely(tstat.float_exception_flags & float_flag_invalid)) {
        if (float128_is_signaling_nan(xb.f128, &tstat)) {
            float_invalid_op_vxsnan(env, GETPC());
            xt.f128 = float128_snan_to_qnan(xt.f128);
        }
    }

    if (ex == 0 && (tstat.float_exception_flags & float_flag_inexact)) {
        env->fp_status.float_exception_flags &= ~float_flag_inexact;
    }

    helper_compute_fprf_float128(env, xt.f128);
    do_float_check_status(env, GETPC());
    putVSR(rD(opcode) + 32, &xt, env);
}

void helper_xsrqpxp(CPUPPCState *env, uint32_t opcode)
{
    ppc_vsr_t xb;
    ppc_vsr_t xt;
    uint8_t r = Rrm(opcode);
    uint8_t rmc = RMC(opcode);
    uint8_t rmode = 0;
    floatx80 round_res;
    float_status tstat;

    getVSR(rB(opcode) + 32, &xb, env);
    memset(&xt, 0, sizeof(xt));
    helper_reset_fpstatus(env);

    if (r == 0 && rmc == 0) {
        rmode = float_round_ties_away;
    } else if (r == 0 && rmc == 0x3) {
        rmode = fpscr_rn;
    } else if (r == 1) {
        switch (rmc) {
        case 0:
            rmode = float_round_nearest_even;
            break;
        case 1:
            rmode = float_round_to_zero;
            break;
        case 2:
            rmode = float_round_up;
            break;
        case 3:
            rmode = float_round_down;
            break;
        default:
            abort();
        }
    }

    tstat = env->fp_status;
    set_float_exception_flags(0, &tstat);
    set_float_rounding_mode(rmode, &tstat);
    round_res = float128_to_floatx80(xb.f128, &tstat);
    xt.f128 = floatx80_to_float128(round_res, &tstat);
    env->fp_status.float_exception_flags |= tstat.float_exception_flags;

    if (unlikely(tstat.float_exception_flags & float_flag_invalid)) {
        if (float128_is_signaling_nan(xb.f128, &tstat)) {
            float_invalid_op_vxsnan(env, GETPC());
            xt.f128 = float128_snan_to_qnan(xt.f128);
        }
    }

    helper_compute_fprf_float128(env, xt.f128);
    putVSR(rD(opcode) + 32, &xt, env);
    do_float_check_status(env, GETPC());
}

void helper_xssqrtqp(CPUPPCState *env, uint32_t opcode)
{
    ppc_vsr_t xb;
    ppc_vsr_t xt;
    float_status tstat;

    getVSR(rB(opcode) + 32, &xb, env);
    memset(&xt, 0, sizeof(xt));
    helper_reset_fpstatus(env);

    tstat = env->fp_status;
    if (unlikely(Rc(opcode) != 0)) {
        tstat.float_rounding_mode = float_round_to_odd;
    }

    set_float_exception_flags(0, &tstat);
    xt.f128 = float128_sqrt(xb.f128, &tstat);
    env->fp_status.float_exception_flags |= tstat.float_exception_flags;

    if (unlikely(tstat.float_exception_flags & float_flag_invalid)) {
        if (float128_is_signaling_nan(xb.f128, &tstat)) {
            float_invalid_op_vxsnan(env, GETPC());
            xt.f128 = float128_snan_to_qnan(xb.f128);
        } else if (float128_is_quiet_nan(xb.f128, &tstat)) {
            xt.f128 = xb.f128;
        } else if (float128_is_neg(xb.f128) && !float128_is_zero(xb.f128)) {
            float_invalid_op_vxsqrt(env, 1, GETPC());
            xt.f128 = float128_default_nan(&env->fp_status);
        }
    }

    helper_compute_fprf_float128(env, xt.f128);
    putVSR(rD(opcode) + 32, &xt, env);
    do_float_check_status(env, GETPC());
}

void helper_xssubqp(CPUPPCState *env, uint32_t opcode)
{
    ppc_vsr_t xt, xa, xb;
    float_status tstat;

    getVSR(rA(opcode) + 32, &xa, env);
    getVSR(rB(opcode) + 32, &xb, env);
    getVSR(rD(opcode) + 32, &xt, env);
    helper_reset_fpstatus(env);

    tstat = env->fp_status;
    if (unlikely(Rc(opcode) != 0)) {
        tstat.float_rounding_mode = float_round_to_odd;
    }

    set_float_exception_flags(0, &tstat);
    xt.f128 = float128_sub(xa.f128, xb.f128, &tstat);
    env->fp_status.float_exception_flags |= tstat.float_exception_flags;

    if (unlikely(tstat.float_exception_flags & float_flag_invalid)) {
        if (float128_is_infinity(xa.f128) && float128_is_infinity(xb.f128)) {
            float_invalid_op_vxisi(env, 1, GETPC());
        } else if (float128_is_signaling_nan(xa.f128, &tstat) ||
                   float128_is_signaling_nan(xb.f128, &tstat)) {
            float_invalid_op_vxsnan(env, GETPC());
        }
    }

    helper_compute_fprf_float128(env, xt.f128);
    putVSR(rD(opcode) + 32, &xt, env);
    do_float_check_status(env, GETPC());
}