/*
 * PowerPC floating point and SPE emulation helpers for QEMU.
 *
 * Copyright (c) 2003-2007 Jocelyn Mayer
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"
#include "cpu.h"
#include "exec/helper-proto.h"
#include "exec/exec-all.h"
#include "internal.h"
#include "fpu/softfloat.h"
static inline float128 float128_snan_to_qnan(float128 x)
{
    float128 r;

    r.high = x.high | 0x0000800000000000;
    r.low = x.low;
    return r;
}
#define float64_snan_to_qnan(x) ((x) | 0x0008000000000000ULL)
#define float32_snan_to_qnan(x) ((x) | 0x00400000)
#define float16_snan_to_qnan(x) ((x) | 0x0200)
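
/* Quieting a signaling NaN just sets the most-significant fraction bit,
 * e.g. the float32 sNaN 0x7f800001 becomes the qNaN 0x7fc00001.
 */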
static inline bool fp_exceptions_enabled(CPUPPCState *env)
{
#ifdef CONFIG_USER_ONLY
    return true;
#else
    return (env->msr & ((1U << MSR_FE0) | (1U << MSR_FE1))) != 0;
#endif
}

/*****************************************************************************/
/* Floating point operations helpers */
/*
 * This is the non-arithmetic conversion that happens e.g. on loads.
 * In the Power ISA pseudocode, this is called DOUBLE.
 */
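/* For example, 1.0f (0x3f800000) widens to 0x3ff0000000000000. */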
uint64_t helper_todouble(uint32_t arg)
{
    uint32_t abs_arg = arg & 0x7fffffff;
    uint64_t ret;

    if (likely(abs_arg >= 0x00800000)) {
        /* Normalized operand, or Inf, or NaN.  */
        ret = (uint64_t)extract32(arg, 30, 2) << 62;
        ret |= ((extract32(arg, 30, 1) ^ 1) * (uint64_t)7) << 59;
        ret |= (uint64_t)extract32(arg, 0, 30) << 29;
    } else {
        /* Zero or Denormalized operand.  */
        ret = (uint64_t)extract32(arg, 31, 1) << 63;
        if (unlikely(abs_arg != 0)) {
            /* Denormalized operand.  */
            int shift = clz32(abs_arg) - 9;
            int exp = -126 - shift + 1023;
            ret |= (uint64_t)exp << 52;
            /* The shift can exceed 31 bits, so it must be done in 64 bits. */
            ret |= (uint64_t)abs_arg << (shift + 29);
        }
    }
    return ret;
}
/*
 * This is the non-arithmetic conversion that happens e.g. on stores.
 * In the Power ISA pseudocode, this is called SINGLE.
 */
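/* For example, 0x3ff0000000000000 (1.0) narrows back to 0x3f800000 (1.0f). */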
uint32_t helper_tosingle(uint64_t arg)
{
    int exp = extract64(arg, 52, 11);
    uint32_t ret;

    if (likely(exp > 896)) {
        /* No denormalization required (includes Inf, NaN).  */
        ret = extract64(arg, 62, 2) << 30;
        ret |= extract64(arg, 29, 30);
    } else {
        /* Zero or Denormal result.  If the exponent is in bounds for
         * a single-precision denormal result, extract the proper bits.
         * If the input is not zero, and the exponent is out of bounds,
         * then the result is undefined; this underflows to zero.
         */
        ret = extract64(arg, 63, 1) << 31;
        if (unlikely(exp >= 874)) {
            /* Denormal result.  */
            ret |= ((1ULL << 52) | extract64(arg, 0, 52)) >> (896 + 30 - exp);
        }
    }
    return ret;
}
static inline int ppc_float32_get_unbiased_exp(float32 f)
{
    return ((f >> 23) & 0xFF) - 127;
}

static inline int ppc_float64_get_unbiased_exp(float64 f)
{
    return ((f >> 52) & 0x7FF) - 1023;
}
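
/*
 * The FPRF values set below encode the 5-bit C/FL/FG/FE/FU class field:
 * 0x11 quiet NaN, 0x09 -infinity, 0x05 +infinity, 0x12 -zero, 0x02 +zero,
 * 0x18 -denormal, 0x14 +denormal, 0x08 -normal and 0x04 +normal.
 */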
#define COMPUTE_FPRF(tp)                                       \
void helper_compute_fprf_##tp(CPUPPCState *env, tp arg)        \
{                                                              \
    int isneg;                                                 \
    int fprf;                                                  \
                                                               \
    isneg = tp##_is_neg(arg);                                  \
    if (unlikely(tp##_is_any_nan(arg))) {                      \
        if (tp##_is_signaling_nan(arg, &env->fp_status)) {     \
            /* Signaling NaN: flags are undefined */           \
            fprf = 0x00;                                       \
        } else {                                               \
            /* Quiet NaN */                                    \
            fprf = 0x11;                                       \
        }                                                      \
    } else if (unlikely(tp##_is_infinity(arg))) {              \
        /* +/- infinity */                                     \
        if (isneg) {                                           \
            fprf = 0x09;                                       \
        } else {                                               \
            fprf = 0x05;                                       \
        }                                                      \
    } else {                                                   \
        if (tp##_is_zero(arg)) {                               \
            /* +/- zero */                                     \
            if (isneg) {                                       \
                fprf = 0x12;                                   \
            } else {                                           \
                fprf = 0x02;                                   \
            }                                                  \
        } else {                                               \
            if (tp##_is_zero_or_denormal(arg)) {               \
                /* Denormalized numbers */                     \
                fprf = 0x10;                                   \
            } else {                                           \
                /* Normalized numbers */                       \
                fprf = 0x00;                                   \
            }                                                  \
            if (isneg) {                                       \
                fprf |= 0x08;                                  \
            } else {                                           \
                fprf |= 0x04;                                  \
            }                                                  \
        }                                                      \
    }                                                          \
    /* We update FPSCR_FPRF */                                 \
    env->fpscr &= ~(0x1F << FPSCR_FPRF);                       \
    env->fpscr |= fprf << FPSCR_FPRF;                          \
}

COMPUTE_FPRF(float16)
COMPUTE_FPRF(float32)
COMPUTE_FPRF(float64)
COMPUTE_FPRF(float128)
/* Floating-point invalid operations exception */
static inline __attribute__((__always_inline__))
uint64_t float_invalid_op_excp(CPUPPCState *env, int op, int set_fpcc)
{
    CPUState *cs = CPU(ppc_env_get_cpu(env));
    uint64_t ret = 0;
    int ve;

    ve = fpscr_ve;
    switch (op) {
    case POWERPC_EXCP_FP_VXSNAN:
        env->fpscr |= 1 << FPSCR_VXSNAN;
        break;
    case POWERPC_EXCP_FP_VXSOFT:
        env->fpscr |= 1 << FPSCR_VXSOFT;
        break;
    case POWERPC_EXCP_FP_VXISI:
        /* Magnitude subtraction of infinities */
        env->fpscr |= 1 << FPSCR_VXISI;
        goto update_arith;
    case POWERPC_EXCP_FP_VXIDI:
        /* Division of infinity by infinity */
        env->fpscr |= 1 << FPSCR_VXIDI;
        goto update_arith;
    case POWERPC_EXCP_FP_VXZDZ:
        /* Division of zero by zero */
        env->fpscr |= 1 << FPSCR_VXZDZ;
        goto update_arith;
    case POWERPC_EXCP_FP_VXIMZ:
        /* Multiplication of zero by infinity */
        env->fpscr |= 1 << FPSCR_VXIMZ;
        goto update_arith;
    case POWERPC_EXCP_FP_VXVC:
        /* Ordered comparison of NaN */
        env->fpscr |= 1 << FPSCR_VXVC;
        if (set_fpcc) {
            env->fpscr &= ~(0xF << FPSCR_FPCC);
            env->fpscr |= 0x11 << FPSCR_FPCC;
        }
        /* We must update the target FPR before raising the exception */
        if (ve != 0) {
            cs->exception_index = POWERPC_EXCP_PROGRAM;
            env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_VXVC;
            /* Update the floating-point enabled exception summary */
            env->fpscr |= 1 << FPSCR_FEX;
            /* Exception is deferred */
            ve = 0;
        }
        break;
    case POWERPC_EXCP_FP_VXSQRT:
        /* Square root of a negative number */
        env->fpscr |= 1 << FPSCR_VXSQRT;
    update_arith:
        env->fpscr &= ~((1 << FPSCR_FR) | (1 << FPSCR_FI));
        if (ve == 0) {
            /* Set the result to quiet NaN */
            ret = 0x7FF8000000000000ULL;
            if (set_fpcc) {
                env->fpscr &= ~(0xF << FPSCR_FPCC);
                env->fpscr |= 0x11 << FPSCR_FPCC;
            }
        }
        break;
    case POWERPC_EXCP_FP_VXCVI:
        /* Invalid conversion */
        env->fpscr |= 1 << FPSCR_VXCVI;
        env->fpscr &= ~((1 << FPSCR_FR) | (1 << FPSCR_FI));
        if (ve == 0) {
            /* Set the result to quiet NaN */
            ret = 0x7FF8000000000000ULL;
            if (set_fpcc) {
                env->fpscr &= ~(0xF << FPSCR_FPCC);
                env->fpscr |= 0x11 << FPSCR_FPCC;
            }
        }
        break;
    }
    /* Update the floating-point invalid operation summary */
    env->fpscr |= 1 << FPSCR_VX;
    /* Update the floating-point exception summary */
    env->fpscr |= FP_FX;
    if (ve != 0) {
        /* Update the floating-point enabled exception summary */
        env->fpscr |= 1 << FPSCR_FEX;
        if (fp_exceptions_enabled(env)) {
            /* GETPC() works here because this is inline */
            raise_exception_err_ra(env, POWERPC_EXCP_PROGRAM,
                                   POWERPC_EXCP_FP | op, GETPC());
        }
    }
    return ret;
}
static inline void float_zero_divide_excp(CPUPPCState *env, uintptr_t raddr)
{
    env->fpscr |= 1 << FPSCR_ZX;
    env->fpscr &= ~((1 << FPSCR_FR) | (1 << FPSCR_FI));
    /* Update the floating-point exception summary */
    env->fpscr |= FP_FX;
    if (fpscr_ze != 0) {
        /* Update the floating-point enabled exception summary */
        env->fpscr |= 1 << FPSCR_FEX;
        if (fp_exceptions_enabled(env)) {
            raise_exception_err_ra(env, POWERPC_EXCP_PROGRAM,
                                   POWERPC_EXCP_FP | POWERPC_EXCP_FP_ZX,
                                   raddr);
        }
    }
}
static inline void float_overflow_excp(CPUPPCState *env)
{
    CPUState *cs = CPU(ppc_env_get_cpu(env));

    env->fpscr |= 1 << FPSCR_OX;
    /* Update the floating-point exception summary */
    env->fpscr |= FP_FX;
    if (fpscr_oe != 0) {
        /* XXX: should adjust the result */
        /* Update the floating-point enabled exception summary */
        env->fpscr |= 1 << FPSCR_FEX;
        /* We must update the target FPR before raising the exception */
        cs->exception_index = POWERPC_EXCP_PROGRAM;
        env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_OX;
    } else {
        env->fpscr |= 1 << FPSCR_XX;
        env->fpscr |= 1 << FPSCR_FI;
    }
}
static inline void float_underflow_excp(CPUPPCState *env)
{
    CPUState *cs = CPU(ppc_env_get_cpu(env));

    env->fpscr |= 1 << FPSCR_UX;
    /* Update the floating-point exception summary */
    env->fpscr |= FP_FX;
    if (fpscr_ue != 0) {
        /* XXX: should adjust the result */
        /* Update the floating-point enabled exception summary */
        env->fpscr |= 1 << FPSCR_FEX;
        /* We must update the target FPR before raising the exception */
        cs->exception_index = POWERPC_EXCP_PROGRAM;
        env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_UX;
    }
}
static inline void float_inexact_excp(CPUPPCState *env)
{
    CPUState *cs = CPU(ppc_env_get_cpu(env));

    env->fpscr |= 1 << FPSCR_FI;
    env->fpscr |= 1 << FPSCR_XX;
    /* Update the floating-point exception summary */
    env->fpscr |= FP_FX;
    if (fpscr_xe != 0) {
        /* Update the floating-point enabled exception summary */
        env->fpscr |= 1 << FPSCR_FEX;
        /* We must update the target FPR before raising the exception */
        cs->exception_index = POWERPC_EXCP_PROGRAM;
        env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_XX;
    }
}
static inline void fpscr_set_rounding_mode(CPUPPCState *env)
{
    int rnd_type;

    /* Set rounding mode */
    switch (fpscr_rn) {
    case 0:
        /* Best approximation (round to nearest) */
        rnd_type = float_round_nearest_even;
        break;
    case 1:
        /* Smaller magnitude (round toward zero) */
        rnd_type = float_round_to_zero;
        break;
    case 2:
        /* Round toward +infinity */
        rnd_type = float_round_up;
        break;
    default:
    case 3:
        /* Round toward -infinity */
        rnd_type = float_round_down;
        break;
    }
    set_float_rounding_mode(rnd_type, &env->fp_status);
}
void helper_fpscr_clrbit(CPUPPCState *env, uint32_t bit)
{
    int prev;

    prev = (env->fpscr >> bit) & 1;
    env->fpscr &= ~(1 << bit);
    if (prev == 1) {
        switch (bit) {
        case FPSCR_RN1:
        case FPSCR_RN:
            fpscr_set_rounding_mode(env);
            break;
        case FPSCR_VXSNAN:
        case FPSCR_VXISI:
        case FPSCR_VXIDI:
        case FPSCR_VXZDZ:
        case FPSCR_VXIMZ:
        case FPSCR_VXVC:
        case FPSCR_VXSOFT:
        case FPSCR_VXSQRT:
        case FPSCR_VXCVI:
            if (!fpscr_ix) {
                /* Set VX bit to zero */
                env->fpscr &= ~(1 << FPSCR_VX);
            }
            break;
        case FPSCR_OX:
        case FPSCR_UX:
        case FPSCR_ZX:
        case FPSCR_XX:
        case FPSCR_VE:
        case FPSCR_OE:
        case FPSCR_UE:
        case FPSCR_ZE:
        case FPSCR_XE:
            if (!fpscr_eex) {
                /* Clear the FEX bit */
                env->fpscr &= ~(1 << FPSCR_FEX);
            }
            break;
        default:
            break;
        }
    }
}
void helper_fpscr_setbit(CPUPPCState *env, uint32_t bit)
{
    CPUState *cs = CPU(ppc_env_get_cpu(env));
    int prev;

    prev = (env->fpscr >> bit) & 1;
    env->fpscr |= 1 << bit;
    if (prev == 0) {
        switch (bit) {
        case FPSCR_VX:
            env->fpscr |= FP_FX;
            if (fpscr_ve) {
                goto raise_ve;
            }
            break;
        case FPSCR_OX:
            env->fpscr |= FP_FX;
            if (fpscr_oe) {
                goto raise_oe;
            }
            break;
        case FPSCR_UX:
            env->fpscr |= FP_FX;
            if (fpscr_ue) {
                goto raise_ue;
            }
            break;
        case FPSCR_ZX:
            env->fpscr |= FP_FX;
            if (fpscr_ze) {
                goto raise_ze;
            }
            break;
        case FPSCR_XX:
            env->fpscr |= FP_FX;
            if (fpscr_xe) {
                goto raise_xe;
            }
            break;
        case FPSCR_VXSNAN:
        case FPSCR_VXISI:
        case FPSCR_VXIDI:
        case FPSCR_VXZDZ:
        case FPSCR_VXIMZ:
        case FPSCR_VXVC:
        case FPSCR_VXSOFT:
        case FPSCR_VXSQRT:
        case FPSCR_VXCVI:
            env->fpscr |= 1 << FPSCR_VX;
            env->fpscr |= FP_FX;
            if (fpscr_ve != 0) {
                goto raise_ve;
            }
            break;
        case FPSCR_VE:
            if (fpscr_vx != 0) {
            raise_ve:
                env->error_code = POWERPC_EXCP_FP;
                if (fpscr_vxsnan) {
                    env->error_code |= POWERPC_EXCP_FP_VXSNAN;
                }
                if (fpscr_vxisi) {
                    env->error_code |= POWERPC_EXCP_FP_VXISI;
                }
                if (fpscr_vxidi) {
                    env->error_code |= POWERPC_EXCP_FP_VXIDI;
                }
                if (fpscr_vxzdz) {
                    env->error_code |= POWERPC_EXCP_FP_VXZDZ;
                }
                if (fpscr_vximz) {
                    env->error_code |= POWERPC_EXCP_FP_VXIMZ;
                }
                if (fpscr_vxvc) {
                    env->error_code |= POWERPC_EXCP_FP_VXVC;
                }
                if (fpscr_vxsoft) {
                    env->error_code |= POWERPC_EXCP_FP_VXSOFT;
                }
                if (fpscr_vxsqrt) {
                    env->error_code |= POWERPC_EXCP_FP_VXSQRT;
                }
                if (fpscr_vxcvi) {
                    env->error_code |= POWERPC_EXCP_FP_VXCVI;
                }
                goto raise_excp;
            }
            break;
        case FPSCR_OE:
            if (fpscr_ox != 0) {
            raise_oe:
                env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_OX;
                goto raise_excp;
            }
            break;
        case FPSCR_UE:
            if (fpscr_ux != 0) {
            raise_ue:
                env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_UX;
                goto raise_excp;
            }
            break;
        case FPSCR_ZE:
            if (fpscr_zx != 0) {
            raise_ze:
                env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_ZX;
                goto raise_excp;
            }
            break;
        case FPSCR_XE:
            if (fpscr_xx != 0) {
            raise_xe:
                env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_XX;
                goto raise_excp;
            }
            break;
        case FPSCR_RN1:
        case FPSCR_RN:
            fpscr_set_rounding_mode(env);
            break;
        default:
            break;
        raise_excp:
            /* Update the floating-point enabled exception summary */
            env->fpscr |= 1 << FPSCR_FEX;
            /* We have to update Rc1 before raising the exception */
            cs->exception_index = POWERPC_EXCP_PROGRAM;
            break;
        }
    }
}
void helper_store_fpscr(CPUPPCState *env, uint64_t arg, uint32_t mask)
{
    CPUState *cs = CPU(ppc_env_get_cpu(env));
    target_ulong prev, new;
    int i;

    prev = env->fpscr;
    new = (target_ulong)arg;
    new &= ~0x60000000LL;
    new |= prev & 0x60000000LL;
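    /* Each set bit i of mask selects the 4-bit FPSCR field at bits 4*i+3..4*i. */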
    for (i = 0; i < sizeof(target_ulong) * 2; i++) {
        if (mask & (1 << i)) {
            env->fpscr &= ~(0xFLL << (4 * i));
            env->fpscr |= new & (0xFLL << (4 * i));
        }
    }
    /* Update VX and FEX */
    if (fpscr_ix != 0) {
        env->fpscr |= 1 << FPSCR_VX;
    } else {
        env->fpscr &= ~(1 << FPSCR_VX);
    }
    if ((fpscr_ex & fpscr_eex) != 0) {
        env->fpscr |= 1 << FPSCR_FEX;
        cs->exception_index = POWERPC_EXCP_PROGRAM;
        /* XXX: we should compute it properly */
        env->error_code = POWERPC_EXCP_FP;
    } else {
        env->fpscr &= ~(1 << FPSCR_FEX);
    }
    fpscr_set_rounding_mode(env);
}
void store_fpscr(CPUPPCState *env, uint64_t arg, uint32_t mask)
{
    helper_store_fpscr(env, arg, mask);
}
static void do_float_check_status(CPUPPCState *env, uintptr_t raddr)
{
    CPUState *cs = CPU(ppc_env_get_cpu(env));
    int status = get_float_exception_flags(&env->fp_status);
    bool inexact_happened = false;

    if (status & float_flag_overflow) {
        float_overflow_excp(env);
    } else if (status & float_flag_underflow) {
        float_underflow_excp(env);
    } else if (status & float_flag_inexact) {
        float_inexact_excp(env);
        inexact_happened = true;
    }

    /* if the inexact flag was not set */
    if (inexact_happened == false) {
        env->fpscr &= ~(1 << FPSCR_FI); /* clear the FPSCR[FI] bit */
    }

    if (cs->exception_index == POWERPC_EXCP_PROGRAM &&
        (env->error_code & POWERPC_EXCP_FP)) {
        /* Deferred floating-point exception after target FPR update */
        if (fp_exceptions_enabled(env)) {
            raise_exception_err_ra(env, cs->exception_index,
                                   env->error_code, raddr);
        }
    }
}
static inline __attribute__((__always_inline__))
void float_check_status(CPUPPCState *env)
{
    /* GETPC() works here because this is inline */
    do_float_check_status(env, GETPC());
}

void helper_float_check_status(CPUPPCState *env)
{
    do_float_check_status(env, GETPC());
}

void helper_reset_fpstatus(CPUPPCState *env)
{
    set_float_exception_flags(0, &env->fp_status);
}
/* fadd - fadd. */
float64 helper_fadd(CPUPPCState *env, float64 arg1, float64 arg2)
{
    float64 ret = float64_add(arg1, arg2, &env->fp_status);
    int status = get_float_exception_flags(&env->fp_status);

    if (unlikely(status & float_flag_invalid)) {
        if (float64_is_infinity(arg1) && float64_is_infinity(arg2)) {
            /* Magnitude subtraction of infinities */
            float_invalid_op_excp(env, POWERPC_EXCP_FP_VXISI, 1);
        } else if (float64_is_signaling_nan(arg1, &env->fp_status) ||
                   float64_is_signaling_nan(arg2, &env->fp_status)) {
            /* sNaN addition */
            float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 1);
        }
    }

    return ret;
}
/* fsub - fsub. */
float64 helper_fsub(CPUPPCState *env, float64 arg1, float64 arg2)
{
    float64 ret = float64_sub(arg1, arg2, &env->fp_status);
    int status = get_float_exception_flags(&env->fp_status);

    if (unlikely(status & float_flag_invalid)) {
        if (float64_is_infinity(arg1) && float64_is_infinity(arg2)) {
            /* Magnitude subtraction of infinities */
            float_invalid_op_excp(env, POWERPC_EXCP_FP_VXISI, 1);
        } else if (float64_is_signaling_nan(arg1, &env->fp_status) ||
                   float64_is_signaling_nan(arg2, &env->fp_status)) {
            /* sNaN subtraction */
            float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 1);
        }
    }

    return ret;
}
/* fmul - fmul. */
float64 helper_fmul(CPUPPCState *env, float64 arg1, float64 arg2)
{
    float64 ret = float64_mul(arg1, arg2, &env->fp_status);
    int status = get_float_exception_flags(&env->fp_status);

    if (unlikely(status & float_flag_invalid)) {
        if ((float64_is_infinity(arg1) && float64_is_zero(arg2)) ||
            (float64_is_zero(arg1) && float64_is_infinity(arg2))) {
            /* Multiplication of zero by infinity */
            float_invalid_op_excp(env, POWERPC_EXCP_FP_VXIMZ, 1);
        } else if (float64_is_signaling_nan(arg1, &env->fp_status) ||
                   float64_is_signaling_nan(arg2, &env->fp_status)) {
            /* sNaN multiplication */
            float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 1);
        }
    }

    return ret;
}
/* fdiv - fdiv. */
float64 helper_fdiv(CPUPPCState *env, float64 arg1, float64 arg2)
{
    float64 ret = float64_div(arg1, arg2, &env->fp_status);
    int status = get_float_exception_flags(&env->fp_status);

    if (unlikely(status)) {
        if (status & float_flag_invalid) {
            /* Determine what kind of invalid operation was seen. */
            if (float64_is_infinity(arg1) && float64_is_infinity(arg2)) {
                /* Division of infinity by infinity */
                float_invalid_op_excp(env, POWERPC_EXCP_FP_VXIDI, 1);
            } else if (float64_is_zero(arg1) && float64_is_zero(arg2)) {
                /* Division of zero by zero */
                float_invalid_op_excp(env, POWERPC_EXCP_FP_VXZDZ, 1);
            } else if (float64_is_signaling_nan(arg1, &env->fp_status) ||
                       float64_is_signaling_nan(arg2, &env->fp_status)) {
                /* sNaN division */
                float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 1);
            }
        }
        if (status & float_flag_divbyzero) {
            float_zero_divide_excp(env, GETPC());
        }
    }

    return ret;
}
#define FPU_FCTI(op, cvt, nanval)                                      \
uint64_t helper_##op(CPUPPCState *env, uint64_t arg)                   \
{                                                                      \
    CPU_DoubleU farg;                                                  \
                                                                       \
    farg.ll = arg;                                                     \
    farg.ll = float64_to_##cvt(farg.d, &env->fp_status);               \
                                                                       \
    if (unlikely(env->fp_status.float_exception_flags)) {              \
        if (float64_is_any_nan(arg)) {                                 \
            float_invalid_op_excp(env, POWERPC_EXCP_FP_VXCVI, 1);      \
            if (float64_is_signaling_nan(arg, &env->fp_status)) {      \
                float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 1); \
            }                                                          \
            farg.ll = nanval;                                          \
        } else if (env->fp_status.float_exception_flags &              \
                   float_flag_invalid) {                               \
            float_invalid_op_excp(env, POWERPC_EXCP_FP_VXCVI, 1);      \
        }                                                              \
        float_check_status(env);                                       \
    }                                                                  \
    return farg.ll;                                                    \
}
FPU_FCTI(fctiw, int32, 0x80000000U)
FPU_FCTI(fctiwz, int32_round_to_zero, 0x80000000U)
FPU_FCTI(fctiwu, uint32, 0x00000000U)
FPU_FCTI(fctiwuz, uint32_round_to_zero, 0x00000000U)
FPU_FCTI(fctid, int64, 0x8000000000000000ULL)
FPU_FCTI(fctidz, int64_round_to_zero, 0x8000000000000000ULL)
FPU_FCTI(fctidu, uint64, 0x0000000000000000ULL)
FPU_FCTI(fctiduz, uint64_round_to_zero, 0x0000000000000000ULL)
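
/* nanval above is the result delivered for NaN inputs: INT32_MIN/INT64_MIN
 * for the signed conversions, 0 for the unsigned ones.
 */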
#define FPU_FCFI(op, cvtr, is_single)                      \
uint64_t helper_##op(CPUPPCState *env, uint64_t arg)       \
{                                                          \
    CPU_DoubleU farg;                                      \
                                                           \
    if (is_single) {                                       \
        float32 tmp = cvtr(arg, &env->fp_status);          \
        farg.d = float32_to_float64(tmp, &env->fp_status); \
    } else {                                               \
        farg.d = cvtr(arg, &env->fp_status);               \
    }                                                      \
    float_check_status(env);                               \
    return farg.ll;                                        \
}

FPU_FCFI(fcfid, int64_to_float64, 0)
FPU_FCFI(fcfids, int64_to_float32, 1)
FPU_FCFI(fcfidu, uint64_to_float64, 0)
FPU_FCFI(fcfidus, uint64_to_float32, 1)
static inline uint64_t do_fri(CPUPPCState *env, uint64_t arg,
                              int rounding_mode)
{
    CPU_DoubleU farg;

    farg.ll = arg;

    if (unlikely(float64_is_signaling_nan(farg.d, &env->fp_status))) {
        /* sNaN round */
        float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 1);
        farg.ll = arg | 0x0008000000000000ULL;
    } else {
        int inexact = get_float_exception_flags(&env->fp_status) &
                      float_flag_inexact;
        set_float_rounding_mode(rounding_mode, &env->fp_status);
        farg.ll = float64_round_to_int(farg.d, &env->fp_status);
        /* Restore rounding mode from FPSCR */
        fpscr_set_rounding_mode(env);

        /* fri* does not set FPSCR[XX] */
        if (!inexact) {
            env->fp_status.float_exception_flags &= ~float_flag_inexact;
        }
    }
    float_check_status(env);
    return farg.ll;
}
uint64_t helper_frin(CPUPPCState *env, uint64_t arg)
{
    return do_fri(env, arg, float_round_ties_away);
}

uint64_t helper_friz(CPUPPCState *env, uint64_t arg)
{
    return do_fri(env, arg, float_round_to_zero);
}

uint64_t helper_frip(CPUPPCState *env, uint64_t arg)
{
    return do_fri(env, arg, float_round_up);
}

uint64_t helper_frim(CPUPPCState *env, uint64_t arg)
{
    return do_fri(env, arg, float_round_down);
}
#define FPU_MADDSUB_UPDATE(NAME, TP)                                    \
static void NAME(CPUPPCState *env, TP arg1, TP arg2, TP arg3,           \
                 unsigned int madd_flags)                               \
{                                                                       \
    if (TP##_is_signaling_nan(arg1, &env->fp_status) ||                 \
        TP##_is_signaling_nan(arg2, &env->fp_status) ||                 \
        TP##_is_signaling_nan(arg3, &env->fp_status)) {                 \
        /* sNaN operation */                                            \
        float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 1);          \
    }                                                                   \
    if ((TP##_is_infinity(arg1) && TP##_is_zero(arg2)) ||               \
        (TP##_is_zero(arg1) && TP##_is_infinity(arg2))) {               \
        /* Multiplication of zero by infinity */                        \
        float_invalid_op_excp(env, POWERPC_EXCP_FP_VXIMZ, 1);           \
    }                                                                   \
    if ((TP##_is_infinity(arg1) || TP##_is_infinity(arg2)) &&           \
        TP##_is_infinity(arg3)) {                                       \
        uint8_t aSign, bSign, cSign;                                    \
                                                                        \
        aSign = TP##_is_neg(arg1);                                      \
        bSign = TP##_is_neg(arg2);                                      \
        cSign = TP##_is_neg(arg3);                                      \
        if (madd_flags & float_muladd_negate_c) {                       \
            cSign ^= 1;                                                 \
        }                                                               \
        if (aSign ^ bSign ^ cSign) {                                    \
            float_invalid_op_excp(env, POWERPC_EXCP_FP_VXISI, 1);       \
        }                                                               \
    }                                                                   \
}

FPU_MADDSUB_UPDATE(float32_maddsub_update_excp, float32)
FPU_MADDSUB_UPDATE(float64_maddsub_update_excp, float64)
#define FPU_FMADD(op, madd_flags)                                \
uint64_t helper_##op(CPUPPCState *env, uint64_t arg1,            \
                     uint64_t arg2, uint64_t arg3)               \
{                                                                \
    uint32_t flags;                                              \
    float64 ret = float64_muladd(arg1, arg2, arg3, madd_flags,   \
                                 &env->fp_status);               \
    flags = get_float_exception_flags(&env->fp_status);          \
    if (flags) {                                                 \
        if (flags & float_flag_invalid) {                        \
            float64_maddsub_update_excp(env, arg1, arg2, arg3,   \
                                        madd_flags);             \
        }                                                        \
        float_check_status(env);                                 \
    }                                                            \
    return ret;                                                  \
}
#define MADD_FLGS 0
#define MSUB_FLGS float_muladd_negate_c
#define NMADD_FLGS float_muladd_negate_result
#define NMSUB_FLGS (float_muladd_negate_c | float_muladd_negate_result)
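
/*
 * With these flag combinations, fmadd computes a*b + c, fmsub a*b - c,
 * fnmadd -(a*b + c) and fnmsub -(a*b - c).
 */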
FPU_FMADD(fmadd, MADD_FLGS)
FPU_FMADD(fnmadd, NMADD_FLGS)
FPU_FMADD(fmsub, MSUB_FLGS)
FPU_FMADD(fnmsub, NMSUB_FLGS)
/* frsp - frsp. */
uint64_t helper_frsp(CPUPPCState *env, uint64_t arg)
{
    CPU_DoubleU farg;
    float32 f32;

    farg.ll = arg;

    if (unlikely(float64_is_signaling_nan(farg.d, &env->fp_status))) {
        /* sNaN conversion to single */
        float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 1);
    }
    f32 = float64_to_float32(farg.d, &env->fp_status);
    farg.d = float32_to_float64(f32, &env->fp_status);

    return farg.ll;
}
/* fsqrt - fsqrt. */
float64 helper_fsqrt(CPUPPCState *env, float64 arg)
{
    float64 ret = float64_sqrt(arg, &env->fp_status);
    int status = get_float_exception_flags(&env->fp_status);

    if (unlikely(status & float_flag_invalid)) {
        if (unlikely(float64_is_any_nan(arg))) {
            if (unlikely(float64_is_signaling_nan(arg, &env->fp_status))) {
                /* sNaN square root */
                float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 1);
            }
        } else {
            /* Square root of a negative nonzero number */
            float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSQRT, 1);
        }
    }

    return ret;
}
/* fre - fre. */
float64 helper_fre(CPUPPCState *env, float64 arg)
{
    /* "Estimate" the reciprocal with actual division. */
    float64 ret = float64_div(float64_one, arg, &env->fp_status);
    int status = get_float_exception_flags(&env->fp_status);

    if (unlikely(status)) {
        if (status & float_flag_invalid) {
            if (float64_is_signaling_nan(arg, &env->fp_status)) {
                /* sNaN reciprocal */
                float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 1);
            }
        }
        if (status & float_flag_divbyzero) {
            float_zero_divide_excp(env, GETPC());
            /* For FPSCR.ZE == 0, the result is 1/2. */
            ret = float64_set_sign(float64_half, float64_is_neg(arg));
        }
    }

    return ret;
}
/* fres - fres. */
uint64_t helper_fres(CPUPPCState *env, uint64_t arg)
{
    CPU_DoubleU farg;
    float32 f32;

    farg.ll = arg;

    if (unlikely(float64_is_signaling_nan(farg.d, &env->fp_status))) {
        /* sNaN reciprocal */
        float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 1);
    }
    farg.d = float64_div(float64_one, farg.d, &env->fp_status);
    f32 = float64_to_float32(farg.d, &env->fp_status);
    farg.d = float32_to_float64(f32, &env->fp_status);

    return farg.ll;
}
/* frsqrte - frsqrte. */
float64 helper_frsqrte(CPUPPCState *env, float64 arg)
{
    /* "Estimate" the reciprocal square root with actual computation. */
    float64 rets = float64_sqrt(arg, &env->fp_status);
    float64 retd = float64_div(float64_one, rets, &env->fp_status);
    int status = get_float_exception_flags(&env->fp_status);

    if (unlikely(status)) {
        if (status & float_flag_invalid) {
            if (float64_is_signaling_nan(arg, &env->fp_status)) {
                /* sNaN reciprocal */
                float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 1);
            } else {
                /* Square root of a negative nonzero number */
                float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSQRT, 1);
            }
        }
        if (status & float_flag_divbyzero) {
            /* Reciprocal of (square root of) zero. */
            float_zero_divide_excp(env, GETPC());
        }
    }

    return retd;
}
/* fsel - fsel. */
uint64_t helper_fsel(CPUPPCState *env, uint64_t arg1, uint64_t arg2,
                     uint64_t arg3)
{
    CPU_DoubleU farg1;

    farg1.ll = arg1;

    if ((!float64_is_neg(farg1.d) || float64_is_zero(farg1.d)) &&
        !float64_is_any_nan(farg1.d)) {
        return arg2;
    } else {
        return arg3;
    }
}
uint32_t helper_ftdiv(uint64_t fra, uint64_t frb)
{
    int fe_flag = 0;
    int fg_flag = 0;

    if (unlikely(float64_is_infinity(fra) ||
                 float64_is_infinity(frb) ||
                 float64_is_zero(frb))) {
        fe_flag = 1;
        fg_flag = 1;
    } else {
        int e_a = ppc_float64_get_unbiased_exp(fra);
        int e_b = ppc_float64_get_unbiased_exp(frb);

        if (unlikely(float64_is_any_nan(fra) ||
                     float64_is_any_nan(frb))) {
            fe_flag = 1;
        } else if ((e_b <= -1022) || (e_b >= 1021)) {
            fe_flag = 1;
        } else if (!float64_is_zero(fra) &&
                   (((e_a - e_b) >= 1023) ||
                    ((e_a - e_b) <= -1021) ||
                    (e_a <= -970))) {
            fe_flag = 1;
        }

        if (unlikely(float64_is_zero_or_denormal(frb))) {
            /* XB is not zero because of the above check and */
            /* so must be denormalized. */
            fg_flag = 1;
        }
    }

    return 0x8 | (fg_flag ? 4 : 0) | (fe_flag ? 2 : 0);
}
uint32_t helper_ftsqrt(uint64_t frb)
{
    int fe_flag = 0;
    int fg_flag = 0;

    if (unlikely(float64_is_infinity(frb) || float64_is_zero(frb))) {
        fe_flag = 1;
        fg_flag = 1;
    } else {
        int e_b = ppc_float64_get_unbiased_exp(frb);

        if (unlikely(float64_is_any_nan(frb))) {
            fe_flag = 1;
        } else if (unlikely(float64_is_zero(frb))) {
            fe_flag = 1;
        } else if (unlikely(float64_is_neg(frb))) {
            fe_flag = 1;
        } else if (!float64_is_zero(frb) && (e_b <= (-1022 + 52))) {
            fe_flag = 1;
        }

        if (unlikely(float64_is_zero_or_denormal(frb))) {
            /* XB is not zero because of the above check and */
            /* therefore must be denormalized. */
            fg_flag = 1;
        }
    }

    return 0x8 | (fg_flag ? 4 : 0) | (fe_flag ? 2 : 0);
}
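
/* For the fcmpu/fcmpo results below: 0x08 = FL (<), 0x04 = FG (>),
 * 0x02 = FE (=), 0x01 = FU (unordered).
 */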
void helper_fcmpu(CPUPPCState *env, uint64_t arg1, uint64_t arg2,
                  uint32_t crfD)
{
    CPU_DoubleU farg1, farg2;
    uint32_t ret = 0;

    farg1.ll = arg1;
    farg2.ll = arg2;

    if (unlikely(float64_is_any_nan(farg1.d) ||
                 float64_is_any_nan(farg2.d))) {
        ret = 0x01UL;
    } else if (float64_lt(farg1.d, farg2.d, &env->fp_status)) {
        ret = 0x08UL;
    } else if (!float64_le(farg1.d, farg2.d, &env->fp_status)) {
        ret = 0x04UL;
    } else {
        ret = 0x02UL;
    }

    env->fpscr &= ~(0x0F << FPSCR_FPRF);
    env->fpscr |= ret << FPSCR_FPRF;
    env->crf[crfD] = ret;
    if (unlikely(ret == 0x01UL
                 && (float64_is_signaling_nan(farg1.d, &env->fp_status) ||
                     float64_is_signaling_nan(farg2.d, &env->fp_status)))) {
        /* sNaN comparison */
        float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 1);
    }
}
void helper_fcmpo(CPUPPCState *env, uint64_t arg1, uint64_t arg2,
                  uint32_t crfD)
{
    CPU_DoubleU farg1, farg2;
    uint32_t ret = 0;

    farg1.ll = arg1;
    farg2.ll = arg2;

    if (unlikely(float64_is_any_nan(farg1.d) ||
                 float64_is_any_nan(farg2.d))) {
        ret = 0x01UL;
    } else if (float64_lt(farg1.d, farg2.d, &env->fp_status)) {
        ret = 0x08UL;
    } else if (!float64_le(farg1.d, farg2.d, &env->fp_status)) {
        ret = 0x04UL;
    } else {
        ret = 0x02UL;
    }

    env->fpscr &= ~(0x0F << FPSCR_FPRF);
    env->fpscr |= ret << FPSCR_FPRF;
    env->crf[crfD] = ret;
    if (unlikely(ret == 0x01UL)) {
        if (float64_is_signaling_nan(farg1.d, &env->fp_status) ||
            float64_is_signaling_nan(farg2.d, &env->fp_status)) {
            /* sNaN comparison */
            float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN |
                                  POWERPC_EXCP_FP_VXVC, 1);
        } else {
            /* qNaN comparison */
            float_invalid_op_excp(env, POWERPC_EXCP_FP_VXVC, 1);
        }
    }
}
/* Single-precision floating-point conversions */
static inline uint32_t efscfsi(CPUPPCState *env, uint32_t val)
{
    CPU_FloatU u;

    u.f = int32_to_float32(val, &env->vec_status);

    return u.l;
}

static inline uint32_t efscfui(CPUPPCState *env, uint32_t val)
{
    CPU_FloatU u;

    u.f = uint32_to_float32(val, &env->vec_status);

    return u.l;
}

static inline int32_t efsctsi(CPUPPCState *env, uint32_t val)
{
    CPU_FloatU u;

    u.l = val;
    /* NaNs are not treated the way IEEE 754 specifies */
    if (unlikely(float32_is_quiet_nan(u.f, &env->vec_status))) {
        return 0;
    }

    return float32_to_int32(u.f, &env->vec_status);
}

static inline uint32_t efsctui(CPUPPCState *env, uint32_t val)
{
    CPU_FloatU u;

    u.l = val;
    /* NaNs are not treated the way IEEE 754 specifies */
    if (unlikely(float32_is_quiet_nan(u.f, &env->vec_status))) {
        return 0;
    }

    return float32_to_uint32(u.f, &env->vec_status);
}

static inline uint32_t efsctsiz(CPUPPCState *env, uint32_t val)
{
    CPU_FloatU u;

    u.l = val;
    /* NaNs are not treated the way IEEE 754 specifies */
    if (unlikely(float32_is_quiet_nan(u.f, &env->vec_status))) {
        return 0;
    }

    return float32_to_int32_round_to_zero(u.f, &env->vec_status);
}

static inline uint32_t efsctuiz(CPUPPCState *env, uint32_t val)
{
    CPU_FloatU u;

    u.l = val;
    /* NaNs are not treated the way IEEE 754 specifies */
    if (unlikely(float32_is_quiet_nan(u.f, &env->vec_status))) {
        return 0;
    }

    return float32_to_uint32_round_to_zero(u.f, &env->vec_status);
}
static inline uint32_t efscfsf(CPUPPCState *env, uint32_t val)
{
    CPU_FloatU u;
    float32 tmp;

    u.f = int32_to_float32(val, &env->vec_status);
    tmp = int64_to_float32(1ULL << 32, &env->vec_status);
    u.f = float32_div(u.f, tmp, &env->vec_status);

    return u.l;
}

static inline uint32_t efscfuf(CPUPPCState *env, uint32_t val)
{
    CPU_FloatU u;
    float32 tmp;

    u.f = uint32_to_float32(val, &env->vec_status);
    tmp = uint64_to_float32(1ULL << 32, &env->vec_status);
    u.f = float32_div(u.f, tmp, &env->vec_status);

    return u.l;
}

static inline uint32_t efsctsf(CPUPPCState *env, uint32_t val)
{
    CPU_FloatU u;
    float32 tmp;

    u.l = val;
    /* NaNs are not treated the way IEEE 754 specifies */
    if (unlikely(float32_is_quiet_nan(u.f, &env->vec_status))) {
        return 0;
    }
    tmp = uint64_to_float32(1ULL << 32, &env->vec_status);
    u.f = float32_mul(u.f, tmp, &env->vec_status);

    return float32_to_int32(u.f, &env->vec_status);
}

static inline uint32_t efsctuf(CPUPPCState *env, uint32_t val)
{
    CPU_FloatU u;
    float32 tmp;

    u.l = val;
    /* NaNs are not treated the way IEEE 754 specifies */
    if (unlikely(float32_is_quiet_nan(u.f, &env->vec_status))) {
        return 0;
    }
    tmp = uint64_to_float32(1ULL << 32, &env->vec_status);
    u.f = float32_mul(u.f, tmp, &env->vec_status);

    return float32_to_uint32(u.f, &env->vec_status);
}
#define HELPER_SPE_SINGLE_CONV(name)                              \
    uint32_t helper_e##name(CPUPPCState *env, uint32_t val)       \
    {                                                             \
        return e##name(env, val);                                 \
    }
/* efscfsi */
HELPER_SPE_SINGLE_CONV(fscfsi);
/* efscfui */
HELPER_SPE_SINGLE_CONV(fscfui);
/* efscfuf */
HELPER_SPE_SINGLE_CONV(fscfuf);
/* efscfsf */
HELPER_SPE_SINGLE_CONV(fscfsf);
/* efsctsi */
HELPER_SPE_SINGLE_CONV(fsctsi);
/* efsctui */
HELPER_SPE_SINGLE_CONV(fsctui);
/* efsctsiz */
HELPER_SPE_SINGLE_CONV(fsctsiz);
/* efsctuiz */
HELPER_SPE_SINGLE_CONV(fsctuiz);
/* efsctsf */
HELPER_SPE_SINGLE_CONV(fsctsf);
/* efsctuf */
HELPER_SPE_SINGLE_CONV(fsctuf);
#define HELPER_SPE_VECTOR_CONV(name)                            \
    uint64_t helper_ev##name(CPUPPCState *env, uint64_t val)    \
    {                                                           \
        return ((uint64_t)e##name(env, val >> 32) << 32) |      \
            (uint64_t)e##name(env, val);                        \
    }
/* evfscfsi */
HELPER_SPE_VECTOR_CONV(fscfsi);
/* evfscfui */
HELPER_SPE_VECTOR_CONV(fscfui);
/* evfscfuf */
HELPER_SPE_VECTOR_CONV(fscfuf);
/* evfscfsf */
HELPER_SPE_VECTOR_CONV(fscfsf);
/* evfsctsi */
HELPER_SPE_VECTOR_CONV(fsctsi);
/* evfsctui */
HELPER_SPE_VECTOR_CONV(fsctui);
/* evfsctsiz */
HELPER_SPE_VECTOR_CONV(fsctsiz);
/* evfsctuiz */
HELPER_SPE_VECTOR_CONV(fsctuiz);
/* evfsctsf */
HELPER_SPE_VECTOR_CONV(fsctsf);
/* evfsctuf */
HELPER_SPE_VECTOR_CONV(fsctuf);
/* Single-precision floating-point arithmetic */
static inline uint32_t efsadd(CPUPPCState *env, uint32_t op1, uint32_t op2)
{
    CPU_FloatU u1, u2;

    u1.l = op1;
    u2.l = op2;
    u1.f = float32_add(u1.f, u2.f, &env->vec_status);
    return u1.l;
}

static inline uint32_t efssub(CPUPPCState *env, uint32_t op1, uint32_t op2)
{
    CPU_FloatU u1, u2;

    u1.l = op1;
    u2.l = op2;
    u1.f = float32_sub(u1.f, u2.f, &env->vec_status);
    return u1.l;
}

static inline uint32_t efsmul(CPUPPCState *env, uint32_t op1, uint32_t op2)
{
    CPU_FloatU u1, u2;

    u1.l = op1;
    u2.l = op2;
    u1.f = float32_mul(u1.f, u2.f, &env->vec_status);
    return u1.l;
}

static inline uint32_t efsdiv(CPUPPCState *env, uint32_t op1, uint32_t op2)
{
    CPU_FloatU u1, u2;

    u1.l = op1;
    u2.l = op2;
    u1.f = float32_div(u1.f, u2.f, &env->vec_status);
    return u1.l;
}
#define HELPER_SPE_SINGLE_ARITH(name)                                     \
    uint32_t helper_e##name(CPUPPCState *env, uint32_t op1, uint32_t op2) \
    {                                                                     \
        return e##name(env, op1, op2);                                    \
    }
/* efsadd */
HELPER_SPE_SINGLE_ARITH(fsadd);
/* efssub */
HELPER_SPE_SINGLE_ARITH(fssub);
/* efsmul */
HELPER_SPE_SINGLE_ARITH(fsmul);
/* efsdiv */
HELPER_SPE_SINGLE_ARITH(fsdiv);
#define HELPER_SPE_VECTOR_ARITH(name)                                      \
    uint64_t helper_ev##name(CPUPPCState *env, uint64_t op1, uint64_t op2) \
    {                                                                      \
        return ((uint64_t)e##name(env, op1 >> 32, op2 >> 32) << 32) |      \
            (uint64_t)e##name(env, op1, op2);                              \
    }
/* evfsadd */
HELPER_SPE_VECTOR_ARITH(fsadd);
/* evfssub */
HELPER_SPE_VECTOR_ARITH(fssub);
/* evfsmul */
HELPER_SPE_VECTOR_ARITH(fsmul);
/* evfsdiv */
HELPER_SPE_VECTOR_ARITH(fsdiv);
/* Single-precision floating-point comparisons */
static inline uint32_t efscmplt(CPUPPCState *env, uint32_t op1, uint32_t op2)
{
    CPU_FloatU u1, u2;

    u1.l = op1;
    u2.l = op2;
    return float32_lt(u1.f, u2.f, &env->vec_status) ? 4 : 0;
}

static inline uint32_t efscmpgt(CPUPPCState *env, uint32_t op1, uint32_t op2)
{
    CPU_FloatU u1, u2;

    u1.l = op1;
    u2.l = op2;
    return float32_le(u1.f, u2.f, &env->vec_status) ? 0 : 4;
}

static inline uint32_t efscmpeq(CPUPPCState *env, uint32_t op1, uint32_t op2)
{
    CPU_FloatU u1, u2;

    u1.l = op1;
    u2.l = op2;
    return float32_eq(u1.f, u2.f, &env->vec_status) ? 4 : 0;
}

static inline uint32_t efststlt(CPUPPCState *env, uint32_t op1, uint32_t op2)
{
    /* XXX: TODO: ignore special values (NaN, infinities, ...) */
    return efscmplt(env, op1, op2);
}

static inline uint32_t efststgt(CPUPPCState *env, uint32_t op1, uint32_t op2)
{
    /* XXX: TODO: ignore special values (NaN, infinities, ...) */
    return efscmpgt(env, op1, op2);
}

static inline uint32_t efststeq(CPUPPCState *env, uint32_t op1, uint32_t op2)
{
    /* XXX: TODO: ignore special values (NaN, infinities, ...) */
    return efscmpeq(env, op1, op2);
}
#define HELPER_SINGLE_SPE_CMP(name)                                       \
    uint32_t helper_e##name(CPUPPCState *env, uint32_t op1, uint32_t op2) \
    {                                                                     \
        return e##name(env, op1, op2);                                    \
    }
/* efststlt */
HELPER_SINGLE_SPE_CMP(fststlt);
/* efststgt */
HELPER_SINGLE_SPE_CMP(fststgt);
/* efststeq */
HELPER_SINGLE_SPE_CMP(fststeq);
/* efscmplt */
HELPER_SINGLE_SPE_CMP(fscmplt);
/* efscmpgt */
HELPER_SINGLE_SPE_CMP(fscmpgt);
/* efscmpeq */
HELPER_SINGLE_SPE_CMP(fscmpeq);
static inline uint32_t evcmp_merge(int t0, int t1)
{
    return (t0 << 3) | (t1 << 2) | ((t0 | t1) << 1) | (t0 & t1);
}
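
/* Merges the high-lane and low-lane results into the CR pattern
 * hi || lo || (hi | lo) || (hi & lo).
 */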
#define HELPER_VECTOR_SPE_CMP(name)                                        \
    uint32_t helper_ev##name(CPUPPCState *env, uint64_t op1, uint64_t op2) \
    {                                                                      \
        return evcmp_merge(e##name(env, op1 >> 32, op2 >> 32),             \
                           e##name(env, op1, op2));                        \
    }
/* evfststlt */
HELPER_VECTOR_SPE_CMP(fststlt);
/* evfststgt */
HELPER_VECTOR_SPE_CMP(fststgt);
/* evfststeq */
HELPER_VECTOR_SPE_CMP(fststeq);
/* evfscmplt */
HELPER_VECTOR_SPE_CMP(fscmplt);
/* evfscmpgt */
HELPER_VECTOR_SPE_CMP(fscmpgt);
/* evfscmpeq */
HELPER_VECTOR_SPE_CMP(fscmpeq);
/* Double-precision floating-point conversion */
uint64_t helper_efdcfsi(CPUPPCState *env, uint32_t val)
{
    CPU_DoubleU u;

    u.d = int32_to_float64(val, &env->vec_status);

    return u.ll;
}

uint64_t helper_efdcfsid(CPUPPCState *env, uint64_t val)
{
    CPU_DoubleU u;

    u.d = int64_to_float64(val, &env->vec_status);

    return u.ll;
}

uint64_t helper_efdcfui(CPUPPCState *env, uint32_t val)
{
    CPU_DoubleU u;

    u.d = uint32_to_float64(val, &env->vec_status);

    return u.ll;
}

uint64_t helper_efdcfuid(CPUPPCState *env, uint64_t val)
{
    CPU_DoubleU u;

    u.d = uint64_to_float64(val, &env->vec_status);

    return u.ll;
}
uint32_t helper_efdctsi(CPUPPCState *env, uint64_t val)
{
    CPU_DoubleU u;

    u.ll = val;
    /* NaNs are not treated the way IEEE 754 specifies */
    if (unlikely(float64_is_any_nan(u.d))) {
        return 0;
    }

    return float64_to_int32(u.d, &env->vec_status);
}

uint32_t helper_efdctui(CPUPPCState *env, uint64_t val)
{
    CPU_DoubleU u;

    u.ll = val;
    /* NaNs are not treated the way IEEE 754 specifies */
    if (unlikely(float64_is_any_nan(u.d))) {
        return 0;
    }

    return float64_to_uint32(u.d, &env->vec_status);
}

uint32_t helper_efdctsiz(CPUPPCState *env, uint64_t val)
{
    CPU_DoubleU u;

    u.ll = val;
    /* NaNs are not treated the way IEEE 754 specifies */
    if (unlikely(float64_is_any_nan(u.d))) {
        return 0;
    }

    return float64_to_int32_round_to_zero(u.d, &env->vec_status);
}

uint64_t helper_efdctsidz(CPUPPCState *env, uint64_t val)
{
    CPU_DoubleU u;

    u.ll = val;
    /* NaNs are not treated the way IEEE 754 specifies */
    if (unlikely(float64_is_any_nan(u.d))) {
        return 0;
    }

    return float64_to_int64_round_to_zero(u.d, &env->vec_status);
}

uint32_t helper_efdctuiz(CPUPPCState *env, uint64_t val)
{
    CPU_DoubleU u;

    u.ll = val;
    /* NaNs are not treated the way IEEE 754 specifies */
    if (unlikely(float64_is_any_nan(u.d))) {
        return 0;
    }

    return float64_to_uint32_round_to_zero(u.d, &env->vec_status);
}

uint64_t helper_efdctuidz(CPUPPCState *env, uint64_t val)
{
    CPU_DoubleU u;

    u.ll = val;
    /* NaNs are not treated the way IEEE 754 specifies */
    if (unlikely(float64_is_any_nan(u.d))) {
        return 0;
    }

    return float64_to_uint64_round_to_zero(u.d, &env->vec_status);
}
uint64_t helper_efdcfsf(CPUPPCState *env, uint32_t val)
{
    CPU_DoubleU u;
    float64 tmp;

    u.d = int32_to_float64(val, &env->vec_status);
    tmp = int64_to_float64(1ULL << 32, &env->vec_status);
    u.d = float64_div(u.d, tmp, &env->vec_status);

    return u.ll;
}

uint64_t helper_efdcfuf(CPUPPCState *env, uint32_t val)
{
    CPU_DoubleU u;
    float64 tmp;

    u.d = uint32_to_float64(val, &env->vec_status);
    tmp = int64_to_float64(1ULL << 32, &env->vec_status);
    u.d = float64_div(u.d, tmp, &env->vec_status);

    return u.ll;
}
uint32_t helper_efdctsf(CPUPPCState *env, uint64_t val)
{
    CPU_DoubleU u;
    float64 tmp;

    u.ll = val;
    /* NaNs are not treated the way IEEE 754 specifies */
    if (unlikely(float64_is_any_nan(u.d))) {
        return 0;
    }
    tmp = uint64_to_float64(1ULL << 32, &env->vec_status);
    u.d = float64_mul(u.d, tmp, &env->vec_status);

    return float64_to_int32(u.d, &env->vec_status);
}

uint32_t helper_efdctuf(CPUPPCState *env, uint64_t val)
{
    CPU_DoubleU u;
    float64 tmp;

    u.ll = val;
    /* NaNs are not treated the way IEEE 754 specifies */
    if (unlikely(float64_is_any_nan(u.d))) {
        return 0;
    }
    tmp = uint64_to_float64(1ULL << 32, &env->vec_status);
    u.d = float64_mul(u.d, tmp, &env->vec_status);

    return float64_to_uint32(u.d, &env->vec_status);
}
uint32_t helper_efscfd(CPUPPCState *env, uint64_t val)
{
    CPU_DoubleU u1;
    CPU_FloatU u2;

    u1.ll = val;
    u2.f = float64_to_float32(u1.d, &env->vec_status);

    return u2.l;
}

uint64_t helper_efdcfs(CPUPPCState *env, uint32_t val)
{
    CPU_DoubleU u2;
    CPU_FloatU u1;

    u1.l = val;
    u2.d = float32_to_float64(u1.f, &env->vec_status);

    return u2.ll;
}
/* Double-precision floating-point arithmetic */
uint64_t helper_efdadd(CPUPPCState *env, uint64_t op1, uint64_t op2)
{
    CPU_DoubleU u1, u2;

    u1.ll = op1;
    u2.ll = op2;
    u1.d = float64_add(u1.d, u2.d, &env->vec_status);
    return u1.ll;
}

uint64_t helper_efdsub(CPUPPCState *env, uint64_t op1, uint64_t op2)
{
    CPU_DoubleU u1, u2;

    u1.ll = op1;
    u2.ll = op2;
    u1.d = float64_sub(u1.d, u2.d, &env->vec_status);
    return u1.ll;
}

uint64_t helper_efdmul(CPUPPCState *env, uint64_t op1, uint64_t op2)
{
    CPU_DoubleU u1, u2;

    u1.ll = op1;
    u2.ll = op2;
    u1.d = float64_mul(u1.d, u2.d, &env->vec_status);
    return u1.ll;
}

uint64_t helper_efddiv(CPUPPCState *env, uint64_t op1, uint64_t op2)
{
    CPU_DoubleU u1, u2;

    u1.ll = op1;
    u2.ll = op2;
    u1.d = float64_div(u1.d, u2.d, &env->vec_status);
    return u1.ll;
}
/* Double-precision floating-point helpers */
uint32_t helper_efdtstlt(CPUPPCState *env, uint64_t op1, uint64_t op2)
{
    CPU_DoubleU u1, u2;

    u1.ll = op1;
    u2.ll = op2;
    return float64_lt(u1.d, u2.d, &env->vec_status) ? 4 : 0;
}

uint32_t helper_efdtstgt(CPUPPCState *env, uint64_t op1, uint64_t op2)
{
    CPU_DoubleU u1, u2;

    u1.ll = op1;
    u2.ll = op2;
    return float64_le(u1.d, u2.d, &env->vec_status) ? 0 : 4;
}

uint32_t helper_efdtsteq(CPUPPCState *env, uint64_t op1, uint64_t op2)
{
    CPU_DoubleU u1, u2;

    u1.ll = op1;
    u2.ll = op2;
    return float64_eq_quiet(u1.d, u2.d, &env->vec_status) ? 4 : 0;
}

uint32_t helper_efdcmplt(CPUPPCState *env, uint64_t op1, uint64_t op2)
{
    /* XXX: TODO: test special values (NaN, infinities, ...) */
    return helper_efdtstlt(env, op1, op2);
}

uint32_t helper_efdcmpgt(CPUPPCState *env, uint64_t op1, uint64_t op2)
{
    /* XXX: TODO: test special values (NaN, infinities, ...) */
    return helper_efdtstgt(env, op1, op2);
}

uint32_t helper_efdcmpeq(CPUPPCState *env, uint64_t op1, uint64_t op2)
{
    /* XXX: TODO: test special values (NaN, infinities, ...) */
    return helper_efdtsteq(env, op1, op2);
}
#define float64_to_float64(x, env) x
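/* Identity conversion: presumably here so that type-parameterized macros
 * can apply tp##_to_float64 to float64 elements as a no-op.
 */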
/* VSX_ADD_SUB - VSX floating point add/subtract
 *   name  - instruction mnemonic
 *   op    - operation (add or sub)
 *   nels  - number of elements (1, 2 or 4)
 *   tp    - type (float32 or float64)
 *   fld   - vsr_t field (VsrD(*) or VsrW(*))
 *   sfprf - set FPRF
 *   r2sp  - round intermediate result to single precision
 */
#define VSX_ADD_SUB(name, op, nels, tp, fld, sfprf, r2sp)                    \
void helper_##name(CPUPPCState *env, uint32_t opcode)                        \
{                                                                            \
    ppc_vsr_t xt, xa, xb;                                                    \
    int i;                                                                   \
                                                                             \
    getVSR(xA(opcode), &xa, env);                                            \
    getVSR(xB(opcode), &xb, env);                                            \
    getVSR(xT(opcode), &xt, env);                                            \
    helper_reset_fpstatus(env);                                              \
                                                                             \
    for (i = 0; i < nels; i++) {                                             \
        float_status tstat = env->fp_status;                                 \
        set_float_exception_flags(0, &tstat);                                \
        xt.fld = tp##_##op(xa.fld, xb.fld, &tstat);                          \
        env->fp_status.float_exception_flags |= tstat.float_exception_flags; \
                                                                             \
        if (unlikely(tstat.float_exception_flags & float_flag_invalid)) {    \
            if (tp##_is_infinity(xa.fld) && tp##_is_infinity(xb.fld)) {      \
                float_invalid_op_excp(env, POWERPC_EXCP_FP_VXISI, sfprf);    \
            } else if (tp##_is_signaling_nan(xa.fld, &tstat) ||              \
                       tp##_is_signaling_nan(xb.fld, &tstat)) {              \
                float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, sfprf);   \
            }                                                                \
        }                                                                    \
                                                                             \
        if (r2sp) {                                                          \
            xt.fld = helper_frsp(env, xt.fld);                               \
        }                                                                    \
                                                                             \
        if (sfprf) {                                                         \
            helper_compute_fprf_float64(env, xt.fld);                        \
        }                                                                    \
    }                                                                        \
    putVSR(xT(opcode), &xt, env);                                            \
    float_check_status(env);                                                 \
}
VSX_ADD_SUB(xsadddp, add, 1, float64, VsrD(0), 1, 0)
VSX_ADD_SUB(xsaddsp, add, 1, float64, VsrD(0), 1, 1)
VSX_ADD_SUB(xvadddp, add, 2, float64, VsrD(i), 0, 0)
VSX_ADD_SUB(xvaddsp, add, 4, float32, VsrW(i), 0, 0)
VSX_ADD_SUB(xssubdp, sub, 1, float64, VsrD(0), 1, 0)
VSX_ADD_SUB(xssubsp, sub, 1, float64, VsrD(0), 1, 1)
VSX_ADD_SUB(xvsubdp, sub, 2, float64, VsrD(i), 0, 0)
VSX_ADD_SUB(xvsubsp, sub, 4, float32, VsrW(i), 0, 0)
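
/*
 * Note the pattern used by the VSX helpers above and below: each element
 * is computed with a local float_status copy (tstat) whose flags start
 * clear, so the per-element invalid-operation cause can be inspected
 * before the flags are ORed back into env->fp_status.
 */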
void helper_xsaddqp(CPUPPCState *env, uint32_t opcode)
{
    ppc_vsr_t xt, xa, xb;
    float_status tstat;

    getVSR(rA(opcode) + 32, &xa, env);
    getVSR(rB(opcode) + 32, &xb, env);
    getVSR(rD(opcode) + 32, &xt, env);
    helper_reset_fpstatus(env);

    tstat = env->fp_status;
    if (unlikely(Rc(opcode) != 0)) {
        tstat.float_rounding_mode = float_round_to_odd;
    }

    set_float_exception_flags(0, &tstat);
    xt.f128 = float128_add(xa.f128, xb.f128, &tstat);
    env->fp_status.float_exception_flags |= tstat.float_exception_flags;

    if (unlikely(tstat.float_exception_flags & float_flag_invalid)) {
        if (float128_is_infinity(xa.f128) && float128_is_infinity(xb.f128)) {
            float_invalid_op_excp(env, POWERPC_EXCP_FP_VXISI, 1);
        } else if (float128_is_signaling_nan(xa.f128, &tstat) ||
                   float128_is_signaling_nan(xb.f128, &tstat)) {
            float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 1);
        }
    }

    helper_compute_fprf_float128(env, xt.f128);

    putVSR(rD(opcode) + 32, &xt, env);
    float_check_status(env);
}
/* VSX_MUL - VSX floating point multiply
 *   op    - instruction mnemonic
 *   nels  - number of elements (1, 2 or 4)
 *   tp    - type (float32 or float64)
 *   fld   - vsr_t field (VsrD(*) or VsrW(*))
 *   sfprf - set FPRF
 *   r2sp  - round intermediate result to single precision
 */
#define VSX_MUL(op, nels, tp, fld, sfprf, r2sp)                              \
void helper_##op(CPUPPCState *env, uint32_t opcode)                          \
{                                                                            \
    ppc_vsr_t xt, xa, xb;                                                    \
    int i;                                                                   \
                                                                             \
    getVSR(xA(opcode), &xa, env);                                            \
    getVSR(xB(opcode), &xb, env);                                            \
    getVSR(xT(opcode), &xt, env);                                            \
    helper_reset_fpstatus(env);                                              \
                                                                             \
    for (i = 0; i < nels; i++) {                                             \
        float_status tstat = env->fp_status;                                 \
        set_float_exception_flags(0, &tstat);                                \
        xt.fld = tp##_mul(xa.fld, xb.fld, &tstat);                           \
        env->fp_status.float_exception_flags |= tstat.float_exception_flags; \
                                                                             \
        if (unlikely(tstat.float_exception_flags & float_flag_invalid)) {    \
            if ((tp##_is_infinity(xa.fld) && tp##_is_zero(xb.fld)) ||        \
                (tp##_is_infinity(xb.fld) && tp##_is_zero(xa.fld))) {        \
                float_invalid_op_excp(env, POWERPC_EXCP_FP_VXIMZ, sfprf);    \
            } else if (tp##_is_signaling_nan(xa.fld, &tstat) ||              \
                       tp##_is_signaling_nan(xb.fld, &tstat)) {              \
                float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, sfprf);   \
            }                                                                \
        }                                                                    \
                                                                             \
        if (r2sp) {                                                          \
            xt.fld = helper_frsp(env, xt.fld);                               \
        }                                                                    \
                                                                             \
        if (sfprf) {                                                         \
            helper_compute_fprf_float64(env, xt.fld);                        \
        }                                                                    \
    }                                                                        \
                                                                             \
    putVSR(xT(opcode), &xt, env);                                            \
    float_check_status(env);                                                 \
}

VSX_MUL(xsmuldp, 1, float64, VsrD(0), 1, 0)
VSX_MUL(xsmulsp, 1, float64, VsrD(0), 1, 1)
VSX_MUL(xvmuldp, 2, float64, VsrD(i), 0, 0)
VSX_MUL(xvmulsp, 4, float32, VsrW(i), 0, 0)
void helper_xsmulqp(CPUPPCState *env, uint32_t opcode)
{
    ppc_vsr_t xt, xa, xb;
    float_status tstat;

    getVSR(rA(opcode) + 32, &xa, env);
    getVSR(rB(opcode) + 32, &xb, env);
    getVSR(rD(opcode) + 32, &xt, env);

    helper_reset_fpstatus(env);
    tstat = env->fp_status;
    if (unlikely(Rc(opcode) != 0)) {
        tstat.float_rounding_mode = float_round_to_odd;
    }

    set_float_exception_flags(0, &tstat);
    xt.f128 = float128_mul(xa.f128, xb.f128, &tstat);
    env->fp_status.float_exception_flags |= tstat.float_exception_flags;

    if (unlikely(tstat.float_exception_flags & float_flag_invalid)) {
        if ((float128_is_infinity(xa.f128) && float128_is_zero(xb.f128)) ||
            (float128_is_infinity(xb.f128) && float128_is_zero(xa.f128))) {
            float_invalid_op_excp(env, POWERPC_EXCP_FP_VXIMZ, 1);
        } else if (float128_is_signaling_nan(xa.f128, &tstat) ||
                   float128_is_signaling_nan(xb.f128, &tstat)) {
            float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 1);
        }
    }
    helper_compute_fprf_float128(env, xt.f128);

    putVSR(rD(opcode) + 32, &xt, env);
    float_check_status(env);
}
/* VSX_DIV - VSX floating point divide
 *   op    - instruction mnemonic
 *   nels  - number of elements (1, 2 or 4)
 *   tp    - type (float32 or float64)
 *   fld   - vsr_t field (VsrD(*) or VsrW(*))
 *   sfprf - set FPRF
 *   r2sp  - round intermediate result to single precision
 */
#define VSX_DIV(op, nels, tp, fld, sfprf, r2sp)                              \
void helper_##op(CPUPPCState *env, uint32_t opcode)                          \
{                                                                            \
    ppc_vsr_t xt, xa, xb;                                                    \
    int i;                                                                   \
                                                                             \
    getVSR(xA(opcode), &xa, env);                                            \
    getVSR(xB(opcode), &xb, env);                                            \
    getVSR(xT(opcode), &xt, env);                                            \
    helper_reset_fpstatus(env);                                              \
                                                                             \
    for (i = 0; i < nels; i++) {                                             \
        float_status tstat = env->fp_status;                                 \
        set_float_exception_flags(0, &tstat);                                \
        xt.fld = tp##_div(xa.fld, xb.fld, &tstat);                           \
        env->fp_status.float_exception_flags |= tstat.float_exception_flags; \
                                                                             \
        if (unlikely(tstat.float_exception_flags & float_flag_invalid)) {    \
            if (tp##_is_infinity(xa.fld) && tp##_is_infinity(xb.fld)) {      \
                float_invalid_op_excp(env, POWERPC_EXCP_FP_VXIDI, sfprf);    \
            } else if (tp##_is_zero(xa.fld) &&                               \
                       tp##_is_zero(xb.fld)) {                               \
                float_invalid_op_excp(env, POWERPC_EXCP_FP_VXZDZ, sfprf);    \
            } else if (tp##_is_signaling_nan(xa.fld, &tstat) ||              \
                       tp##_is_signaling_nan(xb.fld, &tstat)) {              \
                float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, sfprf);   \
            }                                                                \
        }                                                                    \
        if (unlikely(tstat.float_exception_flags & float_flag_divbyzero)) {  \
            float_zero_divide_excp(env, GETPC());                            \
        }                                                                    \
                                                                             \
        if (r2sp) {                                                          \
            xt.fld = helper_frsp(env, xt.fld);                               \
        }                                                                    \
                                                                             \
        if (sfprf) {                                                         \
            helper_compute_fprf_float64(env, xt.fld);                        \
        }                                                                    \
    }                                                                        \
                                                                             \
    putVSR(xT(opcode), &xt, env);                                            \
    float_check_status(env);                                                 \
}

VSX_DIV(xsdivdp, 1, float64, VsrD(0), 1, 0)
VSX_DIV(xsdivsp, 1, float64, VsrD(0), 1, 1)
VSX_DIV(xvdivdp, 2, float64, VsrD(i), 0, 0)
VSX_DIV(xvdivsp, 4, float32, VsrW(i), 0, 0)
void helper_xsdivqp(CPUPPCState *env, uint32_t opcode)
{
    ppc_vsr_t xt, xa, xb;
    float_status tstat;

    getVSR(rA(opcode) + 32, &xa, env);
    getVSR(rB(opcode) + 32, &xb, env);
    getVSR(rD(opcode) + 32, &xt, env);

    helper_reset_fpstatus(env);
    tstat = env->fp_status;
    if (unlikely(Rc(opcode) != 0)) {
        tstat.float_rounding_mode = float_round_to_odd;
    }

    set_float_exception_flags(0, &tstat);
    xt.f128 = float128_div(xa.f128, xb.f128, &tstat);
    env->fp_status.float_exception_flags |= tstat.float_exception_flags;

    if (unlikely(tstat.float_exception_flags & float_flag_invalid)) {
        if (float128_is_infinity(xa.f128) && float128_is_infinity(xb.f128)) {
            float_invalid_op_excp(env, POWERPC_EXCP_FP_VXIDI, 1);
        } else if (float128_is_zero(xa.f128) &&
                   float128_is_zero(xb.f128)) {
            float_invalid_op_excp(env, POWERPC_EXCP_FP_VXZDZ, 1);
        } else if (float128_is_signaling_nan(xa.f128, &tstat) ||
                   float128_is_signaling_nan(xb.f128, &tstat)) {
            float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 1);
        }
    }
    if (unlikely(tstat.float_exception_flags & float_flag_divbyzero)) {
        float_zero_divide_excp(env, GETPC());
    }

    helper_compute_fprf_float128(env, xt.f128);
    putVSR(rD(opcode) + 32, &xt, env);
    float_check_status(env);
}
/* VSX_RE - VSX floating point reciprocal estimate
 *   op    - instruction mnemonic
 *   nels  - number of elements (1, 2 or 4)
 *   tp    - type (float32 or float64)
 *   fld   - vsr_t field (VsrD(*) or VsrW(*))
 *   sfprf - set FPRF
 *   r2sp  - round intermediate result to single precision
 */
#define VSX_RE(op, nels, tp, fld, sfprf, r2sp)                                \
void helper_##op(CPUPPCState *env, uint32_t opcode)                           \
{                                                                             \
    ppc_vsr_t xt, xb;                                                         \
    int i;                                                                    \
                                                                              \
    getVSR(xB(opcode), &xb, env);                                             \
    getVSR(xT(opcode), &xt, env);                                             \
    helper_reset_fpstatus(env);                                               \
                                                                              \
    for (i = 0; i < nels; i++) {                                              \
        if (unlikely(tp##_is_signaling_nan(xb.fld, &env->fp_status))) {       \
            float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, sfprf);        \
        }                                                                     \
        xt.fld = tp##_div(tp##_one, xb.fld, &env->fp_status);                 \
                                                                              \
        if (r2sp) {                                                           \
            xt.fld = helper_frsp(env, xt.fld);                                \
        }                                                                     \
                                                                              \
        if (sfprf) {                                                          \
            helper_compute_fprf_float64(env, xt.fld);                         \
        }                                                                     \
    }                                                                         \
                                                                              \
    putVSR(xT(opcode), &xt, env);                                             \
    float_check_status(env);                                                  \
}

VSX_RE(xsredp, 1, float64, VsrD(0), 1, 0)
VSX_RE(xsresp, 1, float64, VsrD(0), 1, 1)
VSX_RE(xvredp, 2, float64, VsrD(i), 0, 0)
VSX_RE(xvresp, 4, float32, VsrW(i), 0, 0)
/* VSX_SQRT - VSX floating point square root
 *   op    - instruction mnemonic
 *   nels  - number of elements (1, 2 or 4)
 *   tp    - type (float32 or float64)
 *   fld   - vsr_t field (VsrD(*) or VsrW(*))
 *   sfprf - set FPRF
 *   r2sp  - round intermediate result to single precision
 */
#define VSX_SQRT(op, nels, tp, fld, sfprf, r2sp)                             \
void helper_##op(CPUPPCState *env, uint32_t opcode)                          \
{                                                                            \
    ppc_vsr_t xt, xb;                                                        \
    int i;                                                                   \
                                                                             \
    getVSR(xB(opcode), &xb, env);                                            \
    getVSR(xT(opcode), &xt, env);                                            \
    helper_reset_fpstatus(env);                                              \
                                                                             \
    for (i = 0; i < nels; i++) {                                             \
        float_status tstat = env->fp_status;                                 \
        set_float_exception_flags(0, &tstat);                                \
        xt.fld = tp##_sqrt(xb.fld, &tstat);                                  \
        env->fp_status.float_exception_flags |= tstat.float_exception_flags; \
                                                                             \
        if (unlikely(tstat.float_exception_flags & float_flag_invalid)) {    \
            if (tp##_is_neg(xb.fld) && !tp##_is_zero(xb.fld)) {              \
                float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSQRT, sfprf);   \
            } else if (tp##_is_signaling_nan(xb.fld, &tstat)) {              \
                float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, sfprf);   \
            }                                                                \
        }                                                                    \
                                                                             \
        if (r2sp) {                                                          \
            xt.fld = helper_frsp(env, xt.fld);                               \
        }                                                                    \
                                                                             \
        if (sfprf) {                                                         \
            helper_compute_fprf_float64(env, xt.fld);                        \
        }                                                                    \
    }                                                                        \
                                                                             \
    putVSR(xT(opcode), &xt, env);                                            \
    float_check_status(env);                                                 \
}

VSX_SQRT(xssqrtdp, 1, float64, VsrD(0), 1, 0)
VSX_SQRT(xssqrtsp, 1, float64, VsrD(0), 1, 1)
VSX_SQRT(xvsqrtdp, 2, float64, VsrD(i), 0, 0)
VSX_SQRT(xvsqrtsp, 4, float32, VsrW(i), 0, 0)

/* VSX_RSQRTE - VSX floating point reciprocal square root estimate
 *   op    - instruction mnemonic
 *   nels  - number of elements (1, 2 or 4)
 *   tp    - type (float32 or float64)
 *   fld   - vsr_t field (VsrD(*) or VsrW(*))
 *   sfprf - set FPRF
 *   r2sp  - round intermediate result to single precision
 */
#define VSX_RSQRTE(op, nels, tp, fld, sfprf, r2sp)                           \
void helper_##op(CPUPPCState *env, uint32_t opcode)                          \
{                                                                            \
    ppc_vsr_t xt, xb;                                                        \
    int i;                                                                   \
                                                                             \
    getVSR(xB(opcode), &xb, env);                                            \
    getVSR(xT(opcode), &xt, env);                                            \
    helper_reset_fpstatus(env);                                              \
                                                                             \
    for (i = 0; i < nels; i++) {                                             \
        float_status tstat = env->fp_status;                                 \
        set_float_exception_flags(0, &tstat);                                \
        xt.fld = tp##_sqrt(xb.fld, &tstat);                                  \
        xt.fld = tp##_div(tp##_one, xt.fld, &tstat);                         \
        env->fp_status.float_exception_flags |= tstat.float_exception_flags; \
                                                                             \
        if (unlikely(tstat.float_exception_flags & float_flag_invalid)) {    \
            if (tp##_is_neg(xb.fld) && !tp##_is_zero(xb.fld)) {              \
                float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSQRT, sfprf);   \
            } else if (tp##_is_signaling_nan(xb.fld, &tstat)) {              \
                float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, sfprf);   \
            }                                                                \
        }                                                                    \
                                                                             \
        if (r2sp) {                                                          \
            xt.fld = helper_frsp(env, xt.fld);                               \
        }                                                                    \
                                                                             \
        if (sfprf) {                                                         \
            helper_compute_fprf_float64(env, xt.fld);                        \
        }                                                                    \
    }                                                                        \
                                                                             \
    putVSR(xT(opcode), &xt, env);                                            \
    float_check_status(env);                                                 \
}

VSX_RSQRTE(xsrsqrtedp, 1, float64, VsrD(0), 1, 0)
VSX_RSQRTE(xsrsqrtesp, 1, float64, VsrD(0), 1, 1)
VSX_RSQRTE(xvrsqrtedp, 2, float64, VsrD(i), 0, 0)
VSX_RSQRTE(xvrsqrtesp, 4, float32, VsrW(i), 0, 0)
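
/*
 * As with VSX_RE, the reciprocal square root estimate is built from two
 * correctly-rounded softfloat operations (sqrt then divide), so the
 * emulated result is more accurate than the architecture requires of an
 * estimate instruction.
 */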

/* VSX_TDIV - VSX floating point test for divide
 *   op    - instruction mnemonic
 *   nels  - number of elements (1, 2 or 4)
 *   tp    - type (float32 or float64)
 *   fld   - vsr_t field (VsrD(*) or VsrW(*))
 *   emin  - minimum unbiased exponent
 *   emax  - maximum unbiased exponent
 *   nbits - number of fraction bits
 */
#define VSX_TDIV(op, nels, tp, fld, emin, emax, nbits)                       \
void helper_##op(CPUPPCState *env, uint32_t opcode)                          \
{                                                                            \
    ppc_vsr_t xa, xb;                                                        \
    int i;                                                                   \
    int fe_flag = 0;                                                         \
    int fg_flag = 0;                                                         \
                                                                             \
    getVSR(xA(opcode), &xa, env);                                            \
    getVSR(xB(opcode), &xb, env);                                            \
                                                                             \
    for (i = 0; i < nels; i++) {                                             \
        if (unlikely(tp##_is_infinity(xa.fld) ||                             \
                     tp##_is_infinity(xb.fld) ||                             \
                     tp##_is_zero(xb.fld))) {                                \
            fe_flag = 1;                                                     \
            fg_flag = 1;                                                     \
        } else {                                                             \
            int e_a = ppc_##tp##_get_unbiased_exp(xa.fld);                   \
            int e_b = ppc_##tp##_get_unbiased_exp(xb.fld);                   \
                                                                             \
            if (unlikely(tp##_is_any_nan(xa.fld) ||                          \
                         tp##_is_any_nan(xb.fld))) {                         \
                fe_flag = 1;                                                 \
            } else if ((e_b <= emin) || (e_b >= (emax - 2))) {               \
                fe_flag = 1;                                                 \
            } else if (!tp##_is_zero(xa.fld) &&                              \
                       (((e_a - e_b) >= emax) ||                             \
                        ((e_a - e_b) <= (emin + 1)) ||                       \
                        (e_a <= (emin + nbits)))) {                          \
                fe_flag = 1;                                                 \
            }                                                                \
                                                                             \
            if (unlikely(tp##_is_zero_or_denormal(xb.fld))) {                \
                /* XB is not zero because of the above check and */          \
                /* so must be denormalized.                      */          \
                fg_flag = 1;                                                 \
            }                                                                \
        }                                                                    \
    }                                                                        \
                                                                             \
    env->crf[BF(opcode)] = 0x8 | (fg_flag ? 4 : 0) | (fe_flag ? 2 : 0);      \
}

VSX_TDIV(xstdivdp, 1, float64, VsrD(0), -1022, 1023, 52)
VSX_TDIV(xvtdivdp, 2, float64, VsrD(i), -1022, 1023, 52)
VSX_TDIV(xvtdivsp, 4, float32, VsrW(i), -126, 127, 23)
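
/*
 * The 4-bit CR field is built as 0x8 | FG << 2 | FE << 1.  For example,
 * xstdivdp with xa = 1.0 and xb = 0.0 takes the infinity/zero fast path
 * above, sets both flags, and leaves 0xE in CR[BF].
 */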

/* VSX_TSQRT - VSX floating point test for square root
 *   op    - instruction mnemonic
 *   nels  - number of elements (1, 2 or 4)
 *   tp    - type (float32 or float64)
 *   fld   - vsr_t field (VsrD(*) or VsrW(*))
 *   emin  - minimum unbiased exponent
 *   nbits - number of fraction bits
 */
#define VSX_TSQRT(op, nels, tp, fld, emin, nbits)                            \
void helper_##op(CPUPPCState *env, uint32_t opcode)                          \
{                                                                            \
    ppc_vsr_t xa, xb;                                                        \
    int i;                                                                   \
    int fe_flag = 0;                                                         \
    int fg_flag = 0;                                                         \
                                                                             \
    getVSR(xA(opcode), &xa, env);                                            \
    getVSR(xB(opcode), &xb, env);                                            \
                                                                             \
    for (i = 0; i < nels; i++) {                                             \
        if (unlikely(tp##_is_infinity(xb.fld) ||                             \
                     tp##_is_zero(xb.fld))) {                                \
            fe_flag = 1;                                                     \
            fg_flag = 1;                                                     \
        } else {                                                             \
            int e_b = ppc_##tp##_get_unbiased_exp(xb.fld);                   \
                                                                             \
            if (unlikely(tp##_is_any_nan(xb.fld))) {                         \
                fe_flag = 1;                                                 \
            } else if (unlikely(tp##_is_zero(xb.fld))) {                     \
                fe_flag = 1;                                                 \
            } else if (unlikely(tp##_is_neg(xb.fld))) {                      \
                fe_flag = 1;                                                 \
            } else if (!tp##_is_zero(xb.fld) &&                              \
                       (e_b <= (emin + nbits))) {                            \
                fe_flag = 1;                                                 \
            }                                                                \
                                                                             \
            if (unlikely(tp##_is_zero_or_denormal(xb.fld))) {                \
                /* XB is not zero because of the above check and */          \
                /* therefore must be denormalized.               */          \
                fg_flag = 1;                                                 \
            }                                                                \
        }                                                                    \
    }                                                                        \
                                                                             \
    env->crf[BF(opcode)] = 0x8 | (fg_flag ? 4 : 0) | (fe_flag ? 2 : 0);      \
}

VSX_TSQRT(xstsqrtdp, 1, float64, VsrD(0), -1022, 52)
VSX_TSQRT(xvtsqrtdp, 2, float64, VsrD(i), -1022, 52)
VSX_TSQRT(xvtsqrtsp, 4, float32, VsrW(i), -126, 23)
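
/*
 * Note: inside the else branch above xb is already known to be neither
 * zero nor infinity, so the zero tests there can never fire; they appear
 * to be kept only for symmetry with the architected FE condition list.
 */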

/* VSX_MADD - VSX floating point multiply/add variations
 *   op       - instruction mnemonic
 *   nels     - number of elements (1, 2 or 4)
 *   tp       - type (float32 or float64)
 *   fld      - vsr_t field (VsrD(*) or VsrW(*))
 *   maddflgs - flags for the float*muladd routine that control the
 *              various forms (madd, msub, nmadd, nmsub)
 *   afrm     - A form (1=A, 0=M)
 *   sfprf    - set FPRF
 *   r2sp     - round intermediate result to single precision
 */
#define VSX_MADD(op, nels, tp, fld, maddflgs, afrm, sfprf, r2sp)             \
void helper_##op(CPUPPCState *env, uint32_t opcode)                          \
{                                                                            \
    ppc_vsr_t xt_in, xa, xb, xt_out;                                         \
    ppc_vsr_t *b, *c;                                                        \
    int i;                                                                   \
                                                                             \
    if (afrm) { /* AxB + T */                                                \
        b = &xb;                                                             \
        c = &xt_in;                                                          \
    } else { /* AxT + B */                                                   \
        b = &xt_in;                                                          \
        c = &xb;                                                             \
    }                                                                        \
                                                                             \
    getVSR(xA(opcode), &xa, env);                                            \
    getVSR(xB(opcode), &xb, env);                                            \
    getVSR(xT(opcode), &xt_in, env);                                         \
                                                                             \
    xt_out = xt_in;                                                          \
                                                                             \
    helper_reset_fpstatus(env);                                              \
                                                                             \
    for (i = 0; i < nels; i++) {                                             \
        float_status tstat = env->fp_status;                                 \
        set_float_exception_flags(0, &tstat);                                \
        if (r2sp && (tstat.float_rounding_mode == float_round_nearest_even)) {\
            /* Avoid double rounding errors by rounding the intermediate */  \
            /* result to odd.                                            */  \
            set_float_rounding_mode(float_round_to_zero, &tstat);            \
            xt_out.fld = tp##_muladd(xa.fld, b->fld, c->fld,                 \
                                     maddflgs, &tstat);                      \
            xt_out.fld |= (get_float_exception_flags(&tstat) &               \
                           float_flag_inexact) != 0;                         \
        } else {                                                             \
            xt_out.fld = tp##_muladd(xa.fld, b->fld, c->fld,                 \
                                     maddflgs, &tstat);                      \
        }                                                                    \
        env->fp_status.float_exception_flags |= tstat.float_exception_flags; \
                                                                             \
        if (unlikely(tstat.float_exception_flags & float_flag_invalid)) {    \
            tp##_maddsub_update_excp(env, xa.fld, b->fld, c->fld, maddflgs); \
        }                                                                    \
                                                                             \
        if (r2sp) {                                                          \
            xt_out.fld = helper_frsp(env, xt_out.fld);                       \
        }                                                                    \
                                                                             \
        if (sfprf) {                                                         \
            helper_compute_fprf_float64(env, xt_out.fld);                    \
        }                                                                    \
    }                                                                        \
    putVSR(xT(opcode), &xt_out, env);                                        \
    float_check_status(env);                                                 \
}

VSX_MADD(xsmaddadp, 1, float64, VsrD(0), MADD_FLGS, 1, 1, 0)
VSX_MADD(xsmaddmdp, 1, float64, VsrD(0), MADD_FLGS, 0, 1, 0)
VSX_MADD(xsmsubadp, 1, float64, VsrD(0), MSUB_FLGS, 1, 1, 0)
VSX_MADD(xsmsubmdp, 1, float64, VsrD(0), MSUB_FLGS, 0, 1, 0)
VSX_MADD(xsnmaddadp, 1, float64, VsrD(0), NMADD_FLGS, 1, 1, 0)
VSX_MADD(xsnmaddmdp, 1, float64, VsrD(0), NMADD_FLGS, 0, 1, 0)
VSX_MADD(xsnmsubadp, 1, float64, VsrD(0), NMSUB_FLGS, 1, 1, 0)
VSX_MADD(xsnmsubmdp, 1, float64, VsrD(0), NMSUB_FLGS, 0, 1, 0)

VSX_MADD(xsmaddasp, 1, float64, VsrD(0), MADD_FLGS, 1, 1, 1)
VSX_MADD(xsmaddmsp, 1, float64, VsrD(0), MADD_FLGS, 0, 1, 1)
VSX_MADD(xsmsubasp, 1, float64, VsrD(0), MSUB_FLGS, 1, 1, 1)
VSX_MADD(xsmsubmsp, 1, float64, VsrD(0), MSUB_FLGS, 0, 1, 1)
VSX_MADD(xsnmaddasp, 1, float64, VsrD(0), NMADD_FLGS, 1, 1, 1)
VSX_MADD(xsnmaddmsp, 1, float64, VsrD(0), NMADD_FLGS, 0, 1, 1)
VSX_MADD(xsnmsubasp, 1, float64, VsrD(0), NMSUB_FLGS, 1, 1, 1)
VSX_MADD(xsnmsubmsp, 1, float64, VsrD(0), NMSUB_FLGS, 0, 1, 1)

VSX_MADD(xvmaddadp, 2, float64, VsrD(i), MADD_FLGS, 1, 0, 0)
VSX_MADD(xvmaddmdp, 2, float64, VsrD(i), MADD_FLGS, 0, 0, 0)
VSX_MADD(xvmsubadp, 2, float64, VsrD(i), MSUB_FLGS, 1, 0, 0)
VSX_MADD(xvmsubmdp, 2, float64, VsrD(i), MSUB_FLGS, 0, 0, 0)
VSX_MADD(xvnmaddadp, 2, float64, VsrD(i), NMADD_FLGS, 1, 0, 0)
VSX_MADD(xvnmaddmdp, 2, float64, VsrD(i), NMADD_FLGS, 0, 0, 0)
VSX_MADD(xvnmsubadp, 2, float64, VsrD(i), NMSUB_FLGS, 1, 0, 0)
VSX_MADD(xvnmsubmdp, 2, float64, VsrD(i), NMSUB_FLGS, 0, 0, 0)

VSX_MADD(xvmaddasp, 4, float32, VsrW(i), MADD_FLGS, 1, 0, 0)
VSX_MADD(xvmaddmsp, 4, float32, VsrW(i), MADD_FLGS, 0, 0, 0)
VSX_MADD(xvmsubasp, 4, float32, VsrW(i), MSUB_FLGS, 1, 0, 0)
VSX_MADD(xvmsubmsp, 4, float32, VsrW(i), MSUB_FLGS, 0, 0, 0)
VSX_MADD(xvnmaddasp, 4, float32, VsrW(i), NMADD_FLGS, 1, 0, 0)
VSX_MADD(xvnmaddmsp, 4, float32, VsrW(i), NMADD_FLGS, 0, 0, 0)
VSX_MADD(xvnmsubasp, 4, float32, VsrW(i), NMSUB_FLGS, 1, 0, 0)
VSX_MADD(xvnmsubmsp, 4, float32, VsrW(i), NMSUB_FLGS, 0, 0, 0)
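
/*
 * Rounding to zero and then ORing the inexact flag into the least
 * significant bit of the result implements round-to-odd: the
 * double-precision fused result becomes "sticky" in its last bit, so the
 * subsequent helper_frsp() round to single precision cannot suffer a
 * double rounding error.
 */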

/* VSX_SCALAR_CMP_DP - VSX scalar floating point compare double precision
 *   op    - instruction mnemonic
 *   cmp   - comparison operation
 *   exp   - expected result of comparison
 *   svxvc - set VXVC bit
 */
#define VSX_SCALAR_CMP_DP(op, cmp, exp, svxvc)                               \
void helper_##op(CPUPPCState *env, uint32_t opcode)                         \
{                                                                            \
    ppc_vsr_t xt, xa, xb;                                                    \
    bool vxsnan_flag = false, vxvc_flag = false, vex_flag = false;           \
                                                                             \
    getVSR(xA(opcode), &xa, env);                                            \
    getVSR(xB(opcode), &xb, env);                                            \
    getVSR(xT(opcode), &xt, env);                                            \
                                                                             \
    if (float64_is_signaling_nan(xa.VsrD(0), &env->fp_status) ||             \
        float64_is_signaling_nan(xb.VsrD(0), &env->fp_status)) {             \
        vxsnan_flag = true;                                                  \
        if (fpscr_ve == 0 && svxvc) {                                        \
            vxvc_flag = true;                                                \
        }                                                                    \
    } else if (svxvc) {                                                      \
        vxvc_flag = float64_is_quiet_nan(xa.VsrD(0), &env->fp_status) ||     \
                    float64_is_quiet_nan(xb.VsrD(0), &env->fp_status);       \
    }                                                                        \
    if (vxsnan_flag) {                                                       \
        float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 0);               \
    }                                                                        \
    if (vxvc_flag) {                                                         \
        float_invalid_op_excp(env, POWERPC_EXCP_FP_VXVC, 0);                 \
    }                                                                        \
    vex_flag = fpscr_ve && (vxvc_flag || vxsnan_flag);                       \
                                                                             \
    if (!vex_flag) {                                                         \
        if (float64_##cmp(xb.VsrD(0), xa.VsrD(0), &env->fp_status) == exp) { \
            xt.VsrD(0) = -1;                                                 \
            xt.VsrD(1) = 0;                                                  \
        } else {                                                             \
            xt.VsrD(0) = 0;                                                  \
            xt.VsrD(1) = 0;                                                  \
        }                                                                    \
    }                                                                        \
    putVSR(xT(opcode), &xt, env);                                            \
    helper_float_check_status(env);                                          \
}

VSX_SCALAR_CMP_DP(xscmpeqdp, eq, 1, 0)
VSX_SCALAR_CMP_DP(xscmpgedp, le, 1, 1)
VSX_SCALAR_CMP_DP(xscmpgtdp, lt, 1, 1)
VSX_SCALAR_CMP_DP(xscmpnedp, eq, 0, 0)
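
/*
 * The comparison above is invoked with the operands swapped (xb first),
 * so "greater or equal" and "greater than" are expressed through le and
 * lt: a >= b is exactly b <= a, and a > b is exactly b < a.
 */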

void helper_xscmpexpdp(CPUPPCState *env, uint32_t opcode)
{
    ppc_vsr_t xa, xb;
    int64_t exp_a, exp_b;
    uint32_t cc;

    getVSR(xA(opcode), &xa, env);
    getVSR(xB(opcode), &xb, env);

    exp_a = extract64(xa.VsrD(0), 52, 11);
    exp_b = extract64(xb.VsrD(0), 52, 11);

    if (unlikely(float64_is_any_nan(xa.VsrD(0)) ||
                 float64_is_any_nan(xb.VsrD(0)))) {
        cc = CRF_SO;
    } else {
        if (exp_a < exp_b) {
            cc = CRF_LT;
        } else if (exp_a > exp_b) {
            cc = CRF_GT;
        } else {
            cc = CRF_EQ;
        }
    }

    env->fpscr &= ~(0x0F << FPSCR_FPRF);
    env->fpscr |= cc << FPSCR_FPRF;
    env->crf[BF(opcode)] = cc;

    helper_float_check_status(env);
}

void helper_xscmpexpqp(CPUPPCState *env, uint32_t opcode)
{
    ppc_vsr_t xa, xb;
    int64_t exp_a, exp_b;
    uint32_t cc;

    getVSR(rA(opcode) + 32, &xa, env);
    getVSR(rB(opcode) + 32, &xb, env);

    exp_a = extract64(xa.VsrD(0), 48, 15);
    exp_b = extract64(xb.VsrD(0), 48, 15);

    if (unlikely(float128_is_any_nan(xa.f128) ||
                 float128_is_any_nan(xb.f128))) {
        cc = CRF_SO;
    } else {
        if (exp_a < exp_b) {
            cc = CRF_LT;
        } else if (exp_a > exp_b) {
            cc = CRF_GT;
        } else {
            cc = CRF_EQ;
        }
    }

    env->fpscr &= ~(0x0F << FPSCR_FPRF);
    env->fpscr |= cc << FPSCR_FPRF;
    env->crf[BF(opcode)] = cc;

    helper_float_check_status(env);
}
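
/*
 * Both exponent compares above operate on the raw biased exponent fields
 * as unsigned integers; no floating point comparison (and therefore no
 * FPSCR exception) is involved, only the NaN screen that forces the
 * result to "unordered" (CRF_SO).
 */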

#define VSX_SCALAR_CMP(op, ordered)                                          \
void helper_##op(CPUPPCState *env, uint32_t opcode)                          \
{                                                                            \
    ppc_vsr_t xa, xb;                                                        \
    uint32_t cc = 0;                                                         \
    bool vxsnan_flag = false, vxvc_flag = false;                             \
                                                                             \
    helper_reset_fpstatus(env);                                              \
    getVSR(xA(opcode), &xa, env);                                            \
    getVSR(xB(opcode), &xb, env);                                            \
                                                                             \
    if (float64_is_signaling_nan(xa.VsrD(0), &env->fp_status) ||             \
        float64_is_signaling_nan(xb.VsrD(0), &env->fp_status)) {             \
        vxsnan_flag = true;                                                  \
        cc = CRF_SO;                                                         \
        if (fpscr_ve == 0 && ordered) {                                      \
            vxvc_flag = true;                                                \
        }                                                                    \
    } else if (float64_is_quiet_nan(xa.VsrD(0), &env->fp_status) ||          \
               float64_is_quiet_nan(xb.VsrD(0), &env->fp_status)) {          \
        cc = CRF_SO;                                                         \
        if (ordered) {                                                       \
            vxvc_flag = true;                                                \
        }                                                                    \
    } else if (float64_lt(xa.VsrD(0), xb.VsrD(0), &env->fp_status)) {        \
        cc = CRF_LT;                                                         \
    } else if (!float64_le(xa.VsrD(0), xb.VsrD(0), &env->fp_status)) {       \
        cc = CRF_GT;                                                         \
    } else {                                                                 \
        cc = CRF_EQ;                                                         \
    }                                                                        \
    if (vxsnan_flag) {                                                       \
        float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 0);               \
    }                                                                        \
    if (vxvc_flag) {                                                         \
        float_invalid_op_excp(env, POWERPC_EXCP_FP_VXVC, 0);                 \
    }                                                                        \
                                                                             \
    env->fpscr &= ~(0x0F << FPSCR_FPRF);                                     \
    env->fpscr |= cc << FPSCR_FPRF;                                          \
    env->crf[BF(opcode)] = cc;                                               \
                                                                             \
    float_check_status(env);                                                 \
}

VSX_SCALAR_CMP(xscmpodp, 1)
VSX_SCALAR_CMP(xscmpudp, 0)

#define VSX_SCALAR_CMPQ(op, ordered)                                         \
void helper_##op(CPUPPCState *env, uint32_t opcode)                          \
{                                                                            \
    ppc_vsr_t xa, xb;                                                        \
    uint32_t cc = 0;                                                         \
    bool vxsnan_flag = false, vxvc_flag = false;                             \
                                                                             \
    helper_reset_fpstatus(env);                                              \
    getVSR(rA(opcode) + 32, &xa, env);                                       \
    getVSR(rB(opcode) + 32, &xb, env);                                       \
                                                                             \
    if (float128_is_signaling_nan(xa.f128, &env->fp_status) ||               \
        float128_is_signaling_nan(xb.f128, &env->fp_status)) {               \
        vxsnan_flag = true;                                                  \
        cc = CRF_SO;                                                         \
        if (fpscr_ve == 0 && ordered) {                                      \
            vxvc_flag = true;                                                \
        }                                                                    \
    } else if (float128_is_quiet_nan(xa.f128, &env->fp_status) ||            \
               float128_is_quiet_nan(xb.f128, &env->fp_status)) {            \
        cc = CRF_SO;                                                         \
        if (ordered) {                                                       \
            vxvc_flag = true;                                                \
        }                                                                    \
    } else if (float128_lt(xa.f128, xb.f128, &env->fp_status)) {             \
        cc = CRF_LT;                                                         \
    } else if (!float128_le(xa.f128, xb.f128, &env->fp_status)) {            \
        cc = CRF_GT;                                                         \
    } else {                                                                 \
        cc = CRF_EQ;                                                         \
    }                                                                        \
    if (vxsnan_flag) {                                                       \
        float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 0);               \
    }                                                                        \
    if (vxvc_flag) {                                                         \
        float_invalid_op_excp(env, POWERPC_EXCP_FP_VXVC, 0);                 \
    }                                                                        \
                                                                             \
    env->fpscr &= ~(0x0F << FPSCR_FPRF);                                     \
    env->fpscr |= cc << FPSCR_FPRF;                                          \
    env->crf[BF(opcode)] = cc;                                               \
                                                                             \
    float_check_status(env);                                                 \
}

VSX_SCALAR_CMPQ(xscmpoqp, 1)
VSX_SCALAR_CMPQ(xscmpuqp, 0)

/* VSX_MAX_MIN - VSX floating point maximum/minimum
 *   name - instruction mnemonic
 *   op   - operation (max or min)
 *   nels - number of elements (1, 2 or 4)
 *   tp   - type (float32 or float64)
 *   fld  - vsr_t field (VsrD(*) or VsrW(*))
 */
#define VSX_MAX_MIN(name, op, nels, tp, fld)                                 \
void helper_##name(CPUPPCState *env, uint32_t opcode)                        \
{                                                                            \
    ppc_vsr_t xt, xa, xb;                                                    \
    int i;                                                                   \
                                                                             \
    getVSR(xA(opcode), &xa, env);                                            \
    getVSR(xB(opcode), &xb, env);                                            \
    getVSR(xT(opcode), &xt, env);                                            \
                                                                             \
    for (i = 0; i < nels; i++) {                                             \
        xt.fld = tp##_##op(xa.fld, xb.fld, &env->fp_status);                 \
        if (unlikely(tp##_is_signaling_nan(xa.fld, &env->fp_status) ||       \
                     tp##_is_signaling_nan(xb.fld, &env->fp_status))) {      \
            float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 0);           \
        }                                                                    \
    }                                                                        \
                                                                             \
    putVSR(xT(opcode), &xt, env);                                            \
    float_check_status(env);                                                 \
}

VSX_MAX_MIN(xsmaxdp, maxnum, 1, float64, VsrD(0))
VSX_MAX_MIN(xvmaxdp, maxnum, 2, float64, VsrD(i))
VSX_MAX_MIN(xvmaxsp, maxnum, 4, float32, VsrW(i))
VSX_MAX_MIN(xsmindp, minnum, 1, float64, VsrD(0))
VSX_MAX_MIN(xvmindp, minnum, 2, float64, VsrD(i))
VSX_MAX_MIN(xvminsp, minnum, 4, float32, VsrW(i))

#define VSX_MAX_MINC(name, max)                                              \
void helper_##name(CPUPPCState *env, uint32_t opcode)                        \
{                                                                            \
    ppc_vsr_t xt, xa, xb;                                                    \
    bool vxsnan_flag = false, vex_flag = false;                              \
                                                                             \
    getVSR(rA(opcode) + 32, &xa, env);                                       \
    getVSR(rB(opcode) + 32, &xb, env);                                       \
    getVSR(rD(opcode) + 32, &xt, env);                                       \
                                                                             \
    if (unlikely(float64_is_any_nan(xa.VsrD(0)) ||                           \
                 float64_is_any_nan(xb.VsrD(0)))) {                          \
        if (float64_is_signaling_nan(xa.VsrD(0), &env->fp_status) ||         \
            float64_is_signaling_nan(xb.VsrD(0), &env->fp_status)) {         \
            vxsnan_flag = true;                                              \
        }                                                                    \
        xt.VsrD(0) = xb.VsrD(0);                                             \
    } else if ((max &&                                                       \
               !float64_lt(xa.VsrD(0), xb.VsrD(0), &env->fp_status)) ||      \
               (!max &&                                                      \
               float64_lt(xa.VsrD(0), xb.VsrD(0), &env->fp_status))) {       \
        xt.VsrD(0) = xa.VsrD(0);                                             \
    } else {                                                                 \
        xt.VsrD(0) = xb.VsrD(0);                                             \
    }                                                                        \
                                                                             \
    vex_flag = fpscr_ve & vxsnan_flag;                                       \
    if (vxsnan_flag) {                                                       \
        float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 0);               \
    }                                                                        \
    if (!vex_flag) {                                                         \
        putVSR(rD(opcode) + 32, &xt, env);                                   \
    }                                                                        \
}

VSX_MAX_MINC(xsmaxcdp, 1);
VSX_MAX_MINC(xsmincdp, 0);
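
/*
 * The "c" (compare) variants return the second operand (xb) unchanged
 * whenever either input is a NaN, mirroring the behaviour of a C-style
 * ternary compare, rather than the IEEE 754-2008 maxNum/minNum NaN
 * handling used by VSX_MAX_MIN above.
 */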

#define VSX_MAX_MINJ(name, max)                                              \
void helper_##name(CPUPPCState *env, uint32_t opcode)                        \
{                                                                            \
    ppc_vsr_t xt, xa, xb;                                                    \
    bool vxsnan_flag = false, vex_flag = false;                              \
                                                                             \
    getVSR(rA(opcode) + 32, &xa, env);                                       \
    getVSR(rB(opcode) + 32, &xb, env);                                       \
    getVSR(rD(opcode) + 32, &xt, env);                                       \
                                                                             \
    if (unlikely(float64_is_any_nan(xa.VsrD(0)))) {                          \
        if (float64_is_signaling_nan(xa.VsrD(0), &env->fp_status)) {         \
            vxsnan_flag = true;                                              \
        }                                                                    \
        xt.VsrD(0) = xa.VsrD(0);                                             \
    } else if (unlikely(float64_is_any_nan(xb.VsrD(0)))) {                   \
        if (float64_is_signaling_nan(xb.VsrD(0), &env->fp_status)) {         \
            vxsnan_flag = true;                                              \
        }                                                                    \
        xt.VsrD(0) = xb.VsrD(0);                                             \
    } else if (float64_is_zero(xa.VsrD(0)) &&                                \
               float64_is_zero(xb.VsrD(0))) {                                \
        if (max) {                                                           \
            if (!float64_is_neg(xa.VsrD(0)) || !float64_is_neg(xb.VsrD(0))) {\
                xt.VsrD(0) = 0ULL;                                           \
            } else {                                                         \
                xt.VsrD(0) = 0x8000000000000000ULL;                          \
            }                                                                \
        } else {                                                             \
            if (float64_is_neg(xa.VsrD(0)) || float64_is_neg(xb.VsrD(0))) {  \
                xt.VsrD(0) = 0x8000000000000000ULL;                          \
            } else {                                                         \
                xt.VsrD(0) = 0ULL;                                           \
            }                                                                \
        }                                                                    \
    } else if ((max &&                                                       \
               !float64_lt(xa.VsrD(0), xb.VsrD(0), &env->fp_status)) ||      \
               (!max &&                                                      \
               float64_lt(xa.VsrD(0), xb.VsrD(0), &env->fp_status))) {       \
        xt.VsrD(0) = xa.VsrD(0);                                             \
    } else {                                                                 \
        xt.VsrD(0) = xb.VsrD(0);                                             \
    }                                                                        \
                                                                             \
    vex_flag = fpscr_ve & vxsnan_flag;                                       \
    if (vxsnan_flag) {                                                       \
        float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 0);               \
    }                                                                        \
    if (!vex_flag) {                                                         \
        putVSR(rD(opcode) + 32, &xt, env);                                   \
    }                                                                        \
}

VSX_MAX_MINJ(xsmaxjdp, 1);
VSX_MAX_MINJ(xsminjdp, 0);
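
/*
 * The "j" variants additionally define the sign of a zero result when
 * both inputs are zeros: xsmaxjdp prefers +0 if either operand is +0,
 * xsminjdp prefers -0 if either operand is -0, and a NaN in xa takes
 * precedence over a NaN in xb.
 */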

/* VSX_CMP - VSX floating point compare
 *   op    - instruction mnemonic
 *   nels  - number of elements (1, 2 or 4)
 *   tp    - type (float32 or float64)
 *   fld   - vsr_t field (VsrD(*) or VsrW(*))
 *   cmp   - comparison operation
 *   svxvc - set VXVC bit
 *   exp   - expected result of comparison
 */
#define VSX_CMP(op, nels, tp, fld, cmp, svxvc, exp)                          \
void helper_##op(CPUPPCState *env, uint32_t opcode)                          \
{                                                                            \
    ppc_vsr_t xt, xa, xb;                                                    \
    int i;                                                                   \
    int all_true = 1;                                                        \
    int all_false = 1;                                                       \
                                                                             \
    getVSR(xA(opcode), &xa, env);                                            \
    getVSR(xB(opcode), &xb, env);                                            \
    getVSR(xT(opcode), &xt, env);                                            \
                                                                             \
    for (i = 0; i < nels; i++) {                                             \
        if (unlikely(tp##_is_any_nan(xa.fld) ||                              \
                     tp##_is_any_nan(xb.fld))) {                             \
            if (tp##_is_signaling_nan(xa.fld, &env->fp_status) ||            \
                tp##_is_signaling_nan(xb.fld, &env->fp_status)) {            \
                float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 0);       \
            }                                                                \
            if (svxvc) {                                                     \
                float_invalid_op_excp(env, POWERPC_EXCP_FP_VXVC, 0);         \
            }                                                                \
            xt.fld = 0;                                                      \
            all_true = 0;                                                    \
        } else {                                                             \
            if (tp##_##cmp(xb.fld, xa.fld, &env->fp_status) == exp) {        \
                xt.fld = -1;                                                 \
                all_false = 0;                                               \
            } else {                                                         \
                xt.fld = 0;                                                  \
                all_true = 0;                                                \
            }                                                                \
        }                                                                    \
    }                                                                        \
                                                                             \
    putVSR(xT(opcode), &xt, env);                                            \
    if ((opcode >> (31 - 21)) & 1) {                                         \
        env->crf[6] = (all_true ? 0x8 : 0) | (all_false ? 0x2 : 0);          \
    }                                                                        \
    float_check_status(env);                                                 \
}

VSX_CMP(xvcmpeqdp, 2, float64, VsrD(i), eq, 0, 1)
VSX_CMP(xvcmpgedp, 2, float64, VsrD(i), le, 1, 1)
VSX_CMP(xvcmpgtdp, 2, float64, VsrD(i), lt, 1, 1)
VSX_CMP(xvcmpnedp, 2, float64, VsrD(i), eq, 0, 0)
VSX_CMP(xvcmpeqsp, 4, float32, VsrW(i), eq, 0, 1)
VSX_CMP(xvcmpgesp, 4, float32, VsrW(i), le, 1, 1)
VSX_CMP(xvcmpgtsp, 4, float32, VsrW(i), lt, 1, 1)
VSX_CMP(xvcmpnesp, 4, float32, VsrW(i), eq, 0, 0)
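
/*
 * When the record bit is set, CR6 is written as 0b1000 if every element
 * compared true and 0b0010 if every element compared false; a mix leaves
 * CR6 = 0.  For example, xvcmpeqdp. of {1.0, 2.0} against {1.0, 3.0}
 * produces one true and one false element, so CR6 ends up all clear.
 */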

/* VSX_CVT_FP_TO_FP - VSX floating point/floating point conversion
 *   op    - instruction mnemonic
 *   nels  - number of elements (1, 2 or 4)
 *   stp   - source type (float32 or float64)
 *   ttp   - target type (float32 or float64)
 *   sfld  - source vsr_t field
 *   tfld  - target vsr_t field (f32 or f64)
 *   sfprf - set FPRF
 */
#define VSX_CVT_FP_TO_FP(op, nels, stp, ttp, sfld, tfld, sfprf)              \
void helper_##op(CPUPPCState *env, uint32_t opcode)                          \
{                                                                            \
    ppc_vsr_t xt, xb;                                                        \
    int i;                                                                   \
                                                                             \
    getVSR(xB(opcode), &xb, env);                                            \
    getVSR(xT(opcode), &xt, env);                                            \
                                                                             \
    for (i = 0; i < nels; i++) {                                             \
        xt.tfld = stp##_to_##ttp(xb.sfld, &env->fp_status);                  \
        if (unlikely(stp##_is_signaling_nan(xb.sfld,                         \
                                            &env->fp_status))) {             \
            float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 0);           \
            xt.tfld = ttp##_snan_to_qnan(xt.tfld);                           \
        }                                                                    \
        if (sfprf) {                                                         \
            helper_compute_fprf_##ttp(env, xt.tfld);                         \
        }                                                                    \
    }                                                                        \
                                                                             \
    putVSR(xT(opcode), &xt, env);                                            \
    float_check_status(env);                                                 \
}

VSX_CVT_FP_TO_FP(xscvdpsp, 1, float64, float32, VsrD(0), VsrW(0), 1)
VSX_CVT_FP_TO_FP(xscvspdp, 1, float32, float64, VsrW(0), VsrD(0), 1)
VSX_CVT_FP_TO_FP(xvcvdpsp, 2, float64, float32, VsrD(i), VsrW(2*i), 0)
VSX_CVT_FP_TO_FP(xvcvspdp, 2, float32, float64, VsrW(2*i), VsrD(i), 0)

/* VSX_CVT_FP_TO_FP_VECTOR - VSX floating point/floating point conversion
 *   op    - instruction mnemonic
 *   nels  - number of elements (1, 2 or 4)
 *   stp   - source type (float32 or float64)
 *   ttp   - target type (float32 or float64)
 *   sfld  - source vsr_t field
 *   tfld  - target vsr_t field (f32 or f64)
 *   sfprf - set FPRF
 */
#define VSX_CVT_FP_TO_FP_VECTOR(op, nels, stp, ttp, sfld, tfld, sfprf)       \
void helper_##op(CPUPPCState *env, uint32_t opcode)                          \
{                                                                            \
    ppc_vsr_t xt, xb;                                                        \
    int i;                                                                   \
                                                                             \
    getVSR(rB(opcode) + 32, &xb, env);                                       \
    getVSR(rD(opcode) + 32, &xt, env);                                       \
                                                                             \
    for (i = 0; i < nels; i++) {                                             \
        xt.tfld = stp##_to_##ttp(xb.sfld, &env->fp_status);                  \
        if (unlikely(stp##_is_signaling_nan(xb.sfld,                         \
                                            &env->fp_status))) {             \
            float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 0);           \
            xt.tfld = ttp##_snan_to_qnan(xt.tfld);                           \
        }                                                                    \
        if (sfprf) {                                                         \
            helper_compute_fprf_##ttp(env, xt.tfld);                         \
        }                                                                    \
    }                                                                        \
                                                                             \
    putVSR(rD(opcode) + 32, &xt, env);                                       \
    float_check_status(env);                                                 \
}

VSX_CVT_FP_TO_FP_VECTOR(xscvdpqp, 1, float64, float128, VsrD(0), f128, 1)

/* VSX_CVT_FP_TO_FP_HP - VSX floating point/floating point conversion
 *                       involving one half precision value
 *   op    - instruction mnemonic
 *   nels  - number of elements (1, 2 or 4)
 *   stp   - source type
 *   ttp   - target type
 *   sfld  - source vsr_t field
 *   tfld  - target vsr_t field
 *   sfprf - set FPRF
 */
#define VSX_CVT_FP_TO_FP_HP(op, nels, stp, ttp, sfld, tfld, sfprf)           \
void helper_##op(CPUPPCState *env, uint32_t opcode)                          \
{                                                                            \
    ppc_vsr_t xt, xb;                                                        \
    int i;                                                                   \
                                                                             \
    getVSR(xB(opcode), &xb, env);                                            \
    memset(&xt, 0, sizeof(xt));                                              \
                                                                             \
    for (i = 0; i < nels; i++) {                                             \
        xt.tfld = stp##_to_##ttp(xb.sfld, 1, &env->fp_status);               \
        if (unlikely(stp##_is_signaling_nan(xb.sfld,                         \
                                            &env->fp_status))) {             \
            float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 0);           \
            xt.tfld = ttp##_snan_to_qnan(xt.tfld);                           \
        }                                                                    \
        if (sfprf) {                                                         \
            helper_compute_fprf_##ttp(env, xt.tfld);                         \
        }                                                                    \
    }                                                                        \
                                                                             \
    putVSR(xT(opcode), &xt, env);                                            \
    float_check_status(env);                                                 \
}

VSX_CVT_FP_TO_FP_HP(xscvdphp, 1, float64, float16, VsrD(0), VsrH(3), 1)
VSX_CVT_FP_TO_FP_HP(xscvhpdp, 1, float16, float64, VsrH(3), VsrD(0), 1)
VSX_CVT_FP_TO_FP_HP(xvcvsphp, 4, float32, float16, VsrW(i), VsrH(2 * i + 1), 0)
VSX_CVT_FP_TO_FP_HP(xvcvhpsp, 4, float16, float32, VsrH(2 * i + 1), VsrW(i), 0)
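
/*
 * The extra `1` argument passed to the conversion is softfloat's ieee
 * flag, selecting the IEEE 754-2008 half-precision encoding (rather than
 * the ARM alternative format) for the float16 side of the conversion.
 */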

/*
 * xscvqpdp isn't using VSX_CVT_FP_TO_FP() because xscvqpdpo will be
 * added to this later.
 */
void helper_xscvqpdp(CPUPPCState *env, uint32_t opcode)
{
    ppc_vsr_t xt, xb;
    float_status tstat;

    getVSR(rB(opcode) + 32, &xb, env);
    memset(&xt, 0, sizeof(xt));

    tstat = env->fp_status;
    if (unlikely(Rc(opcode) != 0)) {
        tstat.float_rounding_mode = float_round_to_odd;
    }

    xt.VsrD(0) = float128_to_float64(xb.f128, &tstat);
    env->fp_status.float_exception_flags |= tstat.float_exception_flags;
    if (unlikely(float128_is_signaling_nan(xb.f128, &tstat))) {
        float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 0);
        xt.VsrD(0) = float64_snan_to_qnan(xt.VsrD(0));
    }
    helper_compute_fprf_float64(env, xt.VsrD(0));

    putVSR(rD(opcode) + 32, &xt, env);
    float_check_status(env);
}

uint64_t helper_xscvdpspn(CPUPPCState *env, uint64_t xb)
{
    float_status tstat = env->fp_status;
    set_float_exception_flags(0, &tstat);

    return (uint64_t)float64_to_float32(xb, &tstat) << 32;
}

uint64_t helper_xscvspdpn(CPUPPCState *env, uint64_t xb)
{
    float_status tstat = env->fp_status;
    set_float_exception_flags(0, &tstat);

    return float32_to_float64(xb >> 32, &tstat);
}
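
/*
 * The trailing "n" in xscvdpspn/xscvspdpn stands for non-signalling:
 * both helpers convert through a scratch float_status that is simply
 * discarded, so no exception flags or FPSCR state can be disturbed.
 */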

/* VSX_CVT_FP_TO_INT - VSX floating point to integer conversion
 *   op    - instruction mnemonic
 *   nels  - number of elements (1, 2 or 4)
 *   stp   - source type (float32 or float64)
 *   ttp   - target type (int32, uint32, int64 or uint64)
 *   sfld  - source vsr_t field
 *   tfld  - target vsr_t field
 *   rnan  - resulting NaN
 */
#define VSX_CVT_FP_TO_INT(op, nels, stp, ttp, sfld, tfld, rnan)              \
void helper_##op(CPUPPCState *env, uint32_t opcode)                          \
{                                                                            \
    ppc_vsr_t xt, xb;                                                        \
    int i;                                                                   \
                                                                             \
    getVSR(xB(opcode), &xb, env);                                            \
    getVSR(xT(opcode), &xt, env);                                            \
                                                                             \
    for (i = 0; i < nels; i++) {                                             \
        if (unlikely(stp##_is_any_nan(xb.sfld))) {                           \
            if (stp##_is_signaling_nan(xb.sfld, &env->fp_status)) {          \
                float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 0);       \
            }                                                                \
            float_invalid_op_excp(env, POWERPC_EXCP_FP_VXCVI, 0);            \
            xt.tfld = rnan;                                                  \
        } else {                                                             \
            xt.tfld = stp##_to_##ttp##_round_to_zero(xb.sfld,                \
                                                     &env->fp_status);       \
            if (env->fp_status.float_exception_flags & float_flag_invalid) { \
                float_invalid_op_excp(env, POWERPC_EXCP_FP_VXCVI, 0);        \
            }                                                                \
        }                                                                    \
    }                                                                        \
                                                                             \
    putVSR(xT(opcode), &xt, env);                                            \
    float_check_status(env);                                                 \
}

VSX_CVT_FP_TO_INT(xscvdpsxds, 1, float64, int64, VsrD(0), VsrD(0), \
                  0x8000000000000000ULL)
VSX_CVT_FP_TO_INT(xscvdpsxws, 1, float64, int32, VsrD(0), VsrW(1), \
                  0x80000000U)
VSX_CVT_FP_TO_INT(xscvdpuxds, 1, float64, uint64, VsrD(0), VsrD(0), 0ULL)
VSX_CVT_FP_TO_INT(xscvdpuxws, 1, float64, uint32, VsrD(0), VsrW(1), 0U)
VSX_CVT_FP_TO_INT(xvcvdpsxds, 2, float64, int64, VsrD(i), VsrD(i), \
                  0x8000000000000000ULL)
VSX_CVT_FP_TO_INT(xvcvdpsxws, 2, float64, int32, VsrD(i), VsrW(2*i), \
                  0x80000000U)
VSX_CVT_FP_TO_INT(xvcvdpuxds, 2, float64, uint64, VsrD(i), VsrD(i), 0ULL)
VSX_CVT_FP_TO_INT(xvcvdpuxws, 2, float64, uint32, VsrD(i), VsrW(2*i), 0U)
VSX_CVT_FP_TO_INT(xvcvspsxds, 2, float32, int64, VsrW(2*i), VsrD(i), \
                  0x8000000000000000ULL)
VSX_CVT_FP_TO_INT(xvcvspsxws, 4, float32, int32, VsrW(i), VsrW(i), 0x80000000U)
VSX_CVT_FP_TO_INT(xvcvspuxds, 2, float32, uint64, VsrW(2*i), VsrD(i), 0ULL)
VSX_CVT_FP_TO_INT(xvcvspuxws, 4, float32, uint32, VsrW(i), VsrW(i), 0U)
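
/*
 * rnan is the value stored when the source is a NaN: the most negative
 * integer (e.g. 0x8000000000000000 for int64) for the signed
 * conversions and zero for the unsigned ones, matching the architected
 * result for an invalid conversion.
 */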

/* VSX_CVT_FP_TO_INT_VECTOR - VSX floating point to integer conversion
 *   op   - instruction mnemonic
 *   stp  - source type (float32 or float64)
 *   ttp  - target type (int32, uint32, int64 or uint64)
 *   sfld - source vsr_t field
 *   tfld - target vsr_t field
 *   rnan - resulting NaN
 */
#define VSX_CVT_FP_TO_INT_VECTOR(op, stp, ttp, sfld, tfld, rnan)             \
void helper_##op(CPUPPCState *env, uint32_t opcode)                          \
{                                                                            \
    ppc_vsr_t xt, xb;                                                        \
                                                                             \
    getVSR(rB(opcode) + 32, &xb, env);                                       \
    memset(&xt, 0, sizeof(xt));                                              \
                                                                             \
    if (unlikely(stp##_is_any_nan(xb.sfld))) {                               \
        if (stp##_is_signaling_nan(xb.sfld, &env->fp_status)) {              \
            float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 0);           \
        }                                                                    \
        float_invalid_op_excp(env, POWERPC_EXCP_FP_VXCVI, 0);                \
        xt.tfld = rnan;                                                      \
    } else {                                                                 \
        xt.tfld = stp##_to_##ttp##_round_to_zero(xb.sfld,                    \
                                                 &env->fp_status);           \
        if (env->fp_status.float_exception_flags & float_flag_invalid) {     \
            float_invalid_op_excp(env, POWERPC_EXCP_FP_VXCVI, 0);            \
        }                                                                    \
    }                                                                        \
                                                                             \
    putVSR(rD(opcode) + 32, &xt, env);                                       \
    float_check_status(env);                                                 \
}

VSX_CVT_FP_TO_INT_VECTOR(xscvqpsdz, float128, int64, f128, VsrD(0), \
                         0x8000000000000000ULL)

VSX_CVT_FP_TO_INT_VECTOR(xscvqpswz, float128, int32, f128, VsrD(0), \
                         0xffffffff80000000ULL)
VSX_CVT_FP_TO_INT_VECTOR(xscvqpudz, float128, uint64, f128, VsrD(0), 0x0ULL)
VSX_CVT_FP_TO_INT_VECTOR(xscvqpuwz, float128, uint32, f128, VsrD(0), 0x0ULL)

/* VSX_CVT_INT_TO_FP - VSX integer to floating point conversion
 *   op    - instruction mnemonic
 *   nels  - number of elements (1, 2 or 4)
 *   stp   - source type (int32, uint32, int64 or uint64)
 *   ttp   - target type (float32 or float64)
 *   sfld  - source vsr_t field
 *   tfld  - target vsr_t field
 *   sfprf - set FPRF
 *   r2sp  - round intermediate result to single precision
 */
#define VSX_CVT_INT_TO_FP(op, nels, stp, ttp, sfld, tfld, sfprf, r2sp)       \
void helper_##op(CPUPPCState *env, uint32_t opcode)                          \
{                                                                            \
    ppc_vsr_t xt, xb;                                                        \
    int i;                                                                   \
                                                                             \
    getVSR(xB(opcode), &xb, env);                                            \
    getVSR(xT(opcode), &xt, env);                                            \
                                                                             \
    for (i = 0; i < nels; i++) {                                             \
        xt.tfld = stp##_to_##ttp(xb.sfld, &env->fp_status);                  \
        if (r2sp) {                                                          \
            xt.tfld = helper_frsp(env, xt.tfld);                             \
        }                                                                    \
        if (sfprf) {                                                         \
            helper_compute_fprf_float64(env, xt.tfld);                       \
        }                                                                    \
    }                                                                        \
                                                                             \
    putVSR(xT(opcode), &xt, env);                                            \
    float_check_status(env);                                                 \
}

VSX_CVT_INT_TO_FP(xscvsxddp, 1, int64, float64, VsrD(0), VsrD(0), 1, 0)
VSX_CVT_INT_TO_FP(xscvuxddp, 1, uint64, float64, VsrD(0), VsrD(0), 1, 0)
VSX_CVT_INT_TO_FP(xscvsxdsp, 1, int64, float64, VsrD(0), VsrD(0), 1, 1)
VSX_CVT_INT_TO_FP(xscvuxdsp, 1, uint64, float64, VsrD(0), VsrD(0), 1, 1)
VSX_CVT_INT_TO_FP(xvcvsxddp, 2, int64, float64, VsrD(i), VsrD(i), 0, 0)
VSX_CVT_INT_TO_FP(xvcvuxddp, 2, uint64, float64, VsrD(i), VsrD(i), 0, 0)
VSX_CVT_INT_TO_FP(xvcvsxwdp, 2, int32, float64, VsrW(2*i), VsrD(i), 0, 0)
VSX_CVT_INT_TO_FP(xvcvuxwdp, 2, uint64, float64, VsrW(2*i), VsrD(i), 0, 0)
VSX_CVT_INT_TO_FP(xvcvsxdsp, 2, int64, float32, VsrD(i), VsrW(2*i), 0, 0)
VSX_CVT_INT_TO_FP(xvcvuxdsp, 2, uint64, float32, VsrD(i), VsrW(2*i), 0, 0)
VSX_CVT_INT_TO_FP(xvcvsxwsp, 4, int32, float32, VsrW(i), VsrW(i), 0, 0)
VSX_CVT_INT_TO_FP(xvcvuxwsp, 4, uint32, float32, VsrW(i), VsrW(i), 0, 0)

/* VSX_CVT_INT_TO_FP_VECTOR - VSX integer to floating point conversion
 *   op   - instruction mnemonic
 *   stp  - source type (int32, uint32, int64 or uint64)
 *   ttp  - target type (float32 or float64)
 *   sfld - source vsr_t field
 *   tfld - target vsr_t field
 */
#define VSX_CVT_INT_TO_FP_VECTOR(op, stp, ttp, sfld, tfld)                   \
void helper_##op(CPUPPCState *env, uint32_t opcode)                          \
{                                                                            \
    ppc_vsr_t xt, xb;                                                        \
                                                                             \
    getVSR(rB(opcode) + 32, &xb, env);                                       \
    getVSR(rD(opcode) + 32, &xt, env);                                       \
                                                                             \
    xt.tfld = stp##_to_##ttp(xb.sfld, &env->fp_status);                      \
    helper_compute_fprf_##ttp(env, xt.tfld);                                 \
                                                                             \
    putVSR(rD(opcode) + 32, &xt, env);                                       \
    float_check_status(env);                                                 \
}

VSX_CVT_INT_TO_FP_VECTOR(xscvsdqp, int64, float128, VsrD(0), f128)
VSX_CVT_INT_TO_FP_VECTOR(xscvudqp, uint64, float128, VsrD(0), f128)

/* For "use current rounding mode", define a value that will not be one of
 * the existing rounding mode enums.
 */
#define FLOAT_ROUND_CURRENT (float_round_nearest_even + float_round_down + \
                             float_round_up + float_round_to_zero)
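
/*
 * With the softfloat enum values in use here this sum cannot collide
 * with any single rounding mode, so FLOAT_ROUND_CURRENT acts purely as
 * an out-of-band sentinel; it is never handed to
 * set_float_rounding_mode().
 */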

/* VSX_ROUND - VSX floating point round
 *   op    - instruction mnemonic
 *   nels  - number of elements (1, 2 or 4)
 *   tp    - type (float32 or float64)
 *   fld   - vsr_t field (VsrD(*) or VsrW(*))
 *   rmode - rounding mode
 *   sfprf - set FPRF
 */
#define VSX_ROUND(op, nels, tp, fld, rmode, sfprf)                           \
void helper_##op(CPUPPCState *env, uint32_t opcode)                          \
{                                                                            \
    ppc_vsr_t xt, xb;                                                        \
    int i;                                                                   \
    getVSR(xB(opcode), &xb, env);                                            \
    getVSR(xT(opcode), &xt, env);                                            \
                                                                             \
    if (rmode != FLOAT_ROUND_CURRENT) {                                      \
        set_float_rounding_mode(rmode, &env->fp_status);                     \
    }                                                                        \
                                                                             \
    for (i = 0; i < nels; i++) {                                             \
        if (unlikely(tp##_is_signaling_nan(xb.fld,                           \
                                           &env->fp_status))) {              \
            float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 0);           \
            xt.fld = tp##_snan_to_qnan(xb.fld);                              \
        } else {                                                             \
            xt.fld = tp##_round_to_int(xb.fld, &env->fp_status);             \
        }                                                                    \
        if (sfprf) {                                                         \
            helper_compute_fprf_float64(env, xt.fld);                        \
        }                                                                    \
    }                                                                        \
                                                                             \
    /* If this is not a "use current rounding mode" instruction,             \
     * then inhibit setting of the XX bit and restore rounding               \
     * mode from FPSCR */                                                    \
    if (rmode != FLOAT_ROUND_CURRENT) {                                      \
        fpscr_set_rounding_mode(env);                                        \
        env->fp_status.float_exception_flags &= ~float_flag_inexact;         \
    }                                                                        \
                                                                             \
    putVSR(xT(opcode), &xt, env);                                            \
    float_check_status(env);                                                 \
}

VSX_ROUND(xsrdpi, 1, float64, VsrD(0), float_round_ties_away, 1)
VSX_ROUND(xsrdpic, 1, float64, VsrD(0), FLOAT_ROUND_CURRENT, 1)
VSX_ROUND(xsrdpim, 1, float64, VsrD(0), float_round_down, 1)
VSX_ROUND(xsrdpip, 1, float64, VsrD(0), float_round_up, 1)
VSX_ROUND(xsrdpiz, 1, float64, VsrD(0), float_round_to_zero, 1)

VSX_ROUND(xvrdpi, 2, float64, VsrD(i), float_round_ties_away, 0)
VSX_ROUND(xvrdpic, 2, float64, VsrD(i), FLOAT_ROUND_CURRENT, 0)
VSX_ROUND(xvrdpim, 2, float64, VsrD(i), float_round_down, 0)
VSX_ROUND(xvrdpip, 2, float64, VsrD(i), float_round_up, 0)
VSX_ROUND(xvrdpiz, 2, float64, VsrD(i), float_round_to_zero, 0)

VSX_ROUND(xvrspi, 4, float32, VsrW(i), float_round_ties_away, 0)
VSX_ROUND(xvrspic, 4, float32, VsrW(i), FLOAT_ROUND_CURRENT, 0)
VSX_ROUND(xvrspim, 4, float32, VsrW(i), float_round_down, 0)
VSX_ROUND(xvrspip, 4, float32, VsrW(i), float_round_up, 0)
VSX_ROUND(xvrspiz, 4, float32, VsrW(i), float_round_to_zero, 0)
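
/*
 * The plain "i" forms (xsrdpi, xvrdpi, xvrspi) architecturally round to
 * nearest with ties away from zero, hence float_round_ties_away above
 * rather than the IEEE default ties-to-even.
 */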

uint64_t helper_xsrsp(CPUPPCState *env, uint64_t xb)
{
    helper_reset_fpstatus(env);

    uint64_t xt = helper_frsp(env, xb);

    helper_compute_fprf_float64(env, xt);
    float_check_status(env);
    return xt;
}

#define VSX_XXPERM(op, indexed)                                              \
void helper_##op(CPUPPCState *env, uint32_t opcode)                          \
{                                                                            \
    ppc_vsr_t xt, xa, pcv, xto;                                              \
    int i, idx;                                                              \
                                                                             \
    getVSR(xA(opcode), &xa, env);                                            \
    getVSR(xT(opcode), &xt, env);                                            \
    getVSR(xB(opcode), &pcv, env);                                           \
                                                                             \
    for (i = 0; i < 16; i++) {                                               \
        idx = pcv.VsrB(i) & 0x1F;                                            \
        if (indexed) {                                                       \
            idx = 31 - idx;                                                  \
        }                                                                    \
        xto.VsrB(i) = (idx <= 15) ? xa.VsrB(idx) : xt.VsrB(idx - 16);        \
    }                                                                        \
    putVSR(xT(opcode), &xto, env);                                           \
}

VSX_XXPERM(xxperm, 0)
VSX_XXPERM(xxpermr, 1)
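
/*
 * Each permute-control byte selects one of 32 source bytes: values 0-15
 * pick a byte from xa and 16-31 pick a byte from xt; xxpermr simply
 * mirrors the index (31 - idx) to match the reversed byte numbering of
 * the "r" form.
 */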

void helper_xvxsigsp(CPUPPCState *env, uint32_t opcode)
{
    ppc_vsr_t xt, xb;
    uint32_t exp, i, fraction;

    getVSR(xB(opcode), &xb, env);
    memset(&xt, 0, sizeof(xt));

    for (i = 0; i < 4; i++) {
        exp = (xb.VsrW(i) >> 23) & 0xFF;
        fraction = xb.VsrW(i) & 0x7FFFFF;
        if (exp != 0 && exp != 255) {
            xt.VsrW(i) = fraction | 0x00800000;
        } else {
            xt.VsrW(i) = fraction;
        }
    }
    putVSR(xT(opcode), &xt, env);
}
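
/*
 * xvxsigsp extracts the significand of each word: for a normal number
 * (exponent neither 0 nor 255) the implicit leading 1 is made explicit
 * by ORing in 0x00800000; zeros, denormals, infinities and NaNs return
 * just the raw 23-bit fraction.
 */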

/* VSX_TEST_DC - VSX floating point test data class
 *   op      - instruction mnemonic
 *   nels    - number of elements (1, 2 or 4)
 *   xbn     - VSR register number
 *   tp      - type (float32 or float64)
 *   fld     - vsr_t field (VsrD(*) or VsrW(*))
 *   tfld    - target vsr_t field (VsrD(*) or VsrW(*))
 *   fld_max - target field max
 *   scrf    - set result in CR and FPCC
 */
#define VSX_TEST_DC(op, nels, xbn, tp, fld, tfld, fld_max, scrf)             \
void helper_##op(CPUPPCState *env, uint32_t opcode)                          \
{                                                                            \
    ppc_vsr_t xt, xb;                                                        \
    uint32_t i, sign, dcmx;                                                  \
    uint32_t cc, match = 0;                                                  \
                                                                             \
    getVSR(xbn, &xb, env);                                                   \
    if (!scrf) {                                                             \
        memset(&xt, 0, sizeof(xt));                                          \
        dcmx = DCMX_XV(opcode);                                              \
    } else {                                                                 \
        dcmx = DCMX(opcode);                                                 \
    }                                                                        \
                                                                             \
    for (i = 0; i < nels; i++) {                                             \
        sign = tp##_is_neg(xb.fld);                                          \
        if (tp##_is_any_nan(xb.fld)) {                                       \
            match = extract32(dcmx, 6, 1);                                   \
        } else if (tp##_is_infinity(xb.fld)) {                               \
            match = extract32(dcmx, 4 + !sign, 1);                           \
        } else if (tp##_is_zero(xb.fld)) {                                   \
            match = extract32(dcmx, 2 + !sign, 1);                           \
        } else if (tp##_is_zero_or_denormal(xb.fld)) {                       \
            match = extract32(dcmx, 0 + !sign, 1);                           \
        }                                                                    \
                                                                             \
        if (scrf) {                                                          \
            cc = sign << CRF_LT_BIT | match << CRF_EQ_BIT;                   \
            env->fpscr &= ~(0x0F << FPSCR_FPRF);                             \
            env->fpscr |= cc << FPSCR_FPRF;                                  \
            env->crf[BF(opcode)] = cc;                                       \
        } else {                                                             \
            xt.tfld = match ? fld_max : 0;                                   \
        }                                                                    \
        match = 0;                                                           \
    }                                                                        \
    if (!scrf) {                                                             \
        putVSR(xT(opcode), &xt, env);                                        \
    }                                                                        \
}

VSX_TEST_DC(xvtstdcdp, 2, xB(opcode), float64, VsrD(i), VsrD(i), UINT64_MAX, 0)
VSX_TEST_DC(xvtstdcsp, 4, xB(opcode), float32, VsrW(i), VsrW(i), UINT32_MAX, 0)
VSX_TEST_DC(xststdcdp, 1, xB(opcode), float64, VsrD(0), VsrD(0), 0, 1)
VSX_TEST_DC(xststdcqp, 1, (rB(opcode) + 32), float128, f128, VsrD(0), 0, 1)
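
/*
 * The DCMX mask is tested one data class per bit: bit 6 selects NaN,
 * bits 5/4 select +/- infinity, bits 3/2 select +/- zero and bits 1/0
 * select +/- denormal; the "+" bit of each pair is reached through the
 * "+ !sign" offset above.
 */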

void helper_xststdcsp(CPUPPCState *env, uint32_t opcode)
{
    ppc_vsr_t xb;
    uint32_t dcmx, sign, exp;
    uint32_t cc, match = 0, not_sp = 0;

    getVSR(xB(opcode), &xb, env);
    dcmx = DCMX(opcode);
    exp = (xb.VsrD(0) >> 52) & 0x7FF;

    sign = float64_is_neg(xb.VsrD(0));
    if (float64_is_any_nan(xb.VsrD(0))) {
        match = extract32(dcmx, 6, 1);
    } else if (float64_is_infinity(xb.VsrD(0))) {
        match = extract32(dcmx, 4 + !sign, 1);
    } else if (float64_is_zero(xb.VsrD(0))) {
        match = extract32(dcmx, 2 + !sign, 1);
    } else if (float64_is_zero_or_denormal(xb.VsrD(0)) ||
               (exp > 0 && exp < 0x381)) {
        match = extract32(dcmx, 0 + !sign, 1);
    }

    not_sp = !float64_eq(xb.VsrD(0),
                         float32_to_float64(
                             float64_to_float32(xb.VsrD(0), &env->fp_status),
                             &env->fp_status), &env->fp_status);

    cc = sign << CRF_LT_BIT | match << CRF_EQ_BIT | not_sp << CRF_SO_BIT;
    env->fpscr &= ~(0x0F << FPSCR_FPRF);
    env->fpscr |= cc << FPSCR_FPRF;
    env->crf[BF(opcode)] = cc;
}
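
/*
 * not_sp flags a double-precision value that is not exactly
 * representable in single precision: round-tripping through float32 and
 * comparing against the original catches both out-of-range exponents
 * and excess significand bits.
 */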

void helper_xsrqpi(CPUPPCState *env, uint32_t opcode)
{
    ppc_vsr_t xb;
    ppc_vsr_t xt;
    uint8_t r = Rrm(opcode);
    uint8_t ex = Rc(opcode);
    uint8_t rmc = RMC(opcode);
    uint8_t rmode = 0;
    float_status tstat;

    getVSR(rB(opcode) + 32, &xb, env);
    memset(&xt, 0, sizeof(xt));
    helper_reset_fpstatus(env);

    if (r == 0 && rmc == 0) {
        rmode = float_round_ties_away;
    } else if (r == 0 && rmc == 0x3) {
        rmode = fpscr_rn;
    } else if (r == 1) {
        switch (rmc) {
        case 0:
            rmode = float_round_nearest_even;
            break;
        case 1:
            rmode = float_round_to_zero;
            break;
        case 2:
            rmode = float_round_up;
            break;
        case 3:
            rmode = float_round_down;
            break;
        default:
            abort();
        }
    }

    tstat = env->fp_status;
    set_float_exception_flags(0, &tstat);
    set_float_rounding_mode(rmode, &tstat);
    xt.f128 = float128_round_to_int(xb.f128, &tstat);
    env->fp_status.float_exception_flags |= tstat.float_exception_flags;

    if (unlikely(tstat.float_exception_flags & float_flag_invalid)) {
        if (float128_is_signaling_nan(xb.f128, &tstat)) {
            float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 0);
            xt.f128 = float128_snan_to_qnan(xt.f128);
        }
    }

    if (ex == 0 && (tstat.float_exception_flags & float_flag_inexact)) {
        env->fp_status.float_exception_flags &= ~float_flag_inexact;
    }

    helper_compute_fprf_float128(env, xt.f128);
    float_check_status(env);
    putVSR(rD(opcode) + 32, &xt, env);
}
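
/*
 * Here the ex bit (taken from the Rc field) distinguishes xsrqpi from
 * xsrqpix: with ex == 0 the inexact flag is suppressed above, so the
 * round-to-integer never raises XX.
 */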

void helper_xsrqpxp(CPUPPCState *env, uint32_t opcode)
{
    ppc_vsr_t xb;
    ppc_vsr_t xt;
    uint8_t r = Rrm(opcode);
    uint8_t rmc = RMC(opcode);
    uint8_t rmode = 0;
    floatx80 round_res;
    float_status tstat;

    getVSR(rB(opcode) + 32, &xb, env);
    memset(&xt, 0, sizeof(xt));
    helper_reset_fpstatus(env);

    if (r == 0 && rmc == 0) {
        rmode = float_round_ties_away;
    } else if (r == 0 && rmc == 0x3) {
        rmode = fpscr_rn;
    } else if (r == 1) {
        switch (rmc) {
        case 0:
            rmode = float_round_nearest_even;
            break;
        case 1:
            rmode = float_round_to_zero;
            break;
        case 2:
            rmode = float_round_up;
            break;
        case 3:
            rmode = float_round_down;
            break;
        default:
            abort();
        }
    }

    tstat = env->fp_status;
    set_float_exception_flags(0, &tstat);
    set_float_rounding_mode(rmode, &tstat);
    round_res = float128_to_floatx80(xb.f128, &tstat);
    xt.f128 = floatx80_to_float128(round_res, &tstat);
    env->fp_status.float_exception_flags |= tstat.float_exception_flags;

    if (unlikely(tstat.float_exception_flags & float_flag_invalid)) {
        if (float128_is_signaling_nan(xb.f128, &tstat)) {
            float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 0);
            xt.f128 = float128_snan_to_qnan(xt.f128);
        }
    }

    helper_compute_fprf_float128(env, xt.f128);
    putVSR(rD(opcode) + 32, &xt, env);
    float_check_status(env);
}

void helper_xssqrtqp(CPUPPCState *env, uint32_t opcode)
{
    ppc_vsr_t xb;
    ppc_vsr_t xt;
    float_status tstat;

    getVSR(rB(opcode) + 32, &xb, env);
    memset(&xt, 0, sizeof(xt));
    helper_reset_fpstatus(env);

    tstat = env->fp_status;
    if (unlikely(Rc(opcode) != 0)) {
        tstat.float_rounding_mode = float_round_to_odd;
    }

    set_float_exception_flags(0, &tstat);
    xt.f128 = float128_sqrt(xb.f128, &tstat);
    env->fp_status.float_exception_flags |= tstat.float_exception_flags;

    if (unlikely(tstat.float_exception_flags & float_flag_invalid)) {
        if (float128_is_signaling_nan(xb.f128, &tstat)) {
            float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 1);
            xt.f128 = float128_snan_to_qnan(xb.f128);
        } else if (float128_is_quiet_nan(xb.f128, &tstat)) {
            xt.f128 = xb.f128;
        } else if (float128_is_neg(xb.f128) && !float128_is_zero(xb.f128)) {
            float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSQRT, 1);
            xt.f128 = float128_default_nan(&env->fp_status);
        }
    }

    helper_compute_fprf_float128(env, xt.f128);
    putVSR(rD(opcode) + 32, &xt, env);
    float_check_status(env);
}

void helper_xssubqp(CPUPPCState *env, uint32_t opcode)
{
    ppc_vsr_t xt, xa, xb;
    float_status tstat;

    getVSR(rA(opcode) + 32, &xa, env);
    getVSR(rB(opcode) + 32, &xb, env);
    getVSR(rD(opcode) + 32, &xt, env);
    helper_reset_fpstatus(env);

    tstat = env->fp_status;
    if (unlikely(Rc(opcode) != 0)) {
        tstat.float_rounding_mode = float_round_to_odd;
    }

    set_float_exception_flags(0, &tstat);
    xt.f128 = float128_sub(xa.f128, xb.f128, &tstat);
    env->fp_status.float_exception_flags |= tstat.float_exception_flags;

    if (unlikely(tstat.float_exception_flags & float_flag_invalid)) {
        if (float128_is_infinity(xa.f128) && float128_is_infinity(xb.f128)) {
            float_invalid_op_excp(env, POWERPC_EXCP_FP_VXISI, 1);
        } else if (float128_is_signaling_nan(xa.f128, &tstat) ||
                   float128_is_signaling_nan(xb.f128, &tstat)) {
            float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 1);
        }
    }

    helper_compute_fprf_float128(env, xt.f128);
    putVSR(rD(opcode) + 32, &xt, env);
    float_check_status(env);
}
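
/*
 * Throughout the float128 helpers above, a non-zero Rc(opcode) selects
 * the "o" (round-to-odd) variant of the instruction, which is emulated
 * by switching the scratch float_status to float_round_to_odd for that
 * single operation.
 */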