Implement missing parts of the logic for the POWER PURR
[qemu-kvm.git] / target-ppc / op_helper.c
/*
 *  PowerPC emulation helpers for qemu.
 *
 *  Copyright (c) 2003-2007 Jocelyn Mayer
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include <string.h>
#include "exec.h"
#include "host-utils.h"
#include "helper.h"

#include "helper_regs.h"

//#define DEBUG_OP
//#define DEBUG_EXCEPTIONS
//#define DEBUG_SOFTWARE_TLB

#ifdef DEBUG_SOFTWARE_TLB
#  define LOG_SWTLB(...) qemu_log(__VA_ARGS__)
#else
#  define LOG_SWTLB(...) do { } while (0)
#endif
/*****************************************************************************/
/* Exceptions processing helpers */

void helper_raise_exception_err (uint32_t exception, uint32_t error_code)
{
#if 0
    printf("Raise exception %3x code : %d\n", exception, error_code);
#endif
    env->exception_index = exception;
    env->error_code = error_code;
    cpu_loop_exit();
}

void helper_raise_exception (uint32_t exception)
{
    helper_raise_exception_err(exception, 0);
}

/*****************************************************************************/
/* SPR accesses */
void helper_load_dump_spr (uint32_t sprn)
{
    qemu_log("Read SPR %d %03x => " TARGET_FMT_lx "\n", sprn, sprn,
             env->spr[sprn]);
}

void helper_store_dump_spr (uint32_t sprn)
{
    qemu_log("Write SPR %d %03x <= " TARGET_FMT_lx "\n", sprn, sprn,
             env->spr[sprn]);
}

target_ulong helper_load_tbl (void)
{
    return (target_ulong)cpu_ppc_load_tbl(env);
}

target_ulong helper_load_tbu (void)
{
    return cpu_ppc_load_tbu(env);
}

target_ulong helper_load_atbl (void)
{
    return (target_ulong)cpu_ppc_load_atbl(env);
}

target_ulong helper_load_atbu (void)
{
    return cpu_ppc_load_atbu(env);
}

#if defined(TARGET_PPC64) && !defined(CONFIG_USER_ONLY)
target_ulong helper_load_purr (void)
{
    return (target_ulong)cpu_ppc_load_purr(env);
}
#endif

target_ulong helper_load_601_rtcl (void)
{
    return cpu_ppc601_load_rtcl(env);
}

target_ulong helper_load_601_rtcu (void)
{
    return cpu_ppc601_load_rtcu(env);
}

#if !defined(CONFIG_USER_ONLY)
#if defined (TARGET_PPC64)
void helper_store_asr (target_ulong val)
{
    ppc_store_asr(env, val);
}
#endif

void helper_store_sdr1 (target_ulong val)
{
    ppc_store_sdr1(env, val);
}

void helper_store_tbl (target_ulong val)
{
    cpu_ppc_store_tbl(env, val);
}

void helper_store_tbu (target_ulong val)
{
    cpu_ppc_store_tbu(env, val);
}

void helper_store_atbl (target_ulong val)
{
    cpu_ppc_store_atbl(env, val);
}

void helper_store_atbu (target_ulong val)
{
    cpu_ppc_store_atbu(env, val);
}

void helper_store_601_rtcl (target_ulong val)
{
    cpu_ppc601_store_rtcl(env, val);
}

void helper_store_601_rtcu (target_ulong val)
{
    cpu_ppc601_store_rtcu(env, val);
}

target_ulong helper_load_decr (void)
{
    return cpu_ppc_load_decr(env);
}

void helper_store_decr (target_ulong val)
{
    cpu_ppc_store_decr(env, val);
}

void helper_store_hid0_601 (target_ulong val)
{
    target_ulong hid0;

    hid0 = env->spr[SPR_HID0];
    if ((val ^ hid0) & 0x00000008) {
        /* Change current endianness */
        env->hflags &= ~(1 << MSR_LE);
        env->hflags_nmsr &= ~(1 << MSR_LE);
        env->hflags_nmsr |= (1 << MSR_LE) & (((val >> 3) & 1) << MSR_LE);
        env->hflags |= env->hflags_nmsr;
        qemu_log("%s: set endianness to %c => " TARGET_FMT_lx "\n", __func__,
                 val & 0x8 ? 'l' : 'b', env->hflags);
    }
    env->spr[SPR_HID0] = (uint32_t)val;
}

void helper_store_403_pbr (uint32_t num, target_ulong value)
{
    if (likely(env->pb[num] != value)) {
        env->pb[num] = value;
        /* Should be optimized */
        tlb_flush(env, 1);
    }
}

target_ulong helper_load_40x_pit (void)
{
    return load_40x_pit(env);
}

void helper_store_40x_pit (target_ulong val)
{
    store_40x_pit(env, val);
}

void helper_store_40x_dbcr0 (target_ulong val)
{
    store_40x_dbcr0(env, val);
}

void helper_store_40x_sler (target_ulong val)
{
    store_40x_sler(env, val);
}

void helper_store_booke_tcr (target_ulong val)
{
    store_booke_tcr(env, val);
}

void helper_store_booke_tsr (target_ulong val)
{
    store_booke_tsr(env, val);
}

void helper_store_ibatu (uint32_t nr, target_ulong val)
{
    ppc_store_ibatu(env, nr, val);
}

void helper_store_ibatl (uint32_t nr, target_ulong val)
{
    ppc_store_ibatl(env, nr, val);
}

void helper_store_dbatu (uint32_t nr, target_ulong val)
{
    ppc_store_dbatu(env, nr, val);
}

void helper_store_dbatl (uint32_t nr, target_ulong val)
{
    ppc_store_dbatl(env, nr, val);
}

void helper_store_601_batl (uint32_t nr, target_ulong val)
{
    ppc_store_ibatl_601(env, nr, val);
}

void helper_store_601_batu (uint32_t nr, target_ulong val)
{
    ppc_store_ibatu_601(env, nr, val);
}
#endif

/*****************************************************************************/
/* Memory load and stores */

static inline target_ulong addr_add(target_ulong addr, target_long arg)
{
#if defined(TARGET_PPC64)
    if (!msr_sf)
        return (uint32_t)(addr + arg);
    else
#endif
        return addr + arg;
}
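/* Note: on a 64-bit CPU with MSR[SF] clear (32-bit mode), effective
 * addresses wrap at 32 bits. For example, addr_add(0xFFFFFFFC, 8)
 * yields 0x00000004 rather than 0x100000004.
 */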
void helper_lmw (target_ulong addr, uint32_t reg)
{
    for (; reg < 32; reg++) {
        if (msr_le)
            env->gpr[reg] = bswap32(ldl(addr));
        else
            env->gpr[reg] = ldl(addr);
        addr = addr_add(addr, 4);
    }
}

void helper_stmw (target_ulong addr, uint32_t reg)
{
    for (; reg < 32; reg++) {
        if (msr_le)
            stl(addr, bswap32((uint32_t)env->gpr[reg]));
        else
            stl(addr, (uint32_t)env->gpr[reg]);
        addr = addr_add(addr, 4);
    }
}

void helper_lsw(target_ulong addr, uint32_t nb, uint32_t reg)
{
    int sh;
    for (; nb > 3; nb -= 4) {
        env->gpr[reg] = ldl(addr);
        reg = (reg + 1) % 32;
        addr = addr_add(addr, 4);
    }
    if (unlikely(nb > 0)) {
        env->gpr[reg] = 0;
        for (sh = 24; nb > 0; nb--, sh -= 8) {
            env->gpr[reg] |= ldub(addr) << sh;
            addr = addr_add(addr, 1);
        }
    }
}

/* The PPC32 specification says we must generate an exception if rA is in
 * the range of registers to be loaded. On the other hand, IBM says this
 * is valid, but rA won't be loaded. For now, I'll follow the spec...
 */
void helper_lswx(target_ulong addr, uint32_t reg, uint32_t ra, uint32_t rb)
{
    if (likely(xer_bc != 0)) {
        if (unlikely((ra != 0 && reg < ra && (reg + xer_bc) > ra) ||
                     (reg < rb && (reg + xer_bc) > rb))) {
            helper_raise_exception_err(POWERPC_EXCP_PROGRAM,
                                       POWERPC_EXCP_INVAL |
                                       POWERPC_EXCP_INVAL_LSWX);
        } else {
            helper_lsw(addr, xer_bc, reg);
        }
    }
}

void helper_stsw(target_ulong addr, uint32_t nb, uint32_t reg)
{
    int sh;
    for (; nb > 3; nb -= 4) {
        stl(addr, env->gpr[reg]);
        reg = (reg + 1) % 32;
        addr = addr_add(addr, 4);
    }
    if (unlikely(nb > 0)) {
        for (sh = 24; nb > 0; nb--, sh -= 8) {
            stb(addr, (env->gpr[reg] >> sh) & 0xFF);
            addr = addr_add(addr, 1);
        }
    }
}

static void do_dcbz(target_ulong addr, int dcache_line_size)
{
    int i;

    addr &= ~(dcache_line_size - 1);
    for (i = 0; i < dcache_line_size; i += 4) {
        stl(addr + i, 0);
    }
    if (env->reserve_addr == addr)
        env->reserve_addr = (target_ulong)-1ULL;
}

void helper_dcbz(target_ulong addr)
{
    do_dcbz(addr, env->dcache_line_size);
}

void helper_dcbz_970(target_ulong addr)
{
    if (((env->spr[SPR_970_HID5] >> 7) & 0x3) == 1)
        do_dcbz(addr, 32);
    else
        do_dcbz(addr, env->dcache_line_size);
}

void helper_icbi(target_ulong addr)
{
    addr &= ~(env->dcache_line_size - 1);
    /* Invalidate one cache line:
     * The PowerPC specification says this is to be treated like a load
     * (not a fetch) by the MMU. To be sure it will be so, do the load
     * "by hand".
     */
    ldl(addr);
    tb_invalidate_page_range(addr, addr + env->icache_line_size);
}

// XXX: to be tested
target_ulong helper_lscbx (target_ulong addr, uint32_t reg, uint32_t ra, uint32_t rb)
{
    int i, c, d;

    d = 24;
    for (i = 0; i < xer_bc; i++) {
        c = ldub(addr);
        addr = addr_add(addr, 1);
        /* ra (if not 0) and rb are never modified */
        if (likely(reg != rb && (ra == 0 || reg != ra))) {
            env->gpr[reg] = (env->gpr[reg] & ~(0xFF << d)) | (c << d);
        }
        if (unlikely(c == xer_cmp))
            break;
        if (likely(d != 0)) {
            d -= 8;
        } else {
            d = 24;
            reg++;
            reg = reg & 0x1F;
        }
    }
    return i;
}
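/* Note on lscbx: bytes are packed into successive GPRs four at a time,
 * most-significant byte first (d counts 24, 16, 8, 0), wrapping from
 * r31 back to r0; rA (if non-zero) and rB are skipped as destinations.
 * The scan stops once a byte equal to XER[cmp] has been loaded, and the
 * loop count is returned so the caller can fold it back into XER.
 */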
/*****************************************************************************/
/* Fixed point operations helpers */
#if defined(TARGET_PPC64)

/* multiply high word */
uint64_t helper_mulhd (uint64_t arg1, uint64_t arg2)
{
    uint64_t tl, th;

    muls64(&tl, &th, arg1, arg2);
    return th;
}

/* multiply high word unsigned */
uint64_t helper_mulhdu (uint64_t arg1, uint64_t arg2)
{
    uint64_t tl, th;

    mulu64(&tl, &th, arg1, arg2);
    return th;
}

uint64_t helper_mulldo (uint64_t arg1, uint64_t arg2)
{
    int64_t th;
    uint64_t tl;

    muls64(&tl, (uint64_t *)&th, arg1, arg2);
    /* If th != 0 && th != -1, then we had an overflow */
    if (likely((uint64_t)(th + 1) <= 1)) {
        env->xer &= ~(1 << XER_OV);
    } else {
        env->xer |= (1 << XER_OV) | (1 << XER_SO);
    }
    return (int64_t)tl;
}
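/* Note: the check "(uint64_t)(th + 1) <= 1" above folds the two
 * no-overflow cases into one unsigned comparison: th == 0 maps to 1 and
 * th == -1 maps to 0, while every other high word yields a larger value.
 */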
#endif

target_ulong helper_cntlzw (target_ulong t)
{
    return clz32(t);
}

#if defined(TARGET_PPC64)
target_ulong helper_cntlzd (target_ulong t)
{
    return clz64(t);
}
#endif

/* shift right arithmetic helper */
target_ulong helper_sraw (target_ulong value, target_ulong shift)
{
    int32_t ret;

    if (likely(!(shift & 0x20))) {
        if (likely((uint32_t)shift != 0)) {
            shift &= 0x1f;
            ret = (int32_t)value >> shift;
            if (likely(ret >= 0 || (value & ((1 << shift) - 1)) == 0)) {
                env->xer &= ~(1 << XER_CA);
            } else {
                env->xer |= (1 << XER_CA);
            }
        } else {
            ret = (int32_t)value;
            env->xer &= ~(1 << XER_CA);
        }
    } else {
        ret = (int32_t)value >> 31;
        if (ret) {
            env->xer |= (1 << XER_CA);
        } else {
            env->xer &= ~(1 << XER_CA);
        }
    }
    return (target_long)ret;
}

#if defined(TARGET_PPC64)
target_ulong helper_srad (target_ulong value, target_ulong shift)
{
    int64_t ret;

    if (likely(!(shift & 0x40))) {
        if (likely((uint64_t)shift != 0)) {
            shift &= 0x3f;
            ret = (int64_t)value >> shift;
            if (likely(ret >= 0 ||
                       (value & (((uint64_t)1 << shift) - 1)) == 0)) {
                env->xer &= ~(1 << XER_CA);
            } else {
                env->xer |= (1 << XER_CA);
            }
        } else {
            ret = (int64_t)value;
            env->xer &= ~(1 << XER_CA);
        }
    } else {
        ret = (int64_t)value >> 63;
        if (ret) {
            env->xer |= (1 << XER_CA);
        } else {
            env->xer &= ~(1 << XER_CA);
        }
    }
    return ret;
}
#endif

target_ulong helper_popcntb (target_ulong val)
{
    val = (val & 0x55555555) + ((val >> 1) & 0x55555555);
    val = (val & 0x33333333) + ((val >> 2) & 0x33333333);
    val = (val & 0x0f0f0f0f) + ((val >> 4) & 0x0f0f0f0f);
    return val;
}
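/* Worked example: popcntb keeps a separate population count in every
 * byte. For the byte 0xFF:
 *   pairs:   (0xFF & 0x55) + ((0xFF >> 1) & 0x55) = 0x55 + 0x55 = 0xAA
 *   nibbles: (0xAA & 0x33) + ((0xAA >> 2) & 0x33) = 0x22 + 0x22 = 0x44
 *   bytes:   (0x44 & 0x0F) + ((0x44 >> 4) & 0x0F) = 0x04 + 0x04 = 0x08
 * leaving 8, the number of set bits, in place in that byte.
 */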
#if defined(TARGET_PPC64)
target_ulong helper_popcntb_64 (target_ulong val)
{
    val = (val & 0x5555555555555555ULL) + ((val >> 1) & 0x5555555555555555ULL);
    val = (val & 0x3333333333333333ULL) + ((val >> 2) & 0x3333333333333333ULL);
    val = (val & 0x0f0f0f0f0f0f0f0fULL) + ((val >> 4) & 0x0f0f0f0f0f0f0f0fULL);
    return val;
}
#endif

/*****************************************************************************/
/* Floating point operations helpers */
uint64_t helper_float32_to_float64(uint32_t arg)
{
    CPU_FloatU f;
    CPU_DoubleU d;
    f.l = arg;
    d.d = float32_to_float64(f.f, &env->fp_status);
    return d.ll;
}

uint32_t helper_float64_to_float32(uint64_t arg)
{
    CPU_FloatU f;
    CPU_DoubleU d;
    d.ll = arg;
    f.f = float64_to_float32(d.d, &env->fp_status);
    return f.l;
}

static inline int isden(float64 d)
{
    CPU_DoubleU u;

    u.d = d;

    return ((u.ll >> 52) & 0x7FF) == 0;
}

uint32_t helper_compute_fprf (uint64_t arg, uint32_t set_fprf)
{
    CPU_DoubleU farg;
    int isneg;
    int ret;
    farg.ll = arg;
    isneg = float64_is_neg(farg.d);
    if (unlikely(float64_is_any_nan(farg.d))) {
        if (float64_is_signaling_nan(farg.d)) {
            /* Signaling NaN: flags are undefined */
            ret = 0x00;
        } else {
            /* Quiet NaN */
            ret = 0x11;
        }
    } else if (unlikely(float64_is_infinity(farg.d))) {
        /* +/- infinity */
        if (isneg)
            ret = 0x09;
        else
            ret = 0x05;
    } else {
        if (float64_is_zero(farg.d)) {
            /* +/- zero */
            if (isneg)
                ret = 0x12;
            else
                ret = 0x02;
        } else {
            if (isden(farg.d)) {
                /* Denormalized numbers */
                ret = 0x10;
            } else {
                /* Normalized numbers */
                ret = 0x00;
            }
            if (isneg) {
                ret |= 0x08;
            } else {
                ret |= 0x04;
            }
        }
    }
    if (set_fprf) {
        /* We update FPSCR_FPRF */
        env->fpscr &= ~(0x1F << FPSCR_FPRF);
        env->fpscr |= ret << FPSCR_FPRF;
    }
    /* We just need fpcc to update Rc1 */
    return ret & 0xF;
}
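/* Summary of the FPRF encodings produced above (class bit plus FPCC):
 *   0x11 quiet NaN    0x09 -infinity    0x05 +infinity
 *   0x12 -zero        0x02 +zero
 *   0x18 -denormal    0x14 +denormal
 *   0x08 -normal      0x04 +normal
 */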
/* Floating-point invalid operations exception */
static inline uint64_t fload_invalid_op_excp(int op)
{
    uint64_t ret = 0;
    int ve;

    ve = fpscr_ve;
    switch (op) {
    case POWERPC_EXCP_FP_VXSNAN:
        env->fpscr |= 1 << FPSCR_VXSNAN;
        break;
    case POWERPC_EXCP_FP_VXSOFT:
        env->fpscr |= 1 << FPSCR_VXSOFT;
        break;
    case POWERPC_EXCP_FP_VXISI:
        /* Magnitude subtraction of infinities */
        env->fpscr |= 1 << FPSCR_VXISI;
        goto update_arith;
    case POWERPC_EXCP_FP_VXIDI:
        /* Division of infinity by infinity */
        env->fpscr |= 1 << FPSCR_VXIDI;
        goto update_arith;
    case POWERPC_EXCP_FP_VXZDZ:
        /* Division of zero by zero */
        env->fpscr |= 1 << FPSCR_VXZDZ;
        goto update_arith;
    case POWERPC_EXCP_FP_VXIMZ:
        /* Multiplication of zero by infinity */
        env->fpscr |= 1 << FPSCR_VXIMZ;
        goto update_arith;
    case POWERPC_EXCP_FP_VXVC:
        /* Ordered comparison of NaN */
        env->fpscr |= 1 << FPSCR_VXVC;
        env->fpscr &= ~(0xF << FPSCR_FPCC);
        env->fpscr |= 0x11 << FPSCR_FPCC;
        /* We must update the target FPR before raising the exception */
        if (ve != 0) {
            env->exception_index = POWERPC_EXCP_PROGRAM;
            env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_VXVC;
            /* Update the floating-point enabled exception summary */
            env->fpscr |= 1 << FPSCR_FEX;
            /* Exception is deferred */
            ve = 0;
        }
        break;
    case POWERPC_EXCP_FP_VXSQRT:
        /* Square root of a negative number */
        env->fpscr |= 1 << FPSCR_VXSQRT;
    update_arith:
        env->fpscr &= ~((1 << FPSCR_FR) | (1 << FPSCR_FI));
        if (ve == 0) {
            /* Set the result to quiet NaN */
            ret = 0x7FF8000000000000ULL;
            env->fpscr &= ~(0xF << FPSCR_FPCC);
            env->fpscr |= 0x11 << FPSCR_FPCC;
        }
        break;
    case POWERPC_EXCP_FP_VXCVI:
        /* Invalid conversion */
        env->fpscr |= 1 << FPSCR_VXCVI;
        env->fpscr &= ~((1 << FPSCR_FR) | (1 << FPSCR_FI));
        if (ve == 0) {
            /* Set the result to quiet NaN */
            ret = 0x7FF8000000000000ULL;
            env->fpscr &= ~(0xF << FPSCR_FPCC);
            env->fpscr |= 0x11 << FPSCR_FPCC;
        }
        break;
    }
    /* Update the floating-point invalid operation summary */
    env->fpscr |= 1 << FPSCR_VX;
    /* Update the floating-point exception summary */
    env->fpscr |= 1 << FPSCR_FX;
    if (ve != 0) {
        /* Update the floating-point enabled exception summary */
        env->fpscr |= 1 << FPSCR_FEX;
        if (msr_fe0 != 0 || msr_fe1 != 0)
            helper_raise_exception_err(POWERPC_EXCP_PROGRAM, POWERPC_EXCP_FP | op);
    }
    return ret;
}

static inline void float_zero_divide_excp(void)
{
    env->fpscr |= 1 << FPSCR_ZX;
    env->fpscr &= ~((1 << FPSCR_FR) | (1 << FPSCR_FI));
    /* Update the floating-point exception summary */
    env->fpscr |= 1 << FPSCR_FX;
    if (fpscr_ze != 0) {
        /* Update the floating-point enabled exception summary */
        env->fpscr |= 1 << FPSCR_FEX;
        if (msr_fe0 != 0 || msr_fe1 != 0) {
            helper_raise_exception_err(POWERPC_EXCP_PROGRAM,
                                       POWERPC_EXCP_FP | POWERPC_EXCP_FP_ZX);
        }
    }
}

static inline void float_overflow_excp(void)
{
    env->fpscr |= 1 << FPSCR_OX;
    /* Update the floating-point exception summary */
    env->fpscr |= 1 << FPSCR_FX;
    if (fpscr_oe != 0) {
        /* XXX: should adjust the result */
        /* Update the floating-point enabled exception summary */
        env->fpscr |= 1 << FPSCR_FEX;
        /* We must update the target FPR before raising the exception */
        env->exception_index = POWERPC_EXCP_PROGRAM;
        env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_OX;
    } else {
        env->fpscr |= 1 << FPSCR_XX;
        env->fpscr |= 1 << FPSCR_FI;
    }
}

static inline void float_underflow_excp(void)
{
    env->fpscr |= 1 << FPSCR_UX;
    /* Update the floating-point exception summary */
    env->fpscr |= 1 << FPSCR_FX;
    if (fpscr_ue != 0) {
        /* XXX: should adjust the result */
        /* Update the floating-point enabled exception summary */
        env->fpscr |= 1 << FPSCR_FEX;
        /* We must update the target FPR before raising the exception */
        env->exception_index = POWERPC_EXCP_PROGRAM;
        env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_UX;
    }
}

static inline void float_inexact_excp(void)
{
    env->fpscr |= 1 << FPSCR_XX;
    /* Update the floating-point exception summary */
    env->fpscr |= 1 << FPSCR_FX;
    if (fpscr_xe != 0) {
        /* Update the floating-point enabled exception summary */
        env->fpscr |= 1 << FPSCR_FEX;
        /* We must update the target FPR before raising the exception */
        env->exception_index = POWERPC_EXCP_PROGRAM;
        env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_XX;
    }
}

static inline void fpscr_set_rounding_mode(void)
{
    int rnd_type;

    /* Set rounding mode */
    switch (fpscr_rn) {
    case 0:
        /* Best approximation (round to nearest) */
        rnd_type = float_round_nearest_even;
        break;
    case 1:
        /* Smaller magnitude (round toward zero) */
        rnd_type = float_round_to_zero;
        break;
    case 2:
        /* Round toward +infinity */
        rnd_type = float_round_up;
        break;
    default:
    case 3:
        /* Round toward -infinity */
        rnd_type = float_round_down;
        break;
    }
    set_float_rounding_mode(rnd_type, &env->fp_status);
}

void helper_fpscr_clrbit (uint32_t bit)
{
    int prev;

    prev = (env->fpscr >> bit) & 1;
    env->fpscr &= ~(1 << bit);
    if (prev == 1) {
        switch (bit) {
        case FPSCR_RN1:
        case FPSCR_RN:
            fpscr_set_rounding_mode();
            break;
        default:
            break;
        }
    }
}
void helper_fpscr_setbit (uint32_t bit)
{
    int prev;

    prev = (env->fpscr >> bit) & 1;
    env->fpscr |= 1 << bit;
    if (prev == 0) {
        switch (bit) {
        case FPSCR_VX:
            env->fpscr |= 1 << FPSCR_FX;
            if (fpscr_ve)
                goto raise_ve;
        case FPSCR_OX:
            env->fpscr |= 1 << FPSCR_FX;
            if (fpscr_oe)
                goto raise_oe;
            break;
        case FPSCR_UX:
            env->fpscr |= 1 << FPSCR_FX;
            if (fpscr_ue)
                goto raise_ue;
            break;
        case FPSCR_ZX:
            env->fpscr |= 1 << FPSCR_FX;
            if (fpscr_ze)
                goto raise_ze;
            break;
        case FPSCR_XX:
            env->fpscr |= 1 << FPSCR_FX;
            if (fpscr_xe)
                goto raise_xe;
            break;
        case FPSCR_VXSNAN:
        case FPSCR_VXISI:
        case FPSCR_VXIDI:
        case FPSCR_VXZDZ:
        case FPSCR_VXIMZ:
        case FPSCR_VXVC:
        case FPSCR_VXSOFT:
        case FPSCR_VXSQRT:
        case FPSCR_VXCVI:
            env->fpscr |= 1 << FPSCR_VX;
            env->fpscr |= 1 << FPSCR_FX;
            if (fpscr_ve != 0)
                goto raise_ve;
            break;
        case FPSCR_VE:
            if (fpscr_vx != 0) {
            raise_ve:
                env->error_code = POWERPC_EXCP_FP;
                if (fpscr_vxsnan)
                    env->error_code |= POWERPC_EXCP_FP_VXSNAN;
                if (fpscr_vxisi)
                    env->error_code |= POWERPC_EXCP_FP_VXISI;
                if (fpscr_vxidi)
                    env->error_code |= POWERPC_EXCP_FP_VXIDI;
                if (fpscr_vxzdz)
                    env->error_code |= POWERPC_EXCP_FP_VXZDZ;
                if (fpscr_vximz)
                    env->error_code |= POWERPC_EXCP_FP_VXIMZ;
                if (fpscr_vxvc)
                    env->error_code |= POWERPC_EXCP_FP_VXVC;
                if (fpscr_vxsoft)
                    env->error_code |= POWERPC_EXCP_FP_VXSOFT;
                if (fpscr_vxsqrt)
                    env->error_code |= POWERPC_EXCP_FP_VXSQRT;
                if (fpscr_vxcvi)
                    env->error_code |= POWERPC_EXCP_FP_VXCVI;
                goto raise_excp;
            }
            break;
        case FPSCR_OE:
            if (fpscr_ox != 0) {
            raise_oe:
                env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_OX;
                goto raise_excp;
            }
            break;
        case FPSCR_UE:
            if (fpscr_ux != 0) {
            raise_ue:
                env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_UX;
                goto raise_excp;
            }
            break;
        case FPSCR_ZE:
            if (fpscr_zx != 0) {
            raise_ze:
                env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_ZX;
                goto raise_excp;
            }
            break;
        case FPSCR_XE:
            if (fpscr_xx != 0) {
            raise_xe:
                env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_XX;
                goto raise_excp;
            }
            break;
        case FPSCR_RN1:
        case FPSCR_RN:
            fpscr_set_rounding_mode();
            break;
        default:
            break;
        raise_excp:
            /* Update the floating-point enabled exception summary */
            env->fpscr |= 1 << FPSCR_FEX;
            /* We have to update Rc1 before raising the exception */
            env->exception_index = POWERPC_EXCP_PROGRAM;
            break;
        }
    }
}
void helper_store_fpscr (uint64_t arg, uint32_t mask)
{
    /*
     * We use only the 32 LSB of the incoming fpr
     */
    uint32_t prev, new;
    int i;

    prev = env->fpscr;
    new = (uint32_t)arg;
    new &= ~0x60000000;
    new |= prev & 0x60000000;
    for (i = 0; i < 8; i++) {
        if (mask & (1 << i)) {
            env->fpscr &= ~(0xF << (4 * i));
            env->fpscr |= new & (0xF << (4 * i));
        }
    }
    /* Update VX and FEX */
    if (fpscr_ix != 0)
        env->fpscr |= 1 << FPSCR_VX;
    else
        env->fpscr &= ~(1 << FPSCR_VX);
    if ((fpscr_ex & fpscr_eex) != 0) {
        env->fpscr |= 1 << FPSCR_FEX;
        env->exception_index = POWERPC_EXCP_PROGRAM;
        /* XXX: we should compute it properly */
        env->error_code = POWERPC_EXCP_FP;
    }
    else
        env->fpscr &= ~(1 << FPSCR_FEX);
    fpscr_set_rounding_mode();
}

void helper_float_check_status (void)
{
#ifdef CONFIG_SOFTFLOAT
    if (env->exception_index == POWERPC_EXCP_PROGRAM &&
        (env->error_code & POWERPC_EXCP_FP)) {
        /* Deferred floating-point exception after target FPR update */
        if (msr_fe0 != 0 || msr_fe1 != 0)
            helper_raise_exception_err(env->exception_index, env->error_code);
    } else {
        int status = get_float_exception_flags(&env->fp_status);
        if (status & float_flag_divbyzero) {
            float_zero_divide_excp();
        } else if (status & float_flag_overflow) {
            float_overflow_excp();
        } else if (status & float_flag_underflow) {
            float_underflow_excp();
        } else if (status & float_flag_inexact) {
            float_inexact_excp();
        }
    }
#else
    if (env->exception_index == POWERPC_EXCP_PROGRAM &&
        (env->error_code & POWERPC_EXCP_FP)) {
        /* Deferred floating-point exception after target FPR update */
        if (msr_fe0 != 0 || msr_fe1 != 0)
            helper_raise_exception_err(env->exception_index, env->error_code);
    }
#endif
}

#ifdef CONFIG_SOFTFLOAT
void helper_reset_fpstatus (void)
{
    set_float_exception_flags(0, &env->fp_status);
}
#endif
/* fadd - fadd. */
uint64_t helper_fadd (uint64_t arg1, uint64_t arg2)
{
    CPU_DoubleU farg1, farg2;

    farg1.ll = arg1;
    farg2.ll = arg2;

    if (unlikely(float64_is_infinity(farg1.d) && float64_is_infinity(farg2.d) &&
                 float64_is_neg(farg1.d) != float64_is_neg(farg2.d))) {
        /* Magnitude subtraction of infinities */
        farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXISI);
    } else {
        if (unlikely(float64_is_signaling_nan(farg1.d) ||
                     float64_is_signaling_nan(farg2.d))) {
            /* sNaN addition */
            fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
        }
        farg1.d = float64_add(farg1.d, farg2.d, &env->fp_status);
    }

    return farg1.ll;
}

/* fsub - fsub. */
uint64_t helper_fsub (uint64_t arg1, uint64_t arg2)
{
    CPU_DoubleU farg1, farg2;

    farg1.ll = arg1;
    farg2.ll = arg2;

    if (unlikely(float64_is_infinity(farg1.d) && float64_is_infinity(farg2.d) &&
                 float64_is_neg(farg1.d) == float64_is_neg(farg2.d))) {
        /* Magnitude subtraction of infinities */
        farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXISI);
    } else {
        if (unlikely(float64_is_signaling_nan(farg1.d) ||
                     float64_is_signaling_nan(farg2.d))) {
            /* sNaN subtraction */
            fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
        }
        farg1.d = float64_sub(farg1.d, farg2.d, &env->fp_status);
    }

    return farg1.ll;
}

/* fmul - fmul. */
uint64_t helper_fmul (uint64_t arg1, uint64_t arg2)
{
    CPU_DoubleU farg1, farg2;

    farg1.ll = arg1;
    farg2.ll = arg2;

    if (unlikely((float64_is_infinity(farg1.d) && float64_is_zero(farg2.d)) ||
                 (float64_is_zero(farg1.d) && float64_is_infinity(farg2.d)))) {
        /* Multiplication of zero by infinity */
        farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXIMZ);
    } else {
        if (unlikely(float64_is_signaling_nan(farg1.d) ||
                     float64_is_signaling_nan(farg2.d))) {
            /* sNaN multiplication */
            fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
        }
        farg1.d = float64_mul(farg1.d, farg2.d, &env->fp_status);
    }

    return farg1.ll;
}

/* fdiv - fdiv. */
uint64_t helper_fdiv (uint64_t arg1, uint64_t arg2)
{
    CPU_DoubleU farg1, farg2;

    farg1.ll = arg1;
    farg2.ll = arg2;

    if (unlikely(float64_is_infinity(farg1.d) && float64_is_infinity(farg2.d))) {
        /* Division of infinity by infinity */
        farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXIDI);
    } else if (unlikely(float64_is_zero(farg1.d) && float64_is_zero(farg2.d))) {
        /* Division of zero by zero */
        farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXZDZ);
    } else {
        if (unlikely(float64_is_signaling_nan(farg1.d) ||
                     float64_is_signaling_nan(farg2.d))) {
            /* sNaN division */
            fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
        }
        farg1.d = float64_div(farg1.d, farg2.d, &env->fp_status);
    }

    return farg1.ll;
}

/* fabs */
uint64_t helper_fabs (uint64_t arg)
{
    CPU_DoubleU farg;

    farg.ll = arg;
    farg.d = float64_abs(farg.d);
    return farg.ll;
}

/* fnabs */
uint64_t helper_fnabs (uint64_t arg)
{
    CPU_DoubleU farg;

    farg.ll = arg;
    farg.d = float64_abs(farg.d);
    farg.d = float64_chs(farg.d);
    return farg.ll;
}

/* fneg */
uint64_t helper_fneg (uint64_t arg)
{
    CPU_DoubleU farg;

    farg.ll = arg;
    farg.d = float64_chs(farg.d);
    return farg.ll;
}
/* fctiw - fctiw. */
uint64_t helper_fctiw (uint64_t arg)
{
    CPU_DoubleU farg;
    farg.ll = arg;

    if (unlikely(float64_is_signaling_nan(farg.d))) {
        /* sNaN conversion */
        farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN | POWERPC_EXCP_FP_VXCVI);
    } else if (unlikely(float64_is_quiet_nan(farg.d) || float64_is_infinity(farg.d))) {
        /* qNaN / infinity conversion */
        farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXCVI);
    } else {
        farg.ll = float64_to_int32(farg.d, &env->fp_status);
        /* XXX: higher bits are not supposed to be significant.
         *      To make tests easier, return the same as a real PowerPC 750.
         */
        farg.ll |= 0xFFF80000ULL << 32;
    }
    return farg.ll;
}

/* fctiwz - fctiwz. */
uint64_t helper_fctiwz (uint64_t arg)
{
    CPU_DoubleU farg;
    farg.ll = arg;

    if (unlikely(float64_is_signaling_nan(farg.d))) {
        /* sNaN conversion */
        farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN | POWERPC_EXCP_FP_VXCVI);
    } else if (unlikely(float64_is_quiet_nan(farg.d) || float64_is_infinity(farg.d))) {
        /* qNaN / infinity conversion */
        farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXCVI);
    } else {
        farg.ll = float64_to_int32_round_to_zero(farg.d, &env->fp_status);
        /* XXX: higher bits are not supposed to be significant.
         *      To make tests easier, return the same as a real PowerPC 750.
         */
        farg.ll |= 0xFFF80000ULL << 32;
    }
    return farg.ll;
}

#if defined(TARGET_PPC64)
/* fcfid - fcfid. */
uint64_t helper_fcfid (uint64_t arg)
{
    CPU_DoubleU farg;
    farg.d = int64_to_float64(arg, &env->fp_status);
    return farg.ll;
}

/* fctid - fctid. */
uint64_t helper_fctid (uint64_t arg)
{
    CPU_DoubleU farg;
    farg.ll = arg;

    if (unlikely(float64_is_signaling_nan(farg.d))) {
        /* sNaN conversion */
        farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN | POWERPC_EXCP_FP_VXCVI);
    } else if (unlikely(float64_is_quiet_nan(farg.d) || float64_is_infinity(farg.d))) {
        /* qNaN / infinity conversion */
        farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXCVI);
    } else {
        farg.ll = float64_to_int64(farg.d, &env->fp_status);
    }
    return farg.ll;
}

/* fctidz - fctidz. */
uint64_t helper_fctidz (uint64_t arg)
{
    CPU_DoubleU farg;
    farg.ll = arg;

    if (unlikely(float64_is_signaling_nan(farg.d))) {
        /* sNaN conversion */
        farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN | POWERPC_EXCP_FP_VXCVI);
    } else if (unlikely(float64_is_quiet_nan(farg.d) || float64_is_infinity(farg.d))) {
        /* qNaN / infinity conversion */
        farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXCVI);
    } else {
        farg.ll = float64_to_int64_round_to_zero(farg.d, &env->fp_status);
    }
    return farg.ll;
}
#endif

static inline uint64_t do_fri(uint64_t arg, int rounding_mode)
{
    CPU_DoubleU farg;
    farg.ll = arg;

    if (unlikely(float64_is_signaling_nan(farg.d))) {
        /* sNaN round */
        farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN | POWERPC_EXCP_FP_VXCVI);
    } else if (unlikely(float64_is_quiet_nan(farg.d) || float64_is_infinity(farg.d))) {
        /* qNaN / infinity round */
        farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXCVI);
    } else {
        set_float_rounding_mode(rounding_mode, &env->fp_status);
        farg.ll = float64_round_to_int(farg.d, &env->fp_status);
        /* Restore rounding mode from FPSCR */
        fpscr_set_rounding_mode();
    }
    return farg.ll;
}
uint64_t helper_frin (uint64_t arg)
{
    return do_fri(arg, float_round_nearest_even);
}

uint64_t helper_friz (uint64_t arg)
{
    return do_fri(arg, float_round_to_zero);
}

uint64_t helper_frip (uint64_t arg)
{
    return do_fri(arg, float_round_up);
}

uint64_t helper_frim (uint64_t arg)
{
    return do_fri(arg, float_round_down);
}
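/* Example of the four variants as implemented here: for arg = 2.5, frin
 * gives 2.0 (ties to even), friz gives 2.0 (toward zero), frip gives 3.0
 * (toward +infinity) and frim gives 2.0 (toward -infinity); for -2.5
 * they give -2.0, -2.0, -2.0 and -3.0 respectively.
 */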
/* fmadd - fmadd. */
uint64_t helper_fmadd (uint64_t arg1, uint64_t arg2, uint64_t arg3)
{
    CPU_DoubleU farg1, farg2, farg3;

    farg1.ll = arg1;
    farg2.ll = arg2;
    farg3.ll = arg3;

    if (unlikely((float64_is_infinity(farg1.d) && float64_is_zero(farg2.d)) ||
                 (float64_is_zero(farg1.d) && float64_is_infinity(farg2.d)))) {
        /* Multiplication of zero by infinity */
        farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXIMZ);
    } else {
        if (unlikely(float64_is_signaling_nan(farg1.d) ||
                     float64_is_signaling_nan(farg2.d) ||
                     float64_is_signaling_nan(farg3.d))) {
            /* sNaN operation */
            fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
        }
#ifdef FLOAT128
        /* This is the way the PowerPC specification defines it */
        float128 ft0_128, ft1_128;

        ft0_128 = float64_to_float128(farg1.d, &env->fp_status);
        ft1_128 = float64_to_float128(farg2.d, &env->fp_status);
        ft0_128 = float128_mul(ft0_128, ft1_128, &env->fp_status);
        if (unlikely(float128_is_infinity(ft0_128) && float64_is_infinity(farg3.d) &&
                     float128_is_neg(ft0_128) != float64_is_neg(farg3.d))) {
            /* Magnitude subtraction of infinities */
            farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXISI);
        } else {
            ft1_128 = float64_to_float128(farg3.d, &env->fp_status);
            ft0_128 = float128_add(ft0_128, ft1_128, &env->fp_status);
            farg1.d = float128_to_float64(ft0_128, &env->fp_status);
        }
#else
        /* This is OK on x86 hosts */
        farg1.d = (farg1.d * farg2.d) + farg3.d;
#endif
    }

    return farg1.ll;
}

/* fmsub - fmsub. */
uint64_t helper_fmsub (uint64_t arg1, uint64_t arg2, uint64_t arg3)
{
    CPU_DoubleU farg1, farg2, farg3;

    farg1.ll = arg1;
    farg2.ll = arg2;
    farg3.ll = arg3;

    if (unlikely((float64_is_infinity(farg1.d) && float64_is_zero(farg2.d)) ||
                 (float64_is_zero(farg1.d) && float64_is_infinity(farg2.d)))) {
        /* Multiplication of zero by infinity */
        farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXIMZ);
    } else {
        if (unlikely(float64_is_signaling_nan(farg1.d) ||
                     float64_is_signaling_nan(farg2.d) ||
                     float64_is_signaling_nan(farg3.d))) {
            /* sNaN operation */
            fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
        }
#ifdef FLOAT128
        /* This is the way the PowerPC specification defines it */
        float128 ft0_128, ft1_128;

        ft0_128 = float64_to_float128(farg1.d, &env->fp_status);
        ft1_128 = float64_to_float128(farg2.d, &env->fp_status);
        ft0_128 = float128_mul(ft0_128, ft1_128, &env->fp_status);
        if (unlikely(float128_is_infinity(ft0_128) && float64_is_infinity(farg3.d) &&
                     float128_is_neg(ft0_128) == float64_is_neg(farg3.d))) {
            /* Magnitude subtraction of infinities */
            farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXISI);
        } else {
            ft1_128 = float64_to_float128(farg3.d, &env->fp_status);
            ft0_128 = float128_sub(ft0_128, ft1_128, &env->fp_status);
            farg1.d = float128_to_float64(ft0_128, &env->fp_status);
        }
#else
        /* This is OK on x86 hosts */
        farg1.d = (farg1.d * farg2.d) - farg3.d;
#endif
    }
    return farg1.ll;
}

/* fnmadd - fnmadd. */
uint64_t helper_fnmadd (uint64_t arg1, uint64_t arg2, uint64_t arg3)
{
    CPU_DoubleU farg1, farg2, farg3;

    farg1.ll = arg1;
    farg2.ll = arg2;
    farg3.ll = arg3;

    if (unlikely((float64_is_infinity(farg1.d) && float64_is_zero(farg2.d)) ||
                 (float64_is_zero(farg1.d) && float64_is_infinity(farg2.d)))) {
        /* Multiplication of zero by infinity */
        farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXIMZ);
    } else {
        if (unlikely(float64_is_signaling_nan(farg1.d) ||
                     float64_is_signaling_nan(farg2.d) ||
                     float64_is_signaling_nan(farg3.d))) {
            /* sNaN operation */
            fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
        }
#ifdef FLOAT128
        /* This is the way the PowerPC specification defines it */
        float128 ft0_128, ft1_128;

        ft0_128 = float64_to_float128(farg1.d, &env->fp_status);
        ft1_128 = float64_to_float128(farg2.d, &env->fp_status);
        ft0_128 = float128_mul(ft0_128, ft1_128, &env->fp_status);
        if (unlikely(float128_is_infinity(ft0_128) && float64_is_infinity(farg3.d) &&
                     float128_is_neg(ft0_128) != float64_is_neg(farg3.d))) {
            /* Magnitude subtraction of infinities */
            farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXISI);
        } else {
            ft1_128 = float64_to_float128(farg3.d, &env->fp_status);
            ft0_128 = float128_add(ft0_128, ft1_128, &env->fp_status);
            farg1.d = float128_to_float64(ft0_128, &env->fp_status);
        }
#else
        /* This is OK on x86 hosts */
        farg1.d = (farg1.d * farg2.d) + farg3.d;
#endif
        if (likely(!float64_is_any_nan(farg1.d))) {
            farg1.d = float64_chs(farg1.d);
        }
    }
    return farg1.ll;
}

/* fnmsub - fnmsub. */
uint64_t helper_fnmsub (uint64_t arg1, uint64_t arg2, uint64_t arg3)
{
    CPU_DoubleU farg1, farg2, farg3;

    farg1.ll = arg1;
    farg2.ll = arg2;
    farg3.ll = arg3;

    if (unlikely((float64_is_infinity(farg1.d) && float64_is_zero(farg2.d)) ||
                 (float64_is_zero(farg1.d) && float64_is_infinity(farg2.d)))) {
        /* Multiplication of zero by infinity */
        farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXIMZ);
    } else {
        if (unlikely(float64_is_signaling_nan(farg1.d) ||
                     float64_is_signaling_nan(farg2.d) ||
                     float64_is_signaling_nan(farg3.d))) {
            /* sNaN operation */
            fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
        }
#ifdef FLOAT128
        /* This is the way the PowerPC specification defines it */
        float128 ft0_128, ft1_128;

        ft0_128 = float64_to_float128(farg1.d, &env->fp_status);
        ft1_128 = float64_to_float128(farg2.d, &env->fp_status);
        ft0_128 = float128_mul(ft0_128, ft1_128, &env->fp_status);
        if (unlikely(float128_is_infinity(ft0_128) && float64_is_infinity(farg3.d) &&
                     float128_is_neg(ft0_128) == float64_is_neg(farg3.d))) {
            /* Magnitude subtraction of infinities */
            farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXISI);
        } else {
            ft1_128 = float64_to_float128(farg3.d, &env->fp_status);
            ft0_128 = float128_sub(ft0_128, ft1_128, &env->fp_status);
            farg1.d = float128_to_float64(ft0_128, &env->fp_status);
        }
#else
        /* This is OK on x86 hosts */
        farg1.d = (farg1.d * farg2.d) - farg3.d;
#endif
        if (likely(!float64_is_any_nan(farg1.d))) {
            farg1.d = float64_chs(farg1.d);
        }
    }
    return farg1.ll;
}
/* frsp - frsp. */
uint64_t helper_frsp (uint64_t arg)
{
    CPU_DoubleU farg;
    float32 f32;
    farg.ll = arg;

    if (unlikely(float64_is_signaling_nan(farg.d))) {
        /* sNaN rounding to single precision */
        fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
    }
    f32 = float64_to_float32(farg.d, &env->fp_status);
    farg.d = float32_to_float64(f32, &env->fp_status);

    return farg.ll;
}

/* fsqrt - fsqrt. */
uint64_t helper_fsqrt (uint64_t arg)
{
    CPU_DoubleU farg;
    farg.ll = arg;

    if (unlikely(float64_is_neg(farg.d) && !float64_is_zero(farg.d))) {
        /* Square root of a negative nonzero number */
        farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSQRT);
    } else {
        if (unlikely(float64_is_signaling_nan(farg.d))) {
            /* sNaN square root */
            fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
        }
        farg.d = float64_sqrt(farg.d, &env->fp_status);
    }
    return farg.ll;
}

/* fre - fre. */
uint64_t helper_fre (uint64_t arg)
{
    CPU_DoubleU farg;
    farg.ll = arg;

    if (unlikely(float64_is_signaling_nan(farg.d))) {
        /* sNaN reciprocal */
        fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
    }
    farg.d = float64_div(float64_one, farg.d, &env->fp_status);
    return farg.ll;
}
/* fres - fres. */
uint64_t helper_fres (uint64_t arg)
{
    CPU_DoubleU farg;
    float32 f32;
    farg.ll = arg;

    if (unlikely(float64_is_signaling_nan(farg.d))) {
        /* sNaN reciprocal */
        fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
    }
    farg.d = float64_div(float64_one, farg.d, &env->fp_status);
    f32 = float64_to_float32(farg.d, &env->fp_status);
    farg.d = float32_to_float64(f32, &env->fp_status);

    return farg.ll;
}

/* frsqrte - frsqrte. */
uint64_t helper_frsqrte (uint64_t arg)
{
    CPU_DoubleU farg;
    float32 f32;
    farg.ll = arg;

    if (unlikely(float64_is_neg(farg.d) && !float64_is_zero(farg.d))) {
        /* Reciprocal square root of a negative nonzero number */
        farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSQRT);
    } else {
        if (unlikely(float64_is_signaling_nan(farg.d))) {
            /* sNaN reciprocal square root */
            fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
        }
        farg.d = float64_sqrt(farg.d, &env->fp_status);
        farg.d = float64_div(float64_one, farg.d, &env->fp_status);
        f32 = float64_to_float32(farg.d, &env->fp_status);
        farg.d = float32_to_float64(f32, &env->fp_status);
    }
    return farg.ll;
}

/* fsel - fsel. */
uint64_t helper_fsel (uint64_t arg1, uint64_t arg2, uint64_t arg3)
{
    CPU_DoubleU farg1;

    farg1.ll = arg1;

    if ((!float64_is_neg(farg1.d) || float64_is_zero(farg1.d)) && !float64_is_any_nan(farg1.d)) {
        return arg2;
    } else {
        return arg3;
    }
}

void helper_fcmpu (uint64_t arg1, uint64_t arg2, uint32_t crfD)
{
    CPU_DoubleU farg1, farg2;
    uint32_t ret = 0;
    farg1.ll = arg1;
    farg2.ll = arg2;

    if (unlikely(float64_is_any_nan(farg1.d) ||
                 float64_is_any_nan(farg2.d))) {
        ret = 0x01UL;
    } else if (float64_lt(farg1.d, farg2.d, &env->fp_status)) {
        ret = 0x08UL;
    } else if (!float64_le(farg1.d, farg2.d, &env->fp_status)) {
        ret = 0x04UL;
    } else {
        ret = 0x02UL;
    }

    env->fpscr &= ~(0x0F << FPSCR_FPRF);
    env->fpscr |= ret << FPSCR_FPRF;
    env->crf[crfD] = ret;
    if (unlikely(ret == 0x01UL
                 && (float64_is_signaling_nan(farg1.d) ||
                     float64_is_signaling_nan(farg2.d)))) {
        /* sNaN comparison */
        fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
    }
}

void helper_fcmpo (uint64_t arg1, uint64_t arg2, uint32_t crfD)
{
    CPU_DoubleU farg1, farg2;
    uint32_t ret = 0;
    farg1.ll = arg1;
    farg2.ll = arg2;

    if (unlikely(float64_is_any_nan(farg1.d) ||
                 float64_is_any_nan(farg2.d))) {
        ret = 0x01UL;
    } else if (float64_lt(farg1.d, farg2.d, &env->fp_status)) {
        ret = 0x08UL;
    } else if (!float64_le(farg1.d, farg2.d, &env->fp_status)) {
        ret = 0x04UL;
    } else {
        ret = 0x02UL;
    }

    env->fpscr &= ~(0x0F << FPSCR_FPRF);
    env->fpscr |= ret << FPSCR_FPRF;
    env->crf[crfD] = ret;
    if (unlikely (ret == 0x01UL)) {
        if (float64_is_signaling_nan(farg1.d) ||
            float64_is_signaling_nan(farg2.d)) {
            /* sNaN comparison */
            fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN |
                                  POWERPC_EXCP_FP_VXVC);
        } else {
            /* qNaN comparison */
            fload_invalid_op_excp(POWERPC_EXCP_FP_VXVC);
        }
    }
}
#if !defined (CONFIG_USER_ONLY)
void helper_store_msr (target_ulong val)
{
    val = hreg_store_msr(env, val, 0);
    if (val != 0) {
        env->interrupt_request |= CPU_INTERRUPT_EXITTB;
        helper_raise_exception(val);
    }
}

static inline void do_rfi(target_ulong nip, target_ulong msr,
                          target_ulong msrm, int keep_msrh)
{
#if defined(TARGET_PPC64)
    if (msr & (1ULL << MSR_SF)) {
        nip = (uint64_t)nip;
        msr &= (uint64_t)msrm;
    } else {
        nip = (uint32_t)nip;
        msr = (uint32_t)(msr & msrm);
        if (keep_msrh)
            msr |= env->msr & ~((uint64_t)0xFFFFFFFF);
    }
#else
    nip = (uint32_t)nip;
    msr &= (uint32_t)msrm;
#endif
    /* XXX: beware: this is false if VLE is supported */
    env->nip = nip & ~((target_ulong)0x00000003);
    hreg_store_msr(env, msr, 1);
#if defined (DEBUG_OP)
    cpu_dump_rfi(env->nip, env->msr);
#endif
    /* No need to raise an exception here,
     * as rfi is always the last insn of a TB
     */
    env->interrupt_request |= CPU_INTERRUPT_EXITTB;
}

void helper_rfi (void)
{
    do_rfi(env->spr[SPR_SRR0], env->spr[SPR_SRR1],
           ~((target_ulong)0x783F0000), 1);
}

#if defined(TARGET_PPC64)
void helper_rfid (void)
{
    do_rfi(env->spr[SPR_SRR0], env->spr[SPR_SRR1],
           ~((target_ulong)0x783F0000), 0);
}

void helper_hrfid (void)
{
    do_rfi(env->spr[SPR_HSRR0], env->spr[SPR_HSRR1],
           ~((target_ulong)0x783F0000), 0);
}
#endif
#endif

void helper_tw (target_ulong arg1, target_ulong arg2, uint32_t flags)
{
    if (!likely(!(((int32_t)arg1 < (int32_t)arg2 && (flags & 0x10)) ||
                  ((int32_t)arg1 > (int32_t)arg2 && (flags & 0x08)) ||
                  ((int32_t)arg1 == (int32_t)arg2 && (flags & 0x04)) ||
                  ((uint32_t)arg1 < (uint32_t)arg2 && (flags & 0x02)) ||
                  ((uint32_t)arg1 > (uint32_t)arg2 && (flags & 0x01))))) {
        helper_raise_exception_err(POWERPC_EXCP_PROGRAM, POWERPC_EXCP_TRAP);
    }
}

#if defined(TARGET_PPC64)
void helper_td (target_ulong arg1, target_ulong arg2, uint32_t flags)
{
    if (!likely(!(((int64_t)arg1 < (int64_t)arg2 && (flags & 0x10)) ||
                  ((int64_t)arg1 > (int64_t)arg2 && (flags & 0x08)) ||
                  ((int64_t)arg1 == (int64_t)arg2 && (flags & 0x04)) ||
                  ((uint64_t)arg1 < (uint64_t)arg2 && (flags & 0x02)) ||
                  ((uint64_t)arg1 > (uint64_t)arg2 && (flags & 0x01)))))
        helper_raise_exception_err(POWERPC_EXCP_PROGRAM, POWERPC_EXCP_TRAP);
}
#endif

/*****************************************************************************/
/* PowerPC 601 specific instructions (POWER bridge) */

target_ulong helper_clcs (uint32_t arg)
{
    switch (arg) {
    case 0x0CUL:
        /* Instruction cache line size */
        return env->icache_line_size;
        break;
    case 0x0DUL:
        /* Data cache line size */
        return env->dcache_line_size;
        break;
    case 0x0EUL:
        /* Minimum cache line size */
        return (env->icache_line_size < env->dcache_line_size) ?
               env->icache_line_size : env->dcache_line_size;
        break;
    case 0x0FUL:
        /* Maximum cache line size */
        return (env->icache_line_size > env->dcache_line_size) ?
               env->icache_line_size : env->dcache_line_size;
        break;
    default:
        /* Undefined */
        return 0;
        break;
    }
}

target_ulong helper_div (target_ulong arg1, target_ulong arg2)
{
    uint64_t tmp = (uint64_t)arg1 << 32 | env->spr[SPR_MQ];

    if (((int32_t)tmp == INT32_MIN && (int32_t)arg2 == (int32_t)-1) ||
        (int32_t)arg2 == 0) {
        env->spr[SPR_MQ] = 0;
        return INT32_MIN;
    } else {
        env->spr[SPR_MQ] = tmp % arg2;
        return tmp / (int32_t)arg2;
    }
}
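/* Note: the POWER div family divides the 64-bit quantity (rA || MQ) by
 * rB, leaving the remainder in MQ. E.g. with rA = 0, MQ = 100 and
 * rB = 7, div returns 14 and stores 2 in MQ.
 */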
target_ulong helper_divo (target_ulong arg1, target_ulong arg2)
{
    uint64_t tmp = (uint64_t)arg1 << 32 | env->spr[SPR_MQ];

    if (((int32_t)tmp == INT32_MIN && (int32_t)arg2 == (int32_t)-1) ||
        (int32_t)arg2 == 0) {
        env->xer |= (1 << XER_OV) | (1 << XER_SO);
        env->spr[SPR_MQ] = 0;
        return INT32_MIN;
    } else {
        env->spr[SPR_MQ] = tmp % arg2;
        tmp /= (int32_t)arg2;
        if ((int32_t)tmp != tmp) {
            env->xer |= (1 << XER_OV) | (1 << XER_SO);
        } else {
            env->xer &= ~(1 << XER_OV);
        }
        return tmp;
    }
}

target_ulong helper_divs (target_ulong arg1, target_ulong arg2)
{
    if (((int32_t)arg1 == INT32_MIN && (int32_t)arg2 == (int32_t)-1) ||
        (int32_t)arg2 == 0) {
        env->spr[SPR_MQ] = 0;
        return INT32_MIN;
    } else {
        env->spr[SPR_MQ] = (int32_t)arg1 % (int32_t)arg2;
        return (int32_t)arg1 / (int32_t)arg2;
    }
}

target_ulong helper_divso (target_ulong arg1, target_ulong arg2)
{
    if (((int32_t)arg1 == INT32_MIN && (int32_t)arg2 == (int32_t)-1) ||
        (int32_t)arg2 == 0) {
        env->xer |= (1 << XER_OV) | (1 << XER_SO);
        env->spr[SPR_MQ] = 0;
        return INT32_MIN;
    } else {
        env->xer &= ~(1 << XER_OV);
        env->spr[SPR_MQ] = (int32_t)arg1 % (int32_t)arg2;
        return (int32_t)arg1 / (int32_t)arg2;
    }
}

#if !defined (CONFIG_USER_ONLY)
target_ulong helper_rac (target_ulong addr)
{
    mmu_ctx_t ctx;
    int nb_BATs;
    target_ulong ret = 0;

    /* We don't have to generate many instances of this instruction,
     * as rac is supervisor only.
     */
    /* XXX: FIX THIS: Pretend we have no BAT */
    nb_BATs = env->nb_BATs;
    env->nb_BATs = 0;
    if (get_physical_address(env, &ctx, addr, 0, ACCESS_INT) == 0)
        ret = ctx.raddr;
    env->nb_BATs = nb_BATs;
    return ret;
}

void helper_rfsvc (void)
{
    do_rfi(env->lr, env->ctr, 0x0000FFFF, 0);
}
#endif
/*****************************************************************************/
/* 602 specific instructions */
/* mfrom is the craziest instruction ever seen, imho! */
/* Real implementation uses a ROM table. Do the same */
/* Extremely decomposed:
 * return 256 * log10(10 ** (-arg / 256) + 1.0) + 0.5
 */
#if !defined (CONFIG_USER_ONLY)
target_ulong helper_602_mfrom (target_ulong arg)
{
    if (likely(arg < 602)) {
#include "mfrom_table.c"
        return mfrom_ROM_table[arg];
    } else {
        return 0;
    }
}
#endif
/*****************************************************************************/
/* Embedded PowerPC specific helpers */

/* XXX: to be improved to check access rights when in user-mode */
target_ulong helper_load_dcr (target_ulong dcrn)
{
    uint32_t val = 0;

    if (unlikely(env->dcr_env == NULL)) {
        qemu_log("No DCR environment\n");
        helper_raise_exception_err(POWERPC_EXCP_PROGRAM,
                                   POWERPC_EXCP_INVAL | POWERPC_EXCP_INVAL_INVAL);
    } else if (unlikely(ppc_dcr_read(env->dcr_env, (uint32_t)dcrn, &val) != 0)) {
        qemu_log("DCR read error %d %03x\n", (uint32_t)dcrn, (uint32_t)dcrn);
        helper_raise_exception_err(POWERPC_EXCP_PROGRAM,
                                   POWERPC_EXCP_INVAL | POWERPC_EXCP_PRIV_REG);
    }
    return val;
}

void helper_store_dcr (target_ulong dcrn, target_ulong val)
{
    if (unlikely(env->dcr_env == NULL)) {
        qemu_log("No DCR environment\n");
        helper_raise_exception_err(POWERPC_EXCP_PROGRAM,
                                   POWERPC_EXCP_INVAL | POWERPC_EXCP_INVAL_INVAL);
    } else if (unlikely(ppc_dcr_write(env->dcr_env, (uint32_t)dcrn, (uint32_t)val) != 0)) {
        qemu_log("DCR write error %d %03x\n", (uint32_t)dcrn, (uint32_t)dcrn);
        helper_raise_exception_err(POWERPC_EXCP_PROGRAM,
                                   POWERPC_EXCP_INVAL | POWERPC_EXCP_PRIV_REG);
    }
}

#if !defined(CONFIG_USER_ONLY)
void helper_40x_rfci (void)
{
    do_rfi(env->spr[SPR_40x_SRR2], env->spr[SPR_40x_SRR3],
           ~((target_ulong)0xFFFF0000), 0);
}

void helper_rfci (void)
{
    do_rfi(env->spr[SPR_BOOKE_CSRR0], env->spr[SPR_BOOKE_CSRR1],
           ~((target_ulong)0x3FFF0000), 0);
}

void helper_rfdi (void)
{
    do_rfi(env->spr[SPR_BOOKE_DSRR0], env->spr[SPR_BOOKE_DSRR1],
           ~((target_ulong)0x3FFF0000), 0);
}

void helper_rfmci (void)
{
    do_rfi(env->spr[SPR_BOOKE_MCSRR0], env->spr[SPR_BOOKE_MCSRR1],
           ~((target_ulong)0x3FFF0000), 0);
}
#endif
/* 440 specific */
target_ulong helper_dlmzb (target_ulong high, target_ulong low, uint32_t update_Rc)
{
    target_ulong mask;
    int i;

    i = 1;
    for (mask = 0xFF000000; mask != 0; mask = mask >> 8) {
        if ((high & mask) == 0) {
            if (update_Rc) {
                env->crf[0] = 0x4;
            }
            goto done;
        }
        i++;
    }
    for (mask = 0xFF000000; mask != 0; mask = mask >> 8) {
        if ((low & mask) == 0) {
            if (update_Rc) {
                env->crf[0] = 0x8;
            }
            goto done;
        }
        i++;
    }
    if (update_Rc) {
        env->crf[0] = 0x2;
    }
 done:
    env->xer = (env->xer & ~0x7F) | i;
    if (update_Rc) {
        env->crf[0] |= xer_so;
    }
    return i;
}
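/* Example: dlmzb counts bytes up to and including the leftmost zero byte
 * of the eight bytes high:low. For high = 0x41424344 and
 * low = 0x45004748 (the string "ABCDE\0GH"), the zero is the sixth byte,
 * so 6 is returned and written to the low 7 bits of XER.
 */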
/*****************************************************************************/
/* Altivec extension helpers */
#if defined(HOST_WORDS_BIGENDIAN)
#define HI_IDX 0
#define LO_IDX 1
#else
#define HI_IDX 1
#define LO_IDX 0
#endif

#if defined(HOST_WORDS_BIGENDIAN)
#define VECTOR_FOR_INORDER_I(index, element)            \
    for (index = 0; index < ARRAY_SIZE(r->element); index++)
#else
#define VECTOR_FOR_INORDER_I(index, element)            \
    for (index = ARRAY_SIZE(r->element)-1; index >= 0; index--)
#endif
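/* Note: VECTOR_FOR_INORDER_I visits elements in PowerPC order (element 0
 * first, as the guest sees it) on both host endiannesses: little-endian
 * hosts store the elements reversed, so the loop simply runs backwards
 * over the array.
 */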
/* If X is a NaN, store the corresponding QNaN into RESULT.  Otherwise,
 * execute the following block.  */
#define DO_HANDLE_NAN(result, x)                \
    if (float32_is_any_nan(x)) {                \
        CPU_FloatU __f;                         \
        __f.f = x;                              \
        __f.l = __f.l | (1 << 22);  /* Set QNaN bit. */ \
        result = __f.f;                         \
    } else

#define HANDLE_NAN1(result, x)                  \
    DO_HANDLE_NAN(result, x)
#define HANDLE_NAN2(result, x, y)               \
    DO_HANDLE_NAN(result, x) DO_HANDLE_NAN(result, y)
#define HANDLE_NAN3(result, x, y, z)            \
    DO_HANDLE_NAN(result, x) DO_HANDLE_NAN(result, y) DO_HANDLE_NAN(result, z)
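/* Usage sketch: because DO_HANDLE_NAN ends in a dangling "else", the
 * HANDLE_NANn macros chain so that the statement following them executes
 * only when no input is a NaN, e.g.
 *
 *     HANDLE_NAN2(r->f[i], a->f[i], b->f[i]) {
 *         r->f[i] = float32_add(a->f[i], b->f[i], &env->vec_status);
 *     }
 *
 * stores the first NaN input (quieted) otherwise, which is what the
 * VARITHFP helpers below rely on.
 */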
/* Saturating arithmetic helpers.  */
#define SATCVT(from, to, from_type, to_type, min, max)          \
    static inline to_type cvt##from##to(from_type x, int *sat)  \
    {                                                           \
        to_type r;                                              \
        if (x < (from_type)min) {                               \
            r = min;                                            \
            *sat = 1;                                           \
        } else if (x > (from_type)max) {                        \
            r = max;                                            \
            *sat = 1;                                           \
        } else {                                                \
            r = x;                                              \
        }                                                       \
        return r;                                               \
    }
#define SATCVTU(from, to, from_type, to_type, min, max)         \
    static inline to_type cvt##from##to(from_type x, int *sat)  \
    {                                                           \
        to_type r;                                              \
        if (x > (from_type)max) {                               \
            r = max;                                            \
            *sat = 1;                                           \
        } else {                                                \
            r = x;                                              \
        }                                                       \
        return r;                                               \
    }
SATCVT(sh, sb, int16_t, int8_t, INT8_MIN, INT8_MAX)
SATCVT(sw, sh, int32_t, int16_t, INT16_MIN, INT16_MAX)
SATCVT(sd, sw, int64_t, int32_t, INT32_MIN, INT32_MAX)

SATCVTU(uh, ub, uint16_t, uint8_t, 0, UINT8_MAX)
SATCVTU(uw, uh, uint32_t, uint16_t, 0, UINT16_MAX)
SATCVTU(ud, uw, uint64_t, uint32_t, 0, UINT32_MAX)
SATCVT(sh, ub, int16_t, uint8_t, 0, UINT8_MAX)
SATCVT(sw, uh, int32_t, uint16_t, 0, UINT16_MAX)
SATCVT(sd, uw, int64_t, uint32_t, 0, UINT32_MAX)
#undef SATCVT
#undef SATCVTU
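/* Example: SATCVT(sh, sb, ...) expands to cvtshsb(), which clamps an
 * int16_t into int8_t range: cvtshsb(300, &sat) returns 127 and sets
 * sat, cvtshsb(-300, &sat) returns -128, and in-range values pass
 * through unchanged. The sat flag feeds VSCR[SAT] in the callers below.
 */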
#define LVE(name, access, swap, element)                        \
    void helper_##name (ppc_avr_t *r, target_ulong addr)        \
    {                                                           \
        size_t n_elems = ARRAY_SIZE(r->element);                \
        int adjust = HI_IDX*(n_elems-1);                        \
        int sh = sizeof(r->element[0]) >> 1;                    \
        int index = (addr & 0xf) >> sh;                         \
        if (msr_le) {                                           \
            r->element[LO_IDX ? index : (adjust - index)] = swap(access(addr)); \
        } else {                                                \
            r->element[LO_IDX ? index : (adjust - index)] = access(addr); \
        }                                                       \
    }
#define I(x) (x)
LVE(lvebx, ldub, I, u8)
LVE(lvehx, lduw, bswap16, u16)
LVE(lvewx, ldl, bswap32, u32)
#undef I
#undef LVE

void helper_lvsl (ppc_avr_t *r, target_ulong sh)
{
    int i, j = (sh & 0xf);

    VECTOR_FOR_INORDER_I (i, u8) {
        r->u8[i] = j++;
    }
}

void helper_lvsr (ppc_avr_t *r, target_ulong sh)
{
    int i, j = 0x10 - (sh & 0xf);

    VECTOR_FOR_INORDER_I (i, u8) {
        r->u8[i] = j++;
    }
}
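/* Example: lvsl builds the permute control vector used (with vperm) to
 * realign a misaligned 16-byte load: for sh = 3 the result is the byte
 * sequence 03 04 05 ... 12, while lvsr starts counting at
 * 0x10 - sh = 0x0D.
 */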
#define STVE(name, access, swap, element)                       \
    void helper_##name (ppc_avr_t *r, target_ulong addr)        \
    {                                                           \
        size_t n_elems = ARRAY_SIZE(r->element);                \
        int adjust = HI_IDX*(n_elems-1);                        \
        int sh = sizeof(r->element[0]) >> 1;                    \
        int index = (addr & 0xf) >> sh;                         \
        if (msr_le) {                                           \
            access(addr, swap(r->element[LO_IDX ? index : (adjust - index)])); \
        } else {                                                \
            access(addr, r->element[LO_IDX ? index : (adjust - index)]); \
        }                                                       \
    }
#define I(x) (x)
STVE(stvebx, stb, I, u8)
STVE(stvehx, stw, bswap16, u16)
STVE(stvewx, stl, bswap32, u32)
#undef I
#undef STVE
void helper_mtvscr (ppc_avr_t *r)
{
#if defined(HOST_WORDS_BIGENDIAN)
    env->vscr = r->u32[3];
#else
    env->vscr = r->u32[0];
#endif
    set_flush_to_zero(vscr_nj, &env->vec_status);
}

void helper_vaddcuw (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
{
    int i;
    for (i = 0; i < ARRAY_SIZE(r->u32); i++) {
        r->u32[i] = ~a->u32[i] < b->u32[i];
    }
}
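/* Note: "~a->u32[i] < b->u32[i]" is a branch-free carry-out test:
 * a + b overflows 32 bits exactly when b > 0xFFFFFFFF - a, i.e. when
 * b > ~a. E.g. a = 0xFFFFFFFF, b = 1 gives carry 1; a = 1, b = 1 gives
 * carry 0.
 */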
#define VARITH_DO(name, op, element)                                    \
    void helper_v##name (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)      \
    {                                                                   \
        int i;                                                          \
        for (i = 0; i < ARRAY_SIZE(r->element); i++) {                  \
            r->element[i] = a->element[i] op b->element[i];             \
        }                                                               \
    }
#define VARITH(suffix, element)                 \
    VARITH_DO(add##suffix, +, element)          \
    VARITH_DO(sub##suffix, -, element)
VARITH(ubm, u8)
VARITH(uhm, u16)
VARITH(uwm, u32)
#undef VARITH_DO
#undef VARITH

#define VARITHFP(suffix, func)                                          \
    void helper_v##suffix (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)    \
    {                                                                   \
        int i;                                                          \
        for (i = 0; i < ARRAY_SIZE(r->f); i++) {                        \
            HANDLE_NAN2(r->f[i], a->f[i], b->f[i]) {                    \
                r->f[i] = func(a->f[i], b->f[i], &env->vec_status);     \
            }                                                           \
        }                                                               \
    }
VARITHFP(addfp, float32_add)
VARITHFP(subfp, float32_sub)
#undef VARITHFP

#define VARITHSAT_CASE(type, op, cvt, element)                          \
    {                                                                   \
        type result = (type)a->element[i] op (type)b->element[i];       \
        r->element[i] = cvt(result, &sat);                              \
    }

#define VARITHSAT_DO(name, op, optype, cvt, element)                    \
    void helper_v##name (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)      \
    {                                                                   \
        int sat = 0;                                                    \
        int i;                                                          \
        for (i = 0; i < ARRAY_SIZE(r->element); i++) {                  \
            switch (sizeof(r->element[0])) {                            \
            case 1: VARITHSAT_CASE(optype, op, cvt, element); break;    \
            case 2: VARITHSAT_CASE(optype, op, cvt, element); break;    \
            case 4: VARITHSAT_CASE(optype, op, cvt, element); break;    \
            }                                                           \
        }                                                               \
        if (sat) {                                                      \
            env->vscr |= (1 << VSCR_SAT);                               \
        }                                                               \
    }
#define VARITHSAT_SIGNED(suffix, element, optype, cvt)          \
    VARITHSAT_DO(adds##suffix##s, +, optype, cvt, element)      \
    VARITHSAT_DO(subs##suffix##s, -, optype, cvt, element)
#define VARITHSAT_UNSIGNED(suffix, element, optype, cvt)        \
    VARITHSAT_DO(addu##suffix##s, +, optype, cvt, element)      \
    VARITHSAT_DO(subu##suffix##s, -, optype, cvt, element)
VARITHSAT_SIGNED(b, s8, int16_t, cvtshsb)
VARITHSAT_SIGNED(h, s16, int32_t, cvtswsh)
VARITHSAT_SIGNED(w, s32, int64_t, cvtsdsw)
VARITHSAT_UNSIGNED(b, u8, uint16_t, cvtshub)
VARITHSAT_UNSIGNED(h, u16, uint32_t, cvtswuh)
VARITHSAT_UNSIGNED(w, u32, uint64_t, cvtsduw)
#undef VARITHSAT_CASE
#undef VARITHSAT_DO
#undef VARITHSAT_SIGNED
#undef VARITHSAT_UNSIGNED
2125 #define VAVG_DO(name, element, etype) \
2126 void helper_v##name (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b) \
2128 int i; \
2129 for (i = 0; i < ARRAY_SIZE(r->element); i++) { \
2130 etype x = (etype)a->element[i] + (etype)b->element[i] + 1; \
2131 r->element[i] = x >> 1; \
2135 #define VAVG(type, signed_element, signed_type, unsigned_element, unsigned_type) \
2136 VAVG_DO(avgs##type, signed_element, signed_type) \
2137 VAVG_DO(avgu##type, unsigned_element, unsigned_type)
2138 VAVG(b, s8, int16_t, u8, uint16_t)
2139 VAVG(h, s16, int32_t, u16, uint32_t)
2140 VAVG(w, s32, int64_t, u32, uint64_t)
2141 #undef VAVG_DO
2142 #undef VAVG
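/* The widened intermediate type in VAVG_DO keeps a + b + 1 from
 * overflowing, yielding the rounded average (a + b + 1) >> 1. Worked
 * sketch for one u8 element (illustrative only, kept compiled out):
 */
#if 0
#include <stdint.h>
static uint8_t avg_round_u8(uint8_t a, uint8_t b)
{
    /* avg_round_u8(1, 2) == 2 because (1 + 2 + 1) >> 1 == 2 */
    return (uint8_t)(((uint16_t)a + b + 1) >> 1);
}
#endif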
2144 #define VCF(suffix, cvt, element) \
2145 void helper_vcf##suffix (ppc_avr_t *r, ppc_avr_t *b, uint32_t uim) \
2147 int i; \
2148 for (i = 0; i < ARRAY_SIZE(r->f); i++) { \
2149 float32 t = cvt(b->element[i], &env->vec_status); \
2150 r->f[i] = float32_scalbn (t, -uim, &env->vec_status); \
2153 VCF(ux, uint32_to_float32, u32)
2154 VCF(sx, int32_to_float32, s32)
2155 #undef VCF
2157 #define VCMP_DO(suffix, compare, element, record) \
2158 void helper_vcmp##suffix (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b) \
2160 uint32_t ones = (uint32_t)-1; \
2161 uint32_t all = ones; \
2162 uint32_t none = 0; \
2163 int i; \
2164 for (i = 0; i < ARRAY_SIZE(r->element); i++) { \
2165 uint32_t result = (a->element[i] compare b->element[i] ? ones : 0x0); \
2166 switch (sizeof (a->element[0])) { \
2167 case 4: r->u32[i] = result; break; \
2168 case 2: r->u16[i] = result; break; \
2169 case 1: r->u8[i] = result; break; \
2171 all &= result; \
2172 none |= result; \
2174 if (record) { \
2175 env->crf[6] = ((all != 0) << 3) | ((none == 0) << 1); \
2178 #define VCMP(suffix, compare, element) \
2179 VCMP_DO(suffix, compare, element, 0) \
2180 VCMP_DO(suffix##_dot, compare, element, 1)
2181 VCMP(equb, ==, u8)
2182 VCMP(equh, ==, u16)
2183 VCMP(equw, ==, u32)
2184 VCMP(gtub, >, u8)
2185 VCMP(gtuh, >, u16)
2186 VCMP(gtuw, >, u32)
2187 VCMP(gtsb, >, s8)
2188 VCMP(gtsh, >, s16)
2189 VCMP(gtsw, >, s32)
2190 #undef VCMP_DO
2191 #undef VCMP
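/* For the dot forms, CR field 6 encodes the outcome: bit 3 (value 8)
 * means every element compared true, bit 1 (value 2) means none did,
 * and a mixed result leaves the field 0. Sketch of the encoding used
 * above (illustrative only, kept compiled out):
 */
#if 0
#include <stdint.h>
static uint32_t cr6_encode(uint32_t and_of_results, uint32_t or_of_results)
{
    /* all true -> 0x8, none true -> 0x2, mixed -> 0x0 */
    return ((and_of_results != 0) << 3) | ((or_of_results == 0) << 1);
}
#endif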
2193 #define VCMPFP_DO(suffix, compare, order, record) \
2194 void helper_vcmp##suffix (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b) \
2196 uint32_t ones = (uint32_t)-1; \
2197 uint32_t all = ones; \
2198 uint32_t none = 0; \
2199 int i; \
2200 for (i = 0; i < ARRAY_SIZE(r->f); i++) { \
2201 uint32_t result; \
2202 int rel = float32_compare_quiet(a->f[i], b->f[i], &env->vec_status); \
2203 if (rel == float_relation_unordered) { \
2204 result = 0; \
2205 } else if (rel compare order) { \
2206 result = ones; \
2207 } else { \
2208 result = 0; \
2210 r->u32[i] = result; \
2211 all &= result; \
2212 none |= result; \
2214 if (record) { \
2215 env->crf[6] = ((all != 0) << 3) | ((none == 0) << 1); \
2218 #define VCMPFP(suffix, compare, order) \
2219 VCMPFP_DO(suffix, compare, order, 0) \
2220 VCMPFP_DO(suffix##_dot, compare, order, 1)
2221 VCMPFP(eqfp, ==, float_relation_equal)
2222 VCMPFP(gefp, !=, float_relation_less)
2223 VCMPFP(gtfp, ==, float_relation_greater)
2224 #undef VCMPFP_DO
2225 #undef VCMPFP
2227 static inline void vcmpbfp_internal(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b,
2228 int record)
2230 int i;
2231 int all_in = 0;
2232 for (i = 0; i < ARRAY_SIZE(r->f); i++) {
2233 int le_rel = float32_compare_quiet(a->f[i], b->f[i], &env->vec_status);
2234 if (le_rel == float_relation_unordered) {
2235 r->u32[i] = 0xc0000000;
2236 /* ALL_IN does not need to be updated here. */
2237 } else {
2238 float32 bneg = float32_chs(b->f[i]);
2239 int ge_rel = float32_compare_quiet(a->f[i], bneg, &env->vec_status);
2240 int le = le_rel != float_relation_greater;
2241 int ge = ge_rel != float_relation_less;
2242 r->u32[i] = ((!le) << 31) | ((!ge) << 30);
2243 all_in |= (!le | !ge);
2246 if (record) {
2247 env->crf[6] = (all_in == 0) << 1;
2251 void helper_vcmpbfp (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
2253 vcmpbfp_internal(r, a, b, 0);
2256 void helper_vcmpbfp_dot (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
2258 vcmpbfp_internal(r, a, b, 1);
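/* vcmpbfp tests each element against the bound [-b, b]: bit 31 of the
 * result is set when a > b and bit 30 when a < -b, so 0 means "within
 * bounds" and the NaN case above sets both bits. Per-element sketch
 * with plain floats (illustrative only, kept compiled out):
 */
#if 0
#include <stdint.h>
static uint32_t bounds_check(float a, float b)
{
    /* bounds_check(2.0f, 1.0f) == 0x80000000; bounds_check(0.5f, 1.0f) == 0 */
    return ((uint32_t)(a > b) << 31) | ((uint32_t)(a < -b) << 30);
}
#endif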
2261 #define VCT(suffix, satcvt, element) \
2262 void helper_vct##suffix (ppc_avr_t *r, ppc_avr_t *b, uint32_t uim) \
2264 int i; \
2265 int sat = 0; \
2266 float_status s = env->vec_status; \
2267 set_float_rounding_mode(float_round_to_zero, &s); \
2268 for (i = 0; i < ARRAY_SIZE(r->f); i++) { \
2269 if (float32_is_any_nan(b->f[i])) { \
2270 r->element[i] = 0; \
2271 } else { \
2272 float64 t = float32_to_float64(b->f[i], &s); \
2273 int64_t j; \
2274 t = float64_scalbn(t, uim, &s); \
2275 j = float64_to_int64(t, &s); \
2276 r->element[i] = satcvt(j, &sat); \
2279 if (sat) { \
2280 env->vscr |= (1 << VSCR_SAT); \
2283 VCT(uxs, cvtsduw, u32)
2284 VCT(sxs, cvtsdsw, s32)
2285 #undef VCT
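/* VCT scales by 2^uim, truncates toward zero and saturates to the
 * target integer range, recording overflow in VSCR[SAT]. The satcvt
 * helpers are defined elsewhere in this file; a stand-alone clamp of
 * the same shape might look like this (an assumption, kept compiled
 * out):
 */
#if 0
#include <stdint.h>
static int32_t clamp_s32(int64_t x, int *sat)
{
    if (x > INT32_MAX) { *sat = 1; return INT32_MAX; }
    if (x < INT32_MIN) { *sat = 1; return INT32_MIN; }
    return (int32_t)x;
}
#endif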
2287 void helper_vmaddfp (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, ppc_avr_t *c)
2289 int i;
2290 for (i = 0; i < ARRAY_SIZE(r->f); i++) {
2291 HANDLE_NAN3(r->f[i], a->f[i], b->f[i], c->f[i]) {
2292 /* Need to do the computation in higher precision and round
2293 * once at the end. */
2294 float64 af, bf, cf, t;
2295 af = float32_to_float64(a->f[i], &env->vec_status);
2296 bf = float32_to_float64(b->f[i], &env->vec_status);
2297 cf = float32_to_float64(c->f[i], &env->vec_status);
2298 t = float64_mul(af, cf, &env->vec_status);
2299 t = float64_add(t, bf, &env->vec_status);
2300 r->f[i] = float64_to_float32(t, &env->vec_status);
2305 void helper_vmhaddshs (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, ppc_avr_t *c)
2307 int sat = 0;
2308 int i;
2310 for (i = 0; i < ARRAY_SIZE(r->s16); i++) {
2311 int32_t prod = a->s16[i] * b->s16[i];
2312 int32_t t = (int32_t)c->s16[i] + (prod >> 15);
2313 r->s16[i] = cvtswsh (t, &sat);
2316 if (sat) {
2317 env->vscr |= (1 << VSCR_SAT);
2321 void helper_vmhraddshs (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, ppc_avr_t *c)
2323 int sat = 0;
2324 int i;
2326 for (i = 0; i < ARRAY_SIZE(r->s16); i++) {
2327 int32_t prod = a->s16[i] * b->s16[i] + 0x00004000;
2328 int32_t t = (int32_t)c->s16[i] + (prod >> 15);
2329 r->s16[i] = cvtswsh (t, &sat);
2332 if (sat) {
2333 env->vscr |= (1 << VSCR_SAT);
2337 #define VMINMAX_DO(name, compare, element) \
2338 void helper_v##name (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b) \
2340 int i; \
2341 for (i = 0; i < ARRAY_SIZE(r->element); i++) { \
2342 if (a->element[i] compare b->element[i]) { \
2343 r->element[i] = b->element[i]; \
2344 } else { \
2345 r->element[i] = a->element[i]; \
2349 #define VMINMAX(suffix, element) \
2350 VMINMAX_DO(min##suffix, >, element) \
2351 VMINMAX_DO(max##suffix, <, element)
2352 VMINMAX(sb, s8)
2353 VMINMAX(sh, s16)
2354 VMINMAX(sw, s32)
2355 VMINMAX(ub, u8)
2356 VMINMAX(uh, u16)
2357 VMINMAX(uw, u32)
2358 #undef VMINMAX_DO
2359 #undef VMINMAX
2361 #define VMINMAXFP(suffix, rT, rF) \
2362 void helper_v##suffix (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b) \
2364 int i; \
2365 for (i = 0; i < ARRAY_SIZE(r->f); i++) { \
2366 HANDLE_NAN2(r->f[i], a->f[i], b->f[i]) { \
2367 if (float32_lt_quiet(a->f[i], b->f[i], &env->vec_status)) { \
2368 r->f[i] = rT->f[i]; \
2369 } else { \
2370 r->f[i] = rF->f[i]; \
2375 VMINMAXFP(minfp, a, b)
2376 VMINMAXFP(maxfp, b, a)
2377 #undef VMINMAXFP
2379 void helper_vmladduhm (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, ppc_avr_t *c)
2381 int i;
2382 for (i = 0; i < ARRAY_SIZE(r->s16); i++) {
2383 int32_t prod = a->s16[i] * b->s16[i];
2384 r->s16[i] = (int16_t) (prod + c->s16[i]);
2388 #define VMRG_DO(name, element, highp) \
2389 void helper_v##name (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b) \
2391 ppc_avr_t result; \
2392 int i; \
2393 size_t n_elems = ARRAY_SIZE(r->element); \
2394 for (i = 0; i < n_elems/2; i++) { \
2395 if (highp) { \
2396 result.element[i*2+HI_IDX] = a->element[i]; \
2397 result.element[i*2+LO_IDX] = b->element[i]; \
2398 } else { \
2399 result.element[n_elems - i*2 - (1+HI_IDX)] = b->element[n_elems - i - 1]; \
2400 result.element[n_elems - i*2 - (1+LO_IDX)] = a->element[n_elems - i - 1]; \
2403 *r = result; \
2405 #if defined(HOST_WORDS_BIGENDIAN)
2406 #define MRGHI 0
2407 #define MRGLO 1
2408 #else
2409 #define MRGHI 1
2410 #define MRGLO 0
2411 #endif
2412 #define VMRG(suffix, element) \
2413 VMRG_DO(mrgl##suffix, element, MRGHI) \
2414 VMRG_DO(mrgh##suffix, element, MRGLO)
2415 VMRG(b, u8)
2416 VMRG(h, u16)
2417 VMRG(w, u32)
2418 #undef VMRG_DO
2419 #undef VMRG
2420 #undef MRGHI
2421 #undef MRGLO
2423 void helper_vmsummbm (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, ppc_avr_t *c)
2425 int32_t prod[16];
2426 int i;
2428 for (i = 0; i < ARRAY_SIZE(r->s8); i++) {
2429 prod[i] = (int32_t)a->s8[i] * b->u8[i];
2432 VECTOR_FOR_INORDER_I(i, s32) {
2433 r->s32[i] = c->s32[i] + prod[4*i] + prod[4*i+1] + prod[4*i+2] + prod[4*i+3];
2437 void helper_vmsumshm (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, ppc_avr_t *c)
2439 int32_t prod[8];
2440 int i;
2442 for (i = 0; i < ARRAY_SIZE(r->s16); i++) {
2443 prod[i] = a->s16[i] * b->s16[i];
2446 VECTOR_FOR_INORDER_I(i, s32) {
2447 r->s32[i] = c->s32[i] + prod[2*i] + prod[2*i+1];
2451 void helper_vmsumshs (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, ppc_avr_t *c)
2453 int32_t prod[8];
2454 int i;
2455 int sat = 0;
2457 for (i = 0; i < ARRAY_SIZE(r->s16); i++) {
2458 prod[i] = (int32_t)a->s16[i] * b->s16[i];
2461 VECTOR_FOR_INORDER_I (i, s32) {
2462 int64_t t = (int64_t)c->s32[i] + prod[2*i] + prod[2*i+1];
2463 r->u32[i] = cvtsdsw(t, &sat);
2466 if (sat) {
2467 env->vscr |= (1 << VSCR_SAT);
2471 void helper_vmsumubm (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, ppc_avr_t *c)
2473 uint16_t prod[16];
2474 int i;
2476 for (i = 0; i < ARRAY_SIZE(r->u8); i++) {
2477 prod[i] = a->u8[i] * b->u8[i];
2480 VECTOR_FOR_INORDER_I(i, u32) {
2481 r->u32[i] = c->u32[i] + prod[4*i] + prod[4*i+1] + prod[4*i+2] + prod[4*i+3];
2485 void helper_vmsumuhm (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, ppc_avr_t *c)
2487 uint32_t prod[8];
2488 int i;
2490 for (i = 0; i < ARRAY_SIZE(r->u16); i++) {
2491 prod[i] = a->u16[i] * b->u16[i];
2494 VECTOR_FOR_INORDER_I(i, u32) {
2495 r->u32[i] = c->u32[i] + prod[2*i] + prod[2*i+1];
2499 void helper_vmsumuhs (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, ppc_avr_t *c)
2501 uint32_t prod[8];
2502 int i;
2503 int sat = 0;
2505 for (i = 0; i < ARRAY_SIZE(r->u16); i++) {
2506 prod[i] = a->u16[i] * b->u16[i];
2509 VECTOR_FOR_INORDER_I (i, s32) {
2510 uint64_t t = (uint64_t)c->u32[i] + prod[2*i] + prod[2*i+1];
2511 r->u32[i] = cvtuduw(t, &sat);
2514 if (sat) {
2515 env->vscr |= (1 << VSCR_SAT);
2519 #define VMUL_DO(name, mul_element, prod_element, evenp) \
2520 void helper_v##name (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b) \
2522 int i; \
2523 VECTOR_FOR_INORDER_I(i, prod_element) { \
2524 if (evenp) { \
2525 r->prod_element[i] = a->mul_element[i*2+HI_IDX] * b->mul_element[i*2+HI_IDX]; \
2526 } else { \
2527 r->prod_element[i] = a->mul_element[i*2+LO_IDX] * b->mul_element[i*2+LO_IDX]; \
2531 #define VMUL(suffix, mul_element, prod_element) \
2532 VMUL_DO(mule##suffix, mul_element, prod_element, 1) \
2533 VMUL_DO(mulo##suffix, mul_element, prod_element, 0)
2534 VMUL(sb, s8, s16)
2535 VMUL(sh, s16, s32)
2536 VMUL(ub, u8, u16)
2537 VMUL(uh, u16, u32)
2538 #undef VMUL_DO
2539 #undef VMUL
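/* The even (mule) and odd (mulo) multiply forms take alternating narrow
 * elements and produce results twice as wide, so no product can
 * overflow. Sketch for a single u8 pair (illustrative only, kept
 * compiled out):
 */
#if 0
#include <stdint.h>
static uint16_t mul_widen_u8(uint8_t a, uint8_t b)
{
    /* 255 * 255 == 65025 still fits in a uint16_t */
    return (uint16_t)((uint16_t)a * b);
}
#endif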
2541 void helper_vnmsubfp (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, ppc_avr_t *c)
2543 int i;
2544 for (i = 0; i < ARRAY_SIZE(r->f); i++) {
2545 HANDLE_NAN3(r->f[i], a->f[i], b->f[i], c->f[i]) {
2546 /* Need to do the computation in higher precision and round
2547 * once at the end. */
2548 float64 af, bf, cf, t;
2549 af = float32_to_float64(a->f[i], &env->vec_status);
2550 bf = float32_to_float64(b->f[i], &env->vec_status);
2551 cf = float32_to_float64(c->f[i], &env->vec_status);
2552 t = float64_mul(af, cf, &env->vec_status);
2553 t = float64_sub(t, bf, &env->vec_status);
2554 t = float64_chs(t);
2555 r->f[i] = float64_to_float32(t, &env->vec_status);
2560 void helper_vperm (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, ppc_avr_t *c)
2562 ppc_avr_t result;
2563 int i;
2564 VECTOR_FOR_INORDER_I (i, u8) {
2565 int s = c->u8[i] & 0x1f;
2566 #if defined(HOST_WORDS_BIGENDIAN)
2567 int index = s & 0xf;
2568 #else
2569 int index = 15 - (s & 0xf);
2570 #endif
2571 if (s & 0x10) {
2572 result.u8[i] = b->u8[index];
2573 } else {
2574 result.u8[i] = a->u8[index];
2577 *r = result;
2580 #if defined(HOST_WORDS_BIGENDIAN)
2581 #define PKBIG 1
2582 #else
2583 #define PKBIG 0
2584 #endif
2585 void helper_vpkpx (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
2587 int i, j;
2588 ppc_avr_t result;
2589 #if defined(HOST_WORDS_BIGENDIAN)
2590 const ppc_avr_t *x[2] = { a, b };
2591 #else
2592 const ppc_avr_t *x[2] = { b, a };
2593 #endif
2595 VECTOR_FOR_INORDER_I (i, u64) {
2596 VECTOR_FOR_INORDER_I (j, u32){
2597 uint32_t e = x[i]->u32[j];
2598 result.u16[4*i+j] = (((e >> 9) & 0xfc00) |
2599 ((e >> 6) & 0x3e0) |
2600 ((e >> 3) & 0x1f));
2603 *r = result;
2606 #define VPK(suffix, from, to, cvt, dosat) \
2607 void helper_vpk##suffix (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b) \
2609 int i; \
2610 int sat = 0; \
2611 ppc_avr_t result; \
2612 ppc_avr_t *a0 = PKBIG ? a : b; \
2613 ppc_avr_t *a1 = PKBIG ? b : a; \
2614 VECTOR_FOR_INORDER_I (i, from) { \
2615 result.to[i] = cvt(a0->from[i], &sat); \
2616 result.to[i+ARRAY_SIZE(r->from)] = cvt(a1->from[i], &sat); \
2618 *r = result; \
2619 if (dosat && sat) { \
2620 env->vscr |= (1 << VSCR_SAT); \
2623 #define I(x, y) (x)
2624 VPK(shss, s16, s8, cvtshsb, 1)
2625 VPK(shus, s16, u8, cvtshub, 1)
2626 VPK(swss, s32, s16, cvtswsh, 1)
2627 VPK(swus, s32, u16, cvtswuh, 1)
2628 VPK(uhus, u16, u8, cvtuhub, 1)
2629 VPK(uwus, u32, u16, cvtuwuh, 1)
2630 VPK(uhum, u16, u8, I, 0)
2631 VPK(uwum, u32, u16, I, 0)
2632 #undef I
2633 #undef VPK
2634 #undef PKBIG
2636 void helper_vrefp (ppc_avr_t *r, ppc_avr_t *b)
2638 int i;
2639 for (i = 0; i < ARRAY_SIZE(r->f); i++) {
2640 HANDLE_NAN1(r->f[i], b->f[i]) {
2641 r->f[i] = float32_div(float32_one, b->f[i], &env->vec_status);
2646 #define VRFI(suffix, rounding) \
2647 void helper_vrfi##suffix (ppc_avr_t *r, ppc_avr_t *b) \
2649 int i; \
2650 float_status s = env->vec_status; \
2651 set_float_rounding_mode(rounding, &s); \
2652 for (i = 0; i < ARRAY_SIZE(r->f); i++) { \
2653 HANDLE_NAN1(r->f[i], b->f[i]) { \
2654 r->f[i] = float32_round_to_int (b->f[i], &s); \
2658 VRFI(n, float_round_nearest_even)
2659 VRFI(m, float_round_down)
2660 VRFI(p, float_round_up)
2661 VRFI(z, float_round_to_zero)
2662 #undef VRFI
2664 #define VROTATE(suffix, element) \
2665 void helper_vrl##suffix (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b) \
2667 int i; \
2668 for (i = 0; i < ARRAY_SIZE(r->element); i++) { \
2669 unsigned int mask = ((1 << (3 + (sizeof (a->element[0]) >> 1))) - 1); \
2670 unsigned int shift = b->element[i] & mask; \
2671 r->element[i] = (a->element[i] << shift) | (a->element[i] >> (sizeof(a->element[0]) * 8 - shift)); \
2674 VROTATE(b, u8)
2675 VROTATE(h, u16)
2676 VROTATE(w, u32)
2677 #undef VROTATE
2679 void helper_vrsqrtefp (ppc_avr_t *r, ppc_avr_t *b)
2681 int i;
2682 for (i = 0; i < ARRAY_SIZE(r->f); i++) {
2683 HANDLE_NAN1(r->f[i], b->f[i]) {
2684 float32 t = float32_sqrt(b->f[i], &env->vec_status);
2685 r->f[i] = float32_div(float32_one, t, &env->vec_status);
2690 void helper_vsel (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, ppc_avr_t *c)
2692 r->u64[0] = (a->u64[0] & ~c->u64[0]) | (b->u64[0] & c->u64[0]);
2693 r->u64[1] = (a->u64[1] & ~c->u64[1]) | (b->u64[1] & c->u64[1]);
2696 void helper_vexptefp (ppc_avr_t *r, ppc_avr_t *b)
2698 int i;
2699 for (i = 0; i < ARRAY_SIZE(r->f); i++) {
2700 HANDLE_NAN1(r->f[i], b->f[i]) {
2701 r->f[i] = float32_exp2(b->f[i], &env->vec_status);
2706 void helper_vlogefp (ppc_avr_t *r, ppc_avr_t *b)
2708 int i;
2709 for (i = 0; i < ARRAY_SIZE(r->f); i++) {
2710 HANDLE_NAN1(r->f[i], b->f[i]) {
2711 r->f[i] = float32_log2(b->f[i], &env->vec_status);
2716 #if defined(HOST_WORDS_BIGENDIAN)
2717 #define LEFT 0
2718 #define RIGHT 1
2719 #else
2720 #define LEFT 1
2721 #define RIGHT 0
2722 #endif
2723 /* The specification says that the results are undefined if all of the
2724 * shift counts are not identical. We check that they are, to conform
2725 * to what real hardware appears to do. */
2726 #define VSHIFT(suffix, leftp) \
2727 void helper_vs##suffix (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b) \
2729 int shift = b->u8[LO_IDX*15] & 0x7; \
2730 int doit = 1; \
2731 int i; \
2732 for (i = 0; i < ARRAY_SIZE(r->u8); i++) { \
2733 doit = doit && ((b->u8[i] & 0x7) == shift); \
2735 if (doit) { \
2736 if (shift == 0) { \
2737 *r = *a; \
2738 } else if (leftp) { \
2739 uint64_t carry = a->u64[LO_IDX] >> (64 - shift); \
2740 r->u64[HI_IDX] = (a->u64[HI_IDX] << shift) | carry; \
2741 r->u64[LO_IDX] = a->u64[LO_IDX] << shift; \
2742 } else { \
2743 uint64_t carry = a->u64[HI_IDX] << (64 - shift); \
2744 r->u64[LO_IDX] = (a->u64[LO_IDX] >> shift) | carry; \
2745 r->u64[HI_IDX] = a->u64[HI_IDX] >> shift; \
2749 VSHIFT(l, LEFT)
2750 VSHIFT(r, RIGHT)
2751 #undef VSHIFT
2752 #undef LEFT
2753 #undef RIGHT
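/* vsl/vsr shift the full 128-bit register by 0-7 bits, carrying the
 * bits that cross the 64-bit boundary between the two halves. Sketch of
 * the left-shift step for a nonzero count (shift == 0 is special-cased
 * above precisely because shifting a uint64_t by 64 - 0 bits is
 * undefined; illustrative only, kept compiled out):
 */
#if 0
#include <stdint.h>
static void shl128(uint64_t *hi, uint64_t *lo, int shift /* 1..7 */)
{
    uint64_t carry = *lo >> (64 - shift);
    *hi = (*hi << shift) | carry;
    *lo <<= shift;
}
#endif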
2755 #define VSL(suffix, element) \
2756 void helper_vsl##suffix (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b) \
2758 int i; \
2759 for (i = 0; i < ARRAY_SIZE(r->element); i++) { \
2760 unsigned int mask = ((1 << (3 + (sizeof (a->element[0]) >> 1))) - 1); \
2761 unsigned int shift = b->element[i] & mask; \
2762 r->element[i] = a->element[i] << shift; \
2765 VSL(b, u8)
2766 VSL(h, u16)
2767 VSL(w, u32)
2768 #undef VSL
2770 void helper_vsldoi (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, uint32_t shift)
2772 int sh = shift & 0xf;
2773 int i;
2774 ppc_avr_t result;
2776 #if defined(HOST_WORDS_BIGENDIAN)
2777 for (i = 0; i < ARRAY_SIZE(r->u8); i++) {
2778 int index = sh + i;
2779 if (index > 0xf) {
2780 result.u8[i] = b->u8[index-0x10];
2781 } else {
2782 result.u8[i] = a->u8[index];
2785 #else
2786 for (i = 0; i < ARRAY_SIZE(r->u8); i++) {
2787 int index = (16 - sh) + i;
2788 if (index > 0xf) {
2789 result.u8[i] = a->u8[index-0x10];
2790 } else {
2791 result.u8[i] = b->u8[index];
2794 #endif
2795 *r = result;
2798 void helper_vslo (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
2800 int sh = (b->u8[LO_IDX*0xf] >> 3) & 0xf;
2802 #if defined (HOST_WORDS_BIGENDIAN)
2803 memmove (&r->u8[0], &a->u8[sh], 16-sh);
2804 memset (&r->u8[16-sh], 0, sh);
2805 #else
2806 memmove (&r->u8[sh], &a->u8[0], 16-sh);
2807 memset (&r->u8[0], 0, sh);
2808 #endif
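/* vslo shifts by whole octets: the count comes from the low-order byte
 * of b, ((byte >> 3) & 0xf), and the vector is shifted left that many
 * bytes with zero fill, which is what the memmove/memset pair does for
 * each host byte order. Big-endian-layout sketch (illustrative only,
 * kept compiled out):
 */
#if 0
#include <string.h>
#include <stdint.h>
static void shift_left_octets(uint8_t v[16], int sh /* 0..15 */)
{
    memmove(&v[0], &v[sh], 16 - sh);
    memset(&v[16 - sh], 0, sh);
}
#endif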
2811 /* Experimental testing shows that hardware masks the immediate. */
2812 #define _SPLAT_MASKED(element) (splat & (ARRAY_SIZE(r->element) - 1))
2813 #if defined(HOST_WORDS_BIGENDIAN)
2814 #define SPLAT_ELEMENT(element) _SPLAT_MASKED(element)
2815 #else
2816 #define SPLAT_ELEMENT(element) (ARRAY_SIZE(r->element)-1 - _SPLAT_MASKED(element))
2817 #endif
2818 #define VSPLT(suffix, element) \
2819 void helper_vsplt##suffix (ppc_avr_t *r, ppc_avr_t *b, uint32_t splat) \
2821 uint32_t s = b->element[SPLAT_ELEMENT(element)]; \
2822 int i; \
2823 for (i = 0; i < ARRAY_SIZE(r->element); i++) { \
2824 r->element[i] = s; \
2827 VSPLT(b, u8)
2828 VSPLT(h, u16)
2829 VSPLT(w, u32)
2830 #undef VSPLT
2831 #undef SPLAT_ELEMENT
2832 #undef _SPLAT_MASKED
2834 #define VSPLTI(suffix, element, splat_type) \
2835 void helper_vspltis##suffix (ppc_avr_t *r, uint32_t splat) \
2837 splat_type x = (int8_t)(splat << 3) >> 3; \
2838 int i; \
2839 for (i = 0; i < ARRAY_SIZE(r->element); i++) { \
2840 r->element[i] = x; \
2843 VSPLTI(b, s8, int8_t)
2844 VSPLTI(h, s16, int16_t)
2845 VSPLTI(w, s32, int32_t)
2846 #undef VSPLTI
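/* The vspltis helpers sign-extend a 5-bit immediate: shifting it into
 * the top of an int8_t and arithmetic-shifting back replicates bit 4
 * into the upper bits. Worked sketch (illustrative only, kept compiled
 * out):
 */
#if 0
#include <stdint.h>
static int8_t sext5(uint32_t imm /* 0..31 */)
{
    /* sext5(15) == 15, sext5(16) == -16, sext5(31) == -1 */
    return (int8_t)(imm << 3) >> 3;
}
#endif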
2848 #define VSR(suffix, element) \
2849 void helper_vsr##suffix (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b) \
2851 int i; \
2852 for (i = 0; i < ARRAY_SIZE(r->element); i++) { \
2853 unsigned int mask = ((1 << (3 + (sizeof (a->element[0]) >> 1))) - 1); \
2854 unsigned int shift = b->element[i] & mask; \
2855 r->element[i] = a->element[i] >> shift; \
2858 VSR(ab, s8)
2859 VSR(ah, s16)
2860 VSR(aw, s32)
2861 VSR(b, u8)
2862 VSR(h, u16)
2863 VSR(w, u32)
2864 #undef VSR
2866 void helper_vsro (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
2868 int sh = (b->u8[LO_IDX*0xf] >> 3) & 0xf;
2870 #if defined (HOST_WORDS_BIGENDIAN)
2871 memmove (&r->u8[sh], &a->u8[0], 16-sh);
2872 memset (&r->u8[0], 0, sh);
2873 #else
2874 memmove (&r->u8[0], &a->u8[sh], 16-sh);
2875 memset (&r->u8[16-sh], 0, sh);
2876 #endif
2879 void helper_vsubcuw (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
2881 int i;
2882 for (i = 0; i < ARRAY_SIZE(r->u32); i++) {
2883 r->u32[i] = a->u32[i] >= b->u32[i];
2887 void helper_vsumsws (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
2889 int64_t t;
2890 int i, upper;
2891 ppc_avr_t result;
2892 int sat = 0;
2894 #if defined(HOST_WORDS_BIGENDIAN)
2895 upper = ARRAY_SIZE(r->s32)-1;
2896 #else
2897 upper = 0;
2898 #endif
2899 t = (int64_t)b->s32[upper];
2900 for (i = 0; i < ARRAY_SIZE(r->s32); i++) {
2901 t += a->s32[i];
2902 result.s32[i] = 0;
2904 result.s32[upper] = cvtsdsw(t, &sat);
2905 *r = result;
2907 if (sat) {
2908 env->vscr |= (1 << VSCR_SAT);
2912 void helper_vsum2sws (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
2914 int i, j, upper;
2915 ppc_avr_t result;
2916 int sat = 0;
2918 #if defined(HOST_WORDS_BIGENDIAN)
2919 upper = 1;
2920 #else
2921 upper = 0;
2922 #endif
2923 for (i = 0; i < ARRAY_SIZE(r->u64); i++) {
2924 int64_t t = (int64_t)b->s32[upper+i*2];
2925 result.u64[i] = 0;
2926 for (j = 0; j < ARRAY_SIZE(r->u64); j++) {
2927 t += a->s32[2*i+j];
2929 result.s32[upper+i*2] = cvtsdsw(t, &sat);
2932 *r = result;
2933 if (sat) {
2934 env->vscr |= (1 << VSCR_SAT);
2938 void helper_vsum4sbs (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
2940 int i, j;
2941 int sat = 0;
2943 for (i = 0; i < ARRAY_SIZE(r->s32); i++) {
2944 int64_t t = (int64_t)b->s32[i];
2945 for (j = 0; j < ARRAY_SIZE(r->s32); j++) {
2946 t += a->s8[4*i+j];
2948 r->s32[i] = cvtsdsw(t, &sat);
2951 if (sat) {
2952 env->vscr |= (1 << VSCR_SAT);
2956 void helper_vsum4shs (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
2958 int sat = 0;
2959 int i;
2961 for (i = 0; i < ARRAY_SIZE(r->s32); i++) {
2962 int64_t t = (int64_t)b->s32[i];
2963 t += a->s16[2*i] + a->s16[2*i+1];
2964 r->s32[i] = cvtsdsw(t, &sat);
2967 if (sat) {
2968 env->vscr |= (1 << VSCR_SAT);
2972 void helper_vsum4ubs (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
2974 int i, j;
2975 int sat = 0;
2977 for (i = 0; i < ARRAY_SIZE(r->u32); i++) {
2978 uint64_t t = (uint64_t)b->u32[i];
2979 for (j = 0; j < ARRAY_SIZE(r->u32); j++) {
2980 t += a->u8[4*i+j];
2982 r->u32[i] = cvtuduw(t, &sat);
2985 if (sat) {
2986 env->vscr |= (1 << VSCR_SAT);
2990 #if defined(HOST_WORDS_BIGENDIAN)
2991 #define UPKHI 1
2992 #define UPKLO 0
2993 #else
2994 #define UPKHI 0
2995 #define UPKLO 1
2996 #endif
2997 #define VUPKPX(suffix, hi) \
2998 void helper_vupk##suffix (ppc_avr_t *r, ppc_avr_t *b) \
3000 int i; \
3001 ppc_avr_t result; \
3002 for (i = 0; i < ARRAY_SIZE(r->u32); i++) { \
3003 uint16_t e = b->u16[hi ? i : i+4]; \
3004 uint8_t a = (e >> 15) ? 0xff : 0; \
3005 uint8_t r = (e >> 10) & 0x1f; \
3006 uint8_t g = (e >> 5) & 0x1f; \
3007 uint8_t b = e & 0x1f; \
3008 result.u32[i] = (a << 24) | (r << 16) | (g << 8) | b; \
3010 *r = result; \
3012 VUPKPX(lpx, UPKLO)
3013 VUPKPX(hpx, UPKHI)
3014 #undef VUPKPX
3016 #define VUPK(suffix, unpacked, packee, hi) \
3017 void helper_vupk##suffix (ppc_avr_t *r, ppc_avr_t *b) \
3019 int i; \
3020 ppc_avr_t result; \
3021 if (hi) { \
3022 for (i = 0; i < ARRAY_SIZE(r->unpacked); i++) { \
3023 result.unpacked[i] = b->packee[i]; \
3025 } else { \
3026 for (i = ARRAY_SIZE(r->unpacked); i < ARRAY_SIZE(r->packee); i++) { \
3027 result.unpacked[i-ARRAY_SIZE(r->unpacked)] = b->packee[i]; \
3030 *r = result; \
3032 VUPK(hsb, s16, s8, UPKHI)
3033 VUPK(hsh, s32, s16, UPKHI)
3034 VUPK(lsb, s16, s8, UPKLO)
3035 VUPK(lsh, s32, s16, UPKLO)
3036 #undef VUPK
3037 #undef UPKHI
3038 #undef UPKLO
3040 #undef DO_HANDLE_NAN
3041 #undef HANDLE_NAN1
3042 #undef HANDLE_NAN2
3043 #undef HANDLE_NAN3
3044 #undef VECTOR_FOR_INORDER_I
3045 #undef HI_IDX
3046 #undef LO_IDX
3048 /*****************************************************************************/
3049 /* SPE extension helpers */
3050 /* Use a table to make this quicker */
3051 static const uint8_t hbrev[16] = {
3052 0x0, 0x8, 0x4, 0xC, 0x2, 0xA, 0x6, 0xE,
3053 0x1, 0x9, 0x5, 0xD, 0x3, 0xB, 0x7, 0xF,
3056 static inline uint8_t byte_reverse(uint8_t val)
3058 return hbrev[val >> 4] | (hbrev[val & 0xF] << 4);
3061 static inline uint32_t word_reverse(uint32_t val)
3063 return byte_reverse(val >> 24) | (byte_reverse(val >> 16) << 8) |
3064 (byte_reverse(val >> 8) << 16) | (byte_reverse(val) << 24);
3067 #define MASKBITS 16 /* Arbitrary value - to be fixed (implementation dependent) */
3068 target_ulong helper_brinc (target_ulong arg1, target_ulong arg2)
3070 uint32_t a, b, d, mask;
3072 mask = UINT32_MAX >> (32 - MASKBITS);
3073 a = arg1 & mask;
3074 b = arg2 & mask;
3075 d = word_reverse(1 + word_reverse(a | ~b));
3076 return (arg1 & ~mask) | (d & b);
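/* brinc implements a bit-reversed increment over the low MASKBITS bits
 * (reverse, add one, reverse back), the address pattern typically used
 * for FFT-style buffers. Worked 3-bit sketch visiting 0,4,2,6,1,5,3,7
 * (illustrative only, kept compiled out):
 */
#if 0
#include <stdint.h>
static uint32_t rev3(uint32_t x)
{
    return ((x & 1) << 2) | (x & 2) | ((x >> 2) & 1);
}
static uint32_t brinc3(uint32_t x)
{
    /* brinc3(0) == 4, brinc3(4) == 2, brinc3(2) == 6, ... */
    return rev3((rev3(x) + 1) & 7);
}
#endif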
3079 uint32_t helper_cntlsw32 (uint32_t val)
3081 if (val & 0x80000000)
3082 return clz32(~val);
3083 else
3084 return clz32(val);
3087 uint32_t helper_cntlzw32 (uint32_t val)
3089 return clz32(val);
3092 /* Single-precision floating-point conversions */
3093 static inline uint32_t efscfsi(uint32_t val)
3095 CPU_FloatU u;
3097 u.f = int32_to_float32(val, &env->vec_status);
3099 return u.l;
3102 static inline uint32_t efscfui(uint32_t val)
3104 CPU_FloatU u;
3106 u.f = uint32_to_float32(val, &env->vec_status);
3108 return u.l;
3111 static inline int32_t efsctsi(uint32_t val)
3113 CPU_FloatU u;
3115 u.l = val;
3116 /* NaNs are not treated the way IEEE 754 specifies */
3117 if (unlikely(float32_is_quiet_nan(u.f)))
3118 return 0;
3120 return float32_to_int32(u.f, &env->vec_status);
3123 static inline uint32_t efsctui(uint32_t val)
3125 CPU_FloatU u;
3127 u.l = val;
3128 /* NaNs are not treated the way IEEE 754 specifies */
3129 if (unlikely(float32_is_quiet_nan(u.f)))
3130 return 0;
3132 return float32_to_uint32(u.f, &env->vec_status);
3135 static inline uint32_t efsctsiz(uint32_t val)
3137 CPU_FloatU u;
3139 u.l = val;
3140 /* NaNs are not treated the way IEEE 754 specifies */
3141 if (unlikely(float32_is_quiet_nan(u.f)))
3142 return 0;
3144 return float32_to_int32_round_to_zero(u.f, &env->vec_status);
3147 static inline uint32_t efsctuiz(uint32_t val)
3149 CPU_FloatU u;
3151 u.l = val;
3152 /* NaNs are not treated the way IEEE 754 specifies */
3153 if (unlikely(float32_is_quiet_nan(u.f)))
3154 return 0;
3156 return float32_to_uint32_round_to_zero(u.f, &env->vec_status);
3159 static inline uint32_t efscfsf(uint32_t val)
3161 CPU_FloatU u;
3162 float32 tmp;
3164 u.f = int32_to_float32(val, &env->vec_status);
3165 tmp = int64_to_float32(1ULL << 32, &env->vec_status);
3166 u.f = float32_div(u.f, tmp, &env->vec_status);
3168 return u.l;
3171 static inline uint32_t efscfuf(uint32_t val)
3173 CPU_FloatU u;
3174 float32 tmp;
3176 u.f = uint32_to_float32(val, &env->vec_status);
3177 tmp = uint64_to_float32(1ULL << 32, &env->vec_status);
3178 u.f = float32_div(u.f, tmp, &env->vec_status);
3180 return u.l;
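/* The fractional conversions treat the 32-bit operand as a fixed-point
 * value with 32 fraction bits, so converting to float divides by 2^32
 * (and the opposite direction, efsctsf/efsctuf below, multiplies by
 * 2^32). Worked sketch (illustrative only, kept compiled out):
 */
#if 0
#include <stdint.h>
static float u32_fraction_to_float(uint32_t frac)
{
    /* 0x80000000 -> 0.5f, 0x40000000 -> 0.25f */
    return (float)frac / 4294967296.0f; /* 2^32 */
}
#endif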
3183 static inline uint32_t efsctsf(uint32_t val)
3185 CPU_FloatU u;
3186 float32 tmp;
3188 u.l = val;
3189 /* NaNs are not treated the way IEEE 754 specifies */
3190 if (unlikely(float32_is_quiet_nan(u.f)))
3191 return 0;
3192 tmp = uint64_to_float32(1ULL << 32, &env->vec_status);
3193 u.f = float32_mul(u.f, tmp, &env->vec_status);
3195 return float32_to_int32(u.f, &env->vec_status);
3198 static inline uint32_t efsctuf(uint32_t val)
3200 CPU_FloatU u;
3201 float32 tmp;
3203 u.l = val;
3204 /* NaNs are not treated the way IEEE 754 specifies */
3205 if (unlikely(float32_is_quiet_nan(u.f)))
3206 return 0;
3207 tmp = uint64_to_float32(1ULL << 32, &env->vec_status);
3208 u.f = float32_mul(u.f, tmp, &env->vec_status);
3210 return float32_to_uint32(u.f, &env->vec_status);
3213 #define HELPER_SPE_SINGLE_CONV(name) \
3214 uint32_t helper_e##name (uint32_t val) \
3216 return e##name(val); \
3218 /* efscfsi */
3219 HELPER_SPE_SINGLE_CONV(fscfsi);
3220 /* efscfui */
3221 HELPER_SPE_SINGLE_CONV(fscfui);
3222 /* efscfuf */
3223 HELPER_SPE_SINGLE_CONV(fscfuf);
3224 /* efscfsf */
3225 HELPER_SPE_SINGLE_CONV(fscfsf);
3226 /* efsctsi */
3227 HELPER_SPE_SINGLE_CONV(fsctsi);
3228 /* efsctui */
3229 HELPER_SPE_SINGLE_CONV(fsctui);
3230 /* efsctsiz */
3231 HELPER_SPE_SINGLE_CONV(fsctsiz);
3232 /* efsctuiz */
3233 HELPER_SPE_SINGLE_CONV(fsctuiz);
3234 /* efsctsf */
3235 HELPER_SPE_SINGLE_CONV(fsctsf);
3236 /* efsctuf */
3237 HELPER_SPE_SINGLE_CONV(fsctuf);
3239 #define HELPER_SPE_VECTOR_CONV(name) \
3240 uint64_t helper_ev##name (uint64_t val) \
3242 return ((uint64_t)e##name(val >> 32) << 32) | \
3243 (uint64_t)e##name(val); \
3245 /* evfscfsi */
3246 HELPER_SPE_VECTOR_CONV(fscfsi);
3247 /* evfscfui */
3248 HELPER_SPE_VECTOR_CONV(fscfui);
3249 /* evfscfuf */
3250 HELPER_SPE_VECTOR_CONV(fscfuf);
3251 /* evfscfsf */
3252 HELPER_SPE_VECTOR_CONV(fscfsf);
3253 /* evfsctsi */
3254 HELPER_SPE_VECTOR_CONV(fsctsi);
3255 /* evfsctui */
3256 HELPER_SPE_VECTOR_CONV(fsctui);
3257 /* evfsctsiz */
3258 HELPER_SPE_VECTOR_CONV(fsctsiz);
3259 /* evfsctuiz */
3260 HELPER_SPE_VECTOR_CONV(fsctuiz);
3261 /* evfsctsf */
3262 HELPER_SPE_VECTOR_CONV(fsctsf);
3263 /* evfsctuf */
3264 HELPER_SPE_VECTOR_CONV(fsctuf);
3266 /* Single-precision floating-point arithmetic */
3267 static inline uint32_t efsadd(uint32_t op1, uint32_t op2)
3269 CPU_FloatU u1, u2;
3270 u1.l = op1;
3271 u2.l = op2;
3272 u1.f = float32_add(u1.f, u2.f, &env->vec_status);
3273 return u1.l;
3276 static inline uint32_t efssub(uint32_t op1, uint32_t op2)
3278 CPU_FloatU u1, u2;
3279 u1.l = op1;
3280 u2.l = op2;
3281 u1.f = float32_sub(u1.f, u2.f, &env->vec_status);
3282 return u1.l;
3285 static inline uint32_t efsmul(uint32_t op1, uint32_t op2)
3287 CPU_FloatU u1, u2;
3288 u1.l = op1;
3289 u2.l = op2;
3290 u1.f = float32_mul(u1.f, u2.f, &env->vec_status);
3291 return u1.l;
3294 static inline uint32_t efsdiv(uint32_t op1, uint32_t op2)
3296 CPU_FloatU u1, u2;
3297 u1.l = op1;
3298 u2.l = op2;
3299 u1.f = float32_div(u1.f, u2.f, &env->vec_status);
3300 return u1.l;
3303 #define HELPER_SPE_SINGLE_ARITH(name) \
3304 uint32_t helper_e##name (uint32_t op1, uint32_t op2) \
3306 return e##name(op1, op2); \
3308 /* efsadd */
3309 HELPER_SPE_SINGLE_ARITH(fsadd);
3310 /* efssub */
3311 HELPER_SPE_SINGLE_ARITH(fssub);
3312 /* efsmul */
3313 HELPER_SPE_SINGLE_ARITH(fsmul);
3314 /* efsdiv */
3315 HELPER_SPE_SINGLE_ARITH(fsdiv);
3317 #define HELPER_SPE_VECTOR_ARITH(name) \
3318 uint64_t helper_ev##name (uint64_t op1, uint64_t op2) \
3320 return ((uint64_t)e##name(op1 >> 32, op2 >> 32) << 32) | \
3321 (uint64_t)e##name(op1, op2); \
3323 /* evfsadd */
3324 HELPER_SPE_VECTOR_ARITH(fsadd);
3325 /* evfssub */
3326 HELPER_SPE_VECTOR_ARITH(fssub);
3327 /* evfsmul */
3328 HELPER_SPE_VECTOR_ARITH(fsmul);
3329 /* evfsdiv */
3330 HELPER_SPE_VECTOR_ARITH(fsdiv);
3332 /* Single-precision floating-point comparisons */
3333 static inline uint32_t efststlt(uint32_t op1, uint32_t op2)
3335 CPU_FloatU u1, u2;
3336 u1.l = op1;
3337 u2.l = op2;
3338 return float32_lt(u1.f, u2.f, &env->vec_status) ? 4 : 0;
3341 static inline uint32_t efststgt(uint32_t op1, uint32_t op2)
3343 CPU_FloatU u1, u2;
3344 u1.l = op1;
3345 u2.l = op2;
3346 return float32_le(u1.f, u2.f, &env->vec_status) ? 0 : 4;
3349 static inline uint32_t efststeq(uint32_t op1, uint32_t op2)
3351 CPU_FloatU u1, u2;
3352 u1.l = op1;
3353 u2.l = op2;
3354 return float32_eq(u1.f, u2.f, &env->vec_status) ? 4 : 0;
3357 static inline uint32_t efscmplt(uint32_t op1, uint32_t op2)
3359 /* XXX: TODO: test special values (NaN, infinities, ...) */
3360 return efststlt(op1, op2);
3363 static inline uint32_t efscmpgt(uint32_t op1, uint32_t op2)
3365 /* XXX: TODO: test special values (NaN, infinities, ...) */
3366 return efststgt(op1, op2);
3369 static inline uint32_t efscmpeq(uint32_t op1, uint32_t op2)
3371 /* XXX: TODO: test special values (NaN, infinities, ...) */
3372 return efststeq(op1, op2);
3375 #define HELPER_SINGLE_SPE_CMP(name) \
3376 uint32_t helper_e##name (uint32_t op1, uint32_t op2) \
3378 return e##name(op1, op2) << 2; \
3380 /* efststlt */
3381 HELPER_SINGLE_SPE_CMP(fststlt);
3382 /* efststgt */
3383 HELPER_SINGLE_SPE_CMP(fststgt);
3384 /* efststeq */
3385 HELPER_SINGLE_SPE_CMP(fststeq);
3386 /* efscmplt */
3387 HELPER_SINGLE_SPE_CMP(fscmplt);
3388 /* efscmpgt */
3389 HELPER_SINGLE_SPE_CMP(fscmpgt);
3390 /* efscmpeq */
3391 HELPER_SINGLE_SPE_CMP(fscmpeq);
3393 static inline uint32_t evcmp_merge(int t0, int t1)
3395 return (t0 << 3) | (t1 << 2) | ((t0 | t1) << 1) | (t0 & t1);
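/* evcmp_merge packs the two per-half comparison results into a 4-bit
 * CR field: bit 3 = high half, bit 2 = low half, bit 1 = either,
 * bit 0 = both. Worked values (illustrative only, kept compiled out):
 */
#if 0
#include <assert.h>
static void evcmp_merge_examples(void)
{
    /* high true, low false: 0b1010 */
    assert(evcmp_merge(1, 0) == 0xA);
    /* both true: 0b1111 */
    assert(evcmp_merge(1, 1) == 0xF);
    /* both false: 0b0000 */
    assert(evcmp_merge(0, 0) == 0x0);
}
#endif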
3398 #define HELPER_VECTOR_SPE_CMP(name) \
3399 uint32_t helper_ev##name (uint64_t op1, uint64_t op2) \
3401 return evcmp_merge(e##name(op1 >> 32, op2 >> 32), e##name(op1, op2)); \
3403 /* evfststlt */
3404 HELPER_VECTOR_SPE_CMP(fststlt);
3405 /* evfststgt */
3406 HELPER_VECTOR_SPE_CMP(fststgt);
3407 /* evfststeq */
3408 HELPER_VECTOR_SPE_CMP(fststeq);
3409 /* evfscmplt */
3410 HELPER_VECTOR_SPE_CMP(fscmplt);
3411 /* evfscmpgt */
3412 HELPER_VECTOR_SPE_CMP(fscmpgt);
3413 /* evfscmpeq */
3414 HELPER_VECTOR_SPE_CMP(fscmpeq);
3416 /* Double-precision floating-point conversion */
3417 uint64_t helper_efdcfsi (uint32_t val)
3419 CPU_DoubleU u;
3421 u.d = int32_to_float64(val, &env->vec_status);
3423 return u.ll;
3426 uint64_t helper_efdcfsid (uint64_t val)
3428 CPU_DoubleU u;
3430 u.d = int64_to_float64(val, &env->vec_status);
3432 return u.ll;
3435 uint64_t helper_efdcfui (uint32_t val)
3437 CPU_DoubleU u;
3439 u.d = uint32_to_float64(val, &env->vec_status);
3441 return u.ll;
3444 uint64_t helper_efdcfuid (uint64_t val)
3446 CPU_DoubleU u;
3448 u.d = uint64_to_float64(val, &env->vec_status);
3450 return u.ll;
3453 uint32_t helper_efdctsi (uint64_t val)
3455 CPU_DoubleU u;
3457 u.ll = val;
3458 /* NaNs are not treated the way IEEE 754 specifies */
3459 if (unlikely(float64_is_any_nan(u.d))) {
3460 return 0;
3463 return float64_to_int32(u.d, &env->vec_status);
3466 uint32_t helper_efdctui (uint64_t val)
3468 CPU_DoubleU u;
3470 u.ll = val;
3471 /* NaNs are not treated the way IEEE 754 specifies */
3472 if (unlikely(float64_is_any_nan(u.d))) {
3473 return 0;
3476 return float64_to_uint32(u.d, &env->vec_status);
3479 uint32_t helper_efdctsiz (uint64_t val)
3481 CPU_DoubleU u;
3483 u.ll = val;
3484 /* NaNs are not treated the way IEEE 754 specifies */
3485 if (unlikely(float64_is_any_nan(u.d))) {
3486 return 0;
3489 return float64_to_int32_round_to_zero(u.d, &env->vec_status);
3492 uint64_t helper_efdctsidz (uint64_t val)
3494 CPU_DoubleU u;
3496 u.ll = val;
3497 /* NaNs are not treated the way IEEE 754 specifies */
3498 if (unlikely(float64_is_any_nan(u.d))) {
3499 return 0;
3502 return float64_to_int64_round_to_zero(u.d, &env->vec_status);
3505 uint32_t helper_efdctuiz (uint64_t val)
3507 CPU_DoubleU u;
3509 u.ll = val;
3510 /* NaNs are not treated the way IEEE 754 specifies */
3511 if (unlikely(float64_is_any_nan(u.d))) {
3512 return 0;
3515 return float64_to_uint32_round_to_zero(u.d, &env->vec_status);
3518 uint64_t helper_efdctuidz (uint64_t val)
3520 CPU_DoubleU u;
3522 u.ll = val;
3523 /* NaNs are not treated the way IEEE 754 specifies */
3524 if (unlikely(float64_is_any_nan(u.d))) {
3525 return 0;
3528 return float64_to_uint64_round_to_zero(u.d, &env->vec_status);
3531 uint64_t helper_efdcfsf (uint32_t val)
3533 CPU_DoubleU u;
3534 float64 tmp;
3536 u.d = int32_to_float64(val, &env->vec_status);
3537 tmp = int64_to_float64(1ULL << 32, &env->vec_status);
3538 u.d = float64_div(u.d, tmp, &env->vec_status);
3540 return u.ll;
3543 uint64_t helper_efdcfuf (uint32_t val)
3545 CPU_DoubleU u;
3546 float64 tmp;
3548 u.d = uint32_to_float64(val, &env->vec_status);
3549 tmp = int64_to_float64(1ULL << 32, &env->vec_status);
3550 u.d = float64_div(u.d, tmp, &env->vec_status);
3552 return u.ll;
3555 uint32_t helper_efdctsf (uint64_t val)
3557 CPU_DoubleU u;
3558 float64 tmp;
3560 u.ll = val;
3561 /* NaNs are not treated the way IEEE 754 specifies */
3562 if (unlikely(float64_is_any_nan(u.d))) {
3563 return 0;
3565 tmp = uint64_to_float64(1ULL << 32, &env->vec_status);
3566 u.d = float64_mul(u.d, tmp, &env->vec_status);
3568 return float64_to_int32(u.d, &env->vec_status);
3571 uint32_t helper_efdctuf (uint64_t val)
3573 CPU_DoubleU u;
3574 float64 tmp;
3576 u.ll = val;
3577 /* NaNs are not treated the way IEEE 754 specifies */
3578 if (unlikely(float64_is_any_nan(u.d))) {
3579 return 0;
3581 tmp = uint64_to_float64(1ULL << 32, &env->vec_status);
3582 u.d = float64_mul(u.d, tmp, &env->vec_status);
3584 return float64_to_uint32(u.d, &env->vec_status);
3587 uint32_t helper_efscfd (uint64_t val)
3589 CPU_DoubleU u1;
3590 CPU_FloatU u2;
3592 u1.ll = val;
3593 u2.f = float64_to_float32(u1.d, &env->vec_status);
3595 return u2.l;
3598 uint64_t helper_efdcfs (uint32_t val)
3600 CPU_DoubleU u2;
3601 CPU_FloatU u1;
3603 u1.l = val;
3604 u2.d = float32_to_float64(u1.f, &env->vec_status);
3606 return u2.ll;
3609 /* Double-precision floating-point arithmetic */
3610 uint64_t helper_efdadd (uint64_t op1, uint64_t op2)
3612 CPU_DoubleU u1, u2;
3613 u1.ll = op1;
3614 u2.ll = op2;
3615 u1.d = float64_add(u1.d, u2.d, &env->vec_status);
3616 return u1.ll;
3619 uint64_t helper_efdsub (uint64_t op1, uint64_t op2)
3621 CPU_DoubleU u1, u2;
3622 u1.ll = op1;
3623 u2.ll = op2;
3624 u1.d = float64_sub(u1.d, u2.d, &env->vec_status);
3625 return u1.ll;
3628 uint64_t helper_efdmul (uint64_t op1, uint64_t op2)
3630 CPU_DoubleU u1, u2;
3631 u1.ll = op1;
3632 u2.ll = op2;
3633 u1.d = float64_mul(u1.d, u2.d, &env->vec_status);
3634 return u1.ll;
3637 uint64_t helper_efddiv (uint64_t op1, uint64_t op2)
3639 CPU_DoubleU u1, u2;
3640 u1.ll = op1;
3641 u2.ll = op2;
3642 u1.d = float64_div(u1.d, u2.d, &env->vec_status);
3643 return u1.ll;
3646 /* Double-precision floating-point comparisons */
3647 uint32_t helper_efdtstlt (uint64_t op1, uint64_t op2)
3649 CPU_DoubleU u1, u2;
3650 u1.ll = op1;
3651 u2.ll = op2;
3652 return float64_lt(u1.d, u2.d, &env->vec_status) ? 4 : 0;
3655 uint32_t helper_efdtstgt (uint64_t op1, uint64_t op2)
3657 CPU_DoubleU u1, u2;
3658 u1.ll = op1;
3659 u2.ll = op2;
3660 return float64_le(u1.d, u2.d, &env->vec_status) ? 0 : 4;
3663 uint32_t helper_efdtsteq (uint64_t op1, uint64_t op2)
3665 CPU_DoubleU u1, u2;
3666 u1.ll = op1;
3667 u2.ll = op2;
3668 return float64_eq(u1.d, u2.d, &env->vec_status) ? 4 : 0;
3671 uint32_t helper_efdcmplt (uint64_t op1, uint64_t op2)
3673 /* XXX: TODO: test special values (NaN, infinities, ...) */
3674 return helper_efdtstlt(op1, op2);
3677 uint32_t helper_efdcmpgt (uint64_t op1, uint64_t op2)
3679 /* XXX: TODO: test special values (NaN, infinities, ...) */
3680 return helper_efdtstgt(op1, op2);
3683 uint32_t helper_efdcmpeq (uint64_t op1, uint64_t op2)
3685 /* XXX: TODO: test special values (NaN, infinities, ...) */
3686 return helper_efdtsteq(op1, op2);
3689 /*****************************************************************************/
3690 /* Softmmu support */
3691 #if !defined (CONFIG_USER_ONLY)
3693 #define MMUSUFFIX _mmu
3695 #define SHIFT 0
3696 #include "softmmu_template.h"
3698 #define SHIFT 1
3699 #include "softmmu_template.h"
3701 #define SHIFT 2
3702 #include "softmmu_template.h"
3704 #define SHIFT 3
3705 #include "softmmu_template.h"
3707 /* Try to fill the TLB and raise an exception on error. If retaddr is
3708 NULL, the function was called from C code (i.e. not from generated
3709 code or from helper.c). */
3710 /* XXX: fix it to restore all registers */
3711 void tlb_fill (target_ulong addr, int is_write, int mmu_idx, void *retaddr)
3713 TranslationBlock *tb;
3714 CPUState *saved_env;
3715 unsigned long pc;
3716 int ret;
3718 /* XXX: hack to restore env in all cases, even if not called from
3719 generated code */
3720 saved_env = env;
3721 env = cpu_single_env;
3722 ret = cpu_ppc_handle_mmu_fault(env, addr, is_write, mmu_idx, 1);
3723 if (unlikely(ret != 0)) {
3724 if (likely(retaddr)) {
3725 /* now we have a real cpu fault */
3726 pc = (unsigned long)retaddr;
3727 tb = tb_find_pc(pc);
3728 if (likely(tb)) {
3729 /* the PC is inside the translated code. It means that we have
3730 a virtual CPU fault */
3731 cpu_restore_state(tb, env, pc, NULL);
3734 helper_raise_exception_err(env->exception_index, env->error_code);
3736 env = saved_env;
3739 /* Segment register load and store */
3740 target_ulong helper_load_sr (target_ulong sr_num)
3742 #if defined(TARGET_PPC64)
3743 if (env->mmu_model & POWERPC_MMU_64)
3744 return ppc_load_sr(env, sr_num);
3745 #endif
3746 return env->sr[sr_num];
3749 void helper_store_sr (target_ulong sr_num, target_ulong val)
3751 ppc_store_sr(env, sr_num, val);
3754 /* SLB management */
3755 #if defined(TARGET_PPC64)
3756 void helper_store_slb (target_ulong rb, target_ulong rs)
3758 if (ppc_store_slb(env, rb, rs) < 0) {
3759 helper_raise_exception_err(POWERPC_EXCP_PROGRAM, POWERPC_EXCP_INVAL);
3763 target_ulong helper_load_slb_esid (target_ulong rb)
3765 target_ulong rt;
3767 if (ppc_load_slb_esid(env, rb, &rt) < 0) {
3768 helper_raise_exception_err(POWERPC_EXCP_PROGRAM, POWERPC_EXCP_INVAL);
3770 return rt;
3773 target_ulong helper_load_slb_vsid (target_ulong rb)
3775 target_ulong rt;
3777 if (ppc_load_slb_vsid(env, rb, &rt) < 0) {
3778 helper_raise_exception_err(POWERPC_EXCP_PROGRAM, POWERPC_EXCP_INVAL);
3780 return rt;
3783 void helper_slbia (void)
3785 ppc_slb_invalidate_all(env);
3788 void helper_slbie (target_ulong addr)
3790 ppc_slb_invalidate_one(env, addr);
3793 #endif /* defined(TARGET_PPC64) */
3795 /* TLB management */
3796 void helper_tlbia (void)
3798 ppc_tlb_invalidate_all(env);
3801 void helper_tlbie (target_ulong addr)
3803 ppc_tlb_invalidate_one(env, addr);
3806 /* Software-driven TLB management */
3807 /* PowerPC 602/603 software TLB load instruction helpers */
3808 static void do_6xx_tlb (target_ulong new_EPN, int is_code)
3810 target_ulong RPN, CMP, EPN;
3811 int way;
3813 RPN = env->spr[SPR_RPA];
3814 if (is_code) {
3815 CMP = env->spr[SPR_ICMP];
3816 EPN = env->spr[SPR_IMISS];
3817 } else {
3818 CMP = env->spr[SPR_DCMP];
3819 EPN = env->spr[SPR_DMISS];
3821 way = (env->spr[SPR_SRR1] >> 17) & 1;
3822 (void)EPN; /* avoid a compiler warning */
3823 LOG_SWTLB("%s: EPN " TARGET_FMT_lx " " TARGET_FMT_lx " PTE0 " TARGET_FMT_lx
3824 " PTE1 " TARGET_FMT_lx " way %d\n", __func__, new_EPN, EPN, CMP,
3825 RPN, way);
3826 /* Store this TLB */
3827 ppc6xx_tlb_store(env, (uint32_t)(new_EPN & TARGET_PAGE_MASK),
3828 way, is_code, CMP, RPN);
3831 void helper_6xx_tlbd (target_ulong EPN)
3833 do_6xx_tlb(EPN, 0);
3836 void helper_6xx_tlbi (target_ulong EPN)
3838 do_6xx_tlb(EPN, 1);
3841 /* PowerPC 74xx software TLB load instruction helpers */
3842 static void do_74xx_tlb (target_ulong new_EPN, int is_code)
3844 target_ulong RPN, CMP, EPN;
3845 int way;
3847 RPN = env->spr[SPR_PTELO];
3848 CMP = env->spr[SPR_PTEHI];
3849 EPN = env->spr[SPR_TLBMISS] & ~0x3;
3850 way = env->spr[SPR_TLBMISS] & 0x3;
3851 (void)EPN; /* avoid a compiler warning */
3852 LOG_SWTLB("%s: EPN " TARGET_FMT_lx " " TARGET_FMT_lx " PTE0 " TARGET_FMT_lx
3853 " PTE1 " TARGET_FMT_lx " way %d\n", __func__, new_EPN, EPN, CMP,
3854 RPN, way);
3855 /* Store this TLB */
3856 ppc6xx_tlb_store(env, (uint32_t)(new_EPN & TARGET_PAGE_MASK),
3857 way, is_code, CMP, RPN);
3860 void helper_74xx_tlbd (target_ulong EPN)
3862 do_74xx_tlb(EPN, 0);
3865 void helper_74xx_tlbi (target_ulong EPN)
3867 do_74xx_tlb(EPN, 1);
3870 static inline target_ulong booke_tlb_to_page_size(int size)
3872 return 1024 << (2 * size);
3875 static inline int booke_page_size_to_tlb(target_ulong page_size)
3877 int size;
3879 switch (page_size) {
3880 case 0x00000400UL:
3881 size = 0x0;
3882 break;
3883 case 0x00001000UL:
3884 size = 0x1;
3885 break;
3886 case 0x00004000UL:
3887 size = 0x2;
3888 break;
3889 case 0x00010000UL:
3890 size = 0x3;
3891 break;
3892 case 0x00040000UL:
3893 size = 0x4;
3894 break;
3895 case 0x00100000UL:
3896 size = 0x5;
3897 break;
3898 case 0x00400000UL:
3899 size = 0x6;
3900 break;
3901 case 0x01000000UL:
3902 size = 0x7;
3903 break;
3904 case 0x04000000UL:
3905 size = 0x8;
3906 break;
3907 case 0x10000000UL:
3908 size = 0x9;
3909 break;
3910 case 0x40000000UL:
3911 size = 0xA;
3912 break;
3913 #if defined (TARGET_PPC64)
3914 case 0x000100000000ULL:
3915 size = 0xB;
3916 break;
3917 case 0x000400000000ULL:
3918 size = 0xC;
3919 break;
3920 case 0x001000000000ULL:
3921 size = 0xD;
3922 break;
3923 case 0x004000000000ULL:
3924 size = 0xE;
3925 break;
3926 case 0x010000000000ULL:
3927 size = 0xF;
3928 break;
3929 #endif
3930 default:
3931 size = -1;
3932 break;
3935 return size;
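/* BookE TLB size fields encode pages as powers of four times 1 KiB:
 * field n maps to 1024 << (2 * n) bytes (1 KiB, 4 KiB, 16 KiB, ...),
 * which the switch above inverts, returning -1 for a non-encodable
 * size. Round-trip sketch (illustrative only, kept compiled out):
 */
#if 0
static int size_field_roundtrips(int n)
{
    /* holds for n = 0..0xA (0..0xF with TARGET_PPC64) */
    return booke_page_size_to_tlb(booke_tlb_to_page_size(n)) == n;
}
#endif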
3938 /* Helpers for 4xx TLB management */
3939 #define PPC4XX_TLB_ENTRY_MASK 0x0000003f /* Mask for 64 TLB entries */
3941 #define PPC4XX_TLBHI_V 0x00000040
3942 #define PPC4XX_TLBHI_E 0x00000020
3943 #define PPC4XX_TLBHI_SIZE_MIN 0
3944 #define PPC4XX_TLBHI_SIZE_MAX 7
3945 #define PPC4XX_TLBHI_SIZE_DEFAULT 1
3946 #define PPC4XX_TLBHI_SIZE_SHIFT 7
3947 #define PPC4XX_TLBHI_SIZE_MASK 0x00000007
3949 #define PPC4XX_TLBLO_EX 0x00000200
3950 #define PPC4XX_TLBLO_WR 0x00000100
3951 #define PPC4XX_TLBLO_ATTR_MASK 0x000000FF
3952 #define PPC4XX_TLBLO_RPN_MASK 0xFFFFFC00
3954 target_ulong helper_4xx_tlbre_hi (target_ulong entry)
3956 ppcemb_tlb_t *tlb;
3957 target_ulong ret;
3958 int size;
3960 entry &= PPC4XX_TLB_ENTRY_MASK;
3961 tlb = &env->tlb[entry].tlbe;
3962 ret = tlb->EPN;
3963 if (tlb->prot & PAGE_VALID) {
3964 ret |= PPC4XX_TLBHI_V;
3966 size = booke_page_size_to_tlb(tlb->size);
3967 if (size < PPC4XX_TLBHI_SIZE_MIN || size > PPC4XX_TLBHI_SIZE_MAX) {
3968 size = PPC4XX_TLBHI_SIZE_DEFAULT;
3970 ret |= size << PPC4XX_TLBHI_SIZE_SHIFT;
3971 env->spr[SPR_40x_PID] = tlb->PID;
3972 return ret;
3975 target_ulong helper_4xx_tlbre_lo (target_ulong entry)
3977 ppcemb_tlb_t *tlb;
3978 target_ulong ret;
3980 entry &= PPC4XX_TLB_ENTRY_MASK;
3981 tlb = &env->tlb[entry].tlbe;
3982 ret = tlb->RPN;
3983 if (tlb->prot & PAGE_EXEC) {
3984 ret |= PPC4XX_TLBLO_EX;
3986 if (tlb->prot & PAGE_WRITE) {
3987 ret |= PPC4XX_TLBLO_WR;
3989 return ret;
3992 void helper_4xx_tlbwe_hi (target_ulong entry, target_ulong val)
3994 ppcemb_tlb_t *tlb;
3995 target_ulong page, end;
3997 LOG_SWTLB("%s entry %d val " TARGET_FMT_lx "\n", __func__, (int)entry,
3998 val);
3999 entry &= PPC4XX_TLB_ENTRY_MASK;
4000 tlb = &env->tlb[entry].tlbe;
4001 /* Invalidate previous TLB (if it's valid) */
4002 if (tlb->prot & PAGE_VALID) {
4003 end = tlb->EPN + tlb->size;
4004 LOG_SWTLB("%s: invalidate old TLB %d start " TARGET_FMT_lx " end "
4005 TARGET_FMT_lx "\n", __func__, (int)entry, tlb->EPN, end);
4006 for (page = tlb->EPN; page < end; page += TARGET_PAGE_SIZE) {
4007 tlb_flush_page(env, page);
4010 tlb->size = booke_tlb_to_page_size((val >> PPC4XX_TLBHI_SIZE_SHIFT)
4011 & PPC4XX_TLBHI_SIZE_MASK);
4012 /* We cannot handle TLB size < TARGET_PAGE_SIZE.
4013 * If this ever occurs, one should use the ppcemb target instead
4014 * of the ppc or ppc64 one. */
4016 if ((val & PPC4XX_TLBHI_V) && tlb->size < TARGET_PAGE_SIZE) {
4017 cpu_abort(env, "TLB size " TARGET_FMT_lu " < %u "
4018 "are not supported (%d)\n",
4019 tlb->size, TARGET_PAGE_SIZE, (int)((val >> 7) & 0x7));
4021 tlb->EPN = val & ~(tlb->size - 1);
4022 if (val & PPC4XX_TLBHI_V) {
4023 tlb->prot |= PAGE_VALID;
4024 if (val & PPC4XX_TLBHI_E) {
4025 /* XXX: TO BE FIXED */
4026 cpu_abort(env,
4027 "Little-endian TLB entries are not supported for now\n");
4029 } else {
4030 tlb->prot &= ~PAGE_VALID;
4032 tlb->PID = env->spr[SPR_40x_PID]; /* PID */
4033 LOG_SWTLB("%s: set up TLB %d RPN " TARGET_FMT_plx " EPN " TARGET_FMT_lx
4034 " size " TARGET_FMT_lx " prot %c%c%c%c PID %d\n", __func__,
4035 (int)entry, tlb->RPN, tlb->EPN, tlb->size,
4036 tlb->prot & PAGE_READ ? 'r' : '-',
4037 tlb->prot & PAGE_WRITE ? 'w' : '-',
4038 tlb->prot & PAGE_EXEC ? 'x' : '-',
4039 tlb->prot & PAGE_VALID ? 'v' : '-', (int)tlb->PID);
4040 /* Invalidate new TLB (if valid) */
4041 if (tlb->prot & PAGE_VALID) {
4042 end = tlb->EPN + tlb->size;
4043 LOG_SWTLB("%s: invalidate TLB %d start " TARGET_FMT_lx " end "
4044 TARGET_FMT_lx "\n", __func__, (int)entry, tlb->EPN, end);
4045 for (page = tlb->EPN; page < end; page += TARGET_PAGE_SIZE) {
4046 tlb_flush_page(env, page);
4051 void helper_4xx_tlbwe_lo (target_ulong entry, target_ulong val)
4053 ppcemb_tlb_t *tlb;
4055 LOG_SWTLB("%s entry %i val " TARGET_FMT_lx "\n", __func__, (int)entry,
4056 val);
4057 entry &= PPC4XX_TLB_ENTRY_MASK;
4058 tlb = &env->tlb[entry].tlbe;
4059 tlb->attr = val & PPC4XX_TLBLO_ATTR_MASK;
4060 tlb->RPN = val & PPC4XX_TLBLO_RPN_MASK;
4061 tlb->prot = PAGE_READ;
4062 if (val & PPC4XX_TLBLO_EX) {
4063 tlb->prot |= PAGE_EXEC;
4065 if (val & PPC4XX_TLBLO_WR) {
4066 tlb->prot |= PAGE_WRITE;
4068 LOG_SWTLB("%s: set up TLB %d RPN " TARGET_FMT_plx " EPN " TARGET_FMT_lx
4069 " size " TARGET_FMT_lx " prot %c%c%c%c PID %d\n", __func__,
4070 (int)entry, tlb->RPN, tlb->EPN, tlb->size,
4071 tlb->prot & PAGE_READ ? 'r' : '-',
4072 tlb->prot & PAGE_WRITE ? 'w' : '-',
4073 tlb->prot & PAGE_EXEC ? 'x' : '-',
4074 tlb->prot & PAGE_VALID ? 'v' : '-', (int)tlb->PID);
4077 target_ulong helper_4xx_tlbsx (target_ulong address)
4079 return ppcemb_tlb_search(env, address, env->spr[SPR_40x_PID]);
4082 /* PowerPC 440 TLB management */
4083 void helper_440_tlbwe (uint32_t word, target_ulong entry, target_ulong value)
4085 ppcemb_tlb_t *tlb;
4086 target_ulong EPN, RPN, size;
4087 int do_flush_tlbs;
4089 LOG_SWTLB("%s word %d entry %d value " TARGET_FMT_lx "\n",
4090 __func__, word, (int)entry, value);
4091 do_flush_tlbs = 0;
4092 entry &= 0x3F;
4093 tlb = &env->tlb[entry].tlbe;
4094 switch (word) {
4095 default:
4096 /* Just here to please gcc */
4097 case 0:
4098 EPN = value & 0xFFFFFC00;
4099 if ((tlb->prot & PAGE_VALID) && EPN != tlb->EPN)
4100 do_flush_tlbs = 1;
4101 tlb->EPN = EPN;
4102 size = booke_tlb_to_page_size((value >> 4) & 0xF);
4103 if ((tlb->prot & PAGE_VALID) && tlb->size < size)
4104 do_flush_tlbs = 1;
4105 tlb->size = size;
4106 tlb->attr &= ~0x1;
4107 tlb->attr |= (value >> 8) & 1;
4108 if (value & 0x200) {
4109 tlb->prot |= PAGE_VALID;
4110 } else {
4111 if (tlb->prot & PAGE_VALID) {
4112 tlb->prot &= ~PAGE_VALID;
4113 do_flush_tlbs = 1;
4116 tlb->PID = env->spr[SPR_440_MMUCR] & 0x000000FF;
4117 if (do_flush_tlbs)
4118 tlb_flush(env, 1);
4119 break;
4120 case 1:
4121 RPN = value & 0xFFFFFC0F;
4122 if ((tlb->prot & PAGE_VALID) && tlb->RPN != RPN)
4123 tlb_flush(env, 1);
4124 tlb->RPN = RPN;
4125 break;
4126 case 2:
4127 tlb->attr = (tlb->attr & 0x1) | (value & 0x0000FF00);
4128 tlb->prot = tlb->prot & PAGE_VALID;
4129 if (value & 0x1)
4130 tlb->prot |= PAGE_READ << 4;
4131 if (value & 0x2)
4132 tlb->prot |= PAGE_WRITE << 4;
4133 if (value & 0x4)
4134 tlb->prot |= PAGE_EXEC << 4;
4135 if (value & 0x8)
4136 tlb->prot |= PAGE_READ;
4137 if (value & 0x10)
4138 tlb->prot |= PAGE_WRITE;
4139 if (value & 0x20)
4140 tlb->prot |= PAGE_EXEC;
4141 break;
4145 target_ulong helper_440_tlbre (uint32_t word, target_ulong entry)
4147 ppcemb_tlb_t *tlb;
4148 target_ulong ret;
4149 int size;
4151 entry &= 0x3F;
4152 tlb = &env->tlb[entry].tlbe;
4153 switch (word) {
4154 default:
4155 /* Just here to please gcc */
4156 case 0:
4157 ret = tlb->EPN;
4158 size = booke_page_size_to_tlb(tlb->size);
4159 if (size < 0 || size > 0xF)
4160 size = 1;
4161 ret |= size << 4;
4162 if (tlb->attr & 0x1)
4163 ret |= 0x100;
4164 if (tlb->prot & PAGE_VALID)
4165 ret |= 0x200;
4166 env->spr[SPR_440_MMUCR] &= ~0x000000FF;
4167 env->spr[SPR_440_MMUCR] |= tlb->PID;
4168 break;
4169 case 1:
4170 ret = tlb->RPN;
4171 break;
4172 case 2:
4173 ret = tlb->attr & ~0x1;
4174 if (tlb->prot & (PAGE_READ << 4))
4175 ret |= 0x1;
4176 if (tlb->prot & (PAGE_WRITE << 4))
4177 ret |= 0x2;
4178 if (tlb->prot & (PAGE_EXEC << 4))
4179 ret |= 0x4;
4180 if (tlb->prot & PAGE_READ)
4181 ret |= 0x8;
4182 if (tlb->prot & PAGE_WRITE)
4183 ret |= 0x10;
4184 if (tlb->prot & PAGE_EXEC)
4185 ret |= 0x20;
4186 break;
4188 return ret;
4191 target_ulong helper_440_tlbsx (target_ulong address)
4193 return ppcemb_tlb_search(env, address, env->spr[SPR_440_MMUCR] & 0xFF);
4196 #endif /* !CONFIG_USER_ONLY */