/*
 *  PowerPC emulation helpers for qemu.
 *
 *  Copyright (c) 2003-2007 Jocelyn Mayer
 *
 *  This library is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU Lesser General Public
 *  License as published by the Free Software Foundation; either
 *  version 2 of the License, or (at your option) any later version.
 *
 *  This library is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 *  Lesser General Public License for more details.
 *
 *  You should have received a copy of the GNU Lesser General Public
 *  License along with this library; if not, write to the Free Software
 *  Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston MA 02110-1301 USA
 */
#include <string.h>
#include "exec.h"
#include "host-utils.h"
#include "helper.h"

#include "helper_regs.h"

//#define DEBUG_OP
//#define DEBUG_EXCEPTIONS
//#define DEBUG_SOFTWARE_TLB

#ifdef DEBUG_SOFTWARE_TLB
#  define LOG_SWTLB(...) qemu_log(__VA_ARGS__)
#else
#  define LOG_SWTLB(...) do { } while (0)
#endif
/*****************************************************************************/
/* Exception processing helpers */

void helper_raise_exception_err (uint32_t exception, uint32_t error_code)
{
#if 0
    printf("Raise exception %3x code : %d\n", exception, error_code);
#endif
    env->exception_index = exception;
    env->error_code = error_code;
    cpu_loop_exit();
}

void helper_raise_exception (uint32_t exception)
{
    helper_raise_exception_err(exception, 0);
}
/*****************************************************************************/
/* Registers load and stores */

target_ulong helper_load_cr (void)
{
    return (env->crf[0] << 28) |
           (env->crf[1] << 24) |
           (env->crf[2] << 20) |
           (env->crf[3] << 16) |
           (env->crf[4] << 12) |
           (env->crf[5] << 8) |
           (env->crf[6] << 4) |
           (env->crf[7] << 0);
}

void helper_store_cr (target_ulong val, uint32_t mask)
{
    int i, sh;

    for (i = 0, sh = 7; i < 8; i++, sh--) {
        if (mask & (1 << sh))
            env->crf[i] = (val >> (sh * 4)) & 0xFUL;
    }
}
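
/* Illustration (not part of the original file): the two helpers above pack
 * and unpack the eight 4-bit CR fields, with crf[0] in the most significant
 * nibble. A minimal standalone sketch of the mtcrf-style masked store,
 * assuming nothing beyond <stdint.h>:
 */
#if 0
#include <stdint.h>

/* Hypothetical standalone model of helper_store_cr(): each set bit in the
 * 8-bit mask selects one 4-bit CR field; mask bit 7 selects crf[0]. */
static void store_cr_model(uint8_t crf[8], uint32_t val, uint32_t mask)
{
    int i, sh;

    for (i = 0, sh = 7; i < 8; i++, sh--) {
        if (mask & (1 << sh))
            crf[i] = (val >> (sh * 4)) & 0xF;
    }
}
/* store_cr_model(crf, 0x12345678, 0x80) updates only crf[0], to 0x1. */
#endif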
/*****************************************************************************/
/* SPR accesses */

void helper_load_dump_spr (uint32_t sprn)
{
    qemu_log("Read SPR %d %03x => " ADDRX "\n",
             sprn, sprn, env->spr[sprn]);
}

void helper_store_dump_spr (uint32_t sprn)
{
    qemu_log("Write SPR %d %03x <= " ADDRX "\n",
             sprn, sprn, env->spr[sprn]);
}

target_ulong helper_load_tbl (void)
{
    return cpu_ppc_load_tbl(env);
}

target_ulong helper_load_tbu (void)
{
    return cpu_ppc_load_tbu(env);
}

target_ulong helper_load_atbl (void)
{
    return cpu_ppc_load_atbl(env);
}

target_ulong helper_load_atbu (void)
{
    return cpu_ppc_load_atbu(env);
}

target_ulong helper_load_601_rtcl (void)
{
    return cpu_ppc601_load_rtcl(env);
}

target_ulong helper_load_601_rtcu (void)
{
    return cpu_ppc601_load_rtcu(env);
}

#if !defined(CONFIG_USER_ONLY)
#if defined (TARGET_PPC64)
void helper_store_asr (target_ulong val)
{
    ppc_store_asr(env, val);
}
#endif

void helper_store_sdr1 (target_ulong val)
{
    ppc_store_sdr1(env, val);
}

void helper_store_tbl (target_ulong val)
{
    cpu_ppc_store_tbl(env, val);
}

void helper_store_tbu (target_ulong val)
{
    cpu_ppc_store_tbu(env, val);
}

void helper_store_atbl (target_ulong val)
{
    cpu_ppc_store_atbl(env, val);
}

void helper_store_atbu (target_ulong val)
{
    cpu_ppc_store_atbu(env, val);
}

void helper_store_601_rtcl (target_ulong val)
{
    cpu_ppc601_store_rtcl(env, val);
}

void helper_store_601_rtcu (target_ulong val)
{
    cpu_ppc601_store_rtcu(env, val);
}

target_ulong helper_load_decr (void)
{
    return cpu_ppc_load_decr(env);
}

void helper_store_decr (target_ulong val)
{
    cpu_ppc_store_decr(env, val);
}

void helper_store_hid0_601 (target_ulong val)
{
    target_ulong hid0;

    hid0 = env->spr[SPR_HID0];
    if ((val ^ hid0) & 0x00000008) {
        /* Change current endianness */
        env->hflags &= ~(1 << MSR_LE);
        env->hflags_nmsr &= ~(1 << MSR_LE);
        env->hflags_nmsr |= (1 << MSR_LE) & (((val >> 3) & 1) << MSR_LE);
        env->hflags |= env->hflags_nmsr;
        qemu_log("%s: set endianness to %c => " ADDRX "\n",
                 __func__, val & 0x8 ? 'l' : 'b', env->hflags);
    }
    env->spr[SPR_HID0] = (uint32_t)val;
}

void helper_store_403_pbr (uint32_t num, target_ulong value)
{
    if (likely(env->pb[num] != value)) {
        env->pb[num] = value;
        /* Should be optimized */
        tlb_flush(env, 1);
    }
}

target_ulong helper_load_40x_pit (void)
{
    return load_40x_pit(env);
}

void helper_store_40x_pit (target_ulong val)
{
    store_40x_pit(env, val);
}

void helper_store_40x_dbcr0 (target_ulong val)
{
    store_40x_dbcr0(env, val);
}

void helper_store_40x_sler (target_ulong val)
{
    store_40x_sler(env, val);
}

void helper_store_booke_tcr (target_ulong val)
{
    store_booke_tcr(env, val);
}

void helper_store_booke_tsr (target_ulong val)
{
    store_booke_tsr(env, val);
}

void helper_store_ibatu (uint32_t nr, target_ulong val)
{
    ppc_store_ibatu(env, nr, val);
}

void helper_store_ibatl (uint32_t nr, target_ulong val)
{
    ppc_store_ibatl(env, nr, val);
}

void helper_store_dbatu (uint32_t nr, target_ulong val)
{
    ppc_store_dbatu(env, nr, val);
}

void helper_store_dbatl (uint32_t nr, target_ulong val)
{
    ppc_store_dbatl(env, nr, val);
}

void helper_store_601_batl (uint32_t nr, target_ulong val)
{
    ppc_store_ibatl_601(env, nr, val);
}

void helper_store_601_batu (uint32_t nr, target_ulong val)
{
    ppc_store_ibatu_601(env, nr, val);
}
#endif
/*****************************************************************************/
/* Memory load and stores */

static always_inline target_ulong addr_add(target_ulong addr, target_long arg)
{
#if defined(TARGET_PPC64)
    if (!msr_sf)
        return (uint32_t)(addr + arg);
    else
#endif
        return addr + arg;
}
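
/* Illustration (not from the original source): when a 64-bit CPU runs in
 * 32-bit mode (msr_sf clear), effective addresses wrap at 2^32, which is
 * what the (uint32_t) truncation above implements. Self-contained sketch:
 */
#if 0
#include <stdint.h>

static uint64_t addr_add_model(uint64_t addr, int64_t arg, int sf)
{
    if (!sf)
        return (uint32_t)(addr + arg);  /* 32-bit mode: wrap at 4 GiB */
    return addr + arg;                  /* 64-bit mode: full-width add */
}
/* addr_add_model(0xFFFFFFFFULL, 1, 0) == 0, not 0x100000000. */
#endif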
void helper_lmw (target_ulong addr, uint32_t reg)
{
    for (; reg < 32; reg++) {
        if (msr_le)
            env->gpr[reg] = bswap32(ldl(addr));
        else
            env->gpr[reg] = ldl(addr);
        addr = addr_add(addr, 4);
    }
}

void helper_stmw (target_ulong addr, uint32_t reg)
{
    for (; reg < 32; reg++) {
        if (msr_le)
            stl(addr, bswap32((uint32_t)env->gpr[reg]));
        else
            stl(addr, (uint32_t)env->gpr[reg]);
        addr = addr_add(addr, 4);
    }
}

void helper_lsw(target_ulong addr, uint32_t nb, uint32_t reg)
{
    int sh;
    for (; nb > 3; nb -= 4) {
        env->gpr[reg] = ldl(addr);
        reg = (reg + 1) % 32;
        addr = addr_add(addr, 4);
    }
    if (unlikely(nb > 0)) {
        env->gpr[reg] = 0;
        for (sh = 24; nb > 0; nb--, sh -= 8) {
            env->gpr[reg] |= ldub(addr) << sh;
            addr = addr_add(addr, 1);
        }
    }
}
/* PPC32 specification says we must generate an exception if
 * rA is in the range of registers to be loaded.
 * On the other hand, IBM says this is valid, but rA won't be loaded.
 * For now, I'll follow the spec...
 */
void helper_lswx(target_ulong addr, uint32_t reg, uint32_t ra, uint32_t rb)
{
    if (likely(xer_bc != 0)) {
        if (unlikely((ra != 0 && reg < ra && (reg + xer_bc) > ra) ||
                     (reg < rb && (reg + xer_bc) > rb))) {
            helper_raise_exception_err(POWERPC_EXCP_PROGRAM,
                                       POWERPC_EXCP_INVAL |
                                       POWERPC_EXCP_INVAL_LSWX);
        } else {
            helper_lsw(addr, xer_bc, reg);
        }
    }
}
void helper_stsw(target_ulong addr, uint32_t nb, uint32_t reg)
{
    int sh;
    for (; nb > 3; nb -= 4) {
        stl(addr, env->gpr[reg]);
        reg = (reg + 1) % 32;
        addr = addr_add(addr, 4);
    }
    if (unlikely(nb > 0)) {
        for (sh = 24; nb > 0; nb--, sh -= 8) {
            stb(addr, (env->gpr[reg] >> sh) & 0xFF);
            addr = addr_add(addr, 1);
        }
    }
}
static void do_dcbz(target_ulong addr, int dcache_line_size)
{
    int i;

    addr &= ~(dcache_line_size - 1);
    for (i = 0; i < dcache_line_size; i += 4) {
        stl(addr + i, 0);
    }
    if (env->reserve == addr)
        env->reserve = (target_ulong)-1ULL;
}

void helper_dcbz(target_ulong addr)
{
    do_dcbz(addr, env->dcache_line_size);
}

void helper_dcbz_970(target_ulong addr)
{
    if (((env->spr[SPR_970_HID5] >> 7) & 0x3) == 1)
        do_dcbz(addr, 32);
    else
        do_dcbz(addr, env->dcache_line_size);
}
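
/* Illustration (not QEMU API): do_dcbz() rounds the address down to the
 * start of the cache line with addr &= ~(line_size - 1), which assumes
 * line_size is a power of two. A standalone sketch of that masking:
 */
#if 0
#include <stdint.h>

static uint64_t align_down(uint64_t addr, uint64_t line_size)
{
    /* Valid only for power-of-two sizes: for line_size 32, ~(32 - 1) ==
     * ~0x1F clears the low 5 bits, e.g. 0x1234 -> 0x1220. */
    return addr & ~(line_size - 1);
}
#endif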
void helper_icbi(target_ulong addr)
{
    uint32_t tmp;

    addr &= ~(env->dcache_line_size - 1);
    /* Invalidate one cache line :
     * PowerPC specification says this is to be treated like a load
     * (not a fetch) by the MMU. To be sure it will be so,
     * do the load "by hand".
     */
    tmp = ldl(addr);
    tb_invalidate_page_range(addr, addr + env->icache_line_size);
}
// XXX: to be tested
target_ulong helper_lscbx (target_ulong addr, uint32_t reg, uint32_t ra, uint32_t rb)
{
    int i, c, d;

    d = 24;
    for (i = 0; i < xer_bc; i++) {
        c = ldub(addr);
        addr = addr_add(addr, 1);
        /* ra (if not 0) and rb are never modified */
        if (likely(reg != rb && (ra == 0 || reg != ra))) {
            env->gpr[reg] = (env->gpr[reg] & ~(0xFF << d)) | (c << d);
        }
        if (unlikely(c == xer_cmp))
            break;
        if (likely(d != 0)) {
            d -= 8;
        } else {
            d = 24;
            reg++;
            reg = reg & 0x1F;
        }
    }
    return i;
}
/*****************************************************************************/
/* Fixed point operations helpers */
#if defined(TARGET_PPC64)

/* multiply high word */
uint64_t helper_mulhd (uint64_t arg1, uint64_t arg2)
{
    uint64_t tl, th;

    muls64(&tl, &th, arg1, arg2);
    return th;
}

/* multiply high word unsigned */
uint64_t helper_mulhdu (uint64_t arg1, uint64_t arg2)
{
    uint64_t tl, th;

    mulu64(&tl, &th, arg1, arg2);
    return th;
}

uint64_t helper_mulldo (uint64_t arg1, uint64_t arg2)
{
    int64_t th;
    uint64_t tl;

    muls64(&tl, (uint64_t *)&th, arg1, arg2);
    /* If th != 0 && th != -1, then we had an overflow */
    if (likely((uint64_t)(th + 1) <= 1)) {
        env->xer &= ~(1 << XER_OV);
    } else {
        env->xer |= (1 << XER_OV) | (1 << XER_SO);
    }
    return (int64_t)tl;
}
#endif
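
/* Illustration (not from the original source): the mulldo overflow test
 * checks that the high half of the 128-bit product is 0 or -1, done
 * branch-free as (uint64_t)(th + 1) <= 1. Standalone sketch of the same
 * check, assuming a host compiler with __int128 support:
 */
#if 0
#include <stdint.h>

static int mull_overflows(int64_t a, int64_t b)
{
    __int128 full = (__int128)a * b;
    int64_t th = (int64_t)(full >> 64);
    /* Mirrors the helper: flag overflow unless th is 0 or -1 */
    return (uint64_t)(th + 1) > 1;
}
#endif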
target_ulong helper_cntlzw (target_ulong t)
{
    return clz32(t);
}

#if defined(TARGET_PPC64)
target_ulong helper_cntlzd (target_ulong t)
{
    return clz64(t);
}
#endif
/* shift right arithmetic helper */
target_ulong helper_sraw (target_ulong value, target_ulong shift)
{
    int32_t ret;

    if (likely(!(shift & 0x20))) {
        if (likely((uint32_t)shift != 0)) {
            shift &= 0x1f;
            ret = (int32_t)value >> shift;
            if (likely(ret >= 0 || (value & ((1 << shift) - 1)) == 0)) {
                env->xer &= ~(1 << XER_CA);
            } else {
                env->xer |= (1 << XER_CA);
            }
        } else {
            ret = (int32_t)value;
            env->xer &= ~(1 << XER_CA);
        }
    } else {
        ret = (int32_t)value >> 31;
        if (ret) {
            env->xer |= (1 << XER_CA);
        } else {
            env->xer &= ~(1 << XER_CA);
        }
    }
    return (target_long)ret;
}
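
/* Illustration (not from the original source): XER[CA] after sraw is set
 * only when the result is negative and at least one 1-bit was shifted out,
 * so that sraw followed by addze yields a correct signed divide by a power
 * of two. Standalone sketch, assuming the host's >> on int32_t is an
 * arithmetic shift:
 */
#if 0
#include <stdint.h>

static int32_t sraw_model(int32_t value, int shift, int *ca)
{
    int32_t ret = value >> shift;
    *ca = ret < 0 && (value & ((1 << shift) - 1)) != 0;
    return ret;
}
/* sraw_model(-5, 1, &ca) == -3 with ca == 1; -3 + 1 == -2 == trunc(-5/2). */
#endif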
#if defined(TARGET_PPC64)
target_ulong helper_srad (target_ulong value, target_ulong shift)
{
    int64_t ret;

    if (likely(!(shift & 0x40))) {
        if (likely((uint64_t)shift != 0)) {
            shift &= 0x3f;
            ret = (int64_t)value >> shift;
            /* The mask must be built in 64 bits: (1 << shift) overflows
             * for shift >= 32 */
            if (likely(ret >= 0 || (value & ((1ULL << shift) - 1)) == 0)) {
                env->xer &= ~(1 << XER_CA);
            } else {
                env->xer |= (1 << XER_CA);
            }
        } else {
            ret = (int64_t)value;
            env->xer &= ~(1 << XER_CA);
        }
    } else {
        ret = (int64_t)value >> 63;
        if (ret) {
            env->xer |= (1 << XER_CA);
        } else {
            env->xer &= ~(1 << XER_CA);
        }
    }
    return ret;
}
#endif
target_ulong helper_popcntb (target_ulong val)
{
    val = (val & 0x55555555) + ((val >> 1) & 0x55555555);
    val = (val & 0x33333333) + ((val >> 2) & 0x33333333);
    val = (val & 0x0f0f0f0f) + ((val >> 4) & 0x0f0f0f0f);
    return val;
}

#if defined(TARGET_PPC64)
target_ulong helper_popcntb_64 (target_ulong val)
{
    val = (val & 0x5555555555555555ULL) + ((val >> 1) & 0x5555555555555555ULL);
    val = (val & 0x3333333333333333ULL) + ((val >> 2) & 0x3333333333333333ULL);
    val = (val & 0x0f0f0f0f0f0f0f0fULL) + ((val >> 4) & 0x0f0f0f0f0f0f0f0fULL);
    return val;
}
#endif
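
/* Illustration (not from the original source): popcntb computes an
 * independent population count per byte using the classic SWAR reduction
 * (pairs of bits, then nibbles, then bytes); no carry crosses a byte
 * boundary because each partial sum fits in its field. A quick check of
 * the 32-bit version:
 */
#if 0
#include <stdint.h>
#include <assert.h>

static void popcntb_check(void)
{
    uint32_t val = 0xFF010300;
    val = (val & 0x55555555) + ((val >> 1) & 0x55555555);
    val = (val & 0x33333333) + ((val >> 2) & 0x33333333);
    val = (val & 0x0f0f0f0f) + ((val >> 4) & 0x0f0f0f0f);
    /* Per-byte counts: 0xFF -> 8, 0x01 -> 1, 0x03 -> 2, 0x00 -> 0 */
    assert(val == 0x08010200);
}
#endif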
/*****************************************************************************/
/* Floating point operations helpers */
uint64_t helper_float32_to_float64(uint32_t arg)
{
    CPU_FloatU f;
    CPU_DoubleU d;
    f.l = arg;
    d.d = float32_to_float64(f.f, &env->fp_status);
    return d.ll;
}

uint32_t helper_float64_to_float32(uint64_t arg)
{
    CPU_FloatU f;
    CPU_DoubleU d;
    d.ll = arg;
    f.f = float64_to_float32(d.d, &env->fp_status);
    return f.l;
}

static always_inline int isden (float64 d)
{
    CPU_DoubleU u;

    u.d = d;

    return ((u.ll >> 52) & 0x7FF) == 0;
}
uint32_t helper_compute_fprf (uint64_t arg, uint32_t set_fprf)
{
    CPU_DoubleU farg;
    int isneg;
    int ret;
    farg.ll = arg;
    isneg = float64_is_neg(farg.d);
    if (unlikely(float64_is_nan(farg.d))) {
        if (float64_is_signaling_nan(farg.d)) {
            /* Signaling NaN: flags are undefined */
            ret = 0x00;
        } else {
            /* Quiet NaN */
            ret = 0x11;
        }
    } else if (unlikely(float64_is_infinity(farg.d))) {
        /* +/- infinity */
        if (isneg)
            ret = 0x09;
        else
            ret = 0x05;
    } else {
        if (float64_is_zero(farg.d)) {
            /* +/- zero */
            if (isneg)
                ret = 0x12;
            else
                ret = 0x02;
        } else {
            if (isden(farg.d)) {
                /* Denormalized numbers */
                ret = 0x10;
            } else {
                /* Normalized numbers */
                ret = 0x00;
            }
            if (isneg) {
                ret |= 0x08;
            } else {
                ret |= 0x04;
            }
        }
    }
    if (set_fprf) {
        /* We update FPSCR_FPRF */
        env->fpscr &= ~(0x1F << FPSCR_FPRF);
        env->fpscr |= ret << FPSCR_FPRF;
    }
    /* We just need fpcc to update Rc1 */
    return ret & 0xF;
}
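
/* Illustration (not from the original source): the 5-bit value built above
 * is the FPSCR[FPRF] field, composed of class codes (0x11 quiet NaN, 0x12
 * minus zero, 0x10 denormal flag, ...) plus a sign nibble 0x08/0x04. A
 * standalone model for finite nonzero doubles, assuming IEEE-754 binary64
 * bit layout:
 */
#if 0
#include <stdint.h>

static int fprf_finite_nonzero(uint64_t bits)
{
    int isneg = bits >> 63;
    int isden = ((bits >> 52) & 0x7FF) == 0;   /* biased exponent 0 */
    int ret = isden ? 0x10 : 0x00;             /* denormal class bit */
    return ret | (isneg ? 0x08 : 0x04);        /* sign nibble */
}
/* 1.0 (0x3FF0000000000000) -> 0x04; the smallest positive denormal
 * (0x0000000000000001) -> 0x14, matching the helper above. */
#endif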
/* Floating-point invalid operations exception */
static always_inline uint64_t fload_invalid_op_excp (int op)
{
    uint64_t ret = 0;
    int ve;

    ve = fpscr_ve;
    switch (op) {
    case POWERPC_EXCP_FP_VXSNAN:
        env->fpscr |= 1 << FPSCR_VXSNAN;
        break;
    case POWERPC_EXCP_FP_VXSOFT:
        env->fpscr |= 1 << FPSCR_VXSOFT;
        break;
    case POWERPC_EXCP_FP_VXISI:
        /* Magnitude subtraction of infinities */
        env->fpscr |= 1 << FPSCR_VXISI;
        goto update_arith;
    case POWERPC_EXCP_FP_VXIDI:
        /* Division of infinity by infinity */
        env->fpscr |= 1 << FPSCR_VXIDI;
        goto update_arith;
    case POWERPC_EXCP_FP_VXZDZ:
        /* Division of zero by zero */
        env->fpscr |= 1 << FPSCR_VXZDZ;
        goto update_arith;
    case POWERPC_EXCP_FP_VXIMZ:
        /* Multiplication of zero by infinity */
        env->fpscr |= 1 << FPSCR_VXIMZ;
        goto update_arith;
    case POWERPC_EXCP_FP_VXVC:
        /* Ordered comparison of NaN */
        env->fpscr |= 1 << FPSCR_VXVC;
        env->fpscr &= ~(0xF << FPSCR_FPCC);
        env->fpscr |= 0x11 << FPSCR_FPCC;
        /* We must update the target FPR before raising the exception */
        if (ve != 0) {
            env->exception_index = POWERPC_EXCP_PROGRAM;
            env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_VXVC;
            /* Update the floating-point enabled exception summary */
            env->fpscr |= 1 << FPSCR_FEX;
            /* Exception is deferred */
            ve = 0;
        }
        break;
    case POWERPC_EXCP_FP_VXSQRT:
        /* Square root of a negative number */
        env->fpscr |= 1 << FPSCR_VXSQRT;
    update_arith:
        env->fpscr &= ~((1 << FPSCR_FR) | (1 << FPSCR_FI));
        if (ve == 0) {
            /* Set the result to quiet NaN */
            ret = 0xFFF8000000000000ULL;
            env->fpscr &= ~(0xF << FPSCR_FPCC);
            env->fpscr |= 0x11 << FPSCR_FPCC;
        }
        break;
    case POWERPC_EXCP_FP_VXCVI:
        /* Invalid conversion */
        env->fpscr |= 1 << FPSCR_VXCVI;
        env->fpscr &= ~((1 << FPSCR_FR) | (1 << FPSCR_FI));
        if (ve == 0) {
            /* Set the result to quiet NaN */
            ret = 0xFFF8000000000000ULL;
            env->fpscr &= ~(0xF << FPSCR_FPCC);
            env->fpscr |= 0x11 << FPSCR_FPCC;
        }
        break;
    }
    /* Update the floating-point invalid operation summary */
    env->fpscr |= 1 << FPSCR_VX;
    /* Update the floating-point exception summary */
    env->fpscr |= 1 << FPSCR_FX;
    if (ve != 0) {
        /* Update the floating-point enabled exception summary */
        env->fpscr |= 1 << FPSCR_FEX;
        if (msr_fe0 != 0 || msr_fe1 != 0)
            helper_raise_exception_err(POWERPC_EXCP_PROGRAM, POWERPC_EXCP_FP | op);
    }
    return ret;
}
static always_inline void float_zero_divide_excp (void)
{
    env->fpscr |= 1 << FPSCR_ZX;
    env->fpscr &= ~((1 << FPSCR_FR) | (1 << FPSCR_FI));
    /* Update the floating-point exception summary */
    env->fpscr |= 1 << FPSCR_FX;
    if (fpscr_ze != 0) {
        /* Update the floating-point enabled exception summary */
        env->fpscr |= 1 << FPSCR_FEX;
        if (msr_fe0 != 0 || msr_fe1 != 0) {
            helper_raise_exception_err(POWERPC_EXCP_PROGRAM,
                                       POWERPC_EXCP_FP | POWERPC_EXCP_FP_ZX);
        }
    }
}

static always_inline void float_overflow_excp (void)
{
    env->fpscr |= 1 << FPSCR_OX;
    /* Update the floating-point exception summary */
    env->fpscr |= 1 << FPSCR_FX;
    if (fpscr_oe != 0) {
        /* XXX: should adjust the result */
        /* Update the floating-point enabled exception summary */
        env->fpscr |= 1 << FPSCR_FEX;
        /* We must update the target FPR before raising the exception */
        env->exception_index = POWERPC_EXCP_PROGRAM;
        env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_OX;
    } else {
        env->fpscr |= 1 << FPSCR_XX;
        env->fpscr |= 1 << FPSCR_FI;
    }
}

static always_inline void float_underflow_excp (void)
{
    env->fpscr |= 1 << FPSCR_UX;
    /* Update the floating-point exception summary */
    env->fpscr |= 1 << FPSCR_FX;
    if (fpscr_ue != 0) {
        /* XXX: should adjust the result */
        /* Update the floating-point enabled exception summary */
        env->fpscr |= 1 << FPSCR_FEX;
        /* We must update the target FPR before raising the exception */
        env->exception_index = POWERPC_EXCP_PROGRAM;
        env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_UX;
    }
}

static always_inline void float_inexact_excp (void)
{
    env->fpscr |= 1 << FPSCR_XX;
    /* Update the floating-point exception summary */
    env->fpscr |= 1 << FPSCR_FX;
    if (fpscr_xe != 0) {
        /* Update the floating-point enabled exception summary */
        env->fpscr |= 1 << FPSCR_FEX;
        /* We must update the target FPR before raising the exception */
        env->exception_index = POWERPC_EXCP_PROGRAM;
        env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_XX;
    }
}
static always_inline void fpscr_set_rounding_mode (void)
{
    int rnd_type;

    /* Set rounding mode */
    switch (fpscr_rn) {
    case 0:
        /* Best approximation (round to nearest) */
        rnd_type = float_round_nearest_even;
        break;
    case 1:
        /* Smaller magnitude (round toward zero) */
        rnd_type = float_round_to_zero;
        break;
    case 2:
        /* Round toward +infinity */
        rnd_type = float_round_up;
        break;
    default:
    case 3:
        /* Round toward -infinity */
        rnd_type = float_round_down;
        break;
    }
    set_float_rounding_mode(rnd_type, &env->fp_status);
}
void helper_fpscr_clrbit (uint32_t bit)
{
    int prev;

    prev = (env->fpscr >> bit) & 1;
    env->fpscr &= ~(1 << bit);
    if (prev == 1) {
        switch (bit) {
        case FPSCR_RN1:
        case FPSCR_RN:
            fpscr_set_rounding_mode();
            break;
        default:
            break;
        }
    }
}

void helper_fpscr_setbit (uint32_t bit)
{
    int prev;

    prev = (env->fpscr >> bit) & 1;
    env->fpscr |= 1 << bit;
    if (prev == 0) {
        switch (bit) {
        case FPSCR_VX:
            env->fpscr |= 1 << FPSCR_FX;
            if (fpscr_ve)
                goto raise_ve;
        case FPSCR_OX:
            env->fpscr |= 1 << FPSCR_FX;
            if (fpscr_oe)
                goto raise_oe;
            break;
        case FPSCR_UX:
            env->fpscr |= 1 << FPSCR_FX;
            if (fpscr_ue)
                goto raise_ue;
            break;
        case FPSCR_ZX:
            env->fpscr |= 1 << FPSCR_FX;
            if (fpscr_ze)
                goto raise_ze;
            break;
        case FPSCR_XX:
            env->fpscr |= 1 << FPSCR_FX;
            if (fpscr_xe)
                goto raise_xe;
            break;
        case FPSCR_VXSNAN:
        case FPSCR_VXISI:
        case FPSCR_VXIDI:
        case FPSCR_VXZDZ:
        case FPSCR_VXIMZ:
        case FPSCR_VXVC:
        case FPSCR_VXSOFT:
        case FPSCR_VXSQRT:
        case FPSCR_VXCVI:
            env->fpscr |= 1 << FPSCR_VX;
            env->fpscr |= 1 << FPSCR_FX;
            if (fpscr_ve != 0)
                goto raise_ve;
            break;
        case FPSCR_VE:
            if (fpscr_vx != 0) {
            raise_ve:
                env->error_code = POWERPC_EXCP_FP;
                if (fpscr_vxsnan)
                    env->error_code |= POWERPC_EXCP_FP_VXSNAN;
                if (fpscr_vxisi)
                    env->error_code |= POWERPC_EXCP_FP_VXISI;
                if (fpscr_vxidi)
                    env->error_code |= POWERPC_EXCP_FP_VXIDI;
                if (fpscr_vxzdz)
                    env->error_code |= POWERPC_EXCP_FP_VXZDZ;
                if (fpscr_vximz)
                    env->error_code |= POWERPC_EXCP_FP_VXIMZ;
                if (fpscr_vxvc)
                    env->error_code |= POWERPC_EXCP_FP_VXVC;
                if (fpscr_vxsoft)
                    env->error_code |= POWERPC_EXCP_FP_VXSOFT;
                if (fpscr_vxsqrt)
                    env->error_code |= POWERPC_EXCP_FP_VXSQRT;
                if (fpscr_vxcvi)
                    env->error_code |= POWERPC_EXCP_FP_VXCVI;
                goto raise_excp;
            }
            break;
        case FPSCR_OE:
            if (fpscr_ox != 0) {
            raise_oe:
                env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_OX;
                goto raise_excp;
            }
            break;
        case FPSCR_UE:
            if (fpscr_ux != 0) {
            raise_ue:
                env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_UX;
                goto raise_excp;
            }
            break;
        case FPSCR_ZE:
            if (fpscr_zx != 0) {
            raise_ze:
                env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_ZX;
                goto raise_excp;
            }
            break;
        case FPSCR_XE:
            if (fpscr_xx != 0) {
            raise_xe:
                env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_XX;
                goto raise_excp;
            }
            break;
        case FPSCR_RN1:
        case FPSCR_RN:
            fpscr_set_rounding_mode();
            break;
        default:
            break;
        raise_excp:
            /* Update the floating-point enabled exception summary */
            env->fpscr |= 1 << FPSCR_FEX;
            /* We have to update Rc1 before raising the exception */
            env->exception_index = POWERPC_EXCP_PROGRAM;
            break;
        }
    }
}
void helper_store_fpscr (uint64_t arg, uint32_t mask)
{
    /*
     * We use only the 32 LSB of the incoming fpr
     */
    uint32_t prev, new;
    int i;

    prev = env->fpscr;
    new = (uint32_t)arg;
    new &= ~0x60000000;
    new |= prev & 0x60000000;
    for (i = 0; i < 8; i++) {
        if (mask & (1 << i)) {
            env->fpscr &= ~(0xF << (4 * i));
            env->fpscr |= new & (0xF << (4 * i));
        }
    }
    /* Update VX and FEX */
    if (fpscr_ix != 0)
        env->fpscr |= 1 << FPSCR_VX;
    else
        env->fpscr &= ~(1 << FPSCR_VX);
    if ((fpscr_ex & fpscr_eex) != 0) {
        env->fpscr |= 1 << FPSCR_FEX;
        env->exception_index = POWERPC_EXCP_PROGRAM;
        /* XXX: we should compute it properly */
        env->error_code = POWERPC_EXCP_FP;
    }
    else
        env->fpscr &= ~(1 << FPSCR_FEX);
    fpscr_set_rounding_mode();
}
void helper_float_check_status (void)
{
#ifdef CONFIG_SOFTFLOAT
    if (env->exception_index == POWERPC_EXCP_PROGRAM &&
        (env->error_code & POWERPC_EXCP_FP)) {
        /* Deferred floating-point exception after target FPR update */
        if (msr_fe0 != 0 || msr_fe1 != 0)
            helper_raise_exception_err(env->exception_index, env->error_code);
    } else {
        int status = get_float_exception_flags(&env->fp_status);
        if (status & float_flag_divbyzero) {
            float_zero_divide_excp();
        } else if (status & float_flag_overflow) {
            float_overflow_excp();
        } else if (status & float_flag_underflow) {
            float_underflow_excp();
        } else if (status & float_flag_inexact) {
            float_inexact_excp();
        }
    }
#else
    if (env->exception_index == POWERPC_EXCP_PROGRAM &&
        (env->error_code & POWERPC_EXCP_FP)) {
        /* Deferred floating-point exception after target FPR update */
        if (msr_fe0 != 0 || msr_fe1 != 0)
            helper_raise_exception_err(env->exception_index, env->error_code);
    }
#endif
}

#ifdef CONFIG_SOFTFLOAT
void helper_reset_fpstatus (void)
{
    set_float_exception_flags(0, &env->fp_status);
}
#endif
/* fadd - fadd. */
uint64_t helper_fadd (uint64_t arg1, uint64_t arg2)
{
    CPU_DoubleU farg1, farg2;

    farg1.ll = arg1;
    farg2.ll = arg2;
#if USE_PRECISE_EMULATION
    if (unlikely(float64_is_signaling_nan(farg1.d) ||
                 float64_is_signaling_nan(farg2.d))) {
        /* sNaN addition */
        farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
    } else if (unlikely(float64_is_infinity(farg1.d) && float64_is_infinity(farg2.d) &&
                        float64_is_neg(farg1.d) != float64_is_neg(farg2.d))) {
        /* Magnitude subtraction of infinities */
        farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXISI);
    } else {
        farg1.d = float64_add(farg1.d, farg2.d, &env->fp_status);
    }
#else
    farg1.d = float64_add(farg1.d, farg2.d, &env->fp_status);
#endif
    return farg1.ll;
}

/* fsub - fsub. */
uint64_t helper_fsub (uint64_t arg1, uint64_t arg2)
{
    CPU_DoubleU farg1, farg2;

    farg1.ll = arg1;
    farg2.ll = arg2;
#if USE_PRECISE_EMULATION
    if (unlikely(float64_is_signaling_nan(farg1.d) ||
                 float64_is_signaling_nan(farg2.d))) {
        /* sNaN subtraction */
        farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
    } else if (unlikely(float64_is_infinity(farg1.d) && float64_is_infinity(farg2.d) &&
                        float64_is_neg(farg1.d) == float64_is_neg(farg2.d))) {
        /* Magnitude subtraction of infinities */
        farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXISI);
    } else {
        farg1.d = float64_sub(farg1.d, farg2.d, &env->fp_status);
    }
#else
    farg1.d = float64_sub(farg1.d, farg2.d, &env->fp_status);
#endif
    return farg1.ll;
}

/* fmul - fmul. */
uint64_t helper_fmul (uint64_t arg1, uint64_t arg2)
{
    CPU_DoubleU farg1, farg2;

    farg1.ll = arg1;
    farg2.ll = arg2;
#if USE_PRECISE_EMULATION
    if (unlikely(float64_is_signaling_nan(farg1.d) ||
                 float64_is_signaling_nan(farg2.d))) {
        /* sNaN multiplication */
        farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
    } else if (unlikely((float64_is_infinity(farg1.d) && float64_is_zero(farg2.d)) ||
                        (float64_is_zero(farg1.d) && float64_is_infinity(farg2.d)))) {
        /* Multiplication of zero by infinity */
        farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXIMZ);
    } else {
        farg1.d = float64_mul(farg1.d, farg2.d, &env->fp_status);
    }
#else
    farg1.d = float64_mul(farg1.d, farg2.d, &env->fp_status);
#endif
    return farg1.ll;
}

/* fdiv - fdiv. */
uint64_t helper_fdiv (uint64_t arg1, uint64_t arg2)
{
    CPU_DoubleU farg1, farg2;

    farg1.ll = arg1;
    farg2.ll = arg2;
#if USE_PRECISE_EMULATION
    if (unlikely(float64_is_signaling_nan(farg1.d) ||
                 float64_is_signaling_nan(farg2.d))) {
        /* sNaN division */
        farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
    } else if (unlikely(float64_is_infinity(farg1.d) && float64_is_infinity(farg2.d))) {
        /* Division of infinity by infinity */
        farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXIDI);
    } else if (unlikely(float64_is_zero(farg1.d) && float64_is_zero(farg2.d))) {
        /* Division of zero by zero */
        farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXZDZ);
    } else {
        farg1.d = float64_div(farg1.d, farg2.d, &env->fp_status);
    }
#else
    farg1.d = float64_div(farg1.d, farg2.d, &env->fp_status);
#endif
    return farg1.ll;
}
/* fabs */
uint64_t helper_fabs (uint64_t arg)
{
    CPU_DoubleU farg;

    farg.ll = arg;
    farg.d = float64_abs(farg.d);
    return farg.ll;
}

/* fnabs */
uint64_t helper_fnabs (uint64_t arg)
{
    CPU_DoubleU farg;

    farg.ll = arg;
    farg.d = float64_abs(farg.d);
    farg.d = float64_chs(farg.d);
    return farg.ll;
}

/* fneg */
uint64_t helper_fneg (uint64_t arg)
{
    CPU_DoubleU farg;

    farg.ll = arg;
    farg.d = float64_chs(farg.d);
    return farg.ll;
}
/* fctiw - fctiw. */
uint64_t helper_fctiw (uint64_t arg)
{
    CPU_DoubleU farg;
    farg.ll = arg;

    if (unlikely(float64_is_signaling_nan(farg.d))) {
        /* sNaN conversion */
        farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN | POWERPC_EXCP_FP_VXCVI);
    } else if (unlikely(float64_is_nan(farg.d) || float64_is_infinity(farg.d))) {
        /* qNaN / infinity conversion */
        farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXCVI);
    } else {
        farg.ll = float64_to_int32(farg.d, &env->fp_status);
#if USE_PRECISE_EMULATION
        /* XXX: higher bits are not supposed to be significant.
         * To make tests easier, return the same as a real PowerPC 750.
         */
        farg.ll |= 0xFFF80000ULL << 32;
#endif
    }
    return farg.ll;
}

/* fctiwz - fctiwz. */
uint64_t helper_fctiwz (uint64_t arg)
{
    CPU_DoubleU farg;
    farg.ll = arg;

    if (unlikely(float64_is_signaling_nan(farg.d))) {
        /* sNaN conversion */
        farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN | POWERPC_EXCP_FP_VXCVI);
    } else if (unlikely(float64_is_nan(farg.d) || float64_is_infinity(farg.d))) {
        /* qNaN / infinity conversion */
        farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXCVI);
    } else {
        farg.ll = float64_to_int32_round_to_zero(farg.d, &env->fp_status);
#if USE_PRECISE_EMULATION
        /* XXX: higher bits are not supposed to be significant.
         * To make tests easier, return the same as a real PowerPC 750.
         */
        farg.ll |= 0xFFF80000ULL << 32;
#endif
    }
    return farg.ll;
}

#if defined(TARGET_PPC64)
/* fcfid - fcfid. */
uint64_t helper_fcfid (uint64_t arg)
{
    CPU_DoubleU farg;
    farg.d = int64_to_float64(arg, &env->fp_status);
    return farg.ll;
}

/* fctid - fctid. */
uint64_t helper_fctid (uint64_t arg)
{
    CPU_DoubleU farg;
    farg.ll = arg;

    if (unlikely(float64_is_signaling_nan(farg.d))) {
        /* sNaN conversion */
        farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN | POWERPC_EXCP_FP_VXCVI);
    } else if (unlikely(float64_is_nan(farg.d) || float64_is_infinity(farg.d))) {
        /* qNaN / infinity conversion */
        farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXCVI);
    } else {
        farg.ll = float64_to_int64(farg.d, &env->fp_status);
    }
    return farg.ll;
}

/* fctidz - fctidz. */
uint64_t helper_fctidz (uint64_t arg)
{
    CPU_DoubleU farg;
    farg.ll = arg;

    if (unlikely(float64_is_signaling_nan(farg.d))) {
        /* sNaN conversion */
        farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN | POWERPC_EXCP_FP_VXCVI);
    } else if (unlikely(float64_is_nan(farg.d) || float64_is_infinity(farg.d))) {
        /* qNaN / infinity conversion */
        farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXCVI);
    } else {
        farg.ll = float64_to_int64_round_to_zero(farg.d, &env->fp_status);
    }
    return farg.ll;
}
#endif
static always_inline uint64_t do_fri (uint64_t arg, int rounding_mode)
{
    CPU_DoubleU farg;
    farg.ll = arg;

    if (unlikely(float64_is_signaling_nan(farg.d))) {
        /* sNaN round */
        farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN | POWERPC_EXCP_FP_VXCVI);
    } else if (unlikely(float64_is_nan(farg.d) || float64_is_infinity(farg.d))) {
        /* qNaN / infinity round */
        farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXCVI);
    } else {
        set_float_rounding_mode(rounding_mode, &env->fp_status);
        farg.ll = float64_round_to_int(farg.d, &env->fp_status);
        /* Restore rounding mode from FPSCR */
        fpscr_set_rounding_mode();
    }
    return farg.ll;
}

uint64_t helper_frin (uint64_t arg)
{
    return do_fri(arg, float_round_nearest_even);
}

uint64_t helper_friz (uint64_t arg)
{
    return do_fri(arg, float_round_to_zero);
}

uint64_t helper_frip (uint64_t arg)
{
    return do_fri(arg, float_round_up);
}

uint64_t helper_frim (uint64_t arg)
{
    return do_fri(arg, float_round_down);
}
/* fmadd - fmadd. */
uint64_t helper_fmadd (uint64_t arg1, uint64_t arg2, uint64_t arg3)
{
    CPU_DoubleU farg1, farg2, farg3;

    farg1.ll = arg1;
    farg2.ll = arg2;
    farg3.ll = arg3;
#if USE_PRECISE_EMULATION
    if (unlikely(float64_is_signaling_nan(farg1.d) ||
                 float64_is_signaling_nan(farg2.d) ||
                 float64_is_signaling_nan(farg3.d))) {
        /* sNaN operation */
        farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
    } else if (unlikely((float64_is_infinity(farg1.d) && float64_is_zero(farg2.d)) ||
                        (float64_is_zero(farg1.d) && float64_is_infinity(farg2.d)))) {
        /* Multiplication of zero by infinity */
        farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXIMZ);
    } else {
#ifdef FLOAT128
        /* This is the way the PowerPC specification defines it */
        float128 ft0_128, ft1_128;

        ft0_128 = float64_to_float128(farg1.d, &env->fp_status);
        ft1_128 = float64_to_float128(farg2.d, &env->fp_status);
        ft0_128 = float128_mul(ft0_128, ft1_128, &env->fp_status);
        if (unlikely(float128_is_infinity(ft0_128) && float64_is_infinity(farg3.d) &&
                     float128_is_neg(ft0_128) != float64_is_neg(farg3.d))) {
            /* Magnitude subtraction of infinities */
            farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXISI);
        } else {
            ft1_128 = float64_to_float128(farg3.d, &env->fp_status);
            ft0_128 = float128_add(ft0_128, ft1_128, &env->fp_status);
            farg1.d = float128_to_float64(ft0_128, &env->fp_status);
        }
#else
        /* This is OK on x86 hosts */
        farg1.d = (farg1.d * farg2.d) + farg3.d;
#endif
    }
#else
    farg1.d = float64_mul(farg1.d, farg2.d, &env->fp_status);
    farg1.d = float64_add(farg1.d, farg3.d, &env->fp_status);
#endif
    return farg1.ll;
}

/* fmsub - fmsub. */
uint64_t helper_fmsub (uint64_t arg1, uint64_t arg2, uint64_t arg3)
{
    CPU_DoubleU farg1, farg2, farg3;

    farg1.ll = arg1;
    farg2.ll = arg2;
    farg3.ll = arg3;
#if USE_PRECISE_EMULATION
    if (unlikely(float64_is_signaling_nan(farg1.d) ||
                 float64_is_signaling_nan(farg2.d) ||
                 float64_is_signaling_nan(farg3.d))) {
        /* sNaN operation */
        farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
    } else if (unlikely((float64_is_infinity(farg1.d) && float64_is_zero(farg2.d)) ||
                        (float64_is_zero(farg1.d) && float64_is_infinity(farg2.d)))) {
        /* Multiplication of zero by infinity */
        farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXIMZ);
    } else {
#ifdef FLOAT128
        /* This is the way the PowerPC specification defines it */
        float128 ft0_128, ft1_128;

        ft0_128 = float64_to_float128(farg1.d, &env->fp_status);
        ft1_128 = float64_to_float128(farg2.d, &env->fp_status);
        ft0_128 = float128_mul(ft0_128, ft1_128, &env->fp_status);
        if (unlikely(float128_is_infinity(ft0_128) && float64_is_infinity(farg3.d) &&
                     float128_is_neg(ft0_128) == float64_is_neg(farg3.d))) {
            /* Magnitude subtraction of infinities */
            farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXISI);
        } else {
            ft1_128 = float64_to_float128(farg3.d, &env->fp_status);
            ft0_128 = float128_sub(ft0_128, ft1_128, &env->fp_status);
            farg1.d = float128_to_float64(ft0_128, &env->fp_status);
        }
#else
        /* This is OK on x86 hosts */
        farg1.d = (farg1.d * farg2.d) - farg3.d;
#endif
    }
#else
    farg1.d = float64_mul(farg1.d, farg2.d, &env->fp_status);
    farg1.d = float64_sub(farg1.d, farg3.d, &env->fp_status);
#endif
    return farg1.ll;
}

/* fnmadd - fnmadd. */
uint64_t helper_fnmadd (uint64_t arg1, uint64_t arg2, uint64_t arg3)
{
    CPU_DoubleU farg1, farg2, farg3;

    farg1.ll = arg1;
    farg2.ll = arg2;
    farg3.ll = arg3;

    if (unlikely(float64_is_signaling_nan(farg1.d) ||
                 float64_is_signaling_nan(farg2.d) ||
                 float64_is_signaling_nan(farg3.d))) {
        /* sNaN operation */
        farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
    } else if (unlikely((float64_is_infinity(farg1.d) && float64_is_zero(farg2.d)) ||
                        (float64_is_zero(farg1.d) && float64_is_infinity(farg2.d)))) {
        /* Multiplication of zero by infinity */
        farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXIMZ);
    } else {
#if USE_PRECISE_EMULATION
#ifdef FLOAT128
        /* This is the way the PowerPC specification defines it */
        float128 ft0_128, ft1_128;

        ft0_128 = float64_to_float128(farg1.d, &env->fp_status);
        ft1_128 = float64_to_float128(farg2.d, &env->fp_status);
        ft0_128 = float128_mul(ft0_128, ft1_128, &env->fp_status);
        if (unlikely(float128_is_infinity(ft0_128) && float64_is_infinity(farg3.d) &&
                     float128_is_neg(ft0_128) != float64_is_neg(farg3.d))) {
            /* Magnitude subtraction of infinities */
            farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXISI);
        } else {
            ft1_128 = float64_to_float128(farg3.d, &env->fp_status);
            ft0_128 = float128_add(ft0_128, ft1_128, &env->fp_status);
            farg1.d = float128_to_float64(ft0_128, &env->fp_status);
        }
#else
        /* This is OK on x86 hosts */
        farg1.d = (farg1.d * farg2.d) + farg3.d;
#endif
#else
        farg1.d = float64_mul(farg1.d, farg2.d, &env->fp_status);
        farg1.d = float64_add(farg1.d, farg3.d, &env->fp_status);
#endif
        if (likely(!float64_is_nan(farg1.d)))
            farg1.d = float64_chs(farg1.d);
    }
    return farg1.ll;
}

/* fnmsub - fnmsub. */
uint64_t helper_fnmsub (uint64_t arg1, uint64_t arg2, uint64_t arg3)
{
    CPU_DoubleU farg1, farg2, farg3;

    farg1.ll = arg1;
    farg2.ll = arg2;
    farg3.ll = arg3;

    if (unlikely(float64_is_signaling_nan(farg1.d) ||
                 float64_is_signaling_nan(farg2.d) ||
                 float64_is_signaling_nan(farg3.d))) {
        /* sNaN operation */
        farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
    } else if (unlikely((float64_is_infinity(farg1.d) && float64_is_zero(farg2.d)) ||
                        (float64_is_zero(farg1.d) && float64_is_infinity(farg2.d)))) {
        /* Multiplication of zero by infinity */
        farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXIMZ);
    } else {
#if USE_PRECISE_EMULATION
#ifdef FLOAT128
        /* This is the way the PowerPC specification defines it */
        float128 ft0_128, ft1_128;

        ft0_128 = float64_to_float128(farg1.d, &env->fp_status);
        ft1_128 = float64_to_float128(farg2.d, &env->fp_status);
        ft0_128 = float128_mul(ft0_128, ft1_128, &env->fp_status);
        if (unlikely(float128_is_infinity(ft0_128) && float64_is_infinity(farg3.d) &&
                     float128_is_neg(ft0_128) == float64_is_neg(farg3.d))) {
            /* Magnitude subtraction of infinities */
            farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXISI);
        } else {
            ft1_128 = float64_to_float128(farg3.d, &env->fp_status);
            ft0_128 = float128_sub(ft0_128, ft1_128, &env->fp_status);
            farg1.d = float128_to_float64(ft0_128, &env->fp_status);
        }
#else
        /* This is OK on x86 hosts */
        farg1.d = (farg1.d * farg2.d) - farg3.d;
#endif
#else
        farg1.d = float64_mul(farg1.d, farg2.d, &env->fp_status);
        farg1.d = float64_sub(farg1.d, farg3.d, &env->fp_status);
#endif
        if (likely(!float64_is_nan(farg1.d)))
            farg1.d = float64_chs(farg1.d);
    }
    return farg1.ll;
}
/* frsp - frsp. */
uint64_t helper_frsp (uint64_t arg)
{
    CPU_DoubleU farg;
    float32 f32;
    farg.ll = arg;

#if USE_PRECISE_EMULATION
    if (unlikely(float64_is_signaling_nan(farg.d))) {
        /* sNaN round */
        farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
    } else {
        f32 = float64_to_float32(farg.d, &env->fp_status);
        farg.d = float32_to_float64(f32, &env->fp_status);
    }
#else
    f32 = float64_to_float32(farg.d, &env->fp_status);
    farg.d = float32_to_float64(f32, &env->fp_status);
#endif
    return farg.ll;
}
/* fsqrt - fsqrt. */
uint64_t helper_fsqrt (uint64_t arg)
{
    CPU_DoubleU farg;
    farg.ll = arg;

    if (unlikely(float64_is_signaling_nan(farg.d))) {
        /* sNaN square root */
        farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
    } else if (unlikely(float64_is_neg(farg.d) && !float64_is_zero(farg.d))) {
        /* Square root of a negative nonzero number */
        farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSQRT);
    } else {
        farg.d = float64_sqrt(farg.d, &env->fp_status);
    }
    return farg.ll;
}
/* fre - fre. */
uint64_t helper_fre (uint64_t arg)
{
    CPU_DoubleU farg;
    farg.ll = arg;

    if (unlikely(float64_is_signaling_nan(farg.d))) {
        /* sNaN reciprocal */
        farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
    } else {
        farg.d = float64_div(float64_one, farg.d, &env->fp_status);
    }
    /* Return the raw bits, not farg.d: with softfloat-native an implicit
     * double-to-integer conversion would corrupt the result */
    return farg.ll;
}
/* fres - fres. */
uint64_t helper_fres (uint64_t arg)
{
    CPU_DoubleU farg;
    float32 f32;
    farg.ll = arg;

    if (unlikely(float64_is_signaling_nan(farg.d))) {
        /* sNaN reciprocal */
        farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
    } else {
        farg.d = float64_div(float64_one, farg.d, &env->fp_status);
        f32 = float64_to_float32(farg.d, &env->fp_status);
        farg.d = float32_to_float64(f32, &env->fp_status);
    }
    return farg.ll;
}

/* frsqrte - frsqrte. */
uint64_t helper_frsqrte (uint64_t arg)
{
    CPU_DoubleU farg;
    float32 f32;
    farg.ll = arg;

    if (unlikely(float64_is_signaling_nan(farg.d))) {
        /* sNaN reciprocal square root */
        farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
    } else if (unlikely(float64_is_neg(farg.d) && !float64_is_zero(farg.d))) {
        /* Reciprocal square root of a negative nonzero number */
        farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSQRT);
    } else {
        farg.d = float64_sqrt(farg.d, &env->fp_status);
        farg.d = float64_div(float64_one, farg.d, &env->fp_status);
        f32 = float64_to_float32(farg.d, &env->fp_status);
        farg.d = float32_to_float64(f32, &env->fp_status);
    }
    return farg.ll;
}
/* fsel - fsel. */
uint64_t helper_fsel (uint64_t arg1, uint64_t arg2, uint64_t arg3)
{
    CPU_DoubleU farg1;

    farg1.ll = arg1;

    if ((!float64_is_neg(farg1.d) || float64_is_zero(farg1.d)) && !float64_is_nan(farg1.d))
        return arg2;
    else
        return arg3;
}
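
/* Illustration (not the softfloat path used above): fsel returns arg2 when
 * arg1 >= 0 (counting -0.0 as zero) and arg1 is not a NaN, otherwise arg3,
 * i.e. a branchless floating-point select. Sketch with host doubles:
 */
#if 0
static double fsel_model(double a, double b, double c)
{
    /* (a >= 0.0) is false for NaN, so a NaN falls through to c */
    return (a >= 0.0) ? b : c;
}
/* fsel_model(-0.0, 1.0, 2.0) == 1.0: both zeros select the second arg. */
#endif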
void helper_fcmpu (uint64_t arg1, uint64_t arg2, uint32_t crfD)
{
    CPU_DoubleU farg1, farg2;
    uint32_t ret = 0;
    farg1.ll = arg1;
    farg2.ll = arg2;

    if (unlikely(float64_is_nan(farg1.d) ||
                 float64_is_nan(farg2.d))) {
        ret = 0x01UL;
    } else if (float64_lt(farg1.d, farg2.d, &env->fp_status)) {
        ret = 0x08UL;
    } else if (!float64_le(farg1.d, farg2.d, &env->fp_status)) {
        ret = 0x04UL;
    } else {
        ret = 0x02UL;
    }

    env->fpscr &= ~(0x0F << FPSCR_FPRF);
    env->fpscr |= ret << FPSCR_FPRF;
    env->crf[crfD] = ret;
    if (unlikely(ret == 0x01UL
                 && (float64_is_signaling_nan(farg1.d) ||
                     float64_is_signaling_nan(farg2.d)))) {
        /* sNaN comparison */
        fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
    }
}

void helper_fcmpo (uint64_t arg1, uint64_t arg2, uint32_t crfD)
{
    CPU_DoubleU farg1, farg2;
    uint32_t ret = 0;
    farg1.ll = arg1;
    farg2.ll = arg2;

    if (unlikely(float64_is_nan(farg1.d) ||
                 float64_is_nan(farg2.d))) {
        ret = 0x01UL;
    } else if (float64_lt(farg1.d, farg2.d, &env->fp_status)) {
        ret = 0x08UL;
    } else if (!float64_le(farg1.d, farg2.d, &env->fp_status)) {
        ret = 0x04UL;
    } else {
        ret = 0x02UL;
    }

    env->fpscr &= ~(0x0F << FPSCR_FPRF);
    env->fpscr |= ret << FPSCR_FPRF;
    env->crf[crfD] = ret;
    if (unlikely (ret == 0x01UL)) {
        if (float64_is_signaling_nan(farg1.d) ||
            float64_is_signaling_nan(farg2.d)) {
            /* sNaN comparison */
            fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN |
                                  POWERPC_EXCP_FP_VXVC);
        } else {
            /* qNaN comparison */
            fload_invalid_op_excp(POWERPC_EXCP_FP_VXVC);
        }
    }
}
#if !defined (CONFIG_USER_ONLY)
void helper_store_msr (target_ulong val)
{
    val = hreg_store_msr(env, val, 0);
    if (val != 0) {
        env->interrupt_request |= CPU_INTERRUPT_EXITTB;
        helper_raise_exception(val);
    }
}

static always_inline void do_rfi (target_ulong nip, target_ulong msr,
                                  target_ulong msrm, int keep_msrh)
{
#if defined(TARGET_PPC64)
    if (msr & (1ULL << MSR_SF)) {
        nip = (uint64_t)nip;
        msr &= (uint64_t)msrm;
    } else {
        nip = (uint32_t)nip;
        msr = (uint32_t)(msr & msrm);
        if (keep_msrh)
            msr |= env->msr & ~((uint64_t)0xFFFFFFFF);
    }
#else
    nip = (uint32_t)nip;
    msr &= (uint32_t)msrm;
#endif
    /* XXX: beware: this is false if VLE is supported */
    env->nip = nip & ~((target_ulong)0x00000003);
    hreg_store_msr(env, msr, 1);
#if defined (DEBUG_OP)
    cpu_dump_rfi(env->nip, env->msr);
#endif
    /* No need to raise an exception here,
     * as rfi is always the last insn of a TB
     */
    env->interrupt_request |= CPU_INTERRUPT_EXITTB;
}

void helper_rfi (void)
{
    do_rfi(env->spr[SPR_SRR0], env->spr[SPR_SRR1],
           ~((target_ulong)0x0), 1);
}

#if defined(TARGET_PPC64)
void helper_rfid (void)
{
    do_rfi(env->spr[SPR_SRR0], env->spr[SPR_SRR1],
           ~((target_ulong)0x0), 0);
}

void helper_hrfid (void)
{
    do_rfi(env->spr[SPR_HSRR0], env->spr[SPR_HSRR1],
           ~((target_ulong)0x0), 0);
}
#endif
#endif
void helper_tw (target_ulong arg1, target_ulong arg2, uint32_t flags)
{
    if (!likely(!(((int32_t)arg1 < (int32_t)arg2 && (flags & 0x10)) ||
                  ((int32_t)arg1 > (int32_t)arg2 && (flags & 0x08)) ||
                  ((int32_t)arg1 == (int32_t)arg2 && (flags & 0x04)) ||
                  ((uint32_t)arg1 < (uint32_t)arg2 && (flags & 0x02)) ||
                  ((uint32_t)arg1 > (uint32_t)arg2 && (flags & 0x01))))) {
        helper_raise_exception_err(POWERPC_EXCP_PROGRAM, POWERPC_EXCP_TRAP);
    }
}

#if defined(TARGET_PPC64)
void helper_td (target_ulong arg1, target_ulong arg2, uint32_t flags)
{
    if (!likely(!(((int64_t)arg1 < (int64_t)arg2 && (flags & 0x10)) ||
                  ((int64_t)arg1 > (int64_t)arg2 && (flags & 0x08)) ||
                  ((int64_t)arg1 == (int64_t)arg2 && (flags & 0x04)) ||
                  ((uint64_t)arg1 < (uint64_t)arg2 && (flags & 0x02)) ||
                  ((uint64_t)arg1 > (uint64_t)arg2 && (flags & 0x01)))))
        helper_raise_exception_err(POWERPC_EXCP_PROGRAM, POWERPC_EXCP_TRAP);
}
#endif
/*****************************************************************************/
/* PowerPC 601 specific instructions (POWER bridge) */

target_ulong helper_clcs (uint32_t arg)
{
    switch (arg) {
    case 0x0CUL:
        /* Instruction cache line size */
        return env->icache_line_size;
        break;
    case 0x0DUL:
        /* Data cache line size */
        return env->dcache_line_size;
        break;
    case 0x0EUL:
        /* Minimum cache line size */
        return (env->icache_line_size < env->dcache_line_size) ?
               env->icache_line_size : env->dcache_line_size;
        break;
    case 0x0FUL:
        /* Maximum cache line size */
        return (env->icache_line_size > env->dcache_line_size) ?
               env->icache_line_size : env->dcache_line_size;
        break;
    default:
        /* Undefined */
        return 0;
        break;
    }
}
target_ulong helper_div (target_ulong arg1, target_ulong arg2)
{
    uint64_t tmp = (uint64_t)arg1 << 32 | env->spr[SPR_MQ];

    if (((int32_t)tmp == INT32_MIN && (int32_t)arg2 == (int32_t)-1) ||
        (int32_t)arg2 == 0) {
        env->spr[SPR_MQ] = 0;
        return INT32_MIN;
    } else {
        env->spr[SPR_MQ] = tmp % arg2;
        return tmp / (int32_t)arg2;
    }
}
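
/* Illustration (not from the original source): the POWER div instruction
 * divides the 64-bit value (RA || MQ) by RB, leaving the remainder in MQ.
 * The overflow cases (divide by zero, INT32_MIN / -1) pin the quotient to
 * INT32_MIN and clear MQ, as above. A worked example:
 */
#if 0
#include <stdint.h>

static void div_example(void)
{
    uint64_t tmp = ((uint64_t)0 << 32) | 7;  /* RA = 0, MQ = 7 */
    int32_t q = tmp / 2;                     /* quotient  -> 3 */
    int32_t mq = tmp % 2;                    /* new MQ    -> 1 */
    (void)q; (void)mq;
}
#endif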
target_ulong helper_divo (target_ulong arg1, target_ulong arg2)
{
    uint64_t tmp = (uint64_t)arg1 << 32 | env->spr[SPR_MQ];

    if (((int32_t)tmp == INT32_MIN && (int32_t)arg2 == (int32_t)-1) ||
        (int32_t)arg2 == 0) {
        env->xer |= (1 << XER_OV) | (1 << XER_SO);
        env->spr[SPR_MQ] = 0;
        return INT32_MIN;
    } else {
        env->spr[SPR_MQ] = tmp % arg2;
        tmp /= (int32_t)arg2;
        if ((int32_t)tmp != tmp) {
            env->xer |= (1 << XER_OV) | (1 << XER_SO);
        } else {
            env->xer &= ~(1 << XER_OV);
        }
        return tmp;
    }
}

target_ulong helper_divs (target_ulong arg1, target_ulong arg2)
{
    if (((int32_t)arg1 == INT32_MIN && (int32_t)arg2 == (int32_t)-1) ||
        (int32_t)arg2 == 0) {
        env->spr[SPR_MQ] = 0;
        return INT32_MIN;
    } else {
        env->spr[SPR_MQ] = (int32_t)arg1 % (int32_t)arg2;
        return (int32_t)arg1 / (int32_t)arg2;
    }
}

target_ulong helper_divso (target_ulong arg1, target_ulong arg2)
{
    if (((int32_t)arg1 == INT32_MIN && (int32_t)arg2 == (int32_t)-1) ||
        (int32_t)arg2 == 0) {
        env->xer |= (1 << XER_OV) | (1 << XER_SO);
        env->spr[SPR_MQ] = 0;
        return INT32_MIN;
    } else {
        env->xer &= ~(1 << XER_OV);
        env->spr[SPR_MQ] = (int32_t)arg1 % (int32_t)arg2;
        return (int32_t)arg1 / (int32_t)arg2;
    }
}
#if !defined (CONFIG_USER_ONLY)
target_ulong helper_rac (target_ulong addr)
{
    mmu_ctx_t ctx;
    int nb_BATs;
    target_ulong ret = 0;

    /* We don't have to generate many instances of this instruction,
     * as rac is supervisor only.
     */
    /* XXX: FIX THIS: Pretend we have no BAT */
    nb_BATs = env->nb_BATs;
    env->nb_BATs = 0;
    if (get_physical_address(env, &ctx, addr, 0, ACCESS_INT) == 0)
        ret = ctx.raddr;
    env->nb_BATs = nb_BATs;
    return ret;
}

void helper_rfsvc (void)
{
    do_rfi(env->lr, env->ctr, 0x0000FFFF, 0);
}
#endif
/*****************************************************************************/
/* 602 specific instructions */
/* mfrom is the most crazy instruction ever seen, imho ! */
/* Real implementation uses a ROM table. Do the same */
/* Extremely decomposed:
 * return 256 * log10(10^(-arg / 256) + 1.0) + 0.5
 */
#if !defined (CONFIG_USER_ONLY)
target_ulong helper_602_mfrom (target_ulong arg)
{
    if (likely(arg < 602)) {
#include "mfrom_table.c"
        return mfrom_ROM_table[arg];
    } else {
        return 0;
    }
}
#endif
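
/* Illustration (an assumption, not from the original source): one entry of
 * the ROM table generated from the formula in the comment above, using the
 * host libm. The exact table contents live in mfrom_table.c.
 */
#if 0
#include <math.h>
#include <stdint.h>

static uint32_t mfrom_entry(uint32_t arg)
{
    /* 256 * log10(10^(-arg/256) + 1.0) + 0.5, truncated to an integer */
    double d = pow(10.0, -(double)arg / 256.0);
    return (uint32_t)(256.0 * log10(d + 1.0) + 0.5);
}
/* mfrom_entry(0) == (uint32_t)(256 * log10(2.0) + 0.5) == 77 */
#endif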
/*****************************************************************************/
/* Embedded PowerPC specific helpers */

/* XXX: to be improved to check access rights when in user-mode */
target_ulong helper_load_dcr (target_ulong dcrn)
{
    target_ulong val = 0;

    if (unlikely(env->dcr_env == NULL)) {
        qemu_log("No DCR environment\n");
        helper_raise_exception_err(POWERPC_EXCP_PROGRAM,
                                   POWERPC_EXCP_INVAL | POWERPC_EXCP_INVAL_INVAL);
    } else if (unlikely(ppc_dcr_read(env->dcr_env, dcrn, &val) != 0)) {
        qemu_log("DCR read error %d %03x\n", (int)dcrn, (int)dcrn);
        helper_raise_exception_err(POWERPC_EXCP_PROGRAM,
                                   POWERPC_EXCP_INVAL | POWERPC_EXCP_PRIV_REG);
    }
    return val;
}

void helper_store_dcr (target_ulong dcrn, target_ulong val)
{
    if (unlikely(env->dcr_env == NULL)) {
        qemu_log("No DCR environment\n");
        helper_raise_exception_err(POWERPC_EXCP_PROGRAM,
                                   POWERPC_EXCP_INVAL | POWERPC_EXCP_INVAL_INVAL);
    } else if (unlikely(ppc_dcr_write(env->dcr_env, dcrn, val) != 0)) {
        qemu_log("DCR write error %d %03x\n", (int)dcrn, (int)dcrn);
        helper_raise_exception_err(POWERPC_EXCP_PROGRAM,
                                   POWERPC_EXCP_INVAL | POWERPC_EXCP_PRIV_REG);
    }
}
#if !defined(CONFIG_USER_ONLY)
void helper_40x_rfci (void)
{
    do_rfi(env->spr[SPR_40x_SRR2], env->spr[SPR_40x_SRR3],
           ~((target_ulong)0xFFFF0000), 0);
}

void helper_rfci (void)
{
    /* The saved MSR must be read from the SPR, not the SPR number itself */
    do_rfi(env->spr[SPR_BOOKE_CSRR0], env->spr[SPR_BOOKE_CSRR1],
           ~((target_ulong)0x3FFF0000), 0);
}

void helper_rfdi (void)
{
    do_rfi(env->spr[SPR_BOOKE_DSRR0], env->spr[SPR_BOOKE_DSRR1],
           ~((target_ulong)0x3FFF0000), 0);
}

void helper_rfmci (void)
{
    do_rfi(env->spr[SPR_BOOKE_MCSRR0], env->spr[SPR_BOOKE_MCSRR1],
           ~((target_ulong)0x3FFF0000), 0);
}
#endif
/* 440 specific */
target_ulong helper_dlmzb (target_ulong high, target_ulong low, uint32_t update_Rc)
{
    target_ulong mask;
    int i;

    i = 1;
    for (mask = 0xFF000000; mask != 0; mask = mask >> 8) {
        if ((high & mask) == 0) {
            if (update_Rc) {
                env->crf[0] = 0x4;
            }
            goto done;
        }
        i++;
    }
    for (mask = 0xFF000000; mask != 0; mask = mask >> 8) {
        if ((low & mask) == 0) {
            if (update_Rc) {
                env->crf[0] = 0x8;
            }
            goto done;
        }
        i++;
    }
    if (update_Rc) {
        env->crf[0] = 0x2;
    }
 done:
    env->xer = (env->xer & ~0x7F) | i;
    if (update_Rc) {
        env->crf[0] |= xer_so;
    }
    return i;
}
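
/* Illustration (not from the original source): dlmzb scans the 8 bytes of
 * RS || RB, high register first, for the first zero byte, and returns the
 * 1-based index i as computed by the loops above (9 when no zero byte is
 * found). A standalone model of just the scan:
 */
#if 0
#include <stdint.h>

static int dlmzb_model(uint32_t high, uint32_t low)
{
    int i = 1;
    uint32_t mask;

    for (mask = 0xFF000000; mask != 0; mask >>= 8, i++)
        if ((high & mask) == 0)
            return i;
    for (mask = 0xFF000000; mask != 0; mask >>= 8, i++)
        if ((low & mask) == 0)
            return i;
    return i;  /* 9 when no zero byte, mirroring the helper */
}
/* dlmzb_model(0x41420043, 0) == 3: 'A', 'B', then the zero byte. */
#endif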
/*****************************************************************************/
/* Altivec extension helpers */
#if defined(WORDS_BIGENDIAN)
#define HI_IDX 0
#define LO_IDX 1
#else
#define HI_IDX 1
#define LO_IDX 0
#endif

#if defined(WORDS_BIGENDIAN)
#define VECTOR_FOR_INORDER_I(index, element)                    \
    for (index = 0; index < ARRAY_SIZE(r->element); index++)
#else
#define VECTOR_FOR_INORDER_I(index, element)                    \
    for (index = ARRAY_SIZE(r->element)-1; index >= 0; index--)
#endif

/* If X is a NaN, store the corresponding QNaN into RESULT.  Otherwise,
 * execute the following block.  */
#define DO_HANDLE_NAN(result, x)                                \
    if (float32_is_nan(x) || float32_is_signaling_nan(x)) {     \
        CPU_FloatU __f;                                         \
        __f.f = x;                                              \
        __f.l = __f.l | (1 << 22);  /* Set QNaN bit. */         \
        result = __f.f;                                         \
    } else

#define HANDLE_NAN1(result, x)                  \
    DO_HANDLE_NAN(result, x)
#define HANDLE_NAN2(result, x, y)               \
    DO_HANDLE_NAN(result, x) DO_HANDLE_NAN(result, y)
#define HANDLE_NAN3(result, x, y, z)            \
    DO_HANDLE_NAN(result, x) DO_HANDLE_NAN(result, y) DO_HANDLE_NAN(result, z)
/* Saturating arithmetic helpers.  */
#define SATCVT(from, to, from_type, to_type, min, max, use_min, use_max) \
    static always_inline to_type cvt##from##to (from_type x, int *sat) \
    {                                                                  \
        to_type r;                                                     \
        if (use_min && x < min) {                                      \
            r = min;                                                   \
            *sat = 1;                                                  \
        } else if (use_max && x > max) {                               \
            r = max;                                                   \
            *sat = 1;                                                  \
        } else {                                                       \
            r = x;                                                     \
        }                                                              \
        return r;                                                      \
    }
SATCVT(sh, sb, int16_t, int8_t, INT8_MIN, INT8_MAX, 1, 1)
SATCVT(sw, sh, int32_t, int16_t, INT16_MIN, INT16_MAX, 1, 1)
SATCVT(sd, sw, int64_t, int32_t, INT32_MIN, INT32_MAX, 1, 1)
SATCVT(uh, ub, uint16_t, uint8_t, 0, UINT8_MAX, 0, 1)
SATCVT(uw, uh, uint32_t, uint16_t, 0, UINT16_MAX, 0, 1)
SATCVT(ud, uw, uint64_t, uint32_t, 0, UINT32_MAX, 0, 1)
SATCVT(sh, ub, int16_t, uint8_t, 0, UINT8_MAX, 1, 1)
SATCVT(sw, uh, int32_t, uint16_t, 0, UINT16_MAX, 1, 1)
SATCVT(sd, uw, int64_t, uint32_t, 0, UINT32_MAX, 1, 1)
#undef SATCVT
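
/* Illustration (not from the original source): each generated cvt##from##to
 * clamps a wide intermediate to the destination range and raises the sticky
 * *sat flag later folded into VSCR[SAT]. For instance, cvtshsb expands to
 * the equivalent of this sketch:
 */
#if 0
#include <stdint.h>

static int8_t cvtshsb_model(int16_t x, int *sat)
{
    if (x < INT8_MIN) { *sat = 1; return INT8_MIN; }
    if (x > INT8_MAX) { *sat = 1; return INT8_MAX; }
    return (int8_t)x;
}
/* cvtshsb_model(300, &sat) == 127 and sets sat = 1. */
#endif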
#define LVE(name, access, swap, element)                                \
    void helper_##name (ppc_avr_t *r, target_ulong addr)                \
    {                                                                   \
        size_t n_elems = ARRAY_SIZE(r->element);                        \
        int adjust = HI_IDX*(n_elems-1);                                \
        int sh = sizeof(r->element[0]) >> 1;                            \
        int index = (addr & 0xf) >> sh;                                 \
        if(msr_le) {                                                    \
            r->element[LO_IDX ? index : (adjust - index)] = swap(access(addr)); \
        } else {                                                        \
            r->element[LO_IDX ? index : (adjust - index)] = access(addr); \
        }                                                               \
    }
#define I(x) (x)
LVE(lvebx, ldub, I, u8)
LVE(lvehx, lduw, bswap16, u16)
LVE(lvewx, ldl, bswap32, u32)
#undef I
#undef LVE
void helper_lvsl (ppc_avr_t *r, target_ulong sh)
{
    int i, j = (sh & 0xf);

    VECTOR_FOR_INORDER_I (i, u8) {
        r->u8[i] = j++;
    }
}

void helper_lvsr (ppc_avr_t *r, target_ulong sh)
{
    int i, j = 0x10 - (sh & 0xf);

    VECTOR_FOR_INORDER_I (i, u8) {
        r->u8[i] = j++;
    }
}
#define STVE(name, access, swap, element)                               \
    void helper_##name (ppc_avr_t *r, target_ulong addr)                \
    {                                                                   \
        size_t n_elems = ARRAY_SIZE(r->element);                        \
        int adjust = HI_IDX*(n_elems-1);                                \
        int sh = sizeof(r->element[0]) >> 1;                            \
        int index = (addr & 0xf) >> sh;                                 \
        if(msr_le) {                                                    \
            access(addr, swap(r->element[LO_IDX ? index : (adjust - index)])); \
        } else {                                                        \
            access(addr, r->element[LO_IDX ? index : (adjust - index)]); \
        }                                                               \
    }
#define I(x) (x)
STVE(stvebx, stb, I, u8)
STVE(stvehx, stw, bswap16, u16)
STVE(stvewx, stl, bswap32, u32)
#undef I
#undef STVE
void helper_mtvscr (ppc_avr_t *r)
{
#if defined(WORDS_BIGENDIAN)
    env->vscr = r->u32[3];
#else
    env->vscr = r->u32[0];
#endif
    set_flush_to_zero(vscr_nj, &env->vec_status);
}

void helper_vaddcuw (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
{
    int i;
    for (i = 0; i < ARRAY_SIZE(r->u32); i++) {
        r->u32[i] = ~a->u32[i] < b->u32[i];
    }
}
#define VARITH_DO(name, op, element)                                    \
void helper_v##name (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)          \
{                                                                       \
    int i;                                                              \
    for (i = 0; i < ARRAY_SIZE(r->element); i++) {                      \
        r->element[i] = a->element[i] op b->element[i];                 \
    }                                                                   \
}
#define VARITH(suffix, element)                 \
    VARITH_DO(add##suffix, +, element)          \
    VARITH_DO(sub##suffix, -, element)
VARITH(ubm, u8)
VARITH(uhm, u16)
VARITH(uwm, u32)
#undef VARITH_DO
#undef VARITH

#define VARITHFP(suffix, func)                                          \
    void helper_v##suffix (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)    \
    {                                                                   \
        int i;                                                          \
        for (i = 0; i < ARRAY_SIZE(r->f); i++) {                        \
            HANDLE_NAN2(r->f[i], a->f[i], b->f[i]) {                    \
                r->f[i] = func(a->f[i], b->f[i], &env->vec_status);     \
            }                                                           \
        }                                                               \
    }
VARITHFP(addfp, float32_add)
VARITHFP(subfp, float32_sub)
#undef VARITHFP
2116 #define VARITHSAT_CASE(type, op, cvt, element) \
2118 type result = (type)a->element[i] op (type)b->element[i]; \
2119 r->element[i] = cvt(result, &sat); \
2122 #define VARITHSAT_DO(name, op, optype, cvt, element) \
2123 void helper_v##name (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b) \
2125 int sat = 0; \
2126 int i; \
2127 for (i = 0; i < ARRAY_SIZE(r->element); i++) { \
2128 switch (sizeof(r->element[0])) { \
2129 case 1: VARITHSAT_CASE(optype, op, cvt, element); break; \
2130 case 2: VARITHSAT_CASE(optype, op, cvt, element); break; \
2131 case 4: VARITHSAT_CASE(optype, op, cvt, element); break; \
2134 if (sat) { \
2135 env->vscr |= (1 << VSCR_SAT); \
2138 #define VARITHSAT_SIGNED(suffix, element, optype, cvt) \
2139 VARITHSAT_DO(adds##suffix##s, +, optype, cvt, element) \
2140 VARITHSAT_DO(subs##suffix##s, -, optype, cvt, element)
2141 #define VARITHSAT_UNSIGNED(suffix, element, optype, cvt) \
2142 VARITHSAT_DO(addu##suffix##s, +, optype, cvt, element) \
2143 VARITHSAT_DO(subu##suffix##s, -, optype, cvt, element)
2144 VARITHSAT_SIGNED(b, s8, int16_t, cvtshsb)
2145 VARITHSAT_SIGNED(h, s16, int32_t, cvtswsh)
2146 VARITHSAT_SIGNED(w, s32, int64_t, cvtsdsw)
2147 VARITHSAT_UNSIGNED(b, u8, uint16_t, cvtshub)
2148 VARITHSAT_UNSIGNED(h, u16, uint32_t, cvtswuh)
2149 VARITHSAT_UNSIGNED(w, u32, uint64_t, cvtsduw)
2150 #undef VARITHSAT_CASE
2151 #undef VARITHSAT_DO
2152 #undef VARITHSAT_SIGNED
2153 #undef VARITHSAT_UNSIGNED
2155 #define VAVG_DO(name, element, etype) \
2156 void helper_v##name (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b) \
2158 int i; \
2159 for (i = 0; i < ARRAY_SIZE(r->element); i++) { \
2160 etype x = (etype)a->element[i] + (etype)b->element[i] + 1; \
2161 r->element[i] = x >> 1; \
2165 #define VAVG(type, signed_element, signed_type, unsigned_element, unsigned_type) \
2166 VAVG_DO(avgs##type, signed_element, signed_type) \
2167 VAVG_DO(avgu##type, unsigned_element, unsigned_type)
2168 VAVG(b, s8, int16_t, u8, uint16_t)
2169 VAVG(h, s16, int32_t, u16, uint32_t)
2170 VAVG(w, s32, int64_t, u32, uint64_t)
2171 #undef VAVG_DO
2172 #undef VAVG
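/* Worked example: vavgub of 1 and 2 computes (1 + 2 + 1) >> 1 = 2, i.e.
 * the +1 bias above makes the average round up. */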
2174 #define VCF(suffix, cvt, element) \
2175 void helper_vcf##suffix (ppc_avr_t *r, ppc_avr_t *b, uint32_t uim) \
2177 int i; \
2178 for (i = 0; i < ARRAY_SIZE(r->f); i++) { \
2179 float32 t = cvt(b->element[i], &env->vec_status); \
2180 r->f[i] = float32_scalbn (t, -uim, &env->vec_status); \
2183 VCF(ux, uint32_to_float32, u32)
2184 VCF(sx, int32_to_float32, s32)
2185 #undef VCF
2187 #define VCMP_DO(suffix, compare, element, record) \
2188 void helper_vcmp##suffix (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b) \
2190 uint32_t ones = (uint32_t)-1; \
2191 uint32_t all = ones; \
2192 uint32_t none = 0; \
2193 int i; \
2194 for (i = 0; i < ARRAY_SIZE(r->element); i++) { \
2195 uint32_t result = (a->element[i] compare b->element[i] ? ones : 0x0); \
2196 switch (sizeof (a->element[0])) { \
2197 case 4: r->u32[i] = result; break; \
2198 case 2: r->u16[i] = result; break; \
2199 case 1: r->u8[i] = result; break; \
2201 all &= result; \
2202 none |= result; \
2204 if (record) { \
2205 env->crf[6] = ((all != 0) << 3) | ((none == 0) << 1); \
2208 #define VCMP(suffix, compare, element) \
2209 VCMP_DO(suffix, compare, element, 0) \
2210 VCMP_DO(suffix##_dot, compare, element, 1)
2211 VCMP(equb, ==, u8)
2212 VCMP(equh, ==, u16)
2213 VCMP(equw, ==, u32)
2214 VCMP(gtub, >, u8)
2215 VCMP(gtuh, >, u16)
2216 VCMP(gtuw, >, u32)
2217 VCMP(gtsb, >, s8)
2218 VCMP(gtsh, >, s16)
2219 VCMP(gtsw, >, s32)
2220 #undef VCMP_DO
2221 #undef VCMP
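/* Illustration of the record (".") forms: comparing two identical
 * vectors with vcmpequb. leaves all == -1 and none == -1, so CR6 is set
 * to (1 << 3) | (0 << 1) = 0x8 ("all true"); vectors with no matching
 * element yield 0x2 ("none true"). */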
2223 #define VCMPFP_DO(suffix, compare, order, record) \
2224 void helper_vcmp##suffix (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b) \
2226 uint32_t ones = (uint32_t)-1; \
2227 uint32_t all = ones; \
2228 uint32_t none = 0; \
2229 int i; \
2230 for (i = 0; i < ARRAY_SIZE(r->f); i++) { \
2231 uint32_t result; \
2232 int rel = float32_compare_quiet(a->f[i], b->f[i], &env->vec_status); \
2233 if (rel == float_relation_unordered) { \
2234 result = 0; \
2235 } else if (rel compare order) { \
2236 result = ones; \
2237 } else { \
2238 result = 0; \
2240 r->u32[i] = result; \
2241 all &= result; \
2242 none |= result; \
2244 if (record) { \
2245 env->crf[6] = ((all != 0) << 3) | ((none == 0) << 1); \
2248 #define VCMPFP(suffix, compare, order) \
2249 VCMPFP_DO(suffix, compare, order, 0) \
2250 VCMPFP_DO(suffix##_dot, compare, order, 1)
2251 VCMPFP(eqfp, ==, float_relation_equal)
2252 VCMPFP(gefp, !=, float_relation_less)
2253 VCMPFP(gtfp, ==, float_relation_greater)
2254 #undef VCMPFP_DO
2255 #undef VCMPFP
2257 static always_inline void vcmpbfp_internal (ppc_avr_t *r, ppc_avr_t *a,
2258 ppc_avr_t *b, int record)
2260 int i;
2261 int all_in = 0;
2262 for (i = 0; i < ARRAY_SIZE(r->f); i++) {
2263 int le_rel = float32_compare_quiet(a->f[i], b->f[i], &env->vec_status);
2264 if (le_rel == float_relation_unordered) {
2265 r->u32[i] = 0xc0000000;
2266 /* ALL_IN does not need to be updated here. */
2267 } else {
2268 float32 bneg = float32_chs(b->f[i]);
2269 int ge_rel = float32_compare_quiet(a->f[i], bneg, &env->vec_status);
2270 int le = le_rel != float_relation_greater;
2271 int ge = ge_rel != float_relation_less;
2272 r->u32[i] = ((!le) << 31) | ((!ge) << 30);
2273 all_in |= (!le | !ge);
2276 if (record) {
2277 env->crf[6] = (all_in == 0) << 1;
2281 void helper_vcmpbfp (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
2283 vcmpbfp_internal(r, a, b, 0);
2286 void helper_vcmpbfp_dot (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
2288 vcmpbfp_internal(r, a, b, 1);
2291 #define VCT(suffix, satcvt, element) \
2292 void helper_vct##suffix (ppc_avr_t *r, ppc_avr_t *b, uint32_t uim) \
2294 int i; \
2295 int sat = 0; \
2296 float_status s = env->vec_status; \
2297 set_float_rounding_mode(float_round_to_zero, &s); \
2298 for (i = 0; i < ARRAY_SIZE(r->f); i++) { \
2299 if (float32_is_nan(b->f[i]) || \
2300 float32_is_signaling_nan(b->f[i])) { \
2301 r->element[i] = 0; \
2302 } else { \
2303 float64 t = float32_to_float64(b->f[i], &s); \
2304 int64_t j; \
2305 t = float64_scalbn(t, uim, &s); \
2306 j = float64_to_int64(t, &s); \
2307 r->element[i] = satcvt(j, &sat); \
2310 if (sat) { \
2311 env->vscr |= (1 << VSCR_SAT); \
2314 VCT(uxs, cvtsduw, u32)
2315 VCT(sxs, cvtsdsw, s32)
2316 #undef VCT
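/* Worked example: vctuxs with uim = 1 scales an element holding 1.5 to
 * 3.0 and stores 3; a negative input saturates to 0 via cvtsduw and
 * sets VSCR[SAT]. */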
2318 void helper_vmaddfp (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, ppc_avr_t *c)
2320 int i;
2321 for (i = 0; i < ARRAY_SIZE(r->f); i++) {
2322 HANDLE_NAN3(r->f[i], a->f[i], b->f[i], c->f[i]) {
2323 /* Need to do the computation in higher precision and round
2324 * once at the end. */
2325 float64 af, bf, cf, t;
2326 af = float32_to_float64(a->f[i], &env->vec_status);
2327 bf = float32_to_float64(b->f[i], &env->vec_status);
2328 cf = float32_to_float64(c->f[i], &env->vec_status);
2329 t = float64_mul(af, cf, &env->vec_status);
2330 t = float64_add(t, bf, &env->vec_status);
2331 r->f[i] = float64_to_float32(t, &env->vec_status);
2336 void helper_vmhaddshs (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, ppc_avr_t *c)
2338 int sat = 0;
2339 int i;
2341 for (i = 0; i < ARRAY_SIZE(r->s16); i++) {
2342 int32_t prod = a->s16[i] * b->s16[i];
2343 int32_t t = (int32_t)c->s16[i] + (prod >> 15);
2344 r->s16[i] = cvtswsh (t, &sat);
2347 if (sat) {
2348 env->vscr |= (1 << VSCR_SAT);
2352 void helper_vmhraddshs (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, ppc_avr_t *c)
2354 int sat = 0;
2355 int i;
2357 for (i = 0; i < ARRAY_SIZE(r->s16); i++) {
2358 int32_t prod = a->s16[i] * b->s16[i] + 0x00004000;
2359 int32_t t = (int32_t)c->s16[i] + (prod >> 15);
2360 r->s16[i] = cvtswsh (t, &sat);
2363 if (sat) {
2364 env->vscr |= (1 << VSCR_SAT);
2368 #define VMINMAX_DO(name, compare, element) \
2369 void helper_v##name (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b) \
2371 int i; \
2372 for (i = 0; i < ARRAY_SIZE(r->element); i++) { \
2373 if (a->element[i] compare b->element[i]) { \
2374 r->element[i] = b->element[i]; \
2375 } else { \
2376 r->element[i] = a->element[i]; \
2380 #define VMINMAX(suffix, element) \
2381 VMINMAX_DO(min##suffix, >, element) \
2382 VMINMAX_DO(max##suffix, <, element)
2383 VMINMAX(sb, s8)
2384 VMINMAX(sh, s16)
2385 VMINMAX(sw, s32)
2386 VMINMAX(ub, u8)
2387 VMINMAX(uh, u16)
2388 VMINMAX(uw, u32)
2389 #undef VMINMAX_DO
2390 #undef VMINMAX
2392 #define VMINMAXFP(suffix, rT, rF) \
2393 void helper_v##suffix (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b) \
2395 int i; \
2396 for (i = 0; i < ARRAY_SIZE(r->f); i++) { \
2397 HANDLE_NAN2(r->f[i], a->f[i], b->f[i]) { \
2398 if (float32_lt_quiet(a->f[i], b->f[i], &env->vec_status)) { \
2399 r->f[i] = rT->f[i]; \
2400 } else { \
2401 r->f[i] = rF->f[i]; \
2406 VMINMAXFP(minfp, a, b)
2407 VMINMAXFP(maxfp, b, a)
2408 #undef VMINMAXFP
2410 void helper_vmladduhm (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, ppc_avr_t *c)
2412 int i;
2413 for (i = 0; i < ARRAY_SIZE(r->s16); i++) {
2414 int32_t prod = a->s16[i] * b->s16[i];
2415 r->s16[i] = (int16_t) (prod + c->s16[i]);
2419 #define VMRG_DO(name, element, highp) \
2420 void helper_v##name (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b) \
2422 ppc_avr_t result; \
2423 int i; \
2424 size_t n_elems = ARRAY_SIZE(r->element); \
2425 for (i = 0; i < n_elems/2; i++) { \
2426 if (highp) { \
2427 result.element[i*2+HI_IDX] = a->element[i]; \
2428 result.element[i*2+LO_IDX] = b->element[i]; \
2429 } else { \
2430 result.element[n_elems - i*2 - (1+HI_IDX)] = b->element[n_elems - i - 1]; \
2431 result.element[n_elems - i*2 - (1+LO_IDX)] = a->element[n_elems - i - 1]; \
2434 *r = result; \
2436 #if defined(WORDS_BIGENDIAN)
2437 #define MRGHI 0
2438 #define MRGLO 1
2439 #else
2440 #define MRGHI 1
2441 #define MRGLO 0
2442 #endif
2443 #define VMRG(suffix, element) \
2444 VMRG_DO(mrgl##suffix, element, MRGHI) \
2445 VMRG_DO(mrgh##suffix, element, MRGLO)
2446 VMRG(b, u8)
2447 VMRG(h, u16)
2448 VMRG(w, u32)
2449 #undef VMRG_DO
2450 #undef VMRG
2451 #undef MRGHI
2452 #undef MRGLO
2454 void helper_vmsummbm (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, ppc_avr_t *c)
2456 int32_t prod[16];
2457 int i;
2459 for (i = 0; i < ARRAY_SIZE(r->s8); i++) {
2460 prod[i] = (int32_t)a->s8[i] * b->u8[i];
2463 VECTOR_FOR_INORDER_I(i, s32) {
2464 r->s32[i] = c->s32[i] + prod[4*i] + prod[4*i+1] + prod[4*i+2] + prod[4*i+3];
2468 void helper_vmsumshm (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, ppc_avr_t *c)
2470 int32_t prod[8];
2471 int i;
2473 for (i = 0; i < ARRAY_SIZE(r->s16); i++) {
2474 prod[i] = a->s16[i] * b->s16[i];
2477 VECTOR_FOR_INORDER_I(i, s32) {
2478 r->s32[i] = c->s32[i] + prod[2*i] + prod[2*i+1];
2482 void helper_vmsumshs (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, ppc_avr_t *c)
2484 int32_t prod[8];
2485 int i;
2486 int sat = 0;
2488 for (i = 0; i < ARRAY_SIZE(r->s16); i++) {
2489 prod[i] = (int32_t)a->s16[i] * b->s16[i];
2492 VECTOR_FOR_INORDER_I (i, s32) {
2493 int64_t t = (int64_t)c->s32[i] + prod[2*i] + prod[2*i+1];
2494 r->u32[i] = cvtsdsw(t, &sat);
2497 if (sat) {
2498 env->vscr |= (1 << VSCR_SAT);
2502 void helper_vmsumubm (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, ppc_avr_t *c)
2504 uint16_t prod[16];
2505 int i;
2507 for (i = 0; i < ARRAY_SIZE(r->u8); i++) {
2508 prod[i] = a->u8[i] * b->u8[i];
2511 VECTOR_FOR_INORDER_I(i, u32) {
2512 r->u32[i] = c->u32[i] + prod[4*i] + prod[4*i+1] + prod[4*i+2] + prod[4*i+3];
2516 void helper_vmsumuhm (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, ppc_avr_t *c)
2518 uint32_t prod[8];
2519 int i;
2521 for (i = 0; i < ARRAY_SIZE(r->u16); i++) {
2522 prod[i] = a->u16[i] * b->u16[i];
2525 VECTOR_FOR_INORDER_I(i, u32) {
2526 r->u32[i] = c->u32[i] + prod[2*i] + prod[2*i+1];
2530 void helper_vmsumuhs (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, ppc_avr_t *c)
2532 uint32_t prod[8];
2533 int i;
2534 int sat = 0;
2536 for (i = 0; i < ARRAY_SIZE(r->u16); i++) {
2537 prod[i] = a->u16[i] * b->u16[i];
2540 VECTOR_FOR_INORDER_I (i, s32) {
2541 uint64_t t = (uint64_t)c->u32[i] + prod[2*i] + prod[2*i+1];
2542 r->u32[i] = cvtuduw(t, &sat);
2545 if (sat) {
2546 env->vscr |= (1 << VSCR_SAT);
2550 #define VMUL_DO(name, mul_element, prod_element, evenp) \
2551 void helper_v##name (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b) \
2553 int i; \
2554 VECTOR_FOR_INORDER_I(i, prod_element) { \
2555 if (evenp) { \
2556 r->prod_element[i] = a->mul_element[i*2+HI_IDX] * b->mul_element[i*2+HI_IDX]; \
2557 } else { \
2558 r->prod_element[i] = a->mul_element[i*2+LO_IDX] * b->mul_element[i*2+LO_IDX]; \
2562 #define VMUL(suffix, mul_element, prod_element) \
2563 VMUL_DO(mule##suffix, mul_element, prod_element, 1) \
2564 VMUL_DO(mulo##suffix, mul_element, prod_element, 0)
2565 VMUL(sb, s8, s16)
2566 VMUL(sh, s16, s32)
2567 VMUL(ub, u8, u16)
2568 VMUL(uh, u16, u32)
2569 #undef VMUL_DO
2570 #undef VMUL
2572 void helper_vnmsubfp (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, ppc_avr_t *c)
2574 int i;
2575 for (i = 0; i < ARRAY_SIZE(r->f); i++) {
2576 HANDLE_NAN3(r->f[i], a->f[i], b->f[i], c->f[i]) {
2577         /* Need to do the computation in higher precision and round
2578 * once at the end. */
2579 float64 af, bf, cf, t;
2580 af = float32_to_float64(a->f[i], &env->vec_status);
2581 bf = float32_to_float64(b->f[i], &env->vec_status);
2582 cf = float32_to_float64(c->f[i], &env->vec_status);
2583 t = float64_mul(af, cf, &env->vec_status);
2584 t = float64_sub(t, bf, &env->vec_status);
2585 t = float64_chs(t);
2586 r->f[i] = float64_to_float32(t, &env->vec_status);
2591 void helper_vperm (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, ppc_avr_t *c)
2593 ppc_avr_t result;
2594 int i;
2595 VECTOR_FOR_INORDER_I (i, u8) {
2596 int s = c->u8[i] & 0x1f;
2597 #if defined(WORDS_BIGENDIAN)
2598 int index = s & 0xf;
2599 #else
2600 int index = 15 - (s & 0xf);
2601 #endif
2602 if (s & 0x10) {
2603 result.u8[i] = b->u8[index];
2604 } else {
2605 result.u8[i] = a->u8[index];
2608 *r = result;
2611 #if defined(WORDS_BIGENDIAN)
2612 #define PKBIG 1
2613 #else
2614 #define PKBIG 0
2615 #endif
2616 void helper_vpkpx (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
2618 int i, j;
2619 ppc_avr_t result;
2620 #if defined(WORDS_BIGENDIAN)
2621 const ppc_avr_t *x[2] = { a, b };
2622 #else
2623 const ppc_avr_t *x[2] = { b, a };
2624 #endif
2626 VECTOR_FOR_INORDER_I (i, u64) {
2627 VECTOR_FOR_INORDER_I (j, u32){
2628 uint32_t e = x[i]->u32[j];
2629 result.u16[4*i+j] = (((e >> 9) & 0xfc00) |
2630 ((e >> 6) & 0x3e0) |
2631 ((e >> 3) & 0x1f));
2634 *r = result;
2637 #define VPK(suffix, from, to, cvt, dosat) \
2638 void helper_vpk##suffix (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b) \
2640 int i; \
2641 int sat = 0; \
2642 ppc_avr_t result; \
2643 ppc_avr_t *a0 = PKBIG ? a : b; \
2644 ppc_avr_t *a1 = PKBIG ? b : a; \
2645 VECTOR_FOR_INORDER_I (i, from) { \
2646 result.to[i] = cvt(a0->from[i], &sat); \
2647 result.to[i+ARRAY_SIZE(r->from)] = cvt(a1->from[i], &sat); \
2649 *r = result; \
2650 if (dosat && sat) { \
2651 env->vscr |= (1 << VSCR_SAT); \
2654 #define I(x, y) (x)
2655 VPK(shss, s16, s8, cvtshsb, 1)
2656 VPK(shus, s16, u8, cvtshub, 1)
2657 VPK(swss, s32, s16, cvtswsh, 1)
2658 VPK(swus, s32, u16, cvtswuh, 1)
2659 VPK(uhus, u16, u8, cvtuhub, 1)
2660 VPK(uwus, u32, u16, cvtuwuh, 1)
2661 VPK(uhum, u16, u8, I, 0)
2662 VPK(uwum, u32, u16, I, 0)
2663 #undef I
2664 #undef VPK
2665 #undef PKBIG
2667 void helper_vrefp (ppc_avr_t *r, ppc_avr_t *b)
2669 int i;
2670 for (i = 0; i < ARRAY_SIZE(r->f); i++) {
2671 HANDLE_NAN1(r->f[i], b->f[i]) {
2672 r->f[i] = float32_div(float32_one, b->f[i], &env->vec_status);
2677 #define VRFI(suffix, rounding) \
2678 void helper_vrfi##suffix (ppc_avr_t *r, ppc_avr_t *b) \
2680 int i; \
2681 float_status s = env->vec_status; \
2682 set_float_rounding_mode(rounding, &s); \
2683 for (i = 0; i < ARRAY_SIZE(r->f); i++) { \
2684 HANDLE_NAN1(r->f[i], b->f[i]) { \
2685 r->f[i] = float32_round_to_int (b->f[i], &s); \
2689 VRFI(n, float_round_nearest_even)
2690 VRFI(m, float_round_down)
2691 VRFI(p, float_round_up)
2692 VRFI(z, float_round_to_zero)
2693 #undef VRFI
2695 #define VROTATE(suffix, element) \
2696 void helper_vrl##suffix (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b) \
2698 int i; \
2699 for (i = 0; i < ARRAY_SIZE(r->element); i++) { \
2700 unsigned int mask = ((1 << (3 + (sizeof (a->element[0]) >> 1))) - 1); \
2701 unsigned int shift = b->element[i] & mask; \
2702 r->element[i] = (a->element[i] << shift) | (a->element[i] >> (sizeof(a->element[0]) * 8 - shift)); \
2705 VROTATE(b, u8)
2706 VROTATE(h, u16)
2707 VROTATE(w, u32)
2708 #undef VROTATE
2710 void helper_vrsqrtefp (ppc_avr_t *r, ppc_avr_t *b)
2712 int i;
2713 for (i = 0; i < ARRAY_SIZE(r->f); i++) {
2714 HANDLE_NAN1(r->f[i], b->f[i]) {
2715 float32 t = float32_sqrt(b->f[i], &env->vec_status);
2716 r->f[i] = float32_div(float32_one, t, &env->vec_status);
2721 void helper_vsel (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, ppc_avr_t *c)
2723 r->u64[0] = (a->u64[0] & ~c->u64[0]) | (b->u64[0] & c->u64[0]);
2724 r->u64[1] = (a->u64[1] & ~c->u64[1]) | (b->u64[1] & c->u64[1]);
2727 void helper_vlogefp (ppc_avr_t *r, ppc_avr_t *b)
2729 int i;
2730 for (i = 0; i < ARRAY_SIZE(r->f); i++) {
2731 HANDLE_NAN1(r->f[i], b->f[i]) {
2732 r->f[i] = float32_log2(b->f[i], &env->vec_status);
2737 /* vsl always shifts left and vsr always shifts right; the macro body
2738  * below is already host-endian independent (HI_IDX/LO_IDX hide the
2739  * byte order), so these flags must not depend on WORDS_BIGENDIAN. */
2740 #define LEFT 1
2741 #define RIGHT 0
2744 /* The specification says that the results are undefined unless all of
2745  * the shift counts are identical. We check that they are, to match
2746  * what real hardware appears to do. */
2747 #define VSHIFT(suffix, leftp) \
2748 void helper_vs##suffix (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b) \
2750         int shift = b->u8[LO_IDX*0xf] & 0x7;                            \
2751 int doit = 1; \
2752 int i; \
2753 for (i = 0; i < ARRAY_SIZE(r->u8); i++) { \
2754 doit = doit && ((b->u8[i] & 0x7) == shift); \
2756 if (doit) { \
2757 if (shift == 0) { \
2758 *r = *a; \
2759 } else if (leftp) { \
2760 uint64_t carry = a->u64[LO_IDX] >> (64 - shift); \
2761 r->u64[HI_IDX] = (a->u64[HI_IDX] << shift) | carry; \
2762 r->u64[LO_IDX] = a->u64[LO_IDX] << shift; \
2763 } else { \
2764 uint64_t carry = a->u64[HI_IDX] << (64 - shift); \
2765 r->u64[LO_IDX] = (a->u64[LO_IDX] >> shift) | carry; \
2766 r->u64[HI_IDX] = a->u64[HI_IDX] >> shift; \
2770 VSHIFT(l, LEFT)
2771 VSHIFT(r, RIGHT)
2772 #undef VSHIFT
2773 #undef LEFT
2774 #undef RIGHT
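/* Worked example: vsl with every shift-count byte equal to 3 shifts the
 * whole 128-bit operand left by 3 bits, carrying across the 64-bit
 * halves; with mismatched counts the destination is simply left
 * unmodified, matching observed hardware rather than the (undefined)
 * architected behaviour. */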
2776 #define VSL(suffix, element) \
2777 void helper_vsl##suffix (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b) \
2779 int i; \
2780 for (i = 0; i < ARRAY_SIZE(r->element); i++) { \
2781 unsigned int mask = ((1 << (3 + (sizeof (a->element[0]) >> 1))) - 1); \
2782 unsigned int shift = b->element[i] & mask; \
2783 r->element[i] = a->element[i] << shift; \
2786 VSL(b, u8)
2787 VSL(h, u16)
2788 VSL(w, u32)
2789 #undef VSL
2791 void helper_vsldoi (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, uint32_t shift)
2793 int sh = shift & 0xf;
2794 int i;
2795 ppc_avr_t result;
2797 #if defined(WORDS_BIGENDIAN)
2798 for (i = 0; i < ARRAY_SIZE(r->u8); i++) {
2799 int index = sh + i;
2800 if (index > 0xf) {
2801 result.u8[i] = b->u8[index-0x10];
2802 } else {
2803 result.u8[i] = a->u8[index];
2806 #else
2807 for (i = 0; i < ARRAY_SIZE(r->u8); i++) {
2808 int index = (16 - sh) + i;
2809 if (index > 0xf) {
2810 result.u8[i] = a->u8[index-0x10];
2811 } else {
2812 result.u8[i] = b->u8[index];
2815 #endif
2816 *r = result;
2819 void helper_vslo (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
2821 int sh = (b->u8[LO_IDX*0xf] >> 3) & 0xf;
2823 #if defined (WORDS_BIGENDIAN)
2824 memmove (&r->u8[0], &a->u8[sh], 16-sh);
2825 memset (&r->u8[16-sh], 0, sh);
2826 #else
2827 memmove (&r->u8[sh], &a->u8[0], 16-sh);
2828 memset (&r->u8[0], 0, sh);
2829 #endif
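/* Worked example: if byte 15 of b is 0x10, then sh = (0x10 >> 3) & 0xf
 * = 2, so vslo moves the vector up by two whole bytes and zero-fills
 * the vacated ones. */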
2832 /* Experimental testing shows that hardware masks the immediate. */
2833 #define _SPLAT_MASKED(element) (splat & (ARRAY_SIZE(r->element) - 1))
2834 #if defined(WORDS_BIGENDIAN)
2835 #define SPLAT_ELEMENT(element) _SPLAT_MASKED(element)
2836 #else
2837 #define SPLAT_ELEMENT(element) (ARRAY_SIZE(r->element)-1 - _SPLAT_MASKED(element))
2838 #endif
2839 #define VSPLT(suffix, element) \
2840 void helper_vsplt##suffix (ppc_avr_t *r, ppc_avr_t *b, uint32_t splat) \
2842 uint32_t s = b->element[SPLAT_ELEMENT(element)]; \
2843 int i; \
2844 for (i = 0; i < ARRAY_SIZE(r->element); i++) { \
2845 r->element[i] = s; \
2848 VSPLT(b, u8)
2849 VSPLT(h, u16)
2850 VSPLT(w, u32)
2851 #undef VSPLT
2852 #undef SPLAT_ELEMENT
2853 #undef _SPLAT_MASKED
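/* Worked example: vspltb with splat = 3 copies element 3 (big-endian
 * element numbering) into all 16 byte lanes; thanks to the masking
 * above, an out-of-range immediate such as 0x13 behaves like 0x3. */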
2855 #define VSPLTI(suffix, element, splat_type) \
2856 void helper_vspltis##suffix (ppc_avr_t *r, uint32_t splat) \
2858 splat_type x = (int8_t)(splat << 3) >> 3; \
2859 int i; \
2860 for (i = 0; i < ARRAY_SIZE(r->element); i++) { \
2861 r->element[i] = x; \
2864 VSPLTI(b, s8, int8_t)
2865 VSPLTI(h, s16, int16_t)
2866 VSPLTI(w, s32, int32_t)
2867 #undef VSPLTI
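/* Worked example of the 5-bit sign extension above: splat = 0x1F gives
 * x = (int8_t)0xF8 >> 3 = -1, while splat = 0x0F gives x = 15. */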
2869 #define VSR(suffix, element) \
2870 void helper_vsr##suffix (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b) \
2872 int i; \
2873 for (i = 0; i < ARRAY_SIZE(r->element); i++) { \
2874 unsigned int mask = ((1 << (3 + (sizeof (a->element[0]) >> 1))) - 1); \
2875 unsigned int shift = b->element[i] & mask; \
2876 r->element[i] = a->element[i] >> shift; \
2879 VSR(ab, s8)
2880 VSR(ah, s16)
2881 VSR(aw, s32)
2882 VSR(b, u8)
2883 VSR(h, u16)
2884 VSR(w, u32)
2885 #undef VSR
2887 void helper_vsro (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
2889 int sh = (b->u8[LO_IDX*0xf] >> 3) & 0xf;
2891 #if defined (WORDS_BIGENDIAN)
2892 memmove (&r->u8[sh], &a->u8[0], 16-sh);
2893 memset (&r->u8[0], 0, sh);
2894 #else
2895 memmove (&r->u8[0], &a->u8[sh], 16-sh);
2896 memset (&r->u8[16-sh], 0, sh);
2897 #endif
2900 void helper_vsubcuw (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
2902 int i;
2903 for (i = 0; i < ARRAY_SIZE(r->u32); i++) {
2904 r->u32[i] = a->u32[i] >= b->u32[i];
2908 void helper_vsumsws (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
2910 int64_t t;
2911 int i, upper;
2912 ppc_avr_t result;
2913 int sat = 0;
2915 #if defined(WORDS_BIGENDIAN)
2916 upper = ARRAY_SIZE(r->s32)-1;
2917 #else
2918 upper = 0;
2919 #endif
2920 t = (int64_t)b->s32[upper];
2921 for (i = 0; i < ARRAY_SIZE(r->s32); i++) {
2922 t += a->s32[i];
2923 result.s32[i] = 0;
2925 result.s32[upper] = cvtsdsw(t, &sat);
2926 *r = result;
2928 if (sat) {
2929 env->vscr |= (1 << VSCR_SAT);
2933 void helper_vsum2sws (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
2935 int i, j, upper;
2936 ppc_avr_t result;
2937 int sat = 0;
2939 #if defined(WORDS_BIGENDIAN)
2940 upper = 1;
2941 #else
2942 upper = 0;
2943 #endif
2944 for (i = 0; i < ARRAY_SIZE(r->u64); i++) {
2945 int64_t t = (int64_t)b->s32[upper+i*2];
2946 result.u64[i] = 0;
2947 for (j = 0; j < ARRAY_SIZE(r->u64); j++) {
2948 t += a->s32[2*i+j];
2950 result.s32[upper+i*2] = cvtsdsw(t, &sat);
2953 *r = result;
2954 if (sat) {
2955 env->vscr |= (1 << VSCR_SAT);
2959 void helper_vsum4sbs (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
2961 int i, j;
2962 int sat = 0;
2964 for (i = 0; i < ARRAY_SIZE(r->s32); i++) {
2965 int64_t t = (int64_t)b->s32[i];
2966 for (j = 0; j < ARRAY_SIZE(r->s32); j++) {
2967 t += a->s8[4*i+j];
2969 r->s32[i] = cvtsdsw(t, &sat);
2972 if (sat) {
2973 env->vscr |= (1 << VSCR_SAT);
2977 void helper_vsum4shs (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
2979 int sat = 0;
2980 int i;
2982 for (i = 0; i < ARRAY_SIZE(r->s32); i++) {
2983 int64_t t = (int64_t)b->s32[i];
2984 t += a->s16[2*i] + a->s16[2*i+1];
2985 r->s32[i] = cvtsdsw(t, &sat);
2988 if (sat) {
2989 env->vscr |= (1 << VSCR_SAT);
2993 void helper_vsum4ubs (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
2995 int i, j;
2996 int sat = 0;
2998 for (i = 0; i < ARRAY_SIZE(r->u32); i++) {
2999 uint64_t t = (uint64_t)b->u32[i];
3000 for (j = 0; j < ARRAY_SIZE(r->u32); j++) {
3001 t += a->u8[4*i+j];
3003 r->u32[i] = cvtuduw(t, &sat);
3006 if (sat) {
3007 env->vscr |= (1 << VSCR_SAT);
3011 #if defined(WORDS_BIGENDIAN)
3012 #define UPKHI 1
3013 #define UPKLO 0
3014 #else
3015 #define UPKHI 0
3016 #define UPKLO 1
3017 #endif
3018 #define VUPKPX(suffix, hi) \
3019 void helper_vupk##suffix (ppc_avr_t *r, ppc_avr_t *b) \
3021 int i; \
3022 ppc_avr_t result; \
3023 for (i = 0; i < ARRAY_SIZE(r->u32); i++) { \
3024 uint16_t e = b->u16[hi ? i : i+4]; \
3025 uint8_t a = (e >> 15) ? 0xff : 0; \
3026 uint8_t r = (e >> 10) & 0x1f; \
3027 uint8_t g = (e >> 5) & 0x1f; \
3028 uint8_t b = e & 0x1f; \
3029 result.u32[i] = (a << 24) | (r << 16) | (g << 8) | b; \
3031 *r = result; \
3033 VUPKPX(lpx, UPKLO)
3034 VUPKPX(hpx, UPKHI)
3035 #undef VUPKPX
3037 #define VUPK(suffix, unpacked, packee, hi) \
3038 void helper_vupk##suffix (ppc_avr_t *r, ppc_avr_t *b) \
3040 int i; \
3041 ppc_avr_t result; \
3042 if (hi) { \
3043 for (i = 0; i < ARRAY_SIZE(r->unpacked); i++) { \
3044 result.unpacked[i] = b->packee[i]; \
3046 } else { \
3047 for (i = ARRAY_SIZE(r->unpacked); i < ARRAY_SIZE(r->packee); i++) { \
3048 result.unpacked[i-ARRAY_SIZE(r->unpacked)] = b->packee[i]; \
3051 *r = result; \
3053 VUPK(hsb, s16, s8, UPKHI)
3054 VUPK(hsh, s32, s16, UPKHI)
3055 VUPK(lsb, s16, s8, UPKLO)
3056 VUPK(lsh, s32, s16, UPKLO)
3057 #undef VUPK
3058 #undef UPKHI
3059 #undef UPKLO
3061 #undef DO_HANDLE_NAN
3062 #undef HANDLE_NAN1
3063 #undef HANDLE_NAN2
3064 #undef HANDLE_NAN3
3065 #undef VECTOR_FOR_INORDER_I
3066 #undef HI_IDX
3067 #undef LO_IDX
3069 /*****************************************************************************/
3070 /* SPE extension helpers */
3071 /* Use a lookup table to make bit reversal quicker */
3072 static uint8_t hbrev[16] = {
3073 0x0, 0x8, 0x4, 0xC, 0x2, 0xA, 0x6, 0xE,
3074 0x1, 0x9, 0x5, 0xD, 0x3, 0xB, 0x7, 0xF,
3077 static always_inline uint8_t byte_reverse (uint8_t val)
3079 return hbrev[val >> 4] | (hbrev[val & 0xF] << 4);
3082 static always_inline uint32_t word_reverse (uint32_t val)
3084 return byte_reverse(val >> 24) | (byte_reverse(val >> 16) << 8) |
3085 (byte_reverse(val >> 8) << 16) | (byte_reverse(val) << 24);
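/* Worked example: byte_reverse(0x12) = hbrev[0x1] | (hbrev[0x2] << 4)
 * = 0x08 | 0x40 = 0x48 (00010010 -> 01001000); word_reverse applies the
 * same bit reversal across all 32 bits. */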
3088 #define MASKBITS 16 // Random value - to be fixed (implementation dependent)
3089 target_ulong helper_brinc (target_ulong arg1, target_ulong arg2)
3091 uint32_t a, b, d, mask;
3093 mask = UINT32_MAX >> (32 - MASKBITS);
3094 a = arg1 & mask;
3095 b = arg2 & mask;
3096 d = word_reverse(1 + word_reverse(a | ~b));
3097 return (arg1 & ~mask) | (d & b);
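/* Worked example (using the 16-bit MASKBITS above): brinc(0x3, 0xF)
 * bit-reverses 0011 within the 4-bit mask to 1100, increments it to
 * 1101, and reverses back, returning 0xB -- the next index in a
 * bit-reversed (FFT-style) addressing sequence after 3. */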
3100 uint32_t helper_cntlsw32 (uint32_t val)
3102 if (val & 0x80000000)
3103 return clz32(~val);
3104 else
3105 return clz32(val);
3108 uint32_t helper_cntlzw32 (uint32_t val)
3110 return clz32(val);
3113 /* Single-precision floating-point conversions */
3114 static always_inline uint32_t efscfsi (uint32_t val)
3116 CPU_FloatU u;
3118 u.f = int32_to_float32(val, &env->vec_status);
3120 return u.l;
3123 static always_inline uint32_t efscfui (uint32_t val)
3125 CPU_FloatU u;
3127 u.f = uint32_to_float32(val, &env->vec_status);
3129 return u.l;
3132 static always_inline int32_t efsctsi (uint32_t val)
3134 CPU_FloatU u;
3136 u.l = val;
3137     /* NaNs are not treated the same way as IEEE 754 specifies */
3138 if (unlikely(float32_is_nan(u.f)))
3139 return 0;
3141 return float32_to_int32(u.f, &env->vec_status);
3144 static always_inline uint32_t efsctui (uint32_t val)
3146 CPU_FloatU u;
3148 u.l = val;
3149     /* NaNs are not treated the same way as IEEE 754 specifies */
3150 if (unlikely(float32_is_nan(u.f)))
3151 return 0;
3153 return float32_to_uint32(u.f, &env->vec_status);
3156 static always_inline uint32_t efsctsiz (uint32_t val)
3158 CPU_FloatU u;
3160 u.l = val;
3161     /* NaNs are not treated the same way as IEEE 754 specifies */
3162 if (unlikely(float32_is_nan(u.f)))
3163 return 0;
3165 return float32_to_int32_round_to_zero(u.f, &env->vec_status);
3168 static always_inline uint32_t efsctuiz (uint32_t val)
3170 CPU_FloatU u;
3172 u.l = val;
3173     /* NaNs are not treated the same way as IEEE 754 specifies */
3174 if (unlikely(float32_is_nan(u.f)))
3175 return 0;
3177 return float32_to_uint32_round_to_zero(u.f, &env->vec_status);
3180 static always_inline uint32_t efscfsf (uint32_t val)
3182 CPU_FloatU u;
3183 float32 tmp;
3185 u.f = int32_to_float32(val, &env->vec_status);
3186 tmp = int64_to_float32(1ULL << 32, &env->vec_status);
3187 u.f = float32_div(u.f, tmp, &env->vec_status);
3189 return u.l;
3192 static always_inline uint32_t efscfuf (uint32_t val)
3194 CPU_FloatU u;
3195 float32 tmp;
3197 u.f = uint32_to_float32(val, &env->vec_status);
3198 tmp = uint64_to_float32(1ULL << 32, &env->vec_status);
3199 u.f = float32_div(u.f, tmp, &env->vec_status);
3201 return u.l;
3204 static always_inline uint32_t efsctsf (uint32_t val)
3206 CPU_FloatU u;
3207 float32 tmp;
3209 u.l = val;
3210     /* NaNs are not treated the same way as IEEE 754 specifies */
3211 if (unlikely(float32_is_nan(u.f)))
3212 return 0;
3213 tmp = uint64_to_float32(1ULL << 32, &env->vec_status);
3214 u.f = float32_mul(u.f, tmp, &env->vec_status);
3216 return float32_to_int32(u.f, &env->vec_status);
3219 static always_inline uint32_t efsctuf (uint32_t val)
3221 CPU_FloatU u;
3222 float32 tmp;
3224 u.l = val;
3225     /* NaNs are not treated the same way as IEEE 754 specifies */
3226 if (unlikely(float32_is_nan(u.f)))
3227 return 0;
3228 tmp = uint64_to_float32(1ULL << 32, &env->vec_status);
3229 u.f = float32_mul(u.f, tmp, &env->vec_status);
3231 return float32_to_uint32(u.f, &env->vec_status);
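/* The *sf/*uf variants treat the 32-bit operand as a fixed-point
 * fraction, hence the scaling by 2^32 above; worked example:
 * efscfuf(0x80000000) yields 2147483648.0 / 2^32 = 0.5f. */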
3234 #define HELPER_SPE_SINGLE_CONV(name) \
3235 uint32_t helper_e##name (uint32_t val) \
3237 return e##name(val); \
3239 /* efscfsi */
3240 HELPER_SPE_SINGLE_CONV(fscfsi);
3241 /* efscfui */
3242 HELPER_SPE_SINGLE_CONV(fscfui);
3243 /* efscfuf */
3244 HELPER_SPE_SINGLE_CONV(fscfuf);
3245 /* efscfsf */
3246 HELPER_SPE_SINGLE_CONV(fscfsf);
3247 /* efsctsi */
3248 HELPER_SPE_SINGLE_CONV(fsctsi);
3249 /* efsctui */
3250 HELPER_SPE_SINGLE_CONV(fsctui);
3251 /* efsctsiz */
3252 HELPER_SPE_SINGLE_CONV(fsctsiz);
3253 /* efsctuiz */
3254 HELPER_SPE_SINGLE_CONV(fsctuiz);
3255 /* efsctsf */
3256 HELPER_SPE_SINGLE_CONV(fsctsf);
3257 /* efsctuf */
3258 HELPER_SPE_SINGLE_CONV(fsctuf);
3260 #define HELPER_SPE_VECTOR_CONV(name) \
3261 uint64_t helper_ev##name (uint64_t val) \
3263 return ((uint64_t)e##name(val >> 32) << 32) | \
3264 (uint64_t)e##name(val); \
3266 /* evfscfsi */
3267 HELPER_SPE_VECTOR_CONV(fscfsi);
3268 /* evfscfui */
3269 HELPER_SPE_VECTOR_CONV(fscfui);
3270 /* evfscfuf */
3271 HELPER_SPE_VECTOR_CONV(fscfuf);
3272 /* evfscfsf */
3273 HELPER_SPE_VECTOR_CONV(fscfsf);
3274 /* evfsctsi */
3275 HELPER_SPE_VECTOR_CONV(fsctsi);
3276 /* evfsctui */
3277 HELPER_SPE_VECTOR_CONV(fsctui);
3278 /* evfsctsiz */
3279 HELPER_SPE_VECTOR_CONV(fsctsiz);
3280 /* evfsctuiz */
3281 HELPER_SPE_VECTOR_CONV(fsctuiz);
3282 /* evfsctsf */
3283 HELPER_SPE_VECTOR_CONV(fsctsf);
3284 /* evfsctuf */
3285 HELPER_SPE_VECTOR_CONV(fsctuf);
3287 /* Single-precision floating-point arithmetic */
3288 static always_inline uint32_t efsadd (uint32_t op1, uint32_t op2)
3290 CPU_FloatU u1, u2;
3291 u1.l = op1;
3292 u2.l = op2;
3293 u1.f = float32_add(u1.f, u2.f, &env->vec_status);
3294 return u1.l;
3297 static always_inline uint32_t efssub (uint32_t op1, uint32_t op2)
3299 CPU_FloatU u1, u2;
3300 u1.l = op1;
3301 u2.l = op2;
3302 u1.f = float32_sub(u1.f, u2.f, &env->vec_status);
3303 return u1.l;
3306 static always_inline uint32_t efsmul (uint32_t op1, uint32_t op2)
3308 CPU_FloatU u1, u2;
3309 u1.l = op1;
3310 u2.l = op2;
3311 u1.f = float32_mul(u1.f, u2.f, &env->vec_status);
3312 return u1.l;
3315 static always_inline uint32_t efsdiv (uint32_t op1, uint32_t op2)
3317 CPU_FloatU u1, u2;
3318 u1.l = op1;
3319 u2.l = op2;
3320 u1.f = float32_div(u1.f, u2.f, &env->vec_status);
3321 return u1.l;
3324 #define HELPER_SPE_SINGLE_ARITH(name) \
3325 uint32_t helper_e##name (uint32_t op1, uint32_t op2) \
3327 return e##name(op1, op2); \
3329 /* efsadd */
3330 HELPER_SPE_SINGLE_ARITH(fsadd);
3331 /* efssub */
3332 HELPER_SPE_SINGLE_ARITH(fssub);
3333 /* efsmul */
3334 HELPER_SPE_SINGLE_ARITH(fsmul);
3335 /* efsdiv */
3336 HELPER_SPE_SINGLE_ARITH(fsdiv);
3338 #define HELPER_SPE_VECTOR_ARITH(name) \
3339 uint64_t helper_ev##name (uint64_t op1, uint64_t op2) \
3341 return ((uint64_t)e##name(op1 >> 32, op2 >> 32) << 32) | \
3342 (uint64_t)e##name(op1, op2); \
3344 /* evfsadd */
3345 HELPER_SPE_VECTOR_ARITH(fsadd);
3346 /* evfssub */
3347 HELPER_SPE_VECTOR_ARITH(fssub);
3348 /* evfsmul */
3349 HELPER_SPE_VECTOR_ARITH(fsmul);
3350 /* evfsdiv */
3351 HELPER_SPE_VECTOR_ARITH(fsdiv);
3353 /* Single-precision floating-point comparisons */
3354 static always_inline uint32_t efststlt (uint32_t op1, uint32_t op2)
3356 CPU_FloatU u1, u2;
3357 u1.l = op1;
3358 u2.l = op2;
3359 return float32_lt(u1.f, u2.f, &env->vec_status) ? 4 : 0;
3362 static always_inline uint32_t efststgt (uint32_t op1, uint32_t op2)
3364 CPU_FloatU u1, u2;
3365 u1.l = op1;
3366 u2.l = op2;
3367 return float32_le(u1.f, u2.f, &env->vec_status) ? 0 : 4;
3370 static always_inline uint32_t efststeq (uint32_t op1, uint32_t op2)
3372 CPU_FloatU u1, u2;
3373 u1.l = op1;
3374 u2.l = op2;
3375 return float32_eq(u1.f, u2.f, &env->vec_status) ? 4 : 0;
3378 static always_inline uint32_t efscmplt (uint32_t op1, uint32_t op2)
3380     /* XXX: TODO: test special values (NaN, infinities, ...) */
3381 return efststlt(op1, op2);
3384 static always_inline uint32_t efscmpgt (uint32_t op1, uint32_t op2)
3386     /* XXX: TODO: test special values (NaN, infinities, ...) */
3387 return efststgt(op1, op2);
3390 static always_inline uint32_t efscmpeq (uint32_t op1, uint32_t op2)
3392     /* XXX: TODO: test special values (NaN, infinities, ...) */
3393 return efststeq(op1, op2);
3396 #define HELPER_SINGLE_SPE_CMP(name) \
3397 uint32_t helper_e##name (uint32_t op1, uint32_t op2) \
3399 return e##name(op1, op2) << 2; \
3401 /* efststlt */
3402 HELPER_SINGLE_SPE_CMP(fststlt);
3403 /* efststgt */
3404 HELPER_SINGLE_SPE_CMP(fststgt);
3405 /* efststeq */
3406 HELPER_SINGLE_SPE_CMP(fststeq);
3407 /* efscmplt */
3408 HELPER_SINGLE_SPE_CMP(fscmplt);
3409 /* efscmpgt */
3410 HELPER_SINGLE_SPE_CMP(fscmpgt);
3411 /* efscmpeq */
3412 HELPER_SINGLE_SPE_CMP(fscmpeq);
3414 static always_inline uint32_t evcmp_merge (int t0, int t1)
3416 return (t0 << 3) | (t1 << 2) | ((t0 | t1) << 1) | (t0 & t1);
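/* Worked example: t0 = 1, t1 = 0 merges to (1 << 3) | (0 << 2) |
 * (1 << 1) | (1 & 0) = 0xA; bits 3 and 2 are the per-half results,
 * bit 1 is "any", bit 0 is "all". */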
3419 #define HELPER_VECTOR_SPE_CMP(name) \
3420 uint32_t helper_ev##name (uint64_t op1, uint64_t op2) \
3422 return evcmp_merge(e##name(op1 >> 32, op2 >> 32), e##name(op1, op2)); \
3424 /* evfststlt */
3425 HELPER_VECTOR_SPE_CMP(fststlt);
3426 /* evfststgt */
3427 HELPER_VECTOR_SPE_CMP(fststgt);
3428 /* evfststeq */
3429 HELPER_VECTOR_SPE_CMP(fststeq);
3430 /* evfscmplt */
3431 HELPER_VECTOR_SPE_CMP(fscmplt);
3432 /* evfscmpgt */
3433 HELPER_VECTOR_SPE_CMP(fscmpgt);
3434 /* evfscmpeq */
3435 HELPER_VECTOR_SPE_CMP(fscmpeq);
3437 /* Double-precision floating-point conversions */
3438 uint64_t helper_efdcfsi (uint32_t val)
3440 CPU_DoubleU u;
3442 u.d = int32_to_float64(val, &env->vec_status);
3444 return u.ll;
3447 uint64_t helper_efdcfsid (uint64_t val)
3449 CPU_DoubleU u;
3451 u.d = int64_to_float64(val, &env->vec_status);
3453 return u.ll;
3456 uint64_t helper_efdcfui (uint32_t val)
3458 CPU_DoubleU u;
3460 u.d = uint32_to_float64(val, &env->vec_status);
3462 return u.ll;
3465 uint64_t helper_efdcfuid (uint64_t val)
3467 CPU_DoubleU u;
3469 u.d = uint64_to_float64(val, &env->vec_status);
3471 return u.ll;
3474 uint32_t helper_efdctsi (uint64_t val)
3476 CPU_DoubleU u;
3478 u.ll = val;
3479     /* NaNs are not treated the same way as IEEE 754 specifies */
3480 if (unlikely(float64_is_nan(u.d)))
3481 return 0;
3483 return float64_to_int32(u.d, &env->vec_status);
3486 uint32_t helper_efdctui (uint64_t val)
3488 CPU_DoubleU u;
3490 u.ll = val;
3491     /* NaNs are not treated the same way as IEEE 754 specifies */
3492 if (unlikely(float64_is_nan(u.d)))
3493 return 0;
3495 return float64_to_uint32(u.d, &env->vec_status);
3498 uint32_t helper_efdctsiz (uint64_t val)
3500 CPU_DoubleU u;
3502 u.ll = val;
3503     /* NaNs are not treated the same way as IEEE 754 specifies */
3504 if (unlikely(float64_is_nan(u.d)))
3505 return 0;
3507 return float64_to_int32_round_to_zero(u.d, &env->vec_status);
3510 uint64_t helper_efdctsidz (uint64_t val)
3512 CPU_DoubleU u;
3514 u.ll = val;
3515     /* NaNs are not treated the same way as IEEE 754 specifies */
3516 if (unlikely(float64_is_nan(u.d)))
3517 return 0;
3519 return float64_to_int64_round_to_zero(u.d, &env->vec_status);
3522 uint32_t helper_efdctuiz (uint64_t val)
3524 CPU_DoubleU u;
3526 u.ll = val;
3527     /* NaNs are not treated the same way as IEEE 754 specifies */
3528 if (unlikely(float64_is_nan(u.d)))
3529 return 0;
3531 return float64_to_uint32_round_to_zero(u.d, &env->vec_status);
3534 uint64_t helper_efdctuidz (uint64_t val)
3536 CPU_DoubleU u;
3538 u.ll = val;
3539     /* NaNs are not treated the same way as IEEE 754 specifies */
3540 if (unlikely(float64_is_nan(u.d)))
3541 return 0;
3543 return float64_to_uint64_round_to_zero(u.d, &env->vec_status);
3546 uint64_t helper_efdcfsf (uint32_t val)
3548 CPU_DoubleU u;
3549 float64 tmp;
3551 u.d = int32_to_float64(val, &env->vec_status);
3552 tmp = int64_to_float64(1ULL << 32, &env->vec_status);
3553 u.d = float64_div(u.d, tmp, &env->vec_status);
3555 return u.ll;
3558 uint64_t helper_efdcfuf (uint32_t val)
3560 CPU_DoubleU u;
3561 float64 tmp;
3563 u.d = uint32_to_float64(val, &env->vec_status);
3564 tmp = int64_to_float64(1ULL << 32, &env->vec_status);
3565 u.d = float64_div(u.d, tmp, &env->vec_status);
3567 return u.ll;
3570 uint32_t helper_efdctsf (uint64_t val)
3572 CPU_DoubleU u;
3573 float64 tmp;
3575 u.ll = val;
3576     /* NaNs are not treated the same way as IEEE 754 specifies */
3577 if (unlikely(float64_is_nan(u.d)))
3578 return 0;
3579 tmp = uint64_to_float64(1ULL << 32, &env->vec_status);
3580 u.d = float64_mul(u.d, tmp, &env->vec_status);
3582 return float64_to_int32(u.d, &env->vec_status);
3585 uint32_t helper_efdctuf (uint64_t val)
3587 CPU_DoubleU u;
3588 float64 tmp;
3590 u.ll = val;
3591     /* NaNs are not treated the same way as IEEE 754 specifies */
3592 if (unlikely(float64_is_nan(u.d)))
3593 return 0;
3594 tmp = uint64_to_float64(1ULL << 32, &env->vec_status);
3595 u.d = float64_mul(u.d, tmp, &env->vec_status);
3597 return float64_to_uint32(u.d, &env->vec_status);
3600 uint32_t helper_efscfd (uint64_t val)
3602 CPU_DoubleU u1;
3603 CPU_FloatU u2;
3605 u1.ll = val;
3606 u2.f = float64_to_float32(u1.d, &env->vec_status);
3608 return u2.l;
3611 uint64_t helper_efdcfs (uint32_t val)
3613 CPU_DoubleU u2;
3614 CPU_FloatU u1;
3616 u1.l = val;
3617 u2.d = float32_to_float64(u1.f, &env->vec_status);
3619 return u2.ll;
3622 /* Double-precision floating-point arithmetic */
3623 uint64_t helper_efdadd (uint64_t op1, uint64_t op2)
3625 CPU_DoubleU u1, u2;
3626 u1.ll = op1;
3627 u2.ll = op2;
3628 u1.d = float64_add(u1.d, u2.d, &env->vec_status);
3629 return u1.ll;
3632 uint64_t helper_efdsub (uint64_t op1, uint64_t op2)
3634 CPU_DoubleU u1, u2;
3635 u1.ll = op1;
3636 u2.ll = op2;
3637 u1.d = float64_sub(u1.d, u2.d, &env->vec_status);
3638 return u1.ll;
3641 uint64_t helper_efdmul (uint64_t op1, uint64_t op2)
3643 CPU_DoubleU u1, u2;
3644 u1.ll = op1;
3645 u2.ll = op2;
3646 u1.d = float64_mul(u1.d, u2.d, &env->vec_status);
3647 return u1.ll;
3650 uint64_t helper_efddiv (uint64_t op1, uint64_t op2)
3652 CPU_DoubleU u1, u2;
3653 u1.ll = op1;
3654 u2.ll = op2;
3655 u1.d = float64_div(u1.d, u2.d, &env->vec_status);
3656 return u1.ll;
3659 /* Double-precision floating-point comparisons */
3660 uint32_t helper_efdtstlt (uint64_t op1, uint64_t op2)
3662 CPU_DoubleU u1, u2;
3663 u1.ll = op1;
3664 u2.ll = op2;
3665 return float64_lt(u1.d, u2.d, &env->vec_status) ? 4 : 0;
3668 uint32_t helper_efdtstgt (uint64_t op1, uint64_t op2)
3670 CPU_DoubleU u1, u2;
3671 u1.ll = op1;
3672 u2.ll = op2;
3673 return float64_le(u1.d, u2.d, &env->vec_status) ? 0 : 4;
3676 uint32_t helper_efdtsteq (uint64_t op1, uint64_t op2)
3678 CPU_DoubleU u1, u2;
3679 u1.ll = op1;
3680 u2.ll = op2;
3681 return float64_eq(u1.d, u2.d, &env->vec_status) ? 4 : 0;
3684 uint32_t helper_efdcmplt (uint64_t op1, uint64_t op2)
3686     /* XXX: TODO: test special values (NaN, infinities, ...) */
3687 return helper_efdtstlt(op1, op2);
3690 uint32_t helper_efdcmpgt (uint64_t op1, uint64_t op2)
3692     /* XXX: TODO: test special values (NaN, infinities, ...) */
3693 return helper_efdtstgt(op1, op2);
3696 uint32_t helper_efdcmpeq (uint64_t op1, uint64_t op2)
3698     /* XXX: TODO: test special values (NaN, infinities, ...) */
3699 return helper_efdtsteq(op1, op2);
3702 /*****************************************************************************/
3703 /* Softmmu support */
3704 #if !defined (CONFIG_USER_ONLY)
3706 #define MMUSUFFIX _mmu
3708 #define SHIFT 0
3709 #include "softmmu_template.h"
3711 #define SHIFT 1
3712 #include "softmmu_template.h"
3714 #define SHIFT 2
3715 #include "softmmu_template.h"
3717 #define SHIFT 3
3718 #include "softmmu_template.h"
3720 /* Try to fill the TLB and raise an exception on error. If retaddr is
3721    NULL, it means that the function was called from C code (i.e. not
3722    from generated code or from helper.c) */
3723 /* XXX: fix it to restore all registers */
3724 void tlb_fill (target_ulong addr, int is_write, int mmu_idx, void *retaddr)
3726 TranslationBlock *tb;
3727 CPUState *saved_env;
3728 unsigned long pc;
3729 int ret;
3731 /* XXX: hack to restore env in all cases, even if not called from
3732 generated code */
3733 saved_env = env;
3734 env = cpu_single_env;
3735 ret = cpu_ppc_handle_mmu_fault(env, addr, is_write, mmu_idx, 1);
3736 if (unlikely(ret != 0)) {
3737 if (likely(retaddr)) {
3738 /* now we have a real cpu fault */
3739 pc = (unsigned long)retaddr;
3740 tb = tb_find_pc(pc);
3741 if (likely(tb)) {
3742 /* the PC is inside the translated code. It means that we have
3743 a virtual CPU fault */
3744 cpu_restore_state(tb, env, pc, NULL);
3747 helper_raise_exception_err(env->exception_index, env->error_code);
3749 env = saved_env;
3752 /* Segment registers load and store */
3753 target_ulong helper_load_sr (target_ulong sr_num)
3755 #if defined(TARGET_PPC64)
3756 if (env->mmu_model & POWERPC_MMU_64)
3757 return ppc_load_sr(env, sr_num);
3758 #endif
3759 return env->sr[sr_num];
3762 void helper_store_sr (target_ulong sr_num, target_ulong val)
3764 ppc_store_sr(env, sr_num, val);
3767 /* SLB management */
3768 #if defined(TARGET_PPC64)
3769 target_ulong helper_load_slb (target_ulong slb_nr)
3771 return ppc_load_slb(env, slb_nr);
3774 void helper_store_slb (target_ulong rb, target_ulong rs)
3776 ppc_store_slb(env, rb, rs);
3779 void helper_slbia (void)
3781 ppc_slb_invalidate_all(env);
3784 void helper_slbie (target_ulong addr)
3786 ppc_slb_invalidate_one(env, addr);
3789 #endif /* defined(TARGET_PPC64) */
3791 /* TLB management */
3792 void helper_tlbia (void)
3794 ppc_tlb_invalidate_all(env);
3797 void helper_tlbie (target_ulong addr)
3799 ppc_tlb_invalidate_one(env, addr);
3802 /* Software-driven TLB management */
3803 /* PowerPC 602/603 software TLB load instruction helpers */
3804 static void do_6xx_tlb (target_ulong new_EPN, int is_code)
3806 target_ulong RPN, CMP, EPN;
3807 int way;
3809 RPN = env->spr[SPR_RPA];
3810 if (is_code) {
3811 CMP = env->spr[SPR_ICMP];
3812 EPN = env->spr[SPR_IMISS];
3813 } else {
3814 CMP = env->spr[SPR_DCMP];
3815 EPN = env->spr[SPR_DMISS];
3817 way = (env->spr[SPR_SRR1] >> 17) & 1;
3818 LOG_SWTLB("%s: EPN " ADDRX " " ADDRX " PTE0 " ADDRX
3819 " PTE1 " ADDRX " way %d\n",
3820 __func__, new_EPN, EPN, CMP, RPN, way);
3821 /* Store this TLB */
3822 ppc6xx_tlb_store(env, (uint32_t)(new_EPN & TARGET_PAGE_MASK),
3823 way, is_code, CMP, RPN);
3826 void helper_6xx_tlbd (target_ulong EPN)
3828 do_6xx_tlb(EPN, 0);
3831 void helper_6xx_tlbi (target_ulong EPN)
3833 do_6xx_tlb(EPN, 1);
3836 /* PowerPC 74xx software TLB load instruction helpers */
3837 static void do_74xx_tlb (target_ulong new_EPN, int is_code)
3839 target_ulong RPN, CMP, EPN;
3840 int way;
3842 RPN = env->spr[SPR_PTELO];
3843 CMP = env->spr[SPR_PTEHI];
3844 EPN = env->spr[SPR_TLBMISS] & ~0x3;
3845 way = env->spr[SPR_TLBMISS] & 0x3;
3846 LOG_SWTLB("%s: EPN " ADDRX " " ADDRX " PTE0 " ADDRX
3847 " PTE1 " ADDRX " way %d\n",
3848 __func__, new_EPN, EPN, CMP, RPN, way);
3849 /* Store this TLB */
3850 ppc6xx_tlb_store(env, (uint32_t)(new_EPN & TARGET_PAGE_MASK),
3851 way, is_code, CMP, RPN);
3854 void helper_74xx_tlbd (target_ulong EPN)
3856 do_74xx_tlb(EPN, 0);
3859 void helper_74xx_tlbi (target_ulong EPN)
3861 do_74xx_tlb(EPN, 1);
3864 static always_inline target_ulong booke_tlb_to_page_size (int size)
3866 return 1024 << (2 * size);
3869 static always_inline int booke_page_size_to_tlb (target_ulong page_size)
3871 int size;
3873 switch (page_size) {
3874 case 0x00000400UL:
3875 size = 0x0;
3876 break;
3877 case 0x00001000UL:
3878 size = 0x1;
3879 break;
3880 case 0x00004000UL:
3881 size = 0x2;
3882 break;
3883 case 0x00010000UL:
3884 size = 0x3;
3885 break;
3886 case 0x00040000UL:
3887 size = 0x4;
3888 break;
3889 case 0x00100000UL:
3890 size = 0x5;
3891 break;
3892 case 0x00400000UL:
3893 size = 0x6;
3894 break;
3895 case 0x01000000UL:
3896 size = 0x7;
3897 break;
3898 case 0x04000000UL:
3899 size = 0x8;
3900 break;
3901 case 0x10000000UL:
3902 size = 0x9;
3903 break;
3904 case 0x40000000UL:
3905 size = 0xA;
3906 break;
3907 #if defined (TARGET_PPC64)
3908 case 0x000100000000ULL:
3909 size = 0xB;
3910 break;
3911 case 0x000400000000ULL:
3912 size = 0xC;
3913 break;
3914 case 0x001000000000ULL:
3915 size = 0xD;
3916 break;
3917 case 0x004000000000ULL:
3918 size = 0xE;
3919 break;
3920 case 0x010000000000ULL:
3921 size = 0xF;
3922 break;
3923 #endif
3924 default:
3925 size = -1;
3926 break;
3929 return size;
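/* The two mappings are inverses over the architected sizes; worked
 * example: booke_tlb_to_page_size(4) = 1024 << 8 = 256 KB, and
 * booke_page_size_to_tlb(0x00040000) returns 4 again. */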
3932 /* Helpers for 4xx TLB management */
3933 target_ulong helper_4xx_tlbre_lo (target_ulong entry)
3935 ppcemb_tlb_t *tlb;
3936 target_ulong ret;
3937 int size;
3939 entry &= 0x3F;
3940 tlb = &env->tlb[entry].tlbe;
3941 ret = tlb->EPN;
3942 if (tlb->prot & PAGE_VALID)
3943 ret |= 0x400;
3944 size = booke_page_size_to_tlb(tlb->size);
3945 if (size < 0 || size > 0x7)
3946 size = 1;
3947 ret |= size << 7;
3948 env->spr[SPR_40x_PID] = tlb->PID;
3949 return ret;
3952 target_ulong helper_4xx_tlbre_hi (target_ulong entry)
3954 ppcemb_tlb_t *tlb;
3955 target_ulong ret;
3957 entry &= 0x3F;
3958 tlb = &env->tlb[entry].tlbe;
3959 ret = tlb->RPN;
3960 if (tlb->prot & PAGE_EXEC)
3961 ret |= 0x200;
3962 if (tlb->prot & PAGE_WRITE)
3963 ret |= 0x100;
3964 return ret;
3967 void helper_4xx_tlbwe_hi (target_ulong entry, target_ulong val)
3969 ppcemb_tlb_t *tlb;
3970 target_ulong page, end;
3972 LOG_SWTLB("%s entry %d val " ADDRX "\n", __func__, (int)entry, val);
3973 entry &= 0x3F;
3974 tlb = &env->tlb[entry].tlbe;
3975 /* Invalidate previous TLB (if it's valid) */
3976 if (tlb->prot & PAGE_VALID) {
3977 end = tlb->EPN + tlb->size;
3978 LOG_SWTLB("%s: invalidate old TLB %d start " ADDRX
3979 " end " ADDRX "\n", __func__, (int)entry, tlb->EPN, end);
3980 for (page = tlb->EPN; page < end; page += TARGET_PAGE_SIZE)
3981 tlb_flush_page(env, page);
3983 tlb->size = booke_tlb_to_page_size((val >> 7) & 0x7);
3984 /* We cannot handle TLB size < TARGET_PAGE_SIZE.
3985 * If this ever occurs, one should use the ppcemb target instead
3986 * of the ppc or ppc64 one
3988 if ((val & 0x40) && tlb->size < TARGET_PAGE_SIZE) {
3989 cpu_abort(env, "TLB size " TARGET_FMT_lu " < %u "
3990 "are not supported (%d)\n",
3991 tlb->size, TARGET_PAGE_SIZE, (int)((val >> 7) & 0x7));
3993 tlb->EPN = val & ~(tlb->size - 1);
3994 if (val & 0x40)
3995 tlb->prot |= PAGE_VALID;
3996 else
3997 tlb->prot &= ~PAGE_VALID;
3998 if (val & 0x20) {
3999 /* XXX: TO BE FIXED */
4000         cpu_abort(env, "Little-endian TLB entries are not supported for now\n");
4002 tlb->PID = env->spr[SPR_40x_PID]; /* PID */
4003 tlb->attr = val & 0xFF;
4004 LOG_SWTLB("%s: set up TLB %d RPN " PADDRX " EPN " ADDRX
4005 " size " ADDRX " prot %c%c%c%c PID %d\n", __func__,
4006 (int)entry, tlb->RPN, tlb->EPN, tlb->size,
4007 tlb->prot & PAGE_READ ? 'r' : '-',
4008 tlb->prot & PAGE_WRITE ? 'w' : '-',
4009 tlb->prot & PAGE_EXEC ? 'x' : '-',
4010 tlb->prot & PAGE_VALID ? 'v' : '-', (int)tlb->PID);
4011 /* Invalidate new TLB (if valid) */
4012 if (tlb->prot & PAGE_VALID) {
4013 end = tlb->EPN + tlb->size;
4014 LOG_SWTLB("%s: invalidate TLB %d start " ADDRX
4015 " end " ADDRX "\n", __func__, (int)entry, tlb->EPN, end);
4016 for (page = tlb->EPN; page < end; page += TARGET_PAGE_SIZE)
4017 tlb_flush_page(env, page);
4021 void helper_4xx_tlbwe_lo (target_ulong entry, target_ulong val)
4023 ppcemb_tlb_t *tlb;
4025 LOG_SWTLB("%s entry %i val " ADDRX "\n", __func__, (int)entry, val);
4026 entry &= 0x3F;
4027 tlb = &env->tlb[entry].tlbe;
4028 tlb->RPN = val & 0xFFFFFC00;
4029 tlb->prot = PAGE_READ;
4030 if (val & 0x200)
4031 tlb->prot |= PAGE_EXEC;
4032 if (val & 0x100)
4033 tlb->prot |= PAGE_WRITE;
4034 LOG_SWTLB("%s: set up TLB %d RPN " PADDRX " EPN " ADDRX
4035 " size " ADDRX " prot %c%c%c%c PID %d\n", __func__,
4036 (int)entry, tlb->RPN, tlb->EPN, tlb->size,
4037 tlb->prot & PAGE_READ ? 'r' : '-',
4038 tlb->prot & PAGE_WRITE ? 'w' : '-',
4039 tlb->prot & PAGE_EXEC ? 'x' : '-',
4040 tlb->prot & PAGE_VALID ? 'v' : '-', (int)tlb->PID);
4043 target_ulong helper_4xx_tlbsx (target_ulong address)
4045 return ppcemb_tlb_search(env, address, env->spr[SPR_40x_PID]);
4048 /* PowerPC 440 TLB management */
4049 void helper_440_tlbwe (uint32_t word, target_ulong entry, target_ulong value)
4051 ppcemb_tlb_t *tlb;
4052 target_ulong EPN, RPN, size;
4053 int do_flush_tlbs;
4055 LOG_SWTLB("%s word %d entry %d value " ADDRX "\n",
4056 __func__, word, (int)entry, value);
4057 do_flush_tlbs = 0;
4058 entry &= 0x3F;
4059 tlb = &env->tlb[entry].tlbe;
4060 switch (word) {
4061 default:
4062 /* Just here to please gcc */
4063 case 0:
4064 EPN = value & 0xFFFFFC00;
4065 if ((tlb->prot & PAGE_VALID) && EPN != tlb->EPN)
4066 do_flush_tlbs = 1;
4067 tlb->EPN = EPN;
4068 size = booke_tlb_to_page_size((value >> 4) & 0xF);
4069 if ((tlb->prot & PAGE_VALID) && tlb->size < size)
4070 do_flush_tlbs = 1;
4071 tlb->size = size;
4072 tlb->attr &= ~0x1;
4073 tlb->attr |= (value >> 8) & 1;
4074 if (value & 0x200) {
4075 tlb->prot |= PAGE_VALID;
4076 } else {
4077 if (tlb->prot & PAGE_VALID) {
4078 tlb->prot &= ~PAGE_VALID;
4079 do_flush_tlbs = 1;
4082 tlb->PID = env->spr[SPR_440_MMUCR] & 0x000000FF;
4083 if (do_flush_tlbs)
4084 tlb_flush(env, 1);
4085 break;
4086 case 1:
4087 RPN = value & 0xFFFFFC0F;
4088 if ((tlb->prot & PAGE_VALID) && tlb->RPN != RPN)
4089 tlb_flush(env, 1);
4090 tlb->RPN = RPN;
4091 break;
4092 case 2:
4093 tlb->attr = (tlb->attr & 0x1) | (value & 0x0000FF00);
4094 tlb->prot = tlb->prot & PAGE_VALID;
4095 if (value & 0x1)
4096 tlb->prot |= PAGE_READ << 4;
4097 if (value & 0x2)
4098 tlb->prot |= PAGE_WRITE << 4;
4099 if (value & 0x4)
4100 tlb->prot |= PAGE_EXEC << 4;
4101 if (value & 0x8)
4102 tlb->prot |= PAGE_READ;
4103 if (value & 0x10)
4104 tlb->prot |= PAGE_WRITE;
4105 if (value & 0x20)
4106 tlb->prot |= PAGE_EXEC;
4107 break;
4111 target_ulong helper_440_tlbre (uint32_t word, target_ulong entry)
4113 ppcemb_tlb_t *tlb;
4114 target_ulong ret;
4115 int size;
4117 entry &= 0x3F;
4118 tlb = &env->tlb[entry].tlbe;
4119 switch (word) {
4120 default:
4121 /* Just here to please gcc */
4122 case 0:
4123 ret = tlb->EPN;
4124 size = booke_page_size_to_tlb(tlb->size);
4125 if (size < 0 || size > 0xF)
4126 size = 1;
4127 ret |= size << 4;
4128 if (tlb->attr & 0x1)
4129 ret |= 0x100;
4130 if (tlb->prot & PAGE_VALID)
4131 ret |= 0x200;
4132 env->spr[SPR_440_MMUCR] &= ~0x000000FF;
4133 env->spr[SPR_440_MMUCR] |= tlb->PID;
4134 break;
4135 case 1:
4136 ret = tlb->RPN;
4137 break;
4138 case 2:
4139 ret = tlb->attr & ~0x1;
4140 if (tlb->prot & (PAGE_READ << 4))
4141 ret |= 0x1;
4142 if (tlb->prot & (PAGE_WRITE << 4))
4143 ret |= 0x2;
4144 if (tlb->prot & (PAGE_EXEC << 4))
4145 ret |= 0x4;
4146 if (tlb->prot & PAGE_READ)
4147 ret |= 0x8;
4148 if (tlb->prot & PAGE_WRITE)
4149 ret |= 0x10;
4150 if (tlb->prot & PAGE_EXEC)
4151 ret |= 0x20;
4152 break;
4154 return ret;
4157 target_ulong helper_440_tlbsx (target_ulong address)
4159 return ppcemb_tlb_search(env, address, env->spr[SPR_440_MMUCR] & 0xFF);
4162 #endif /* !CONFIG_USER_ONLY */