target-ppc: Add vcmp{eq, ge, gt, b}fp{, .} instructions
target-ppc/op_helper.c
/*
 *  PowerPC emulation helpers for qemu.
 *
 *  Copyright (c) 2003-2007 Jocelyn Mayer
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston MA 02110-1301 USA
 */
#include <string.h>
#include "exec.h"
#include "host-utils.h"
#include "helper.h"

#include "helper_regs.h"

//#define DEBUG_OP
//#define DEBUG_EXCEPTIONS
//#define DEBUG_SOFTWARE_TLB

#ifdef DEBUG_SOFTWARE_TLB
#  define LOG_SWTLB(...) qemu_log(__VA_ARGS__)
#else
#  define LOG_SWTLB(...) do { } while (0)
#endif
/*****************************************************************************/
/* Exceptions processing helpers */

void helper_raise_exception_err (uint32_t exception, uint32_t error_code)
{
#if 0
    printf("Raise exception %3x code : %d\n", exception, error_code);
#endif
    env->exception_index = exception;
    env->error_code = error_code;
    cpu_loop_exit();
}

void helper_raise_exception (uint32_t exception)
{
    helper_raise_exception_err(exception, 0);
}
/*****************************************************************************/
/* Registers load and stores */
target_ulong helper_load_cr (void)
{
    return (env->crf[0] << 28) |
           (env->crf[1] << 24) |
           (env->crf[2] << 20) |
           (env->crf[3] << 16) |
           (env->crf[4] << 12) |
           (env->crf[5] << 8) |
           (env->crf[6] << 4) |
           (env->crf[7] << 0);
}

void helper_store_cr (target_ulong val, uint32_t mask)
{
    int i, sh;

    for (i = 0, sh = 7; i < 8; i++, sh--) {
        if (mask & (1 << sh))
            env->crf[i] = (val >> (sh * 4)) & 0xFUL;
    }
}
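
/* Example (illustrative): with crf[0] = 0x4 (GT) and crf[7] = 0x2 (EQ) and
 * all other fields clear, helper_load_cr() returns 0x40000002; conversely,
 * helper_store_cr(0x40000002, 0x81) updates only crf[0] and crf[7], since
 * mask bit 7 selects crf[0] and mask bit 0 selects crf[7].
 */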
/*****************************************************************************/
/* SPR accesses */
void helper_load_dump_spr (uint32_t sprn)
{
    qemu_log("Read SPR %d %03x => " ADDRX "\n",
             sprn, sprn, env->spr[sprn]);
}

void helper_store_dump_spr (uint32_t sprn)
{
    qemu_log("Write SPR %d %03x <= " ADDRX "\n",
             sprn, sprn, env->spr[sprn]);
}

target_ulong helper_load_tbl (void)
{
    return cpu_ppc_load_tbl(env);
}

target_ulong helper_load_tbu (void)
{
    return cpu_ppc_load_tbu(env);
}

target_ulong helper_load_atbl (void)
{
    return cpu_ppc_load_atbl(env);
}

target_ulong helper_load_atbu (void)
{
    return cpu_ppc_load_atbu(env);
}

target_ulong helper_load_601_rtcl (void)
{
    return cpu_ppc601_load_rtcl(env);
}

target_ulong helper_load_601_rtcu (void)
{
    return cpu_ppc601_load_rtcu(env);
}
#if !defined(CONFIG_USER_ONLY)
#if defined (TARGET_PPC64)
void helper_store_asr (target_ulong val)
{
    ppc_store_asr(env, val);
}
#endif

void helper_store_sdr1 (target_ulong val)
{
    ppc_store_sdr1(env, val);
}

void helper_store_tbl (target_ulong val)
{
    cpu_ppc_store_tbl(env, val);
}

void helper_store_tbu (target_ulong val)
{
    cpu_ppc_store_tbu(env, val);
}

void helper_store_atbl (target_ulong val)
{
    cpu_ppc_store_atbl(env, val);
}

void helper_store_atbu (target_ulong val)
{
    cpu_ppc_store_atbu(env, val);
}

void helper_store_601_rtcl (target_ulong val)
{
    cpu_ppc601_store_rtcl(env, val);
}

void helper_store_601_rtcu (target_ulong val)
{
    cpu_ppc601_store_rtcu(env, val);
}

target_ulong helper_load_decr (void)
{
    return cpu_ppc_load_decr(env);
}

void helper_store_decr (target_ulong val)
{
    cpu_ppc_store_decr(env, val);
}

void helper_store_hid0_601 (target_ulong val)
{
    target_ulong hid0;

    hid0 = env->spr[SPR_HID0];
    if ((val ^ hid0) & 0x00000008) {
        /* Change current endianness */
        env->hflags &= ~(1 << MSR_LE);
        env->hflags_nmsr &= ~(1 << MSR_LE);
        env->hflags_nmsr |= (1 << MSR_LE) & (((val >> 3) & 1) << MSR_LE);
        env->hflags |= env->hflags_nmsr;
        qemu_log("%s: set endianness to %c => " ADDRX "\n",
                 __func__, val & 0x8 ? 'l' : 'b', env->hflags);
    }
    env->spr[SPR_HID0] = (uint32_t)val;
}

void helper_store_403_pbr (uint32_t num, target_ulong value)
{
    if (likely(env->pb[num] != value)) {
        env->pb[num] = value;
        /* Should be optimized */
        tlb_flush(env, 1);
    }
}

target_ulong helper_load_40x_pit (void)
{
    return load_40x_pit(env);
}

void helper_store_40x_pit (target_ulong val)
{
    store_40x_pit(env, val);
}

void helper_store_40x_dbcr0 (target_ulong val)
{
    store_40x_dbcr0(env, val);
}

void helper_store_40x_sler (target_ulong val)
{
    store_40x_sler(env, val);
}

void helper_store_booke_tcr (target_ulong val)
{
    store_booke_tcr(env, val);
}

void helper_store_booke_tsr (target_ulong val)
{
    store_booke_tsr(env, val);
}

void helper_store_ibatu (uint32_t nr, target_ulong val)
{
    ppc_store_ibatu(env, nr, val);
}

void helper_store_ibatl (uint32_t nr, target_ulong val)
{
    ppc_store_ibatl(env, nr, val);
}

void helper_store_dbatu (uint32_t nr, target_ulong val)
{
    ppc_store_dbatu(env, nr, val);
}

void helper_store_dbatl (uint32_t nr, target_ulong val)
{
    ppc_store_dbatl(env, nr, val);
}

void helper_store_601_batl (uint32_t nr, target_ulong val)
{
    ppc_store_ibatl_601(env, nr, val);
}

void helper_store_601_batu (uint32_t nr, target_ulong val)
{
    ppc_store_ibatu_601(env, nr, val);
}
#endif
/*****************************************************************************/
/* Memory load and stores */

static always_inline target_ulong addr_add(target_ulong addr, target_long arg)
{
#if defined(TARGET_PPC64)
    if (!msr_sf)
        return (uint32_t)(addr + arg);
    else
#endif
        return addr + arg;
}
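
/* Example (illustrative): in 32-bit mode (msr_sf clear) the effective
 * address wraps at 4 GiB, so addr_add(0xFFFFFFFC, 8) yields 0x00000004
 * rather than 0x1_00000004.
 */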
void helper_lmw (target_ulong addr, uint32_t reg)
{
    for (; reg < 32; reg++) {
        if (msr_le)
            env->gpr[reg] = bswap32(ldl(addr));
        else
            env->gpr[reg] = ldl(addr);
        addr = addr_add(addr, 4);
    }
}

void helper_stmw (target_ulong addr, uint32_t reg)
{
    for (; reg < 32; reg++) {
        if (msr_le)
            stl(addr, bswap32((uint32_t)env->gpr[reg]));
        else
            stl(addr, (uint32_t)env->gpr[reg]);
        addr = addr_add(addr, 4);
    }
}
void helper_lsw(target_ulong addr, uint32_t nb, uint32_t reg)
{
    int sh;

    for (; nb > 3; nb -= 4) {
        env->gpr[reg] = ldl(addr);
        reg = (reg + 1) % 32;
        addr = addr_add(addr, 4);
    }
    if (unlikely(nb > 0)) {
        env->gpr[reg] = 0;
        for (sh = 24; nb > 0; nb--, sh -= 8) {
            env->gpr[reg] |= ldub(addr) << sh;
            addr = addr_add(addr, 1);
        }
    }
}

/* PPC32 specification says we must generate an exception if
 * rA is in the range of registers to be loaded.
 * On the other hand, IBM says this is valid, but rA won't be loaded.
 * For now, I'll follow the spec...
 */
void helper_lswx(target_ulong addr, uint32_t reg, uint32_t ra, uint32_t rb)
{
    if (likely(xer_bc != 0)) {
        if (unlikely((ra != 0 && reg < ra && (reg + xer_bc) > ra) ||
                     (reg < rb && (reg + xer_bc) > rb))) {
            helper_raise_exception_err(POWERPC_EXCP_PROGRAM,
                                       POWERPC_EXCP_INVAL |
                                       POWERPC_EXCP_INVAL_LSWX);
        } else {
            helper_lsw(addr, xer_bc, reg);
        }
    }
}
void helper_stsw(target_ulong addr, uint32_t nb, uint32_t reg)
{
    int sh;

    for (; nb > 3; nb -= 4) {
        stl(addr, env->gpr[reg]);
        reg = (reg + 1) % 32;
        addr = addr_add(addr, 4);
    }
    if (unlikely(nb > 0)) {
        for (sh = 24; nb > 0; nb--, sh -= 8) {
            stb(addr, (env->gpr[reg] >> sh) & 0xFF);
            addr = addr_add(addr, 1);
        }
    }
}
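
/* Example (illustrative): helper_lsw(addr, 6, 5) loads a full word into r5
 * and packs the remaining 2 bytes, left-justified, into the high half of
 * r6; the low 16 bits of r6 are cleared. helper_stsw mirrors this layout
 * on the store side.
 */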
static void do_dcbz(target_ulong addr, int dcache_line_size)
{
    int i;

    addr &= ~(dcache_line_size - 1);
    for (i = 0; i < dcache_line_size; i += 4) {
        stl(addr + i, 0);
    }
    if (env->reserve == addr)
        env->reserve = (target_ulong)-1ULL;
}

void helper_dcbz(target_ulong addr)
{
    do_dcbz(addr, env->dcache_line_size);
}

void helper_dcbz_970(target_ulong addr)
{
    if (((env->spr[SPR_970_HID5] >> 7) & 0x3) == 1)
        do_dcbz(addr, 32);
    else
        do_dcbz(addr, env->dcache_line_size);
}
void helper_icbi(target_ulong addr)
{
    uint32_t tmp;

    addr &= ~(env->dcache_line_size - 1);
    /* Invalidate one cache line :
     * PowerPC specification says this is to be treated like a load
     * (not a fetch) by the MMU. To be sure it will be so,
     * do the load "by hand".
     */
    tmp = ldl(addr);
    tb_invalidate_page_range(addr, addr + env->icache_line_size);
}
// XXX: to be tested
target_ulong helper_lscbx (target_ulong addr, uint32_t reg, uint32_t ra, uint32_t rb)
{
    int i, c, d;

    d = 24;
    for (i = 0; i < xer_bc; i++) {
        c = ldub(addr);
        addr = addr_add(addr, 1);
        /* ra (if not 0) and rb are never modified */
        if (likely(reg != rb && (ra == 0 || reg != ra))) {
            env->gpr[reg] = (env->gpr[reg] & ~(0xFF << d)) | (c << d);
        }
        if (unlikely(c == xer_cmp))
            break;
        if (likely(d != 0)) {
            d -= 8;
        } else {
            d = 24;
            reg++;
            reg = reg & 0x1F;
        }
    }
    return i;
}
/*****************************************************************************/
/* Fixed point operations helpers */
#if defined(TARGET_PPC64)

/* multiply high word */
uint64_t helper_mulhd (uint64_t arg1, uint64_t arg2)
{
    uint64_t tl, th;

    muls64(&tl, &th, arg1, arg2);
    return th;
}

/* multiply high word unsigned */
uint64_t helper_mulhdu (uint64_t arg1, uint64_t arg2)
{
    uint64_t tl, th;

    mulu64(&tl, &th, arg1, arg2);
    return th;
}

uint64_t helper_mulldo (uint64_t arg1, uint64_t arg2)
{
    int64_t th;
    uint64_t tl;

    muls64(&tl, (uint64_t *)&th, arg1, arg2);
    /* If th != 0 && th != -1, then we had an overflow */
    if (likely((uint64_t)(th + 1) <= 1)) {
        env->xer &= ~(1 << XER_OV);
    } else {
        env->xer |= (1 << XER_OV) | (1 << XER_SO);
    }
    return (int64_t)tl;
}
#endif
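
/* Note on the overflow test in helper_mulldo above: (uint64_t)(th + 1) <= 1
 * is a branch-free way of checking th == 0 || th == -1, i.e. that the high
 * 64 bits of the 128-bit product are a pure sign extension.
 */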
target_ulong helper_cntlzw (target_ulong t)
{
    return clz32(t);
}

#if defined(TARGET_PPC64)
target_ulong helper_cntlzd (target_ulong t)
{
    return clz64(t);
}
#endif
/* shift right arithmetic helper */
target_ulong helper_sraw (target_ulong value, target_ulong shift)
{
    int32_t ret;

    if (likely(!(shift & 0x20))) {
        if (likely((uint32_t)shift != 0)) {
            shift &= 0x1f;
            ret = (int32_t)value >> shift;
            if (likely(ret >= 0 || (value & ((1 << shift) - 1)) == 0)) {
                env->xer &= ~(1 << XER_CA);
            } else {
                env->xer |= (1 << XER_CA);
            }
        } else {
            ret = (int32_t)value;
            env->xer &= ~(1 << XER_CA);
        }
    } else {
        ret = (int32_t)value >> 31;
        if (ret) {
            env->xer |= (1 << XER_CA);
        } else {
            env->xer &= ~(1 << XER_CA);
        }
    }
    return (target_long)ret;
}
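
/* Example (illustrative): sraw(-5, 1) returns -3 and sets XER[CA], because
 * the result is negative and a 1 bit was shifted out; sraw(-4, 1) returns
 * -2 with CA clear, since only 0 bits were shifted out.
 */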
#if defined(TARGET_PPC64)
target_ulong helper_srad (target_ulong value, target_ulong shift)
{
    int64_t ret;

    if (likely(!(shift & 0x40))) {
        if (likely((uint64_t)shift != 0)) {
            shift &= 0x3f;
            ret = (int64_t)value >> shift;
            if (likely(ret >= 0 || (value & ((1ULL << shift) - 1)) == 0)) {
                env->xer &= ~(1 << XER_CA);
            } else {
                env->xer |= (1 << XER_CA);
            }
        } else {
            ret = (int64_t)value;
            env->xer &= ~(1 << XER_CA);
        }
    } else {
        ret = (int64_t)value >> 63;
        if (ret) {
            env->xer |= (1 << XER_CA);
        } else {
            env->xer &= ~(1 << XER_CA);
        }
    }
    return ret;
}
#endif
target_ulong helper_popcntb (target_ulong val)
{
    val = (val & 0x55555555) + ((val >> 1) & 0x55555555);
    val = (val & 0x33333333) + ((val >> 2) & 0x33333333);
    val = (val & 0x0f0f0f0f) + ((val >> 4) & 0x0f0f0f0f);
    return val;
}

#if defined(TARGET_PPC64)
target_ulong helper_popcntb_64 (target_ulong val)
{
    val = (val & 0x5555555555555555ULL) + ((val >> 1) & 0x5555555555555555ULL);
    val = (val & 0x3333333333333333ULL) + ((val >> 2) & 0x3333333333333333ULL);
    val = (val & 0x0f0f0f0f0f0f0f0fULL) + ((val >> 4) & 0x0f0f0f0f0f0f0f0fULL);
    return val;
}
#endif
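
/* popcntb stores, in each byte of the result, the population count of the
 * corresponding source byte. Example (illustrative):
 * helper_popcntb(0x00000103) returns 0x00000102, since byte 0x03 has two
 * bits set and byte 0x01 has one.
 */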
/*****************************************************************************/
/* Floating point operations helpers */
uint64_t helper_float32_to_float64(uint32_t arg)
{
    CPU_FloatU f;
    CPU_DoubleU d;

    f.l = arg;
    d.d = float32_to_float64(f.f, &env->fp_status);
    return d.ll;
}

uint32_t helper_float64_to_float32(uint64_t arg)
{
    CPU_FloatU f;
    CPU_DoubleU d;

    d.ll = arg;
    f.f = float64_to_float32(d.d, &env->fp_status);
    return f.l;
}

static always_inline int isden (float64 d)
{
    CPU_DoubleU u;

    u.d = d;

    return ((u.ll >> 52) & 0x7FF) == 0;
}
uint32_t helper_compute_fprf (uint64_t arg, uint32_t set_fprf)
{
    CPU_DoubleU farg;
    int isneg;
    int ret;

    farg.ll = arg;
    isneg = float64_is_neg(farg.d);
    if (unlikely(float64_is_nan(farg.d))) {
        if (float64_is_signaling_nan(farg.d)) {
            /* Signaling NaN: flags are undefined */
            ret = 0x00;
        } else {
            /* Quiet NaN */
            ret = 0x11;
        }
    } else if (unlikely(float64_is_infinity(farg.d))) {
        /* +/- infinity */
        if (isneg)
            ret = 0x09;
        else
            ret = 0x05;
    } else {
        if (float64_is_zero(farg.d)) {
            /* +/- zero */
            if (isneg)
                ret = 0x12;
            else
                ret = 0x02;
        } else {
            if (isden(farg.d)) {
                /* Denormalized numbers */
                ret = 0x10;
            } else {
                /* Normalized numbers */
                ret = 0x00;
            }
            if (isneg) {
                ret |= 0x08;
            } else {
                ret |= 0x04;
            }
        }
    }
    if (set_fprf) {
        /* We update FPSCR_FPRF */
        env->fpscr &= ~(0x1F << FPSCR_FPRF);
        env->fpscr |= ret << FPSCR_FPRF;
    }
    /* We just need fpcc to update Rc1 */
    return ret & 0xF;
}
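
/* Summary of the 5-bit FPRF codes produced above: 0x11 quiet NaN,
 * 0x09 -infinity, 0x05 +infinity, 0x12 -0, 0x02 +0, 0x18 negative denormal,
 * 0x14 positive denormal, 0x08 negative normal, 0x04 positive normal.
 */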
/* Floating-point invalid operations exception */
static always_inline uint64_t fload_invalid_op_excp (int op)
{
    uint64_t ret = 0;
    int ve;

    ve = fpscr_ve;
    switch (op) {
    case POWERPC_EXCP_FP_VXSNAN:
        env->fpscr |= 1 << FPSCR_VXSNAN;
        break;
    case POWERPC_EXCP_FP_VXSOFT:
        env->fpscr |= 1 << FPSCR_VXSOFT;
        break;
    case POWERPC_EXCP_FP_VXISI:
        /* Magnitude subtraction of infinities */
        env->fpscr |= 1 << FPSCR_VXISI;
        goto update_arith;
    case POWERPC_EXCP_FP_VXIDI:
        /* Division of infinity by infinity */
        env->fpscr |= 1 << FPSCR_VXIDI;
        goto update_arith;
    case POWERPC_EXCP_FP_VXZDZ:
        /* Division of zero by zero */
        env->fpscr |= 1 << FPSCR_VXZDZ;
        goto update_arith;
    case POWERPC_EXCP_FP_VXIMZ:
        /* Multiplication of zero by infinity */
        env->fpscr |= 1 << FPSCR_VXIMZ;
        goto update_arith;
    case POWERPC_EXCP_FP_VXVC:
        /* Ordered comparison of NaN */
        env->fpscr |= 1 << FPSCR_VXVC;
        env->fpscr &= ~(0xF << FPSCR_FPCC);
        env->fpscr |= 0x11 << FPSCR_FPCC;
        /* We must update the target FPR before raising the exception */
        if (ve != 0) {
            env->exception_index = POWERPC_EXCP_PROGRAM;
            env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_VXVC;
            /* Update the floating-point enabled exception summary */
            env->fpscr |= 1 << FPSCR_FEX;
            /* Exception is deferred */
            ve = 0;
        }
        break;
    case POWERPC_EXCP_FP_VXSQRT:
        /* Square root of a negative number */
        env->fpscr |= 1 << FPSCR_VXSQRT;
    update_arith:
        env->fpscr &= ~((1 << FPSCR_FR) | (1 << FPSCR_FI));
        if (ve == 0) {
            /* Set the result to quiet NaN */
            ret = 0xFFF8000000000000ULL;
            env->fpscr &= ~(0xF << FPSCR_FPCC);
            env->fpscr |= 0x11 << FPSCR_FPCC;
        }
        break;
    case POWERPC_EXCP_FP_VXCVI:
        /* Invalid conversion */
        env->fpscr |= 1 << FPSCR_VXCVI;
        env->fpscr &= ~((1 << FPSCR_FR) | (1 << FPSCR_FI));
        if (ve == 0) {
            /* Set the result to quiet NaN */
            ret = 0xFFF8000000000000ULL;
            env->fpscr &= ~(0xF << FPSCR_FPCC);
            env->fpscr |= 0x11 << FPSCR_FPCC;
        }
        break;
    }
    /* Update the floating-point invalid operation summary */
    env->fpscr |= 1 << FPSCR_VX;
    /* Update the floating-point exception summary */
    env->fpscr |= 1 << FPSCR_FX;
    if (ve != 0) {
        /* Update the floating-point enabled exception summary */
        env->fpscr |= 1 << FPSCR_FEX;
        if (msr_fe0 != 0 || msr_fe1 != 0)
            helper_raise_exception_err(POWERPC_EXCP_PROGRAM, POWERPC_EXCP_FP | op);
    }
    return ret;
}
static always_inline void float_zero_divide_excp (void)
{
    env->fpscr |= 1 << FPSCR_ZX;
    env->fpscr &= ~((1 << FPSCR_FR) | (1 << FPSCR_FI));
    /* Update the floating-point exception summary */
    env->fpscr |= 1 << FPSCR_FX;
    if (fpscr_ze != 0) {
        /* Update the floating-point enabled exception summary */
        env->fpscr |= 1 << FPSCR_FEX;
        if (msr_fe0 != 0 || msr_fe1 != 0) {
            helper_raise_exception_err(POWERPC_EXCP_PROGRAM,
                                       POWERPC_EXCP_FP | POWERPC_EXCP_FP_ZX);
        }
    }
}

static always_inline void float_overflow_excp (void)
{
    env->fpscr |= 1 << FPSCR_OX;
    /* Update the floating-point exception summary */
    env->fpscr |= 1 << FPSCR_FX;
    if (fpscr_oe != 0) {
        /* XXX: should adjust the result */
        /* Update the floating-point enabled exception summary */
        env->fpscr |= 1 << FPSCR_FEX;
        /* We must update the target FPR before raising the exception */
        env->exception_index = POWERPC_EXCP_PROGRAM;
        env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_OX;
    } else {
        env->fpscr |= 1 << FPSCR_XX;
        env->fpscr |= 1 << FPSCR_FI;
    }
}

static always_inline void float_underflow_excp (void)
{
    env->fpscr |= 1 << FPSCR_UX;
    /* Update the floating-point exception summary */
    env->fpscr |= 1 << FPSCR_FX;
    if (fpscr_ue != 0) {
        /* XXX: should adjust the result */
        /* Update the floating-point enabled exception summary */
        env->fpscr |= 1 << FPSCR_FEX;
        /* We must update the target FPR before raising the exception */
        env->exception_index = POWERPC_EXCP_PROGRAM;
        env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_UX;
    }
}

static always_inline void float_inexact_excp (void)
{
    env->fpscr |= 1 << FPSCR_XX;
    /* Update the floating-point exception summary */
    env->fpscr |= 1 << FPSCR_FX;
    if (fpscr_xe != 0) {
        /* Update the floating-point enabled exception summary */
        env->fpscr |= 1 << FPSCR_FEX;
        /* We must update the target FPR before raising the exception */
        env->exception_index = POWERPC_EXCP_PROGRAM;
        env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_XX;
    }
}
static always_inline void fpscr_set_rounding_mode (void)
{
    int rnd_type;

    /* Set rounding mode */
    switch (fpscr_rn) {
    case 0:
        /* Best approximation (round to nearest) */
        rnd_type = float_round_nearest_even;
        break;
    case 1:
        /* Smaller magnitude (round toward zero) */
        rnd_type = float_round_to_zero;
        break;
    case 2:
        /* Round toward +infinity */
        rnd_type = float_round_up;
        break;
    default:
    case 3:
        /* Round toward -infinity */
        rnd_type = float_round_down;
        break;
    }
    set_float_rounding_mode(rnd_type, &env->fp_status);
}
void helper_fpscr_clrbit (uint32_t bit)
{
    int prev;

    prev = (env->fpscr >> bit) & 1;
    env->fpscr &= ~(1 << bit);
    if (prev == 1) {
        switch (bit) {
        case FPSCR_RN1:
        case FPSCR_RN:
            fpscr_set_rounding_mode();
            break;
        default:
            break;
        }
    }
}
void helper_fpscr_setbit (uint32_t bit)
{
    int prev;

    prev = (env->fpscr >> bit) & 1;
    env->fpscr |= 1 << bit;
    if (prev == 0) {
        switch (bit) {
        case FPSCR_VX:
            env->fpscr |= 1 << FPSCR_FX;
            if (fpscr_ve)
                goto raise_ve;
        case FPSCR_OX:
            env->fpscr |= 1 << FPSCR_FX;
            if (fpscr_oe)
                goto raise_oe;
            break;
        case FPSCR_UX:
            env->fpscr |= 1 << FPSCR_FX;
            if (fpscr_ue)
                goto raise_ue;
            break;
        case FPSCR_ZX:
            env->fpscr |= 1 << FPSCR_FX;
            if (fpscr_ze)
                goto raise_ze;
            break;
        case FPSCR_XX:
            env->fpscr |= 1 << FPSCR_FX;
            if (fpscr_xe)
                goto raise_xe;
            break;
        case FPSCR_VXSNAN:
        case FPSCR_VXISI:
        case FPSCR_VXIDI:
        case FPSCR_VXZDZ:
        case FPSCR_VXIMZ:
        case FPSCR_VXVC:
        case FPSCR_VXSOFT:
        case FPSCR_VXSQRT:
        case FPSCR_VXCVI:
            env->fpscr |= 1 << FPSCR_VX;
            env->fpscr |= 1 << FPSCR_FX;
            if (fpscr_ve != 0)
                goto raise_ve;
            break;
        case FPSCR_VE:
            if (fpscr_vx != 0) {
            raise_ve:
                env->error_code = POWERPC_EXCP_FP;
                if (fpscr_vxsnan)
                    env->error_code |= POWERPC_EXCP_FP_VXSNAN;
                if (fpscr_vxisi)
                    env->error_code |= POWERPC_EXCP_FP_VXISI;
                if (fpscr_vxidi)
                    env->error_code |= POWERPC_EXCP_FP_VXIDI;
                if (fpscr_vxzdz)
                    env->error_code |= POWERPC_EXCP_FP_VXZDZ;
                if (fpscr_vximz)
                    env->error_code |= POWERPC_EXCP_FP_VXIMZ;
                if (fpscr_vxvc)
                    env->error_code |= POWERPC_EXCP_FP_VXVC;
                if (fpscr_vxsoft)
                    env->error_code |= POWERPC_EXCP_FP_VXSOFT;
                if (fpscr_vxsqrt)
                    env->error_code |= POWERPC_EXCP_FP_VXSQRT;
                if (fpscr_vxcvi)
                    env->error_code |= POWERPC_EXCP_FP_VXCVI;
                goto raise_excp;
            }
            break;
        case FPSCR_OE:
            if (fpscr_ox != 0) {
            raise_oe:
                env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_OX;
                goto raise_excp;
            }
            break;
        case FPSCR_UE:
            if (fpscr_ux != 0) {
            raise_ue:
                env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_UX;
                goto raise_excp;
            }
            break;
        case FPSCR_ZE:
            if (fpscr_zx != 0) {
            raise_ze:
                env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_ZX;
                goto raise_excp;
            }
            break;
        case FPSCR_XE:
            if (fpscr_xx != 0) {
            raise_xe:
                env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_XX;
                goto raise_excp;
            }
            break;
        case FPSCR_RN1:
        case FPSCR_RN:
            fpscr_set_rounding_mode();
            break;
        default:
            break;
        raise_excp:
            /* Update the floating-point enabled exception summary */
            env->fpscr |= 1 << FPSCR_FEX;
            /* We have to update Rc1 before raising the exception */
            env->exception_index = POWERPC_EXCP_PROGRAM;
            break;
        }
    }
}
void helper_store_fpscr (uint64_t arg, uint32_t mask)
{
    /*
     * We use only the 32 LSB of the incoming fpr
     */
    uint32_t prev, new;
    int i;

    prev = env->fpscr;
    new = (uint32_t)arg;
    new &= ~0x60000000;
    new |= prev & 0x60000000;
    for (i = 0; i < 8; i++) {
        if (mask & (1 << i)) {
            env->fpscr &= ~(0xF << (4 * i));
            env->fpscr |= new & (0xF << (4 * i));
        }
    }
    /* Update VX and FEX */
    if (fpscr_ix != 0)
        env->fpscr |= 1 << FPSCR_VX;
    else
        env->fpscr &= ~(1 << FPSCR_VX);
    if ((fpscr_ex & fpscr_eex) != 0) {
        env->fpscr |= 1 << FPSCR_FEX;
        env->exception_index = POWERPC_EXCP_PROGRAM;
        /* XXX: we should compute it properly */
        env->error_code = POWERPC_EXCP_FP;
    }
    else
        env->fpscr &= ~(1 << FPSCR_FEX);
    fpscr_set_rounding_mode();
}
void helper_float_check_status (void)
{
#ifdef CONFIG_SOFTFLOAT
    if (env->exception_index == POWERPC_EXCP_PROGRAM &&
        (env->error_code & POWERPC_EXCP_FP)) {
        /* Deferred floating-point exception after target FPR update */
        if (msr_fe0 != 0 || msr_fe1 != 0)
            helper_raise_exception_err(env->exception_index, env->error_code);
    } else {
        int status = get_float_exception_flags(&env->fp_status);
        if (status & float_flag_divbyzero) {
            float_zero_divide_excp();
        } else if (status & float_flag_overflow) {
            float_overflow_excp();
        } else if (status & float_flag_underflow) {
            float_underflow_excp();
        } else if (status & float_flag_inexact) {
            float_inexact_excp();
        }
    }
#else
    if (env->exception_index == POWERPC_EXCP_PROGRAM &&
        (env->error_code & POWERPC_EXCP_FP)) {
        /* Deferred floating-point exception after target FPR update */
        if (msr_fe0 != 0 || msr_fe1 != 0)
            helper_raise_exception_err(env->exception_index, env->error_code);
    }
#endif
}

#ifdef CONFIG_SOFTFLOAT
void helper_reset_fpstatus (void)
{
    set_float_exception_flags(0, &env->fp_status);
}
#endif
/* fadd - fadd. */
uint64_t helper_fadd (uint64_t arg1, uint64_t arg2)
{
    CPU_DoubleU farg1, farg2;

    farg1.ll = arg1;
    farg2.ll = arg2;
#if USE_PRECISE_EMULATION
    if (unlikely(float64_is_signaling_nan(farg1.d) ||
                 float64_is_signaling_nan(farg2.d))) {
        /* sNaN addition */
        farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
    } else if (unlikely(float64_is_infinity(farg1.d) && float64_is_infinity(farg2.d) &&
                        float64_is_neg(farg1.d) != float64_is_neg(farg2.d))) {
        /* Magnitude subtraction of infinities */
        farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXISI);
    } else {
        farg1.d = float64_add(farg1.d, farg2.d, &env->fp_status);
    }
#else
    farg1.d = float64_add(farg1.d, farg2.d, &env->fp_status);
#endif
    return farg1.ll;
}
/* fsub - fsub. */
uint64_t helper_fsub (uint64_t arg1, uint64_t arg2)
{
    CPU_DoubleU farg1, farg2;

    farg1.ll = arg1;
    farg2.ll = arg2;
#if USE_PRECISE_EMULATION
    if (unlikely(float64_is_signaling_nan(farg1.d) ||
                 float64_is_signaling_nan(farg2.d))) {
        /* sNaN subtraction */
        farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
    } else if (unlikely(float64_is_infinity(farg1.d) && float64_is_infinity(farg2.d) &&
                        float64_is_neg(farg1.d) == float64_is_neg(farg2.d))) {
        /* Magnitude subtraction of infinities */
        farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXISI);
    } else {
        farg1.d = float64_sub(farg1.d, farg2.d, &env->fp_status);
    }
#else
    farg1.d = float64_sub(farg1.d, farg2.d, &env->fp_status);
#endif
    return farg1.ll;
}
/* fmul - fmul. */
uint64_t helper_fmul (uint64_t arg1, uint64_t arg2)
{
    CPU_DoubleU farg1, farg2;

    farg1.ll = arg1;
    farg2.ll = arg2;
#if USE_PRECISE_EMULATION
    if (unlikely(float64_is_signaling_nan(farg1.d) ||
                 float64_is_signaling_nan(farg2.d))) {
        /* sNaN multiplication */
        farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
    } else if (unlikely((float64_is_infinity(farg1.d) && float64_is_zero(farg2.d)) ||
                        (float64_is_zero(farg1.d) && float64_is_infinity(farg2.d)))) {
        /* Multiplication of zero by infinity */
        farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXIMZ);
    } else {
        farg1.d = float64_mul(farg1.d, farg2.d, &env->fp_status);
    }
#else
    farg1.d = float64_mul(farg1.d, farg2.d, &env->fp_status);
#endif
    return farg1.ll;
}
/* fdiv - fdiv. */
uint64_t helper_fdiv (uint64_t arg1, uint64_t arg2)
{
    CPU_DoubleU farg1, farg2;

    farg1.ll = arg1;
    farg2.ll = arg2;
#if USE_PRECISE_EMULATION
    if (unlikely(float64_is_signaling_nan(farg1.d) ||
                 float64_is_signaling_nan(farg2.d))) {
        /* sNaN division */
        farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
    } else if (unlikely(float64_is_infinity(farg1.d) && float64_is_infinity(farg2.d))) {
        /* Division of infinity by infinity */
        farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXIDI);
    } else if (unlikely(float64_is_zero(farg1.d) && float64_is_zero(farg2.d))) {
        /* Division of zero by zero */
        farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXZDZ);
    } else {
        farg1.d = float64_div(farg1.d, farg2.d, &env->fp_status);
    }
#else
    farg1.d = float64_div(farg1.d, farg2.d, &env->fp_status);
#endif
    return farg1.ll;
}
/* fabs */
uint64_t helper_fabs (uint64_t arg)
{
    CPU_DoubleU farg;

    farg.ll = arg;
    farg.d = float64_abs(farg.d);
    return farg.ll;
}

/* fnabs */
uint64_t helper_fnabs (uint64_t arg)
{
    CPU_DoubleU farg;

    farg.ll = arg;
    farg.d = float64_abs(farg.d);
    farg.d = float64_chs(farg.d);
    return farg.ll;
}

/* fneg */
uint64_t helper_fneg (uint64_t arg)
{
    CPU_DoubleU farg;

    farg.ll = arg;
    farg.d = float64_chs(farg.d);
    return farg.ll;
}
/* fctiw - fctiw. */
uint64_t helper_fctiw (uint64_t arg)
{
    CPU_DoubleU farg;

    farg.ll = arg;

    if (unlikely(float64_is_signaling_nan(farg.d))) {
        /* sNaN conversion */
        farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN | POWERPC_EXCP_FP_VXCVI);
    } else if (unlikely(float64_is_nan(farg.d) || float64_is_infinity(farg.d))) {
        /* qNan / infinity conversion */
        farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXCVI);
    } else {
        farg.ll = float64_to_int32(farg.d, &env->fp_status);
#if USE_PRECISE_EMULATION
        /* XXX: higher bits are not supposed to be significant.
         *      to make tests easier, return the same as a real PowerPC 750
         */
        farg.ll |= 0xFFF80000ULL << 32;
#endif
    }
    return farg.ll;
}

/* fctiwz - fctiwz. */
uint64_t helper_fctiwz (uint64_t arg)
{
    CPU_DoubleU farg;

    farg.ll = arg;

    if (unlikely(float64_is_signaling_nan(farg.d))) {
        /* sNaN conversion */
        farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN | POWERPC_EXCP_FP_VXCVI);
    } else if (unlikely(float64_is_nan(farg.d) || float64_is_infinity(farg.d))) {
        /* qNan / infinity conversion */
        farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXCVI);
    } else {
        farg.ll = float64_to_int32_round_to_zero(farg.d, &env->fp_status);
#if USE_PRECISE_EMULATION
        /* XXX: higher bits are not supposed to be significant.
         *      to make tests easier, return the same as a real PowerPC 750
         */
        farg.ll |= 0xFFF80000ULL << 32;
#endif
    }
    return farg.ll;
}
#if defined(TARGET_PPC64)
/* fcfid - fcfid. */
uint64_t helper_fcfid (uint64_t arg)
{
    CPU_DoubleU farg;

    farg.d = int64_to_float64(arg, &env->fp_status);
    return farg.ll;
}

/* fctid - fctid. */
uint64_t helper_fctid (uint64_t arg)
{
    CPU_DoubleU farg;

    farg.ll = arg;

    if (unlikely(float64_is_signaling_nan(farg.d))) {
        /* sNaN conversion */
        farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN | POWERPC_EXCP_FP_VXCVI);
    } else if (unlikely(float64_is_nan(farg.d) || float64_is_infinity(farg.d))) {
        /* qNan / infinity conversion */
        farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXCVI);
    } else {
        farg.ll = float64_to_int64(farg.d, &env->fp_status);
    }
    return farg.ll;
}

/* fctidz - fctidz. */
uint64_t helper_fctidz (uint64_t arg)
{
    CPU_DoubleU farg;

    farg.ll = arg;

    if (unlikely(float64_is_signaling_nan(farg.d))) {
        /* sNaN conversion */
        farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN | POWERPC_EXCP_FP_VXCVI);
    } else if (unlikely(float64_is_nan(farg.d) || float64_is_infinity(farg.d))) {
        /* qNan / infinity conversion */
        farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXCVI);
    } else {
        farg.ll = float64_to_int64_round_to_zero(farg.d, &env->fp_status);
    }
    return farg.ll;
}

#endif
static always_inline uint64_t do_fri (uint64_t arg, int rounding_mode)
{
    CPU_DoubleU farg;

    farg.ll = arg;

    if (unlikely(float64_is_signaling_nan(farg.d))) {
        /* sNaN round */
        farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN | POWERPC_EXCP_FP_VXCVI);
    } else if (unlikely(float64_is_nan(farg.d) || float64_is_infinity(farg.d))) {
        /* qNan / infinity round */
        farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXCVI);
    } else {
        set_float_rounding_mode(rounding_mode, &env->fp_status);
        farg.ll = float64_round_to_int(farg.d, &env->fp_status);
        /* Restore rounding mode from FPSCR */
        fpscr_set_rounding_mode();
    }
    return farg.ll;
}

uint64_t helper_frin (uint64_t arg)
{
    return do_fri(arg, float_round_nearest_even);
}

uint64_t helper_friz (uint64_t arg)
{
    return do_fri(arg, float_round_to_zero);
}

uint64_t helper_frip (uint64_t arg)
{
    return do_fri(arg, float_round_up);
}

uint64_t helper_frim (uint64_t arg)
{
    return do_fri(arg, float_round_down);
}
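
/* As implemented here (illustrative): frin rounds to nearest with ties to
 * even, so frin(2.5) = 2.0 and frin(3.5) = 4.0; friz truncates toward
 * zero, frip rounds toward +infinity, and frim toward -infinity.
 */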
/* fmadd - fmadd. */
uint64_t helper_fmadd (uint64_t arg1, uint64_t arg2, uint64_t arg3)
{
    CPU_DoubleU farg1, farg2, farg3;

    farg1.ll = arg1;
    farg2.ll = arg2;
    farg3.ll = arg3;
#if USE_PRECISE_EMULATION
    if (unlikely(float64_is_signaling_nan(farg1.d) ||
                 float64_is_signaling_nan(farg2.d) ||
                 float64_is_signaling_nan(farg3.d))) {
        /* sNaN operation */
        farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
    } else if (unlikely((float64_is_infinity(farg1.d) && float64_is_zero(farg2.d)) ||
                        (float64_is_zero(farg1.d) && float64_is_infinity(farg2.d)))) {
        /* Multiplication of zero by infinity */
        farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXIMZ);
    } else {
#ifdef FLOAT128
        /* This is the way the PowerPC specification defines it */
        float128 ft0_128, ft1_128;

        ft0_128 = float64_to_float128(farg1.d, &env->fp_status);
        ft1_128 = float64_to_float128(farg2.d, &env->fp_status);
        ft0_128 = float128_mul(ft0_128, ft1_128, &env->fp_status);
        if (unlikely(float128_is_infinity(ft0_128) && float64_is_infinity(farg3.d) &&
                     float128_is_neg(ft0_128) != float64_is_neg(farg3.d))) {
            /* Magnitude subtraction of infinities */
            farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXISI);
        } else {
            ft1_128 = float64_to_float128(farg3.d, &env->fp_status);
            ft0_128 = float128_add(ft0_128, ft1_128, &env->fp_status);
            farg1.d = float128_to_float64(ft0_128, &env->fp_status);
        }
#else
        /* This is OK on x86 hosts */
        farg1.d = (farg1.d * farg2.d) + farg3.d;
#endif
    }
#else
    farg1.d = float64_mul(farg1.d, farg2.d, &env->fp_status);
    farg1.d = float64_add(farg1.d, farg3.d, &env->fp_status);
#endif
    return farg1.ll;
}
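
/* Why the float128 detour above: a fused multiply-add rounds only once,
 * after the full-precision product and sum. Keeping the product exact in a
 * 128-bit intermediate reproduces that single rounding; the float64
 * mul-then-add fallback rounds twice and can differ in the last bit.
 */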
/* fmsub - fmsub. */
uint64_t helper_fmsub (uint64_t arg1, uint64_t arg2, uint64_t arg3)
{
    CPU_DoubleU farg1, farg2, farg3;

    farg1.ll = arg1;
    farg2.ll = arg2;
    farg3.ll = arg3;
#if USE_PRECISE_EMULATION
    if (unlikely(float64_is_signaling_nan(farg1.d) ||
                 float64_is_signaling_nan(farg2.d) ||
                 float64_is_signaling_nan(farg3.d))) {
        /* sNaN operation */
        farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
    } else if (unlikely((float64_is_infinity(farg1.d) && float64_is_zero(farg2.d)) ||
                        (float64_is_zero(farg1.d) && float64_is_infinity(farg2.d)))) {
        /* Multiplication of zero by infinity */
        farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXIMZ);
    } else {
#ifdef FLOAT128
        /* This is the way the PowerPC specification defines it */
        float128 ft0_128, ft1_128;

        ft0_128 = float64_to_float128(farg1.d, &env->fp_status);
        ft1_128 = float64_to_float128(farg2.d, &env->fp_status);
        ft0_128 = float128_mul(ft0_128, ft1_128, &env->fp_status);
        if (unlikely(float128_is_infinity(ft0_128) && float64_is_infinity(farg3.d) &&
                     float128_is_neg(ft0_128) == float64_is_neg(farg3.d))) {
            /* Magnitude subtraction of infinities */
            farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXISI);
        } else {
            ft1_128 = float64_to_float128(farg3.d, &env->fp_status);
            ft0_128 = float128_sub(ft0_128, ft1_128, &env->fp_status);
            farg1.d = float128_to_float64(ft0_128, &env->fp_status);
        }
#else
        /* This is OK on x86 hosts */
        farg1.d = (farg1.d * farg2.d) - farg3.d;
#endif
    }
#else
    farg1.d = float64_mul(farg1.d, farg2.d, &env->fp_status);
    farg1.d = float64_sub(farg1.d, farg3.d, &env->fp_status);
#endif
    return farg1.ll;
}
/* fnmadd - fnmadd. */
uint64_t helper_fnmadd (uint64_t arg1, uint64_t arg2, uint64_t arg3)
{
    CPU_DoubleU farg1, farg2, farg3;

    farg1.ll = arg1;
    farg2.ll = arg2;
    farg3.ll = arg3;

    if (unlikely(float64_is_signaling_nan(farg1.d) ||
                 float64_is_signaling_nan(farg2.d) ||
                 float64_is_signaling_nan(farg3.d))) {
        /* sNaN operation */
        farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
    } else if (unlikely((float64_is_infinity(farg1.d) && float64_is_zero(farg2.d)) ||
                        (float64_is_zero(farg1.d) && float64_is_infinity(farg2.d)))) {
        /* Multiplication of zero by infinity */
        farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXIMZ);
    } else {
#if USE_PRECISE_EMULATION
#ifdef FLOAT128
        /* This is the way the PowerPC specification defines it */
        float128 ft0_128, ft1_128;

        ft0_128 = float64_to_float128(farg1.d, &env->fp_status);
        ft1_128 = float64_to_float128(farg2.d, &env->fp_status);
        ft0_128 = float128_mul(ft0_128, ft1_128, &env->fp_status);
        if (unlikely(float128_is_infinity(ft0_128) && float64_is_infinity(farg3.d) &&
                     float128_is_neg(ft0_128) != float64_is_neg(farg3.d))) {
            /* Magnitude subtraction of infinities */
            farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXISI);
        } else {
            ft1_128 = float64_to_float128(farg3.d, &env->fp_status);
            ft0_128 = float128_add(ft0_128, ft1_128, &env->fp_status);
            farg1.d = float128_to_float64(ft0_128, &env->fp_status);
        }
#else
        /* This is OK on x86 hosts */
        farg1.d = (farg1.d * farg2.d) + farg3.d;
#endif
#else
        farg1.d = float64_mul(farg1.d, farg2.d, &env->fp_status);
        farg1.d = float64_add(farg1.d, farg3.d, &env->fp_status);
#endif
        if (likely(!float64_is_nan(farg1.d)))
            farg1.d = float64_chs(farg1.d);
    }
    return farg1.ll;
}
/* fnmsub - fnmsub. */
uint64_t helper_fnmsub (uint64_t arg1, uint64_t arg2, uint64_t arg3)
{
    CPU_DoubleU farg1, farg2, farg3;

    farg1.ll = arg1;
    farg2.ll = arg2;
    farg3.ll = arg3;

    if (unlikely(float64_is_signaling_nan(farg1.d) ||
                 float64_is_signaling_nan(farg2.d) ||
                 float64_is_signaling_nan(farg3.d))) {
        /* sNaN operation */
        farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
    } else if (unlikely((float64_is_infinity(farg1.d) && float64_is_zero(farg2.d)) ||
                        (float64_is_zero(farg1.d) && float64_is_infinity(farg2.d)))) {
        /* Multiplication of zero by infinity */
        farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXIMZ);
    } else {
#if USE_PRECISE_EMULATION
#ifdef FLOAT128
        /* This is the way the PowerPC specification defines it */
        float128 ft0_128, ft1_128;

        ft0_128 = float64_to_float128(farg1.d, &env->fp_status);
        ft1_128 = float64_to_float128(farg2.d, &env->fp_status);
        ft0_128 = float128_mul(ft0_128, ft1_128, &env->fp_status);
        if (unlikely(float128_is_infinity(ft0_128) && float64_is_infinity(farg3.d) &&
                     float128_is_neg(ft0_128) == float64_is_neg(farg3.d))) {
            /* Magnitude subtraction of infinities */
            farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXISI);
        } else {
            ft1_128 = float64_to_float128(farg3.d, &env->fp_status);
            ft0_128 = float128_sub(ft0_128, ft1_128, &env->fp_status);
            farg1.d = float128_to_float64(ft0_128, &env->fp_status);
        }
#else
        /* This is OK on x86 hosts */
        farg1.d = (farg1.d * farg2.d) - farg3.d;
#endif
#else
        farg1.d = float64_mul(farg1.d, farg2.d, &env->fp_status);
        farg1.d = float64_sub(farg1.d, farg3.d, &env->fp_status);
#endif
        if (likely(!float64_is_nan(farg1.d)))
            farg1.d = float64_chs(farg1.d);
    }
    return farg1.ll;
}
/* frsp - frsp. */
uint64_t helper_frsp (uint64_t arg)
{
    CPU_DoubleU farg;
    float32 f32;

    farg.ll = arg;

#if USE_PRECISE_EMULATION
    if (unlikely(float64_is_signaling_nan(farg.d))) {
        /* sNaN rounding to single precision */
        farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
    } else {
        f32 = float64_to_float32(farg.d, &env->fp_status);
        farg.d = float32_to_float64(f32, &env->fp_status);
    }
#else
    f32 = float64_to_float32(farg.d, &env->fp_status);
    farg.d = float32_to_float64(f32, &env->fp_status);
#endif
    return farg.ll;
}
/* fsqrt - fsqrt. */
uint64_t helper_fsqrt (uint64_t arg)
{
    CPU_DoubleU farg;

    farg.ll = arg;

    if (unlikely(float64_is_signaling_nan(farg.d))) {
        /* sNaN square root */
        farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
    } else if (unlikely(float64_is_neg(farg.d) && !float64_is_zero(farg.d))) {
        /* Square root of a negative nonzero number */
        farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSQRT);
    } else {
        farg.d = float64_sqrt(farg.d, &env->fp_status);
    }
    return farg.ll;
}
/* fre - fre. */
uint64_t helper_fre (uint64_t arg)
{
    CPU_DoubleU farg;

    farg.ll = arg;

    if (unlikely(float64_is_signaling_nan(farg.d))) {
        /* sNaN reciprocal */
        farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
    } else {
        farg.d = float64_div(float64_one, farg.d, &env->fp_status);
    }
    return farg.ll;
}
/* fres - fres. */
uint64_t helper_fres (uint64_t arg)
{
    CPU_DoubleU farg;
    float32 f32;

    farg.ll = arg;

    if (unlikely(float64_is_signaling_nan(farg.d))) {
        /* sNaN reciprocal */
        farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
    } else {
        farg.d = float64_div(float64_one, farg.d, &env->fp_status);
        f32 = float64_to_float32(farg.d, &env->fp_status);
        farg.d = float32_to_float64(f32, &env->fp_status);
    }
    return farg.ll;
}

/* frsqrte - frsqrte. */
uint64_t helper_frsqrte (uint64_t arg)
{
    CPU_DoubleU farg;
    float32 f32;

    farg.ll = arg;

    if (unlikely(float64_is_signaling_nan(farg.d))) {
        /* sNaN reciprocal square root */
        farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
    } else if (unlikely(float64_is_neg(farg.d) && !float64_is_zero(farg.d))) {
        /* Reciprocal square root of a negative nonzero number */
        farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSQRT);
    } else {
        farg.d = float64_sqrt(farg.d, &env->fp_status);
        farg.d = float64_div(float64_one, farg.d, &env->fp_status);
        f32 = float64_to_float32(farg.d, &env->fp_status);
        farg.d = float32_to_float64(f32, &env->fp_status);
    }
    return farg.ll;
}
/* fsel - fsel. */
uint64_t helper_fsel (uint64_t arg1, uint64_t arg2, uint64_t arg3)
{
    CPU_DoubleU farg1;

    farg1.ll = arg1;

    if ((!float64_is_neg(farg1.d) || float64_is_zero(farg1.d)) && !float64_is_nan(farg1.d))
        return arg2;
    else
        return arg3;
}
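
/* fsel semantics as implemented above: return arg2 when arg1 is
 * non-negative (including both +0 and -0) and not a NaN; otherwise return
 * arg3. Example (illustrative): fsel with arg1 = -1.0 selects arg3, while
 * arg1 = -0.0 selects arg2.
 */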
void helper_fcmpu (uint64_t arg1, uint64_t arg2, uint32_t crfD)
{
    CPU_DoubleU farg1, farg2;
    uint32_t ret = 0;

    farg1.ll = arg1;
    farg2.ll = arg2;

    if (unlikely(float64_is_nan(farg1.d) ||
                 float64_is_nan(farg2.d))) {
        ret = 0x01UL;
    } else if (float64_lt(farg1.d, farg2.d, &env->fp_status)) {
        ret = 0x08UL;
    } else if (!float64_le(farg1.d, farg2.d, &env->fp_status)) {
        ret = 0x04UL;
    } else {
        ret = 0x02UL;
    }

    env->fpscr &= ~(0x0F << FPSCR_FPRF);
    env->fpscr |= ret << FPSCR_FPRF;
    env->crf[crfD] = ret;
    if (unlikely(ret == 0x01UL
                 && (float64_is_signaling_nan(farg1.d) ||
                     float64_is_signaling_nan(farg2.d)))) {
        /* sNaN comparison */
        fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
    }
}

void helper_fcmpo (uint64_t arg1, uint64_t arg2, uint32_t crfD)
{
    CPU_DoubleU farg1, farg2;
    uint32_t ret = 0;

    farg1.ll = arg1;
    farg2.ll = arg2;

    if (unlikely(float64_is_nan(farg1.d) ||
                 float64_is_nan(farg2.d))) {
        ret = 0x01UL;
    } else if (float64_lt(farg1.d, farg2.d, &env->fp_status)) {
        ret = 0x08UL;
    } else if (!float64_le(farg1.d, farg2.d, &env->fp_status)) {
        ret = 0x04UL;
    } else {
        ret = 0x02UL;
    }

    env->fpscr &= ~(0x0F << FPSCR_FPRF);
    env->fpscr |= ret << FPSCR_FPRF;
    env->crf[crfD] = ret;
    if (unlikely (ret == 0x01UL)) {
        if (float64_is_signaling_nan(farg1.d) ||
            float64_is_signaling_nan(farg2.d)) {
            /* sNaN comparison */
            fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN |
                                  POWERPC_EXCP_FP_VXVC);
        } else {
            /* qNaN comparison */
            fload_invalid_op_excp(POWERPC_EXCP_FP_VXVC);
        }
    }
}
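
/* The 4-bit result written to crfD (and mirrored into the FPRF field of the
 * FPSCR) encodes: 0x8 = less than, 0x4 = greater than, 0x2 = equal,
 * 0x1 = unordered (at least one operand is a NaN).
 */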
#if !defined (CONFIG_USER_ONLY)
void helper_store_msr (target_ulong val)
{
    val = hreg_store_msr(env, val, 0);
    if (val != 0) {
        env->interrupt_request |= CPU_INTERRUPT_EXITTB;
        helper_raise_exception(val);
    }
}

static always_inline void do_rfi (target_ulong nip, target_ulong msr,
                                  target_ulong msrm, int keep_msrh)
{
#if defined(TARGET_PPC64)
    if (msr & (1ULL << MSR_SF)) {
        nip = (uint64_t)nip;
        msr &= (uint64_t)msrm;
    } else {
        nip = (uint32_t)nip;
        msr = (uint32_t)(msr & msrm);
        if (keep_msrh)
            msr |= env->msr & ~((uint64_t)0xFFFFFFFF);
    }
#else
    nip = (uint32_t)nip;
    msr &= (uint32_t)msrm;
#endif
    /* XXX: beware: this is false if VLE is supported */
    env->nip = nip & ~((target_ulong)0x00000003);
    hreg_store_msr(env, msr, 1);
#if defined (DEBUG_OP)
    cpu_dump_rfi(env->nip, env->msr);
#endif
    /* No need to raise an exception here,
     * as rfi is always the last insn of a TB
     */
    env->interrupt_request |= CPU_INTERRUPT_EXITTB;
}

void helper_rfi (void)
{
    do_rfi(env->spr[SPR_SRR0], env->spr[SPR_SRR1],
           ~((target_ulong)0xFFFF0000), 1);
}

#if defined(TARGET_PPC64)
void helper_rfid (void)
{
    do_rfi(env->spr[SPR_SRR0], env->spr[SPR_SRR1],
           ~((target_ulong)0xFFFF0000), 0);
}

void helper_hrfid (void)
{
    do_rfi(env->spr[SPR_HSRR0], env->spr[SPR_HSRR1],
           ~((target_ulong)0xFFFF0000), 0);
}
#endif
#endif
void helper_tw (target_ulong arg1, target_ulong arg2, uint32_t flags)
{
    if (!likely(!(((int32_t)arg1 < (int32_t)arg2 && (flags & 0x10)) ||
                  ((int32_t)arg1 > (int32_t)arg2 && (flags & 0x08)) ||
                  ((int32_t)arg1 == (int32_t)arg2 && (flags & 0x04)) ||
                  ((uint32_t)arg1 < (uint32_t)arg2 && (flags & 0x02)) ||
                  ((uint32_t)arg1 > (uint32_t)arg2 && (flags & 0x01))))) {
        helper_raise_exception_err(POWERPC_EXCP_PROGRAM, POWERPC_EXCP_TRAP);
    }
}

#if defined(TARGET_PPC64)
void helper_td (target_ulong arg1, target_ulong arg2, uint32_t flags)
{
    if (!likely(!(((int64_t)arg1 < (int64_t)arg2 && (flags & 0x10)) ||
                  ((int64_t)arg1 > (int64_t)arg2 && (flags & 0x08)) ||
                  ((int64_t)arg1 == (int64_t)arg2 && (flags & 0x04)) ||
                  ((uint64_t)arg1 < (uint64_t)arg2 && (flags & 0x02)) ||
                  ((uint64_t)arg1 > (uint64_t)arg2 && (flags & 0x01)))))
        helper_raise_exception_err(POWERPC_EXCP_PROGRAM, POWERPC_EXCP_TRAP);
}
#endif
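
/* The 5-bit TO field maps to flags as used above: 0x10 = signed less-than,
 * 0x08 = signed greater-than, 0x04 = equal, 0x02 = unsigned less-than,
 * 0x01 = unsigned greater-than; the trap is taken when any selected
 * condition holds.
 */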
/*****************************************************************************/
/* PowerPC 601 specific instructions (POWER bridge) */

target_ulong helper_clcs (uint32_t arg)
{
    switch (arg) {
    case 0x0CUL:
        /* Instruction cache line size */
        return env->icache_line_size;
        break;
    case 0x0DUL:
        /* Data cache line size */
        return env->dcache_line_size;
        break;
    case 0x0EUL:
        /* Minimum cache line size */
        return (env->icache_line_size < env->dcache_line_size) ?
               env->icache_line_size : env->dcache_line_size;
        break;
    case 0x0FUL:
        /* Maximum cache line size */
        return (env->icache_line_size > env->dcache_line_size) ?
               env->icache_line_size : env->dcache_line_size;
        break;
    default:
        /* Undefined */
        return 0;
        break;
    }
}
target_ulong helper_div (target_ulong arg1, target_ulong arg2)
{
    uint64_t tmp = (uint64_t)arg1 << 32 | env->spr[SPR_MQ];

    if (((int32_t)tmp == INT32_MIN && (int32_t)arg2 == (int32_t)-1) ||
        (int32_t)arg2 == 0) {
        env->spr[SPR_MQ] = 0;
        return INT32_MIN;
    } else {
        env->spr[SPR_MQ] = tmp % arg2;
        return tmp / (int32_t)arg2;
    }
}

target_ulong helper_divo (target_ulong arg1, target_ulong arg2)
{
    uint64_t tmp = (uint64_t)arg1 << 32 | env->spr[SPR_MQ];

    if (((int32_t)tmp == INT32_MIN && (int32_t)arg2 == (int32_t)-1) ||
        (int32_t)arg2 == 0) {
        env->xer |= (1 << XER_OV) | (1 << XER_SO);
        env->spr[SPR_MQ] = 0;
        return INT32_MIN;
    } else {
        env->spr[SPR_MQ] = tmp % arg2;
        tmp /= (int32_t)arg2;
        if ((int32_t)tmp != tmp) {
            env->xer |= (1 << XER_OV) | (1 << XER_SO);
        } else {
            env->xer &= ~(1 << XER_OV);
        }
        return tmp;
    }
}

target_ulong helper_divs (target_ulong arg1, target_ulong arg2)
{
    if (((int32_t)arg1 == INT32_MIN && (int32_t)arg2 == (int32_t)-1) ||
        (int32_t)arg2 == 0) {
        env->spr[SPR_MQ] = 0;
        return INT32_MIN;
    } else {
        env->spr[SPR_MQ] = (int32_t)arg1 % (int32_t)arg2;
        return (int32_t)arg1 / (int32_t)arg2;
    }
}

target_ulong helper_divso (target_ulong arg1, target_ulong arg2)
{
    if (((int32_t)arg1 == INT32_MIN && (int32_t)arg2 == (int32_t)-1) ||
        (int32_t)arg2 == 0) {
        env->xer |= (1 << XER_OV) | (1 << XER_SO);
        env->spr[SPR_MQ] = 0;
        return INT32_MIN;
    } else {
        env->xer &= ~(1 << XER_OV);
        env->spr[SPR_MQ] = (int32_t)arg1 % (int32_t)arg2;
        return (int32_t)arg1 / (int32_t)arg2;
    }
}
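
/* Example (illustrative): helper_div and helper_divo divide the 64-bit
 * value formed by rA:MQ; with rA = 0, MQ = 100 and rB = 7, helper_div
 * returns the quotient 14 and leaves the remainder 2 in MQ.
 */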
#if !defined (CONFIG_USER_ONLY)
target_ulong helper_rac (target_ulong addr)
{
    mmu_ctx_t ctx;
    int nb_BATs;
    target_ulong ret = 0;

    /* We don't have to generate many instances of this instruction,
     * as rac is supervisor only.
     */
    /* XXX: FIX THIS: Pretend we have no BAT */
    nb_BATs = env->nb_BATs;
    env->nb_BATs = 0;
    if (get_physical_address(env, &ctx, addr, 0, ACCESS_INT) == 0)
        ret = ctx.raddr;
    env->nb_BATs = nb_BATs;
    return ret;
}

void helper_rfsvc (void)
{
    do_rfi(env->lr, env->ctr, 0x0000FFFF, 0);
}
#endif
/*****************************************************************************/
/* 602 specific instructions */
/* mfrom is the most crazy instruction ever seen, imho ! */
/* Real implementation uses a ROM table. Do the same */
/* Extremely decomposed:
 *     return 256 * log10(pow(10.0, -arg / 256.0) + 1.0) + 0.5
 */
#if !defined (CONFIG_USER_ONLY)
target_ulong helper_602_mfrom (target_ulong arg)
{
    if (likely(arg < 602)) {
#include "mfrom_table.c"
        return mfrom_ROM_table[arg];
    } else {
        return 0;
    }
}
#endif
/*****************************************************************************/
/* Embedded PowerPC specific helpers */

/* XXX: to be improved to check access rights when in user-mode */
target_ulong helper_load_dcr (target_ulong dcrn)
{
    target_ulong val = 0;

    if (unlikely(env->dcr_env == NULL)) {
        qemu_log("No DCR environment\n");
        helper_raise_exception_err(POWERPC_EXCP_PROGRAM,
                                   POWERPC_EXCP_INVAL | POWERPC_EXCP_INVAL_INVAL);
    } else if (unlikely(ppc_dcr_read(env->dcr_env, dcrn, &val) != 0)) {
        qemu_log("DCR read error %d %03x\n", (int)dcrn, (int)dcrn);
        helper_raise_exception_err(POWERPC_EXCP_PROGRAM,
                                   POWERPC_EXCP_INVAL | POWERPC_EXCP_PRIV_REG);
    }
    return val;
}

void helper_store_dcr (target_ulong dcrn, target_ulong val)
{
    if (unlikely(env->dcr_env == NULL)) {
        qemu_log("No DCR environment\n");
        helper_raise_exception_err(POWERPC_EXCP_PROGRAM,
                                   POWERPC_EXCP_INVAL | POWERPC_EXCP_INVAL_INVAL);
    } else if (unlikely(ppc_dcr_write(env->dcr_env, dcrn, val) != 0)) {
        qemu_log("DCR write error %d %03x\n", (int)dcrn, (int)dcrn);
        helper_raise_exception_err(POWERPC_EXCP_PROGRAM,
                                   POWERPC_EXCP_INVAL | POWERPC_EXCP_PRIV_REG);
    }
}
#if !defined(CONFIG_USER_ONLY)
void helper_40x_rfci (void)
{
    do_rfi(env->spr[SPR_40x_SRR2], env->spr[SPR_40x_SRR3],
           ~((target_ulong)0xFFFF0000), 0);
}

void helper_rfci (void)
{
    do_rfi(env->spr[SPR_BOOKE_CSRR0], env->spr[SPR_BOOKE_CSRR1],
           ~((target_ulong)0x3FFF0000), 0);
}

void helper_rfdi (void)
{
    do_rfi(env->spr[SPR_BOOKE_DSRR0], env->spr[SPR_BOOKE_DSRR1],
           ~((target_ulong)0x3FFF0000), 0);
}

void helper_rfmci (void)
{
    do_rfi(env->spr[SPR_BOOKE_MCSRR0], env->spr[SPR_BOOKE_MCSRR1],
           ~((target_ulong)0x3FFF0000), 0);
}
#endif
/* 440 specific */
target_ulong helper_dlmzb (target_ulong high, target_ulong low, uint32_t update_Rc)
{
    target_ulong mask;
    int i;

    i = 1;
    for (mask = 0xFF000000; mask != 0; mask = mask >> 8) {
        if ((high & mask) == 0) {
            if (update_Rc) {
                env->crf[0] = 0x4;
            }
            goto done;
        }
        i++;
    }
    for (mask = 0xFF000000; mask != 0; mask = mask >> 8) {
        if ((low & mask) == 0) {
            if (update_Rc) {
                env->crf[0] = 0x8;
            }
            goto done;
        }
        i++;
    }
    if (update_Rc) {
        env->crf[0] = 0x2;
    }
 done:
    env->xer = (env->xer & ~0x7F) | i;
    if (update_Rc) {
        env->crf[0] |= xer_so;
    }
    return i;
}
/*****************************************************************************/
/* Altivec extension helpers */
#if defined(WORDS_BIGENDIAN)
#define HI_IDX 0
#define LO_IDX 1
#else
#define HI_IDX 1
#define LO_IDX 0
#endif

#if defined(WORDS_BIGENDIAN)
#define VECTOR_FOR_INORDER_I(index, element)                    \
    for (index = 0; index < ARRAY_SIZE(r->element); index++)
#else
#define VECTOR_FOR_INORDER_I(index, element)                    \
    for (index = ARRAY_SIZE(r->element)-1; index >= 0; index--)
#endif

/* If X is a NaN, store the corresponding QNaN into RESULT.  Otherwise,
 * execute the following block.  */
#define DO_HANDLE_NAN(result, x)                                \
    if (float32_is_nan(x) || float32_is_signaling_nan(x)) {     \
        CPU_FloatU __f;                                         \
        __f.f = x;                                              \
        __f.l = __f.l | (1 << 22);  /* Set QNaN bit. */         \
        result = __f.f;                                         \
    } else

#define HANDLE_NAN1(result, x)                  \
    DO_HANDLE_NAN(result, x)
#define HANDLE_NAN2(result, x, y)               \
    DO_HANDLE_NAN(result, x) DO_HANDLE_NAN(result, y)
#define HANDLE_NAN3(result, x, y, z)            \
    DO_HANDLE_NAN(result, x) DO_HANDLE_NAN(result, y) DO_HANDLE_NAN(result, z)
/* Saturating arithmetic helpers.  */
#define SATCVT(from, to, from_type, to_type, min, max, use_min, use_max) \
    static always_inline to_type cvt##from##to (from_type x, int *sat)  \
    {                                                                   \
        to_type r;                                                      \
        if (use_min && x < min) {                                       \
            r = min;                                                    \
            *sat = 1;                                                   \
        } else if (use_max && x > max) {                                \
            r = max;                                                    \
            *sat = 1;                                                   \
        } else {                                                        \
            r = x;                                                      \
        }                                                               \
        return r;                                                       \
    }
SATCVT(sh, sb, int16_t, int8_t, INT8_MIN, INT8_MAX, 1, 1)
SATCVT(sw, sh, int32_t, int16_t, INT16_MIN, INT16_MAX, 1, 1)
SATCVT(sd, sw, int64_t, int32_t, INT32_MIN, INT32_MAX, 1, 1)
SATCVT(uh, ub, uint16_t, uint8_t, 0, UINT8_MAX, 0, 1)
SATCVT(uw, uh, uint32_t, uint16_t, 0, UINT16_MAX, 0, 1)
SATCVT(ud, uw, uint64_t, uint32_t, 0, UINT32_MAX, 0, 1)
SATCVT(sh, ub, int16_t, uint8_t, 0, UINT8_MAX, 1, 1)
SATCVT(sw, uh, int32_t, uint16_t, 0, UINT16_MAX, 1, 1)
SATCVT(sd, uw, int64_t, uint32_t, 0, UINT32_MAX, 1, 1)
#undef SATCVT
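
/* Example (illustrative): cvtshsb(300, &sat) saturates to INT8_MAX (127)
 * and sets *sat; cvtshub(-5, &sat) clamps to 0, since the signed-to-unsigned
 * converters enforce the lower bound as well.
 */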
#define LVE(name, access, swap, element)                        \
    void helper_##name (ppc_avr_t *r, target_ulong addr)        \
    {                                                           \
        size_t n_elems = ARRAY_SIZE(r->element);                \
        int adjust = HI_IDX*(n_elems-1);                        \
        int sh = sizeof(r->element[0]) >> 1;                    \
        int index = (addr & 0xf) >> sh;                         \
        if(msr_le) {                                            \
            r->element[LO_IDX ? index : (adjust - index)] = swap(access(addr)); \
        } else {                                                        \
            r->element[LO_IDX ? index : (adjust - index)] = access(addr); \
        }                                                               \
    }
#define I(x) (x)
LVE(lvebx, ldub, I, u8)
LVE(lvehx, lduw, bswap16, u16)
LVE(lvewx, ldl, bswap32, u32)
#undef I
#undef LVE

void helper_lvsl (ppc_avr_t *r, target_ulong sh)
{
    int i, j = (sh & 0xf);

    VECTOR_FOR_INORDER_I (i, u8) {
        r->u8[i] = j++;
    }
}

void helper_lvsr (ppc_avr_t *r, target_ulong sh)
{
    int i, j = 0x10 - (sh & 0xf);

    VECTOR_FOR_INORDER_I (i, u8) {
        r->u8[i] = j++;
    }
}
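
/* Example (illustrative): for an effective address whose low nibble is 3,
 * lvsl produces the byte pattern {3, 4, ..., 18} in element order. Guest
 * code conventionally feeds this to vperm over two adjacent quadwords to
 * extract misaligned data; that pairing is the usual idiom, not something
 * this helper itself enforces.
 */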
#define STVE(name, access, swap, element)                       \
    void helper_##name (ppc_avr_t *r, target_ulong addr)        \
    {                                                           \
        size_t n_elems = ARRAY_SIZE(r->element);                \
        int adjust = HI_IDX*(n_elems-1);                        \
        int sh = sizeof(r->element[0]) >> 1;                    \
        int index = (addr & 0xf) >> sh;                         \
        if(msr_le) {                                            \
            access(addr, swap(r->element[LO_IDX ? index : (adjust - index)])); \
        } else {                                                        \
            access(addr, r->element[LO_IDX ? index : (adjust - index)]); \
        }                                                               \
    }
#define I(x) (x)
STVE(stvebx, stb, I, u8)
STVE(stvehx, stw, bswap16, u16)
STVE(stvewx, stl, bswap32, u32)
#undef I
#undef STVE
void helper_mtvscr (ppc_avr_t *r)
{
#if defined(WORDS_BIGENDIAN)
    env->vscr = r->u32[3];
#else
    env->vscr = r->u32[0];
#endif
    set_flush_to_zero(vscr_nj, &env->vec_status);
}

void helper_vaddcuw (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
{
    int i;

    for (i = 0; i < ARRAY_SIZE(r->u32); i++) {
        r->u32[i] = ~a->u32[i] < b->u32[i];
    }
}
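
/* vaddcuw stores the carry out of each 32-bit add: a + b exceeds
 * UINT32_MAX exactly when b > ~a (i.e. b > 0xFFFFFFFF - a), so the
 * comparison above yields the carry bit directly.
 */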
#define VARITH_DO(name, op, element)                                    \
    void helper_v##name (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)      \
    {                                                                   \
        int i;                                                          \
        for (i = 0; i < ARRAY_SIZE(r->element); i++) {                  \
            r->element[i] = a->element[i] op b->element[i];             \
        }                                                               \
    }
#define VARITH(suffix, element)                 \
    VARITH_DO(add##suffix, +, element)          \
    VARITH_DO(sub##suffix, -, element)
VARITH(ubm, u8)
VARITH(uhm, u16)
VARITH(uwm, u32)
#undef VARITH_DO
#undef VARITH

#define VARITHFP(suffix, func)                                          \
    void helper_v##suffix (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)    \
    {                                                                   \
        int i;                                                          \
        for (i = 0; i < ARRAY_SIZE(r->f); i++) {                        \
            HANDLE_NAN2(r->f[i], a->f[i], b->f[i]) {                    \
                r->f[i] = func(a->f[i], b->f[i], &env->vec_status);     \
            }                                                           \
        }                                                               \
    }
VARITHFP(addfp, float32_add)
VARITHFP(subfp, float32_sub)
#undef VARITHFP
2116 #define VARITHSAT_CASE(type, op, cvt, element) \
2118 type result = (type)a->element[i] op (type)b->element[i]; \
2119 r->element[i] = cvt(result, &sat); \
2122 #define VARITHSAT_DO(name, op, optype, cvt, element) \
2123 void helper_v##name (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b) \
2125 int sat = 0; \
2126 int i; \
2127 for (i = 0; i < ARRAY_SIZE(r->element); i++) { \
2128 switch (sizeof(r->element[0])) { \
2129 case 1: VARITHSAT_CASE(optype, op, cvt, element); break; \
2130 case 2: VARITHSAT_CASE(optype, op, cvt, element); break; \
2131 case 4: VARITHSAT_CASE(optype, op, cvt, element); break; \
2134 if (sat) { \
2135 env->vscr |= (1 << VSCR_SAT); \
2138 #define VARITHSAT_SIGNED(suffix, element, optype, cvt) \
2139 VARITHSAT_DO(adds##suffix##s, +, optype, cvt, element) \
2140 VARITHSAT_DO(subs##suffix##s, -, optype, cvt, element)
2141 #define VARITHSAT_UNSIGNED(suffix, element, optype, cvt) \
2142 VARITHSAT_DO(addu##suffix##s, +, optype, cvt, element) \
2143 VARITHSAT_DO(subu##suffix##s, -, optype, cvt, element)
2144 VARITHSAT_SIGNED(b, s8, int16_t, cvtshsb)
2145 VARITHSAT_SIGNED(h, s16, int32_t, cvtswsh)
2146 VARITHSAT_SIGNED(w, s32, int64_t, cvtsdsw)
2147 VARITHSAT_UNSIGNED(b, u8, uint16_t, cvtshub)
2148 VARITHSAT_UNSIGNED(h, u16, uint32_t, cvtswuh)
2149 VARITHSAT_UNSIGNED(w, u32, uint64_t, cvtsduw)
2150 #undef VARITHSAT_CASE
2151 #undef VARITHSAT_DO
2152 #undef VARITHSAT_SIGNED
2153 #undef VARITHSAT_UNSIGNED
2155 #define VAVG_DO(name, element, etype) \
2156 void helper_v##name (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b) \
2158 int i; \
2159 for (i = 0; i < ARRAY_SIZE(r->element); i++) { \
2160 etype x = (etype)a->element[i] + (etype)b->element[i] + 1; \
2161 r->element[i] = x >> 1; \
2165 #define VAVG(type, signed_element, signed_type, unsigned_element, unsigned_type) \
2166 VAVG_DO(avgs##type, signed_element, signed_type) \
2167 VAVG_DO(avgu##type, unsigned_element, unsigned_type)
2168 VAVG(b, s8, int16_t, u8, uint16_t)
2169 VAVG(h, s16, int32_t, u16, uint32_t)
2170 VAVG(w, s32, int64_t, u32, uint64_t)
2171 #undef VAVG_DO
2172 #undef VAVG
2174 #define VCF(suffix, cvt, element) \
2175 void helper_vcf##suffix (ppc_avr_t *r, ppc_avr_t *b, uint32_t uim) \
2177 int i; \
2178 for (i = 0; i < ARRAY_SIZE(r->f); i++) { \
2179 float32 t = cvt(b->element[i], &env->vec_status); \
2180 r->f[i] = float32_scalbn (t, -uim, &env->vec_status); \
2183 VCF(ux, uint32_to_float32, u32)
2184 VCF(sx, int32_to_float32, s32)
2185 #undef VCF
2187 #define VCMP_DO(suffix, compare, element, record) \
2188 void helper_vcmp##suffix (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b) \
2190 uint32_t ones = (uint32_t)-1; \
2191 uint32_t all = ones; \
2192 uint32_t none = 0; \
2193 int i; \
2194 for (i = 0; i < ARRAY_SIZE(r->element); i++) { \
2195 uint32_t result = (a->element[i] compare b->element[i] ? ones : 0x0); \
2196 switch (sizeof (a->element[0])) { \
2197 case 4: r->u32[i] = result; break; \
2198 case 2: r->u16[i] = result; break; \
2199 case 1: r->u8[i] = result; break; \
2201 all &= result; \
2202 none |= result; \
2204 if (record) { \
2205 env->crf[6] = ((all != 0) << 3) | ((none == 0) << 1); \
2208 #define VCMP(suffix, compare, element) \
2209 VCMP_DO(suffix, compare, element, 0) \
2210 VCMP_DO(suffix##_dot, compare, element, 1)
2211 VCMP(equb, ==, u8)
2212 VCMP(equh, ==, u16)
2213 VCMP(equw, ==, u32)
2214 VCMP(gtub, >, u8)
2215 VCMP(gtuh, >, u16)
2216 VCMP(gtuw, >, u32)
2217 VCMP(gtsb, >, s8)
2218 VCMP(gtsh, >, s16)
2219 VCMP(gtsw, >, s32)
2220 #undef VCMP_DO
2221 #undef VCMP
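/* With the record (dot) forms, CR6 summarizes the comparison: bit 3
 * (value 0x8) means every element matched and bit 1 (value 0x2) means
 * none did, so 0x0 indicates a mix. A hypothetical caller, with r, a
 * and b being AVR operands: */
#if 0
helper_vcmpequw_dot(&r, &a, &b);
if (env->crf[6] & 0x8) {
    /* all four word elements compared equal */
}
#endif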
2223 #define VCMPFP_DO(suffix, compare, order, record) \
2224 void helper_vcmp##suffix (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b) \
2226 uint32_t ones = (uint32_t)-1; \
2227 uint32_t all = ones; \
2228 uint32_t none = 0; \
2229 int i; \
2230 for (i = 0; i < ARRAY_SIZE(r->f); i++) { \
2231 uint32_t result; \
2232 int rel = float32_compare_quiet(a->f[i], b->f[i], &env->vec_status); \
2233 if (rel == float_relation_unordered) { \
2234 result = 0; \
2235 } else if (rel compare order) { \
2236 result = ones; \
2237 } else { \
2238 result = 0; \
2240 r->u32[i] = result; \
2241 all &= result; \
2242 none |= result; \
2244 if (record) { \
2245 env->crf[6] = ((all != 0) << 3) | ((none == 0) << 1); \
2248 #define VCMPFP(suffix, compare, order) \
2249 VCMPFP_DO(suffix, compare, order, 0) \
2250 VCMPFP_DO(suffix##_dot, compare, order, 1)
2251 VCMPFP(eqfp, ==, float_relation_equal)
2252 VCMPFP(gefp, !=, float_relation_less)
2253 VCMPFP(gtfp, ==, float_relation_greater)
2254 #undef VCMPFP_DO
2255 #undef VCMPFP
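/* Note how vcmpgefp is encoded: float32_compare_quiet returns one of
 * {less, equal, greater, unordered}, and unordered lanes are forced to
 * zero first, so "rel != float_relation_less" is exactly a >= b, while
 * the eq/gt forms each test for a single relation value. */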
2257 static always_inline void vcmpbfp_internal (ppc_avr_t *r, ppc_avr_t *a,
2258 ppc_avr_t *b, int record)
2260 int i;
2261 int all_in = 0;
2262 for (i = 0; i < ARRAY_SIZE(r->f); i++) {
2263 int le_rel = float32_compare_quiet(a->f[i], b->f[i], &env->vec_status);
2264 if (le_rel == float_relation_unordered) {
2265 r->u32[i] = 0xc0000000;
2266 /* ALL_IN does not need to be updated here. */
2267 } else {
2268 float32 bneg = float32_chs(b->f[i]);
2269 int ge_rel = float32_compare_quiet(a->f[i], bneg, &env->vec_status);
2270 int le = le_rel != float_relation_greater;
2271 int ge = ge_rel != float_relation_less;
2272 r->u32[i] = ((!le) << 31) | ((!ge) << 30);
2273 all_in |= (!le | !ge);
2276 if (record) {
2277 env->crf[6] = (all_in == 0) << 1;
2281 void helper_vcmpbfp (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
2283 vcmpbfp_internal(r, a, b, 0);
2286 void helper_vcmpbfp_dot (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
2288 vcmpbfp_internal(r, a, b, 1);
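/* Per the loop above, each vcmpbfp lane sets bit 31 when a > b and
 * bit 30 when a < -b, so 0 means b bounds a (-b <= a <= b) and NaN
 * lanes report 0xc0000000. E.g. a = 3.0 against b = 2.0 yields
 * 0x80000000 and leaves the CR6 "all within bounds" bit clear. */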
2291 void helper_vmaddfp (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, ppc_avr_t *c)
2293 int i;
2294 for (i = 0; i < ARRAY_SIZE(r->f); i++) {
2295 HANDLE_NAN3(r->f[i], a->f[i], b->f[i], c->f[i]) {
2296 /* Need to do the computation in higher precision and round
2297 * once at the end. */
2298 float64 af, bf, cf, t;
2299 af = float32_to_float64(a->f[i], &env->vec_status);
2300 bf = float32_to_float64(b->f[i], &env->vec_status);
2301 cf = float32_to_float64(c->f[i], &env->vec_status);
2302 t = float64_mul(af, cf, &env->vec_status);
2303 t = float64_add(t, bf, &env->vec_status);
2304 r->f[i] = float64_to_float32(t, &env->vec_status);
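/* Why the detour through float64: the two 24-bit significands multiply
 * exactly into at most 48 bits (< 53), so only the add and the final
 * conversion round. This closely tracks a single-rounded fused
 * multiply-add, though the double rounding can differ in rare corner
 * cases. */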
2309 void helper_vmhaddshs (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, ppc_avr_t *c)
2311 int sat = 0;
2312 int i;
2314 for (i = 0; i < ARRAY_SIZE(r->s16); i++) {
2315 int32_t prod = a->s16[i] * b->s16[i];
2316 int32_t t = (int32_t)c->s16[i] + (prod >> 15);
2317 r->s16[i] = cvtswsh (t, &sat);
2320 if (sat) {
2321 env->vscr |= (1 << VSCR_SAT);
2325 void helper_vmhraddshs (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, ppc_avr_t *c)
2327 int sat = 0;
2328 int i;
2330 for (i = 0; i < ARRAY_SIZE(r->s16); i++) {
2331 int32_t prod = a->s16[i] * b->s16[i] + 0x00004000;
2332 int32_t t = (int32_t)c->s16[i] + (prod >> 15);
2333 r->s16[i] = cvtswsh (t, &sat);
2336 if (sat) {
2337 env->vscr |= (1 << VSCR_SAT);
2341 #define VMINMAX_DO(name, compare, element) \
2342 void helper_v##name (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b) \
2344 int i; \
2345 for (i = 0; i < ARRAY_SIZE(r->element); i++) { \
2346 if (a->element[i] compare b->element[i]) { \
2347 r->element[i] = b->element[i]; \
2348 } else { \
2349 r->element[i] = a->element[i]; \
2353 #define VMINMAX(suffix, element) \
2354 VMINMAX_DO(min##suffix, >, element) \
2355 VMINMAX_DO(max##suffix, <, element)
2356 VMINMAX(sb, s8)
2357 VMINMAX(sh, s16)
2358 VMINMAX(sw, s32)
2359 VMINMAX(ub, u8)
2360 VMINMAX(uh, u16)
2361 VMINMAX(uw, u32)
2362 #undef VMINMAX_DO
2363 #undef VMINMAX
2365 #define VMINMAXFP(suffix, rT, rF) \
2366 void helper_v##suffix (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b) \
2368 int i; \
2369 for (i = 0; i < ARRAY_SIZE(r->f); i++) { \
2370 HANDLE_NAN2(r->f[i], a->f[i], b->f[i]) { \
2371 if (float32_lt_quiet(a->f[i], b->f[i], &env->vec_status)) { \
2372 r->f[i] = rT->f[i]; \
2373 } else { \
2374 r->f[i] = rF->f[i]; \
2379 VMINMAXFP(minfp, a, b)
2380 VMINMAXFP(maxfp, b, a)
2381 #undef VMINMAXFP
2383 void helper_vmladduhm (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, ppc_avr_t *c)
2385 int i;
2386 for (i = 0; i < ARRAY_SIZE(r->s16); i++) {
2387 int32_t prod = a->s16[i] * b->s16[i];
2388 r->s16[i] = (int16_t) (prod + c->s16[i]);
2392 #define VMRG_DO(name, element, highp) \
2393 void helper_v##name (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b) \
2395 ppc_avr_t result; \
2396 int i; \
2397 size_t n_elems = ARRAY_SIZE(r->element); \
2398 for (i = 0; i < n_elems/2; i++) { \
2399 if (highp) { \
2400 result.element[i*2+HI_IDX] = a->element[i]; \
2401 result.element[i*2+LO_IDX] = b->element[i]; \
2402 } else { \
2403 result.element[n_elems - i*2 - (1+HI_IDX)] = b->element[n_elems - i - 1]; \
2404 result.element[n_elems - i*2 - (1+LO_IDX)] = a->element[n_elems - i - 1]; \
2407 *r = result; \
2409 #if defined(WORDS_BIGENDIAN)
2410 #define MRGHI 0
2411 #define MRGLO 1
2412 #else
2413 #define MRGHI 1
2414 #define MRGLO 0
2415 #endif
2416 #define VMRG(suffix, element) \
2417 VMRG_DO(mrgl##suffix, element, MRGHI) \
2418 VMRG_DO(mrgh##suffix, element, MRGLO)
2419 VMRG(b, u8)
2420 VMRG(h, u16)
2421 VMRG(w, u32)
2422 #undef VMRG_DO
2423 #undef VMRG
2424 #undef MRGHI
2425 #undef MRGLO
2427 void helper_vmsummbm (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, ppc_avr_t *c)
2429 int32_t prod[16];
2430 int i;
2432 for (i = 0; i < ARRAY_SIZE(r->s8); i++) {
2433 prod[i] = (int32_t)a->s8[i] * b->u8[i];
2436 VECTOR_FOR_INORDER_I(i, s32) {
2437 r->s32[i] = c->s32[i] + prod[4*i] + prod[4*i+1] + prod[4*i+2] + prod[4*i+3];
2441 void helper_vmsumshm (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, ppc_avr_t *c)
2443 int32_t prod[8];
2444 int i;
2446 for (i = 0; i < ARRAY_SIZE(r->s16); i++) {
2447 prod[i] = a->s16[i] * b->s16[i];
2450 VECTOR_FOR_INORDER_I(i, s32) {
2451 r->s32[i] = c->s32[i] + prod[2*i] + prod[2*i+1];
2455 void helper_vmsumshs (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, ppc_avr_t *c)
2457 int32_t prod[8];
2458 int i;
2459 int sat = 0;
2461 for (i = 0; i < ARRAY_SIZE(r->s16); i++) {
2462 prod[i] = (int32_t)a->s16[i] * b->s16[i];
2465 VECTOR_FOR_INORDER_I (i, s32) {
2466 int64_t t = (int64_t)c->s32[i] + prod[2*i] + prod[2*i+1];
2467 r->u32[i] = cvtsdsw(t, &sat);
2470 if (sat) {
2471 env->vscr |= (1 << VSCR_SAT);
2475 void helper_vmsumubm (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, ppc_avr_t *c)
2477 uint16_t prod[16];
2478 int i;
2480 for (i = 0; i < ARRAY_SIZE(r->u8); i++) {
2481 prod[i] = a->u8[i] * b->u8[i];
2484 VECTOR_FOR_INORDER_I(i, u32) {
2485 r->u32[i] = c->u32[i] + prod[4*i] + prod[4*i+1] + prod[4*i+2] + prod[4*i+3];
2489 void helper_vmsumuhm (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, ppc_avr_t *c)
2491 uint32_t prod[8];
2492 int i;
2494 for (i = 0; i < ARRAY_SIZE(r->u16); i++) {
2495 prod[i] = a->u16[i] * b->u16[i];
2498 VECTOR_FOR_INORDER_I(i, u32) {
2499 r->u32[i] = c->u32[i] + prod[2*i] + prod[2*i+1];
2503 void helper_vmsumuhs (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, ppc_avr_t *c)
2505 uint32_t prod[8];
2506 int i;
2507 int sat = 0;
2509 for (i = 0; i < ARRAY_SIZE(r->u16); i++) {
2510 prod[i] = a->u16[i] * b->u16[i];
2513 VECTOR_FOR_INORDER_I (i, s32) {
2514 uint64_t t = (uint64_t)c->u32[i] + prod[2*i] + prod[2*i+1];
2515 r->u32[i] = cvtuduw(t, &sat);
2518 if (sat) {
2519 env->vscr |= (1 << VSCR_SAT);
2523 #define VMUL_DO(name, mul_element, prod_element, evenp) \
2524 void helper_v##name (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b) \
2526 int i; \
2527 VECTOR_FOR_INORDER_I(i, prod_element) { \
2528 if (evenp) { \
2529 r->prod_element[i] = a->mul_element[i*2+HI_IDX] * b->mul_element[i*2+HI_IDX]; \
2530 } else { \
2531 r->prod_element[i] = a->mul_element[i*2+LO_IDX] * b->mul_element[i*2+LO_IDX]; \
2535 #define VMUL(suffix, mul_element, prod_element) \
2536 VMUL_DO(mule##suffix, mul_element, prod_element, 1) \
2537 VMUL_DO(mulo##suffix, mul_element, prod_element, 0)
2538 VMUL(sb, s8, s16)
2539 VMUL(sh, s16, s32)
2540 VMUL(ub, u8, u16)
2541 VMUL(uh, u16, u32)
2542 #undef VMUL_DO
2543 #undef VMUL
2545 void helper_vnmsubfp (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, ppc_avr_t *c)
2547 int i;
2548 for (i = 0; i < ARRAY_SIZE(r->f); i++) {
2549 HANDLE_NAN3(r->f[i], a->f[i], b->f[i], c->f[i]) {
2550 /* Need to do the computation in higher precision and round
2551 * once at the end. */
2552 float64 af, bf, cf, t;
2553 af = float32_to_float64(a->f[i], &env->vec_status);
2554 bf = float32_to_float64(b->f[i], &env->vec_status);
2555 cf = float32_to_float64(c->f[i], &env->vec_status);
2556 t = float64_mul(af, cf, &env->vec_status);
2557 t = float64_sub(t, bf, &env->vec_status);
2558 t = float64_chs(t);
2559 r->f[i] = float64_to_float32(t, &env->vec_status);
2564 void helper_vperm (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, ppc_avr_t *c)
2566 ppc_avr_t result;
2567 int i;
2568 VECTOR_FOR_INORDER_I (i, u8) {
2569 int s = c->u8[i] & 0x1f;
2570 #if defined(WORDS_BIGENDIAN)
2571 int index = s & 0xf;
2572 #else
2573 int index = 15 - (s & 0xf);
2574 #endif
2575 if (s & 0x10) {
2576 result.u8[i] = b->u8[index];
2577 } else {
2578 result.u8[i] = a->u8[index];
2581 *r = result;
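/* A sketch of driving the permute: each control byte selects one byte
 * of the 32-byte concatenation of a and b, bit 0x10 choosing b: */
#if 0
static void vperm_example (ppc_avr_t *a, ppc_avr_t *b, ppc_avr_t *r)
{
    ppc_avr_t c;
    memset(&c, 0x10, sizeof(c));  /* every result byte := byte 0 of b */
    helper_vperm(r, a, b, &c);
}
#endif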
2584 #if defined(WORDS_BIGENDIAN)
2585 #define PKBIG 1
2586 #else
2587 #define PKBIG 0
2588 #endif
2589 void helper_vpkpx (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
2591 int i, j;
2592 ppc_avr_t result;
2593 #if defined(WORDS_BIGENDIAN)
2594 const ppc_avr_t *x[2] = { a, b };
2595 #else
2596 const ppc_avr_t *x[2] = { b, a };
2597 #endif
2599 VECTOR_FOR_INORDER_I (i, u64) {
2600 VECTOR_FOR_INORDER_I (j, u32){
2601 uint32_t e = x[i]->u32[j];
2602 result.u16[4*i+j] = (((e >> 9) & 0xfc00) |
2603 ((e >> 6) & 0x3e0) |
2604 ((e >> 3) & 0x1f));
2607 *r = result;
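/* vpkpx keeps the low bit of the leading byte and the top five bits of
 * each colour byte, packing 8:8:8:8 pixels down to 1:5:5:5. Worked
 * lane: e = 0x01ff8040 packs to
 * (1 << 15) | (0x1f << 10) | (0x10 << 5) | 0x08 = 0xfe08. */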
2610 #define VPK(suffix, from, to, cvt, dosat) \
2611 void helper_vpk##suffix (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b) \
2613 int i; \
2614 int sat = 0; \
2615 ppc_avr_t result; \
2616 ppc_avr_t *a0 = PKBIG ? a : b; \
2617 ppc_avr_t *a1 = PKBIG ? b : a; \
2618 VECTOR_FOR_INORDER_I (i, from) { \
2619 result.to[i] = cvt(a0->from[i], &sat); \
2620 result.to[i+ARRAY_SIZE(r->from)] = cvt(a1->from[i], &sat); \
2622 *r = result; \
2623 if (dosat && sat) { \
2624 env->vscr |= (1 << VSCR_SAT); \
2627 #define I(x, y) (x)
2628 VPK(shss, s16, s8, cvtshsb, 1)
2629 VPK(shus, s16, u8, cvtshub, 1)
2630 VPK(swss, s32, s16, cvtswsh, 1)
2631 VPK(swus, s32, u16, cvtswuh, 1)
2632 VPK(uhus, u16, u8, cvtuhub, 1)
2633 VPK(uwus, u32, u16, cvtuwuh, 1)
2634 VPK(uhum, u16, u8, I, 0)
2635 VPK(uwum, u32, u16, I, 0)
2636 #undef I
2637 #undef VPK
2638 #undef PKBIG
2640 #define VRFI(suffix, rounding) \
2641 void helper_vrfi##suffix (ppc_avr_t *r, ppc_avr_t *b) \
2643 int i; \
2644 float_status s = env->vec_status; \
2645 set_float_rounding_mode(rounding, &s); \
2646 for (i = 0; i < ARRAY_SIZE(r->f); i++) { \
2647 HANDLE_NAN1(r->f[i], b->f[i]) { \
2648 r->f[i] = float32_round_to_int (b->f[i], &s); \
2652 VRFI(n, float_round_nearest_even)
2653 VRFI(m, float_round_down)
2654 VRFI(p, float_round_up)
2655 VRFI(z, float_round_to_zero)
2656 #undef VRFI
2658 #define VROTATE(suffix, element) \
2659 void helper_vrl##suffix (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b) \
2661 int i; \
2662 for (i = 0; i < ARRAY_SIZE(r->element); i++) { \
2663 unsigned int mask = ((1 << (3 + (sizeof (a->element[0]) >> 1))) - 1); \
2664 unsigned int shift = b->element[i] & mask; \
2665 r->element[i] = (a->element[i] << shift) | (a->element[i] >> (sizeof(a->element[0]) * 8 - shift)); \
2668 VROTATE(b, u8)
2669 VROTATE(h, u16)
2670 VROTATE(w, u32)
2671 #undef VROTATE
2673 void helper_vsel (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, ppc_avr_t *c)
2675 r->u64[0] = (a->u64[0] & ~c->u64[0]) | (b->u64[0] & c->u64[0]);
2676 r->u64[1] = (a->u64[1] & ~c->u64[1]) | (b->u64[1] & c->u64[1]);
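/* vsel is a pure bitwise select: each result bit comes from b where the
 * corresponding bit of c is set and from a where it is clear, so an
 * all-ones c copies b and an all-zeroes c copies a. */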
2679 void helper_vlogefp (ppc_avr_t *r, ppc_avr_t *b)
2681 int i;
2682 for (i = 0; i < ARRAY_SIZE(r->f); i++) {
2683 HANDLE_NAN1(r->f[i], b->f[i]) {
2684 r->f[i] = float32_log2(b->f[i], &env->vec_status);
2689 #if defined(WORDS_BIGENDIAN)
2690 #define LEFT 0
2691 #define RIGHT 1
2692 #else
2693 #define LEFT 1
2694 #define RIGHT 0
2695 #endif
2696 /* The specification says that the results are undefined unless all of
2697 * the shift counts are identical. We check that they are, to match
2698 * what real hardware appears to do. */
2699 #define VSHIFT(suffix, leftp) \
2700 void helper_vs##suffix (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b) \
2701 { \
2702 int shift = b->u8[LO_IDX*0xf] & 0x7; \
2703 int doit = 1; \
2704 int i; \
2705 for (i = 0; i < ARRAY_SIZE(r->u8); i++) { \
2706 doit = doit && ((b->u8[i] & 0x7) == shift); \
2707 } \
2708 if (doit) { \
2709 if (shift == 0) { \
2710 *r = *a; \
2711 } else if (leftp) { \
2712 uint64_t carry = a->u64[LO_IDX] >> (64 - shift); \
2713 r->u64[HI_IDX] = (a->u64[HI_IDX] << shift) | carry; \
2714 r->u64[LO_IDX] = a->u64[LO_IDX] << shift; \
2715 } else { \
2716 uint64_t carry = a->u64[HI_IDX] << (64 - shift); \
2717 r->u64[LO_IDX] = (a->u64[LO_IDX] >> shift) | carry; \
2718 r->u64[HI_IDX] = a->u64[HI_IDX] >> shift; \
2719 } \
2720 } \
2721 }
2722 VSHIFT(l, LEFT)
2723 VSHIFT(r, RIGHT)
2724 #undef VSHIFT
2725 #undef LEFT
2726 #undef RIGHT
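/* The 128-bit shift is stitched from the two u64 halves; for the left
 * case with shift = 4 the branch above is effectively
 *     carry = lo >> 60;           bits crossing into the high half
 *     hi    = (hi << 4) | carry;
 *     lo    = lo << 4;
 * and the shift == 0 special case avoids the undefined 64-bit shift
 * amount in the carry computation. */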
2728 #define VSL(suffix, element) \
2729 void helper_vsl##suffix (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b) \
2731 int i; \
2732 for (i = 0; i < ARRAY_SIZE(r->element); i++) { \
2733 unsigned int mask = ((1 << (3 + (sizeof (a->element[0]) >> 1))) - 1); \
2734 unsigned int shift = b->element[i] & mask; \
2735 r->element[i] = a->element[i] << shift; \
2738 VSL(b, u8)
2739 VSL(h, u16)
2740 VSL(w, u32)
2741 #undef VSL
2743 void helper_vsldoi (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, uint32_t shift)
2745 int sh = shift & 0xf;
2746 int i;
2747 ppc_avr_t result;
2749 #if defined(WORDS_BIGENDIAN)
2750 for (i = 0; i < ARRAY_SIZE(r->u8); i++) {
2751 int index = sh + i;
2752 if (index > 0xf) {
2753 result.u8[i] = b->u8[index-0x10];
2754 } else {
2755 result.u8[i] = a->u8[index];
2758 #else
2759 for (i = 0; i < ARRAY_SIZE(r->u8); i++) {
2760 int index = (16 - sh) + i;
2761 if (index > 0xf) {
2762 result.u8[i] = a->u8[index-0x10];
2763 } else {
2764 result.u8[i] = b->u8[index];
2767 #endif
2768 *r = result;
2771 void helper_vslo (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
2773 int sh = (b->u8[LO_IDX*0xf] >> 3) & 0xf;
2775 #if defined (WORDS_BIGENDIAN)
2776 memmove (&r->u8[0], &a->u8[sh], 16-sh);
2777 memset (&r->u8[16-sh], 0, sh);
2778 #else
2779 memmove (&r->u8[sh], &a->u8[0], 16-sh);
2780 memset (&r->u8[0], 0, sh);
2781 #endif
2784 /* Experimental testing shows that hardware masks the immediate. */
2785 #define _SPLAT_MASKED(element) (splat & (ARRAY_SIZE(r->element) - 1))
2786 #if defined(WORDS_BIGENDIAN)
2787 #define SPLAT_ELEMENT(element) _SPLAT_MASKED(element)
2788 #else
2789 #define SPLAT_ELEMENT(element) (ARRAY_SIZE(r->element)-1 - _SPLAT_MASKED(element))
2790 #endif
2791 #define VSPLT(suffix, element) \
2792 void helper_vsplt##suffix (ppc_avr_t *r, ppc_avr_t *b, uint32_t splat) \
2794 uint32_t s = b->element[SPLAT_ELEMENT(element)]; \
2795 int i; \
2796 for (i = 0; i < ARRAY_SIZE(r->element); i++) { \
2797 r->element[i] = s; \
2800 VSPLT(b, u8)
2801 VSPLT(h, u16)
2802 VSPLT(w, u32)
2803 #undef VSPLT
2804 #undef SPLAT_ELEMENT
2805 #undef _SPLAT_MASKED
2807 #define VSPLTI(suffix, element, splat_type) \
2808 void helper_vspltis##suffix (ppc_avr_t *r, uint32_t splat) \
2810 splat_type x = (int8_t)(splat << 3) >> 3; \
2811 int i; \
2812 for (i = 0; i < ARRAY_SIZE(r->element); i++) { \
2813 r->element[i] = x; \
2816 VSPLTI(b, s8, int8_t)
2817 VSPLTI(h, s16, int16_t)
2818 VSPLTI(w, s32, int32_t)
2819 #undef VSPLTI
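/* The (int8_t)(splat << 3) >> 3 trick sign-extends the 5-bit SIMM
 * field: e.g. splat = 0x1f gives (int8_t)0xf8 >> 3 = -1, so vspltisb
 * with SIMM = -1 fills the register with 0xff bytes. */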
2821 #define VSR(suffix, element) \
2822 void helper_vsr##suffix (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b) \
2824 int i; \
2825 for (i = 0; i < ARRAY_SIZE(r->element); i++) { \
2826 unsigned int mask = ((1 << (3 + (sizeof (a->element[0]) >> 1))) - 1); \
2827 unsigned int shift = b->element[i] & mask; \
2828 r->element[i] = a->element[i] >> shift; \
2831 VSR(ab, s8)
2832 VSR(ah, s16)
2833 VSR(aw, s32)
2834 VSR(b, u8)
2835 VSR(h, u16)
2836 VSR(w, u32)
2837 #undef VSR
2839 void helper_vsro (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
2841 int sh = (b->u8[LO_IDX*0xf] >> 3) & 0xf;
2843 #if defined (WORDS_BIGENDIAN)
2844 memmove (&r->u8[sh], &a->u8[0], 16-sh);
2845 memset (&r->u8[0], 0, sh);
2846 #else
2847 memmove (&r->u8[0], &a->u8[sh], 16-sh);
2848 memset (&r->u8[16-sh], 0, sh);
2849 #endif
2852 void helper_vsubcuw (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
2854 int i;
2855 for (i = 0; i < ARRAY_SIZE(r->u32); i++) {
2856 r->u32[i] = a->u32[i] >= b->u32[i];
2860 void helper_vsumsws (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
2862 int64_t t;
2863 int i, upper;
2864 ppc_avr_t result;
2865 int sat = 0;
2867 #if defined(WORDS_BIGENDIAN)
2868 upper = ARRAY_SIZE(r->s32)-1;
2869 #else
2870 upper = 0;
2871 #endif
2872 t = (int64_t)b->s32[upper];
2873 for (i = 0; i < ARRAY_SIZE(r->s32); i++) {
2874 t += a->s32[i];
2875 result.s32[i] = 0;
2877 result.s32[upper] = cvtsdsw(t, &sat);
2878 *r = result;
2880 if (sat) {
2881 env->vscr |= (1 << VSCR_SAT);
2885 void helper_vsum2sws (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
2887 int i, j, upper;
2888 ppc_avr_t result;
2889 int sat = 0;
2891 #if defined(WORDS_BIGENDIAN)
2892 upper = 1;
2893 #else
2894 upper = 0;
2895 #endif
2896 for (i = 0; i < ARRAY_SIZE(r->u64); i++) {
2897 int64_t t = (int64_t)b->s32[upper+i*2];
2898 result.u64[i] = 0;
2899 for (j = 0; j < ARRAY_SIZE(r->u64); j++) {
2900 t += a->s32[2*i+j];
2902 result.s32[upper+i*2] = cvtsdsw(t, &sat);
2905 *r = result;
2906 if (sat) {
2907 env->vscr |= (1 << VSCR_SAT);
2911 void helper_vsum4sbs (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
2913 int i, j;
2914 int sat = 0;
2916 for (i = 0; i < ARRAY_SIZE(r->s32); i++) {
2917 int64_t t = (int64_t)b->s32[i];
2918 for (j = 0; j < ARRAY_SIZE(r->s32); j++) {
2919 t += a->s8[4*i+j];
2921 r->s32[i] = cvtsdsw(t, &sat);
2924 if (sat) {
2925 env->vscr |= (1 << VSCR_SAT);
2929 void helper_vsum4shs (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
2931 int sat = 0;
2932 int i;
2934 for (i = 0; i < ARRAY_SIZE(r->s32); i++) {
2935 int64_t t = (int64_t)b->s32[i];
2936 t += a->s16[2*i] + a->s16[2*i+1];
2937 r->s32[i] = cvtsdsw(t, &sat);
2940 if (sat) {
2941 env->vscr |= (1 << VSCR_SAT);
2945 void helper_vsum4ubs (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
2947 int i, j;
2948 int sat = 0;
2950 for (i = 0; i < ARRAY_SIZE(r->u32); i++) {
2951 uint64_t t = (uint64_t)b->u32[i];
2952 for (j = 0; j < ARRAY_SIZE(r->u32); j++) {
2953 t += a->u8[4*i+j];
2955 r->u32[i] = cvtuduw(t, &sat);
2958 if (sat) {
2959 env->vscr |= (1 << VSCR_SAT);
2963 #if defined(WORDS_BIGENDIAN)
2964 #define UPKHI 1
2965 #define UPKLO 0
2966 #else
2967 #define UPKHI 0
2968 #define UPKLO 1
2969 #endif
2970 #define VUPKPX(suffix, hi) \
2971 void helper_vupk##suffix (ppc_avr_t *r, ppc_avr_t *b) \
2973 int i; \
2974 ppc_avr_t result; \
2975 for (i = 0; i < ARRAY_SIZE(r->u32); i++) { \
2976 uint16_t e = b->u16[hi ? i : i+4]; \
2977 uint8_t a = (e >> 15) ? 0xff : 0; \
2978 uint8_t r = (e >> 10) & 0x1f; \
2979 uint8_t g = (e >> 5) & 0x1f; \
2980 uint8_t b = e & 0x1f; \
2981 result.u32[i] = (a << 24) | (r << 16) | (g << 8) | b; \
2983 *r = result; \
2985 VUPKPX(lpx, UPKLO)
2986 VUPKPX(hpx, UPKHI)
2987 #undef VUPKPX
2989 #define VUPK(suffix, unpacked, packee, hi) \
2990 void helper_vupk##suffix (ppc_avr_t *r, ppc_avr_t *b) \
2992 int i; \
2993 ppc_avr_t result; \
2994 if (hi) { \
2995 for (i = 0; i < ARRAY_SIZE(r->unpacked); i++) { \
2996 result.unpacked[i] = b->packee[i]; \
2998 } else { \
2999 for (i = ARRAY_SIZE(r->unpacked); i < ARRAY_SIZE(r->packee); i++) { \
3000 result.unpacked[i-ARRAY_SIZE(r->unpacked)] = b->packee[i]; \
3003 *r = result; \
3005 VUPK(hsb, s16, s8, UPKHI)
3006 VUPK(hsh, s32, s16, UPKHI)
3007 VUPK(lsb, s16, s8, UPKLO)
3008 VUPK(lsh, s32, s16, UPKLO)
3009 #undef VUPK
3010 #undef UPKHI
3011 #undef UPKLO
3013 #undef DO_HANDLE_NAN
3014 #undef HANDLE_NAN1
3015 #undef HANDLE_NAN2
3016 #undef HANDLE_NAN3
3017 #undef VECTOR_FOR_INORDER_I
3018 #undef HI_IDX
3019 #undef LO_IDX
3021 /*****************************************************************************/
3022 /* SPE extension helpers */
3023 /* Use a table to make this quicker */
3024 static uint8_t hbrev[16] = {
3025 0x0, 0x8, 0x4, 0xC, 0x2, 0xA, 0x6, 0xE,
3026 0x1, 0x9, 0x5, 0xD, 0x3, 0xB, 0x7, 0xF,
3029 static always_inline uint8_t byte_reverse (uint8_t val)
3031 return hbrev[val >> 4] | (hbrev[val & 0xF] << 4);
3034 static always_inline uint32_t word_reverse (uint32_t val)
3036 return byte_reverse(val >> 24) | (byte_reverse(val >> 16) << 8) |
3037 (byte_reverse(val >> 8) << 16) | (byte_reverse(val) << 24);
3040 #define MASKBITS 16 // Arbitrary value - to be fixed (implementation dependent)
3041 target_ulong helper_brinc (target_ulong arg1, target_ulong arg2)
3043 uint32_t a, b, d, mask;
3045 mask = UINT32_MAX >> (32 - MASKBITS);
3046 a = arg1 & mask;
3047 b = arg2 & mask;
3048 d = word_reverse(1 + word_reverse(a | ~b));
3049 return (arg1 & ~mask) | (d & b);
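/* brinc steps an index in bit-reversed order (the classic FFT access
 * pattern): reversing a | ~b makes the increment's carry ripple from
 * the top of the masked window downwards. Worked example with a 3-bit
 * window (arg2 = 7): a = 6 (110) reverses to 011, increments to 100,
 * and reverses back to 001, so the next bit-reversed index is 1. */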
3052 uint32_t helper_cntlsw32 (uint32_t val)
3054 if (val & 0x80000000)
3055 return clz32(~val);
3056 else
3057 return clz32(val);
3060 uint32_t helper_cntlzw32 (uint32_t val)
3062 return clz32(val);
3065 /* Single-precision floating-point conversions */
3066 static always_inline uint32_t efscfsi (uint32_t val)
3068 CPU_FloatU u;
3070 u.f = int32_to_float32(val, &env->vec_status);
3072 return u.l;
3075 static always_inline uint32_t efscfui (uint32_t val)
3077 CPU_FloatU u;
3079 u.f = uint32_to_float32(val, &env->vec_status);
3081 return u.l;
3084 static always_inline int32_t efsctsi (uint32_t val)
3086 CPU_FloatU u;
3088 u.l = val;
3089 /* NaNs are not handled the way IEEE 754 specifies */
3090 if (unlikely(float32_is_nan(u.f)))
3091 return 0;
3093 return float32_to_int32(u.f, &env->vec_status);
3096 static always_inline uint32_t efsctui (uint32_t val)
3098 CPU_FloatU u;
3100 u.l = val;
3101 /* NaNs are not handled the way IEEE 754 specifies */
3102 if (unlikely(float32_is_nan(u.f)))
3103 return 0;
3105 return float32_to_uint32(u.f, &env->vec_status);
3108 static always_inline uint32_t efsctsiz (uint32_t val)
3110 CPU_FloatU u;
3112 u.l = val;
3113 /* NaNs are not handled the way IEEE 754 specifies */
3114 if (unlikely(float32_is_nan(u.f)))
3115 return 0;
3117 return float32_to_int32_round_to_zero(u.f, &env->vec_status);
3120 static always_inline uint32_t efsctuiz (uint32_t val)
3122 CPU_FloatU u;
3124 u.l = val;
3125 /* NaNs are not handled the way IEEE 754 specifies */
3126 if (unlikely(float32_is_nan(u.f)))
3127 return 0;
3129 return float32_to_uint32_round_to_zero(u.f, &env->vec_status);
3132 static always_inline uint32_t efscfsf (uint32_t val)
3134 CPU_FloatU u;
3135 float32 tmp;
3137 u.f = int32_to_float32(val, &env->vec_status);
3138 tmp = int64_to_float32(1ULL << 32, &env->vec_status);
3139 u.f = float32_div(u.f, tmp, &env->vec_status);
3141 return u.l;
3144 static always_inline uint32_t efscfuf (uint32_t val)
3146 CPU_FloatU u;
3147 float32 tmp;
3149 u.f = uint32_to_float32(val, &env->vec_status);
3150 tmp = uint64_to_float32(1ULL << 32, &env->vec_status);
3151 u.f = float32_div(u.f, tmp, &env->vec_status);
3153 return u.l;
3156 static always_inline uint32_t efsctsf (uint32_t val)
3158 CPU_FloatU u;
3159 float32 tmp;
3161 u.l = val;
3162 /* NaNs are not handled the way IEEE 754 specifies */
3163 if (unlikely(float32_is_nan(u.f)))
3164 return 0;
3165 tmp = uint64_to_float32(1ULL << 32, &env->vec_status);
3166 u.f = float32_mul(u.f, tmp, &env->vec_status);
3168 return float32_to_int32(u.f, &env->vec_status);
3171 static always_inline uint32_t efsctuf (uint32_t val)
3173 CPU_FloatU u;
3174 float32 tmp;
3176 u.l = val;
3177 /* NaNs are not handled the way IEEE 754 specifies */
3178 if (unlikely(float32_is_nan(u.f)))
3179 return 0;
3180 tmp = uint64_to_float32(1ULL << 32, &env->vec_status);
3181 u.f = float32_mul(u.f, tmp, &env->vec_status);
3183 return float32_to_uint32(u.f, &env->vec_status);
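/* The cfsf/cfuf and ctsf/ctuf helpers treat the 32-bit operand as a
 * pure fraction scaled by 2^32: converting in divides by 2^32 and
 * converting out multiplies by it, e.g.
 * efscfsf(0x40000000) = 2^30 / 2^32 = 0.25f. */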
3186 #define HELPER_SPE_SINGLE_CONV(name) \
3187 uint32_t helper_e##name (uint32_t val) \
3189 return e##name(val); \
3191 /* efscfsi */
3192 HELPER_SPE_SINGLE_CONV(fscfsi);
3193 /* efscfui */
3194 HELPER_SPE_SINGLE_CONV(fscfui);
3195 /* efscfuf */
3196 HELPER_SPE_SINGLE_CONV(fscfuf);
3197 /* efscfsf */
3198 HELPER_SPE_SINGLE_CONV(fscfsf);
3199 /* efsctsi */
3200 HELPER_SPE_SINGLE_CONV(fsctsi);
3201 /* efsctui */
3202 HELPER_SPE_SINGLE_CONV(fsctui);
3203 /* efsctsiz */
3204 HELPER_SPE_SINGLE_CONV(fsctsiz);
3205 /* efsctuiz */
3206 HELPER_SPE_SINGLE_CONV(fsctuiz);
3207 /* efsctsf */
3208 HELPER_SPE_SINGLE_CONV(fsctsf);
3209 /* efsctuf */
3210 HELPER_SPE_SINGLE_CONV(fsctuf);
3212 #define HELPER_SPE_VECTOR_CONV(name) \
3213 uint64_t helper_ev##name (uint64_t val) \
3215 return ((uint64_t)e##name(val >> 32) << 32) | \
3216 (uint64_t)e##name(val); \
3218 /* evfscfsi */
3219 HELPER_SPE_VECTOR_CONV(fscfsi);
3220 /* evfscfui */
3221 HELPER_SPE_VECTOR_CONV(fscfui);
3222 /* evfscfuf */
3223 HELPER_SPE_VECTOR_CONV(fscfuf);
3224 /* evfscfsf */
3225 HELPER_SPE_VECTOR_CONV(fscfsf);
3226 /* evfsctsi */
3227 HELPER_SPE_VECTOR_CONV(fsctsi);
3228 /* evfsctui */
3229 HELPER_SPE_VECTOR_CONV(fsctui);
3230 /* evfsctsiz */
3231 HELPER_SPE_VECTOR_CONV(fsctsiz);
3232 /* evfsctuiz */
3233 HELPER_SPE_VECTOR_CONV(fsctuiz);
3234 /* evfsctsf */
3235 HELPER_SPE_VECTOR_CONV(fsctsf);
3236 /* evfsctuf */
3237 HELPER_SPE_VECTOR_CONV(fsctuf);
3239 /* Single-precision floating-point arithmetic */
3240 static always_inline uint32_t efsadd (uint32_t op1, uint32_t op2)
3242 CPU_FloatU u1, u2;
3243 u1.l = op1;
3244 u2.l = op2;
3245 u1.f = float32_add(u1.f, u2.f, &env->vec_status);
3246 return u1.l;
3249 static always_inline uint32_t efssub (uint32_t op1, uint32_t op2)
3251 CPU_FloatU u1, u2;
3252 u1.l = op1;
3253 u2.l = op2;
3254 u1.f = float32_sub(u1.f, u2.f, &env->vec_status);
3255 return u1.l;
3258 static always_inline uint32_t efsmul (uint32_t op1, uint32_t op2)
3260 CPU_FloatU u1, u2;
3261 u1.l = op1;
3262 u2.l = op2;
3263 u1.f = float32_mul(u1.f, u2.f, &env->vec_status);
3264 return u1.l;
3267 static always_inline uint32_t efsdiv (uint32_t op1, uint32_t op2)
3269 CPU_FloatU u1, u2;
3270 u1.l = op1;
3271 u2.l = op2;
3272 u1.f = float32_div(u1.f, u2.f, &env->vec_status);
3273 return u1.l;
3276 #define HELPER_SPE_SINGLE_ARITH(name) \
3277 uint32_t helper_e##name (uint32_t op1, uint32_t op2) \
3279 return e##name(op1, op2); \
3281 /* efsadd */
3282 HELPER_SPE_SINGLE_ARITH(fsadd);
3283 /* efssub */
3284 HELPER_SPE_SINGLE_ARITH(fssub);
3285 /* efsmul */
3286 HELPER_SPE_SINGLE_ARITH(fsmul);
3287 /* efsdiv */
3288 HELPER_SPE_SINGLE_ARITH(fsdiv);
3290 #define HELPER_SPE_VECTOR_ARITH(name) \
3291 uint64_t helper_ev##name (uint64_t op1, uint64_t op2) \
3293 return ((uint64_t)e##name(op1 >> 32, op2 >> 32) << 32) | \
3294 (uint64_t)e##name(op1, op2); \
3296 /* evfsadd */
3297 HELPER_SPE_VECTOR_ARITH(fsadd);
3298 /* evfssub */
3299 HELPER_SPE_VECTOR_ARITH(fssub);
3300 /* evfsmul */
3301 HELPER_SPE_VECTOR_ARITH(fsmul);
3302 /* evfsdiv */
3303 HELPER_SPE_VECTOR_ARITH(fsdiv);
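/* For reference, the vector macro simply pairs two scalar operations on
 * the 64-bit halves; HELPER_SPE_VECTOR_ARITH(fsadd) hand-expands to: */
#if 0
uint64_t helper_evfsadd (uint64_t op1, uint64_t op2)
{
    return ((uint64_t)efsadd(op1 >> 32, op2 >> 32) << 32) |
           (uint64_t)efsadd(op1, op2);
}
#endif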
3305 /* Single-precision floating-point comparisons */
3306 static always_inline uint32_t efststlt (uint32_t op1, uint32_t op2)
3308 CPU_FloatU u1, u2;
3309 u1.l = op1;
3310 u2.l = op2;
3311 return float32_lt(u1.f, u2.f, &env->vec_status) ? 4 : 0;
3314 static always_inline uint32_t efststgt (uint32_t op1, uint32_t op2)
3316 CPU_FloatU u1, u2;
3317 u1.l = op1;
3318 u2.l = op2;
3319 return float32_le(u1.f, u2.f, &env->vec_status) ? 0 : 4;
3322 static always_inline uint32_t efststeq (uint32_t op1, uint32_t op2)
3324 CPU_FloatU u1, u2;
3325 u1.l = op1;
3326 u2.l = op2;
3327 return float32_eq(u1.f, u2.f, &env->vec_status) ? 4 : 0;
3330 static always_inline uint32_t efscmplt (uint32_t op1, uint32_t op2)
3332 /* XXX: TODO: test special values (NaN, infinites, ...) */
3333 return efststlt(op1, op2);
3336 static always_inline uint32_t efscmpgt (uint32_t op1, uint32_t op2)
3338 /* XXX: TODO: test special values (NaN, infinites, ...) */
3339 return efststgt(op1, op2);
3342 static always_inline uint32_t efscmpeq (uint32_t op1, uint32_t op2)
3344 /* XXX: TODO: test special values (NaN, infinites, ...) */
3345 return efststeq(op1, op2);
3348 #define HELPER_SINGLE_SPE_CMP(name) \
3349 uint32_t helper_e##name (uint32_t op1, uint32_t op2) \
3351 return e##name(op1, op2) << 2; \
3353 /* efststlt */
3354 HELPER_SINGLE_SPE_CMP(fststlt);
3355 /* efststgt */
3356 HELPER_SINGLE_SPE_CMP(fststgt);
3357 /* efststeq */
3358 HELPER_SINGLE_SPE_CMP(fststeq);
3359 /* efscmplt */
3360 HELPER_SINGLE_SPE_CMP(fscmplt);
3361 /* efscmpgt */
3362 HELPER_SINGLE_SPE_CMP(fscmpgt);
3363 /* efscmpeq */
3364 HELPER_SINGLE_SPE_CMP(fscmpeq);
3366 static always_inline uint32_t evcmp_merge (int t0, int t1)
3368 return (t0 << 3) | (t1 << 2) | ((t0 | t1) << 1) | (t0 & t1);
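/* The merged value is laid out like a CR field: bit 3 = high-word
 * result, bit 2 = low-word result, bit 1 = either, bit 0 = both;
 * e.g. t0 = 1, t1 = 0 merges to 0b1010 = 0xa. */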
3371 #define HELPER_VECTOR_SPE_CMP(name) \
3372 uint32_t helper_ev##name (uint64_t op1, uint64_t op2) \
3374 return evcmp_merge(e##name(op1 >> 32, op2 >> 32), e##name(op1, op2)); \
3376 /* evfststlt */
3377 HELPER_VECTOR_SPE_CMP(fststlt);
3378 /* evfststgt */
3379 HELPER_VECTOR_SPE_CMP(fststgt);
3380 /* evfststeq */
3381 HELPER_VECTOR_SPE_CMP(fststeq);
3382 /* evfscmplt */
3383 HELPER_VECTOR_SPE_CMP(fscmplt);
3384 /* evfscmpgt */
3385 HELPER_VECTOR_SPE_CMP(fscmpgt);
3386 /* evfscmpeq */
3387 HELPER_VECTOR_SPE_CMP(fscmpeq);
3389 /* Double-precision floating-point conversion */
3390 uint64_t helper_efdcfsi (uint32_t val)
3392 CPU_DoubleU u;
3394 u.d = int32_to_float64(val, &env->vec_status);
3396 return u.ll;
3399 uint64_t helper_efdcfsid (uint64_t val)
3401 CPU_DoubleU u;
3403 u.d = int64_to_float64(val, &env->vec_status);
3405 return u.ll;
3408 uint64_t helper_efdcfui (uint32_t val)
3410 CPU_DoubleU u;
3412 u.d = uint32_to_float64(val, &env->vec_status);
3414 return u.ll;
3417 uint64_t helper_efdcfuid (uint64_t val)
3419 CPU_DoubleU u;
3421 u.d = uint64_to_float64(val, &env->vec_status);
3423 return u.ll;
3426 uint32_t helper_efdctsi (uint64_t val)
3428 CPU_DoubleU u;
3430 u.ll = val;
3431 /* NaNs are not handled the way IEEE 754 specifies */
3432 if (unlikely(float64_is_nan(u.d)))
3433 return 0;
3435 return float64_to_int32(u.d, &env->vec_status);
3438 uint32_t helper_efdctui (uint64_t val)
3440 CPU_DoubleU u;
3442 u.ll = val;
3443 /* NaNs are not handled the way IEEE 754 specifies */
3444 if (unlikely(float64_is_nan(u.d)))
3445 return 0;
3447 return float64_to_uint32(u.d, &env->vec_status);
3450 uint32_t helper_efdctsiz (uint64_t val)
3452 CPU_DoubleU u;
3454 u.ll = val;
3455 /* NaNs are not handled the way IEEE 754 specifies */
3456 if (unlikely(float64_is_nan(u.d)))
3457 return 0;
3459 return float64_to_int32_round_to_zero(u.d, &env->vec_status);
3462 uint64_t helper_efdctsidz (uint64_t val)
3464 CPU_DoubleU u;
3466 u.ll = val;
3467 /* NaNs are not handled the way IEEE 754 specifies */
3468 if (unlikely(float64_is_nan(u.d)))
3469 return 0;
3471 return float64_to_int64_round_to_zero(u.d, &env->vec_status);
3474 uint32_t helper_efdctuiz (uint64_t val)
3476 CPU_DoubleU u;
3478 u.ll = val;
3479 /* NaNs are not handled the way IEEE 754 specifies */
3480 if (unlikely(float64_is_nan(u.d)))
3481 return 0;
3483 return float64_to_uint32_round_to_zero(u.d, &env->vec_status);
3486 uint64_t helper_efdctuidz (uint64_t val)
3488 CPU_DoubleU u;
3490 u.ll = val;
3491 /* NaNs are not handled the way IEEE 754 specifies */
3492 if (unlikely(float64_is_nan(u.d)))
3493 return 0;
3495 return float64_to_uint64_round_to_zero(u.d, &env->vec_status);
3498 uint64_t helper_efdcfsf (uint32_t val)
3500 CPU_DoubleU u;
3501 float64 tmp;
3503 u.d = int32_to_float64(val, &env->vec_status);
3504 tmp = int64_to_float64(1ULL << 32, &env->vec_status);
3505 u.d = float64_div(u.d, tmp, &env->vec_status);
3507 return u.ll;
3510 uint64_t helper_efdcfuf (uint32_t val)
3512 CPU_DoubleU u;
3513 float64 tmp;
3515 u.d = uint32_to_float64(val, &env->vec_status);
3516 tmp = int64_to_float64(1ULL << 32, &env->vec_status);
3517 u.d = float64_div(u.d, tmp, &env->vec_status);
3519 return u.ll;
3522 uint32_t helper_efdctsf (uint64_t val)
3524 CPU_DoubleU u;
3525 float64 tmp;
3527 u.ll = val;
3528 /* NaNs are not handled the way IEEE 754 specifies */
3529 if (unlikely(float64_is_nan(u.d)))
3530 return 0;
3531 tmp = uint64_to_float64(1ULL << 32, &env->vec_status);
3532 u.d = float64_mul(u.d, tmp, &env->vec_status);
3534 return float64_to_int32(u.d, &env->vec_status);
3537 uint32_t helper_efdctuf (uint64_t val)
3539 CPU_DoubleU u;
3540 float64 tmp;
3542 u.ll = val;
3543 /* NaNs are not handled the way IEEE 754 specifies */
3544 if (unlikely(float64_is_nan(u.d)))
3545 return 0;
3546 tmp = uint64_to_float64(1ULL << 32, &env->vec_status);
3547 u.d = float64_mul(u.d, tmp, &env->vec_status);
3549 return float64_to_uint32(u.d, &env->vec_status);
3552 uint32_t helper_efscfd (uint64_t val)
3554 CPU_DoubleU u1;
3555 CPU_FloatU u2;
3557 u1.ll = val;
3558 u2.f = float64_to_float32(u1.d, &env->vec_status);
3560 return u2.l;
3563 uint64_t helper_efdcfs (uint32_t val)
3565 CPU_DoubleU u2;
3566 CPU_FloatU u1;
3568 u1.l = val;
3569 u2.d = float32_to_float64(u1.f, &env->vec_status);
3571 return u2.ll;
3574 /* Double-precision floating-point arithmetic */
3575 uint64_t helper_efdadd (uint64_t op1, uint64_t op2)
3577 CPU_DoubleU u1, u2;
3578 u1.ll = op1;
3579 u2.ll = op2;
3580 u1.d = float64_add(u1.d, u2.d, &env->vec_status);
3581 return u1.ll;
3584 uint64_t helper_efdsub (uint64_t op1, uint64_t op2)
3586 CPU_DoubleU u1, u2;
3587 u1.ll = op1;
3588 u2.ll = op2;
3589 u1.d = float64_sub(u1.d, u2.d, &env->vec_status);
3590 return u1.ll;
3593 uint64_t helper_efdmul (uint64_t op1, uint64_t op2)
3595 CPU_DoubleU u1, u2;
3596 u1.ll = op1;
3597 u2.ll = op2;
3598 u1.d = float64_mul(u1.d, u2.d, &env->vec_status);
3599 return u1.ll;
3602 uint64_t helper_efddiv (uint64_t op1, uint64_t op2)
3604 CPU_DoubleU u1, u2;
3605 u1.ll = op1;
3606 u2.ll = op2;
3607 u1.d = float64_div(u1.d, u2.d, &env->vec_status);
3608 return u1.ll;
3611 /* Double-precision floating-point comparisons */
3612 uint32_t helper_efdtstlt (uint64_t op1, uint64_t op2)
3614 CPU_DoubleU u1, u2;
3615 u1.ll = op1;
3616 u2.ll = op2;
3617 return float64_lt(u1.d, u2.d, &env->vec_status) ? 4 : 0;
3620 uint32_t helper_efdtstgt (uint64_t op1, uint64_t op2)
3622 CPU_DoubleU u1, u2;
3623 u1.ll = op1;
3624 u2.ll = op2;
3625 return float64_le(u1.d, u2.d, &env->vec_status) ? 0 : 4;
3628 uint32_t helper_efdtsteq (uint64_t op1, uint64_t op2)
3630 CPU_DoubleU u1, u2;
3631 u1.ll = op1;
3632 u2.ll = op2;
3633 return float64_eq(u1.d, u2.d, &env->vec_status) ? 4 : 0;
3636 uint32_t helper_efdcmplt (uint64_t op1, uint64_t op2)
3638 /* XXX: TODO: test special values (NaN, infinites, ...) */
3639 return helper_efdtstlt(op1, op2);
3642 uint32_t helper_efdcmpgt (uint64_t op1, uint64_t op2)
3644 /* XXX: TODO: test special values (NaN, infinites, ...) */
3645 return helper_efdtstgt(op1, op2);
3648 uint32_t helper_efdcmpeq (uint64_t op1, uint64_t op2)
3650 /* XXX: TODO: test special values (NaN, infinites, ...) */
3651 return helper_efdtsteq(op1, op2);
3654 /*****************************************************************************/
3655 /* Softmmu support */
3656 #if !defined (CONFIG_USER_ONLY)
3658 #define MMUSUFFIX _mmu
3660 #define SHIFT 0
3661 #include "softmmu_template.h"
3663 #define SHIFT 1
3664 #include "softmmu_template.h"
3666 #define SHIFT 2
3667 #include "softmmu_template.h"
3669 #define SHIFT 3
3670 #include "softmmu_template.h"
3672 /* Try to fill the TLB; raise an exception on error. If retaddr is
3673 NULL, the function was called from C code (i.e. not from
3674 generated code or from helper.c) */
3675 /* XXX: fix it to restore all registers */
3676 void tlb_fill (target_ulong addr, int is_write, int mmu_idx, void *retaddr)
3678 TranslationBlock *tb;
3679 CPUState *saved_env;
3680 unsigned long pc;
3681 int ret;
3683 /* XXX: hack to restore env in all cases, even if not called from
3684 generated code */
3685 saved_env = env;
3686 env = cpu_single_env;
3687 ret = cpu_ppc_handle_mmu_fault(env, addr, is_write, mmu_idx, 1);
3688 if (unlikely(ret != 0)) {
3689 if (likely(retaddr)) {
3690 /* now we have a real cpu fault */
3691 pc = (unsigned long)retaddr;
3692 tb = tb_find_pc(pc);
3693 if (likely(tb)) {
3694 /* the PC is inside the translated code. It means that we have
3695 a virtual CPU fault */
3696 cpu_restore_state(tb, env, pc, NULL);
3699 helper_raise_exception_err(env->exception_index, env->error_code);
3701 env = saved_env;
3704 /* Segment registers load and store */
3705 target_ulong helper_load_sr (target_ulong sr_num)
3707 return env->sr[sr_num];
3710 void helper_store_sr (target_ulong sr_num, target_ulong val)
3712 ppc_store_sr(env, sr_num, val);
3715 /* SLB management */
3716 #if defined(TARGET_PPC64)
3717 target_ulong helper_load_slb (target_ulong slb_nr)
3719 return ppc_load_slb(env, slb_nr);
3722 void helper_store_slb (target_ulong slb_nr, target_ulong rs)
3724 ppc_store_slb(env, slb_nr, rs);
3727 void helper_slbia (void)
3729 ppc_slb_invalidate_all(env);
3732 void helper_slbie (target_ulong addr)
3734 ppc_slb_invalidate_one(env, addr);
3737 #endif /* defined(TARGET_PPC64) */
3739 /* TLB management */
3740 void helper_tlbia (void)
3742 ppc_tlb_invalidate_all(env);
3745 void helper_tlbie (target_ulong addr)
3747 ppc_tlb_invalidate_one(env, addr);
3750 /* Software driven TLBs management */
3751 /* PowerPC 602/603 software TLB load instructions helpers */
3752 static void do_6xx_tlb (target_ulong new_EPN, int is_code)
3754 target_ulong RPN, CMP, EPN;
3755 int way;
3757 RPN = env->spr[SPR_RPA];
3758 if (is_code) {
3759 CMP = env->spr[SPR_ICMP];
3760 EPN = env->spr[SPR_IMISS];
3761 } else {
3762 CMP = env->spr[SPR_DCMP];
3763 EPN = env->spr[SPR_DMISS];
3765 way = (env->spr[SPR_SRR1] >> 17) & 1;
3766 LOG_SWTLB("%s: EPN " ADDRX " " ADDRX " PTE0 " ADDRX
3767 " PTE1 " ADDRX " way %d\n",
3768 __func__, new_EPN, EPN, CMP, RPN, way);
3769 /* Store this TLB */
3770 ppc6xx_tlb_store(env, (uint32_t)(new_EPN & TARGET_PAGE_MASK),
3771 way, is_code, CMP, RPN);
3774 void helper_6xx_tlbd (target_ulong EPN)
3776 do_6xx_tlb(EPN, 0);
3779 void helper_6xx_tlbi (target_ulong EPN)
3781 do_6xx_tlb(EPN, 1);
3784 /* PowerPC 74xx software TLB load instructions helpers */
3785 static void do_74xx_tlb (target_ulong new_EPN, int is_code)
3787 target_ulong RPN, CMP, EPN;
3788 int way;
3790 RPN = env->spr[SPR_PTELO];
3791 CMP = env->spr[SPR_PTEHI];
3792 EPN = env->spr[SPR_TLBMISS] & ~0x3;
3793 way = env->spr[SPR_TLBMISS] & 0x3;
3794 LOG_SWTLB("%s: EPN " ADDRX " " ADDRX " PTE0 " ADDRX
3795 " PTE1 " ADDRX " way %d\n",
3796 __func__, new_EPN, EPN, CMP, RPN, way);
3797 /* Store this TLB */
3798 ppc6xx_tlb_store(env, (uint32_t)(new_EPN & TARGET_PAGE_MASK),
3799 way, is_code, CMP, RPN);
3802 void helper_74xx_tlbd (target_ulong EPN)
3804 do_74xx_tlb(EPN, 0);
3807 void helper_74xx_tlbi (target_ulong EPN)
3809 do_74xx_tlb(EPN, 1);
3812 static always_inline target_ulong booke_tlb_to_page_size (int size)
3814 return 1024 << (2 * size);
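/* The BookE TSIZE field encodes 1 KiB * 4^size: 0 -> 1 KiB, 1 -> 4 KiB,
 * 2 -> 16 KiB, ... 0xa -> 1 GiB; the switch below is its inverse. */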
3817 static always_inline int booke_page_size_to_tlb (target_ulong page_size)
3819 int size;
3821 switch (page_size) {
3822 case 0x00000400UL:
3823 size = 0x0;
3824 break;
3825 case 0x00001000UL:
3826 size = 0x1;
3827 break;
3828 case 0x00004000UL:
3829 size = 0x2;
3830 break;
3831 case 0x00010000UL:
3832 size = 0x3;
3833 break;
3834 case 0x00040000UL:
3835 size = 0x4;
3836 break;
3837 case 0x00100000UL:
3838 size = 0x5;
3839 break;
3840 case 0x00400000UL:
3841 size = 0x6;
3842 break;
3843 case 0x01000000UL:
3844 size = 0x7;
3845 break;
3846 case 0x04000000UL:
3847 size = 0x8;
3848 break;
3849 case 0x10000000UL:
3850 size = 0x9;
3851 break;
3852 case 0x40000000UL:
3853 size = 0xA;
3854 break;
3855 #if defined (TARGET_PPC64)
3856 case 0x000100000000ULL:
3857 size = 0xB;
3858 break;
3859 case 0x000400000000ULL:
3860 size = 0xC;
3861 break;
3862 case 0x001000000000ULL:
3863 size = 0xD;
3864 break;
3865 case 0x004000000000ULL:
3866 size = 0xE;
3867 break;
3868 case 0x010000000000ULL:
3869 size = 0xF;
3870 break;
3871 #endif
3872 default:
3873 size = -1;
3874 break;
3877 return size;
3880 /* Helpers for 4xx TLB management */
3881 target_ulong helper_4xx_tlbre_lo (target_ulong entry)
3883 ppcemb_tlb_t *tlb;
3884 target_ulong ret;
3885 int size;
3887 entry &= 0x3F;
3888 tlb = &env->tlb[entry].tlbe;
3889 ret = tlb->EPN;
3890 if (tlb->prot & PAGE_VALID)
3891 ret |= 0x400;
3892 size = booke_page_size_to_tlb(tlb->size);
3893 if (size < 0 || size > 0x7)
3894 size = 1;
3895 ret |= size << 7;
3896 env->spr[SPR_40x_PID] = tlb->PID;
3897 return ret;
3900 target_ulong helper_4xx_tlbre_hi (target_ulong entry)
3902 ppcemb_tlb_t *tlb;
3903 target_ulong ret;
3905 entry &= 0x3F;
3906 tlb = &env->tlb[entry].tlbe;
3907 ret = tlb->RPN;
3908 if (tlb->prot & PAGE_EXEC)
3909 ret |= 0x200;
3910 if (tlb->prot & PAGE_WRITE)
3911 ret |= 0x100;
3912 return ret;
3915 void helper_4xx_tlbwe_hi (target_ulong entry, target_ulong val)
3917 ppcemb_tlb_t *tlb;
3918 target_ulong page, end;
3920 LOG_SWTLB("%s entry %d val " ADDRX "\n", __func__, (int)entry, val);
3921 entry &= 0x3F;
3922 tlb = &env->tlb[entry].tlbe;
3923 /* Invalidate previous TLB (if it's valid) */
3924 if (tlb->prot & PAGE_VALID) {
3925 end = tlb->EPN + tlb->size;
3926 LOG_SWTLB("%s: invalidate old TLB %d start " ADDRX
3927 " end " ADDRX "\n", __func__, (int)entry, tlb->EPN, end);
3928 for (page = tlb->EPN; page < end; page += TARGET_PAGE_SIZE)
3929 tlb_flush_page(env, page);
3931 tlb->size = booke_tlb_to_page_size((val >> 7) & 0x7);
3932 /* We cannot handle TLB size < TARGET_PAGE_SIZE.
3933 * If this ever occurs, one should use the ppcemb target instead
3934 * of the ppc or ppc64 one
3935 */
3936 if ((val & 0x40) && tlb->size < TARGET_PAGE_SIZE) {
3937 cpu_abort(env, "TLB size " TARGET_FMT_lu " < %u "
3938 "are not supported (%d)\n",
3939 tlb->size, TARGET_PAGE_SIZE, (int)((val >> 7) & 0x7));
3941 tlb->EPN = val & ~(tlb->size - 1);
3942 if (val & 0x40)
3943 tlb->prot |= PAGE_VALID;
3944 else
3945 tlb->prot &= ~PAGE_VALID;
3946 if (val & 0x20) {
3947 /* XXX: TO BE FIXED */
3948 cpu_abort(env, "Little-endian TLB entries are not supported for now\n");
3950 tlb->PID = env->spr[SPR_40x_PID]; /* PID */
3951 tlb->attr = val & 0xFF;
3952 LOG_SWTLB("%s: set up TLB %d RPN " PADDRX " EPN " ADDRX
3953 " size " ADDRX " prot %c%c%c%c PID %d\n", __func__,
3954 (int)entry, tlb->RPN, tlb->EPN, tlb->size,
3955 tlb->prot & PAGE_READ ? 'r' : '-',
3956 tlb->prot & PAGE_WRITE ? 'w' : '-',
3957 tlb->prot & PAGE_EXEC ? 'x' : '-',
3958 tlb->prot & PAGE_VALID ? 'v' : '-', (int)tlb->PID);
3959 /* Invalidate new TLB (if valid) */
3960 if (tlb->prot & PAGE_VALID) {
3961 end = tlb->EPN + tlb->size;
3962 LOG_SWTLB("%s: invalidate TLB %d start " ADDRX
3963 " end " ADDRX "\n", __func__, (int)entry, tlb->EPN, end);
3964 for (page = tlb->EPN; page < end; page += TARGET_PAGE_SIZE)
3965 tlb_flush_page(env, page);
3969 void helper_4xx_tlbwe_lo (target_ulong entry, target_ulong val)
3971 ppcemb_tlb_t *tlb;
3973 LOG_SWTLB("%s entry %i val " ADDRX "\n", __func__, (int)entry, val);
3974 entry &= 0x3F;
3975 tlb = &env->tlb[entry].tlbe;
3976 tlb->RPN = val & 0xFFFFFC00;
3977 tlb->prot = PAGE_READ;
3978 if (val & 0x200)
3979 tlb->prot |= PAGE_EXEC;
3980 if (val & 0x100)
3981 tlb->prot |= PAGE_WRITE;
3982 LOG_SWTLB("%s: set up TLB %d RPN " PADDRX " EPN " ADDRX
3983 " size " ADDRX " prot %c%c%c%c PID %d\n", __func__,
3984 (int)entry, tlb->RPN, tlb->EPN, tlb->size,
3985 tlb->prot & PAGE_READ ? 'r' : '-',
3986 tlb->prot & PAGE_WRITE ? 'w' : '-',
3987 tlb->prot & PAGE_EXEC ? 'x' : '-',
3988 tlb->prot & PAGE_VALID ? 'v' : '-', (int)tlb->PID);
3991 target_ulong helper_4xx_tlbsx (target_ulong address)
3993 return ppcemb_tlb_search(env, address, env->spr[SPR_40x_PID]);
3996 /* PowerPC 440 TLB management */
3997 void helper_440_tlbwe (uint32_t word, target_ulong entry, target_ulong value)
3999 ppcemb_tlb_t *tlb;
4000 target_ulong EPN, RPN, size;
4001 int do_flush_tlbs;
4003 LOG_SWTLB("%s word %d entry %d value " ADDRX "\n",
4004 __func__, word, (int)entry, value);
4005 do_flush_tlbs = 0;
4006 entry &= 0x3F;
4007 tlb = &env->tlb[entry].tlbe;
4008 switch (word) {
4009 default:
4010 /* Just here to please gcc */
4011 case 0:
4012 EPN = value & 0xFFFFFC00;
4013 if ((tlb->prot & PAGE_VALID) && EPN != tlb->EPN)
4014 do_flush_tlbs = 1;
4015 tlb->EPN = EPN;
4016 size = booke_tlb_to_page_size((value >> 4) & 0xF);
4017 if ((tlb->prot & PAGE_VALID) && tlb->size < size)
4018 do_flush_tlbs = 1;
4019 tlb->size = size;
4020 tlb->attr &= ~0x1;
4021 tlb->attr |= (value >> 8) & 1;
4022 if (value & 0x200) {
4023 tlb->prot |= PAGE_VALID;
4024 } else {
4025 if (tlb->prot & PAGE_VALID) {
4026 tlb->prot &= ~PAGE_VALID;
4027 do_flush_tlbs = 1;
4030 tlb->PID = env->spr[SPR_440_MMUCR] & 0x000000FF;
4031 if (do_flush_tlbs)
4032 tlb_flush(env, 1);
4033 break;
4034 case 1:
4035 RPN = value & 0xFFFFFC0F;
4036 if ((tlb->prot & PAGE_VALID) && tlb->RPN != RPN)
4037 tlb_flush(env, 1);
4038 tlb->RPN = RPN;
4039 break;
4040 case 2:
4041 tlb->attr = (tlb->attr & 0x1) | (value & 0x0000FF00);
4042 tlb->prot = tlb->prot & PAGE_VALID;
4043 if (value & 0x1)
4044 tlb->prot |= PAGE_READ << 4;
4045 if (value & 0x2)
4046 tlb->prot |= PAGE_WRITE << 4;
4047 if (value & 0x4)
4048 tlb->prot |= PAGE_EXEC << 4;
4049 if (value & 0x8)
4050 tlb->prot |= PAGE_READ;
4051 if (value & 0x10)
4052 tlb->prot |= PAGE_WRITE;
4053 if (value & 0x20)
4054 tlb->prot |= PAGE_EXEC;
4055 break;
4059 target_ulong helper_440_tlbre (uint32_t word, target_ulong entry)
4061 ppcemb_tlb_t *tlb;
4062 target_ulong ret;
4063 int size;
4065 entry &= 0x3F;
4066 tlb = &env->tlb[entry].tlbe;
4067 switch (word) {
4068 default:
4069 /* Just here to please gcc */
4070 case 0:
4071 ret = tlb->EPN;
4072 size = booke_page_size_to_tlb(tlb->size);
4073 if (size < 0 || size > 0xF)
4074 size = 1;
4075 ret |= size << 4;
4076 if (tlb->attr & 0x1)
4077 ret |= 0x100;
4078 if (tlb->prot & PAGE_VALID)
4079 ret |= 0x200;
4080 env->spr[SPR_440_MMUCR] &= ~0x000000FF;
4081 env->spr[SPR_440_MMUCR] |= tlb->PID;
4082 break;
4083 case 1:
4084 ret = tlb->RPN;
4085 break;
4086 case 2:
4087 ret = tlb->attr & ~0x1;
4088 if (tlb->prot & (PAGE_READ << 4))
4089 ret |= 0x1;
4090 if (tlb->prot & (PAGE_WRITE << 4))
4091 ret |= 0x2;
4092 if (tlb->prot & (PAGE_EXEC << 4))
4093 ret |= 0x4;
4094 if (tlb->prot & PAGE_READ)
4095 ret |= 0x8;
4096 if (tlb->prot & PAGE_WRITE)
4097 ret |= 0x10;
4098 if (tlb->prot & PAGE_EXEC)
4099 ret |= 0x20;
4100 break;
4102 return ret;
4105 target_ulong helper_440_tlbsx (target_ulong address)
4107 return ppcemb_tlb_search(env, address, env->spr[SPR_440_MMUCR] & 0xFF);
4110 #endif /* !CONFIG_USER_ONLY */