/*
 *  PowerPC emulation helpers for qemu.
 *
 *  Copyright (c) 2003-2007 Jocelyn Mayer
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */
#include <string.h>
#include "exec.h"
#include "host-utils.h"
#include "helper.h"

#include "helper_regs.h"

//#define DEBUG_OP
//#define DEBUG_EXCEPTIONS
//#define DEBUG_SOFTWARE_TLB

#ifdef DEBUG_SOFTWARE_TLB
#  define LOG_SWTLB(...) qemu_log(__VA_ARGS__)
#else
#  define LOG_SWTLB(...) do { } while (0)
#endif
/*****************************************************************************/
/* Exceptions processing helpers */

void helper_raise_exception_err (uint32_t exception, uint32_t error_code)
{
#if 0
    printf("Raise exception %3x code : %d\n", exception, error_code);
#endif
    env->exception_index = exception;
    env->error_code = error_code;
    cpu_loop_exit();
}

void helper_raise_exception (uint32_t exception)
{
    helper_raise_exception_err(exception, 0);
}
/*****************************************************************************/
/* SPR accesses */

void helper_load_dump_spr (uint32_t sprn)
{
    qemu_log("Read SPR %d %03x => " ADDRX "\n",
             sprn, sprn, env->spr[sprn]);
}

void helper_store_dump_spr (uint32_t sprn)
{
    qemu_log("Write SPR %d %03x <= " ADDRX "\n",
             sprn, sprn, env->spr[sprn]);
}

target_ulong helper_load_tbl (void)
{
    return cpu_ppc_load_tbl(env);
}

target_ulong helper_load_tbu (void)
{
    return cpu_ppc_load_tbu(env);
}

target_ulong helper_load_atbl (void)
{
    return cpu_ppc_load_atbl(env);
}

target_ulong helper_load_atbu (void)
{
    return cpu_ppc_load_atbu(env);
}

target_ulong helper_load_601_rtcl (void)
{
    return cpu_ppc601_load_rtcl(env);
}

target_ulong helper_load_601_rtcu (void)
{
    return cpu_ppc601_load_rtcu(env);
}
#if !defined(CONFIG_USER_ONLY)
#if defined (TARGET_PPC64)
void helper_store_asr (target_ulong val)
{
    ppc_store_asr(env, val);
}
#endif

void helper_store_sdr1 (target_ulong val)
{
    ppc_store_sdr1(env, val);
}

void helper_store_tbl (target_ulong val)
{
    cpu_ppc_store_tbl(env, val);
}

void helper_store_tbu (target_ulong val)
{
    cpu_ppc_store_tbu(env, val);
}

void helper_store_atbl (target_ulong val)
{
    cpu_ppc_store_atbl(env, val);
}

void helper_store_atbu (target_ulong val)
{
    cpu_ppc_store_atbu(env, val);
}

void helper_store_601_rtcl (target_ulong val)
{
    cpu_ppc601_store_rtcl(env, val);
}

void helper_store_601_rtcu (target_ulong val)
{
    cpu_ppc601_store_rtcu(env, val);
}

target_ulong helper_load_decr (void)
{
    return cpu_ppc_load_decr(env);
}

void helper_store_decr (target_ulong val)
{
    cpu_ppc_store_decr(env, val);
}

void helper_store_hid0_601 (target_ulong val)
{
    target_ulong hid0;

    hid0 = env->spr[SPR_HID0];
    if ((val ^ hid0) & 0x00000008) {
        /* Change current endianness */
        env->hflags &= ~(1 << MSR_LE);
        env->hflags_nmsr &= ~(1 << MSR_LE);
        env->hflags_nmsr |= (1 << MSR_LE) & (((val >> 3) & 1) << MSR_LE);
        env->hflags |= env->hflags_nmsr;
        qemu_log("%s: set endianness to %c => " ADDRX "\n",
                 __func__, val & 0x8 ? 'l' : 'b', env->hflags);
    }
    env->spr[SPR_HID0] = (uint32_t)val;
}
void helper_store_403_pbr (uint32_t num, target_ulong value)
{
    if (likely(env->pb[num] != value)) {
        env->pb[num] = value;
        /* Should be optimized */
        tlb_flush(env, 1);
    }
}

target_ulong helper_load_40x_pit (void)
{
    return load_40x_pit(env);
}

void helper_store_40x_pit (target_ulong val)
{
    store_40x_pit(env, val);
}

void helper_store_40x_dbcr0 (target_ulong val)
{
    store_40x_dbcr0(env, val);
}

void helper_store_40x_sler (target_ulong val)
{
    store_40x_sler(env, val);
}

void helper_store_booke_tcr (target_ulong val)
{
    store_booke_tcr(env, val);
}

void helper_store_booke_tsr (target_ulong val)
{
    store_booke_tsr(env, val);
}

void helper_store_ibatu (uint32_t nr, target_ulong val)
{
    ppc_store_ibatu(env, nr, val);
}

void helper_store_ibatl (uint32_t nr, target_ulong val)
{
    ppc_store_ibatl(env, nr, val);
}

void helper_store_dbatu (uint32_t nr, target_ulong val)
{
    ppc_store_dbatu(env, nr, val);
}

void helper_store_dbatl (uint32_t nr, target_ulong val)
{
    ppc_store_dbatl(env, nr, val);
}

void helper_store_601_batl (uint32_t nr, target_ulong val)
{
    ppc_store_ibatl_601(env, nr, val);
}

void helper_store_601_batu (uint32_t nr, target_ulong val)
{
    ppc_store_ibatu_601(env, nr, val);
}
#endif
/*****************************************************************************/
/* Memory load and stores */

static always_inline target_ulong addr_add(target_ulong addr, target_long arg)
{
#if defined(TARGET_PPC64)
    if (!msr_sf)
        return (uint32_t)(addr + arg);
    else
#endif
        return addr + arg;
}
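/* Note: in 64-bit mode with MSR[SF] clear (32-bit mode), effective address
 * computations wrap modulo 2^32, which is why addr_add() truncates the sum
 * instead of letting it carry into the high word. */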
void helper_lmw (target_ulong addr, uint32_t reg)
{
    for (; reg < 32; reg++) {
        if (msr_le)
            env->gpr[reg] = bswap32(ldl(addr));
        else
            env->gpr[reg] = ldl(addr);
        addr = addr_add(addr, 4);
    }
}

void helper_stmw (target_ulong addr, uint32_t reg)
{
    for (; reg < 32; reg++) {
        if (msr_le)
            stl(addr, bswap32((uint32_t)env->gpr[reg]));
        else
            stl(addr, (uint32_t)env->gpr[reg]);
        addr = addr_add(addr, 4);
    }
}

void helper_lsw(target_ulong addr, uint32_t nb, uint32_t reg)
{
    int sh;

    for (; nb > 3; nb -= 4) {
        env->gpr[reg] = ldl(addr);
        reg = (reg + 1) % 32;
        addr = addr_add(addr, 4);
    }
    if (unlikely(nb > 0)) {
        env->gpr[reg] = 0;
        for (sh = 24; nb > 0; nb--, sh -= 8) {
            env->gpr[reg] |= ldub(addr) << sh;
            addr = addr_add(addr, 1);
        }
    }
}
/* PPC32 specification says we must generate an exception if
 * rA is in the range of registers to be loaded.
 * On the other hand, IBM says this is valid, but rA won't be loaded.
 * For now, I'll follow the spec...
 */
void helper_lswx(target_ulong addr, uint32_t reg, uint32_t ra, uint32_t rb)
{
    if (likely(xer_bc != 0)) {
        if (unlikely((ra != 0 && reg < ra && (reg + xer_bc) > ra) ||
                     (reg < rb && (reg + xer_bc) > rb))) {
            helper_raise_exception_err(POWERPC_EXCP_PROGRAM,
                                       POWERPC_EXCP_INVAL |
                                       POWERPC_EXCP_INVAL_LSWX);
        } else {
            helper_lsw(addr, xer_bc, reg);
        }
    }
}
void helper_stsw(target_ulong addr, uint32_t nb, uint32_t reg)
{
    int sh;

    for (; nb > 3; nb -= 4) {
        stl(addr, env->gpr[reg]);
        reg = (reg + 1) % 32;
        addr = addr_add(addr, 4);
    }
    if (unlikely(nb > 0)) {
        for (sh = 24; nb > 0; nb--, sh -= 8) {
            stb(addr, (env->gpr[reg] >> sh) & 0xFF);
            addr = addr_add(addr, 1);
        }
    }
}
static void do_dcbz(target_ulong addr, int dcache_line_size)
{
    int i;

    addr &= ~(dcache_line_size - 1);
    for (i = 0; i < dcache_line_size; i += 4) {
        stl(addr + i, 0);
    }
    if (env->reserve == addr)
        env->reserve = (target_ulong)-1ULL;
}

void helper_dcbz(target_ulong addr)
{
    do_dcbz(addr, env->dcache_line_size);
}

void helper_dcbz_970(target_ulong addr)
{
    if (((env->spr[SPR_970_HID5] >> 7) & 0x3) == 1)
        do_dcbz(addr, 32);
    else
        do_dcbz(addr, env->dcache_line_size);
}

void helper_icbi(target_ulong addr)
{
    uint32_t tmp;

    addr &= ~(env->dcache_line_size - 1);
    /* Invalidate one cache line :
     * PowerPC specification says this is to be treated like a load
     * (not a fetch) by the MMU. To be sure it will be so,
     * do the load "by hand".
     */
    tmp = ldl(addr);
    tb_invalidate_page_range(addr, addr + env->icache_line_size);
}
// XXX: to be tested
target_ulong helper_lscbx (target_ulong addr, uint32_t reg, uint32_t ra, uint32_t rb)
{
    int i, c, d;

    d = 24;
    for (i = 0; i < xer_bc; i++) {
        c = ldub(addr);
        addr = addr_add(addr, 1);
        /* ra (if not 0) and rb are never modified */
        if (likely(reg != rb && (ra == 0 || reg != ra))) {
            env->gpr[reg] = (env->gpr[reg] & ~(0xFF << d)) | (c << d);
        }
        if (unlikely(c == xer_cmp))
            break;
        if (likely(d != 0)) {
            d -= 8;
        } else {
            d = 24;
            reg++;
            reg = reg & 0x1F;
        }
    }
    return i;
}
/*****************************************************************************/
/* Fixed point operations helpers */
#if defined(TARGET_PPC64)

/* multiply high word */
uint64_t helper_mulhd (uint64_t arg1, uint64_t arg2)
{
    uint64_t tl, th;

    muls64(&tl, &th, arg1, arg2);
    return th;
}

/* multiply high word unsigned */
uint64_t helper_mulhdu (uint64_t arg1, uint64_t arg2)
{
    uint64_t tl, th;

    mulu64(&tl, &th, arg1, arg2);
    return th;
}

uint64_t helper_mulldo (uint64_t arg1, uint64_t arg2)
{
    int64_t th;
    uint64_t tl;

    muls64(&tl, (uint64_t *)&th, arg1, arg2);
    /* If th != 0 && th != -1, then we had an overflow */
    if (likely((uint64_t)(th + 1) <= 1)) {
        env->xer &= ~(1 << XER_OV);
    } else {
        env->xer |= (1 << XER_OV) | (1 << XER_SO);
    }
    return (int64_t)tl;
}
#endif
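/* Note: the mulldo overflow test above only checks that the upper 64 bits of
 * the 128-bit product are all zeroes or all ones (th is 0 or -1); a strict
 * test would also require them to match the sign bit of the low half, so a
 * product such as 2^63 slips through as "no overflow". */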
target_ulong helper_cntlzw (target_ulong t)
{
    return clz32(t);
}

#if defined(TARGET_PPC64)
target_ulong helper_cntlzd (target_ulong t)
{
    return clz64(t);
}
#endif

/* shift right arithmetic helper */
target_ulong helper_sraw (target_ulong value, target_ulong shift)
{
    int32_t ret;

    if (likely(!(shift & 0x20))) {
        if (likely((uint32_t)shift != 0)) {
            shift &= 0x1f;
            ret = (int32_t)value >> shift;
            if (likely(ret >= 0 || (value & ((1 << shift) - 1)) == 0)) {
                env->xer &= ~(1 << XER_CA);
            } else {
                env->xer |= (1 << XER_CA);
            }
        } else {
            ret = (int32_t)value;
            env->xer &= ~(1 << XER_CA);
        }
    } else {
        ret = (int32_t)value >> 31;
        if (ret) {
            env->xer |= (1 << XER_CA);
        } else {
            env->xer &= ~(1 << XER_CA);
        }
    }
    return (target_long)ret;
}
#if defined(TARGET_PPC64)
target_ulong helper_srad (target_ulong value, target_ulong shift)
{
    int64_t ret;

    if (likely(!(shift & 0x40))) {
        if (likely((uint64_t)shift != 0)) {
            shift &= 0x3f;
            ret = (int64_t)value >> shift;
            /* The mask must be built with a 64-bit constant: a plain
             * "1 << shift" overflows for shift counts above 31. */
            if (likely(ret >= 0 ||
                       (value & (((uint64_t)1 << shift) - 1)) == 0)) {
                env->xer &= ~(1 << XER_CA);
            } else {
                env->xer |= (1 << XER_CA);
            }
        } else {
            ret = (int64_t)value;
            env->xer &= ~(1 << XER_CA);
        }
    } else {
        ret = (int64_t)value >> 63;
        if (ret) {
            env->xer |= (1 << XER_CA);
        } else {
            env->xer &= ~(1 << XER_CA);
        }
    }
    return ret;
}
#endif
target_ulong helper_popcntb (target_ulong val)
{
    val = (val & 0x55555555) + ((val >> 1) & 0x55555555);
    val = (val & 0x33333333) + ((val >> 2) & 0x33333333);
    val = (val & 0x0f0f0f0f) + ((val >> 4) & 0x0f0f0f0f);
    return val;
}

#if defined(TARGET_PPC64)
target_ulong helper_popcntb_64 (target_ulong val)
{
    val = (val & 0x5555555555555555ULL) + ((val >> 1) & 0x5555555555555555ULL);
    val = (val & 0x3333333333333333ULL) + ((val >> 2) & 0x3333333333333333ULL);
    val = (val & 0x0f0f0f0f0f0f0f0fULL) + ((val >> 4) & 0x0f0f0f0f0f0f0f0fULL);
    return val;
}
#endif
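/* popcntb computes a per-byte population count in parallel (SWAR): each step
 * folds adjacent 1-, 2- and then 4-bit fields, so after the third step every
 * byte of val holds the number of bits that were set in the corresponding
 * input byte. For example, an input byte of 0xB1 (1011 0001) yields 4. */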
/*****************************************************************************/
/* Floating point operations helpers */
uint64_t helper_float32_to_float64(uint32_t arg)
{
    CPU_FloatU f;
    CPU_DoubleU d;

    f.l = arg;
    d.d = float32_to_float64(f.f, &env->fp_status);
    return d.ll;
}

uint32_t helper_float64_to_float32(uint64_t arg)
{
    CPU_FloatU f;
    CPU_DoubleU d;

    d.ll = arg;
    f.f = float64_to_float32(d.d, &env->fp_status);
    return f.l;
}

static always_inline int isden (float64 d)
{
    CPU_DoubleU u;

    u.d = d;

    return ((u.ll >> 52) & 0x7FF) == 0;
}
uint32_t helper_compute_fprf (uint64_t arg, uint32_t set_fprf)
{
    CPU_DoubleU farg;
    int isneg;
    int ret;

    farg.ll = arg;
    isneg = float64_is_neg(farg.d);
    if (unlikely(float64_is_nan(farg.d))) {
        if (float64_is_signaling_nan(farg.d)) {
            /* Signaling NaN: flags are undefined */
            ret = 0x00;
        } else {
            /* Quiet NaN */
            ret = 0x11;
        }
    } else if (unlikely(float64_is_infinity(farg.d))) {
        /* +/- infinity */
        if (isneg)
            ret = 0x09;
        else
            ret = 0x05;
    } else {
        if (float64_is_zero(farg.d)) {
            /* +/- zero */
            if (isneg)
                ret = 0x12;
            else
                ret = 0x02;
        } else {
            if (isden(farg.d)) {
                /* Denormalized numbers */
                ret = 0x10;
            } else {
                /* Normalized numbers */
                ret = 0x00;
            }
            if (isneg) {
                ret |= 0x08;
            } else {
                ret |= 0x04;
            }
        }
    }
    if (set_fprf) {
        /* We update FPSCR_FPRF */
        env->fpscr &= ~(0x1F << FPSCR_FPRF);
        env->fpscr |= ret << FPSCR_FPRF;
    }
    /* We just need fpcc to update Rc1 */
    return ret & 0xF;
}
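/* For reference, the 5-bit FPRF values produced above follow the PowerPC
 * result-flags encoding (class bit followed by FPCC):
 *   0x08 negative normal     0x04 positive normal
 *   0x09 -infinity           0x05 +infinity
 *   0x12 -zero               0x02 +zero
 *   0x18 negative denormal   0x14 positive denormal
 *   0x11 quiet NaN
 */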
/* Floating-point invalid operations exception */
static always_inline uint64_t fload_invalid_op_excp (int op)
{
    uint64_t ret = 0;
    int ve;

    ve = fpscr_ve;
    switch (op) {
    case POWERPC_EXCP_FP_VXSNAN:
        env->fpscr |= 1 << FPSCR_VXSNAN;
        break;
    case POWERPC_EXCP_FP_VXSOFT:
        env->fpscr |= 1 << FPSCR_VXSOFT;
        break;
    case POWERPC_EXCP_FP_VXISI:
        /* Magnitude subtraction of infinities */
        env->fpscr |= 1 << FPSCR_VXISI;
        goto update_arith;
    case POWERPC_EXCP_FP_VXIDI:
        /* Division of infinity by infinity */
        env->fpscr |= 1 << FPSCR_VXIDI;
        goto update_arith;
    case POWERPC_EXCP_FP_VXZDZ:
        /* Division of zero by zero */
        env->fpscr |= 1 << FPSCR_VXZDZ;
        goto update_arith;
    case POWERPC_EXCP_FP_VXIMZ:
        /* Multiplication of zero by infinity */
        env->fpscr |= 1 << FPSCR_VXIMZ;
        goto update_arith;
    case POWERPC_EXCP_FP_VXVC:
        /* Ordered comparison of NaN */
        env->fpscr |= 1 << FPSCR_VXVC;
        env->fpscr &= ~(0xF << FPSCR_FPCC);
        env->fpscr |= 0x11 << FPSCR_FPCC;
        /* We must update the target FPR before raising the exception */
        if (ve != 0) {
            env->exception_index = POWERPC_EXCP_PROGRAM;
            env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_VXVC;
            /* Update the floating-point enabled exception summary */
            env->fpscr |= 1 << FPSCR_FEX;
            /* Exception is deferred */
            ve = 0;
        }
        break;
    case POWERPC_EXCP_FP_VXSQRT:
        /* Square root of a negative number */
        env->fpscr |= 1 << FPSCR_VXSQRT;
    update_arith:
        env->fpscr &= ~((1 << FPSCR_FR) | (1 << FPSCR_FI));
        if (ve == 0) {
            /* Set the result to quiet NaN */
            ret = 0xFFF8000000000000ULL;
            env->fpscr &= ~(0xF << FPSCR_FPCC);
            env->fpscr |= 0x11 << FPSCR_FPCC;
        }
        break;
    case POWERPC_EXCP_FP_VXCVI:
        /* Invalid conversion */
        env->fpscr |= 1 << FPSCR_VXCVI;
        env->fpscr &= ~((1 << FPSCR_FR) | (1 << FPSCR_FI));
        if (ve == 0) {
            /* Set the result to quiet NaN */
            ret = 0xFFF8000000000000ULL;
            env->fpscr &= ~(0xF << FPSCR_FPCC);
            env->fpscr |= 0x11 << FPSCR_FPCC;
        }
        break;
    }
    /* Update the floating-point invalid operation summary */
    env->fpscr |= 1 << FPSCR_VX;
    /* Update the floating-point exception summary */
    env->fpscr |= 1 << FPSCR_FX;
    if (ve != 0) {
        /* Update the floating-point enabled exception summary */
        env->fpscr |= 1 << FPSCR_FEX;
        if (msr_fe0 != 0 || msr_fe1 != 0)
            helper_raise_exception_err(POWERPC_EXCP_PROGRAM, POWERPC_EXCP_FP | op);
    }
    return ret;
}
static always_inline void float_zero_divide_excp (void)
{
    env->fpscr |= 1 << FPSCR_ZX;
    env->fpscr &= ~((1 << FPSCR_FR) | (1 << FPSCR_FI));
    /* Update the floating-point exception summary */
    env->fpscr |= 1 << FPSCR_FX;
    if (fpscr_ze != 0) {
        /* Update the floating-point enabled exception summary */
        env->fpscr |= 1 << FPSCR_FEX;
        if (msr_fe0 != 0 || msr_fe1 != 0) {
            helper_raise_exception_err(POWERPC_EXCP_PROGRAM,
                                       POWERPC_EXCP_FP | POWERPC_EXCP_FP_ZX);
        }
    }
}

static always_inline void float_overflow_excp (void)
{
    env->fpscr |= 1 << FPSCR_OX;
    /* Update the floating-point exception summary */
    env->fpscr |= 1 << FPSCR_FX;
    if (fpscr_oe != 0) {
        /* XXX: should adjust the result */
        /* Update the floating-point enabled exception summary */
        env->fpscr |= 1 << FPSCR_FEX;
        /* We must update the target FPR before raising the exception */
        env->exception_index = POWERPC_EXCP_PROGRAM;
        env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_OX;
    } else {
        env->fpscr |= 1 << FPSCR_XX;
        env->fpscr |= 1 << FPSCR_FI;
    }
}

static always_inline void float_underflow_excp (void)
{
    env->fpscr |= 1 << FPSCR_UX;
    /* Update the floating-point exception summary */
    env->fpscr |= 1 << FPSCR_FX;
    if (fpscr_ue != 0) {
        /* XXX: should adjust the result */
        /* Update the floating-point enabled exception summary */
        env->fpscr |= 1 << FPSCR_FEX;
        /* We must update the target FPR before raising the exception */
        env->exception_index = POWERPC_EXCP_PROGRAM;
        env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_UX;
    }
}

static always_inline void float_inexact_excp (void)
{
    env->fpscr |= 1 << FPSCR_XX;
    /* Update the floating-point exception summary */
    env->fpscr |= 1 << FPSCR_FX;
    if (fpscr_xe != 0) {
        /* Update the floating-point enabled exception summary */
        env->fpscr |= 1 << FPSCR_FEX;
        /* We must update the target FPR before raising the exception */
        env->exception_index = POWERPC_EXCP_PROGRAM;
        env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_XX;
    }
}
static always_inline void fpscr_set_rounding_mode (void)
{
    int rnd_type;

    /* Set rounding mode */
    switch (fpscr_rn) {
    case 0:
        /* Best approximation (round to nearest) */
        rnd_type = float_round_nearest_even;
        break;
    case 1:
        /* Smaller magnitude (round toward zero) */
        rnd_type = float_round_to_zero;
        break;
    case 2:
        /* Round toward +infinity */
        rnd_type = float_round_up;
        break;
    default:
    case 3:
        /* Round toward -infinity */
        rnd_type = float_round_down;
        break;
    }
    set_float_rounding_mode(rnd_type, &env->fp_status);
}
void helper_fpscr_clrbit (uint32_t bit)
{
    int prev;

    prev = (env->fpscr >> bit) & 1;
    env->fpscr &= ~(1 << bit);
    if (prev == 1) {
        switch (bit) {
        case FPSCR_RN1:
        case FPSCR_RN:
            fpscr_set_rounding_mode();
            break;
        default:
            break;
        }
    }
}
void helper_fpscr_setbit (uint32_t bit)
{
    int prev;

    prev = (env->fpscr >> bit) & 1;
    env->fpscr |= 1 << bit;
    if (prev == 0) {
        switch (bit) {
        case FPSCR_VX:
            env->fpscr |= 1 << FPSCR_FX;
            if (fpscr_ve)
                goto raise_ve;
            break;
        case FPSCR_OX:
            env->fpscr |= 1 << FPSCR_FX;
            if (fpscr_oe)
                goto raise_oe;
            break;
        case FPSCR_UX:
            env->fpscr |= 1 << FPSCR_FX;
            if (fpscr_ue)
                goto raise_ue;
            break;
        case FPSCR_ZX:
            env->fpscr |= 1 << FPSCR_FX;
            if (fpscr_ze)
                goto raise_ze;
            break;
        case FPSCR_XX:
            env->fpscr |= 1 << FPSCR_FX;
            if (fpscr_xe)
                goto raise_xe;
            break;
        case FPSCR_VXSNAN:
        case FPSCR_VXISI:
        case FPSCR_VXIDI:
        case FPSCR_VXZDZ:
        case FPSCR_VXIMZ:
        case FPSCR_VXVC:
        case FPSCR_VXSOFT:
        case FPSCR_VXSQRT:
        case FPSCR_VXCVI:
            env->fpscr |= 1 << FPSCR_VX;
            env->fpscr |= 1 << FPSCR_FX;
            if (fpscr_ve != 0)
                goto raise_ve;
            break;
        case FPSCR_VE:
            if (fpscr_vx != 0) {
            raise_ve:
                env->error_code = POWERPC_EXCP_FP;
                if (fpscr_vxsnan)
                    env->error_code |= POWERPC_EXCP_FP_VXSNAN;
                if (fpscr_vxisi)
                    env->error_code |= POWERPC_EXCP_FP_VXISI;
                if (fpscr_vxidi)
                    env->error_code |= POWERPC_EXCP_FP_VXIDI;
                if (fpscr_vxzdz)
                    env->error_code |= POWERPC_EXCP_FP_VXZDZ;
                if (fpscr_vximz)
                    env->error_code |= POWERPC_EXCP_FP_VXIMZ;
                if (fpscr_vxvc)
                    env->error_code |= POWERPC_EXCP_FP_VXVC;
                if (fpscr_vxsoft)
                    env->error_code |= POWERPC_EXCP_FP_VXSOFT;
                if (fpscr_vxsqrt)
                    env->error_code |= POWERPC_EXCP_FP_VXSQRT;
                if (fpscr_vxcvi)
                    env->error_code |= POWERPC_EXCP_FP_VXCVI;
                goto raise_excp;
            }
            break;
        case FPSCR_OE:
            if (fpscr_ox != 0) {
            raise_oe:
                env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_OX;
                goto raise_excp;
            }
            break;
        case FPSCR_UE:
            if (fpscr_ux != 0) {
            raise_ue:
                env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_UX;
                goto raise_excp;
            }
            break;
        case FPSCR_ZE:
            if (fpscr_zx != 0) {
            raise_ze:
                env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_ZX;
                goto raise_excp;
            }
            break;
        case FPSCR_XE:
            if (fpscr_xx != 0) {
            raise_xe:
                env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_XX;
                goto raise_excp;
            }
            break;
        case FPSCR_RN1:
        case FPSCR_RN:
            fpscr_set_rounding_mode();
            break;
        default:
            break;
        raise_excp:
            /* Update the floating-point enabled exception summary */
            env->fpscr |= 1 << FPSCR_FEX;
            /* We have to update Rc1 before raising the exception */
            env->exception_index = POWERPC_EXCP_PROGRAM;
            break;
        }
    }
}
void helper_store_fpscr (uint64_t arg, uint32_t mask)
{
    /*
     * We use only the 32 LSB of the incoming fpr
     */
    uint32_t prev, new;
    int i;

    prev = env->fpscr;
    new = (uint32_t)arg;
    new &= ~0x60000000;
    new |= prev & 0x60000000;
    for (i = 0; i < 8; i++) {
        if (mask & (1 << i)) {
            env->fpscr &= ~(0xF << (4 * i));
            env->fpscr |= new & (0xF << (4 * i));
        }
    }
    /* Update VX and FEX */
    if (fpscr_ix != 0)
        env->fpscr |= 1 << FPSCR_VX;
    else
        env->fpscr &= ~(1 << FPSCR_VX);
    if ((fpscr_ex & fpscr_eex) != 0) {
        env->fpscr |= 1 << FPSCR_FEX;
        env->exception_index = POWERPC_EXCP_PROGRAM;
        /* XXX: we should compute it properly */
        env->error_code = POWERPC_EXCP_FP;
    } else
        env->fpscr &= ~(1 << FPSCR_FEX);
    fpscr_set_rounding_mode();
}
void helper_float_check_status (void)
{
#ifdef CONFIG_SOFTFLOAT
    if (env->exception_index == POWERPC_EXCP_PROGRAM &&
        (env->error_code & POWERPC_EXCP_FP)) {
        /* Deferred floating-point exception after target FPR update */
        if (msr_fe0 != 0 || msr_fe1 != 0)
            helper_raise_exception_err(env->exception_index, env->error_code);
    } else {
        int status = get_float_exception_flags(&env->fp_status);
        if (status & float_flag_divbyzero) {
            float_zero_divide_excp();
        } else if (status & float_flag_overflow) {
            float_overflow_excp();
        } else if (status & float_flag_underflow) {
            float_underflow_excp();
        } else if (status & float_flag_inexact) {
            float_inexact_excp();
        }
    }
#else
    if (env->exception_index == POWERPC_EXCP_PROGRAM &&
        (env->error_code & POWERPC_EXCP_FP)) {
        /* Deferred floating-point exception after target FPR update */
        if (msr_fe0 != 0 || msr_fe1 != 0)
            helper_raise_exception_err(env->exception_index, env->error_code);
    }
#endif
}

#ifdef CONFIG_SOFTFLOAT
void helper_reset_fpstatus (void)
{
    set_float_exception_flags(0, &env->fp_status);
}
#endif
/* fadd - fadd. */
uint64_t helper_fadd (uint64_t arg1, uint64_t arg2)
{
    CPU_DoubleU farg1, farg2;

    farg1.ll = arg1;
    farg2.ll = arg2;
#if USE_PRECISE_EMULATION
    if (unlikely(float64_is_signaling_nan(farg1.d) ||
                 float64_is_signaling_nan(farg2.d))) {
        /* sNaN addition */
        farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
    } else if (unlikely(float64_is_infinity(farg1.d) && float64_is_infinity(farg2.d) &&
                        float64_is_neg(farg1.d) != float64_is_neg(farg2.d))) {
        /* Magnitude subtraction of infinities */
        farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXISI);
    } else {
        farg1.d = float64_add(farg1.d, farg2.d, &env->fp_status);
    }
#else
    farg1.d = float64_add(farg1.d, farg2.d, &env->fp_status);
#endif
    return farg1.ll;
}
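/* Note on the pattern above, shared by the arithmetic helpers that follow:
 * with USE_PRECISE_EMULATION the IEEE invalid-operation cases (sNaN operands,
 * inf - inf, 0 * inf, ...) are pre-checked so the PowerPC-specific FPSCR bits
 * (VXSNAN, VXISI, VXIMZ, ...) can be raised; otherwise the operation is left
 * entirely to softfloat, which is faster but only sets the generic flags. */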
/* fsub - fsub. */
uint64_t helper_fsub (uint64_t arg1, uint64_t arg2)
{
    CPU_DoubleU farg1, farg2;

    farg1.ll = arg1;
    farg2.ll = arg2;
#if USE_PRECISE_EMULATION
    if (unlikely(float64_is_signaling_nan(farg1.d) ||
                 float64_is_signaling_nan(farg2.d))) {
        /* sNaN subtraction */
        farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
    } else if (unlikely(float64_is_infinity(farg1.d) && float64_is_infinity(farg2.d) &&
                        float64_is_neg(farg1.d) == float64_is_neg(farg2.d))) {
        /* Magnitude subtraction of infinities */
        farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXISI);
    } else {
        farg1.d = float64_sub(farg1.d, farg2.d, &env->fp_status);
    }
#else
    farg1.d = float64_sub(farg1.d, farg2.d, &env->fp_status);
#endif
    return farg1.ll;
}

/* fmul - fmul. */
uint64_t helper_fmul (uint64_t arg1, uint64_t arg2)
{
    CPU_DoubleU farg1, farg2;

    farg1.ll = arg1;
    farg2.ll = arg2;
#if USE_PRECISE_EMULATION
    if (unlikely(float64_is_signaling_nan(farg1.d) ||
                 float64_is_signaling_nan(farg2.d))) {
        /* sNaN multiplication */
        farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
    } else if (unlikely((float64_is_infinity(farg1.d) && float64_is_zero(farg2.d)) ||
                        (float64_is_zero(farg1.d) && float64_is_infinity(farg2.d)))) {
        /* Multiplication of zero by infinity */
        farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXIMZ);
    } else {
        farg1.d = float64_mul(farg1.d, farg2.d, &env->fp_status);
    }
#else
    farg1.d = float64_mul(farg1.d, farg2.d, &env->fp_status);
#endif
    return farg1.ll;
}

/* fdiv - fdiv. */
uint64_t helper_fdiv (uint64_t arg1, uint64_t arg2)
{
    CPU_DoubleU farg1, farg2;

    farg1.ll = arg1;
    farg2.ll = arg2;
#if USE_PRECISE_EMULATION
    if (unlikely(float64_is_signaling_nan(farg1.d) ||
                 float64_is_signaling_nan(farg2.d))) {
        /* sNaN division */
        farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
    } else if (unlikely(float64_is_infinity(farg1.d) && float64_is_infinity(farg2.d))) {
        /* Division of infinity by infinity */
        farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXIDI);
    } else if (unlikely(float64_is_zero(farg1.d) && float64_is_zero(farg2.d))) {
        /* Division of zero by zero */
        farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXZDZ);
    } else {
        farg1.d = float64_div(farg1.d, farg2.d, &env->fp_status);
    }
#else
    farg1.d = float64_div(farg1.d, farg2.d, &env->fp_status);
#endif
    return farg1.ll;
}
/* fabs */
uint64_t helper_fabs (uint64_t arg)
{
    CPU_DoubleU farg;

    farg.ll = arg;
    farg.d = float64_abs(farg.d);
    return farg.ll;
}

/* fnabs */
uint64_t helper_fnabs (uint64_t arg)
{
    CPU_DoubleU farg;

    farg.ll = arg;
    farg.d = float64_abs(farg.d);
    farg.d = float64_chs(farg.d);
    return farg.ll;
}

/* fneg */
uint64_t helper_fneg (uint64_t arg)
{
    CPU_DoubleU farg;

    farg.ll = arg;
    farg.d = float64_chs(farg.d);
    return farg.ll;
}
/* fctiw - fctiw. */
uint64_t helper_fctiw (uint64_t arg)
{
    CPU_DoubleU farg;
    farg.ll = arg;

    if (unlikely(float64_is_signaling_nan(farg.d))) {
        /* sNaN conversion */
        farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN | POWERPC_EXCP_FP_VXCVI);
    } else if (unlikely(float64_is_nan(farg.d) || float64_is_infinity(farg.d))) {
        /* qNaN / infinity conversion */
        farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXCVI);
    } else {
        farg.ll = float64_to_int32(farg.d, &env->fp_status);
#if USE_PRECISE_EMULATION
        /* XXX: higher bits are not supposed to be significant.
         *      To make tests easier, return the same as a real PowerPC 750.
         */
        farg.ll |= 0xFFF80000ULL << 32;
#endif
    }
    return farg.ll;
}

/* fctiwz - fctiwz. */
uint64_t helper_fctiwz (uint64_t arg)
{
    CPU_DoubleU farg;
    farg.ll = arg;

    if (unlikely(float64_is_signaling_nan(farg.d))) {
        /* sNaN conversion */
        farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN | POWERPC_EXCP_FP_VXCVI);
    } else if (unlikely(float64_is_nan(farg.d) || float64_is_infinity(farg.d))) {
        /* qNaN / infinity conversion */
        farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXCVI);
    } else {
        farg.ll = float64_to_int32_round_to_zero(farg.d, &env->fp_status);
#if USE_PRECISE_EMULATION
        /* XXX: higher bits are not supposed to be significant.
         *      To make tests easier, return the same as a real PowerPC 750.
         */
        farg.ll |= 0xFFF80000ULL << 32;
#endif
    }
    return farg.ll;
}
#if defined(TARGET_PPC64)
/* fcfid - fcfid. */
uint64_t helper_fcfid (uint64_t arg)
{
    CPU_DoubleU farg;
    farg.d = int64_to_float64(arg, &env->fp_status);
    return farg.ll;
}

/* fctid - fctid. */
uint64_t helper_fctid (uint64_t arg)
{
    CPU_DoubleU farg;
    farg.ll = arg;

    if (unlikely(float64_is_signaling_nan(farg.d))) {
        /* sNaN conversion */
        farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN | POWERPC_EXCP_FP_VXCVI);
    } else if (unlikely(float64_is_nan(farg.d) || float64_is_infinity(farg.d))) {
        /* qNaN / infinity conversion */
        farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXCVI);
    } else {
        farg.ll = float64_to_int64(farg.d, &env->fp_status);
    }
    return farg.ll;
}

/* fctidz - fctidz. */
uint64_t helper_fctidz (uint64_t arg)
{
    CPU_DoubleU farg;
    farg.ll = arg;

    if (unlikely(float64_is_signaling_nan(farg.d))) {
        /* sNaN conversion */
        farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN | POWERPC_EXCP_FP_VXCVI);
    } else if (unlikely(float64_is_nan(farg.d) || float64_is_infinity(farg.d))) {
        /* qNaN / infinity conversion */
        farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXCVI);
    } else {
        farg.ll = float64_to_int64_round_to_zero(farg.d, &env->fp_status);
    }
    return farg.ll;
}
#endif
static always_inline uint64_t do_fri (uint64_t arg, int rounding_mode)
{
    CPU_DoubleU farg;
    farg.ll = arg;

    if (unlikely(float64_is_signaling_nan(farg.d))) {
        /* sNaN round */
        farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN | POWERPC_EXCP_FP_VXCVI);
    } else if (unlikely(float64_is_nan(farg.d) || float64_is_infinity(farg.d))) {
        /* qNaN / infinity round */
        farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXCVI);
    } else {
        set_float_rounding_mode(rounding_mode, &env->fp_status);
        farg.ll = float64_round_to_int(farg.d, &env->fp_status);
        /* Restore rounding mode from FPSCR */
        fpscr_set_rounding_mode();
    }
    return farg.ll;
}

uint64_t helper_frin (uint64_t arg)
{
    return do_fri(arg, float_round_nearest_even);
}
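/* Note: frin is implemented here with round-to-nearest-even; the ISA appears
 * to define "Round to Integer Nearest" with ties rounding away from zero, so
 * results may differ from hardware on exact halfway cases. */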
uint64_t helper_friz (uint64_t arg)
{
    return do_fri(arg, float_round_to_zero);
}

uint64_t helper_frip (uint64_t arg)
{
    return do_fri(arg, float_round_up);
}

uint64_t helper_frim (uint64_t arg)
{
    return do_fri(arg, float_round_down);
}
/* fmadd - fmadd. */
uint64_t helper_fmadd (uint64_t arg1, uint64_t arg2, uint64_t arg3)
{
    CPU_DoubleU farg1, farg2, farg3;

    farg1.ll = arg1;
    farg2.ll = arg2;
    farg3.ll = arg3;
#if USE_PRECISE_EMULATION
    if (unlikely(float64_is_signaling_nan(farg1.d) ||
                 float64_is_signaling_nan(farg2.d) ||
                 float64_is_signaling_nan(farg3.d))) {
        /* sNaN operation */
        farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
    } else if (unlikely((float64_is_infinity(farg1.d) && float64_is_zero(farg2.d)) ||
                        (float64_is_zero(farg1.d) && float64_is_infinity(farg2.d)))) {
        /* Multiplication of zero by infinity */
        farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXIMZ);
    } else {
#ifdef FLOAT128
        /* This is the way the PowerPC specification defines it */
        float128 ft0_128, ft1_128;

        ft0_128 = float64_to_float128(farg1.d, &env->fp_status);
        ft1_128 = float64_to_float128(farg2.d, &env->fp_status);
        ft0_128 = float128_mul(ft0_128, ft1_128, &env->fp_status);
        if (unlikely(float128_is_infinity(ft0_128) && float64_is_infinity(farg3.d) &&
                     float128_is_neg(ft0_128) != float64_is_neg(farg3.d))) {
            /* Magnitude subtraction of infinities */
            farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXISI);
        } else {
            ft1_128 = float64_to_float128(farg3.d, &env->fp_status);
            ft0_128 = float128_add(ft0_128, ft1_128, &env->fp_status);
            farg1.d = float128_to_float64(ft0_128, &env->fp_status);
        }
#else
        /* This is OK on x86 hosts */
        farg1.d = (farg1.d * farg2.d) + farg3.d;
#endif
    }
#else
    farg1.d = float64_mul(farg1.d, farg2.d, &env->fp_status);
    farg1.d = float64_add(farg1.d, farg3.d, &env->fp_status);
#endif
    return farg1.ll;
}
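/* The 128-bit intermediate above is what makes the multiply-add fused: the
 * product is not rounded back to 64 bits before the addition, matching the
 * PowerPC definition of fmadd. The plain (farg1.d * farg2.d) fallback simply
 * relies on the host's double-precision arithmetic instead. */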
/* fmsub - fmsub. */
uint64_t helper_fmsub (uint64_t arg1, uint64_t arg2, uint64_t arg3)
{
    CPU_DoubleU farg1, farg2, farg3;

    farg1.ll = arg1;
    farg2.ll = arg2;
    farg3.ll = arg3;
#if USE_PRECISE_EMULATION
    if (unlikely(float64_is_signaling_nan(farg1.d) ||
                 float64_is_signaling_nan(farg2.d) ||
                 float64_is_signaling_nan(farg3.d))) {
        /* sNaN operation */
        farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
    } else if (unlikely((float64_is_infinity(farg1.d) && float64_is_zero(farg2.d)) ||
                        (float64_is_zero(farg1.d) && float64_is_infinity(farg2.d)))) {
        /* Multiplication of zero by infinity */
        farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXIMZ);
    } else {
#ifdef FLOAT128
        /* This is the way the PowerPC specification defines it */
        float128 ft0_128, ft1_128;

        ft0_128 = float64_to_float128(farg1.d, &env->fp_status);
        ft1_128 = float64_to_float128(farg2.d, &env->fp_status);
        ft0_128 = float128_mul(ft0_128, ft1_128, &env->fp_status);
        if (unlikely(float128_is_infinity(ft0_128) && float64_is_infinity(farg3.d) &&
                     float128_is_neg(ft0_128) == float64_is_neg(farg3.d))) {
            /* Magnitude subtraction of infinities */
            farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXISI);
        } else {
            ft1_128 = float64_to_float128(farg3.d, &env->fp_status);
            ft0_128 = float128_sub(ft0_128, ft1_128, &env->fp_status);
            farg1.d = float128_to_float64(ft0_128, &env->fp_status);
        }
#else
        /* This is OK on x86 hosts */
        farg1.d = (farg1.d * farg2.d) - farg3.d;
#endif
    }
#else
    farg1.d = float64_mul(farg1.d, farg2.d, &env->fp_status);
    farg1.d = float64_sub(farg1.d, farg3.d, &env->fp_status);
#endif
    return farg1.ll;
}
/* fnmadd - fnmadd. */
uint64_t helper_fnmadd (uint64_t arg1, uint64_t arg2, uint64_t arg3)
{
    CPU_DoubleU farg1, farg2, farg3;

    farg1.ll = arg1;
    farg2.ll = arg2;
    farg3.ll = arg3;

    if (unlikely(float64_is_signaling_nan(farg1.d) ||
                 float64_is_signaling_nan(farg2.d) ||
                 float64_is_signaling_nan(farg3.d))) {
        /* sNaN operation */
        farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
    } else if (unlikely((float64_is_infinity(farg1.d) && float64_is_zero(farg2.d)) ||
                        (float64_is_zero(farg1.d) && float64_is_infinity(farg2.d)))) {
        /* Multiplication of zero by infinity */
        farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXIMZ);
    } else {
#if USE_PRECISE_EMULATION
#ifdef FLOAT128
        /* This is the way the PowerPC specification defines it */
        float128 ft0_128, ft1_128;

        ft0_128 = float64_to_float128(farg1.d, &env->fp_status);
        ft1_128 = float64_to_float128(farg2.d, &env->fp_status);
        ft0_128 = float128_mul(ft0_128, ft1_128, &env->fp_status);
        if (unlikely(float128_is_infinity(ft0_128) && float64_is_infinity(farg3.d) &&
                     float128_is_neg(ft0_128) != float64_is_neg(farg3.d))) {
            /* Magnitude subtraction of infinities */
            farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXISI);
        } else {
            ft1_128 = float64_to_float128(farg3.d, &env->fp_status);
            ft0_128 = float128_add(ft0_128, ft1_128, &env->fp_status);
            farg1.d = float128_to_float64(ft0_128, &env->fp_status);
        }
#else
        /* This is OK on x86 hosts */
        farg1.d = (farg1.d * farg2.d) + farg3.d;
#endif
#else
        farg1.d = float64_mul(farg1.d, farg2.d, &env->fp_status);
        farg1.d = float64_add(farg1.d, farg3.d, &env->fp_status);
#endif
        if (likely(!float64_is_nan(farg1.d)))
            farg1.d = float64_chs(farg1.d);
    }
    return farg1.ll;
}

/* fnmsub - fnmsub. */
uint64_t helper_fnmsub (uint64_t arg1, uint64_t arg2, uint64_t arg3)
{
    CPU_DoubleU farg1, farg2, farg3;

    farg1.ll = arg1;
    farg2.ll = arg2;
    farg3.ll = arg3;

    if (unlikely(float64_is_signaling_nan(farg1.d) ||
                 float64_is_signaling_nan(farg2.d) ||
                 float64_is_signaling_nan(farg3.d))) {
        /* sNaN operation */
        farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
    } else if (unlikely((float64_is_infinity(farg1.d) && float64_is_zero(farg2.d)) ||
                        (float64_is_zero(farg1.d) && float64_is_infinity(farg2.d)))) {
        /* Multiplication of zero by infinity */
        farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXIMZ);
    } else {
#if USE_PRECISE_EMULATION
#ifdef FLOAT128
        /* This is the way the PowerPC specification defines it */
        float128 ft0_128, ft1_128;

        ft0_128 = float64_to_float128(farg1.d, &env->fp_status);
        ft1_128 = float64_to_float128(farg2.d, &env->fp_status);
        ft0_128 = float128_mul(ft0_128, ft1_128, &env->fp_status);
        if (unlikely(float128_is_infinity(ft0_128) && float64_is_infinity(farg3.d) &&
                     float128_is_neg(ft0_128) == float64_is_neg(farg3.d))) {
            /* Magnitude subtraction of infinities */
            farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXISI);
        } else {
            ft1_128 = float64_to_float128(farg3.d, &env->fp_status);
            ft0_128 = float128_sub(ft0_128, ft1_128, &env->fp_status);
            farg1.d = float128_to_float64(ft0_128, &env->fp_status);
        }
#else
        /* This is OK on x86 hosts */
        farg1.d = (farg1.d * farg2.d) - farg3.d;
#endif
#else
        farg1.d = float64_mul(farg1.d, farg2.d, &env->fp_status);
        farg1.d = float64_sub(farg1.d, farg3.d, &env->fp_status);
#endif
        if (likely(!float64_is_nan(farg1.d)))
            farg1.d = float64_chs(farg1.d);
    }
    return farg1.ll;
}
/* frsp - frsp. */
uint64_t helper_frsp (uint64_t arg)
{
    CPU_DoubleU farg;
    float32 f32;
    farg.ll = arg;

#if USE_PRECISE_EMULATION
    if (unlikely(float64_is_signaling_nan(farg.d))) {
        /* sNaN round to single precision */
        farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
    } else {
        f32 = float64_to_float32(farg.d, &env->fp_status);
        farg.d = float32_to_float64(f32, &env->fp_status);
    }
#else
    f32 = float64_to_float32(farg.d, &env->fp_status);
    farg.d = float32_to_float64(f32, &env->fp_status);
#endif
    return farg.ll;
}
/* fsqrt - fsqrt. */
uint64_t helper_fsqrt (uint64_t arg)
{
    CPU_DoubleU farg;
    farg.ll = arg;

    if (unlikely(float64_is_signaling_nan(farg.d))) {
        /* sNaN square root */
        farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
    } else if (unlikely(float64_is_neg(farg.d) && !float64_is_zero(farg.d))) {
        /* Square root of a negative nonzero number */
        farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSQRT);
    } else {
        farg.d = float64_sqrt(farg.d, &env->fp_status);
    }
    return farg.ll;
}
/* fre - fre. */
uint64_t helper_fre (uint64_t arg)
{
    CPU_DoubleU farg;
    farg.ll = arg;

    if (unlikely(float64_is_signaling_nan(farg.d))) {
        /* sNaN reciprocal */
        farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
    } else {
        farg.d = float64_div(float64_one, farg.d, &env->fp_status);
    }
    return farg.ll;
}
/* fres - fres. */
uint64_t helper_fres (uint64_t arg)
{
    CPU_DoubleU farg;
    float32 f32;
    farg.ll = arg;

    if (unlikely(float64_is_signaling_nan(farg.d))) {
        /* sNaN reciprocal */
        farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
    } else {
        farg.d = float64_div(float64_one, farg.d, &env->fp_status);
        f32 = float64_to_float32(farg.d, &env->fp_status);
        farg.d = float32_to_float64(f32, &env->fp_status);
    }
    return farg.ll;
}

/* frsqrte - frsqrte. */
uint64_t helper_frsqrte (uint64_t arg)
{
    CPU_DoubleU farg;
    float32 f32;
    farg.ll = arg;

    if (unlikely(float64_is_signaling_nan(farg.d))) {
        /* sNaN reciprocal square root */
        farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
    } else if (unlikely(float64_is_neg(farg.d) && !float64_is_zero(farg.d))) {
        /* Reciprocal square root of a negative nonzero number */
        farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSQRT);
    } else {
        farg.d = float64_sqrt(farg.d, &env->fp_status);
        farg.d = float64_div(float64_one, farg.d, &env->fp_status);
        f32 = float64_to_float32(farg.d, &env->fp_status);
        farg.d = float32_to_float64(f32, &env->fp_status);
    }
    return farg.ll;
}
/* fsel - fsel. */
uint64_t helper_fsel (uint64_t arg1, uint64_t arg2, uint64_t arg3)
{
    CPU_DoubleU farg1;

    farg1.ll = arg1;

    if ((!float64_is_neg(farg1.d) || float64_is_zero(farg1.d)) && !float64_is_nan(farg1.d))
        return arg2;
    else
        return arg3;
}
void helper_fcmpu (uint64_t arg1, uint64_t arg2, uint32_t crfD)
{
    CPU_DoubleU farg1, farg2;
    uint32_t ret = 0;

    farg1.ll = arg1;
    farg2.ll = arg2;

    if (unlikely(float64_is_nan(farg1.d) ||
                 float64_is_nan(farg2.d))) {
        ret = 0x01UL;
    } else if (float64_lt(farg1.d, farg2.d, &env->fp_status)) {
        ret = 0x08UL;
    } else if (!float64_le(farg1.d, farg2.d, &env->fp_status)) {
        ret = 0x04UL;
    } else {
        ret = 0x02UL;
    }

    env->fpscr &= ~(0x0F << FPSCR_FPRF);
    env->fpscr |= ret << FPSCR_FPRF;
    env->crf[crfD] = ret;
    if (unlikely(ret == 0x01UL
                 && (float64_is_signaling_nan(farg1.d) ||
                     float64_is_signaling_nan(farg2.d)))) {
        /* sNaN comparison */
        fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
    }
}
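/* The 4-bit result written to the CR field and FPCC above encodes:
 * 0x08 = FL (less than), 0x04 = FG (greater than), 0x02 = FE (equal),
 * 0x01 = FU (unordered, i.e. at least one operand is a NaN). */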
void helper_fcmpo (uint64_t arg1, uint64_t arg2, uint32_t crfD)
{
    CPU_DoubleU farg1, farg2;
    uint32_t ret = 0;

    farg1.ll = arg1;
    farg2.ll = arg2;

    if (unlikely(float64_is_nan(farg1.d) ||
                 float64_is_nan(farg2.d))) {
        ret = 0x01UL;
    } else if (float64_lt(farg1.d, farg2.d, &env->fp_status)) {
        ret = 0x08UL;
    } else if (!float64_le(farg1.d, farg2.d, &env->fp_status)) {
        ret = 0x04UL;
    } else {
        ret = 0x02UL;
    }

    env->fpscr &= ~(0x0F << FPSCR_FPRF);
    env->fpscr |= ret << FPSCR_FPRF;
    env->crf[crfD] = ret;
    if (unlikely(ret == 0x01UL)) {
        if (float64_is_signaling_nan(farg1.d) ||
            float64_is_signaling_nan(farg2.d)) {
            /* sNaN comparison */
            fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN |
                                  POWERPC_EXCP_FP_VXVC);
        } else {
            /* qNaN comparison */
            fload_invalid_op_excp(POWERPC_EXCP_FP_VXVC);
        }
    }
}
#if !defined (CONFIG_USER_ONLY)
void helper_store_msr (target_ulong val)
{
    val = hreg_store_msr(env, val, 0);
    if (val != 0) {
        env->interrupt_request |= CPU_INTERRUPT_EXITTB;
        helper_raise_exception(val);
    }
}

static always_inline void do_rfi (target_ulong nip, target_ulong msr,
                                  target_ulong msrm, int keep_msrh)
{
#if defined(TARGET_PPC64)
    if (msr & (1ULL << MSR_SF)) {
        nip = (uint64_t)nip;
        msr &= (uint64_t)msrm;
    } else {
        nip = (uint32_t)nip;
        msr = (uint32_t)(msr & msrm);
        if (keep_msrh)
            msr |= env->msr & ~((uint64_t)0xFFFFFFFF);
    }
#else
    nip = (uint32_t)nip;
    msr &= (uint32_t)msrm;
#endif
    /* XXX: beware: this is false if VLE is supported */
    env->nip = nip & ~((target_ulong)0x00000003);
    hreg_store_msr(env, msr, 1);
#if defined (DEBUG_OP)
    cpu_dump_rfi(env->nip, env->msr);
#endif
    /* No need to raise an exception here,
     * as rfi is always the last insn of a TB
     */
    env->interrupt_request |= CPU_INTERRUPT_EXITTB;
}

void helper_rfi (void)
{
    do_rfi(env->spr[SPR_SRR0], env->spr[SPR_SRR1],
           ~((target_ulong)0x0), 1);
}

#if defined(TARGET_PPC64)
void helper_rfid (void)
{
    do_rfi(env->spr[SPR_SRR0], env->spr[SPR_SRR1],
           ~((target_ulong)0x0), 0);
}

void helper_hrfid (void)
{
    do_rfi(env->spr[SPR_HSRR0], env->spr[SPR_HSRR1],
           ~((target_ulong)0x0), 0);
}
#endif
#endif
void helper_tw (target_ulong arg1, target_ulong arg2, uint32_t flags)
{
    if (!likely(!(((int32_t)arg1 < (int32_t)arg2 && (flags & 0x10)) ||
                  ((int32_t)arg1 > (int32_t)arg2 && (flags & 0x08)) ||
                  ((int32_t)arg1 == (int32_t)arg2 && (flags & 0x04)) ||
                  ((uint32_t)arg1 < (uint32_t)arg2 && (flags & 0x02)) ||
                  ((uint32_t)arg1 > (uint32_t)arg2 && (flags & 0x01))))) {
        helper_raise_exception_err(POWERPC_EXCP_PROGRAM, POWERPC_EXCP_TRAP);
    }
}

#if defined(TARGET_PPC64)
void helper_td (target_ulong arg1, target_ulong arg2, uint32_t flags)
{
    if (!likely(!(((int64_t)arg1 < (int64_t)arg2 && (flags & 0x10)) ||
                  ((int64_t)arg1 > (int64_t)arg2 && (flags & 0x08)) ||
                  ((int64_t)arg1 == (int64_t)arg2 && (flags & 0x04)) ||
                  ((uint64_t)arg1 < (uint64_t)arg2 && (flags & 0x02)) ||
                  ((uint64_t)arg1 > (uint64_t)arg2 && (flags & 0x01)))))
        helper_raise_exception_err(POWERPC_EXCP_PROGRAM, POWERPC_EXCP_TRAP);
}
#endif
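/* The 5-bit condition field tested above follows the TO operand encoding of
 * tw/td: 0x10 = signed less than, 0x08 = signed greater than, 0x04 = equal,
 * 0x02 = unsigned less than, 0x01 = unsigned greater than; the trap is taken
 * when any selected condition holds. */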
/*****************************************************************************/
/* PowerPC 601 specific instructions (POWER bridge) */

target_ulong helper_clcs (uint32_t arg)
{
    switch (arg) {
    case 0x0CUL:
        /* Instruction cache line size */
        return env->icache_line_size;
    case 0x0DUL:
        /* Data cache line size */
        return env->dcache_line_size;
    case 0x0EUL:
        /* Minimum cache line size */
        return (env->icache_line_size < env->dcache_line_size) ?
               env->icache_line_size : env->dcache_line_size;
    case 0x0FUL:
        /* Maximum cache line size */
        return (env->icache_line_size > env->dcache_line_size) ?
               env->icache_line_size : env->dcache_line_size;
    default:
        /* Undefined */
        return 0;
    }
}
target_ulong helper_div (target_ulong arg1, target_ulong arg2)
{
    uint64_t tmp = (uint64_t)arg1 << 32 | env->spr[SPR_MQ];

    if (((int32_t)tmp == INT32_MIN && (int32_t)arg2 == (int32_t)-1) ||
        (int32_t)arg2 == 0) {
        env->spr[SPR_MQ] = 0;
        return INT32_MIN;
    } else {
        env->spr[SPR_MQ] = tmp % arg2;
        return tmp / (int32_t)arg2;
    }
}

target_ulong helper_divo (target_ulong arg1, target_ulong arg2)
{
    uint64_t tmp = (uint64_t)arg1 << 32 | env->spr[SPR_MQ];

    if (((int32_t)tmp == INT32_MIN && (int32_t)arg2 == (int32_t)-1) ||
        (int32_t)arg2 == 0) {
        env->xer |= (1 << XER_OV) | (1 << XER_SO);
        env->spr[SPR_MQ] = 0;
        return INT32_MIN;
    } else {
        env->spr[SPR_MQ] = tmp % arg2;
        tmp /= (int32_t)arg2;
        if ((int32_t)tmp != tmp) {
            env->xer |= (1 << XER_OV) | (1 << XER_SO);
        } else {
            env->xer &= ~(1 << XER_OV);
        }
        return tmp;
    }
}

target_ulong helper_divs (target_ulong arg1, target_ulong arg2)
{
    if (((int32_t)arg1 == INT32_MIN && (int32_t)arg2 == (int32_t)-1) ||
        (int32_t)arg2 == 0) {
        env->spr[SPR_MQ] = 0;
        return INT32_MIN;
    } else {
        env->spr[SPR_MQ] = (int32_t)arg1 % (int32_t)arg2;
        return (int32_t)arg1 / (int32_t)arg2;
    }
}

target_ulong helper_divso (target_ulong arg1, target_ulong arg2)
{
    if (((int32_t)arg1 == INT32_MIN && (int32_t)arg2 == (int32_t)-1) ||
        (int32_t)arg2 == 0) {
        env->xer |= (1 << XER_OV) | (1 << XER_SO);
        env->spr[SPR_MQ] = 0;
        return INT32_MIN;
    } else {
        env->xer &= ~(1 << XER_OV);
        env->spr[SPR_MQ] = (int32_t)arg1 % (int32_t)arg2;
        return (int32_t)arg1 / (int32_t)arg2;
    }
}
#if !defined (CONFIG_USER_ONLY)
target_ulong helper_rac (target_ulong addr)
{
    mmu_ctx_t ctx;
    int nb_BATs;
    target_ulong ret = 0;

    /* We don't have to generate many instances of this instruction,
     * as rac is supervisor only.
     */
    /* XXX: FIX THIS: Pretend we have no BAT */
    nb_BATs = env->nb_BATs;
    env->nb_BATs = 0;
    if (get_physical_address(env, &ctx, addr, 0, ACCESS_INT) == 0)
        ret = ctx.raddr;
    env->nb_BATs = nb_BATs;
    return ret;
}

void helper_rfsvc (void)
{
    do_rfi(env->lr, env->ctr, 0x0000FFFF, 0);
}
#endif
/*****************************************************************************/
/* 602 specific instructions */
/* mfrom is the most crazy instruction ever seen, imho ! */
/* Real implementation uses a ROM table. Do the same */
/* Extremely decomposed:
 *                      -arg / 256
 * return 256 * log10(10           + 1.0) + 0.5
 */
#if !defined (CONFIG_USER_ONLY)
target_ulong helper_602_mfrom (target_ulong arg)
{
    if (likely(arg < 602)) {
#include "mfrom_table.c"
        return mfrom_ROM_table[arg];
    } else {
        return 0;
    }
}
#endif
/*****************************************************************************/
/* Embedded PowerPC specific helpers */

/* XXX: to be improved to check access rights when in user-mode */
target_ulong helper_load_dcr (target_ulong dcrn)
{
    target_ulong val = 0;

    if (unlikely(env->dcr_env == NULL)) {
        qemu_log("No DCR environment\n");
        helper_raise_exception_err(POWERPC_EXCP_PROGRAM,
                                   POWERPC_EXCP_INVAL | POWERPC_EXCP_INVAL_INVAL);
    } else if (unlikely(ppc_dcr_read(env->dcr_env, dcrn, &val) != 0)) {
        qemu_log("DCR read error %d %03x\n", (int)dcrn, (int)dcrn);
        helper_raise_exception_err(POWERPC_EXCP_PROGRAM,
                                   POWERPC_EXCP_INVAL | POWERPC_EXCP_PRIV_REG);
    }
    return val;
}

void helper_store_dcr (target_ulong dcrn, target_ulong val)
{
    if (unlikely(env->dcr_env == NULL)) {
        qemu_log("No DCR environment\n");
        helper_raise_exception_err(POWERPC_EXCP_PROGRAM,
                                   POWERPC_EXCP_INVAL | POWERPC_EXCP_INVAL_INVAL);
    } else if (unlikely(ppc_dcr_write(env->dcr_env, dcrn, val) != 0)) {
        qemu_log("DCR write error %d %03x\n", (int)dcrn, (int)dcrn);
        helper_raise_exception_err(POWERPC_EXCP_PROGRAM,
                                   POWERPC_EXCP_INVAL | POWERPC_EXCP_PRIV_REG);
    }
}
#if !defined(CONFIG_USER_ONLY)
void helper_40x_rfci (void)
{
    do_rfi(env->spr[SPR_40x_SRR2], env->spr[SPR_40x_SRR3],
           ~((target_ulong)0xFFFF0000), 0);
}

void helper_rfci (void)
{
    do_rfi(env->spr[SPR_BOOKE_CSRR0], env->spr[SPR_BOOKE_CSRR1],
           ~((target_ulong)0x3FFF0000), 0);
}

void helper_rfdi (void)
{
    do_rfi(env->spr[SPR_BOOKE_DSRR0], env->spr[SPR_BOOKE_DSRR1],
           ~((target_ulong)0x3FFF0000), 0);
}

void helper_rfmci (void)
{
    do_rfi(env->spr[SPR_BOOKE_MCSRR0], env->spr[SPR_BOOKE_MCSRR1],
           ~((target_ulong)0x3FFF0000), 0);
}
#endif
/* 440 specific */
target_ulong helper_dlmzb (target_ulong high, target_ulong low, uint32_t update_Rc)
{
    target_ulong mask;
    int i;

    i = 1;
    for (mask = 0xFF000000; mask != 0; mask = mask >> 8) {
        if ((high & mask) == 0) {
            if (update_Rc) {
                env->crf[0] = 0x4;
            }
            goto done;
        }
        i++;
    }
    for (mask = 0xFF000000; mask != 0; mask = mask >> 8) {
        if ((low & mask) == 0) {
            if (update_Rc) {
                env->crf[0] = 0x8;
            }
            goto done;
        }
        i++;
    }
    if (update_Rc) {
        env->crf[0] = 0x2;
    }
 done:
    env->xer = (env->xer & ~0x7F) | i;
    if (update_Rc) {
        env->crf[0] |= xer_so;
    }
    return i;
}
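/* dlmzb ("determine leftmost zero byte") scans the 8 bytes formed by high:low
 * from the most significant byte down. As implemented above, i ends up as the
 * 1-based position of the leftmost zero byte (or 9 when none is found) and is
 * written to the low bits of XER; CR0 optionally records whether the zero
 * byte was in the high word (0x4), the low word (0x8), or absent (0x2). */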
/*****************************************************************************/
/* Altivec extension helpers */
#if defined(WORDS_BIGENDIAN)
#define HI_IDX 0
#define LO_IDX 1
#else
#define HI_IDX 1
#define LO_IDX 0
#endif

#if defined(WORDS_BIGENDIAN)
#define VECTOR_FOR_INORDER_I(index, element)                    \
    for (index = 0; index < ARRAY_SIZE(r->element); index++)
#else
#define VECTOR_FOR_INORDER_I(index, element)                    \
    for (index = ARRAY_SIZE(r->element)-1; index >= 0; index--)
#endif
/* If X is a NaN, store the corresponding QNaN into RESULT.  Otherwise,
 * execute the following block.  */
#define DO_HANDLE_NAN(result, x)                                \
    if (float32_is_nan(x) || float32_is_signaling_nan(x)) {     \
        CPU_FloatU __f;                                         \
        __f.f = x;                                              \
        __f.l = __f.l | (1 << 22);  /* Set QNaN bit. */         \
        result = __f.f;                                         \
    } else

#define HANDLE_NAN1(result, x)                  \
    DO_HANDLE_NAN(result, x)
#define HANDLE_NAN2(result, x, y)               \
    DO_HANDLE_NAN(result, x) DO_HANDLE_NAN(result, y)
#define HANDLE_NAN3(result, x, y, z)            \
    DO_HANDLE_NAN(result, x) DO_HANDLE_NAN(result, y) DO_HANDLE_NAN(result, z)
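/* Note the trailing "else" in DO_HANDLE_NAN: chaining the macros turns the
 * caller's block into an else-branch that only runs when none of the inputs
 * is a NaN, e.g. HANDLE_NAN2(r, a, b) { r = op(a, b); } */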
/* Saturating arithmetic helpers.  */
#define SATCVT(from, to, from_type, to_type, min, max, use_min, use_max) \
    static always_inline to_type cvt##from##to (from_type x, int *sat) \
    {                                                           \
        to_type r;                                              \
        if (use_min && x < min) {                               \
            r = min;                                            \
            *sat = 1;                                           \
        } else if (use_max && x > max) {                        \
            r = max;                                            \
            *sat = 1;                                           \
        } else {                                                \
            r = x;                                              \
        }                                                       \
        return r;                                               \
    }
SATCVT(sh, sb, int16_t, int8_t, INT8_MIN, INT8_MAX, 1, 1)
SATCVT(sw, sh, int32_t, int16_t, INT16_MIN, INT16_MAX, 1, 1)
SATCVT(sd, sw, int64_t, int32_t, INT32_MIN, INT32_MAX, 1, 1)
SATCVT(uh, ub, uint16_t, uint8_t, 0, UINT8_MAX, 0, 1)
SATCVT(uw, uh, uint32_t, uint16_t, 0, UINT16_MAX, 0, 1)
SATCVT(ud, uw, uint64_t, uint32_t, 0, UINT32_MAX, 0, 1)
SATCVT(sh, ub, int16_t, uint8_t, 0, UINT8_MAX, 1, 1)
SATCVT(sw, uh, int32_t, uint16_t, 0, UINT16_MAX, 1, 1)
SATCVT(sd, uw, int64_t, uint32_t, 0, UINT32_MAX, 1, 1)
#undef SATCVT
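/* Each SATCVT expansion defines a converter such as cvtshsb(), which narrows
 * a wider element to a saturated narrower one and records any clamping in
 * *sat; the VARITHSAT_* helpers below use these to set VSCR[SAT] when a
 * vector saturating add or subtract overflows. */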
#define LVE(name, access, swap, element)                        \
    void helper_##name (ppc_avr_t *r, target_ulong addr)        \
    {                                                           \
        size_t n_elems = ARRAY_SIZE(r->element);                \
        int adjust = HI_IDX*(n_elems-1);                        \
        int sh = sizeof(r->element[0]) >> 1;                    \
        int index = (addr & 0xf) >> sh;                         \
        if (msr_le) {                                           \
            r->element[LO_IDX ? index : (adjust - index)] = swap(access(addr)); \
        } else {                                                \
            r->element[LO_IDX ? index : (adjust - index)] = access(addr); \
        }                                                       \
    }
#define I(x) (x)
LVE(lvebx, ldub, I, u8)
LVE(lvehx, lduw, bswap16, u16)
LVE(lvewx, ldl, bswap32, u32)
#undef I
#undef LVE
void helper_lvsl (ppc_avr_t *r, target_ulong sh)
{
    int i, j = (sh & 0xf);

    VECTOR_FOR_INORDER_I (i, u8) {
        r->u8[i] = j++;
    }
}

void helper_lvsr (ppc_avr_t *r, target_ulong sh)
{
    int i, j = 0x10 - (sh & 0xf);

    VECTOR_FOR_INORDER_I (i, u8) {
        r->u8[i] = j++;
    }
}
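/* lvsl/lvsr build the permutation control vector that vperm uses to realign
 * unaligned loads: lvsl fills the vector with sh..sh+15 and lvsr with
 * 16-sh..31-sh, where sh is the low nibble of the effective address. */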
#define STVE(name, access, swap, element)                       \
    void helper_##name (ppc_avr_t *r, target_ulong addr)        \
    {                                                           \
        size_t n_elems = ARRAY_SIZE(r->element);                \
        int adjust = HI_IDX*(n_elems-1);                        \
        int sh = sizeof(r->element[0]) >> 1;                    \
        int index = (addr & 0xf) >> sh;                         \
        if (msr_le) {                                           \
            access(addr, swap(r->element[LO_IDX ? index : (adjust - index)])); \
        } else {                                                \
            access(addr, r->element[LO_IDX ? index : (adjust - index)]); \
        }                                                       \
    }
#define I(x) (x)
STVE(stvebx, stb, I, u8)
STVE(stvehx, stw, bswap16, u16)
STVE(stvewx, stl, bswap32, u32)
#undef I
#undef STVE
void helper_mtvscr (ppc_avr_t *r)
{
#if defined(WORDS_BIGENDIAN)
    env->vscr = r->u32[3];
#else
    env->vscr = r->u32[0];
#endif
    set_flush_to_zero(vscr_nj, &env->vec_status);
}

void helper_vaddcuw (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
{
    int i;
    for (i = 0; i < ARRAY_SIZE(r->u32); i++) {
        r->u32[i] = ~a->u32[i] < b->u32[i];
    }
}
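/* vaddcuw computes the carry-out of each 32-bit addition without widening:
 * a + b exceeds UINT32_MAX exactly when b > UINT32_MAX - a, i.e. ~a < b. */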
#define VARITH_DO(name, op, element)                                    \
    void helper_v##name (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)      \
    {                                                                   \
        int i;                                                          \
        for (i = 0; i < ARRAY_SIZE(r->element); i++) {                  \
            r->element[i] = a->element[i] op b->element[i];             \
        }                                                               \
    }
#define VARITH(suffix, element)                 \
    VARITH_DO(add##suffix, +, element)          \
    VARITH_DO(sub##suffix, -, element)
VARITH(ubm, u8)
VARITH(uhm, u16)
VARITH(uwm, u32)
#undef VARITH_DO
#undef VARITH

#define VARITHFP(suffix, func)                                          \
    void helper_v##suffix (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)    \
    {                                                                   \
        int i;                                                          \
        for (i = 0; i < ARRAY_SIZE(r->f); i++) {                        \
            HANDLE_NAN2(r->f[i], a->f[i], b->f[i]) {                    \
                r->f[i] = func(a->f[i], b->f[i], &env->vec_status);     \
            }                                                           \
        }                                                               \
    }
VARITHFP(addfp, float32_add)
VARITHFP(subfp, float32_sub)
#undef VARITHFP
#define VARITHSAT_CASE(type, op, cvt, element)                          \
    {                                                                   \
        type result = (type)a->element[i] op (type)b->element[i];       \
        r->element[i] = cvt(result, &sat);                              \
    }

#define VARITHSAT_DO(name, op, optype, cvt, element)                    \
    void helper_v##name (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)      \
    {                                                                   \
        int sat = 0;                                                    \
        int i;                                                          \
        for (i = 0; i < ARRAY_SIZE(r->element); i++) {                  \
            switch (sizeof(r->element[0])) {                            \
            case 1: VARITHSAT_CASE(optype, op, cvt, element); break;    \
            case 2: VARITHSAT_CASE(optype, op, cvt, element); break;    \
            case 4: VARITHSAT_CASE(optype, op, cvt, element); break;    \
            }                                                           \
        }                                                               \
        if (sat) {                                                      \
            env->vscr |= (1 << VSCR_SAT);                               \
        }                                                               \
    }
#define VARITHSAT_SIGNED(suffix, element, optype, cvt)          \
    VARITHSAT_DO(adds##suffix##s, +, optype, cvt, element)      \
    VARITHSAT_DO(subs##suffix##s, -, optype, cvt, element)
#define VARITHSAT_UNSIGNED(suffix, element, optype, cvt)        \
    VARITHSAT_DO(addu##suffix##s, +, optype, cvt, element)      \
    VARITHSAT_DO(subu##suffix##s, -, optype, cvt, element)
VARITHSAT_SIGNED(b, s8, int16_t, cvtshsb)
VARITHSAT_SIGNED(h, s16, int32_t, cvtswsh)
VARITHSAT_SIGNED(w, s32, int64_t, cvtsdsw)
VARITHSAT_UNSIGNED(b, u8, uint16_t, cvtshub)
VARITHSAT_UNSIGNED(h, u16, uint32_t, cvtswuh)
VARITHSAT_UNSIGNED(w, u32, uint64_t, cvtsduw)
#undef VARITHSAT_CASE
#undef VARITHSAT_DO
#undef VARITHSAT_SIGNED
#undef VARITHSAT_UNSIGNED
2131 #define VAVG_DO(name, element, etype) \
2132 void helper_v##name (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b) \
2134 int i; \
2135 for (i = 0; i < ARRAY_SIZE(r->element); i++) { \
2136 etype x = (etype)a->element[i] + (etype)b->element[i] + 1; \
2137 r->element[i] = x >> 1; \
2141 #define VAVG(type, signed_element, signed_type, unsigned_element, unsigned_type) \
2142 VAVG_DO(avgs##type, signed_element, signed_type) \
2143 VAVG_DO(avgu##type, unsigned_element, unsigned_type)
2144 VAVG(b, s8, int16_t, u8, uint16_t)
2145 VAVG(h, s16, int32_t, u16, uint32_t)
2146 VAVG(w, s32, int64_t, u32, uint64_t)
2147 #undef VAVG_DO
2148 #undef VAVG
2150 #define VCF(suffix, cvt, element) \
2151 void helper_vcf##suffix (ppc_avr_t *r, ppc_avr_t *b, uint32_t uim) \
2153 int i; \
2154 for (i = 0; i < ARRAY_SIZE(r->f); i++) { \
2155 float32 t = cvt(b->element[i], &env->vec_status); \
2156 r->f[i] = float32_scalbn (t, -uim, &env->vec_status); \
2159 VCF(ux, uint32_to_float32, u32)
2160 VCF(sx, int32_to_float32, s32)
2161 #undef VCF
2163 #define VCMP_DO(suffix, compare, element, record) \
2164 void helper_vcmp##suffix (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b) \
2166 uint32_t ones = (uint32_t)-1; \
2167 uint32_t all = ones; \
2168 uint32_t none = 0; \
2169 int i; \
2170 for (i = 0; i < ARRAY_SIZE(r->element); i++) { \
2171 uint32_t result = (a->element[i] compare b->element[i] ? ones : 0x0); \
2172 switch (sizeof (a->element[0])) { \
2173 case 4: r->u32[i] = result; break; \
2174 case 2: r->u16[i] = result; break; \
2175 case 1: r->u8[i] = result; break; \
2177 all &= result; \
2178 none |= result; \
2180 if (record) { \
2181 env->crf[6] = ((all != 0) << 3) | ((none == 0) << 1); \
2184 #define VCMP(suffix, compare, element) \
2185 VCMP_DO(suffix, compare, element, 0) \
2186 VCMP_DO(suffix##_dot, compare, element, 1)
2187 VCMP(equb, ==, u8)
2188 VCMP(equh, ==, u16)
2189 VCMP(equw, ==, u32)
2190 VCMP(gtub, >, u8)
2191 VCMP(gtuh, >, u16)
2192 VCMP(gtuw, >, u32)
2193 VCMP(gtsb, >, s8)
2194 VCMP(gtsh, >, s16)
2195 VCMP(gtsw, >, s32)
2196 #undef VCMP_DO
2197 #undef VCMP
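/* Illustrative note: in the record ("dot") forms above, CR field 6 reads
 * 0x8 when the predicate held for every element and 0x2 when it held for
 * none; a mixed result leaves both bits clear. Note that the variable
 * named "none" actually accumulates "any element matched". */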
2199 #define VCMPFP_DO(suffix, compare, order, record) \
2200 void helper_vcmp##suffix (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b) \
2202 uint32_t ones = (uint32_t)-1; \
2203 uint32_t all = ones; \
2204 uint32_t none = 0; \
2205 int i; \
2206 for (i = 0; i < ARRAY_SIZE(r->f); i++) { \
2207 uint32_t result; \
2208 int rel = float32_compare_quiet(a->f[i], b->f[i], &env->vec_status); \
2209 if (rel == float_relation_unordered) { \
2210 result = 0; \
2211 } else if (rel compare order) { \
2212 result = ones; \
2213 } else { \
2214 result = 0; \
2216 r->u32[i] = result; \
2217 all &= result; \
2218 none |= result; \
2220 if (record) { \
2221 env->crf[6] = ((all != 0) << 3) | ((none == 0) << 1); \
2224 #define VCMPFP(suffix, compare, order) \
2225 VCMPFP_DO(suffix, compare, order, 0) \
2226 VCMPFP_DO(suffix##_dot, compare, order, 1)
2227 VCMPFP(eqfp, ==, float_relation_equal)
2228 VCMPFP(gefp, !=, float_relation_less)
2229 VCMPFP(gtfp, ==, float_relation_greater)
2230 #undef VCMPFP_DO
2231 #undef VCMPFP
2233 static always_inline void vcmpbfp_internal (ppc_avr_t *r, ppc_avr_t *a,
2234 ppc_avr_t *b, int record)
2236 int i;
2237 int all_in = 0;
2238 for (i = 0; i < ARRAY_SIZE(r->f); i++) {
2239 int le_rel = float32_compare_quiet(a->f[i], b->f[i], &env->vec_status);
2240 if (le_rel == float_relation_unordered) {
2241 r->u32[i] = 0xc0000000;
2242 /* ALL_IN does not need to be updated here. */
2243 } else {
2244 float32 bneg = float32_chs(b->f[i]);
2245 int ge_rel = float32_compare_quiet(a->f[i], bneg, &env->vec_status);
2246 int le = le_rel != float_relation_greater;
2247 int ge = ge_rel != float_relation_less;
2248 r->u32[i] = ((!le) << 31) | ((!ge) << 30);
2249 all_in |= (!le | !ge);
2252 if (record) {
2253 env->crf[6] = (all_in == 0) << 1;
2257 void helper_vcmpbfp (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
2259 vcmpbfp_internal(r, a, b, 0);
2262 void helper_vcmpbfp_dot (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
2264 vcmpbfp_internal(r, a, b, 1);
2267 #define VCT(suffix, satcvt, element) \
2268 void helper_vct##suffix (ppc_avr_t *r, ppc_avr_t *b, uint32_t uim) \
2270 int i; \
2271 int sat = 0; \
2272 float_status s = env->vec_status; \
2273 set_float_rounding_mode(float_round_to_zero, &s); \
2274 for (i = 0; i < ARRAY_SIZE(r->f); i++) { \
2275 if (float32_is_nan(b->f[i]) || \
2276 float32_is_signaling_nan(b->f[i])) { \
2277 r->element[i] = 0; \
2278 } else { \
2279 float64 t = float32_to_float64(b->f[i], &s); \
2280 int64_t j; \
2281 t = float64_scalbn(t, uim, &s); \
2282 j = float64_to_int64(t, &s); \
2283 r->element[i] = satcvt(j, &sat); \
2286 if (sat) { \
2287 env->vscr |= (1 << VSCR_SAT); \
2290 VCT(uxs, cvtsduw, u32)
2291 VCT(sxs, cvtsdsw, s32)
2292 #undef VCT
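/* Illustrative note: the conversions above scale by 2**uim before
 * truncating, so vctuxs with uim = 1 turns 3.5f into 7, and the
 * saturating converters (cvtsduw/cvtsdsw) set VSCR[SAT] when the scaled
 * value does not fit the 32-bit result. */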
2294 void helper_vmaddfp (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, ppc_avr_t *c)
2296 int i;
2297 for (i = 0; i < ARRAY_SIZE(r->f); i++) {
2298 HANDLE_NAN3(r->f[i], a->f[i], b->f[i], c->f[i]) {
2299 /* Need to do the computation in higher precision and round
2300 * once at the end. */
2301 float64 af, bf, cf, t;
2302 af = float32_to_float64(a->f[i], &env->vec_status);
2303 bf = float32_to_float64(b->f[i], &env->vec_status);
2304 cf = float32_to_float64(c->f[i], &env->vec_status);
2305 t = float64_mul(af, cf, &env->vec_status);
2306 t = float64_add(t, bf, &env->vec_status);
2307 r->f[i] = float64_to_float32(t, &env->vec_status);
2312 void helper_vmhaddshs (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, ppc_avr_t *c)
2314 int sat = 0;
2315 int i;
2317 for (i = 0; i < ARRAY_SIZE(r->s16); i++) {
2318 int32_t prod = a->s16[i] * b->s16[i];
2319 int32_t t = (int32_t)c->s16[i] + (prod >> 15);
2320 r->s16[i] = cvtswsh (t, &sat);
2323 if (sat) {
2324 env->vscr |= (1 << VSCR_SAT);
2328 void helper_vmhraddshs (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, ppc_avr_t *c)
2330 int sat = 0;
2331 int i;
2333 for (i = 0; i < ARRAY_SIZE(r->s16); i++) {
2334 int32_t prod = a->s16[i] * b->s16[i] + 0x00004000;
2335 int32_t t = (int32_t)c->s16[i] + (prod >> 15);
2336 r->s16[i] = cvtswsh (t, &sat);
2339 if (sat) {
2340 env->vscr |= (1 << VSCR_SAT);
2344 #define VMINMAX_DO(name, compare, element) \
2345 void helper_v##name (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b) \
2347 int i; \
2348 for (i = 0; i < ARRAY_SIZE(r->element); i++) { \
2349 if (a->element[i] compare b->element[i]) { \
2350 r->element[i] = b->element[i]; \
2351 } else { \
2352 r->element[i] = a->element[i]; \
2356 #define VMINMAX(suffix, element) \
2357 VMINMAX_DO(min##suffix, >, element) \
2358 VMINMAX_DO(max##suffix, <, element)
2359 VMINMAX(sb, s8)
2360 VMINMAX(sh, s16)
2361 VMINMAX(sw, s32)
2362 VMINMAX(ub, u8)
2363 VMINMAX(uh, u16)
2364 VMINMAX(uw, u32)
2365 #undef VMINMAX_DO
2366 #undef VMINMAX
2368 #define VMINMAXFP(suffix, rT, rF) \
2369 void helper_v##suffix (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b) \
2371 int i; \
2372 for (i = 0; i < ARRAY_SIZE(r->f); i++) { \
2373 HANDLE_NAN2(r->f[i], a->f[i], b->f[i]) { \
2374 if (float32_lt_quiet(a->f[i], b->f[i], &env->vec_status)) { \
2375 r->f[i] = rT->f[i]; \
2376 } else { \
2377 r->f[i] = rF->f[i]; \
2382 VMINMAXFP(minfp, a, b)
2383 VMINMAXFP(maxfp, b, a)
2384 #undef VMINMAXFP
2386 void helper_vmladduhm (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, ppc_avr_t *c)
2388 int i;
2389 for (i = 0; i < ARRAY_SIZE(r->s16); i++) {
2390 int32_t prod = a->s16[i] * b->s16[i];
2391 r->s16[i] = (int16_t) (prod + c->s16[i]);
2395 #define VMRG_DO(name, element, highp) \
2396 void helper_v##name (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b) \
2398 ppc_avr_t result; \
2399 int i; \
2400 size_t n_elems = ARRAY_SIZE(r->element); \
2401 for (i = 0; i < n_elems/2; i++) { \
2402 if (highp) { \
2403 result.element[i*2+HI_IDX] = a->element[i]; \
2404 result.element[i*2+LO_IDX] = b->element[i]; \
2405 } else { \
2406 result.element[n_elems - i*2 - (1+HI_IDX)] = b->element[n_elems - i - 1]; \
2407 result.element[n_elems - i*2 - (1+LO_IDX)] = a->element[n_elems - i - 1]; \
2410 *r = result; \
2412 #if defined(WORDS_BIGENDIAN)
2413 #define MRGHI 0
2414 #define MRGLO 1
2415 #else
2416 #define MRGHI 1
2417 #define MRGLO 0
2418 #endif
2419 #define VMRG(suffix, element) \
2420 VMRG_DO(mrgl##suffix, element, MRGHI) \
2421 VMRG_DO(mrgh##suffix, element, MRGLO)
2422 VMRG(b, u8)
2423 VMRG(h, u16)
2424 VMRG(w, u32)
2425 #undef VMRG_DO
2426 #undef VMRG
2427 #undef MRGHI
2428 #undef MRGLO
2430 void helper_vmsummbm (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, ppc_avr_t *c)
2432 int32_t prod[16];
2433 int i;
2435 for (i = 0; i < ARRAY_SIZE(r->s8); i++) {
2436 prod[i] = (int32_t)a->s8[i] * b->u8[i];
2439 VECTOR_FOR_INORDER_I(i, s32) {
2440 r->s32[i] = c->s32[i] + prod[4*i] + prod[4*i+1] + prod[4*i+2] + prod[4*i+3];
2444 void helper_vmsumshm (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, ppc_avr_t *c)
2446 int32_t prod[8];
2447 int i;
2449 for (i = 0; i < ARRAY_SIZE(r->s16); i++) {
2450 prod[i] = a->s16[i] * b->s16[i];
2453 VECTOR_FOR_INORDER_I(i, s32) {
2454 r->s32[i] = c->s32[i] + prod[2*i] + prod[2*i+1];
2458 void helper_vmsumshs (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, ppc_avr_t *c)
2460 int32_t prod[8];
2461 int i;
2462 int sat = 0;
2464 for (i = 0; i < ARRAY_SIZE(r->s16); i++) {
2465 prod[i] = (int32_t)a->s16[i] * b->s16[i];
2468 VECTOR_FOR_INORDER_I (i, s32) {
2469 int64_t t = (int64_t)c->s32[i] + prod[2*i] + prod[2*i+1];
2470 r->u32[i] = cvtsdsw(t, &sat);
2473 if (sat) {
2474 env->vscr |= (1 << VSCR_SAT);
2478 void helper_vmsumubm (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, ppc_avr_t *c)
2480 uint16_t prod[16];
2481 int i;
2483 for (i = 0; i < ARRAY_SIZE(r->u8); i++) {
2484 prod[i] = a->u8[i] * b->u8[i];
2487 VECTOR_FOR_INORDER_I(i, u32) {
2488 r->u32[i] = c->u32[i] + prod[4*i] + prod[4*i+1] + prod[4*i+2] + prod[4*i+3];
2492 void helper_vmsumuhm (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, ppc_avr_t *c)
2494 uint32_t prod[8];
2495 int i;
2497 for (i = 0; i < ARRAY_SIZE(r->u16); i++) {
2498 prod[i] = a->u16[i] * b->u16[i];
2501 VECTOR_FOR_INORDER_I(i, u32) {
2502 r->u32[i] = c->u32[i] + prod[2*i] + prod[2*i+1];
2506 void helper_vmsumuhs (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, ppc_avr_t *c)
2508 uint32_t prod[8];
2509 int i;
2510 int sat = 0;
2512 for (i = 0; i < ARRAY_SIZE(r->u16); i++) {
2513 prod[i] = a->u16[i] * b->u16[i];
2516 VECTOR_FOR_INORDER_I (i, s32) {
2517 uint64_t t = (uint64_t)c->u32[i] + prod[2*i] + prod[2*i+1];
2518 r->u32[i] = cvtuduw(t, &sat);
2521 if (sat) {
2522 env->vscr |= (1 << VSCR_SAT);
2526 #define VMUL_DO(name, mul_element, prod_element, evenp) \
2527 void helper_v##name (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b) \
2529 int i; \
2530 VECTOR_FOR_INORDER_I(i, prod_element) { \
2531 if (evenp) { \
2532 r->prod_element[i] = a->mul_element[i*2+HI_IDX] * b->mul_element[i*2+HI_IDX]; \
2533 } else { \
2534 r->prod_element[i] = a->mul_element[i*2+LO_IDX] * b->mul_element[i*2+LO_IDX]; \
2538 #define VMUL(suffix, mul_element, prod_element) \
2539 VMUL_DO(mule##suffix, mul_element, prod_element, 1) \
2540 VMUL_DO(mulo##suffix, mul_element, prod_element, 0)
2541 VMUL(sb, s8, s16)
2542 VMUL(sh, s16, s32)
2543 VMUL(ub, u8, u16)
2544 VMUL(uh, u16, u32)
2545 #undef VMUL_DO
2546 #undef VMUL
2548 void helper_vnmsubfp (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, ppc_avr_t *c)
2550 int i;
2551 for (i = 0; i < ARRAY_SIZE(r->f); i++) {
2552 HANDLE_NAN3(r->f[i], a->f[i], b->f[i], c->f[i]) {
2553 /* Need to do the computation in higher precision and round
2554 * once at the end. */
2555 float64 af, bf, cf, t;
2556 af = float32_to_float64(a->f[i], &env->vec_status);
2557 bf = float32_to_float64(b->f[i], &env->vec_status);
2558 cf = float32_to_float64(c->f[i], &env->vec_status);
2559 t = float64_mul(af, cf, &env->vec_status);
2560 t = float64_sub(t, bf, &env->vec_status);
2561 t = float64_chs(t);
2562 r->f[i] = float64_to_float32(t, &env->vec_status);
2567 void helper_vperm (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, ppc_avr_t *c)
2569 ppc_avr_t result;
2570 int i;
2571 VECTOR_FOR_INORDER_I (i, u8) {
2572 int s = c->u8[i] & 0x1f;
2573 #if defined(WORDS_BIGENDIAN)
2574 int index = s & 0xf;
2575 #else
2576 int index = 15 - (s & 0xf);
2577 #endif
2578 if (s & 0x10) {
2579 result.u8[i] = b->u8[index];
2580 } else {
2581 result.u8[i] = a->u8[index];
2584 *r = result;
2587 #if defined(WORDS_BIGENDIAN)
2588 #define PKBIG 1
2589 #else
2590 #define PKBIG 0
2591 #endif
2592 void helper_vpkpx (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
2594 int i, j;
2595 ppc_avr_t result;
2596 #if defined(WORDS_BIGENDIAN)
2597 const ppc_avr_t *x[2] = { a, b };
2598 #else
2599 const ppc_avr_t *x[2] = { b, a };
2600 #endif
2602 VECTOR_FOR_INORDER_I (i, u64) {
2603 VECTOR_FOR_INORDER_I (j, u32){
2604 uint32_t e = x[i]->u32[j];
2605 result.u16[4*i+j] = (((e >> 9) & 0xfc00) |
2606 ((e >> 6) & 0x3e0) |
2607 ((e >> 3) & 0x1f));
2610 *r = result;
2613 #define VPK(suffix, from, to, cvt, dosat) \
2614 void helper_vpk##suffix (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b) \
2616 int i; \
2617 int sat = 0; \
2618 ppc_avr_t result; \
2619 ppc_avr_t *a0 = PKBIG ? a : b; \
2620 ppc_avr_t *a1 = PKBIG ? b : a; \
2621 VECTOR_FOR_INORDER_I (i, from) { \
2622 result.to[i] = cvt(a0->from[i], &sat); \
2623 result.to[i+ARRAY_SIZE(r->from)] = cvt(a1->from[i], &sat); \
2625 *r = result; \
2626 if (dosat && sat) { \
2627 env->vscr |= (1 << VSCR_SAT); \
2630 #define I(x, y) (x)
2631 VPK(shss, s16, s8, cvtshsb, 1)
2632 VPK(shus, s16, u8, cvtshub, 1)
2633 VPK(swss, s32, s16, cvtswsh, 1)
2634 VPK(swus, s32, u16, cvtswuh, 1)
2635 VPK(uhus, u16, u8, cvtuhub, 1)
2636 VPK(uwus, u32, u16, cvtuwuh, 1)
2637 VPK(uhum, u16, u8, I, 0)
2638 VPK(uwum, u32, u16, I, 0)
2639 #undef I
2640 #undef VPK
2641 #undef PKBIG
2643 void helper_vrefp (ppc_avr_t *r, ppc_avr_t *b)
2645 int i;
2646 for (i = 0; i < ARRAY_SIZE(r->f); i++) {
2647 HANDLE_NAN1(r->f[i], b->f[i]) {
2648 r->f[i] = float32_div(float32_one, b->f[i], &env->vec_status);
2653 #define VRFI(suffix, rounding) \
2654 void helper_vrfi##suffix (ppc_avr_t *r, ppc_avr_t *b) \
2656 int i; \
2657 float_status s = env->vec_status; \
2658 set_float_rounding_mode(rounding, &s); \
2659 for (i = 0; i < ARRAY_SIZE(r->f); i++) { \
2660 HANDLE_NAN1(r->f[i], b->f[i]) { \
2661 r->f[i] = float32_round_to_int (b->f[i], &s); \
2665 VRFI(n, float_round_nearest_even)
2666 VRFI(m, float_round_down)
2667 VRFI(p, float_round_up)
2668 VRFI(z, float_round_to_zero)
2669 #undef VRFI
2671 #define VROTATE(suffix, element) \
2672 void helper_vrl##suffix (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b) \
2674 int i; \
2675 for (i = 0; i < ARRAY_SIZE(r->element); i++) { \
2676 unsigned int mask = ((1 << (3 + (sizeof (a->element[0]) >> 1))) - 1); \
2677 unsigned int shift = b->element[i] & mask; \
2678 r->element[i] = (a->element[i] << shift) | (a->element[i] >> (sizeof(a->element[0]) * 8 - shift)); \
2681 VROTATE(b, u8)
2682 VROTATE(h, u16)
2683 VROTATE(w, u32)
2684 #undef VROTATE
2686 void helper_vrsqrtefp (ppc_avr_t *r, ppc_avr_t *b)
2688 int i;
2689 for (i = 0; i < ARRAY_SIZE(r->f); i++) {
2690 HANDLE_NAN1(r->f[i], b->f[i]) {
2691 float32 t = float32_sqrt(b->f[i], &env->vec_status);
2692 r->f[i] = float32_div(float32_one, t, &env->vec_status);
2697 void helper_vsel (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, ppc_avr_t *c)
2698 {
2699 r->u64[0] = (a->u64[0] & ~c->u64[0]) | (b->u64[0] & c->u64[0]);
2700 r->u64[1] = (a->u64[1] & ~c->u64[1]) | (b->u64[1] & c->u64[1]);
2701 }
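/* Illustrative note: vsel is a pure bit-wise multiplex: each result bit
 * comes from b where the corresponding bit of c is set and from a where
 * it is clear, so c acts as a per-bit mask rather than a per-element
 * one. */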
2703 void helper_vlogefp (ppc_avr_t *r, ppc_avr_t *b)
2705 int i;
2706 for (i = 0; i < ARRAY_SIZE(r->f); i++) {
2707 HANDLE_NAN1(r->f[i], b->f[i]) {
2708 r->f[i] = float32_log2(b->f[i], &env->vec_status);
2713 #if defined(WORDS_BIGENDIAN)
2714 #define LEFT 0
2715 #define RIGHT 1
2716 #else
2717 #define LEFT 1
2718 #define RIGHT 0
2719 #endif
2720 /* The specification says that the results are undefined if all of the
2721 * shift counts are not identical. We check that they are identical,
2722 * to conform to what real hardware appears to do. */
2723 #define VSHIFT(suffix, leftp) \
2724 void helper_vs##suffix (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b) \
2725 { \
2726 int shift = b->u8[LO_IDX*0xf] & 0x7; \
2727 int doit = 1; \
2728 int i; \
2729 for (i = 0; i < ARRAY_SIZE(r->u8); i++) { \
2730 doit = doit && ((b->u8[i] & 0x7) == shift); \
2731 } \
2732 if (doit) { \
2733 if (shift == 0) { \
2734 *r = *a; \
2735 } else if (leftp) { \
2736 uint64_t carry = a->u64[LO_IDX] >> (64 - shift); \
2737 r->u64[HI_IDX] = (a->u64[HI_IDX] << shift) | carry; \
2738 r->u64[LO_IDX] = a->u64[LO_IDX] << shift; \
2739 } else { \
2740 uint64_t carry = a->u64[HI_IDX] << (64 - shift); \
2741 r->u64[LO_IDX] = (a->u64[LO_IDX] >> shift) | carry; \
2742 r->u64[HI_IDX] = a->u64[HI_IDX] >> shift; \
2743 } \
2744 } \
2745 }
2746 VSHIFT(l, LEFT)
2747 VSHIFT(r, RIGHT)
2748 #undef VSHIFT
2749 #undef LEFT
2750 #undef RIGHT
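/* Illustrative note: vsl/vsr shift the full 128-bit register by 0-7 bits,
 * carrying bits across the two 64-bit halves. For a left shift of 3:
 *   r->u64[HI_IDX] = (a->u64[HI_IDX] << 3) | (a->u64[LO_IDX] >> 61);
 *   r->u64[LO_IDX] = a->u64[LO_IDX] << 3;
 * which is why the helper insists on identical shift counts in every
 * byte of b before doing anything. */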
2752 #define VSL(suffix, element) \
2753 void helper_vsl##suffix (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b) \
2755 int i; \
2756 for (i = 0; i < ARRAY_SIZE(r->element); i++) { \
2757 unsigned int mask = ((1 << (3 + (sizeof (a->element[0]) >> 1))) - 1); \
2758 unsigned int shift = b->element[i] & mask; \
2759 r->element[i] = a->element[i] << shift; \
2762 VSL(b, u8)
2763 VSL(h, u16)
2764 VSL(w, u32)
2765 #undef VSL
2767 void helper_vsldoi (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, uint32_t shift)
2769 int sh = shift & 0xf;
2770 int i;
2771 ppc_avr_t result;
2773 #if defined(WORDS_BIGENDIAN)
2774 for (i = 0; i < ARRAY_SIZE(r->u8); i++) {
2775 int index = sh + i;
2776 if (index > 0xf) {
2777 result.u8[i] = b->u8[index-0x10];
2778 } else {
2779 result.u8[i] = a->u8[index];
2782 #else
2783 for (i = 0; i < ARRAY_SIZE(r->u8); i++) {
2784 int index = (16 - sh) + i;
2785 if (index > 0xf) {
2786 result.u8[i] = a->u8[index-0x10];
2787 } else {
2788 result.u8[i] = b->u8[index];
2791 #endif
2792 *r = result;
2793 }
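/* Illustrative note: vsldoi concatenates a:b and takes 16 bytes starting
 * at byte offset sh. In big-endian terms, sh = 5 yields a->u8[5..15]
 * followed by b->u8[0..4]. */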
2795 void helper_vslo (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
2797 int sh = (b->u8[LO_IDX*0xf] >> 3) & 0xf;
2799 #if defined (WORDS_BIGENDIAN)
2800 memmove (&r->u8[0], &a->u8[sh], 16-sh);
2801 memset (&r->u8[16-sh], 0, sh);
2802 #else
2803 memmove (&r->u8[sh], &a->u8[0], 16-sh);
2804 memset (&r->u8[0], 0, sh);
2805 #endif
2808 /* Experimental testing shows that hardware masks the immediate. */
2809 #define _SPLAT_MASKED(element) (splat & (ARRAY_SIZE(r->element) - 1))
2810 #if defined(WORDS_BIGENDIAN)
2811 #define SPLAT_ELEMENT(element) _SPLAT_MASKED(element)
2812 #else
2813 #define SPLAT_ELEMENT(element) (ARRAY_SIZE(r->element)-1 - _SPLAT_MASKED(element))
2814 #endif
2815 #define VSPLT(suffix, element) \
2816 void helper_vsplt##suffix (ppc_avr_t *r, ppc_avr_t *b, uint32_t splat) \
2818 uint32_t s = b->element[SPLAT_ELEMENT(element)]; \
2819 int i; \
2820 for (i = 0; i < ARRAY_SIZE(r->element); i++) { \
2821 r->element[i] = s; \
2824 VSPLT(b, u8)
2825 VSPLT(h, u16)
2826 VSPLT(w, u32)
2827 #undef VSPLT
2828 #undef SPLAT_ELEMENT
2829 #undef _SPLAT_MASKED
2831 #define VSPLTI(suffix, element, splat_type) \
2832 void helper_vspltis##suffix (ppc_avr_t *r, uint32_t splat) \
2834 splat_type x = (int8_t)(splat << 3) >> 3; \
2835 int i; \
2836 for (i = 0; i < ARRAY_SIZE(r->element); i++) { \
2837 r->element[i] = x; \
2840 VSPLTI(b, s8, int8_t)
2841 VSPLTI(h, s16, int16_t)
2842 VSPLTI(w, s32, int32_t)
2843 #undef VSPLTI
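/* Illustrative note: the (int8_t)(splat << 3) >> 3 expression above
 * sign-extends the 5-bit SIMM field: splat = 0x1f gives
 * (int8_t)0xf8 >> 3 = -1, so vspltisb splats 0xff into every byte. */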
2845 #define VSR(suffix, element) \
2846 void helper_vsr##suffix (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b) \
2848 int i; \
2849 for (i = 0; i < ARRAY_SIZE(r->element); i++) { \
2850 unsigned int mask = ((1 << (3 + (sizeof (a->element[0]) >> 1))) - 1); \
2851 unsigned int shift = b->element[i] & mask; \
2852 r->element[i] = a->element[i] >> shift; \
2855 VSR(ab, s8)
2856 VSR(ah, s16)
2857 VSR(aw, s32)
2858 VSR(b, u8)
2859 VSR(h, u16)
2860 VSR(w, u32)
2861 #undef VSR
2863 void helper_vsro (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
2865 int sh = (b->u8[LO_IDX*0xf] >> 3) & 0xf;
2867 #if defined (WORDS_BIGENDIAN)
2868 memmove (&r->u8[sh], &a->u8[0], 16-sh);
2869 memset (&r->u8[0], 0, sh);
2870 #else
2871 memmove (&r->u8[0], &a->u8[sh], 16-sh);
2872 memset (&r->u8[16-sh], 0, sh);
2873 #endif
2876 void helper_vsubcuw (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
2878 int i;
2879 for (i = 0; i < ARRAY_SIZE(r->u32); i++) {
2880 r->u32[i] = a->u32[i] >= b->u32[i];
2884 void helper_vsumsws (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
2886 int64_t t;
2887 int i, upper;
2888 ppc_avr_t result;
2889 int sat = 0;
2891 #if defined(WORDS_BIGENDIAN)
2892 upper = ARRAY_SIZE(r->s32)-1;
2893 #else
2894 upper = 0;
2895 #endif
2896 t = (int64_t)b->s32[upper];
2897 for (i = 0; i < ARRAY_SIZE(r->s32); i++) {
2898 t += a->s32[i];
2899 result.s32[i] = 0;
2901 result.s32[upper] = cvtsdsw(t, &sat);
2902 *r = result;
2904 if (sat) {
2905 env->vscr |= (1 << VSCR_SAT);
2909 void helper_vsum2sws (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
2911 int i, j, upper;
2912 ppc_avr_t result;
2913 int sat = 0;
2915 #if defined(WORDS_BIGENDIAN)
2916 upper = 1;
2917 #else
2918 upper = 0;
2919 #endif
2920 for (i = 0; i < ARRAY_SIZE(r->u64); i++) {
2921 int64_t t = (int64_t)b->s32[upper+i*2];
2922 result.u64[i] = 0;
2923 for (j = 0; j < ARRAY_SIZE(r->u64); j++) {
2924 t += a->s32[2*i+j];
2926 result.s32[upper+i*2] = cvtsdsw(t, &sat);
2929 *r = result;
2930 if (sat) {
2931 env->vscr |= (1 << VSCR_SAT);
2935 void helper_vsum4sbs (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
2937 int i, j;
2938 int sat = 0;
2940 for (i = 0; i < ARRAY_SIZE(r->s32); i++) {
2941 int64_t t = (int64_t)b->s32[i];
2942 for (j = 0; j < ARRAY_SIZE(r->s32); j++) {
2943 t += a->s8[4*i+j];
2945 r->s32[i] = cvtsdsw(t, &sat);
2948 if (sat) {
2949 env->vscr |= (1 << VSCR_SAT);
2953 void helper_vsum4shs (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
2955 int sat = 0;
2956 int i;
2958 for (i = 0; i < ARRAY_SIZE(r->s32); i++) {
2959 int64_t t = (int64_t)b->s32[i];
2960 t += a->s16[2*i] + a->s16[2*i+1];
2961 r->s32[i] = cvtsdsw(t, &sat);
2964 if (sat) {
2965 env->vscr |= (1 << VSCR_SAT);
2969 void helper_vsum4ubs (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
2971 int i, j;
2972 int sat = 0;
2974 for (i = 0; i < ARRAY_SIZE(r->u32); i++) {
2975 uint64_t t = (uint64_t)b->u32[i];
2976 for (j = 0; j < ARRAY_SIZE(r->u32); j++) {
2977 t += a->u8[4*i+j];
2979 r->u32[i] = cvtuduw(t, &sat);
2982 if (sat) {
2983 env->vscr |= (1 << VSCR_SAT);
2987 #if defined(WORDS_BIGENDIAN)
2988 #define UPKHI 1
2989 #define UPKLO 0
2990 #else
2991 #define UPKHI 0
2992 #define UPKLO 1
2993 #endif
2994 #define VUPKPX(suffix, hi) \
2995 void helper_vupk##suffix (ppc_avr_t *r, ppc_avr_t *b) \
2997 int i; \
2998 ppc_avr_t result; \
2999 for (i = 0; i < ARRAY_SIZE(r->u32); i++) { \
3000 uint16_t e = b->u16[hi ? i : i+4]; \
3001 uint8_t a = (e >> 15) ? 0xff : 0; \
3002 uint8_t r = (e >> 10) & 0x1f; \
3003 uint8_t g = (e >> 5) & 0x1f; \
3004 uint8_t b = e & 0x1f; \
3005 result.u32[i] = (a << 24) | (r << 16) | (g << 8) | b; \
3007 *r = result; \
3009 VUPKPX(lpx, UPKLO)
3010 VUPKPX(hpx, UPKHI)
3011 #undef VUPKPX
3013 #define VUPK(suffix, unpacked, packee, hi) \
3014 void helper_vupk##suffix (ppc_avr_t *r, ppc_avr_t *b) \
3016 int i; \
3017 ppc_avr_t result; \
3018 if (hi) { \
3019 for (i = 0; i < ARRAY_SIZE(r->unpacked); i++) { \
3020 result.unpacked[i] = b->packee[i]; \
3022 } else { \
3023 for (i = ARRAY_SIZE(r->unpacked); i < ARRAY_SIZE(r->packee); i++) { \
3024 result.unpacked[i-ARRAY_SIZE(r->unpacked)] = b->packee[i]; \
3027 *r = result; \
3029 VUPK(hsb, s16, s8, UPKHI)
3030 VUPK(hsh, s32, s16, UPKHI)
3031 VUPK(lsb, s16, s8, UPKLO)
3032 VUPK(lsh, s32, s16, UPKLO)
3033 #undef VUPK
3034 #undef UPKHI
3035 #undef UPKLO
3037 #undef DO_HANDLE_NAN
3038 #undef HANDLE_NAN1
3039 #undef HANDLE_NAN2
3040 #undef HANDLE_NAN3
3041 #undef VECTOR_FOR_INORDER_I
3042 #undef HI_IDX
3043 #undef LO_IDX
3045 /*****************************************************************************/
3046 /* SPE extension helpers */
3047 /* Use a table to make this quicker */
3048 static uint8_t hbrev[16] = {
3049 0x0, 0x8, 0x4, 0xC, 0x2, 0xA, 0x6, 0xE,
3050 0x1, 0x9, 0x5, 0xD, 0x3, 0xB, 0x7, 0xF,
3051 };
3053 static always_inline uint8_t byte_reverse (uint8_t val)
3054 {
3055 return hbrev[val >> 4] | (hbrev[val & 0xF] << 4);
3056 }
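/* Illustrative note: the table reverses one nibble at a time, e.g.
 * byte_reverse(0x2C) = hbrev[0x2] | (hbrev[0xC] << 4)
 *                    = 0x04 | (0x03 << 4) = 0x34
 * (0010 1100 -> 0011 0100). */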
3058 static always_inline uint32_t word_reverse (uint32_t val)
3059 {
3060 return byte_reverse(val >> 24) | (byte_reverse(val >> 16) << 8) |
3061 (byte_reverse(val >> 8) << 16) | (byte_reverse(val) << 24);
3062 }
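#if 0
/* Editor's sketch, not part of the original file: a bit-at-a-time
 * reference implementation, handy for cross-checking the table-driven
 * word_reverse() above. Not built. */
static uint32_t word_reverse_ref (uint32_t val)
{
    uint32_t r = 0;
    int i;

    /* Bit i of val ends up as bit (31 - i) of r. */
    for (i = 0; i < 32; i++) {
        r = (r << 1) | ((val >> i) & 1);
    }
    return r;
}
#endif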
3064 #define MASKBITS 16 // Arbitrary value - to be fixed (implementation dependent)
3065 target_ulong helper_brinc (target_ulong arg1, target_ulong arg2)
3066 {
3067 uint32_t a, b, d, mask;
3069 mask = UINT32_MAX >> (32 - MASKBITS);
3070 a = arg1 & mask;
3071 b = arg2 & mask;
3072 d = word_reverse(1 + word_reverse(a | ~b));
3073 return (arg1 & ~mask) | (d & b);
3074 }
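/* Illustrative note: brinc performs a bit-reversed increment over the
 * window selected by arg2, as used for FFT addressing. With b = 0xf and
 * a = 0x8: a | ~b = 0xfffffff8, word-reversed to 0x1fffffff, plus 1 is
 * 0x20000000, reversed back to 0x4 -- i.e. in 4-bit reversed counting,
 * 1000 is followed by 0100. */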
3076 uint32_t helper_cntlsw32 (uint32_t val)
3078 if (val & 0x80000000)
3079 return clz32(~val);
3080 else
3081 return clz32(val);
3084 uint32_t helper_cntlzw32 (uint32_t val)
3086 return clz32(val);
3089 /* Single-precision floating-point conversions */
3090 static always_inline uint32_t efscfsi (uint32_t val)
3092 CPU_FloatU u;
3094 u.f = int32_to_float32(val, &env->vec_status);
3096 return u.l;
3099 static always_inline uint32_t efscfui (uint32_t val)
3101 CPU_FloatU u;
3103 u.f = uint32_to_float32(val, &env->vec_status);
3105 return u.l;
3108 static always_inline int32_t efsctsi (uint32_t val)
3110 CPU_FloatU u;
3112 u.l = val;
3113 /* NaNs are not treated as IEEE 754 requires: they convert to zero */
3114 if (unlikely(float32_is_nan(u.f)))
3115 return 0;
3117 return float32_to_int32(u.f, &env->vec_status);
3120 static always_inline uint32_t efsctui (uint32_t val)
3122 CPU_FloatU u;
3124 u.l = val;
3125 /* NaNs are not treated as IEEE 754 requires: they convert to zero */
3126 if (unlikely(float32_is_nan(u.f)))
3127 return 0;
3129 return float32_to_uint32(u.f, &env->vec_status);
3132 static always_inline uint32_t efsctsiz (uint32_t val)
3134 CPU_FloatU u;
3136 u.l = val;
3137 /* NaNs are not treated as IEEE 754 requires: they convert to zero */
3138 if (unlikely(float32_is_nan(u.f)))
3139 return 0;
3141 return float32_to_int32_round_to_zero(u.f, &env->vec_status);
3144 static always_inline uint32_t efsctuiz (uint32_t val)
3146 CPU_FloatU u;
3148 u.l = val;
3149 /* NaNs are not treated as IEEE 754 requires: they convert to zero */
3150 if (unlikely(float32_is_nan(u.f)))
3151 return 0;
3153 return float32_to_uint32_round_to_zero(u.f, &env->vec_status);
3156 static always_inline uint32_t efscfsf (uint32_t val)
3158 CPU_FloatU u;
3159 float32 tmp;
3161 u.f = int32_to_float32(val, &env->vec_status);
3162 tmp = int64_to_float32(1ULL << 32, &env->vec_status);
3163 u.f = float32_div(u.f, tmp, &env->vec_status);
3165 return u.l;
3168 static always_inline uint32_t efscfuf (uint32_t val)
3170 CPU_FloatU u;
3171 float32 tmp;
3173 u.f = uint32_to_float32(val, &env->vec_status);
3174 tmp = uint64_to_float32(1ULL << 32, &env->vec_status);
3175 u.f = float32_div(u.f, tmp, &env->vec_status);
3177 return u.l;
3178 }
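/* Illustrative note: as written, the fractional conversions treat the
 * 32-bit operand as a fixed-point value scaled by 2**32, so
 * efscfuf(0x80000000) computes 0x80000000 / 2**32 = 0.5f. */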
3180 static always_inline uint32_t efsctsf (uint32_t val)
3182 CPU_FloatU u;
3183 float32 tmp;
3185 u.l = val;
3186 /* NaNs are not treated as IEEE 754 requires: they convert to zero */
3187 if (unlikely(float32_is_nan(u.f)))
3188 return 0;
3189 tmp = uint64_to_float32(1ULL << 32, &env->vec_status);
3190 u.f = float32_mul(u.f, tmp, &env->vec_status);
3192 return float32_to_int32(u.f, &env->vec_status);
3195 static always_inline uint32_t efsctuf (uint32_t val)
3197 CPU_FloatU u;
3198 float32 tmp;
3200 u.l = val;
3201 /* NaNs are not treated as IEEE 754 requires: they convert to zero */
3202 if (unlikely(float32_is_nan(u.f)))
3203 return 0;
3204 tmp = uint64_to_float32(1ULL << 32, &env->vec_status);
3205 u.f = float32_mul(u.f, tmp, &env->vec_status);
3207 return float32_to_uint32(u.f, &env->vec_status);
3210 #define HELPER_SPE_SINGLE_CONV(name) \
3211 uint32_t helper_e##name (uint32_t val) \
3213 return e##name(val); \
3215 /* efscfsi */
3216 HELPER_SPE_SINGLE_CONV(fscfsi);
3217 /* efscfui */
3218 HELPER_SPE_SINGLE_CONV(fscfui);
3219 /* efscfuf */
3220 HELPER_SPE_SINGLE_CONV(fscfuf);
3221 /* efscfsf */
3222 HELPER_SPE_SINGLE_CONV(fscfsf);
3223 /* efsctsi */
3224 HELPER_SPE_SINGLE_CONV(fsctsi);
3225 /* efsctui */
3226 HELPER_SPE_SINGLE_CONV(fsctui);
3227 /* efsctsiz */
3228 HELPER_SPE_SINGLE_CONV(fsctsiz);
3229 /* efsctuiz */
3230 HELPER_SPE_SINGLE_CONV(fsctuiz);
3231 /* efsctsf */
3232 HELPER_SPE_SINGLE_CONV(fsctsf);
3233 /* efsctuf */
3234 HELPER_SPE_SINGLE_CONV(fsctuf);
3236 #define HELPER_SPE_VECTOR_CONV(name) \
3237 uint64_t helper_ev##name (uint64_t val) \
3239 return ((uint64_t)e##name(val >> 32) << 32) | \
3240 (uint64_t)e##name(val); \
3242 /* evfscfsi */
3243 HELPER_SPE_VECTOR_CONV(fscfsi);
3244 /* evfscfui */
3245 HELPER_SPE_VECTOR_CONV(fscfui);
3246 /* evfscfuf */
3247 HELPER_SPE_VECTOR_CONV(fscfuf);
3248 /* evfscfsf */
3249 HELPER_SPE_VECTOR_CONV(fscfsf);
3250 /* evfsctsi */
3251 HELPER_SPE_VECTOR_CONV(fsctsi);
3252 /* evfsctui */
3253 HELPER_SPE_VECTOR_CONV(fsctui);
3254 /* evfsctsiz */
3255 HELPER_SPE_VECTOR_CONV(fsctsiz);
3256 /* evfsctuiz */
3257 HELPER_SPE_VECTOR_CONV(fsctuiz);
3258 /* evfsctsf */
3259 HELPER_SPE_VECTOR_CONV(fsctsf);
3260 /* evfsctuf */
3261 HELPER_SPE_VECTOR_CONV(fsctuf);
3263 /* Single-precision floating-point arithmetic */
3264 static always_inline uint32_t efsadd (uint32_t op1, uint32_t op2)
3266 CPU_FloatU u1, u2;
3267 u1.l = op1;
3268 u2.l = op2;
3269 u1.f = float32_add(u1.f, u2.f, &env->vec_status);
3270 return u1.l;
3273 static always_inline uint32_t efssub (uint32_t op1, uint32_t op2)
3275 CPU_FloatU u1, u2;
3276 u1.l = op1;
3277 u2.l = op2;
3278 u1.f = float32_sub(u1.f, u2.f, &env->vec_status);
3279 return u1.l;
3282 static always_inline uint32_t efsmul (uint32_t op1, uint32_t op2)
3284 CPU_FloatU u1, u2;
3285 u1.l = op1;
3286 u2.l = op2;
3287 u1.f = float32_mul(u1.f, u2.f, &env->vec_status);
3288 return u1.l;
3291 static always_inline uint32_t efsdiv (uint32_t op1, uint32_t op2)
3293 CPU_FloatU u1, u2;
3294 u1.l = op1;
3295 u2.l = op2;
3296 u1.f = float32_div(u1.f, u2.f, &env->vec_status);
3297 return u1.l;
3300 #define HELPER_SPE_SINGLE_ARITH(name) \
3301 uint32_t helper_e##name (uint32_t op1, uint32_t op2) \
3303 return e##name(op1, op2); \
3305 /* efsadd */
3306 HELPER_SPE_SINGLE_ARITH(fsadd);
3307 /* efssub */
3308 HELPER_SPE_SINGLE_ARITH(fssub);
3309 /* efsmul */
3310 HELPER_SPE_SINGLE_ARITH(fsmul);
3311 /* efsdiv */
3312 HELPER_SPE_SINGLE_ARITH(fsdiv);
3314 #define HELPER_SPE_VECTOR_ARITH(name) \
3315 uint64_t helper_ev##name (uint64_t op1, uint64_t op2) \
3317 return ((uint64_t)e##name(op1 >> 32, op2 >> 32) << 32) | \
3318 (uint64_t)e##name(op1, op2); \
3320 /* evfsadd */
3321 HELPER_SPE_VECTOR_ARITH(fsadd);
3322 /* evfssub */
3323 HELPER_SPE_VECTOR_ARITH(fssub);
3324 /* evfsmul */
3325 HELPER_SPE_VECTOR_ARITH(fsmul);
3326 /* evfsdiv */
3327 HELPER_SPE_VECTOR_ARITH(fsdiv);
3329 /* Single-precision floating-point comparisons */
3330 static always_inline uint32_t efststlt (uint32_t op1, uint32_t op2)
3332 CPU_FloatU u1, u2;
3333 u1.l = op1;
3334 u2.l = op2;
3335 return float32_lt(u1.f, u2.f, &env->vec_status) ? 4 : 0;
3338 static always_inline uint32_t efststgt (uint32_t op1, uint32_t op2)
3340 CPU_FloatU u1, u2;
3341 u1.l = op1;
3342 u2.l = op2;
3343 return float32_le(u1.f, u2.f, &env->vec_status) ? 0 : 4;
3346 static always_inline uint32_t efststeq (uint32_t op1, uint32_t op2)
3348 CPU_FloatU u1, u2;
3349 u1.l = op1;
3350 u2.l = op2;
3351 return float32_eq(u1.f, u2.f, &env->vec_status) ? 4 : 0;
3354 static always_inline uint32_t efscmplt (uint32_t op1, uint32_t op2)
3356 /* XXX: TODO: test special values (NaN, infinities, ...) */
3357 return efststlt(op1, op2);
3360 static always_inline uint32_t efscmpgt (uint32_t op1, uint32_t op2)
3362 /* XXX: TODO: test special values (NaN, infinities, ...) */
3363 return efststgt(op1, op2);
3366 static always_inline uint32_t efscmpeq (uint32_t op1, uint32_t op2)
3368 /* XXX: TODO: test special values (NaN, infinities, ...) */
3369 return efststeq(op1, op2);
3372 #define HELPER_SINGLE_SPE_CMP(name) \
3373 uint32_t helper_e##name (uint32_t op1, uint32_t op2) \
3375 return e##name(op1, op2) << 2; \
3377 /* efststlt */
3378 HELPER_SINGLE_SPE_CMP(fststlt);
3379 /* efststgt */
3380 HELPER_SINGLE_SPE_CMP(fststgt);
3381 /* efststeq */
3382 HELPER_SINGLE_SPE_CMP(fststeq);
3383 /* efscmplt */
3384 HELPER_SINGLE_SPE_CMP(fscmplt);
3385 /* efscmpgt */
3386 HELPER_SINGLE_SPE_CMP(fscmpgt);
3387 /* efscmpeq */
3388 HELPER_SINGLE_SPE_CMP(fscmpeq);
3390 static always_inline uint32_t evcmp_merge (int t0, int t1)
3391 {
3392 return (t0 << 3) | (t1 << 2) | ((t0 | t1) << 1) | (t0 & t1);
3393 }
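/* Illustrative note: evcmp_merge packs the two per-half results into a
 * CR field as (hi, lo, hi|lo, hi&lo). E.g. t0 = 1, t1 = 0 gives
 * (1 << 3) | (0 << 2) | (1 << 1) | 0 = 0xa. */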
3395 #define HELPER_VECTOR_SPE_CMP(name) \
3396 uint32_t helper_ev##name (uint64_t op1, uint64_t op2) \
3398 return evcmp_merge(e##name(op1 >> 32, op2 >> 32), e##name(op1, op2)); \
3400 /* evfststlt */
3401 HELPER_VECTOR_SPE_CMP(fststlt);
3402 /* evfststgt */
3403 HELPER_VECTOR_SPE_CMP(fststgt);
3404 /* evfststeq */
3405 HELPER_VECTOR_SPE_CMP(fststeq);
3406 /* evfscmplt */
3407 HELPER_VECTOR_SPE_CMP(fscmplt);
3408 /* evfscmpgt */
3409 HELPER_VECTOR_SPE_CMP(fscmpgt);
3410 /* evfscmpeq */
3411 HELPER_VECTOR_SPE_CMP(fscmpeq);
3413 /* Double-precision floating-point conversion */
3414 uint64_t helper_efdcfsi (uint32_t val)
3416 CPU_DoubleU u;
3418 u.d = int32_to_float64(val, &env->vec_status);
3420 return u.ll;
3423 uint64_t helper_efdcfsid (uint64_t val)
3425 CPU_DoubleU u;
3427 u.d = int64_to_float64(val, &env->vec_status);
3429 return u.ll;
3432 uint64_t helper_efdcfui (uint32_t val)
3434 CPU_DoubleU u;
3436 u.d = uint32_to_float64(val, &env->vec_status);
3438 return u.ll;
3441 uint64_t helper_efdcfuid (uint64_t val)
3443 CPU_DoubleU u;
3445 u.d = uint64_to_float64(val, &env->vec_status);
3447 return u.ll;
3450 uint32_t helper_efdctsi (uint64_t val)
3452 CPU_DoubleU u;
3454 u.ll = val;
3455 /* NaNs are not treated as IEEE 754 requires: they convert to zero */
3456 if (unlikely(float64_is_nan(u.d)))
3457 return 0;
3459 return float64_to_int32(u.d, &env->vec_status);
3462 uint32_t helper_efdctui (uint64_t val)
3464 CPU_DoubleU u;
3466 u.ll = val;
3467 /* NaNs are not treated as IEEE 754 requires: they convert to zero */
3468 if (unlikely(float64_is_nan(u.d)))
3469 return 0;
3471 return float64_to_uint32(u.d, &env->vec_status);
3474 uint32_t helper_efdctsiz (uint64_t val)
3476 CPU_DoubleU u;
3478 u.ll = val;
3479 /* NaNs are not treated as IEEE 754 requires: they convert to zero */
3480 if (unlikely(float64_is_nan(u.d)))
3481 return 0;
3483 return float64_to_int32_round_to_zero(u.d, &env->vec_status);
3486 uint64_t helper_efdctsidz (uint64_t val)
3488 CPU_DoubleU u;
3490 u.ll = val;
3491 /* NaNs are not treated as IEEE 754 requires: they convert to zero */
3492 if (unlikely(float64_is_nan(u.d)))
3493 return 0;
3495 return float64_to_int64_round_to_zero(u.d, &env->vec_status);
3498 uint32_t helper_efdctuiz (uint64_t val)
3500 CPU_DoubleU u;
3502 u.ll = val;
3503 /* NaNs are not treated as IEEE 754 requires: they convert to zero */
3504 if (unlikely(float64_is_nan(u.d)))
3505 return 0;
3507 return float64_to_uint32_round_to_zero(u.d, &env->vec_status);
3510 uint64_t helper_efdctuidz (uint64_t val)
3512 CPU_DoubleU u;
3514 u.ll = val;
3515 /* NaNs are not treated as IEEE 754 requires: they convert to zero */
3516 if (unlikely(float64_is_nan(u.d)))
3517 return 0;
3519 return float64_to_uint64_round_to_zero(u.d, &env->vec_status);
3522 uint64_t helper_efdcfsf (uint32_t val)
3524 CPU_DoubleU u;
3525 float64 tmp;
3527 u.d = int32_to_float64(val, &env->vec_status);
3528 tmp = int64_to_float64(1ULL << 32, &env->vec_status);
3529 u.d = float64_div(u.d, tmp, &env->vec_status);
3531 return u.ll;
3534 uint64_t helper_efdcfuf (uint32_t val)
3536 CPU_DoubleU u;
3537 float64 tmp;
3539 u.d = uint32_to_float64(val, &env->vec_status);
3540 tmp = int64_to_float64(1ULL << 32, &env->vec_status);
3541 u.d = float64_div(u.d, tmp, &env->vec_status);
3543 return u.ll;
3546 uint32_t helper_efdctsf (uint64_t val)
3548 CPU_DoubleU u;
3549 float64 tmp;
3551 u.ll = val;
3552 /* NaNs are not treated as IEEE 754 requires: they convert to zero */
3553 if (unlikely(float64_is_nan(u.d)))
3554 return 0;
3555 tmp = uint64_to_float64(1ULL << 32, &env->vec_status);
3556 u.d = float64_mul(u.d, tmp, &env->vec_status);
3558 return float64_to_int32(u.d, &env->vec_status);
3561 uint32_t helper_efdctuf (uint64_t val)
3563 CPU_DoubleU u;
3564 float64 tmp;
3566 u.ll = val;
3567 /* NaNs are not treated as IEEE 754 requires: they convert to zero */
3568 if (unlikely(float64_is_nan(u.d)))
3569 return 0;
3570 tmp = uint64_to_float64(1ULL << 32, &env->vec_status);
3571 u.d = float64_mul(u.d, tmp, &env->vec_status);
3573 return float64_to_uint32(u.d, &env->vec_status);
3576 uint32_t helper_efscfd (uint64_t val)
3578 CPU_DoubleU u1;
3579 CPU_FloatU u2;
3581 u1.ll = val;
3582 u2.f = float64_to_float32(u1.d, &env->vec_status);
3584 return u2.l;
3587 uint64_t helper_efdcfs (uint32_t val)
3589 CPU_DoubleU u2;
3590 CPU_FloatU u1;
3592 u1.l = val;
3593 u2.d = float32_to_float64(u1.f, &env->vec_status);
3595 return u2.ll;
3598 /* Double precision fixed-point arithmetic */
3599 uint64_t helper_efdadd (uint64_t op1, uint64_t op2)
3601 CPU_DoubleU u1, u2;
3602 u1.ll = op1;
3603 u2.ll = op2;
3604 u1.d = float64_add(u1.d, u2.d, &env->vec_status);
3605 return u1.ll;
3608 uint64_t helper_efdsub (uint64_t op1, uint64_t op2)
3610 CPU_DoubleU u1, u2;
3611 u1.ll = op1;
3612 u2.ll = op2;
3613 u1.d = float64_sub(u1.d, u2.d, &env->vec_status);
3614 return u1.ll;
3617 uint64_t helper_efdmul (uint64_t op1, uint64_t op2)
3619 CPU_DoubleU u1, u2;
3620 u1.ll = op1;
3621 u2.ll = op2;
3622 u1.d = float64_mul(u1.d, u2.d, &env->vec_status);
3623 return u1.ll;
3626 uint64_t helper_efddiv (uint64_t op1, uint64_t op2)
3628 CPU_DoubleU u1, u2;
3629 u1.ll = op1;
3630 u2.ll = op2;
3631 u1.d = float64_div(u1.d, u2.d, &env->vec_status);
3632 return u1.ll;
3635 /* Double precision floating point helpers */
3636 uint32_t helper_efdtstlt (uint64_t op1, uint64_t op2)
3638 CPU_DoubleU u1, u2;
3639 u1.ll = op1;
3640 u2.ll = op2;
3641 return float64_lt(u1.d, u2.d, &env->vec_status) ? 4 : 0;
3644 uint32_t helper_efdtstgt (uint64_t op1, uint64_t op2)
3646 CPU_DoubleU u1, u2;
3647 u1.ll = op1;
3648 u2.ll = op2;
3649 return float64_le(u1.d, u2.d, &env->vec_status) ? 0 : 4;
3652 uint32_t helper_efdtsteq (uint64_t op1, uint64_t op2)
3654 CPU_DoubleU u1, u2;
3655 u1.ll = op1;
3656 u2.ll = op2;
3657 return float64_eq(u1.d, u2.d, &env->vec_status) ? 4 : 0;
3660 uint32_t helper_efdcmplt (uint64_t op1, uint64_t op2)
3662 /* XXX: TODO: test special values (NaN, infinities, ...) */
3663 return helper_efdtstlt(op1, op2);
3666 uint32_t helper_efdcmpgt (uint64_t op1, uint64_t op2)
3668 /* XXX: TODO: test special values (NaN, infinities, ...) */
3669 return helper_efdtstgt(op1, op2);
3672 uint32_t helper_efdcmpeq (uint64_t op1, uint64_t op2)
3674 /* XXX: TODO: test special values (NaN, infinities, ...) */
3675 return helper_efdtsteq(op1, op2);
3678 /*****************************************************************************/
3679 /* Softmmu support */
3680 #if !defined (CONFIG_USER_ONLY)
3682 #define MMUSUFFIX _mmu
3684 #define SHIFT 0
3685 #include "softmmu_template.h"
3687 #define SHIFT 1
3688 #include "softmmu_template.h"
3690 #define SHIFT 2
3691 #include "softmmu_template.h"
3693 #define SHIFT 3
3694 #include "softmmu_template.h"
3696 /* Try to fill the TLB and raise an exception on error. If retaddr is
3697 NULL, the function was called from C code (i.e. not from generated
3698 code or from helper.c). */
3699 /* XXX: fix it to restore all registers */
3700 void tlb_fill (target_ulong addr, int is_write, int mmu_idx, void *retaddr)
3702 TranslationBlock *tb;
3703 CPUState *saved_env;
3704 unsigned long pc;
3705 int ret;
3707 /* XXX: hack to restore env in all cases, even if not called from
3708 generated code */
3709 saved_env = env;
3710 env = cpu_single_env;
3711 ret = cpu_ppc_handle_mmu_fault(env, addr, is_write, mmu_idx, 1);
3712 if (unlikely(ret != 0)) {
3713 if (likely(retaddr)) {
3714 /* now we have a real cpu fault */
3715 pc = (unsigned long)retaddr;
3716 tb = tb_find_pc(pc);
3717 if (likely(tb)) {
3718 /* the PC is inside the translated code. It means that we have
3719 a virtual CPU fault */
3720 cpu_restore_state(tb, env, pc, NULL);
3723 helper_raise_exception_err(env->exception_index, env->error_code);
3725 env = saved_env;
3728 /* Segment registers load and store */
3729 target_ulong helper_load_sr (target_ulong sr_num)
3731 #if defined(TARGET_PPC64)
3732 if (env->mmu_model & POWERPC_MMU_64)
3733 return ppc_load_sr(env, sr_num);
3734 #endif
3735 return env->sr[sr_num];
3738 void helper_store_sr (target_ulong sr_num, target_ulong val)
3740 ppc_store_sr(env, sr_num, val);
3743 /* SLB management */
3744 #if defined(TARGET_PPC64)
3745 target_ulong helper_load_slb (target_ulong slb_nr)
3747 return ppc_load_slb(env, slb_nr);
3750 void helper_store_slb (target_ulong rb, target_ulong rs)
3752 ppc_store_slb(env, rb, rs);
3755 void helper_slbia (void)
3757 ppc_slb_invalidate_all(env);
3760 void helper_slbie (target_ulong addr)
3762 ppc_slb_invalidate_one(env, addr);
3765 #endif /* defined(TARGET_PPC64) */
3767 /* TLB management */
3768 void helper_tlbia (void)
3770 ppc_tlb_invalidate_all(env);
3773 void helper_tlbie (target_ulong addr)
3775 ppc_tlb_invalidate_one(env, addr);
3778 /* Software driven TLBs management */
3779 /* PowerPC 602/603 software TLB load instructions helpers */
3780 static void do_6xx_tlb (target_ulong new_EPN, int is_code)
3782 target_ulong RPN, CMP, EPN;
3783 int way;
3785 RPN = env->spr[SPR_RPA];
3786 if (is_code) {
3787 CMP = env->spr[SPR_ICMP];
3788 EPN = env->spr[SPR_IMISS];
3789 } else {
3790 CMP = env->spr[SPR_DCMP];
3791 EPN = env->spr[SPR_DMISS];
3793 way = (env->spr[SPR_SRR1] >> 17) & 1;
3794 LOG_SWTLB("%s: EPN " ADDRX " " ADDRX " PTE0 " ADDRX
3795 " PTE1 " ADDRX " way %d\n",
3796 __func__, new_EPN, EPN, CMP, RPN, way);
3797 /* Store this TLB */
3798 ppc6xx_tlb_store(env, (uint32_t)(new_EPN & TARGET_PAGE_MASK),
3799 way, is_code, CMP, RPN);
3802 void helper_6xx_tlbd (target_ulong EPN)
3804 do_6xx_tlb(EPN, 0);
3807 void helper_6xx_tlbi (target_ulong EPN)
3809 do_6xx_tlb(EPN, 1);
3812 /* PowerPC 74xx software TLB load instructions helpers */
3813 static void do_74xx_tlb (target_ulong new_EPN, int is_code)
3815 target_ulong RPN, CMP, EPN;
3816 int way;
3818 RPN = env->spr[SPR_PTELO];
3819 CMP = env->spr[SPR_PTEHI];
3820 EPN = env->spr[SPR_TLBMISS] & ~0x3;
3821 way = env->spr[SPR_TLBMISS] & 0x3;
3822 LOG_SWTLB("%s: EPN " ADDRX " " ADDRX " PTE0 " ADDRX
3823 " PTE1 " ADDRX " way %d\n",
3824 __func__, new_EPN, EPN, CMP, RPN, way);
3825 /* Store this TLB */
3826 ppc6xx_tlb_store(env, (uint32_t)(new_EPN & TARGET_PAGE_MASK),
3827 way, is_code, CMP, RPN);
3830 void helper_74xx_tlbd (target_ulong EPN)
3832 do_74xx_tlb(EPN, 0);
3835 void helper_74xx_tlbi (target_ulong EPN)
3837 do_74xx_tlb(EPN, 1);
3840 static always_inline target_ulong booke_tlb_to_page_size (int size)
3842 return 1024 << (2 * size);
3845 static always_inline int booke_page_size_to_tlb (target_ulong page_size)
3847 int size;
3849 switch (page_size) {
3850 case 0x00000400UL:
3851 size = 0x0;
3852 break;
3853 case 0x00001000UL:
3854 size = 0x1;
3855 break;
3856 case 0x00004000UL:
3857 size = 0x2;
3858 break;
3859 case 0x00010000UL:
3860 size = 0x3;
3861 break;
3862 case 0x00040000UL:
3863 size = 0x4;
3864 break;
3865 case 0x00100000UL:
3866 size = 0x5;
3867 break;
3868 case 0x00400000UL:
3869 size = 0x6;
3870 break;
3871 case 0x01000000UL:
3872 size = 0x7;
3873 break;
3874 case 0x04000000UL:
3875 size = 0x8;
3876 break;
3877 case 0x10000000UL:
3878 size = 0x9;
3879 break;
3880 case 0x40000000UL:
3881 size = 0xA;
3882 break;
3883 #if defined (TARGET_PPC64)
3884 case 0x000100000000ULL:
3885 size = 0xB;
3886 break;
3887 case 0x000400000000ULL:
3888 size = 0xC;
3889 break;
3890 case 0x001000000000ULL:
3891 size = 0xD;
3892 break;
3893 case 0x004000000000ULL:
3894 size = 0xE;
3895 break;
3896 case 0x010000000000ULL:
3897 size = 0xF;
3898 break;
3899 #endif
3900 default:
3901 size = -1;
3902 break;
3905 return size;
3906 }
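/* Illustrative note: booke_tlb_to_page_size computes 1024 << (2 * size),
 * so the size codes walk a 4x progression: 0 -> 1 KiB, 1 -> 4 KiB,
 * 2 -> 16 KiB, ... 0xA -> 1 GiB. booke_page_size_to_tlb above is its
 * exact inverse, returning -1 for sizes not in the table. */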
3908 /* Helpers for 4xx TLB management */
3909 target_ulong helper_4xx_tlbre_lo (target_ulong entry)
3911 ppcemb_tlb_t *tlb;
3912 target_ulong ret;
3913 int size;
3915 entry &= 0x3F;
3916 tlb = &env->tlb[entry].tlbe;
3917 ret = tlb->EPN;
3918 if (tlb->prot & PAGE_VALID)
3919 ret |= 0x400;
3920 size = booke_page_size_to_tlb(tlb->size);
3921 if (size < 0 || size > 0x7)
3922 size = 1;
3923 ret |= size << 7;
3924 env->spr[SPR_40x_PID] = tlb->PID;
3925 return ret;
3928 target_ulong helper_4xx_tlbre_hi (target_ulong entry)
3930 ppcemb_tlb_t *tlb;
3931 target_ulong ret;
3933 entry &= 0x3F;
3934 tlb = &env->tlb[entry].tlbe;
3935 ret = tlb->RPN;
3936 if (tlb->prot & PAGE_EXEC)
3937 ret |= 0x200;
3938 if (tlb->prot & PAGE_WRITE)
3939 ret |= 0x100;
3940 return ret;
3943 void helper_4xx_tlbwe_hi (target_ulong entry, target_ulong val)
3945 ppcemb_tlb_t *tlb;
3946 target_ulong page, end;
3948 LOG_SWTLB("%s entry %d val " ADDRX "\n", __func__, (int)entry, val);
3949 entry &= 0x3F;
3950 tlb = &env->tlb[entry].tlbe;
3951 /* Invalidate previous TLB (if it's valid) */
3952 if (tlb->prot & PAGE_VALID) {
3953 end = tlb->EPN + tlb->size;
3954 LOG_SWTLB("%s: invalidate old TLB %d start " ADDRX
3955 " end " ADDRX "\n", __func__, (int)entry, tlb->EPN, end);
3956 for (page = tlb->EPN; page < end; page += TARGET_PAGE_SIZE)
3957 tlb_flush_page(env, page);
3959 tlb->size = booke_tlb_to_page_size((val >> 7) & 0x7);
3960 /* We cannot handle TLB size < TARGET_PAGE_SIZE.
3961 * If this ever occurs, one should use the ppcemb target instead
3962 * of the ppc or ppc64 one
3963 */
3964 if ((val & 0x40) && tlb->size < TARGET_PAGE_SIZE) {
3965 cpu_abort(env, "TLB size " TARGET_FMT_lu " < %u "
3966 "are not supported (%d)\n",
3967 tlb->size, TARGET_PAGE_SIZE, (int)((val >> 7) & 0x7));
3969 tlb->EPN = val & ~(tlb->size - 1);
3970 if (val & 0x40)
3971 tlb->prot |= PAGE_VALID;
3972 else
3973 tlb->prot &= ~PAGE_VALID;
3974 if (val & 0x20) {
3975 /* XXX: TO BE FIXED */
3976 cpu_abort(env, "Little-endian TLB entries are not supported by now\n");
3978 tlb->PID = env->spr[SPR_40x_PID]; /* PID */
3979 tlb->attr = val & 0xFF;
3980 LOG_SWTLB("%s: set up TLB %d RPN " PADDRX " EPN " ADDRX
3981 " size " ADDRX " prot %c%c%c%c PID %d\n", __func__,
3982 (int)entry, tlb->RPN, tlb->EPN, tlb->size,
3983 tlb->prot & PAGE_READ ? 'r' : '-',
3984 tlb->prot & PAGE_WRITE ? 'w' : '-',
3985 tlb->prot & PAGE_EXEC ? 'x' : '-',
3986 tlb->prot & PAGE_VALID ? 'v' : '-', (int)tlb->PID);
3987 /* Invalidate new TLB (if valid) */
3988 if (tlb->prot & PAGE_VALID) {
3989 end = tlb->EPN + tlb->size;
3990 LOG_SWTLB("%s: invalidate TLB %d start " ADDRX
3991 " end " ADDRX "\n", __func__, (int)entry, tlb->EPN, end);
3992 for (page = tlb->EPN; page < end; page += TARGET_PAGE_SIZE)
3993 tlb_flush_page(env, page);
3997 void helper_4xx_tlbwe_lo (target_ulong entry, target_ulong val)
3999 ppcemb_tlb_t *tlb;
4001 LOG_SWTLB("%s entry %i val " ADDRX "\n", __func__, (int)entry, val);
4002 entry &= 0x3F;
4003 tlb = &env->tlb[entry].tlbe;
4004 tlb->RPN = val & 0xFFFFFC00;
4005 tlb->prot = PAGE_READ;
4006 if (val & 0x200)
4007 tlb->prot |= PAGE_EXEC;
4008 if (val & 0x100)
4009 tlb->prot |= PAGE_WRITE;
4010 LOG_SWTLB("%s: set up TLB %d RPN " PADDRX " EPN " ADDRX
4011 " size " ADDRX " prot %c%c%c%c PID %d\n", __func__,
4012 (int)entry, tlb->RPN, tlb->EPN, tlb->size,
4013 tlb->prot & PAGE_READ ? 'r' : '-',
4014 tlb->prot & PAGE_WRITE ? 'w' : '-',
4015 tlb->prot & PAGE_EXEC ? 'x' : '-',
4016 tlb->prot & PAGE_VALID ? 'v' : '-', (int)tlb->PID);
4019 target_ulong helper_4xx_tlbsx (target_ulong address)
4021 return ppcemb_tlb_search(env, address, env->spr[SPR_40x_PID]);
4024 /* PowerPC 440 TLB management */
4025 void helper_440_tlbwe (uint32_t word, target_ulong entry, target_ulong value)
4027 ppcemb_tlb_t *tlb;
4028 target_ulong EPN, RPN, size;
4029 int do_flush_tlbs;
4031 LOG_SWTLB("%s word %d entry %d value " ADDRX "\n",
4032 __func__, word, (int)entry, value);
4033 do_flush_tlbs = 0;
4034 entry &= 0x3F;
4035 tlb = &env->tlb[entry].tlbe;
4036 switch (word) {
4037 default:
4038 /* Just here to please gcc */
4039 case 0:
4040 EPN = value & 0xFFFFFC00;
4041 if ((tlb->prot & PAGE_VALID) && EPN != tlb->EPN)
4042 do_flush_tlbs = 1;
4043 tlb->EPN = EPN;
4044 size = booke_tlb_to_page_size((value >> 4) & 0xF);
4045 if ((tlb->prot & PAGE_VALID) && tlb->size < size)
4046 do_flush_tlbs = 1;
4047 tlb->size = size;
4048 tlb->attr &= ~0x1;
4049 tlb->attr |= (value >> 8) & 1;
4050 if (value & 0x200) {
4051 tlb->prot |= PAGE_VALID;
4052 } else {
4053 if (tlb->prot & PAGE_VALID) {
4054 tlb->prot &= ~PAGE_VALID;
4055 do_flush_tlbs = 1;
4058 tlb->PID = env->spr[SPR_440_MMUCR] & 0x000000FF;
4059 if (do_flush_tlbs)
4060 tlb_flush(env, 1);
4061 break;
4062 case 1:
4063 RPN = value & 0xFFFFFC0F;
4064 if ((tlb->prot & PAGE_VALID) && tlb->RPN != RPN)
4065 tlb_flush(env, 1);
4066 tlb->RPN = RPN;
4067 break;
4068 case 2:
4069 tlb->attr = (tlb->attr & 0x1) | (value & 0x0000FF00);
4070 tlb->prot = tlb->prot & PAGE_VALID;
4071 if (value & 0x1)
4072 tlb->prot |= PAGE_READ << 4;
4073 if (value & 0x2)
4074 tlb->prot |= PAGE_WRITE << 4;
4075 if (value & 0x4)
4076 tlb->prot |= PAGE_EXEC << 4;
4077 if (value & 0x8)
4078 tlb->prot |= PAGE_READ;
4079 if (value & 0x10)
4080 tlb->prot |= PAGE_WRITE;
4081 if (value & 0x20)
4082 tlb->prot |= PAGE_EXEC;
4083 break;
4087 target_ulong helper_440_tlbre (uint32_t word, target_ulong entry)
4089 ppcemb_tlb_t *tlb;
4090 target_ulong ret;
4091 int size;
4093 entry &= 0x3F;
4094 tlb = &env->tlb[entry].tlbe;
4095 switch (word) {
4096 default:
4097 /* Just here to please gcc */
4098 case 0:
4099 ret = tlb->EPN;
4100 size = booke_page_size_to_tlb(tlb->size);
4101 if (size < 0 || size > 0xF)
4102 size = 1;
4103 ret |= size << 4;
4104 if (tlb->attr & 0x1)
4105 ret |= 0x100;
4106 if (tlb->prot & PAGE_VALID)
4107 ret |= 0x200;
4108 env->spr[SPR_440_MMUCR] &= ~0x000000FF;
4109 env->spr[SPR_440_MMUCR] |= tlb->PID;
4110 break;
4111 case 1:
4112 ret = tlb->RPN;
4113 break;
4114 case 2:
4115 ret = tlb->attr & ~0x1;
4116 if (tlb->prot & (PAGE_READ << 4))
4117 ret |= 0x1;
4118 if (tlb->prot & (PAGE_WRITE << 4))
4119 ret |= 0x2;
4120 if (tlb->prot & (PAGE_EXEC << 4))
4121 ret |= 0x4;
4122 if (tlb->prot & PAGE_READ)
4123 ret |= 0x8;
4124 if (tlb->prot & PAGE_WRITE)
4125 ret |= 0x10;
4126 if (tlb->prot & PAGE_EXEC)
4127 ret |= 0x20;
4128 break;
4130 return ret;
4133 target_ulong helper_440_tlbsx (target_ulong address)
4135 return ppcemb_tlb_search(env, address, env->spr[SPR_440_MMUCR] & 0xFF);
4138 #endif /* !CONFIG_USER_ONLY */