/*
 *  PowerPC emulation helpers for qemu.
 *
 *  Copyright (c) 2003-2007 Jocelyn Mayer
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include <string.h>
#include "exec.h"
#include "host-utils.h"
#include "helper.h"

#include "helper_regs.h"

//#define DEBUG_OP
//#define DEBUG_EXCEPTIONS
//#define DEBUG_SOFTWARE_TLB

#ifdef DEBUG_SOFTWARE_TLB
#  define LOG_SWTLB(...) qemu_log(__VA_ARGS__)
#else
#  define LOG_SWTLB(...) do { } while (0)
#endif
/*****************************************************************************/
/* Exceptions processing helpers */

void helper_raise_exception_err (uint32_t exception, uint32_t error_code)
{
#if 0
    printf("Raise exception %3x code : %d\n", exception, error_code);
#endif
    env->exception_index = exception;
    env->error_code = error_code;
    cpu_loop_exit();
}

void helper_raise_exception (uint32_t exception)
{
    helper_raise_exception_err(exception, 0);
}

/*****************************************************************************/
/* SPR accesses */
void helper_load_dump_spr (uint32_t sprn)
{
    qemu_log("Read SPR %d %03x => " TARGET_FMT_lx "\n", sprn, sprn,
             env->spr[sprn]);
}

void helper_store_dump_spr (uint32_t sprn)
{
    qemu_log("Write SPR %d %03x <= " TARGET_FMT_lx "\n", sprn, sprn,
             env->spr[sprn]);
}

target_ulong helper_load_tbl (void)
{
    return (target_ulong)cpu_ppc_load_tbl(env);
}

target_ulong helper_load_tbu (void)
{
    return cpu_ppc_load_tbu(env);
}

target_ulong helper_load_atbl (void)
{
    return (target_ulong)cpu_ppc_load_atbl(env);
}

target_ulong helper_load_atbu (void)
{
    return cpu_ppc_load_atbu(env);
}

target_ulong helper_load_601_rtcl (void)
{
    return cpu_ppc601_load_rtcl(env);
}

target_ulong helper_load_601_rtcu (void)
{
    return cpu_ppc601_load_rtcu(env);
}
#if !defined(CONFIG_USER_ONLY)
#if defined (TARGET_PPC64)
void helper_store_asr (target_ulong val)
{
    ppc_store_asr(env, val);
}
#endif

void helper_store_sdr1 (target_ulong val)
{
    ppc_store_sdr1(env, val);
}

void helper_store_tbl (target_ulong val)
{
    cpu_ppc_store_tbl(env, val);
}

void helper_store_tbu (target_ulong val)
{
    cpu_ppc_store_tbu(env, val);
}

void helper_store_atbl (target_ulong val)
{
    cpu_ppc_store_atbl(env, val);
}

void helper_store_atbu (target_ulong val)
{
    cpu_ppc_store_atbu(env, val);
}

void helper_store_601_rtcl (target_ulong val)
{
    cpu_ppc601_store_rtcl(env, val);
}

void helper_store_601_rtcu (target_ulong val)
{
    cpu_ppc601_store_rtcu(env, val);
}

target_ulong helper_load_decr (void)
{
    return cpu_ppc_load_decr(env);
}

void helper_store_decr (target_ulong val)
{
    cpu_ppc_store_decr(env, val);
}

void helper_store_hid0_601 (target_ulong val)
{
    target_ulong hid0;

    hid0 = env->spr[SPR_HID0];
    if ((val ^ hid0) & 0x00000008) {
        /* Change current endianness */
        env->hflags &= ~(1 << MSR_LE);
        env->hflags_nmsr &= ~(1 << MSR_LE);
        env->hflags_nmsr |= (1 << MSR_LE) & (((val >> 3) & 1) << MSR_LE);
        env->hflags |= env->hflags_nmsr;
        qemu_log("%s: set endianness to %c => " TARGET_FMT_lx "\n", __func__,
                 val & 0x8 ? 'l' : 'b', env->hflags);
    }
    env->spr[SPR_HID0] = (uint32_t)val;
}

void helper_store_403_pbr (uint32_t num, target_ulong value)
{
    if (likely(env->pb[num] != value)) {
        env->pb[num] = value;
        /* Should be optimized */
        tlb_flush(env, 1);
    }
}

target_ulong helper_load_40x_pit (void)
{
    return load_40x_pit(env);
}

void helper_store_40x_pit (target_ulong val)
{
    store_40x_pit(env, val);
}

void helper_store_40x_dbcr0 (target_ulong val)
{
    store_40x_dbcr0(env, val);
}

void helper_store_40x_sler (target_ulong val)
{
    store_40x_sler(env, val);
}

void helper_store_booke_tcr (target_ulong val)
{
    store_booke_tcr(env, val);
}

void helper_store_booke_tsr (target_ulong val)
{
    store_booke_tsr(env, val);
}

void helper_store_ibatu (uint32_t nr, target_ulong val)
{
    ppc_store_ibatu(env, nr, val);
}

void helper_store_ibatl (uint32_t nr, target_ulong val)
{
    ppc_store_ibatl(env, nr, val);
}

void helper_store_dbatu (uint32_t nr, target_ulong val)
{
    ppc_store_dbatu(env, nr, val);
}

void helper_store_dbatl (uint32_t nr, target_ulong val)
{
    ppc_store_dbatl(env, nr, val);
}

void helper_store_601_batl (uint32_t nr, target_ulong val)
{
    ppc_store_ibatl_601(env, nr, val);
}

void helper_store_601_batu (uint32_t nr, target_ulong val)
{
    ppc_store_ibatu_601(env, nr, val);
}
#endif
/*****************************************************************************/
/* Memory load and stores */

static inline target_ulong addr_add(target_ulong addr, target_long arg)
{
#if defined(TARGET_PPC64)
    if (!msr_sf)
        return (uint32_t)(addr + arg);
    else
#endif
        return addr + arg;
}
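/* Illustrative note (not in the original source): on a 64-bit target running
 * in 32-bit mode (msr_sf clear), effective addresses wrap modulo 2^32, so
 * addr_add(0xFFFFFFFC, 8) yields 0x00000004 rather than 0x100000004. */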
void helper_lmw (target_ulong addr, uint32_t reg)
{
    for (; reg < 32; reg++) {
        if (msr_le)
            env->gpr[reg] = bswap32(ldl(addr));
        else
            env->gpr[reg] = ldl(addr);
        addr = addr_add(addr, 4);
    }
}

void helper_stmw (target_ulong addr, uint32_t reg)
{
    for (; reg < 32; reg++) {
        if (msr_le)
            stl(addr, bswap32((uint32_t)env->gpr[reg]));
        else
            stl(addr, (uint32_t)env->gpr[reg]);
        addr = addr_add(addr, 4);
    }
}
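/* Illustrative example (not in the original source): lmw with reg = 29 loads
 * r29, r30 and r31 from three consecutive words at addr, addr+4 and addr+8;
 * stmw is the symmetric store. In little-endian mode each word is byte
 * swapped on the way through. */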
void helper_lsw(target_ulong addr, uint32_t nb, uint32_t reg)
{
    int sh;
    for (; nb > 3; nb -= 4) {
        env->gpr[reg] = ldl(addr);
        reg = (reg + 1) % 32;
        addr = addr_add(addr, 4);
    }
    if (unlikely(nb > 0)) {
        env->gpr[reg] = 0;
        for (sh = 24; nb > 0; nb--, sh -= 8) {
            env->gpr[reg] |= ldub(addr) << sh;
            addr = addr_add(addr, 1);
        }
    }
}
/* PPC32 specification says we must generate an exception if
 * rA is in the range of registers to be loaded.
 * On the other hand, IBM says this is valid, but rA won't be loaded.
 * For now, I'll follow the spec...
 */
void helper_lswx(target_ulong addr, uint32_t reg, uint32_t ra, uint32_t rb)
{
    if (likely(xer_bc != 0)) {
        if (unlikely((ra != 0 && reg < ra && (reg + xer_bc) > ra) ||
                     (reg < rb && (reg + xer_bc) > rb))) {
            helper_raise_exception_err(POWERPC_EXCP_PROGRAM,
                                       POWERPC_EXCP_INVAL |
                                       POWERPC_EXCP_INVAL_LSWX);
        } else {
            helper_lsw(addr, xer_bc, reg);
        }
    }
}

void helper_stsw(target_ulong addr, uint32_t nb, uint32_t reg)
{
    int sh;
    for (; nb > 3; nb -= 4) {
        stl(addr, env->gpr[reg]);
        reg = (reg + 1) % 32;
        addr = addr_add(addr, 4);
    }
    if (unlikely(nb > 0)) {
        for (sh = 24; nb > 0; nb--, sh -= 8) {
            stb(addr, (env->gpr[reg] >> sh) & 0xFF);
            addr = addr_add(addr, 1);
        }
    }
}
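/* Worked example (illustrative): lsw with nb = 6 moves one full word into
 * gpr[reg] and then packs the remaining two bytes left-justified into the
 * high half of gpr[reg+1], zeroing the rest (sh runs 24, then 16). */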
static void do_dcbz(target_ulong addr, int dcache_line_size)
{
    addr &= ~(dcache_line_size - 1);
    int i;
    for (i = 0 ; i < dcache_line_size ; i += 4) {
        stl(addr + i , 0);
    }
    if (env->reserve_addr == addr)
        env->reserve_addr = (target_ulong)-1ULL;
}

void helper_dcbz(target_ulong addr)
{
    do_dcbz(addr, env->dcache_line_size);
}

void helper_dcbz_970(target_ulong addr)
{
    if (((env->spr[SPR_970_HID5] >> 7) & 0x3) == 1)
        do_dcbz(addr, 32);
    else
        do_dcbz(addr, env->dcache_line_size);
}
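/* Added commentary: clearing env->reserve_addr in do_dcbz is what makes a
 * dcbz to a line covered by an active lwarx reservation cause the following
 * stwcx. to fail, matching the architectural rule that any store to the
 * reserved granule kills the reservation. */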
void helper_icbi(target_ulong addr)
{
    addr &= ~(env->dcache_line_size - 1);
    /* Invalidate one cache line :
     * PowerPC specification says this is to be treated like a load
     * (not a fetch) by the MMU. To be sure it will be so,
     * do the load "by hand".
     */
    ldl(addr);
    tb_invalidate_page_range(addr, addr + env->icache_line_size);
}

// XXX: to be tested
target_ulong helper_lscbx (target_ulong addr, uint32_t reg, uint32_t ra, uint32_t rb)
{
    int i, c, d;
    d = 24;
    for (i = 0; i < xer_bc; i++) {
        c = ldub(addr);
        addr = addr_add(addr, 1);
        /* ra (if not 0) and rb are never modified */
        if (likely(reg != rb && (ra == 0 || reg != ra))) {
            env->gpr[reg] = (env->gpr[reg] & ~(0xFF << d)) | (c << d);
        }
        if (unlikely(c == xer_cmp))
            break;
        if (likely(d != 0)) {
            d -= 8;
        } else {
            d = 24;
            reg++;
            reg = reg & 0x1F;
        }
    }
    return i;
}
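/* Added commentary: lscbx stops at the first byte equal to xer_cmp; the
 * matching byte is still written into the target GPR, but since the loop
 * breaks before i is incremented, the returned count excludes it. */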
/*****************************************************************************/
/* Fixed point operations helpers */
#if defined(TARGET_PPC64)

/* multiply high word */
uint64_t helper_mulhd (uint64_t arg1, uint64_t arg2)
{
    uint64_t tl, th;

    muls64(&tl, &th, arg1, arg2);
    return th;
}

/* multiply high word unsigned */
uint64_t helper_mulhdu (uint64_t arg1, uint64_t arg2)
{
    uint64_t tl, th;

    mulu64(&tl, &th, arg1, arg2);
    return th;
}

uint64_t helper_mulldo (uint64_t arg1, uint64_t arg2)
{
    int64_t th;
    uint64_t tl;

    muls64(&tl, (uint64_t *)&th, arg1, arg2);
    /* Overflow iff the high word is not the sign extension of the low word
     * (the previous th != 0 && th != -1 test missed products >= 2^63) */
    if (likely(th == ((int64_t)tl >> 63))) {
        env->xer &= ~(1 << XER_OV);
    } else {
        env->xer |= (1 << XER_OV) | (1 << XER_SO);
    }
    return (int64_t)tl;
}
#endif
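/* Worked example (illustrative): INT64_MAX * 2 has a 128-bit product with
 * high word 0 but a low word whose sign bit is set, so the sign-extension
 * test above flags the overflow; (-1) * (-1) gives th = 0, tl = 1 and no
 * overflow. */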
target_ulong helper_cntlzw (target_ulong t)
{
    return clz32(t);
}

#if defined(TARGET_PPC64)
target_ulong helper_cntlzd (target_ulong t)
{
    return clz64(t);
}
#endif

/* shift right arithmetic helper */
target_ulong helper_sraw (target_ulong value, target_ulong shift)
{
    int32_t ret;

    if (likely(!(shift & 0x20))) {
        if (likely((uint32_t)shift != 0)) {
            shift &= 0x1f;
            ret = (int32_t)value >> shift;
            if (likely(ret >= 0 || (value & ((1 << shift) - 1)) == 0)) {
                env->xer &= ~(1 << XER_CA);
            } else {
                env->xer |= (1 << XER_CA);
            }
        } else {
            ret = (int32_t)value;
            env->xer &= ~(1 << XER_CA);
        }
    } else {
        ret = (int32_t)value >> 31;
        if (ret) {
            env->xer |= (1 << XER_CA);
        } else {
            env->xer &= ~(1 << XER_CA);
        }
    }
    return (target_long)ret;
}

#if defined(TARGET_PPC64)
target_ulong helper_srad (target_ulong value, target_ulong shift)
{
    int64_t ret;

    if (likely(!(shift & 0x40))) {
        if (likely((uint64_t)shift != 0)) {
            shift &= 0x3f;
            ret = (int64_t)value >> shift;
            /* Use a 64-bit mask: shift can be up to 63 here */
            if (likely(ret >= 0 || (value & ((1ULL << shift) - 1)) == 0)) {
                env->xer &= ~(1 << XER_CA);
            } else {
                env->xer |= (1 << XER_CA);
            }
        } else {
            ret = (int64_t)value;
            env->xer &= ~(1 << XER_CA);
        }
    } else {
        ret = (int64_t)value >> 63;
        if (ret) {
            env->xer |= (1 << XER_CA);
        } else {
            env->xer &= ~(1 << XER_CA);
        }
    }
    return ret;
}
#endif

target_ulong helper_popcntb (target_ulong val)
{
    val = (val & 0x55555555) + ((val >> 1) & 0x55555555);
    val = (val & 0x33333333) + ((val >> 2) & 0x33333333);
    val = (val & 0x0f0f0f0f) + ((val >> 4) & 0x0f0f0f0f);
    return val;
}

#if defined(TARGET_PPC64)
target_ulong helper_popcntb_64 (target_ulong val)
{
    val = (val & 0x5555555555555555ULL) + ((val >> 1) & 0x5555555555555555ULL);
    val = (val & 0x3333333333333333ULL) + ((val >> 2) & 0x3333333333333333ULL);
    val = (val & 0x0f0f0f0f0f0f0f0fULL) + ((val >> 4) & 0x0f0f0f0f0f0f0f0fULL);
    return val;
}
#endif
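/* Worked example (illustrative): for the byte 0xB5 = 10110101b the three
 * SWAR steps accumulate pair counts (01 10 01 01), then nibble counts
 * (0011 0010), then the per-byte total 0x05, left in that byte lane.
 * Unlike a full popcount, popcntb keeps one count per byte. */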
/*****************************************************************************/
/* Floating point operations helpers */
uint64_t helper_float32_to_float64(uint32_t arg)
{
    CPU_FloatU f;
    CPU_DoubleU d;
    f.l = arg;
    d.d = float32_to_float64(f.f, &env->fp_status);
    return d.ll;
}

uint32_t helper_float64_to_float32(uint64_t arg)
{
    CPU_FloatU f;
    CPU_DoubleU d;
    d.ll = arg;
    f.f = float64_to_float32(d.d, &env->fp_status);
    return f.l;
}

static inline int isden(float64 d)
{
    CPU_DoubleU u;

    u.d = d;

    return ((u.ll >> 52) & 0x7FF) == 0;
}

uint32_t helper_compute_fprf (uint64_t arg, uint32_t set_fprf)
{
    CPU_DoubleU farg;
    int isneg;
    int ret;
    farg.ll = arg;
    isneg = float64_is_neg(farg.d);
    if (unlikely(float64_is_nan(farg.d))) {
        if (float64_is_signaling_nan(farg.d)) {
            /* Signaling NaN: flags are undefined */
            ret = 0x00;
        } else {
            /* Quiet NaN */
            ret = 0x11;
        }
    } else if (unlikely(float64_is_infinity(farg.d))) {
        /* +/- infinity */
        if (isneg)
            ret = 0x09;
        else
            ret = 0x05;
    } else {
        if (float64_is_zero(farg.d)) {
            /* +/- zero */
            if (isneg)
                ret = 0x12;
            else
                ret = 0x02;
        } else {
            if (isden(farg.d)) {
                /* Denormalized numbers */
                ret = 0x10;
            } else {
                /* Normalized numbers */
                ret = 0x00;
            }
            if (isneg) {
                ret |= 0x08;
            } else {
                ret |= 0x04;
            }
        }
    }
    if (set_fprf) {
        /* We update FPSCR_FPRF */
        env->fpscr &= ~(0x1F << FPSCR_FPRF);
        env->fpscr |= ret << FPSCR_FPRF;
    }
    /* We just need fpcc to update Rc1 */
    return ret & 0xF;
}
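/* FPRF encoding recap (added for reference, matching the standard PowerPC
 * table): 0x11 QNaN, 0x09 -inf, 0x08 -normal, 0x18 -denormal, 0x12 -zero,
 * 0x02 +zero, 0x14 +denormal, 0x04 +normal, 0x05 +inf; the code above builds
 * these from the 0x10 denormal base plus the 0x08/0x04 sign bits. */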
/* Floating-point invalid operations exception */
static inline uint64_t fload_invalid_op_excp(int op)
{
    uint64_t ret = 0;
    int ve;

    ve = fpscr_ve;
    switch (op) {
    case POWERPC_EXCP_FP_VXSNAN:
        env->fpscr |= 1 << FPSCR_VXSNAN;
        break;
    case POWERPC_EXCP_FP_VXSOFT:
        env->fpscr |= 1 << FPSCR_VXSOFT;
        break;
    case POWERPC_EXCP_FP_VXISI:
        /* Magnitude subtraction of infinities */
        env->fpscr |= 1 << FPSCR_VXISI;
        goto update_arith;
    case POWERPC_EXCP_FP_VXIDI:
        /* Division of infinity by infinity */
        env->fpscr |= 1 << FPSCR_VXIDI;
        goto update_arith;
    case POWERPC_EXCP_FP_VXZDZ:
        /* Division of zero by zero */
        env->fpscr |= 1 << FPSCR_VXZDZ;
        goto update_arith;
    case POWERPC_EXCP_FP_VXIMZ:
        /* Multiplication of zero by infinity */
        env->fpscr |= 1 << FPSCR_VXIMZ;
        goto update_arith;
    case POWERPC_EXCP_FP_VXVC:
        /* Ordered comparison of NaN */
        env->fpscr |= 1 << FPSCR_VXVC;
        env->fpscr &= ~(0xF << FPSCR_FPCC);
        env->fpscr |= 0x11 << FPSCR_FPCC;
        /* We must update the target FPR before raising the exception */
        if (ve != 0) {
            env->exception_index = POWERPC_EXCP_PROGRAM;
            env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_VXVC;
            /* Update the floating-point enabled exception summary */
            env->fpscr |= 1 << FPSCR_FEX;
            /* Exception is deferred */
            ve = 0;
        }
        break;
    case POWERPC_EXCP_FP_VXSQRT:
        /* Square root of a negative number */
        env->fpscr |= 1 << FPSCR_VXSQRT;
    update_arith:
        env->fpscr &= ~((1 << FPSCR_FR) | (1 << FPSCR_FI));
        if (ve == 0) {
            /* Set the result to quiet NaN */
            ret = 0xFFF8000000000000ULL;
            env->fpscr &= ~(0xF << FPSCR_FPCC);
            env->fpscr |= 0x11 << FPSCR_FPCC;
        }
        break;
    case POWERPC_EXCP_FP_VXCVI:
        /* Invalid conversion */
        env->fpscr |= 1 << FPSCR_VXCVI;
        env->fpscr &= ~((1 << FPSCR_FR) | (1 << FPSCR_FI));
        if (ve == 0) {
            /* Set the result to quiet NaN */
            ret = 0xFFF8000000000000ULL;
            env->fpscr &= ~(0xF << FPSCR_FPCC);
            env->fpscr |= 0x11 << FPSCR_FPCC;
        }
        break;
    }
    /* Update the floating-point invalid operation summary */
    env->fpscr |= 1 << FPSCR_VX;
    /* Update the floating-point exception summary */
    env->fpscr |= 1 << FPSCR_FX;
    if (ve != 0) {
        /* Update the floating-point enabled exception summary */
        env->fpscr |= 1 << FPSCR_FEX;
        if (msr_fe0 != 0 || msr_fe1 != 0)
            helper_raise_exception_err(POWERPC_EXCP_PROGRAM, POWERPC_EXCP_FP | op);
    }
    return ret;
}
static inline void float_zero_divide_excp(void)
{
    env->fpscr |= 1 << FPSCR_ZX;
    env->fpscr &= ~((1 << FPSCR_FR) | (1 << FPSCR_FI));
    /* Update the floating-point exception summary */
    env->fpscr |= 1 << FPSCR_FX;
    if (fpscr_ze != 0) {
        /* Update the floating-point enabled exception summary */
        env->fpscr |= 1 << FPSCR_FEX;
        if (msr_fe0 != 0 || msr_fe1 != 0) {
            helper_raise_exception_err(POWERPC_EXCP_PROGRAM,
                                       POWERPC_EXCP_FP | POWERPC_EXCP_FP_ZX);
        }
    }
}

static inline void float_overflow_excp(void)
{
    env->fpscr |= 1 << FPSCR_OX;
    /* Update the floating-point exception summary */
    env->fpscr |= 1 << FPSCR_FX;
    if (fpscr_oe != 0) {
        /* XXX: should adjust the result */
        /* Update the floating-point enabled exception summary */
        env->fpscr |= 1 << FPSCR_FEX;
        /* We must update the target FPR before raising the exception */
        env->exception_index = POWERPC_EXCP_PROGRAM;
        env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_OX;
    } else {
        env->fpscr |= 1 << FPSCR_XX;
        env->fpscr |= 1 << FPSCR_FI;
    }
}

static inline void float_underflow_excp(void)
{
    env->fpscr |= 1 << FPSCR_UX;
    /* Update the floating-point exception summary */
    env->fpscr |= 1 << FPSCR_FX;
    if (fpscr_ue != 0) {
        /* XXX: should adjust the result */
        /* Update the floating-point enabled exception summary */
        env->fpscr |= 1 << FPSCR_FEX;
        /* We must update the target FPR before raising the exception */
        env->exception_index = POWERPC_EXCP_PROGRAM;
        env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_UX;
    }
}

static inline void float_inexact_excp(void)
{
    env->fpscr |= 1 << FPSCR_XX;
    /* Update the floating-point exception summary */
    env->fpscr |= 1 << FPSCR_FX;
    if (fpscr_xe != 0) {
        /* Update the floating-point enabled exception summary */
        env->fpscr |= 1 << FPSCR_FEX;
        /* We must update the target FPR before raising the exception */
        env->exception_index = POWERPC_EXCP_PROGRAM;
        env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_XX;
    }
}

static inline void fpscr_set_rounding_mode(void)
{
    int rnd_type;

    /* Set rounding mode */
    switch (fpscr_rn) {
    case 0:
        /* Best approximation (round to nearest) */
        rnd_type = float_round_nearest_even;
        break;
    case 1:
        /* Smaller magnitude (round toward zero) */
        rnd_type = float_round_to_zero;
        break;
    case 2:
        /* Round toward +infinity */
        rnd_type = float_round_up;
        break;
    default:
    case 3:
        /* Round toward -infinity */
        rnd_type = float_round_down;
        break;
    }
    set_float_rounding_mode(rnd_type, &env->fp_status);
}
void helper_fpscr_clrbit (uint32_t bit)
{
    int prev;

    prev = (env->fpscr >> bit) & 1;
    env->fpscr &= ~(1 << bit);
    if (prev == 1) {
        switch (bit) {
        case FPSCR_RN1:
        case FPSCR_RN:
            fpscr_set_rounding_mode();
            break;
        default:
            break;
        }
    }
}

void helper_fpscr_setbit (uint32_t bit)
{
    int prev;

    prev = (env->fpscr >> bit) & 1;
    env->fpscr |= 1 << bit;
    if (prev == 0) {
        switch (bit) {
        case FPSCR_VX:
            env->fpscr |= 1 << FPSCR_FX;
            if (fpscr_ve)
                goto raise_ve;
            /* fall through */
        case FPSCR_OX:
            env->fpscr |= 1 << FPSCR_FX;
            if (fpscr_oe)
                goto raise_oe;
            break;
        case FPSCR_UX:
            env->fpscr |= 1 << FPSCR_FX;
            if (fpscr_ue)
                goto raise_ue;
            break;
        case FPSCR_ZX:
            env->fpscr |= 1 << FPSCR_FX;
            if (fpscr_ze)
                goto raise_ze;
            break;
        case FPSCR_XX:
            env->fpscr |= 1 << FPSCR_FX;
            if (fpscr_xe)
                goto raise_xe;
            break;
        case FPSCR_VXSNAN:
        case FPSCR_VXISI:
        case FPSCR_VXIDI:
        case FPSCR_VXZDZ:
        case FPSCR_VXIMZ:
        case FPSCR_VXVC:
        case FPSCR_VXSOFT:
        case FPSCR_VXSQRT:
        case FPSCR_VXCVI:
            env->fpscr |= 1 << FPSCR_VX;
            env->fpscr |= 1 << FPSCR_FX;
            if (fpscr_ve != 0)
                goto raise_ve;
            break;
        case FPSCR_VE:
            if (fpscr_vx != 0) {
            raise_ve:
                env->error_code = POWERPC_EXCP_FP;
                if (fpscr_vxsnan)
                    env->error_code |= POWERPC_EXCP_FP_VXSNAN;
                if (fpscr_vxisi)
                    env->error_code |= POWERPC_EXCP_FP_VXISI;
                if (fpscr_vxidi)
                    env->error_code |= POWERPC_EXCP_FP_VXIDI;
                if (fpscr_vxzdz)
                    env->error_code |= POWERPC_EXCP_FP_VXZDZ;
                if (fpscr_vximz)
                    env->error_code |= POWERPC_EXCP_FP_VXIMZ;
                if (fpscr_vxvc)
                    env->error_code |= POWERPC_EXCP_FP_VXVC;
                if (fpscr_vxsoft)
                    env->error_code |= POWERPC_EXCP_FP_VXSOFT;
                if (fpscr_vxsqrt)
                    env->error_code |= POWERPC_EXCP_FP_VXSQRT;
                if (fpscr_vxcvi)
                    env->error_code |= POWERPC_EXCP_FP_VXCVI;
                goto raise_excp;
            }
            break;
        case FPSCR_OE:
            if (fpscr_ox != 0) {
            raise_oe:
                env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_OX;
                goto raise_excp;
            }
            break;
        case FPSCR_UE:
            if (fpscr_ux != 0) {
            raise_ue:
                env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_UX;
                goto raise_excp;
            }
            break;
        case FPSCR_ZE:
            if (fpscr_zx != 0) {
            raise_ze:
                env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_ZX;
                goto raise_excp;
            }
            break;
        case FPSCR_XE:
            if (fpscr_xx != 0) {
            raise_xe:
                env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_XX;
                goto raise_excp;
            }
            break;
        case FPSCR_RN1:
        case FPSCR_RN:
            fpscr_set_rounding_mode();
            break;
        default:
            break;
        raise_excp:
            /* Update the floating-point enabled exception summary */
            env->fpscr |= 1 << FPSCR_FEX;
            /* We have to update Rc1 before raising the exception */
            env->exception_index = POWERPC_EXCP_PROGRAM;
            break;
        }
    }
}
void helper_store_fpscr (uint64_t arg, uint32_t mask)
{
    /*
     * We use only the 32 LSB of the incoming fpr
     */
    uint32_t prev, new;
    int i;

    prev = env->fpscr;
    new = (uint32_t)arg;
    new &= ~0x60000000;
    new |= prev & 0x60000000;
    for (i = 0; i < 8; i++) {
        if (mask & (1 << i)) {
            env->fpscr &= ~(0xF << (4 * i));
            env->fpscr |= new & (0xF << (4 * i));
        }
    }
    /* Update VX and FEX */
    if (fpscr_ix != 0)
        env->fpscr |= 1 << FPSCR_VX;
    else
        env->fpscr &= ~(1 << FPSCR_VX);
    if ((fpscr_ex & fpscr_eex) != 0) {
        env->fpscr |= 1 << FPSCR_FEX;
        env->exception_index = POWERPC_EXCP_PROGRAM;
        /* XXX: we should compute it properly */
        env->error_code = POWERPC_EXCP_FP;
    }
    else
        env->fpscr &= ~(1 << FPSCR_FEX);
    fpscr_set_rounding_mode();
}
void helper_float_check_status (void)
{
#ifdef CONFIG_SOFTFLOAT
    if (env->exception_index == POWERPC_EXCP_PROGRAM &&
        (env->error_code & POWERPC_EXCP_FP)) {
        /* Deferred floating-point exception after target FPR update */
        if (msr_fe0 != 0 || msr_fe1 != 0)
            helper_raise_exception_err(env->exception_index, env->error_code);
    } else {
        int status = get_float_exception_flags(&env->fp_status);
        if (status & float_flag_divbyzero) {
            float_zero_divide_excp();
        } else if (status & float_flag_overflow) {
            float_overflow_excp();
        } else if (status & float_flag_underflow) {
            float_underflow_excp();
        } else if (status & float_flag_inexact) {
            float_inexact_excp();
        }
    }
#else
    if (env->exception_index == POWERPC_EXCP_PROGRAM &&
        (env->error_code & POWERPC_EXCP_FP)) {
        /* Deferred floating-point exception after target FPR update */
        if (msr_fe0 != 0 || msr_fe1 != 0)
            helper_raise_exception_err(env->exception_index, env->error_code);
    }
#endif
}

#ifdef CONFIG_SOFTFLOAT
void helper_reset_fpstatus (void)
{
    set_float_exception_flags(0, &env->fp_status);
}
#endif
/* fadd - fadd. */
uint64_t helper_fadd (uint64_t arg1, uint64_t arg2)
{
    CPU_DoubleU farg1, farg2;

    farg1.ll = arg1;
    farg2.ll = arg2;
#if USE_PRECISE_EMULATION
    if (unlikely(float64_is_signaling_nan(farg1.d) ||
                 float64_is_signaling_nan(farg2.d))) {
        /* sNaN addition */
        farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
    } else if (unlikely(float64_is_infinity(farg1.d) && float64_is_infinity(farg2.d) &&
                        float64_is_neg(farg1.d) != float64_is_neg(farg2.d))) {
        /* Magnitude subtraction of infinities */
        farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXISI);
    } else {
        farg1.d = float64_add(farg1.d, farg2.d, &env->fp_status);
    }
#else
    farg1.d = float64_add(farg1.d, farg2.d, &env->fp_status);
#endif
    return farg1.ll;
}

/* fsub - fsub. */
uint64_t helper_fsub (uint64_t arg1, uint64_t arg2)
{
    CPU_DoubleU farg1, farg2;

    farg1.ll = arg1;
    farg2.ll = arg2;
#if USE_PRECISE_EMULATION
    if (unlikely(float64_is_signaling_nan(farg1.d) ||
                 float64_is_signaling_nan(farg2.d))) {
        /* sNaN subtraction */
        farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
    } else if (unlikely(float64_is_infinity(farg1.d) && float64_is_infinity(farg2.d) &&
                        float64_is_neg(farg1.d) == float64_is_neg(farg2.d))) {
        /* Magnitude subtraction of infinities */
        farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXISI);
    } else {
        farg1.d = float64_sub(farg1.d, farg2.d, &env->fp_status);
    }
#else
    farg1.d = float64_sub(farg1.d, farg2.d, &env->fp_status);
#endif
    return farg1.ll;
}

/* fmul - fmul. */
uint64_t helper_fmul (uint64_t arg1, uint64_t arg2)
{
    CPU_DoubleU farg1, farg2;

    farg1.ll = arg1;
    farg2.ll = arg2;
#if USE_PRECISE_EMULATION
    if (unlikely(float64_is_signaling_nan(farg1.d) ||
                 float64_is_signaling_nan(farg2.d))) {
        /* sNaN multiplication */
        farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
    } else if (unlikely((float64_is_infinity(farg1.d) && float64_is_zero(farg2.d)) ||
                        (float64_is_zero(farg1.d) && float64_is_infinity(farg2.d)))) {
        /* Multiplication of zero by infinity */
        farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXIMZ);
    } else {
        farg1.d = float64_mul(farg1.d, farg2.d, &env->fp_status);
    }
#else
    farg1.d = float64_mul(farg1.d, farg2.d, &env->fp_status);
#endif
    return farg1.ll;
}

/* fdiv - fdiv. */
uint64_t helper_fdiv (uint64_t arg1, uint64_t arg2)
{
    CPU_DoubleU farg1, farg2;

    farg1.ll = arg1;
    farg2.ll = arg2;
#if USE_PRECISE_EMULATION
    if (unlikely(float64_is_signaling_nan(farg1.d) ||
                 float64_is_signaling_nan(farg2.d))) {
        /* sNaN division */
        farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
    } else if (unlikely(float64_is_infinity(farg1.d) && float64_is_infinity(farg2.d))) {
        /* Division of infinity by infinity */
        farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXIDI);
    } else if (unlikely(float64_is_zero(farg1.d) && float64_is_zero(farg2.d))) {
        /* Division of zero by zero */
        farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXZDZ);
    } else {
        farg1.d = float64_div(farg1.d, farg2.d, &env->fp_status);
    }
#else
    farg1.d = float64_div(farg1.d, farg2.d, &env->fp_status);
#endif
    return farg1.ll;
}
/* fabs */
uint64_t helper_fabs (uint64_t arg)
{
    CPU_DoubleU farg;

    farg.ll = arg;
    farg.d = float64_abs(farg.d);
    return farg.ll;
}

/* fnabs */
uint64_t helper_fnabs (uint64_t arg)
{
    CPU_DoubleU farg;

    farg.ll = arg;
    farg.d = float64_abs(farg.d);
    farg.d = float64_chs(farg.d);
    return farg.ll;
}

/* fneg */
uint64_t helper_fneg (uint64_t arg)
{
    CPU_DoubleU farg;

    farg.ll = arg;
    farg.d = float64_chs(farg.d);
    return farg.ll;
}

/* fctiw - fctiw. */
uint64_t helper_fctiw (uint64_t arg)
{
    CPU_DoubleU farg;
    farg.ll = arg;

    if (unlikely(float64_is_signaling_nan(farg.d))) {
        /* sNaN conversion */
        farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN | POWERPC_EXCP_FP_VXCVI);
    } else if (unlikely(float64_is_nan(farg.d) || float64_is_infinity(farg.d))) {
        /* qNaN / infinity conversion */
        farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXCVI);
    } else {
        farg.ll = float64_to_int32(farg.d, &env->fp_status);
#if USE_PRECISE_EMULATION
        /* XXX: higher bits are not supposed to be significant.
         *      to make tests easier, return the same as a real PowerPC 750
         */
        farg.ll |= 0xFFF80000ULL << 32;
#endif
    }
    return farg.ll;
}

/* fctiwz - fctiwz. */
uint64_t helper_fctiwz (uint64_t arg)
{
    CPU_DoubleU farg;
    farg.ll = arg;

    if (unlikely(float64_is_signaling_nan(farg.d))) {
        /* sNaN conversion */
        farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN | POWERPC_EXCP_FP_VXCVI);
    } else if (unlikely(float64_is_nan(farg.d) || float64_is_infinity(farg.d))) {
        /* qNaN / infinity conversion */
        farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXCVI);
    } else {
        farg.ll = float64_to_int32_round_to_zero(farg.d, &env->fp_status);
#if USE_PRECISE_EMULATION
        /* XXX: higher bits are not supposed to be significant.
         *      to make tests easier, return the same as a real PowerPC 750
         */
        farg.ll |= 0xFFF80000ULL << 32;
#endif
    }
    return farg.ll;
}
#if defined(TARGET_PPC64)
/* fcfid - fcfid. */
uint64_t helper_fcfid (uint64_t arg)
{
    CPU_DoubleU farg;
    farg.d = int64_to_float64(arg, &env->fp_status);
    return farg.ll;
}

/* fctid - fctid. */
uint64_t helper_fctid (uint64_t arg)
{
    CPU_DoubleU farg;
    farg.ll = arg;

    if (unlikely(float64_is_signaling_nan(farg.d))) {
        /* sNaN conversion */
        farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN | POWERPC_EXCP_FP_VXCVI);
    } else if (unlikely(float64_is_nan(farg.d) || float64_is_infinity(farg.d))) {
        /* qNaN / infinity conversion */
        farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXCVI);
    } else {
        farg.ll = float64_to_int64(farg.d, &env->fp_status);
    }
    return farg.ll;
}

/* fctidz - fctidz. */
uint64_t helper_fctidz (uint64_t arg)
{
    CPU_DoubleU farg;
    farg.ll = arg;

    if (unlikely(float64_is_signaling_nan(farg.d))) {
        /* sNaN conversion */
        farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN | POWERPC_EXCP_FP_VXCVI);
    } else if (unlikely(float64_is_nan(farg.d) || float64_is_infinity(farg.d))) {
        /* qNaN / infinity conversion */
        farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXCVI);
    } else {
        farg.ll = float64_to_int64_round_to_zero(farg.d, &env->fp_status);
    }
    return farg.ll;
}
#endif
static inline uint64_t do_fri(uint64_t arg, int rounding_mode)
{
    CPU_DoubleU farg;
    farg.ll = arg;

    if (unlikely(float64_is_signaling_nan(farg.d))) {
        /* sNaN round */
        farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN | POWERPC_EXCP_FP_VXCVI);
    } else if (unlikely(float64_is_nan(farg.d) || float64_is_infinity(farg.d))) {
        /* qNaN / infinity round */
        farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXCVI);
    } else {
        set_float_rounding_mode(rounding_mode, &env->fp_status);
        farg.ll = float64_round_to_int(farg.d, &env->fp_status);
        /* Restore rounding mode from FPSCR */
        fpscr_set_rounding_mode();
    }
    return farg.ll;
}

uint64_t helper_frin (uint64_t arg)
{
    return do_fri(arg, float_round_nearest_even);
}

uint64_t helper_friz (uint64_t arg)
{
    return do_fri(arg, float_round_to_zero);
}

uint64_t helper_frip (uint64_t arg)
{
    return do_fri(arg, float_round_up);
}

uint64_t helper_frim (uint64_t arg)
{
    return do_fri(arg, float_round_down);
}
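/* Examples (illustrative): as implemented here, 2.5 rounds to 2.0 under frin
 * (ties-to-even), 2.0 under friz, 3.0 under frip and 2.0 under frim; for
 * -2.5 the results are -2.0, -2.0, -2.0 and -3.0. Note the ISA defines frin
 * as round-to-nearest with ties away from zero, so hardware would give 3.0
 * for 2.5; the softfloat nearest-even mode used above is an approximation. */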
/* fmadd - fmadd. */
uint64_t helper_fmadd (uint64_t arg1, uint64_t arg2, uint64_t arg3)
{
    CPU_DoubleU farg1, farg2, farg3;

    farg1.ll = arg1;
    farg2.ll = arg2;
    farg3.ll = arg3;
#if USE_PRECISE_EMULATION
    if (unlikely(float64_is_signaling_nan(farg1.d) ||
                 float64_is_signaling_nan(farg2.d) ||
                 float64_is_signaling_nan(farg3.d))) {
        /* sNaN operation */
        farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
    } else if (unlikely((float64_is_infinity(farg1.d) && float64_is_zero(farg2.d)) ||
                        (float64_is_zero(farg1.d) && float64_is_infinity(farg2.d)))) {
        /* Multiplication of zero by infinity */
        farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXIMZ);
    } else {
#ifdef FLOAT128
        /* This is the way the PowerPC specification defines it */
        float128 ft0_128, ft1_128;

        ft0_128 = float64_to_float128(farg1.d, &env->fp_status);
        ft1_128 = float64_to_float128(farg2.d, &env->fp_status);
        ft0_128 = float128_mul(ft0_128, ft1_128, &env->fp_status);
        if (unlikely(float128_is_infinity(ft0_128) && float64_is_infinity(farg3.d) &&
                     float128_is_neg(ft0_128) != float64_is_neg(farg3.d))) {
            /* Magnitude subtraction of infinities */
            farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXISI);
        } else {
            ft1_128 = float64_to_float128(farg3.d, &env->fp_status);
            ft0_128 = float128_add(ft0_128, ft1_128, &env->fp_status);
            farg1.d = float128_to_float64(ft0_128, &env->fp_status);
        }
#else
        /* This is OK on x86 hosts */
        farg1.d = (farg1.d * farg2.d) + farg3.d;
#endif
    }
#else
    farg1.d = float64_mul(farg1.d, farg2.d, &env->fp_status);
    farg1.d = float64_add(farg1.d, farg3.d, &env->fp_status);
#endif
    return farg1.ll;
}

/* fmsub - fmsub. */
uint64_t helper_fmsub (uint64_t arg1, uint64_t arg2, uint64_t arg3)
{
    CPU_DoubleU farg1, farg2, farg3;

    farg1.ll = arg1;
    farg2.ll = arg2;
    farg3.ll = arg3;
#if USE_PRECISE_EMULATION
    if (unlikely(float64_is_signaling_nan(farg1.d) ||
                 float64_is_signaling_nan(farg2.d) ||
                 float64_is_signaling_nan(farg3.d))) {
        /* sNaN operation */
        farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
    } else if (unlikely((float64_is_infinity(farg1.d) && float64_is_zero(farg2.d)) ||
                        (float64_is_zero(farg1.d) && float64_is_infinity(farg2.d)))) {
        /* Multiplication of zero by infinity */
        farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXIMZ);
    } else {
#ifdef FLOAT128
        /* This is the way the PowerPC specification defines it */
        float128 ft0_128, ft1_128;

        ft0_128 = float64_to_float128(farg1.d, &env->fp_status);
        ft1_128 = float64_to_float128(farg2.d, &env->fp_status);
        ft0_128 = float128_mul(ft0_128, ft1_128, &env->fp_status);
        if (unlikely(float128_is_infinity(ft0_128) && float64_is_infinity(farg3.d) &&
                     float128_is_neg(ft0_128) == float64_is_neg(farg3.d))) {
            /* Magnitude subtraction of infinities */
            farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXISI);
        } else {
            ft1_128 = float64_to_float128(farg3.d, &env->fp_status);
            ft0_128 = float128_sub(ft0_128, ft1_128, &env->fp_status);
            farg1.d = float128_to_float64(ft0_128, &env->fp_status);
        }
#else
        /* This is OK on x86 hosts */
        farg1.d = (farg1.d * farg2.d) - farg3.d;
#endif
    }
#else
    farg1.d = float64_mul(farg1.d, farg2.d, &env->fp_status);
    farg1.d = float64_sub(farg1.d, farg3.d, &env->fp_status);
#endif
    return farg1.ll;
}

/* fnmadd - fnmadd. */
uint64_t helper_fnmadd (uint64_t arg1, uint64_t arg2, uint64_t arg3)
{
    CPU_DoubleU farg1, farg2, farg3;

    farg1.ll = arg1;
    farg2.ll = arg2;
    farg3.ll = arg3;

    if (unlikely(float64_is_signaling_nan(farg1.d) ||
                 float64_is_signaling_nan(farg2.d) ||
                 float64_is_signaling_nan(farg3.d))) {
        /* sNaN operation */
        farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
    } else if (unlikely((float64_is_infinity(farg1.d) && float64_is_zero(farg2.d)) ||
                        (float64_is_zero(farg1.d) && float64_is_infinity(farg2.d)))) {
        /* Multiplication of zero by infinity */
        farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXIMZ);
    } else {
#if USE_PRECISE_EMULATION
#ifdef FLOAT128
        /* This is the way the PowerPC specification defines it */
        float128 ft0_128, ft1_128;

        ft0_128 = float64_to_float128(farg1.d, &env->fp_status);
        ft1_128 = float64_to_float128(farg2.d, &env->fp_status);
        ft0_128 = float128_mul(ft0_128, ft1_128, &env->fp_status);
        if (unlikely(float128_is_infinity(ft0_128) && float64_is_infinity(farg3.d) &&
                     float128_is_neg(ft0_128) != float64_is_neg(farg3.d))) {
            /* Magnitude subtraction of infinities */
            farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXISI);
        } else {
            ft1_128 = float64_to_float128(farg3.d, &env->fp_status);
            ft0_128 = float128_add(ft0_128, ft1_128, &env->fp_status);
            farg1.d = float128_to_float64(ft0_128, &env->fp_status);
        }
#else
        /* This is OK on x86 hosts */
        farg1.d = (farg1.d * farg2.d) + farg3.d;
#endif
#else
        farg1.d = float64_mul(farg1.d, farg2.d, &env->fp_status);
        farg1.d = float64_add(farg1.d, farg3.d, &env->fp_status);
#endif
        if (likely(!float64_is_nan(farg1.d)))
            farg1.d = float64_chs(farg1.d);
    }
    return farg1.ll;
}

/* fnmsub - fnmsub. */
uint64_t helper_fnmsub (uint64_t arg1, uint64_t arg2, uint64_t arg3)
{
    CPU_DoubleU farg1, farg2, farg3;

    farg1.ll = arg1;
    farg2.ll = arg2;
    farg3.ll = arg3;

    if (unlikely(float64_is_signaling_nan(farg1.d) ||
                 float64_is_signaling_nan(farg2.d) ||
                 float64_is_signaling_nan(farg3.d))) {
        /* sNaN operation */
        farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
    } else if (unlikely((float64_is_infinity(farg1.d) && float64_is_zero(farg2.d)) ||
                        (float64_is_zero(farg1.d) && float64_is_infinity(farg2.d)))) {
        /* Multiplication of zero by infinity */
        farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXIMZ);
    } else {
#if USE_PRECISE_EMULATION
#ifdef FLOAT128
        /* This is the way the PowerPC specification defines it */
        float128 ft0_128, ft1_128;

        ft0_128 = float64_to_float128(farg1.d, &env->fp_status);
        ft1_128 = float64_to_float128(farg2.d, &env->fp_status);
        ft0_128 = float128_mul(ft0_128, ft1_128, &env->fp_status);
        if (unlikely(float128_is_infinity(ft0_128) && float64_is_infinity(farg3.d) &&
                     float128_is_neg(ft0_128) == float64_is_neg(farg3.d))) {
            /* Magnitude subtraction of infinities */
            farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXISI);
        } else {
            ft1_128 = float64_to_float128(farg3.d, &env->fp_status);
            ft0_128 = float128_sub(ft0_128, ft1_128, &env->fp_status);
            farg1.d = float128_to_float64(ft0_128, &env->fp_status);
        }
#else
        /* This is OK on x86 hosts */
        farg1.d = (farg1.d * farg2.d) - farg3.d;
#endif
#else
        farg1.d = float64_mul(farg1.d, farg2.d, &env->fp_status);
        farg1.d = float64_sub(farg1.d, farg3.d, &env->fp_status);
#endif
        if (likely(!float64_is_nan(farg1.d)))
            farg1.d = float64_chs(farg1.d);
    }
    return farg1.ll;
}
/* frsp - frsp. */
uint64_t helper_frsp (uint64_t arg)
{
    CPU_DoubleU farg;
    float32 f32;
    farg.ll = arg;

#if USE_PRECISE_EMULATION
    if (unlikely(float64_is_signaling_nan(farg.d))) {
        /* sNaN round to single precision */
        farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
    } else {
        f32 = float64_to_float32(farg.d, &env->fp_status);
        farg.d = float32_to_float64(f32, &env->fp_status);
    }
#else
    f32 = float64_to_float32(farg.d, &env->fp_status);
    farg.d = float32_to_float64(f32, &env->fp_status);
#endif
    return farg.ll;
}
/* fsqrt - fsqrt. */
uint64_t helper_fsqrt (uint64_t arg)
{
    CPU_DoubleU farg;
    farg.ll = arg;

    if (unlikely(float64_is_signaling_nan(farg.d))) {
        /* sNaN square root */
        farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
    } else if (unlikely(float64_is_neg(farg.d) && !float64_is_zero(farg.d))) {
        /* Square root of a negative nonzero number */
        farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSQRT);
    } else {
        farg.d = float64_sqrt(farg.d, &env->fp_status);
    }
    return farg.ll;
}
/* fre - fre. */
uint64_t helper_fre (uint64_t arg)
{
    CPU_DoubleU farg;
    farg.ll = arg;

    if (unlikely(float64_is_signaling_nan(farg.d))) {
        /* sNaN reciprocal */
        farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
    } else {
        farg.d = float64_div(float64_one, farg.d, &env->fp_status);
    }
    /* Return the raw bit pattern (returning farg.d here would convert the
     * float64 value to an integer instead of preserving its encoding) */
    return farg.ll;
}
/* fres - fres. */
uint64_t helper_fres (uint64_t arg)
{
    CPU_DoubleU farg;
    float32 f32;
    farg.ll = arg;

    if (unlikely(float64_is_signaling_nan(farg.d))) {
        /* sNaN reciprocal */
        farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
    } else {
        farg.d = float64_div(float64_one, farg.d, &env->fp_status);
        f32 = float64_to_float32(farg.d, &env->fp_status);
        farg.d = float32_to_float64(f32, &env->fp_status);
    }
    return farg.ll;
}

/* frsqrte - frsqrte. */
uint64_t helper_frsqrte (uint64_t arg)
{
    CPU_DoubleU farg;
    float32 f32;
    farg.ll = arg;

    if (unlikely(float64_is_signaling_nan(farg.d))) {
        /* sNaN reciprocal square root */
        farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
    } else if (unlikely(float64_is_neg(farg.d) && !float64_is_zero(farg.d))) {
        /* Reciprocal square root of a negative nonzero number */
        farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSQRT);
    } else {
        farg.d = float64_sqrt(farg.d, &env->fp_status);
        farg.d = float64_div(float64_one, farg.d, &env->fp_status);
        f32 = float64_to_float32(farg.d, &env->fp_status);
        farg.d = float32_to_float64(f32, &env->fp_status);
    }
    return farg.ll;
}
/* fsel - fsel. */
uint64_t helper_fsel (uint64_t arg1, uint64_t arg2, uint64_t arg3)
{
    CPU_DoubleU farg1;

    farg1.ll = arg1;

    if ((!float64_is_neg(farg1.d) || float64_is_zero(farg1.d)) && !float64_is_nan(farg1.d))
        return arg2;
    else
        return arg3;
}
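/* Added commentary: fsel implements frD = (frA >= 0.0 ? frB : frC) without a
 * branch in guest code; a NaN in frA selects frC. Compilers use it for
 * branchless conditional moves, so the NaN and -0.0 handling above matters. */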
void helper_fcmpu (uint64_t arg1, uint64_t arg2, uint32_t crfD)
{
    CPU_DoubleU farg1, farg2;
    uint32_t ret = 0;
    farg1.ll = arg1;
    farg2.ll = arg2;

    if (unlikely(float64_is_nan(farg1.d) ||
                 float64_is_nan(farg2.d))) {
        ret = 0x01UL;
    } else if (float64_lt(farg1.d, farg2.d, &env->fp_status)) {
        ret = 0x08UL;
    } else if (!float64_le(farg1.d, farg2.d, &env->fp_status)) {
        ret = 0x04UL;
    } else {
        ret = 0x02UL;
    }

    env->fpscr &= ~(0x0F << FPSCR_FPRF);
    env->fpscr |= ret << FPSCR_FPRF;
    env->crf[crfD] = ret;
    if (unlikely(ret == 0x01UL
                 && (float64_is_signaling_nan(farg1.d) ||
                     float64_is_signaling_nan(farg2.d)))) {
        /* sNaN comparison */
        fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
    }
}

void helper_fcmpo (uint64_t arg1, uint64_t arg2, uint32_t crfD)
{
    CPU_DoubleU farg1, farg2;
    uint32_t ret = 0;
    farg1.ll = arg1;
    farg2.ll = arg2;

    if (unlikely(float64_is_nan(farg1.d) ||
                 float64_is_nan(farg2.d))) {
        ret = 0x01UL;
    } else if (float64_lt(farg1.d, farg2.d, &env->fp_status)) {
        ret = 0x08UL;
    } else if (!float64_le(farg1.d, farg2.d, &env->fp_status)) {
        ret = 0x04UL;
    } else {
        ret = 0x02UL;
    }

    env->fpscr &= ~(0x0F << FPSCR_FPRF);
    env->fpscr |= ret << FPSCR_FPRF;
    env->crf[crfD] = ret;
    if (unlikely (ret == 0x01UL)) {
        if (float64_is_signaling_nan(farg1.d) ||
            float64_is_signaling_nan(farg2.d)) {
            /* sNaN comparison */
            fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN |
                                  POWERPC_EXCP_FP_VXVC);
        } else {
            /* qNaN comparison */
            fload_invalid_op_excp(POWERPC_EXCP_FP_VXVC);
        }
    }
}
#if !defined (CONFIG_USER_ONLY)
void helper_store_msr (target_ulong val)
{
    val = hreg_store_msr(env, val, 0);
    if (val != 0) {
        env->interrupt_request |= CPU_INTERRUPT_EXITTB;
        helper_raise_exception(val);
    }
}

static inline void do_rfi(target_ulong nip, target_ulong msr,
                          target_ulong msrm, int keep_msrh)
{
#if defined(TARGET_PPC64)
    if (msr & (1ULL << MSR_SF)) {
        nip = (uint64_t)nip;
        msr &= (uint64_t)msrm;
    } else {
        nip = (uint32_t)nip;
        msr = (uint32_t)(msr & msrm);
        if (keep_msrh)
            msr |= env->msr & ~((uint64_t)0xFFFFFFFF);
    }
#else
    nip = (uint32_t)nip;
    msr &= (uint32_t)msrm;
#endif
    /* XXX: beware: this is false if VLE is supported */
    env->nip = nip & ~((target_ulong)0x00000003);
    hreg_store_msr(env, msr, 1);
#if defined (DEBUG_OP)
    cpu_dump_rfi(env->nip, env->msr);
#endif
    /* No need to raise an exception here,
     * as rfi is always the last insn of a TB
     */
    env->interrupt_request |= CPU_INTERRUPT_EXITTB;
}
void helper_rfi (void)
{
    do_rfi(env->spr[SPR_SRR0], env->spr[SPR_SRR1],
           ~((target_ulong)0x783F0000), 1);
}

#if defined(TARGET_PPC64)
void helper_rfid (void)
{
    do_rfi(env->spr[SPR_SRR0], env->spr[SPR_SRR1],
           ~((target_ulong)0x783F0000), 0);
}

void helper_hrfid (void)
{
    do_rfi(env->spr[SPR_HSRR0], env->spr[SPR_HSRR1],
           ~((target_ulong)0x783F0000), 0);
}
#endif
#endif
void helper_tw (target_ulong arg1, target_ulong arg2, uint32_t flags)
{
    if (!likely(!(((int32_t)arg1 < (int32_t)arg2 && (flags & 0x10)) ||
                  ((int32_t)arg1 > (int32_t)arg2 && (flags & 0x08)) ||
                  ((int32_t)arg1 == (int32_t)arg2 && (flags & 0x04)) ||
                  ((uint32_t)arg1 < (uint32_t)arg2 && (flags & 0x02)) ||
                  ((uint32_t)arg1 > (uint32_t)arg2 && (flags & 0x01))))) {
        helper_raise_exception_err(POWERPC_EXCP_PROGRAM, POWERPC_EXCP_TRAP);
    }
}
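/* Added for reference: the TO field decodes as 0x10 signed less-than, 0x08
 * signed greater-than, 0x04 equal, 0x02 unsigned less-than, 0x01 unsigned
 * greater-than. With all five bits set (tw 31,r0,r0, the "trap" mnemonic)
 * some condition always holds, so the helper traps unconditionally. */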
#if defined(TARGET_PPC64)
void helper_td (target_ulong arg1, target_ulong arg2, uint32_t flags)
{
    if (!likely(!(((int64_t)arg1 < (int64_t)arg2 && (flags & 0x10)) ||
                  ((int64_t)arg1 > (int64_t)arg2 && (flags & 0x08)) ||
                  ((int64_t)arg1 == (int64_t)arg2 && (flags & 0x04)) ||
                  ((uint64_t)arg1 < (uint64_t)arg2 && (flags & 0x02)) ||
                  ((uint64_t)arg1 > (uint64_t)arg2 && (flags & 0x01)))))
        helper_raise_exception_err(POWERPC_EXCP_PROGRAM, POWERPC_EXCP_TRAP);
}
#endif
/*****************************************************************************/
/* PowerPC 601 specific instructions (POWER bridge) */

target_ulong helper_clcs (uint32_t arg)
{
    switch (arg) {
    case 0x0CUL:
        /* Instruction cache line size */
        return env->icache_line_size;
        break;
    case 0x0DUL:
        /* Data cache line size */
        return env->dcache_line_size;
        break;
    case 0x0EUL:
        /* Minimum cache line size */
        return (env->icache_line_size < env->dcache_line_size) ?
                env->icache_line_size : env->dcache_line_size;
        break;
    case 0x0FUL:
        /* Maximum cache line size */
        return (env->icache_line_size > env->dcache_line_size) ?
                env->icache_line_size : env->dcache_line_size;
        break;
    default:
        /* Undefined */
        return 0;
        break;
    }
}
target_ulong helper_div (target_ulong arg1, target_ulong arg2)
{
    uint64_t tmp = (uint64_t)arg1 << 32 | env->spr[SPR_MQ];

    if (((int32_t)tmp == INT32_MIN && (int32_t)arg2 == (int32_t)-1) ||
        (int32_t)arg2 == 0) {
        env->spr[SPR_MQ] = 0;
        return INT32_MIN;
    } else {
        env->spr[SPR_MQ] = tmp % arg2;
        return tmp / (int32_t)arg2;
    }
}

target_ulong helper_divo (target_ulong arg1, target_ulong arg2)
{
    uint64_t tmp = (uint64_t)arg1 << 32 | env->spr[SPR_MQ];

    if (((int32_t)tmp == INT32_MIN && (int32_t)arg2 == (int32_t)-1) ||
        (int32_t)arg2 == 0) {
        env->xer |= (1 << XER_OV) | (1 << XER_SO);
        env->spr[SPR_MQ] = 0;
        return INT32_MIN;
    } else {
        env->spr[SPR_MQ] = tmp % arg2;
        tmp /= (int32_t)arg2;
        if ((int32_t)tmp != tmp) {
            env->xer |= (1 << XER_OV) | (1 << XER_SO);
        } else {
            env->xer &= ~(1 << XER_OV);
        }
        return tmp;
    }
}

target_ulong helper_divs (target_ulong arg1, target_ulong arg2)
{
    if (((int32_t)arg1 == INT32_MIN && (int32_t)arg2 == (int32_t)-1) ||
        (int32_t)arg2 == 0) {
        env->spr[SPR_MQ] = 0;
        return INT32_MIN;
    } else {
        env->spr[SPR_MQ] = (int32_t)arg1 % (int32_t)arg2;
        return (int32_t)arg1 / (int32_t)arg2;
    }
}

target_ulong helper_divso (target_ulong arg1, target_ulong arg2)
{
    if (((int32_t)arg1 == INT32_MIN && (int32_t)arg2 == (int32_t)-1) ||
        (int32_t)arg2 == 0) {
        env->xer |= (1 << XER_OV) | (1 << XER_SO);
        env->spr[SPR_MQ] = 0;
        return INT32_MIN;
    } else {
        env->xer &= ~(1 << XER_OV);
        env->spr[SPR_MQ] = (int32_t)arg1 % (int32_t)arg2;
        return (int32_t)arg1 / (int32_t)arg2;
    }
}
#if !defined (CONFIG_USER_ONLY)
target_ulong helper_rac (target_ulong addr)
{
    mmu_ctx_t ctx;
    int nb_BATs;
    target_ulong ret = 0;

    /* We don't have to generate many instances of this instruction,
     * as rac is supervisor only.
     */
    /* XXX: FIX THIS: Pretend we have no BAT */
    nb_BATs = env->nb_BATs;
    env->nb_BATs = 0;
    if (get_physical_address(env, &ctx, addr, 0, ACCESS_INT) == 0)
        ret = ctx.raddr;
    env->nb_BATs = nb_BATs;
    return ret;
}

void helper_rfsvc (void)
{
    do_rfi(env->lr, env->ctr, 0x0000FFFF, 0);
}
#endif
/*****************************************************************************/
/* 602 specific instructions */
/* mfrom is the most crazy instruction ever seen, imho ! */
/* Real implementation uses a ROM table. Do the same */
/* Extremely decomposed:
 *   return 256 * log10(10 ** (-arg / 256) + 1.0) + 0.5
 */
#if !defined (CONFIG_USER_ONLY)
target_ulong helper_602_mfrom (target_ulong arg)
{
    if (likely(arg < 602)) {
#include "mfrom_table.c"
        return mfrom_ROM_table[arg];
    } else {
        return 0;
    }
}
#endif
/*****************************************************************************/
/* Embedded PowerPC specific helpers */

/* XXX: to be improved to check access rights when in user-mode */
target_ulong helper_load_dcr (target_ulong dcrn)
{
    uint32_t val = 0;

    if (unlikely(env->dcr_env == NULL)) {
        qemu_log("No DCR environment\n");
        helper_raise_exception_err(POWERPC_EXCP_PROGRAM,
                                   POWERPC_EXCP_INVAL | POWERPC_EXCP_INVAL_INVAL);
    } else if (unlikely(ppc_dcr_read(env->dcr_env, (uint32_t)dcrn, &val) != 0)) {
        qemu_log("DCR read error %d %03x\n", (uint32_t)dcrn, (uint32_t)dcrn);
        helper_raise_exception_err(POWERPC_EXCP_PROGRAM,
                                   POWERPC_EXCP_INVAL | POWERPC_EXCP_PRIV_REG);
    }
    return val;
}

void helper_store_dcr (target_ulong dcrn, target_ulong val)
{
    if (unlikely(env->dcr_env == NULL)) {
        qemu_log("No DCR environment\n");
        helper_raise_exception_err(POWERPC_EXCP_PROGRAM,
                                   POWERPC_EXCP_INVAL | POWERPC_EXCP_INVAL_INVAL);
    } else if (unlikely(ppc_dcr_write(env->dcr_env, (uint32_t)dcrn, (uint32_t)val) != 0)) {
        qemu_log("DCR write error %d %03x\n", (uint32_t)dcrn, (uint32_t)dcrn);
        helper_raise_exception_err(POWERPC_EXCP_PROGRAM,
                                   POWERPC_EXCP_INVAL | POWERPC_EXCP_PRIV_REG);
    }
}
#if !defined(CONFIG_USER_ONLY)
void helper_40x_rfci (void)
{
    do_rfi(env->spr[SPR_40x_SRR2], env->spr[SPR_40x_SRR3],
           ~((target_ulong)0xFFFF0000), 0);
}

/* Bug fix for the three helpers below: pass the SPR value, not the SPR
 * number, as the saved MSR */
void helper_rfci (void)
{
    do_rfi(env->spr[SPR_BOOKE_CSRR0], env->spr[SPR_BOOKE_CSRR1],
           ~((target_ulong)0x3FFF0000), 0);
}

void helper_rfdi (void)
{
    do_rfi(env->spr[SPR_BOOKE_DSRR0], env->spr[SPR_BOOKE_DSRR1],
           ~((target_ulong)0x3FFF0000), 0);
}

void helper_rfmci (void)
{
    do_rfi(env->spr[SPR_BOOKE_MCSRR0], env->spr[SPR_BOOKE_MCSRR1],
           ~((target_ulong)0x3FFF0000), 0);
}
#endif
/* 440 specific */
target_ulong helper_dlmzb (target_ulong high, target_ulong low, uint32_t update_Rc)
{
    target_ulong mask;
    int i;

    i = 1;
    for (mask = 0xFF000000; mask != 0; mask = mask >> 8) {
        if ((high & mask) == 0) {
            if (update_Rc) {
                env->crf[0] = 0x4;
            }
            goto done;
        }
        i++;
    }
    for (mask = 0xFF000000; mask != 0; mask = mask >> 8) {
        if ((low & mask) == 0) {
            if (update_Rc) {
                env->crf[0] = 0x8;
            }
            goto done;
        }
        i++;
    }
    if (update_Rc) {
        env->crf[0] = 0x2;
    }
 done:
    env->xer = (env->xer & ~0x7F) | i;
    if (update_Rc) {
        env->crf[0] |= xer_so;
    }
    return i;
}
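/* Worked example (illustrative): high = 0x41420043 makes the first loop see
 * 'A' (i becomes 2), 'B' (i becomes 3) and then a zero byte, so CR0 is set
 * to 0x4 and the low seven bits of XER receive 3; as implemented, the count
 * is the 1-based position of the first zero byte scanning high then low. */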
/*****************************************************************************/
/* Altivec extension helpers */
#if defined(HOST_WORDS_BIGENDIAN)
#define HI_IDX 0
#define LO_IDX 1
#else
#define HI_IDX 1
#define LO_IDX 0
#endif

#if defined(HOST_WORDS_BIGENDIAN)
#define VECTOR_FOR_INORDER_I(index, element)                    \
    for (index = 0; index < ARRAY_SIZE(r->element); index++)
#else
#define VECTOR_FOR_INORDER_I(index, element)                    \
    for (index = ARRAY_SIZE(r->element)-1; index >= 0; index--)
#endif

/* If X is a NaN, store the corresponding QNaN into RESULT.  Otherwise,
 * execute the following block.  */
#define DO_HANDLE_NAN(result, x)                                \
    if (float32_is_nan(x) || float32_is_signaling_nan(x)) {     \
        CPU_FloatU __f;                                         \
        __f.f = x;                                              \
        __f.l = __f.l | (1 << 22);  /* Set QNaN bit. */         \
        result = __f.f;                                         \
    } else

#define HANDLE_NAN1(result, x)                  \
    DO_HANDLE_NAN(result, x)
#define HANDLE_NAN2(result, x, y)               \
    DO_HANDLE_NAN(result, x) DO_HANDLE_NAN(result, y)
#define HANDLE_NAN3(result, x, y, z)            \
    DO_HANDLE_NAN(result, x) DO_HANDLE_NAN(result, y) DO_HANDLE_NAN(result, z)
/* Saturating arithmetic helpers.  */
#define SATCVT(from, to, from_type, to_type, min, max)          \
    static inline to_type cvt##from##to(from_type x, int *sat)  \
    {                                                           \
        to_type r;                                              \
        if (x < (from_type)min) {                               \
            r = min;                                            \
            *sat = 1;                                           \
        } else if (x > (from_type)max) {                        \
            r = max;                                            \
            *sat = 1;                                           \
        } else {                                                \
            r = x;                                              \
        }                                                       \
        return r;                                               \
    }
#define SATCVTU(from, to, from_type, to_type, min, max)         \
    static inline to_type cvt##from##to(from_type x, int *sat)  \
    {                                                           \
        to_type r;                                              \
        if (x > (from_type)max) {                               \
            r = max;                                            \
            *sat = 1;                                           \
        } else {                                                \
            r = x;                                              \
        }                                                       \
        return r;                                               \
    }
SATCVT(sh, sb, int16_t, int8_t, INT8_MIN, INT8_MAX)
SATCVT(sw, sh, int32_t, int16_t, INT16_MIN, INT16_MAX)
SATCVT(sd, sw, int64_t, int32_t, INT32_MIN, INT32_MAX)

SATCVTU(uh, ub, uint16_t, uint8_t, 0, UINT8_MAX)
SATCVTU(uw, uh, uint32_t, uint16_t, 0, UINT16_MAX)
SATCVTU(ud, uw, uint64_t, uint32_t, 0, UINT32_MAX)
SATCVT(sh, ub, int16_t, uint8_t, 0, UINT8_MAX)
SATCVT(sw, uh, int32_t, uint16_t, 0, UINT16_MAX)
SATCVT(sd, uw, int64_t, uint32_t, 0, UINT32_MAX)
#undef SATCVT
#undef SATCVTU
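/* Example (illustrative): cvtsdsw(0x123456789LL, &sat) saturates to
 * INT32_MAX and sets *sat, while cvtsdsw(-5, &sat) returns -5 and leaves
 * *sat untouched; the converters only ever set the sticky flag, never
 * clear it. */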
#define LVE(name, access, swap, element)                        \
    void helper_##name (ppc_avr_t *r, target_ulong addr)        \
    {                                                           \
        size_t n_elems = ARRAY_SIZE(r->element);                \
        int adjust = HI_IDX*(n_elems-1);                        \
        int sh = sizeof(r->element[0]) >> 1;                    \
        int index = (addr & 0xf) >> sh;                         \
        if(msr_le) {                                            \
            r->element[LO_IDX ? index : (adjust - index)] = swap(access(addr)); \
        } else {                                                        \
            r->element[LO_IDX ? index : (adjust - index)] = access(addr); \
        }                                                               \
    }
#define I(x) (x)
LVE(lvebx, ldub, I, u8)
LVE(lvehx, lduw, bswap16, u16)
LVE(lvewx, ldl, bswap32, u32)
#undef I
#undef LVE

void helper_lvsl (ppc_avr_t *r, target_ulong sh)
{
    int i, j = (sh & 0xf);

    VECTOR_FOR_INORDER_I (i, u8) {
        r->u8[i] = j++;
    }
}

void helper_lvsr (ppc_avr_t *r, target_ulong sh)
{
    int i, j = 0x10 - (sh & 0xf);

    VECTOR_FOR_INORDER_I (i, u8) {
        r->u8[i] = j++;
    }
}
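/* Example (illustrative): for sh & 0xf == 3, lvsl produces the byte vector
 * {3,4,...,18} and lvsr produces {13,14,...,28}; feeding these to vperm over
 * two adjacent quadwords selects the shifted bytes, which is the classic
 * AltiVec unaligned-load sequence. */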
#define STVE(name, access, swap, element)                       \
    void helper_##name (ppc_avr_t *r, target_ulong addr)        \
    {                                                           \
        size_t n_elems = ARRAY_SIZE(r->element);                \
        int adjust = HI_IDX*(n_elems-1);                        \
        int sh = sizeof(r->element[0]) >> 1;                    \
        int index = (addr & 0xf) >> sh;                         \
        if(msr_le) {                                            \
            access(addr, swap(r->element[LO_IDX ? index : (adjust - index)])); \
        } else {                                                        \
            access(addr, r->element[LO_IDX ? index : (adjust - index)]); \
        }                                                               \
    }
#define I(x) (x)
STVE(stvebx, stb, I, u8)
STVE(stvehx, stw, bswap16, u16)
STVE(stvewx, stl, bswap32, u32)
#undef I
#undef STVE

void helper_mtvscr (ppc_avr_t *r)
{
#if defined(HOST_WORDS_BIGENDIAN)
    env->vscr = r->u32[3];
#else
    env->vscr = r->u32[0];
#endif
    set_flush_to_zero(vscr_nj, &env->vec_status);
}

void helper_vaddcuw (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
{
    int i;
    for (i = 0; i < ARRAY_SIZE(r->u32); i++) {
        r->u32[i] = ~a->u32[i] < b->u32[i];
    }
}
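/* Added commentary: ~a < b is exactly the carry-out condition of the 32-bit
 * add a + b (carry iff b > 0xFFFFFFFF - a), e.g. a = 0xFFFFFFFF, b = 1 gives
 * carry 1, so vaddcuw stores the per-lane carries of vadduwm. */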
2072 #define VARITH_DO(name, op, element) \
2073 void helper_v##name (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b) \
2075 int i; \
2076 for (i = 0; i < ARRAY_SIZE(r->element); i++) { \
2077 r->element[i] = a->element[i] op b->element[i]; \
2080 #define VARITH(suffix, element) \
2081 VARITH_DO(add##suffix, +, element) \
2082 VARITH_DO(sub##suffix, -, element)
2083 VARITH(ubm, u8)
2084 VARITH(uhm, u16)
2085 VARITH(uwm, u32)
2086 #undef VARITH_DO
2087 #undef VARITH
2089 #define VARITHFP(suffix, func) \
2090 void helper_v##suffix (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b) \
2092 int i; \
2093 for (i = 0; i < ARRAY_SIZE(r->f); i++) { \
2094 HANDLE_NAN2(r->f[i], a->f[i], b->f[i]) { \
2095 r->f[i] = func(a->f[i], b->f[i], &env->vec_status); \
2099 VARITHFP(addfp, float32_add)
2100 VARITHFP(subfp, float32_sub)
2101 #undef VARITHFP
2103 #define VARITHSAT_CASE(type, op, cvt, element) \
2105 type result = (type)a->element[i] op (type)b->element[i]; \
2106 r->element[i] = cvt(result, &sat); \
2109 #define VARITHSAT_DO(name, op, optype, cvt, element) \
2110 void helper_v##name (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b) \
2112 int sat = 0; \
2113 int i; \
2114 for (i = 0; i < ARRAY_SIZE(r->element); i++) { \
2115 switch (sizeof(r->element[0])) { \
2116 case 1: VARITHSAT_CASE(optype, op, cvt, element); break; \
2117 case 2: VARITHSAT_CASE(optype, op, cvt, element); break; \
2118 case 4: VARITHSAT_CASE(optype, op, cvt, element); break; \
2121 if (sat) { \
2122 env->vscr |= (1 << VSCR_SAT); \
2125 #define VARITHSAT_SIGNED(suffix, element, optype, cvt) \
2126 VARITHSAT_DO(adds##suffix##s, +, optype, cvt, element) \
2127 VARITHSAT_DO(subs##suffix##s, -, optype, cvt, element)
2128 #define VARITHSAT_UNSIGNED(suffix, element, optype, cvt) \
2129 VARITHSAT_DO(addu##suffix##s, +, optype, cvt, element) \
2130 VARITHSAT_DO(subu##suffix##s, -, optype, cvt, element)
2131 VARITHSAT_SIGNED(b, s8, int16_t, cvtshsb)
2132 VARITHSAT_SIGNED(h, s16, int32_t, cvtswsh)
2133 VARITHSAT_SIGNED(w, s32, int64_t, cvtsdsw)
2134 VARITHSAT_UNSIGNED(b, u8, uint16_t, cvtshub)
2135 VARITHSAT_UNSIGNED(h, u16, uint32_t, cvtswuh)
2136 VARITHSAT_UNSIGNED(w, u32, uint64_t, cvtsduw)
2137 #undef VARITHSAT_CASE
2138 #undef VARITHSAT_DO
2139 #undef VARITHSAT_SIGNED
2140 #undef VARITHSAT_UNSIGNED
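/* The saturating forms compute in a type twice as wide as the element,
 * then the cvt* helper clamps the result to the element's range and
 * records any clamping in sat.  VSCR[SAT] is sticky: it is only ever
 * ORed in, never cleared here. */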
2142 #define VAVG_DO(name, element, etype) \
2143 void helper_v##name (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b) \
2145 int i; \
2146 for (i = 0; i < ARRAY_SIZE(r->element); i++) { \
2147 etype x = (etype)a->element[i] + (etype)b->element[i] + 1; \
2148 r->element[i] = x >> 1; \
2152 #define VAVG(type, signed_element, signed_type, unsigned_element, unsigned_type) \
2153 VAVG_DO(avgs##type, signed_element, signed_type) \
2154 VAVG_DO(avgu##type, unsigned_element, unsigned_type)
2155 VAVG(b, s8, int16_t, u8, uint16_t)
2156 VAVG(h, s16, int32_t, u16, uint32_t)
2157 VAVG(w, s32, int64_t, u32, uint64_t)
2158 #undef VAVG_DO
2159 #undef VAVG
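/* Rounding average: the sum plus one is formed in a wider type so it
 * cannot overflow, then halved; e.g. vavgub of 1 and 2 gives
 * (1 + 2 + 1) >> 1 = 2. */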
2161 #define VCF(suffix, cvt, element) \
2162 void helper_vcf##suffix (ppc_avr_t *r, ppc_avr_t *b, uint32_t uim) \
2164 int i; \
2165 for (i = 0; i < ARRAY_SIZE(r->f); i++) { \
2166 float32 t = cvt(b->element[i], &env->vec_status); \
2167 r->f[i] = float32_scalbn (t, -uim, &env->vec_status); \
2170 VCF(ux, uint32_to_float32, u32)
2171 VCF(sx, int32_to_float32, s32)
2172 #undef VCF
2174 #define VCMP_DO(suffix, compare, element, record) \
2175 void helper_vcmp##suffix (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b) \
2177 uint32_t ones = (uint32_t)-1; \
2178 uint32_t all = ones; \
2179 uint32_t none = 0; \
2180 int i; \
2181 for (i = 0; i < ARRAY_SIZE(r->element); i++) { \
2182 uint32_t result = (a->element[i] compare b->element[i] ? ones : 0x0); \
2183 switch (sizeof (a->element[0])) { \
2184 case 4: r->u32[i] = result; break; \
2185 case 2: r->u16[i] = result; break; \
2186 case 1: r->u8[i] = result; break; \
2188 all &= result; \
2189 none |= result; \
2191 if (record) { \
2192 env->crf[6] = ((all != 0) << 3) | ((none == 0) << 1); \
2195 #define VCMP(suffix, compare, element) \
2196 VCMP_DO(suffix, compare, element, 0) \
2197 VCMP_DO(suffix##_dot, compare, element, 1)
2198 VCMP(equb, ==, u8)
2199 VCMP(equh, ==, u16)
2200 VCMP(equw, ==, u32)
2201 VCMP(gtub, >, u8)
2202 VCMP(gtuh, >, u16)
2203 VCMP(gtuw, >, u32)
2204 VCMP(gtsb, >, s8)
2205 VCMP(gtsh, >, s16)
2206 VCMP(gtsw, >, s32)
2207 #undef VCMP_DO
2208 #undef VCMP
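/* The _dot (record) forms, here and in the FP variants below, set CR6:
 * bit 3 when the comparison was true for every element ("all" is still
 * all-ones) and bit 1 when it was false for every element ("none" is
 * still zero). */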
2210 #define VCMPFP_DO(suffix, compare, order, record) \
2211 void helper_vcmp##suffix (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b) \
2213 uint32_t ones = (uint32_t)-1; \
2214 uint32_t all = ones; \
2215 uint32_t none = 0; \
2216 int i; \
2217 for (i = 0; i < ARRAY_SIZE(r->f); i++) { \
2218 uint32_t result; \
2219 int rel = float32_compare_quiet(a->f[i], b->f[i], &env->vec_status); \
2220 if (rel == float_relation_unordered) { \
2221 result = 0; \
2222 } else if (rel compare order) { \
2223 result = ones; \
2224 } else { \
2225 result = 0; \
2227 r->u32[i] = result; \
2228 all &= result; \
2229 none |= result; \
2231 if (record) { \
2232 env->crf[6] = ((all != 0) << 3) | ((none == 0) << 1); \
2235 #define VCMPFP(suffix, compare, order) \
2236 VCMPFP_DO(suffix, compare, order, 0) \
2237 VCMPFP_DO(suffix##_dot, compare, order, 1)
2238 VCMPFP(eqfp, ==, float_relation_equal)
2239 VCMPFP(gefp, !=, float_relation_less)
2240 VCMPFP(gtfp, ==, float_relation_greater)
2241 #undef VCMPFP_DO
2242 #undef VCMPFP
2244 static inline void vcmpbfp_internal(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b,
2245 int record)
2247 int i;
2248 int all_in = 0;
2249 for (i = 0; i < ARRAY_SIZE(r->f); i++) {
2250 int le_rel = float32_compare_quiet(a->f[i], b->f[i], &env->vec_status);
2251 if (le_rel == float_relation_unordered) {
2252 r->u32[i] = 0xc0000000;
2253 /* ALL_IN does not need to be updated here. */
2254 } else {
2255 float32 bneg = float32_chs(b->f[i]);
2256 int ge_rel = float32_compare_quiet(a->f[i], bneg, &env->vec_status);
2257 int le = le_rel != float_relation_greater;
2258 int ge = ge_rel != float_relation_less;
2259 r->u32[i] = ((!le) << 31) | ((!ge) << 30);
2260 all_in |= (!le | !ge);
2263 if (record) {
2264 env->crf[6] = (all_in == 0) << 1;
2268 void helper_vcmpbfp (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
2270 vcmpbfp_internal(r, a, b, 0);
2273 void helper_vcmpbfp_dot (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
2275 vcmpbfp_internal(r, a, b, 1);
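/* vcmpbfp is the bounds check: per element, result bit 31 is set when
 * a > b and bit 30 when a < -b, so a zero result means -b <= a <= b
 * (NaN operands set both bits).  The record form sets CR6 bit 1 only
 * when every element was within bounds (all_in == 0). */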
2278 #define VCT(suffix, satcvt, element) \
2279 void helper_vct##suffix (ppc_avr_t *r, ppc_avr_t *b, uint32_t uim) \
2281 int i; \
2282 int sat = 0; \
2283 float_status s = env->vec_status; \
2284 set_float_rounding_mode(float_round_to_zero, &s); \
2285 for (i = 0; i < ARRAY_SIZE(r->f); i++) { \
2286 if (float32_is_nan(b->f[i]) || \
2287 float32_is_signaling_nan(b->f[i])) { \
2288 r->element[i] = 0; \
2289 } else { \
2290 float64 t = float32_to_float64(b->f[i], &s); \
2291 int64_t j; \
2292 t = float64_scalbn(t, uim, &s); \
2293 j = float64_to_int64(t, &s); \
2294 r->element[i] = satcvt(j, &sat); \
2297 if (sat) { \
2298 env->vscr |= (1 << VSCR_SAT); \
2301 VCT(uxs, cvtsduw, u32)
2302 VCT(sxs, cvtsdsw, s32)
2303 #undef VCT
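/* vctuxs/vctsxs scale by 2**uim before converting.  The widening to
 * float64 makes the value and the scaling exact, so the only rounding
 * happens at the final saturating conversion. */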
2305 void helper_vmaddfp (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, ppc_avr_t *c)
2307 int i;
2308 for (i = 0; i < ARRAY_SIZE(r->f); i++) {
2309 HANDLE_NAN3(r->f[i], a->f[i], b->f[i], c->f[i]) {
2310 /* Need to do the computation in higher precision and round
2311 * once at the end. */
2312 float64 af, bf, cf, t;
2313 af = float32_to_float64(a->f[i], &env->vec_status);
2314 bf = float32_to_float64(b->f[i], &env->vec_status);
2315 cf = float32_to_float64(c->f[i], &env->vec_status);
2316 t = float64_mul(af, cf, &env->vec_status);
2317 t = float64_add(t, bf, &env->vec_status);
2318 r->f[i] = float64_to_float32(t, &env->vec_status);
2323 void helper_vmhaddshs (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, ppc_avr_t *c)
2325 int sat = 0;
2326 int i;
2328 for (i = 0; i < ARRAY_SIZE(r->s16); i++) {
2329 int32_t prod = a->s16[i] * b->s16[i];
2330 int32_t t = (int32_t)c->s16[i] + (prod >> 15);
2331 r->s16[i] = cvtswsh (t, &sat);
2334 if (sat) {
2335 env->vscr |= (1 << VSCR_SAT);
2339 void helper_vmhraddshs (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, ppc_avr_t *c)
2341 int sat = 0;
2342 int i;
2344 for (i = 0; i < ARRAY_SIZE(r->s16); i++) {
2345 int32_t prod = a->s16[i] * b->s16[i] + 0x00004000;
2346 int32_t t = (int32_t)c->s16[i] + (prod >> 15);
2347 r->s16[i] = cvtswsh (t, &sat);
2350 if (sat) {
2351 env->vscr |= (1 << VSCR_SAT);
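/* These are Q15 multiply-adds: prod >> 15 rescales the 32-bit product
 * of two s16 values back to 16 bits, and the 0x00004000 added in the
 * rounding form is half of the discarded low part. */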
2355 #define VMINMAX_DO(name, compare, element) \
2356 void helper_v##name (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b) \
2358 int i; \
2359 for (i = 0; i < ARRAY_SIZE(r->element); i++) { \
2360 if (a->element[i] compare b->element[i]) { \
2361 r->element[i] = b->element[i]; \
2362 } else { \
2363 r->element[i] = a->element[i]; \
2367 #define VMINMAX(suffix, element) \
2368 VMINMAX_DO(min##suffix, >, element) \
2369 VMINMAX_DO(max##suffix, <, element)
2370 VMINMAX(sb, s8)
2371 VMINMAX(sh, s16)
2372 VMINMAX(sw, s32)
2373 VMINMAX(ub, u8)
2374 VMINMAX(uh, u16)
2375 VMINMAX(uw, u32)
2376 #undef VMINMAX_DO
2377 #undef VMINMAX
2379 #define VMINMAXFP(suffix, rT, rF) \
2380 void helper_v##suffix (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b) \
2382 int i; \
2383 for (i = 0; i < ARRAY_SIZE(r->f); i++) { \
2384 HANDLE_NAN2(r->f[i], a->f[i], b->f[i]) { \
2385 if (float32_lt_quiet(a->f[i], b->f[i], &env->vec_status)) { \
2386 r->f[i] = rT->f[i]; \
2387 } else { \
2388 r->f[i] = rF->f[i]; \
2393 VMINMAXFP(minfp, a, b)
2394 VMINMAXFP(maxfp, b, a)
2395 #undef VMINMAXFP
2397 void helper_vmladduhm (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, ppc_avr_t *c)
2399 int i;
2400 for (i = 0; i < ARRAY_SIZE(r->s16); i++) {
2401 int32_t prod = a->s16[i] * b->s16[i];
2402 r->s16[i] = (int16_t) (prod + c->s16[i]);
2406 #define VMRG_DO(name, element, highp) \
2407 void helper_v##name (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b) \
2409 ppc_avr_t result; \
2410 int i; \
2411 size_t n_elems = ARRAY_SIZE(r->element); \
2412 for (i = 0; i < n_elems/2; i++) { \
2413 if (highp) { \
2414 result.element[i*2+HI_IDX] = a->element[i]; \
2415 result.element[i*2+LO_IDX] = b->element[i]; \
2416 } else { \
2417 result.element[n_elems - i*2 - (1+HI_IDX)] = b->element[n_elems - i - 1]; \
2418 result.element[n_elems - i*2 - (1+LO_IDX)] = a->element[n_elems - i - 1]; \
2421 *r = result; \
2423 #if defined(HOST_WORDS_BIGENDIAN)
2424 #define MRGHI 0
2425 #define MRGLO 1
2426 #else
2427 #define MRGHI 1
2428 #define MRGLO 0
2429 #endif
2430 #define VMRG(suffix, element) \
2431 VMRG_DO(mrgl##suffix, element, MRGHI) \
2432 VMRG_DO(mrgh##suffix, element, MRGLO)
2433 VMRG(b, u8)
2434 VMRG(h, u16)
2435 VMRG(w, u32)
2436 #undef VMRG_DO
2437 #undef VMRG
2438 #undef MRGHI
2439 #undef MRGLO
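/* Note the inverted-looking pairing of mrgl with MRGHI and mrgh with
 * MRGLO: the selector picks whichever fill order produces the
 * architecturally correct interleave for the host byte order chosen
 * above. */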
2441 void helper_vmsummbm (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, ppc_avr_t *c)
2443 int32_t prod[16];
2444 int i;
2446 for (i = 0; i < ARRAY_SIZE(r->s8); i++) {
2447 prod[i] = (int32_t)a->s8[i] * b->u8[i];
2450 VECTOR_FOR_INORDER_I(i, s32) {
2451 r->s32[i] = c->s32[i] + prod[4*i] + prod[4*i+1] + prod[4*i+2] + prod[4*i+3];
2455 void helper_vmsumshm (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, ppc_avr_t *c)
2457 int32_t prod[8];
2458 int i;
2460 for (i = 0; i < ARRAY_SIZE(r->s16); i++) {
2461 prod[i] = a->s16[i] * b->s16[i];
2464 VECTOR_FOR_INORDER_I(i, s32) {
2465 r->s32[i] = c->s32[i] + prod[2*i] + prod[2*i+1];
2469 void helper_vmsumshs (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, ppc_avr_t *c)
2471 int32_t prod[8];
2472 int i;
2473 int sat = 0;
2475 for (i = 0; i < ARRAY_SIZE(r->s16); i++) {
2476 prod[i] = (int32_t)a->s16[i] * b->s16[i];
2479 VECTOR_FOR_INORDER_I (i, s32) {
2480 int64_t t = (int64_t)c->s32[i] + prod[2*i] + prod[2*i+1];
2481 r->u32[i] = cvtsdsw(t, &sat);
2484 if (sat) {
2485 env->vscr |= (1 << VSCR_SAT);
2489 void helper_vmsumubm (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, ppc_avr_t *c)
2491 uint16_t prod[16];
2492 int i;
2494 for (i = 0; i < ARRAY_SIZE(r->u8); i++) {
2495 prod[i] = a->u8[i] * b->u8[i];
2498 VECTOR_FOR_INORDER_I(i, u32) {
2499 r->u32[i] = c->u32[i] + prod[4*i] + prod[4*i+1] + prod[4*i+2] + prod[4*i+3];
2503 void helper_vmsumuhm (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, ppc_avr_t *c)
2505 uint32_t prod[8];
2506 int i;
2508 for (i = 0; i < ARRAY_SIZE(r->u16); i++) {
2509 prod[i] = a->u16[i] * b->u16[i];
2512 VECTOR_FOR_INORDER_I(i, u32) {
2513 r->u32[i] = c->u32[i] + prod[2*i] + prod[2*i+1];
2517 void helper_vmsumuhs (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, ppc_avr_t *c)
2519 uint32_t prod[8];
2520 int i;
2521 int sat = 0;
2523 for (i = 0; i < ARRAY_SIZE(r->u16); i++) {
2524 prod[i] = a->u16[i] * b->u16[i];
2527 VECTOR_FOR_INORDER_I (i, s32) {
2528 uint64_t t = (uint64_t)c->u32[i] + prod[2*i] + prod[2*i+1];
2529 r->u32[i] = cvtuduw(t, &sat);
2532 if (sat) {
2533 env->vscr |= (1 << VSCR_SAT);
2537 #define VMUL_DO(name, mul_element, prod_element, evenp) \
2538 void helper_v##name (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b) \
2540 int i; \
2541 VECTOR_FOR_INORDER_I(i, prod_element) { \
2542 if (evenp) { \
2543 r->prod_element[i] = a->mul_element[i*2+HI_IDX] * b->mul_element[i*2+HI_IDX]; \
2544 } else { \
2545 r->prod_element[i] = a->mul_element[i*2+LO_IDX] * b->mul_element[i*2+LO_IDX]; \
2549 #define VMUL(suffix, mul_element, prod_element) \
2550 VMUL_DO(mule##suffix, mul_element, prod_element, 1) \
2551 VMUL_DO(mulo##suffix, mul_element, prod_element, 0)
2552 VMUL(sb, s8, s16)
2553 VMUL(sh, s16, s32)
2554 VMUL(ub, u8, u16)
2555 VMUL(uh, u16, u32)
2556 #undef VMUL_DO
2557 #undef VMUL
2559 void helper_vnmsubfp (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, ppc_avr_t *c)
2561 int i;
2562 for (i = 0; i < ARRAY_SIZE(r->f); i++) {
2563 HANDLE_NAN3(r->f[i], a->f[i], b->f[i], c->f[i]) {
2564 /* Need to do the computation in higher precision and round
2565 * once at the end. */
2566 float64 af, bf, cf, t;
2567 af = float32_to_float64(a->f[i], &env->vec_status);
2568 bf = float32_to_float64(b->f[i], &env->vec_status);
2569 cf = float32_to_float64(c->f[i], &env->vec_status);
2570 t = float64_mul(af, cf, &env->vec_status);
2571 t = float64_sub(t, bf, &env->vec_status);
2572 t = float64_chs(t);
2573 r->f[i] = float64_to_float32(t, &env->vec_status);
2578 void helper_vperm (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, ppc_avr_t *c)
2580 ppc_avr_t result;
2581 int i;
2582 VECTOR_FOR_INORDER_I (i, u8) {
2583 int s = c->u8[i] & 0x1f;
2584 #if defined(HOST_WORDS_BIGENDIAN)
2585 int index = s & 0xf;
2586 #else
2587 int index = 15 - (s & 0xf);
2588 #endif
2589 if (s & 0x10) {
2590 result.u8[i] = b->u8[index];
2591 } else {
2592 result.u8[i] = a->u8[index];
2595 *r = result;
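/* vperm: each control byte in c selects one of the 32 source bytes;
 * bit 4 chooses vector b over a and the low four bits give the byte
 * offset, mirrored on little-endian hosts. */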
2598 #if defined(HOST_WORDS_BIGENDIAN)
2599 #define PKBIG 1
2600 #else
2601 #define PKBIG 0
2602 #endif
2603 void helper_vpkpx (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
2605 int i, j;
2606 ppc_avr_t result;
2607 #if defined(HOST_WORDS_BIGENDIAN)
2608 const ppc_avr_t *x[2] = { a, b };
2609 #else
2610 const ppc_avr_t *x[2] = { b, a };
2611 #endif
2613 VECTOR_FOR_INORDER_I (i, u64) {
2614 VECTOR_FOR_INORDER_I (j, u32){
2615 uint32_t e = x[i]->u32[j];
2616 result.u16[4*i+j] = (((e >> 9) & 0xfc00) |
2617 ((e >> 6) & 0x3e0) |
2618 ((e >> 3) & 0x1f));
2621 *r = result;
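/* vpkpx packs 8:8:8:8 pixels into 1:5:5:5 halfwords.  The term
 * (e >> 9) & 0xfc00 captures both the low bit of the first byte
 * (result bit 15) and the top five bits of the second byte; the other
 * two terms take the top five bits of the remaining two channels. */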
2624 #define VPK(suffix, from, to, cvt, dosat) \
2625 void helper_vpk##suffix (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b) \
2627 int i; \
2628 int sat = 0; \
2629 ppc_avr_t result; \
2630 ppc_avr_t *a0 = PKBIG ? a : b; \
2631 ppc_avr_t *a1 = PKBIG ? b : a; \
2632 VECTOR_FOR_INORDER_I (i, from) { \
2633 result.to[i] = cvt(a0->from[i], &sat); \
2634 result.to[i+ARRAY_SIZE(r->from)] = cvt(a1->from[i], &sat); \
2636 *r = result; \
2637 if (dosat && sat) { \
2638 env->vscr |= (1 << VSCR_SAT); \
2641 #define I(x, y) (x)
2642 VPK(shss, s16, s8, cvtshsb, 1)
2643 VPK(shus, s16, u8, cvtshub, 1)
2644 VPK(swss, s32, s16, cvtswsh, 1)
2645 VPK(swus, s32, u16, cvtswuh, 1)
2646 VPK(uhus, u16, u8, cvtuhub, 1)
2647 VPK(uwus, u32, u16, cvtuwuh, 1)
2648 VPK(uhum, u16, u8, I, 0)
2649 VPK(uwum, u32, u16, I, 0)
2650 #undef I
2651 #undef VPK
2652 #undef PKBIG
2654 void helper_vrefp (ppc_avr_t *r, ppc_avr_t *b)
2656 int i;
2657 for (i = 0; i < ARRAY_SIZE(r->f); i++) {
2658 HANDLE_NAN1(r->f[i], b->f[i]) {
2659 r->f[i] = float32_div(float32_one, b->f[i], &env->vec_status);
2664 #define VRFI(suffix, rounding) \
2665 void helper_vrfi##suffix (ppc_avr_t *r, ppc_avr_t *b) \
2667 int i; \
2668 float_status s = env->vec_status; \
2669 set_float_rounding_mode(rounding, &s); \
2670 for (i = 0; i < ARRAY_SIZE(r->f); i++) { \
2671 HANDLE_NAN1(r->f[i], b->f[i]) { \
2672 r->f[i] = float32_round_to_int (b->f[i], &s); \
2676 VRFI(n, float_round_nearest_even)
2677 VRFI(m, float_round_down)
2678 VRFI(p, float_round_up)
2679 VRFI(z, float_round_to_zero)
2680 #undef VRFI
2682 #define VROTATE(suffix, element) \
2683 void helper_vrl##suffix (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b) \
2685 int i; \
2686 for (i = 0; i < ARRAY_SIZE(r->element); i++) { \
2687 unsigned int mask = ((1 << (3 + (sizeof (a->element[0]) >> 1))) - 1); \
2688 unsigned int shift = b->element[i] & mask; \
2689 r->element[i] = (a->element[i] << shift) | (shift ? (a->element[i] >> (sizeof(a->element[0]) * 8 - shift)) : 0); \
2692 VROTATE(b, u8)
2693 VROTATE(h, u16)
2694 VROTATE(w, u32)
2695 #undef VROTATE
2697 void helper_vrsqrtefp (ppc_avr_t *r, ppc_avr_t *b)
2699 int i;
2700 for (i = 0; i < ARRAY_SIZE(r->f); i++) {
2701 HANDLE_NAN1(r->f[i], b->f[i]) {
2702 float32 t = float32_sqrt(b->f[i], &env->vec_status);
2703 r->f[i] = float32_div(float32_one, t, &env->vec_status);
2708 void helper_vsel (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, ppc_avr_t *c)
2710 r->u64[0] = (a->u64[0] & ~c->u64[0]) | (b->u64[0] & c->u64[0]);
2711 r->u64[1] = (a->u64[1] & ~c->u64[1]) | (b->u64[1] & c->u64[1]);
2714 void helper_vexptefp (ppc_avr_t *r, ppc_avr_t *b)
2716 int i;
2717 for (i = 0; i < ARRAY_SIZE(r->f); i++) {
2718 HANDLE_NAN1(r->f[i], b->f[i]) {
2719 r->f[i] = float32_exp2(b->f[i], &env->vec_status);
2724 void helper_vlogefp (ppc_avr_t *r, ppc_avr_t *b)
2726 int i;
2727 for (i = 0; i < ARRAY_SIZE(r->f); i++) {
2728 HANDLE_NAN1(r->f[i], b->f[i]) {
2729 r->f[i] = float32_log2(b->f[i], &env->vec_status);
2734 #if defined(HOST_WORDS_BIGENDIAN)
2735 #define LEFT 0
2736 #define RIGHT 1
2737 #else
2738 #define LEFT 1
2739 #define RIGHT 0
2740 #endif
2741 /* The specification says that the results are undefined if the shift
2742 * counts are not all identical.  We check that they are, to match what
2743 * real hardware appears to do. */
2744 #define VSHIFT(suffix, leftp) \
2745 void helper_vs##suffix (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b) \
2747 int shift = b->u8[LO_IDX*15] & 0x7; \
2748 int doit = 1; \
2749 int i; \
2750 for (i = 0; i < ARRAY_SIZE(r->u8); i++) { \
2751 doit = doit && ((b->u8[i] & 0x7) == shift); \
2753 if (doit) { \
2754 if (shift == 0) { \
2755 *r = *a; \
2756 } else if (leftp) { \
2757 uint64_t carry = a->u64[LO_IDX] >> (64 - shift); \
2758 r->u64[HI_IDX] = (a->u64[HI_IDX] << shift) | carry; \
2759 r->u64[LO_IDX] = a->u64[LO_IDX] << shift; \
2760 } else { \
2761 uint64_t carry = a->u64[HI_IDX] << (64 - shift); \
2762 r->u64[LO_IDX] = (a->u64[LO_IDX] >> shift) | carry; \
2763 r->u64[HI_IDX] = a->u64[HI_IDX] >> shift; \
2767 VSHIFT(l, LEFT)
2768 VSHIFT(r, RIGHT)
2769 #undef VSHIFT
2770 #undef LEFT
2771 #undef RIGHT
2773 #define VSL(suffix, element) \
2774 void helper_vsl##suffix (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b) \
2776 int i; \
2777 for (i = 0; i < ARRAY_SIZE(r->element); i++) { \
2778 unsigned int mask = ((1 << (3 + (sizeof (a->element[0]) >> 1))) - 1); \
2779 unsigned int shift = b->element[i] & mask; \
2780 r->element[i] = a->element[i] << shift; \
2783 VSL(b, u8)
2784 VSL(h, u16)
2785 VSL(w, u32)
2786 #undef VSL
2788 void helper_vsldoi (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, uint32_t shift)
2790 int sh = shift & 0xf;
2791 int i;
2792 ppc_avr_t result;
2794 #if defined(HOST_WORDS_BIGENDIAN)
2795 for (i = 0; i < ARRAY_SIZE(r->u8); i++) {
2796 int index = sh + i;
2797 if (index > 0xf) {
2798 result.u8[i] = b->u8[index-0x10];
2799 } else {
2800 result.u8[i] = a->u8[index];
2803 #else
2804 for (i = 0; i < ARRAY_SIZE(r->u8); i++) {
2805 int index = (16 - sh) + i;
2806 if (index > 0xf) {
2807 result.u8[i] = a->u8[index-0x10];
2808 } else {
2809 result.u8[i] = b->u8[index];
2812 #endif
2813 *r = result;
2816 void helper_vslo (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
2818 int sh = (b->u8[LO_IDX*0xf] >> 3) & 0xf;
2820 #if defined (HOST_WORDS_BIGENDIAN)
2821 memmove (&r->u8[0], &a->u8[sh], 16-sh);
2822 memset (&r->u8[16-sh], 0, sh);
2823 #else
2824 memmove (&r->u8[sh], &a->u8[0], 16-sh);
2825 memset (&r->u8[0], 0, sh);
2826 #endif
2829 /* Experimental testing shows that hardware masks the immediate. */
2830 #define _SPLAT_MASKED(element) (splat & (ARRAY_SIZE(r->element) - 1))
2831 #if defined(HOST_WORDS_BIGENDIAN)
2832 #define SPLAT_ELEMENT(element) _SPLAT_MASKED(element)
2833 #else
2834 #define SPLAT_ELEMENT(element) (ARRAY_SIZE(r->element)-1 - _SPLAT_MASKED(element))
2835 #endif
2836 #define VSPLT(suffix, element) \
2837 void helper_vsplt##suffix (ppc_avr_t *r, ppc_avr_t *b, uint32_t splat) \
2839 uint32_t s = b->element[SPLAT_ELEMENT(element)]; \
2840 int i; \
2841 for (i = 0; i < ARRAY_SIZE(r->element); i++) { \
2842 r->element[i] = s; \
2845 VSPLT(b, u8)
2846 VSPLT(h, u16)
2847 VSPLT(w, u32)
2848 #undef VSPLT
2849 #undef SPLAT_ELEMENT
2850 #undef _SPLAT_MASKED
2852 #define VSPLTI(suffix, element, splat_type) \
2853 void helper_vspltis##suffix (ppc_avr_t *r, uint32_t splat) \
2855 splat_type x = (int8_t)(splat << 3) >> 3; \
2856 int i; \
2857 for (i = 0; i < ARRAY_SIZE(r->element); i++) { \
2858 r->element[i] = x; \
2861 VSPLTI(b, s8, int8_t)
2862 VSPLTI(h, s16, int16_t)
2863 VSPLTI(w, s32, int32_t)
2864 #undef VSPLTI
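/* The 5-bit immediate is sign-extended by shifting it to the top of an
 * int8_t and arithmetic-shifting back down: splat = 0x1f becomes
 * (int8_t)0xf8 >> 3 = -1. */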
2866 #define VSR(suffix, element) \
2867 void helper_vsr##suffix (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b) \
2869 int i; \
2870 for (i = 0; i < ARRAY_SIZE(r->element); i++) { \
2871 unsigned int mask = ((1 << (3 + (sizeof (a->element[0]) >> 1))) - 1); \
2872 unsigned int shift = b->element[i] & mask; \
2873 r->element[i] = a->element[i] >> shift; \
2876 VSR(ab, s8)
2877 VSR(ah, s16)
2878 VSR(aw, s32)
2879 VSR(b, u8)
2880 VSR(h, u16)
2881 VSR(w, u32)
2882 #undef VSR
2884 void helper_vsro (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
2886 int sh = (b->u8[LO_IDX*0xf] >> 3) & 0xf;
2888 #if defined (HOST_WORDS_BIGENDIAN)
2889 memmove (&r->u8[sh], &a->u8[0], 16-sh);
2890 memset (&r->u8[0], 0, sh);
2891 #else
2892 memmove (&r->u8[0], &a->u8[sh], 16-sh);
2893 memset (&r->u8[16-sh], 0, sh);
2894 #endif
2897 void helper_vsubcuw (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
2899 int i;
2900 for (i = 0; i < ARRAY_SIZE(r->u32); i++) {
2901 r->u32[i] = a->u32[i] >= b->u32[i];
2905 void helper_vsumsws (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
2907 int64_t t;
2908 int i, upper;
2909 ppc_avr_t result;
2910 int sat = 0;
2912 #if defined(HOST_WORDS_BIGENDIAN)
2913 upper = ARRAY_SIZE(r->s32)-1;
2914 #else
2915 upper = 0;
2916 #endif
2917 t = (int64_t)b->s32[upper];
2918 for (i = 0; i < ARRAY_SIZE(r->s32); i++) {
2919 t += a->s32[i];
2920 result.s32[i] = 0;
2922 result.s32[upper] = cvtsdsw(t, &sat);
2923 *r = result;
2925 if (sat) {
2926 env->vscr |= (1 << VSCR_SAT);
2930 void helper_vsum2sws (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
2932 int i, j, upper;
2933 ppc_avr_t result;
2934 int sat = 0;
2936 #if defined(HOST_WORDS_BIGENDIAN)
2937 upper = 1;
2938 #else
2939 upper = 0;
2940 #endif
2941 for (i = 0; i < ARRAY_SIZE(r->u64); i++) {
2942 int64_t t = (int64_t)b->s32[upper+i*2];
2943 result.u64[i] = 0;
2944 for (j = 0; j < ARRAY_SIZE(r->u64); j++) {
2945 t += a->s32[2*i+j];
2947 result.s32[upper+i*2] = cvtsdsw(t, &sat);
2950 *r = result;
2951 if (sat) {
2952 env->vscr |= (1 << VSCR_SAT);
2956 void helper_vsum4sbs (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
2958 int i, j;
2959 int sat = 0;
2961 for (i = 0; i < ARRAY_SIZE(r->s32); i++) {
2962 int64_t t = (int64_t)b->s32[i];
2963 for (j = 0; j < ARRAY_SIZE(r->s32); j++) {
2964 t += a->s8[4*i+j];
2966 r->s32[i] = cvtsdsw(t, &sat);
2969 if (sat) {
2970 env->vscr |= (1 << VSCR_SAT);
2974 void helper_vsum4shs (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
2976 int sat = 0;
2977 int i;
2979 for (i = 0; i < ARRAY_SIZE(r->s32); i++) {
2980 int64_t t = (int64_t)b->s32[i];
2981 t += a->s16[2*i] + a->s16[2*i+1];
2982 r->s32[i] = cvtsdsw(t, &sat);
2985 if (sat) {
2986 env->vscr |= (1 << VSCR_SAT);
2990 void helper_vsum4ubs (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
2992 int i, j;
2993 int sat = 0;
2995 for (i = 0; i < ARRAY_SIZE(r->u32); i++) {
2996 uint64_t t = (uint64_t)b->u32[i];
2997 for (j = 0; j < ARRAY_SIZE(r->u32); j++) {
2998 t += a->u8[4*i+j];
3000 r->u32[i] = cvtuduw(t, &sat);
3003 if (sat) {
3004 env->vscr |= (1 << VSCR_SAT);
3008 #if defined(HOST_WORDS_BIGENDIAN)
3009 #define UPKHI 1
3010 #define UPKLO 0
3011 #else
3012 #define UPKHI 0
3013 #define UPKLO 1
3014 #endif
3015 #define VUPKPX(suffix, hi) \
3016 void helper_vupk##suffix (ppc_avr_t *r, ppc_avr_t *b) \
3018 int i; \
3019 ppc_avr_t result; \
3020 for (i = 0; i < ARRAY_SIZE(r->u32); i++) { \
3021 uint16_t e = b->u16[hi ? i : i+4]; \
3022 uint8_t a = (e >> 15) ? 0xff : 0; \
3023 uint8_t r = (e >> 10) & 0x1f; \
3024 uint8_t g = (e >> 5) & 0x1f; \
3025 uint8_t b = e & 0x1f; \
3026 result.u32[i] = (a << 24) | (r << 16) | (g << 8) | b; \
3028 *r = result; \
3030 VUPKPX(lpx, UPKLO)
3031 VUPKPX(hpx, UPKHI)
3032 #undef VUPKPX
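/* vupkhpx/vupklpx invert vpkpx: each 1:5:5:5 halfword becomes an
 * 8:8:8:8 word, with the alpha byte forced to 0xff or 0 by the pixel
 * bit and the 5-bit channels stored unscaled in the low bits of their
 * bytes. */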
3034 #define VUPK(suffix, unpacked, packee, hi) \
3035 void helper_vupk##suffix (ppc_avr_t *r, ppc_avr_t *b) \
3037 int i; \
3038 ppc_avr_t result; \
3039 if (hi) { \
3040 for (i = 0; i < ARRAY_SIZE(r->unpacked); i++) { \
3041 result.unpacked[i] = b->packee[i]; \
3043 } else { \
3044 for (i = ARRAY_SIZE(r->unpacked); i < ARRAY_SIZE(r->packee); i++) { \
3045 result.unpacked[i-ARRAY_SIZE(r->unpacked)] = b->packee[i]; \
3048 *r = result; \
3050 VUPK(hsb, s16, s8, UPKHI)
3051 VUPK(hsh, s32, s16, UPKHI)
3052 VUPK(lsb, s16, s8, UPKLO)
3053 VUPK(lsh, s32, s16, UPKLO)
3054 #undef VUPK
3055 #undef UPKHI
3056 #undef UPKLO
3058 #undef DO_HANDLE_NAN
3059 #undef HANDLE_NAN1
3060 #undef HANDLE_NAN2
3061 #undef HANDLE_NAN3
3062 #undef VECTOR_FOR_INORDER_I
3063 #undef HI_IDX
3064 #undef LO_IDX
3066 /*****************************************************************************/
3067 /* SPE extension helpers */
3068 /* Use a table to make this quicker */
3069 static uint8_t hbrev[16] = {
3070 0x0, 0x8, 0x4, 0xC, 0x2, 0xA, 0x6, 0xE,
3071 0x1, 0x9, 0x5, 0xD, 0x3, 0xB, 0x7, 0xF,
3074 static inline uint8_t byte_reverse(uint8_t val)
3076 return hbrev[val >> 4] | (hbrev[val & 0xF] << 4);
3079 static inline uint32_t word_reverse(uint32_t val)
3081 return byte_reverse(val >> 24) | (byte_reverse(val >> 16) << 8) |
3082 (byte_reverse(val >> 8) << 16) | (byte_reverse(val) << 24);
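/* byte_reverse mirrors the bits of a byte by swapping its nibbles
 * through the reversal table, e.g. byte_reverse(0x12) =
 * hbrev[1] | (hbrev[2] << 4) = 0x48; word_reverse then applies it to
 * the four bytes in reverse order to mirror all 32 bits. */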
3085 #define MASKBITS 16 // Arbitrary value - to be fixed (implementation dependent)
3086 target_ulong helper_brinc (target_ulong arg1, target_ulong arg2)
3088 uint32_t a, b, d, mask;
3090 mask = UINT32_MAX >> (32 - MASKBITS);
3091 a = arg1 & mask;
3092 b = arg2 & mask;
3093 d = word_reverse(1 + word_reverse(a | ~b));
3094 return (arg1 & ~mask) | (d & b);
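/* brinc is the bit-reversed increment used for FFT-style addressing:
 * within the MASKBITS-wide window the index is bit-reversed,
 * incremented and reversed back (word_reverse(1 + word_reverse(...))),
 * while the bits outside the window come unchanged from arg1. */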
3097 uint32_t helper_cntlsw32 (uint32_t val)
3099 if (val & 0x80000000)
3100 return clz32(~val);
3101 else
3102 return clz32(val);
3105 uint32_t helper_cntlzw32 (uint32_t val)
3107 return clz32(val);
3110 /* Single-precision floating-point conversions */
3111 static inline uint32_t efscfsi(uint32_t val)
3113 CPU_FloatU u;
3115 u.f = int32_to_float32(val, &env->vec_status);
3117 return u.l;
3120 static inline uint32_t efscfui(uint32_t val)
3122 CPU_FloatU u;
3124 u.f = uint32_to_float32(val, &env->vec_status);
3126 return u.l;
3129 static inline int32_t efsctsi(uint32_t val)
3131 CPU_FloatU u;
3133 u.l = val;
3134 /* NaNs are not treated the way IEEE 754 specifies */
3135 if (unlikely(float32_is_nan(u.f)))
3136 return 0;
3138 return float32_to_int32(u.f, &env->vec_status);
3141 static inline uint32_t efsctui(uint32_t val)
3143 CPU_FloatU u;
3145 u.l = val;
3146 /* NaNs are not treated the way IEEE 754 specifies */
3147 if (unlikely(float32_is_nan(u.f)))
3148 return 0;
3150 return float32_to_uint32(u.f, &env->vec_status);
3153 static inline uint32_t efsctsiz(uint32_t val)
3155 CPU_FloatU u;
3157 u.l = val;
3158 /* NaNs are not treated the way IEEE 754 specifies */
3159 if (unlikely(float32_is_nan(u.f)))
3160 return 0;
3162 return float32_to_int32_round_to_zero(u.f, &env->vec_status);
3165 static inline uint32_t efsctuiz(uint32_t val)
3167 CPU_FloatU u;
3169 u.l = val;
3170 /* NaNs are not treated the way IEEE 754 specifies */
3171 if (unlikely(float32_is_nan(u.f)))
3172 return 0;
3174 return float32_to_uint32_round_to_zero(u.f, &env->vec_status);
3177 static inline uint32_t efscfsf(uint32_t val)
3179 CPU_FloatU u;
3180 float32 tmp;
3182 u.f = int32_to_float32(val, &env->vec_status);
3183 tmp = int64_to_float32(1ULL << 32, &env->vec_status);
3184 u.f = float32_div(u.f, tmp, &env->vec_status);
3186 return u.l;
3189 static inline uint32_t efscfuf(uint32_t val)
3191 CPU_FloatU u;
3192 float32 tmp;
3194 u.f = uint32_to_float32(val, &env->vec_status);
3195 tmp = uint64_to_float32(1ULL << 32, &env->vec_status);
3196 u.f = float32_div(u.f, tmp, &env->vec_status);
3198 return u.l;
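/* The fractional ("f") conversions treat the 32-bit operand as a
 * fixed-point fraction: the from-fractional forms above divide the
 * converted value by 2**32 (e.g. efscfuf(0x80000000) yields 0.5), and
 * the to-fractional forms below multiply by 2**32 before converting. */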
3201 static inline uint32_t efsctsf(uint32_t val)
3203 CPU_FloatU u;
3204 float32 tmp;
3206 u.l = val;
3207 /* NaNs are not treated the way IEEE 754 specifies */
3208 if (unlikely(float32_is_nan(u.f)))
3209 return 0;
3210 tmp = uint64_to_float32(1ULL << 32, &env->vec_status);
3211 u.f = float32_mul(u.f, tmp, &env->vec_status);
3213 return float32_to_int32(u.f, &env->vec_status);
3216 static inline uint32_t efsctuf(uint32_t val)
3218 CPU_FloatU u;
3219 float32 tmp;
3221 u.l = val;
3222 /* NaNs are not treated the way IEEE 754 specifies */
3223 if (unlikely(float32_is_nan(u.f)))
3224 return 0;
3225 tmp = uint64_to_float32(1ULL << 32, &env->vec_status);
3226 u.f = float32_mul(u.f, tmp, &env->vec_status);
3228 return float32_to_uint32(u.f, &env->vec_status);
3231 #define HELPER_SPE_SINGLE_CONV(name) \
3232 uint32_t helper_e##name (uint32_t val) \
3234 return e##name(val); \
3236 /* efscfsi */
3237 HELPER_SPE_SINGLE_CONV(fscfsi);
3238 /* efscfui */
3239 HELPER_SPE_SINGLE_CONV(fscfui);
3240 /* efscfuf */
3241 HELPER_SPE_SINGLE_CONV(fscfuf);
3242 /* efscfsf */
3243 HELPER_SPE_SINGLE_CONV(fscfsf);
3244 /* efsctsi */
3245 HELPER_SPE_SINGLE_CONV(fsctsi);
3246 /* efsctui */
3247 HELPER_SPE_SINGLE_CONV(fsctui);
3248 /* efsctsiz */
3249 HELPER_SPE_SINGLE_CONV(fsctsiz);
3250 /* efsctuiz */
3251 HELPER_SPE_SINGLE_CONV(fsctuiz);
3252 /* efsctsf */
3253 HELPER_SPE_SINGLE_CONV(fsctsf);
3254 /* efsctuf */
3255 HELPER_SPE_SINGLE_CONV(fsctuf);
3257 #define HELPER_SPE_VECTOR_CONV(name) \
3258 uint64_t helper_ev##name (uint64_t val) \
3260 return ((uint64_t)e##name(val >> 32) << 32) | \
3261 (uint64_t)e##name(val); \
3263 /* evfscfsi */
3264 HELPER_SPE_VECTOR_CONV(fscfsi);
3265 /* evfscfui */
3266 HELPER_SPE_VECTOR_CONV(fscfui);
3267 /* evfscfuf */
3268 HELPER_SPE_VECTOR_CONV(fscfuf);
3269 /* evfscfsf */
3270 HELPER_SPE_VECTOR_CONV(fscfsf);
3271 /* evfsctsi */
3272 HELPER_SPE_VECTOR_CONV(fsctsi);
3273 /* evfsctui */
3274 HELPER_SPE_VECTOR_CONV(fsctui);
3275 /* evfsctsiz */
3276 HELPER_SPE_VECTOR_CONV(fsctsiz);
3277 /* evfsctuiz */
3278 HELPER_SPE_VECTOR_CONV(fsctuiz);
3279 /* evfsctsf */
3280 HELPER_SPE_VECTOR_CONV(fsctsf);
3281 /* evfsctuf */
3282 HELPER_SPE_VECTOR_CONV(fsctuf);
3284 /* Single-precision floating-point arithmetic */
3285 static inline uint32_t efsadd(uint32_t op1, uint32_t op2)
3287 CPU_FloatU u1, u2;
3288 u1.l = op1;
3289 u2.l = op2;
3290 u1.f = float32_add(u1.f, u2.f, &env->vec_status);
3291 return u1.l;
3294 static inline uint32_t efssub(uint32_t op1, uint32_t op2)
3296 CPU_FloatU u1, u2;
3297 u1.l = op1;
3298 u2.l = op2;
3299 u1.f = float32_sub(u1.f, u2.f, &env->vec_status);
3300 return u1.l;
3303 static inline uint32_t efsmul(uint32_t op1, uint32_t op2)
3305 CPU_FloatU u1, u2;
3306 u1.l = op1;
3307 u2.l = op2;
3308 u1.f = float32_mul(u1.f, u2.f, &env->vec_status);
3309 return u1.l;
3312 static inline uint32_t efsdiv(uint32_t op1, uint32_t op2)
3314 CPU_FloatU u1, u2;
3315 u1.l = op1;
3316 u2.l = op2;
3317 u1.f = float32_div(u1.f, u2.f, &env->vec_status);
3318 return u1.l;
3321 #define HELPER_SPE_SINGLE_ARITH(name) \
3322 uint32_t helper_e##name (uint32_t op1, uint32_t op2) \
3324 return e##name(op1, op2); \
3326 /* efsadd */
3327 HELPER_SPE_SINGLE_ARITH(fsadd);
3328 /* efssub */
3329 HELPER_SPE_SINGLE_ARITH(fssub);
3330 /* efsmul */
3331 HELPER_SPE_SINGLE_ARITH(fsmul);
3332 /* efsdiv */
3333 HELPER_SPE_SINGLE_ARITH(fsdiv);
3335 #define HELPER_SPE_VECTOR_ARITH(name) \
3336 uint64_t helper_ev##name (uint64_t op1, uint64_t op2) \
3338 return ((uint64_t)e##name(op1 >> 32, op2 >> 32) << 32) | \
3339 (uint64_t)e##name(op1, op2); \
3341 /* evfsadd */
3342 HELPER_SPE_VECTOR_ARITH(fsadd);
3343 /* evfssub */
3344 HELPER_SPE_VECTOR_ARITH(fssub);
3345 /* evfsmul */
3346 HELPER_SPE_VECTOR_ARITH(fsmul);
3347 /* evfsdiv */
3348 HELPER_SPE_VECTOR_ARITH(fsdiv);
3350 /* Single-precision floating-point comparisons */
3351 static inline uint32_t efststlt(uint32_t op1, uint32_t op2)
3353 CPU_FloatU u1, u2;
3354 u1.l = op1;
3355 u2.l = op2;
3356 return float32_lt(u1.f, u2.f, &env->vec_status) ? 4 : 0;
3359 static inline uint32_t efststgt(uint32_t op1, uint32_t op2)
3361 CPU_FloatU u1, u2;
3362 u1.l = op1;
3363 u2.l = op2;
3364 return float32_le(u1.f, u2.f, &env->vec_status) ? 0 : 4;
3367 static inline uint32_t efststeq(uint32_t op1, uint32_t op2)
3369 CPU_FloatU u1, u2;
3370 u1.l = op1;
3371 u2.l = op2;
3372 return float32_eq(u1.f, u2.f, &env->vec_status) ? 4 : 0;
3375 static inline uint32_t efscmplt(uint32_t op1, uint32_t op2)
3377 /* XXX: TODO: test special values (NaN, infinities, ...) */
3378 return efststlt(op1, op2);
3381 static inline uint32_t efscmpgt(uint32_t op1, uint32_t op2)
3383 /* XXX: TODO: test special values (NaN, infinities, ...) */
3384 return efststgt(op1, op2);
3387 static inline uint32_t efscmpeq(uint32_t op1, uint32_t op2)
3389 /* XXX: TODO: test special values (NaN, infinities, ...) */
3390 return efststeq(op1, op2);
3393 #define HELPER_SINGLE_SPE_CMP(name) \
3394 uint32_t helper_e##name (uint32_t op1, uint32_t op2) \
3396 return e##name(op1, op2) << 2; \
3398 /* efststlt */
3399 HELPER_SINGLE_SPE_CMP(fststlt);
3400 /* efststgt */
3401 HELPER_SINGLE_SPE_CMP(fststgt);
3402 /* efststeq */
3403 HELPER_SINGLE_SPE_CMP(fststeq);
3404 /* efscmplt */
3405 HELPER_SINGLE_SPE_CMP(fscmplt);
3406 /* efscmpgt */
3407 HELPER_SINGLE_SPE_CMP(fscmpgt);
3408 /* efscmpeq */
3409 HELPER_SINGLE_SPE_CMP(fscmpeq);
3411 static inline uint32_t evcmp_merge(int t0, int t1)
3413 return (t0 << 3) | (t1 << 2) | ((t0 | t1) << 1) | (t0 & t1);
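/* Given 0/1 results for the two elements, this packs a CR field image:
 * bit 3 = high element, bit 2 = low element, bit 1 = either, bit 0 =
 * both. */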
3416 #define HELPER_VECTOR_SPE_CMP(name) \
3417 uint32_t helper_ev##name (uint64_t op1, uint64_t op2) \
3419 return evcmp_merge(e##name(op1 >> 32, op2 >> 32), e##name(op1, op2)); \
3421 /* evfststlt */
3422 HELPER_VECTOR_SPE_CMP(fststlt);
3423 /* evfststgt */
3424 HELPER_VECTOR_SPE_CMP(fststgt);
3425 /* evfststeq */
3426 HELPER_VECTOR_SPE_CMP(fststeq);
3427 /* evfscmplt */
3428 HELPER_VECTOR_SPE_CMP(fscmplt);
3429 /* evfscmpgt */
3430 HELPER_VECTOR_SPE_CMP(fscmpgt);
3431 /* evfscmpeq */
3432 HELPER_VECTOR_SPE_CMP(fscmpeq);
3434 /* Double-precision floating-point conversions */
3435 uint64_t helper_efdcfsi (uint32_t val)
3437 CPU_DoubleU u;
3439 u.d = int32_to_float64(val, &env->vec_status);
3441 return u.ll;
3444 uint64_t helper_efdcfsid (uint64_t val)
3446 CPU_DoubleU u;
3448 u.d = int64_to_float64(val, &env->vec_status);
3450 return u.ll;
3453 uint64_t helper_efdcfui (uint32_t val)
3455 CPU_DoubleU u;
3457 u.d = uint32_to_float64(val, &env->vec_status);
3459 return u.ll;
3462 uint64_t helper_efdcfuid (uint64_t val)
3464 CPU_DoubleU u;
3466 u.d = uint64_to_float64(val, &env->vec_status);
3468 return u.ll;
3471 uint32_t helper_efdctsi (uint64_t val)
3473 CPU_DoubleU u;
3475 u.ll = val;
3476 /* NaNs are not treated the way IEEE 754 specifies */
3477 if (unlikely(float64_is_nan(u.d)))
3478 return 0;
3480 return float64_to_int32(u.d, &env->vec_status);
3483 uint32_t helper_efdctui (uint64_t val)
3485 CPU_DoubleU u;
3487 u.ll = val;
3488 /* NaNs are not treated the way IEEE 754 specifies */
3489 if (unlikely(float64_is_nan(u.d)))
3490 return 0;
3492 return float64_to_uint32(u.d, &env->vec_status);
3495 uint32_t helper_efdctsiz (uint64_t val)
3497 CPU_DoubleU u;
3499 u.ll = val;
3500 /* NaNs are not treated the way IEEE 754 specifies */
3501 if (unlikely(float64_is_nan(u.d)))
3502 return 0;
3504 return float64_to_int32_round_to_zero(u.d, &env->vec_status);
3507 uint64_t helper_efdctsidz (uint64_t val)
3509 CPU_DoubleU u;
3511 u.ll = val;
3512 /* NaNs are not treated the way IEEE 754 specifies */
3513 if (unlikely(float64_is_nan(u.d)))
3514 return 0;
3516 return float64_to_int64_round_to_zero(u.d, &env->vec_status);
3519 uint32_t helper_efdctuiz (uint64_t val)
3521 CPU_DoubleU u;
3523 u.ll = val;
3524 /* NaNs are not treated the way IEEE 754 specifies */
3525 if (unlikely(float64_is_nan(u.d)))
3526 return 0;
3528 return float64_to_uint32_round_to_zero(u.d, &env->vec_status);
3531 uint64_t helper_efdctuidz (uint64_t val)
3533 CPU_DoubleU u;
3535 u.ll = val;
3536 /* NaNs are not treated the way IEEE 754 specifies */
3537 if (unlikely(float64_is_nan(u.d)))
3538 return 0;
3540 return float64_to_uint64_round_to_zero(u.d, &env->vec_status);
3543 uint64_t helper_efdcfsf (uint32_t val)
3545 CPU_DoubleU u;
3546 float64 tmp;
3548 u.d = int32_to_float64(val, &env->vec_status);
3549 tmp = int64_to_float64(1ULL << 32, &env->vec_status);
3550 u.d = float64_div(u.d, tmp, &env->vec_status);
3552 return u.ll;
3555 uint64_t helper_efdcfuf (uint32_t val)
3557 CPU_DoubleU u;
3558 float64 tmp;
3560 u.d = uint32_to_float64(val, &env->vec_status);
3561 tmp = int64_to_float64(1ULL << 32, &env->vec_status);
3562 u.d = float64_div(u.d, tmp, &env->vec_status);
3564 return u.ll;
3567 uint32_t helper_efdctsf (uint64_t val)
3569 CPU_DoubleU u;
3570 float64 tmp;
3572 u.ll = val;
3573 /* NaNs are not treated the way IEEE 754 specifies */
3574 if (unlikely(float64_is_nan(u.d)))
3575 return 0;
3576 tmp = uint64_to_float64(1ULL << 32, &env->vec_status);
3577 u.d = float64_mul(u.d, tmp, &env->vec_status);
3579 return float64_to_int32(u.d, &env->vec_status);
3582 uint32_t helper_efdctuf (uint64_t val)
3584 CPU_DoubleU u;
3585 float64 tmp;
3587 u.ll = val;
3588 /* NaNs are not treated the way IEEE 754 specifies */
3589 if (unlikely(float64_is_nan(u.d)))
3590 return 0;
3591 tmp = uint64_to_float64(1ULL << 32, &env->vec_status);
3592 u.d = float64_mul(u.d, tmp, &env->vec_status);
3594 return float64_to_uint32(u.d, &env->vec_status);
3597 uint32_t helper_efscfd (uint64_t val)
3599 CPU_DoubleU u1;
3600 CPU_FloatU u2;
3602 u1.ll = val;
3603 u2.f = float64_to_float32(u1.d, &env->vec_status);
3605 return u2.l;
3608 uint64_t helper_efdcfs (uint32_t val)
3610 CPU_DoubleU u2;
3611 CPU_FloatU u1;
3613 u1.l = val;
3614 u2.d = float32_to_float64(u1.f, &env->vec_status);
3616 return u2.ll;
3619 /* Double-precision floating-point arithmetic */
3620 uint64_t helper_efdadd (uint64_t op1, uint64_t op2)
3622 CPU_DoubleU u1, u2;
3623 u1.ll = op1;
3624 u2.ll = op2;
3625 u1.d = float64_add(u1.d, u2.d, &env->vec_status);
3626 return u1.ll;
3629 uint64_t helper_efdsub (uint64_t op1, uint64_t op2)
3631 CPU_DoubleU u1, u2;
3632 u1.ll = op1;
3633 u2.ll = op2;
3634 u1.d = float64_sub(u1.d, u2.d, &env->vec_status);
3635 return u1.ll;
3638 uint64_t helper_efdmul (uint64_t op1, uint64_t op2)
3640 CPU_DoubleU u1, u2;
3641 u1.ll = op1;
3642 u2.ll = op2;
3643 u1.d = float64_mul(u1.d, u2.d, &env->vec_status);
3644 return u1.ll;
3647 uint64_t helper_efddiv (uint64_t op1, uint64_t op2)
3649 CPU_DoubleU u1, u2;
3650 u1.ll = op1;
3651 u2.ll = op2;
3652 u1.d = float64_div(u1.d, u2.d, &env->vec_status);
3653 return u1.ll;
3656 /* Double-precision floating-point comparisons */
3657 uint32_t helper_efdtstlt (uint64_t op1, uint64_t op2)
3659 CPU_DoubleU u1, u2;
3660 u1.ll = op1;
3661 u2.ll = op2;
3662 return float64_lt(u1.d, u2.d, &env->vec_status) ? 4 : 0;
3665 uint32_t helper_efdtstgt (uint64_t op1, uint64_t op2)
3667 CPU_DoubleU u1, u2;
3668 u1.ll = op1;
3669 u2.ll = op2;
3670 return float64_le(u1.d, u2.d, &env->vec_status) ? 0 : 4;
3673 uint32_t helper_efdtsteq (uint64_t op1, uint64_t op2)
3675 CPU_DoubleU u1, u2;
3676 u1.ll = op1;
3677 u2.ll = op2;
3678 return float64_eq(u1.d, u2.d, &env->vec_status) ? 4 : 0;
3681 uint32_t helper_efdcmplt (uint64_t op1, uint64_t op2)
3683 /* XXX: TODO: test special values (NaN, infinities, ...) */
3684 return helper_efdtstlt(op1, op2);
3687 uint32_t helper_efdcmpgt (uint64_t op1, uint64_t op2)
3689 /* XXX: TODO: test special values (NaN, infinities, ...) */
3690 return helper_efdtstgt(op1, op2);
3693 uint32_t helper_efdcmpeq (uint64_t op1, uint64_t op2)
3695 /* XXX: TODO: test special values (NaN, infinities, ...) */
3696 return helper_efdtsteq(op1, op2);
3699 /*****************************************************************************/
3700 /* Softmmu support */
3701 #if !defined (CONFIG_USER_ONLY)
3703 #define MMUSUFFIX _mmu
3705 #define SHIFT 0
3706 #include "softmmu_template.h"
3708 #define SHIFT 1
3709 #include "softmmu_template.h"
3711 #define SHIFT 2
3712 #include "softmmu_template.h"
3714 #define SHIFT 3
3715 #include "softmmu_template.h"
3717 /* Try to fill the TLB and raise an exception on error.  If retaddr is
3718 NULL, it means that the function was called in C code (i.e. not
3719 from generated code or from helper.c) */
3720 /* XXX: fix it to restore all registers */
3721 void tlb_fill (target_ulong addr, int is_write, int mmu_idx, void *retaddr)
3723 TranslationBlock *tb;
3724 CPUState *saved_env;
3725 unsigned long pc;
3726 int ret;
3728 /* XXX: hack to restore env in all cases, even if not called from
3729 generated code */
3730 saved_env = env;
3731 env = cpu_single_env;
3732 ret = cpu_ppc_handle_mmu_fault(env, addr, is_write, mmu_idx, 1);
3733 if (unlikely(ret != 0)) {
3734 if (likely(retaddr)) {
3735 /* now we have a real cpu fault */
3736 pc = (unsigned long)retaddr;
3737 tb = tb_find_pc(pc);
3738 if (likely(tb)) {
3739 /* the PC is inside the translated code. It means that we have
3740 a virtual CPU fault */
3741 cpu_restore_state(tb, env, pc, NULL);
3744 helper_raise_exception_err(env->exception_index, env->error_code);
3746 env = saved_env;
3749 /* Segment registers load and store */
3750 target_ulong helper_load_sr (target_ulong sr_num)
3752 #if defined(TARGET_PPC64)
3753 if (env->mmu_model & POWERPC_MMU_64)
3754 return ppc_load_sr(env, sr_num);
3755 #endif
3756 return env->sr[sr_num];
3759 void helper_store_sr (target_ulong sr_num, target_ulong val)
3761 ppc_store_sr(env, sr_num, val);
3764 /* SLB management */
3765 #if defined(TARGET_PPC64)
3766 target_ulong helper_load_slb (target_ulong slb_nr)
3768 return ppc_load_slb(env, slb_nr);
3771 void helper_store_slb (target_ulong rb, target_ulong rs)
3773 ppc_store_slb(env, rb, rs);
3776 void helper_slbia (void)
3778 ppc_slb_invalidate_all(env);
3781 void helper_slbie (target_ulong addr)
3783 ppc_slb_invalidate_one(env, addr);
3786 #endif /* defined(TARGET_PPC64) */
3788 /* TLB management */
3789 void helper_tlbia (void)
3791 ppc_tlb_invalidate_all(env);
3794 void helper_tlbie (target_ulong addr)
3796 ppc_tlb_invalidate_one(env, addr);
3799 /* Software-driven TLB management */
3800 /* PowerPC 602/603 software TLB load instruction helpers */
3801 static void do_6xx_tlb (target_ulong new_EPN, int is_code)
3803 target_ulong RPN, CMP, EPN;
3804 int way;
3806 RPN = env->spr[SPR_RPA];
3807 if (is_code) {
3808 CMP = env->spr[SPR_ICMP];
3809 EPN = env->spr[SPR_IMISS];
3810 } else {
3811 CMP = env->spr[SPR_DCMP];
3812 EPN = env->spr[SPR_DMISS];
3814 way = (env->spr[SPR_SRR1] >> 17) & 1;
3815 (void)EPN; /* avoid a compiler warning */
3816 LOG_SWTLB("%s: EPN " TARGET_FMT_lx " " TARGET_FMT_lx " PTE0 " TARGET_FMT_lx
3817 " PTE1 " TARGET_FMT_lx " way %d\n", __func__, new_EPN, EPN, CMP,
3818 RPN, way);
3819 /* Store this TLB */
3820 ppc6xx_tlb_store(env, (uint32_t)(new_EPN & TARGET_PAGE_MASK),
3821 way, is_code, CMP, RPN);
3824 void helper_6xx_tlbd (target_ulong EPN)
3826 do_6xx_tlb(EPN, 0);
3829 void helper_6xx_tlbi (target_ulong EPN)
3831 do_6xx_tlb(EPN, 1);
3834 /* PowerPC 74xx software TLB load instruction helpers */
3835 static void do_74xx_tlb (target_ulong new_EPN, int is_code)
3837 target_ulong RPN, CMP, EPN;
3838 int way;
3840 RPN = env->spr[SPR_PTELO];
3841 CMP = env->spr[SPR_PTEHI];
3842 EPN = env->spr[SPR_TLBMISS] & ~0x3;
3843 way = env->spr[SPR_TLBMISS] & 0x3;
3844 (void)EPN; /* avoid a compiler warning */
3845 LOG_SWTLB("%s: EPN " TARGET_FMT_lx " " TARGET_FMT_lx " PTE0 " TARGET_FMT_lx
3846 " PTE1 " TARGET_FMT_lx " way %d\n", __func__, new_EPN, EPN, CMP,
3847 RPN, way);
3848 /* Store this TLB */
3849 ppc6xx_tlb_store(env, (uint32_t)(new_EPN & TARGET_PAGE_MASK),
3850 way, is_code, CMP, RPN);
3853 void helper_74xx_tlbd (target_ulong EPN)
3855 do_74xx_tlb(EPN, 0);
3858 void helper_74xx_tlbi (target_ulong EPN)
3860 do_74xx_tlb(EPN, 1);
3863 static inline target_ulong booke_tlb_to_page_size(int size)
3865 return 1024 << (2 * size);
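/* BookE encodes page sizes as powers of four starting at 1KB: size
 * field n maps to 1KB << 2n, so n = 4 is 256KB.
 * booke_page_size_to_tlb below is the inverse, returning -1 for sizes
 * that have no encoding. */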
3868 static inline int booke_page_size_to_tlb(target_ulong page_size)
3870 int size;
3872 switch (page_size) {
3873 case 0x00000400UL:
3874 size = 0x0;
3875 break;
3876 case 0x00001000UL:
3877 size = 0x1;
3878 break;
3879 case 0x00004000UL:
3880 size = 0x2;
3881 break;
3882 case 0x00010000UL:
3883 size = 0x3;
3884 break;
3885 case 0x00040000UL:
3886 size = 0x4;
3887 break;
3888 case 0x00100000UL:
3889 size = 0x5;
3890 break;
3891 case 0x00400000UL:
3892 size = 0x6;
3893 break;
3894 case 0x01000000UL:
3895 size = 0x7;
3896 break;
3897 case 0x04000000UL:
3898 size = 0x8;
3899 break;
3900 case 0x10000000UL:
3901 size = 0x9;
3902 break;
3903 case 0x40000000UL:
3904 size = 0xA;
3905 break;
3906 #if defined (TARGET_PPC64)
3907 case 0x000100000000ULL:
3908 size = 0xB;
3909 break;
3910 case 0x000400000000ULL:
3911 size = 0xC;
3912 break;
3913 case 0x001000000000ULL:
3914 size = 0xD;
3915 break;
3916 case 0x004000000000ULL:
3917 size = 0xE;
3918 break;
3919 case 0x010000000000ULL:
3920 size = 0xF;
3921 break;
3922 #endif
3923 default:
3924 size = -1;
3925 break;
3928 return size;
3931 /* Helpers for 4xx TLB management */
3932 #define PPC4XX_TLB_ENTRY_MASK 0x0000003f /* Mask for 64 TLB entries */
3934 #define PPC4XX_TLBHI_V 0x00000040
3935 #define PPC4XX_TLBHI_E 0x00000020
3936 #define PPC4XX_TLBHI_SIZE_MIN 0
3937 #define PPC4XX_TLBHI_SIZE_MAX 7
3938 #define PPC4XX_TLBHI_SIZE_DEFAULT 1
3939 #define PPC4XX_TLBHI_SIZE_SHIFT 7
3940 #define PPC4XX_TLBHI_SIZE_MASK 0x00000007
3942 #define PPC4XX_TLBLO_EX 0x00000200
3943 #define PPC4XX_TLBLO_WR 0x00000100
3944 #define PPC4XX_TLBLO_ATTR_MASK 0x000000FF
3945 #define PPC4XX_TLBLO_RPN_MASK 0xFFFFFC00
3947 target_ulong helper_4xx_tlbre_hi (target_ulong entry)
3949 ppcemb_tlb_t *tlb;
3950 target_ulong ret;
3951 int size;
3953 entry &= PPC4XX_TLB_ENTRY_MASK;
3954 tlb = &env->tlb[entry].tlbe;
3955 ret = tlb->EPN;
3956 if (tlb->prot & PAGE_VALID) {
3957 ret |= PPC4XX_TLBHI_V;
3959 size = booke_page_size_to_tlb(tlb->size);
3960 if (size < PPC4XX_TLBHI_SIZE_MIN || size > PPC4XX_TLBHI_SIZE_MAX) {
3961 size = PPC4XX_TLBHI_SIZE_DEFAULT;
3963 ret |= size << PPC4XX_TLBHI_SIZE_SHIFT;
3964 env->spr[SPR_40x_PID] = tlb->PID;
3965 return ret;
3968 target_ulong helper_4xx_tlbre_lo (target_ulong entry)
3970 ppcemb_tlb_t *tlb;
3971 target_ulong ret;
3973 entry &= PPC4XX_TLB_ENTRY_MASK;
3974 tlb = &env->tlb[entry].tlbe;
3975 ret = tlb->RPN;
3976 if (tlb->prot & PAGE_EXEC) {
3977 ret |= PPC4XX_TLBLO_EX;
3979 if (tlb->prot & PAGE_WRITE) {
3980 ret |= PPC4XX_TLBLO_WR;
3982 return ret;
3985 void helper_4xx_tlbwe_hi (target_ulong entry, target_ulong val)
3987 ppcemb_tlb_t *tlb;
3988 target_ulong page, end;
3990 LOG_SWTLB("%s entry %d val " TARGET_FMT_lx "\n", __func__, (int)entry,
3991 val);
3992 entry &= PPC4XX_TLB_ENTRY_MASK;
3993 tlb = &env->tlb[entry].tlbe;
3994 /* Invalidate previous TLB (if it's valid) */
3995 if (tlb->prot & PAGE_VALID) {
3996 end = tlb->EPN + tlb->size;
3997 LOG_SWTLB("%s: invalidate old TLB %d start " TARGET_FMT_lx " end "
3998 TARGET_FMT_lx "\n", __func__, (int)entry, tlb->EPN, end);
3999 for (page = tlb->EPN; page < end; page += TARGET_PAGE_SIZE) {
4000 tlb_flush_page(env, page);
4003 tlb->size = booke_tlb_to_page_size((val >> PPC4XX_TLBHI_SIZE_SHIFT)
4004 & PPC4XX_TLBHI_SIZE_MASK);
4005 /* We cannot handle TLB size < TARGET_PAGE_SIZE.
4006 * If this ever occurs, one should use the ppcemb target instead
4007 * of the ppc or ppc64 one
4009 if ((val & PPC4XX_TLBHI_V) && tlb->size < TARGET_PAGE_SIZE) {
4010 cpu_abort(env, "TLB size " TARGET_FMT_lu " < %u "
4011 "are not supported (%d)\n",
4012 tlb->size, TARGET_PAGE_SIZE, (int)((val >> 7) & 0x7));
4014 tlb->EPN = val & ~(tlb->size - 1);
4015 if (val & PPC4XX_TLBHI_V) {
4016 tlb->prot |= PAGE_VALID;
4017 if (val & PPC4XX_TLBHI_E) {
4018 /* XXX: TO BE FIXED */
4019 cpu_abort(env,
4020 "Little-endian TLB entries are not supported by now\n");
4022 } else {
4023 tlb->prot &= ~PAGE_VALID;
4025 tlb->PID = env->spr[SPR_40x_PID]; /* PID */
4026 LOG_SWTLB("%s: set up TLB %d RPN " TARGET_FMT_plx " EPN " TARGET_FMT_lx
4027 " size " TARGET_FMT_lx " prot %c%c%c%c PID %d\n", __func__,
4028 (int)entry, tlb->RPN, tlb->EPN, tlb->size,
4029 tlb->prot & PAGE_READ ? 'r' : '-',
4030 tlb->prot & PAGE_WRITE ? 'w' : '-',
4031 tlb->prot & PAGE_EXEC ? 'x' : '-',
4032 tlb->prot & PAGE_VALID ? 'v' : '-', (int)tlb->PID);
4033 /* Invalidate new TLB (if valid) */
4034 if (tlb->prot & PAGE_VALID) {
4035 end = tlb->EPN + tlb->size;
4036 LOG_SWTLB("%s: invalidate TLB %d start " TARGET_FMT_lx " end "
4037 TARGET_FMT_lx "\n", __func__, (int)entry, tlb->EPN, end);
4038 for (page = tlb->EPN; page < end; page += TARGET_PAGE_SIZE) {
4039 tlb_flush_page(env, page);
4044 void helper_4xx_tlbwe_lo (target_ulong entry, target_ulong val)
4046 ppcemb_tlb_t *tlb;
4048 LOG_SWTLB("%s entry %i val " TARGET_FMT_lx "\n", __func__, (int)entry,
4049 val);
4050 entry &= PPC4XX_TLB_ENTRY_MASK;
4051 tlb = &env->tlb[entry].tlbe;
4052 tlb->attr = val & PPC4XX_TLBLO_ATTR_MASK;
4053 tlb->RPN = val & PPC4XX_TLBLO_RPN_MASK;
4054 tlb->prot = PAGE_READ;
4055 if (val & PPC4XX_TLBLO_EX) {
4056 tlb->prot |= PAGE_EXEC;
4058 if (val & PPC4XX_TLBLO_WR) {
4059 tlb->prot |= PAGE_WRITE;
4061 LOG_SWTLB("%s: set up TLB %d RPN " TARGET_FMT_plx " EPN " TARGET_FMT_lx
4062 " size " TARGET_FMT_lx " prot %c%c%c%c PID %d\n", __func__,
4063 (int)entry, tlb->RPN, tlb->EPN, tlb->size,
4064 tlb->prot & PAGE_READ ? 'r' : '-',
4065 tlb->prot & PAGE_WRITE ? 'w' : '-',
4066 tlb->prot & PAGE_EXEC ? 'x' : '-',
4067 tlb->prot & PAGE_VALID ? 'v' : '-', (int)tlb->PID);
4070 target_ulong helper_4xx_tlbsx (target_ulong address)
4072 return ppcemb_tlb_search(env, address, env->spr[SPR_40x_PID]);
4075 /* PowerPC 440 TLB management */
4076 void helper_440_tlbwe (uint32_t word, target_ulong entry, target_ulong value)
4078 ppcemb_tlb_t *tlb;
4079 target_ulong EPN, RPN, size;
4080 int do_flush_tlbs;
4082 LOG_SWTLB("%s word %d entry %d value " TARGET_FMT_lx "\n",
4083 __func__, word, (int)entry, value);
4084 do_flush_tlbs = 0;
4085 entry &= 0x3F;
4086 tlb = &env->tlb[entry].tlbe;
4087 switch (word) {
4088 default:
4089 /* Just here to please gcc */
4090 case 0:
4091 EPN = value & 0xFFFFFC00;
4092 if ((tlb->prot & PAGE_VALID) && EPN != tlb->EPN)
4093 do_flush_tlbs = 1;
4094 tlb->EPN = EPN;
4095 size = booke_tlb_to_page_size((value >> 4) & 0xF);
4096 if ((tlb->prot & PAGE_VALID) && tlb->size < size)
4097 do_flush_tlbs = 1;
4098 tlb->size = size;
4099 tlb->attr &= ~0x1;
4100 tlb->attr |= (value >> 8) & 1;
4101 if (value & 0x200) {
4102 tlb->prot |= PAGE_VALID;
4103 } else {
4104 if (tlb->prot & PAGE_VALID) {
4105 tlb->prot &= ~PAGE_VALID;
4106 do_flush_tlbs = 1;
4109 tlb->PID = env->spr[SPR_440_MMUCR] & 0x000000FF;
4110 if (do_flush_tlbs)
4111 tlb_flush(env, 1);
4112 break;
4113 case 1:
4114 RPN = value & 0xFFFFFC0F;
4115 if ((tlb->prot & PAGE_VALID) && tlb->RPN != RPN)
4116 tlb_flush(env, 1);
4117 tlb->RPN = RPN;
4118 break;
4119 case 2:
4120 tlb->attr = (tlb->attr & 0x1) | (value & 0x0000FF00);
4121 tlb->prot = tlb->prot & PAGE_VALID;
4122 if (value & 0x1)
4123 tlb->prot |= PAGE_READ << 4;
4124 if (value & 0x2)
4125 tlb->prot |= PAGE_WRITE << 4;
4126 if (value & 0x4)
4127 tlb->prot |= PAGE_EXEC << 4;
4128 if (value & 0x8)
4129 tlb->prot |= PAGE_READ;
4130 if (value & 0x10)
4131 tlb->prot |= PAGE_WRITE;
4132 if (value & 0x20)
4133 tlb->prot |= PAGE_EXEC;
4134 break;
4138 target_ulong helper_440_tlbre (uint32_t word, target_ulong entry)
4140 ppcemb_tlb_t *tlb;
4141 target_ulong ret;
4142 int size;
4144 entry &= 0x3F;
4145 tlb = &env->tlb[entry].tlbe;
4146 switch (word) {
4147 default:
4148 /* Just here to please gcc */
4149 case 0:
4150 ret = tlb->EPN;
4151 size = booke_page_size_to_tlb(tlb->size);
4152 if (size < 0 || size > 0xF)
4153 size = 1;
4154 ret |= size << 4;
4155 if (tlb->attr & 0x1)
4156 ret |= 0x100;
4157 if (tlb->prot & PAGE_VALID)
4158 ret |= 0x200;
4159 env->spr[SPR_440_MMUCR] &= ~0x000000FF;
4160 env->spr[SPR_440_MMUCR] |= tlb->PID;
4161 break;
4162 case 1:
4163 ret = tlb->RPN;
4164 break;
4165 case 2:
4166 ret = tlb->attr & ~0x1;
4167 if (tlb->prot & (PAGE_READ << 4))
4168 ret |= 0x1;
4169 if (tlb->prot & (PAGE_WRITE << 4))
4170 ret |= 0x2;
4171 if (tlb->prot & (PAGE_EXEC << 4))
4172 ret |= 0x4;
4173 if (tlb->prot & PAGE_READ)
4174 ret |= 0x8;
4175 if (tlb->prot & PAGE_WRITE)
4176 ret |= 0x10;
4177 if (tlb->prot & PAGE_EXEC)
4178 ret |= 0x20;
4179 break;
4181 return ret;
4184 target_ulong helper_440_tlbsx (target_ulong address)
4186 return ppcemb_tlb_search(env, address, env->spr[SPR_440_MMUCR] & 0xFF);
4189 #endif /* !CONFIG_USER_ONLY */