target-ppc/op_helper.c (qemu/mini2440.git)
1 /*
2 * PowerPC emulation helpers for qemu.
4 * Copyright (c) 2003-2007 Jocelyn Mayer
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
20 #include "exec.h"
21 #include "host-utils.h"
22 #include "helper.h"
24 #include "helper_regs.h"
26 //#define DEBUG_OP
27 //#define DEBUG_EXCEPTIONS
28 //#define DEBUG_SOFTWARE_TLB
30 /*****************************************************************************/
31 /* Exceptions processing helpers */
33 void helper_raise_exception_err (uint32_t exception, uint32_t error_code)
35 raise_exception_err(env, exception, error_code);
38 void helper_raise_debug (void)
40 raise_exception(env, EXCP_DEBUG);
43 /*****************************************************************************/
44 /* Registers load and stores */
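/* The condition register is kept as eight 4-bit fields in env->crf[],
 * crf[0] being CR0 (the most significant nibble of the architected CR).
 * helper_load_cr repacks them into one 32-bit value; helper_store_cr
 * takes an mtcrf-style mask with one bit per field, bit 7 selecting CR0.
 */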
45 target_ulong helper_load_cr (void)
47 return (env->crf[0] << 28) |
48 (env->crf[1] << 24) |
49 (env->crf[2] << 20) |
50 (env->crf[3] << 16) |
51 (env->crf[4] << 12) |
52 (env->crf[5] << 8) |
53 (env->crf[6] << 4) |
54 (env->crf[7] << 0);
57 void helper_store_cr (target_ulong val, uint32_t mask)
59 int i, sh;
61 for (i = 0, sh = 7; i < 8; i++, sh--) {
62 if (mask & (1 << sh))
63 env->crf[i] = (val >> (sh * 4)) & 0xFUL;
67 /*****************************************************************************/
68 /* SPR accesses */
69 void helper_load_dump_spr (uint32_t sprn)
71 if (loglevel != 0) {
72 fprintf(logfile, "Read SPR %d %03x => " ADDRX "\n",
73 sprn, sprn, env->spr[sprn]);
77 void helper_store_dump_spr (uint32_t sprn)
79 if (loglevel != 0) {
80 fprintf(logfile, "Write SPR %d %03x <= " ADDRX "\n",
81 sprn, sprn, env->spr[sprn]);
85 target_ulong helper_load_tbl (void)
87 return cpu_ppc_load_tbl(env);
90 target_ulong helper_load_tbu (void)
92 return cpu_ppc_load_tbu(env);
95 target_ulong helper_load_atbl (void)
97 return cpu_ppc_load_atbl(env);
100 target_ulong helper_load_atbu (void)
102 return cpu_ppc_load_atbu(env);
105 target_ulong helper_load_601_rtcl (void)
107 return cpu_ppc601_load_rtcl(env);
110 target_ulong helper_load_601_rtcu (void)
112 return cpu_ppc601_load_rtcu(env);
115 #if !defined(CONFIG_USER_ONLY)
116 #if defined (TARGET_PPC64)
117 void helper_store_asr (target_ulong val)
119 ppc_store_asr(env, val);
121 #endif
123 void helper_store_sdr1 (target_ulong val)
125 ppc_store_sdr1(env, val);
128 void helper_store_tbl (target_ulong val)
130 cpu_ppc_store_tbl(env, val);
133 void helper_store_tbu (target_ulong val)
135 cpu_ppc_store_tbu(env, val);
138 void helper_store_atbl (target_ulong val)
140 cpu_ppc_store_atbl(env, val);
143 void helper_store_atbu (target_ulong val)
145 cpu_ppc_store_atbu(env, val);
148 void helper_store_601_rtcl (target_ulong val)
150 cpu_ppc601_store_rtcl(env, val);
153 void helper_store_601_rtcu (target_ulong val)
155 cpu_ppc601_store_rtcu(env, val);
158 target_ulong helper_load_decr (void)
160 return cpu_ppc_load_decr(env);
163 void helper_store_decr (target_ulong val)
165 cpu_ppc_store_decr(env, val);
168 void helper_store_hid0_601 (target_ulong val)
170 target_ulong hid0;
172 hid0 = env->spr[SPR_HID0];
173 if ((val ^ hid0) & 0x00000008) {
174 /* Change current endianness */
175 env->hflags &= ~(1 << MSR_LE);
176 env->hflags_nmsr &= ~(1 << MSR_LE);
177 env->hflags_nmsr |= (1 << MSR_LE) & (((val >> 3) & 1) << MSR_LE);
178 env->hflags |= env->hflags_nmsr;
179 if (loglevel != 0) {
180 fprintf(logfile, "%s: set endianness to %c => " ADDRX "\n",
181 __func__, val & 0x8 ? 'l' : 'b', env->hflags);
184 env->spr[SPR_HID0] = (uint32_t)val;
187 void helper_store_403_pbr (uint32_t num, target_ulong value)
189 if (likely(env->pb[num] != value)) {
190 env->pb[num] = value;
191 /* Should be optimized */
192 tlb_flush(env, 1);
196 target_ulong helper_load_40x_pit (void)
198 return load_40x_pit(env);
201 void helper_store_40x_pit (target_ulong val)
203 store_40x_pit(env, val);
206 void helper_store_40x_dbcr0 (target_ulong val)
208 store_40x_dbcr0(env, val);
211 void helper_store_40x_sler (target_ulong val)
213 store_40x_sler(env, val);
216 void helper_store_booke_tcr (target_ulong val)
218 store_booke_tcr(env, val);
221 void helper_store_booke_tsr (target_ulong val)
223 store_booke_tsr(env, val);
226 void helper_store_ibatu (uint32_t nr, target_ulong val)
228 ppc_store_ibatu(env, nr, val);
231 void helper_store_ibatl (uint32_t nr, target_ulong val)
233 ppc_store_ibatl(env, nr, val);
236 void helper_store_dbatu (uint32_t nr, target_ulong val)
238 ppc_store_dbatu(env, nr, val);
241 void helper_store_dbatl (uint32_t nr, target_ulong val)
243 ppc_store_dbatl(env, nr, val);
246 void helper_store_601_batl (uint32_t nr, target_ulong val)
248 ppc_store_ibatl_601(env, nr, val);
251 void helper_store_601_batu (uint32_t nr, target_ulong val)
253 ppc_store_ibatu_601(env, nr, val);
255 #endif
257 /*****************************************************************************/
258 /* Memory load and stores */
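/* get_addr truncates the effective address to 32 bits unless a PPC64
 * target is running in 64-bit mode (MSR[SF] set).  The multiple/string
 * helpers below route every access through it; lmw/stmw additionally
 * byte-swap the data when MSR[LE] is set, the access macros being
 * big-endian by default for this target.
 */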
260 static always_inline target_ulong get_addr(target_ulong addr)
262 #if defined(TARGET_PPC64)
263 if (msr_sf)
264 return addr;
265 else
266 #endif
267 return (uint32_t)addr;
270 void helper_lmw (target_ulong addr, uint32_t reg)
272 for (; reg < 32; reg++, addr += 4) {
273 if (msr_le)
274 env->gpr[reg] = bswap32(ldl(get_addr(addr)));
275 else
276 env->gpr[reg] = ldl(get_addr(addr));
280 void helper_stmw (target_ulong addr, uint32_t reg)
282 for (; reg < 32; reg++, addr += 4) {
283 if (msr_le)
284 stl(get_addr(addr), bswap32((uint32_t)env->gpr[reg]));
285 else
286 stl(get_addr(addr), (uint32_t)env->gpr[reg]);
290 void helper_lsw(target_ulong addr, uint32_t nb, uint32_t reg)
292 int sh;
293 for (; nb > 3; nb -= 4, addr += 4) {
294 env->gpr[reg] = ldl(get_addr(addr));
295 reg = (reg + 1) % 32;
297 if (unlikely(nb > 0)) {
298 env->gpr[reg] = 0;
299 for (sh = 24; nb > 0; nb--, addr++, sh -= 8) {
300 env->gpr[reg] |= ldub(get_addr(addr)) << sh;
304 /* PPC32 specification says we must generate an exception if
305 * rA is in the range of registers to be loaded.
306 * On the other hand, IBM says this is valid, but rA won't be loaded.
307 * For now, I'll follow the spec...
309 void helper_lswx(target_ulong addr, uint32_t reg, uint32_t ra, uint32_t rb)
311 if (likely(xer_bc != 0)) {
312 if (unlikely((ra != 0 && reg < ra && (reg + xer_bc) > ra) ||
313 (reg < rb && (reg + xer_bc) > rb))) {
314 raise_exception_err(env, POWERPC_EXCP_PROGRAM,
315 POWERPC_EXCP_INVAL |
316 POWERPC_EXCP_INVAL_LSWX);
317 } else {
318 helper_lsw(addr, xer_bc, reg);
323 void helper_stsw(target_ulong addr, uint32_t nb, uint32_t reg)
325 int sh;
326 for (; nb > 3; nb -= 4, addr += 4) {
327 stl(get_addr(addr), env->gpr[reg]);
328 reg = (reg + 1) % 32;
330 if (unlikely(nb > 0)) {
331 for (sh = 24; nb > 0; nb--, addr++, sh -= 8)
332 stb(get_addr(addr), (env->gpr[reg] >> sh) & 0xFF);
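/* dcbz zeroes a whole data cache line with ordinary word stores.  If the
 * lwarx/stwcx. reservation (env->reserve) falls inside the line being
 * cleared, it is cancelled, since any store to the reserved granule is
 * supposed to break the reservation.
 */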
336 static void do_dcbz(target_ulong addr, int dcache_line_size)
338 target_long mask = get_addr(~(dcache_line_size - 1));
339 int i;
340 addr &= mask;
341 for (i = 0 ; i < dcache_line_size ; i += 4) {
342 stl(addr + i , 0);
344 if ((env->reserve & mask) == addr)
345 env->reserve = (target_ulong)-1ULL;
348 void helper_dcbz(target_ulong addr)
350 do_dcbz(addr, env->dcache_line_size);
353 void helper_dcbz_970(target_ulong addr)
355 if (((env->spr[SPR_970_HID5] >> 7) & 0x3) == 1)
356 do_dcbz(addr, 32);
357 else
358 do_dcbz(addr, env->dcache_line_size);
361 void helper_icbi(target_ulong addr)
363 uint32_t tmp;
365 addr = get_addr(addr & ~(env->dcache_line_size - 1));
366 /* Invalidate one cache line:
367 * PowerPC specification says this is to be treated like a load
368 * (not a fetch) by the MMU. To be sure it will be so,
369 * do the load "by hand".
371 tmp = ldl(addr);
372 tb_invalidate_page_range(addr, addr + env->icache_line_size);
375 // XXX: to be tested
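/* lscbx (POWER string load): load up to XER[bc] bytes into successive
 * GPRs, four per register starting at the most significant byte, and stop
 * early when a loaded byte equals the compare byte held in XER.  The
 * return value reports how many bytes were consumed, which is what ends
 * up back in XER.
 */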
376 target_ulong helper_lscbx (target_ulong addr, uint32_t reg, uint32_t ra, uint32_t rb)
378 int i, c, d;
379 d = 24;
380 for (i = 0; i < xer_bc; i++) {
381 c = ldub((uint32_t)addr++);
382 /* ra (if not 0) and rb are never modified */
383 if (likely(reg != rb && (ra == 0 || reg != ra))) {
384 env->gpr[reg] = (env->gpr[reg] & ~(0xFF << d)) | (c << d);
386 if (unlikely(c == xer_cmp))
387 break;
388 if (likely(d != 0)) {
389 d -= 8;
390 } else {
391 d = 24;
392 reg++;
393 reg = reg & 0x1F;
396 return i;
399 /*****************************************************************************/
400 /* Fixed point operations helpers */
401 #if defined(TARGET_PPC64)
403 /* multiply high word */
404 uint64_t helper_mulhd (uint64_t arg1, uint64_t arg2)
406 uint64_t tl, th;
408 muls64(&tl, &th, arg1, arg2);
409 return th;
412 /* multiply high word unsigned */
413 uint64_t helper_mulhdu (uint64_t arg1, uint64_t arg2)
415 uint64_t tl, th;
417 mulu64(&tl, &th, arg1, arg2);
418 return th;
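/* mulldo returns the low 64 bits of the signed 128-bit product and sets
 * XER[OV] (plus the sticky XER[SO]) when the result does not fit: the
 * product is treated as fitting whenever its high half is 0 or -1.
 */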
421 uint64_t helper_mulldo (uint64_t arg1, uint64_t arg2)
423 int64_t th;
424 uint64_t tl;
426 muls64(&tl, (uint64_t *)&th, arg1, arg2);
427 /* If th != 0 && th != -1, then we had an overflow */
428 if (likely((uint64_t)(th + 1) <= 1)) {
429 env->xer &= ~(1 << XER_OV);
430 } else {
431 env->xer |= (1 << XER_OV) | (1 << XER_SO);
433 return (int64_t)tl;
435 #endif
437 target_ulong helper_cntlzw (target_ulong t)
439 return clz32(t);
442 #if defined(TARGET_PPC64)
443 target_ulong helper_cntlzd (target_ulong t)
445 return clz64(t);
447 #endif
449 /* shift right arithmetic helper */
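/* For sraw/srad, XER[CA] is set only when the shifted value is negative
 * and at least one 1 bit was shifted out, i.e. when the algebraic shift
 * was inexact.  Shift amounts of 32 or more (64 for srad) reduce to a
 * pure sign fill, setting CA only for negative inputs.
 */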
450 target_ulong helper_sraw (target_ulong value, target_ulong shift)
452 int32_t ret;
454 if (likely(!(shift & 0x20))) {
455 if (likely((uint32_t)shift != 0)) {
456 shift &= 0x1f;
457 ret = (int32_t)value >> shift;
458 if (likely(ret >= 0 || (value & ((1 << shift) - 1)) == 0)) {
459 env->xer &= ~(1 << XER_CA);
460 } else {
461 env->xer |= (1 << XER_CA);
463 } else {
464 ret = (int32_t)value;
465 env->xer &= ~(1 << XER_CA);
467 } else {
468 ret = (int32_t)value >> 31;
469 if (ret) {
470 env->xer |= (1 << XER_CA);
471 } else {
472 env->xer &= ~(1 << XER_CA);
475 return (target_long)ret;
478 #if defined(TARGET_PPC64)
479 target_ulong helper_srad (target_ulong value, target_ulong shift)
481 int64_t ret;
483 if (likely(!(shift & 0x40))) {
484 if (likely((uint64_t)shift != 0)) {
485 shift &= 0x3f;
486 ret = (int64_t)value >> shift;
487 if (likely(ret >= 0 || (value & ((1 << shift) - 1)) == 0)) {
488 env->xer &= ~(1 << XER_CA);
489 } else {
490 env->xer |= (1 << XER_CA);
492 } else {
493 ret = (int64_t)value;
494 env->xer &= ~(1 << XER_CA);
496 } else {
497 ret = (int64_t)value >> 63;
498 if (ret) {
499 env->xer |= (1 << XER_CA);
500 } else {
501 env->xer &= ~(1 << XER_CA);
504 return ret;
506 #endif
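/* popcntb stores, in each byte of the result, the number of 1 bits in the
 * corresponding byte of the source.  The three masked add steps below are
 * the usual parallel bit count, kept narrow enough never to carry across
 * a byte boundary.
 */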
508 target_ulong helper_popcntb (target_ulong val)
510 val = (val & 0x55555555) + ((val >> 1) & 0x55555555);
511 val = (val & 0x33333333) + ((val >> 2) & 0x33333333);
512 val = (val & 0x0f0f0f0f) + ((val >> 4) & 0x0f0f0f0f);
513 return val;
516 #if defined(TARGET_PPC64)
517 target_ulong helper_popcntb_64 (target_ulong val)
519 val = (val & 0x5555555555555555ULL) + ((val >> 1) & 0x5555555555555555ULL);
520 val = (val & 0x3333333333333333ULL) + ((val >> 2) & 0x3333333333333333ULL);
521 val = (val & 0x0f0f0f0f0f0f0f0fULL) + ((val >> 4) & 0x0f0f0f0f0f0f0f0fULL);
522 return val;
524 #endif
526 /*****************************************************************************/
527 /* Floating point operations helpers */
528 uint64_t helper_float32_to_float64(uint32_t arg)
530 CPU_FloatU f;
531 CPU_DoubleU d;
532 f.l = arg;
533 d.d = float32_to_float64(f.f, &env->fp_status);
534 return d.ll;
537 uint32_t helper_float64_to_float32(uint64_t arg)
539 CPU_FloatU f;
540 CPU_DoubleU d;
541 d.ll = arg;
542 f.f = float64_to_float32(d.d, &env->fp_status);
543 return f.l;
546 static always_inline int fpisneg (float64 d)
548 CPU_DoubleU u;
550 u.d = d;
552 return u.ll >> 63 != 0;
555 static always_inline int isden (float64 d)
557 CPU_DoubleU u;
559 u.d = d;
561 return ((u.ll >> 52) & 0x7FF) == 0;
564 static always_inline int iszero (float64 d)
566 CPU_DoubleU u;
568 u.d = d;
570 return (u.ll & ~0x8000000000000000ULL) == 0;
573 static always_inline int isinfinity (float64 d)
575 CPU_DoubleU u;
577 u.d = d;
579 return ((u.ll >> 52) & 0x7FF) == 0x7FF &&
580 (u.ll & 0x000FFFFFFFFFFFFFULL) == 0;
583 #ifdef CONFIG_SOFTFLOAT
584 static always_inline int isfinite (float64 d)
586 CPU_DoubleU u;
588 u.d = d;
590 return (((u.ll >> 52) & 0x7FF) != 0x7FF);
593 static always_inline int isnormal (float64 d)
595 CPU_DoubleU u;
597 u.d = d;
599 uint32_t exp = (u.ll >> 52) & 0x7FF;
600 return ((0 < exp) && (exp < 0x7FF));
602 #endif
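/* Compute the 5-bit FPRF code (C, FL, FG, FE, FU) for a result:
 * 0x11 quiet NaN, 0x09/0x05 -/+ infinity, 0x12/0x02 -/+ zero,
 * 0x18/0x14 -/+ denormal, 0x08/0x04 -/+ normal number.
 */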
604 uint32_t helper_compute_fprf (uint64_t arg, uint32_t set_fprf)
606 CPU_DoubleU farg;
607 int isneg;
608 int ret;
609 farg.ll = arg;
610 isneg = fpisneg(farg.d);
611 if (unlikely(float64_is_nan(farg.d))) {
612 if (float64_is_signaling_nan(farg.d)) {
613 /* Signaling NaN: flags are undefined */
614 ret = 0x00;
615 } else {
616 /* Quiet NaN */
617 ret = 0x11;
619 } else if (unlikely(isinfinity(farg.d))) {
620 /* +/- infinity */
621 if (isneg)
622 ret = 0x09;
623 else
624 ret = 0x05;
625 } else {
626 if (iszero(farg.d)) {
627 /* +/- zero */
628 if (isneg)
629 ret = 0x12;
630 else
631 ret = 0x02;
632 } else {
633 if (isden(farg.d)) {
634 /* Denormalized numbers */
635 ret = 0x10;
636 } else {
637 /* Normalized numbers */
638 ret = 0x00;
640 if (isneg) {
641 ret |= 0x08;
642 } else {
643 ret |= 0x04;
647 if (set_fprf) {
648 /* We update FPSCR_FPRF */
649 env->fpscr &= ~(0x1F << FPSCR_FPRF);
650 env->fpscr |= ret << FPSCR_FPRF;
652 /* We just need fpcc to update Rc1 */
653 return ret & 0xF;
656 /* Floating-point invalid operations exception */
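/* Common path for the invalid-operation cases below: the FPSCR VX* bit
 * for the specific cause is set, then the VX and FX summaries.  With
 * FPSCR[VE] clear, the arithmetic cases hand back an all-ones quiet-NaN
 * pattern for the target FPR; with VE set, a program interrupt is raised
 * (or, for VXVC, deferred) when MSR[FE0] or MSR[FE1] allows it.
 */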
657 static always_inline uint64_t fload_invalid_op_excp (int op)
659 uint64_t ret = 0;
660 int ve;
662 ve = fpscr_ve;
663 if (op & POWERPC_EXCP_FP_VXSNAN) {
664 /* Operation on signaling NaN */
665 env->fpscr |= 1 << FPSCR_VXSNAN;
667 if (op & POWERPC_EXCP_FP_VXSOFT) {
668 /* Software-defined condition */
669 env->fpscr |= 1 << FPSCR_VXSOFT;
671 switch (op & ~(POWERPC_EXCP_FP_VXSOFT | POWERPC_EXCP_FP_VXSNAN)) {
672 case POWERPC_EXCP_FP_VXISI:
673 /* Magnitude subtraction of infinities */
674 env->fpscr |= 1 << FPSCR_VXISI;
675 goto update_arith;
676 case POWERPC_EXCP_FP_VXIDI:
677 /* Division of infinity by infinity */
678 env->fpscr |= 1 << FPSCR_VXIDI;
679 goto update_arith;
680 case POWERPC_EXCP_FP_VXZDZ:
681 /* Division of zero by zero */
682 env->fpscr |= 1 << FPSCR_VXZDZ;
683 goto update_arith;
684 case POWERPC_EXCP_FP_VXIMZ:
685 /* Multiplication of zero by infinity */
686 env->fpscr |= 1 << FPSCR_VXIMZ;
687 goto update_arith;
688 case POWERPC_EXCP_FP_VXVC:
689 /* Ordered comparison of NaN */
690 env->fpscr |= 1 << FPSCR_VXVC;
691 env->fpscr &= ~(0xF << FPSCR_FPCC);
692 env->fpscr |= 0x11 << FPSCR_FPCC;
693 /* We must update the target FPR before raising the exception */
694 if (ve != 0) {
695 env->exception_index = POWERPC_EXCP_PROGRAM;
696 env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_VXVC;
697 /* Update the floating-point enabled exception summary */
698 env->fpscr |= 1 << FPSCR_FEX;
699 /* Exception is deferred */
700 ve = 0;
702 break;
703 case POWERPC_EXCP_FP_VXSQRT:
704 /* Square root of a negative number */
705 env->fpscr |= 1 << FPSCR_VXSQRT;
706 update_arith:
707 env->fpscr &= ~((1 << FPSCR_FR) | (1 << FPSCR_FI));
708 if (ve == 0) {
709 /* Set the result to quiet NaN */
710 ret = UINT64_MAX;
711 env->fpscr &= ~(0xF << FPSCR_FPCC);
712 env->fpscr |= 0x11 << FPSCR_FPCC;
714 break;
715 case POWERPC_EXCP_FP_VXCVI:
716 /* Invalid conversion */
717 env->fpscr |= 1 << FPSCR_VXCVI;
718 env->fpscr &= ~((1 << FPSCR_FR) | (1 << FPSCR_FI));
719 if (ve == 0) {
720 /* Set the result to quiet NaN */
721 ret = UINT64_MAX;
722 env->fpscr &= ~(0xF << FPSCR_FPCC);
723 env->fpscr |= 0x11 << FPSCR_FPCC;
725 break;
727 /* Update the floating-point invalid operation summary */
728 env->fpscr |= 1 << FPSCR_VX;
729 /* Update the floating-point exception summary */
730 env->fpscr |= 1 << FPSCR_FX;
731 if (ve != 0) {
732 /* Update the floating-point enabled exception summary */
733 env->fpscr |= 1 << FPSCR_FEX;
734 if (msr_fe0 != 0 || msr_fe1 != 0)
735 raise_exception_err(env, POWERPC_EXCP_PROGRAM, POWERPC_EXCP_FP | op);
737 return ret;
740 static always_inline uint64_t float_zero_divide_excp (uint64_t arg1, uint64_t arg2)
742 env->fpscr |= 1 << FPSCR_ZX;
743 env->fpscr &= ~((1 << FPSCR_FR) | (1 << FPSCR_FI));
744 /* Update the floating-point exception summary */
745 env->fpscr |= 1 << FPSCR_FX;
746 if (fpscr_ze != 0) {
747 /* Update the floating-point enabled exception summary */
748 env->fpscr |= 1 << FPSCR_FEX;
749 if (msr_fe0 != 0 || msr_fe1 != 0) {
750 raise_exception_err(env, POWERPC_EXCP_PROGRAM,
751 POWERPC_EXCP_FP | POWERPC_EXCP_FP_ZX);
753 } else {
754 /* Set the result to infinity */
755 arg1 = ((arg1 ^ arg2) & 0x8000000000000000ULL);
756 arg1 |= 0x7FFULL << 52;
758 return arg1;
761 static always_inline void float_overflow_excp (void)
763 env->fpscr |= 1 << FPSCR_OX;
764 /* Update the floating-point exception summary */
765 env->fpscr |= 1 << FPSCR_FX;
766 if (fpscr_oe != 0) {
767 /* XXX: should adjust the result */
768 /* Update the floating-point enabled exception summary */
769 env->fpscr |= 1 << FPSCR_FEX;
770 /* We must update the target FPR before raising the exception */
771 env->exception_index = POWERPC_EXCP_PROGRAM;
772 env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_OX;
773 } else {
774 env->fpscr |= 1 << FPSCR_XX;
775 env->fpscr |= 1 << FPSCR_FI;
779 static always_inline void float_underflow_excp (void)
781 env->fpscr |= 1 << FPSCR_UX;
782 /* Update the floating-point exception summary */
783 env->fpscr |= 1 << FPSCR_FX;
784 if (fpscr_ue != 0) {
785 /* XXX: should adjust the result */
786 /* Update the floating-point enabled exception summary */
787 env->fpscr |= 1 << FPSCR_FEX;
788 /* We must update the target FPR before raising the exception */
789 env->exception_index = POWERPC_EXCP_PROGRAM;
790 env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_UX;
794 static always_inline void float_inexact_excp (void)
796 env->fpscr |= 1 << FPSCR_XX;
797 /* Update the floating-point exception summary */
798 env->fpscr |= 1 << FPSCR_FX;
799 if (fpscr_xe != 0) {
800 /* Update the floating-point enabled exception summary */
801 env->fpscr |= 1 << FPSCR_FEX;
802 /* We must update the target FPR before raising the exception */
803 env->exception_index = POWERPC_EXCP_PROGRAM;
804 env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_XX;
808 static always_inline void fpscr_set_rounding_mode (void)
810 int rnd_type;
812 /* Set rounding mode */
813 switch (fpscr_rn) {
814 case 0:
815 /* Best approximation (round to nearest) */
816 rnd_type = float_round_nearest_even;
817 break;
818 case 1:
819 /* Smaller magnitude (round toward zero) */
820 rnd_type = float_round_to_zero;
821 break;
822 case 2:
823 /* Round toward +infinite */
824 rnd_type = float_round_up;
825 break;
826 default:
827 case 3:
828 /* Round toward -infinite */
829 rnd_type = float_round_down;
830 break;
832 set_float_rounding_mode(rnd_type, &env->fp_status);
835 void helper_fpscr_setbit (uint32_t bit)
837 int prev;
839 prev = (env->fpscr >> bit) & 1;
840 env->fpscr |= 1 << bit;
841 if (prev == 0) {
842 switch (bit) {
843 case FPSCR_VX:
844 env->fpscr |= 1 << FPSCR_FX;
845 if (fpscr_ve)
846 goto raise_ve;
847 case FPSCR_OX:
848 env->fpscr |= 1 << FPSCR_FX;
849 if (fpscr_oe)
850 goto raise_oe;
851 break;
852 case FPSCR_UX:
853 env->fpscr |= 1 << FPSCR_FX;
854 if (fpscr_ue)
855 goto raise_ue;
856 break;
857 case FPSCR_ZX:
858 env->fpscr |= 1 << FPSCR_FX;
859 if (fpscr_ze)
860 goto raise_ze;
861 break;
862 case FPSCR_XX:
863 env->fpscr |= 1 << FPSCR_FX;
864 if (fpscr_xe)
865 goto raise_xe;
866 break;
867 case FPSCR_VXSNAN:
868 case FPSCR_VXISI:
869 case FPSCR_VXIDI:
870 case FPSCR_VXZDZ:
871 case FPSCR_VXIMZ:
872 case FPSCR_VXVC:
873 case FPSCR_VXSOFT:
874 case FPSCR_VXSQRT:
875 case FPSCR_VXCVI:
876 env->fpscr |= 1 << FPSCR_VX;
877 env->fpscr |= 1 << FPSCR_FX;
878 if (fpscr_ve != 0)
879 goto raise_ve;
880 break;
881 case FPSCR_VE:
882 if (fpscr_vx != 0) {
883 raise_ve:
884 env->error_code = POWERPC_EXCP_FP;
885 if (fpscr_vxsnan)
886 env->error_code |= POWERPC_EXCP_FP_VXSNAN;
887 if (fpscr_vxisi)
888 env->error_code |= POWERPC_EXCP_FP_VXISI;
889 if (fpscr_vxidi)
890 env->error_code |= POWERPC_EXCP_FP_VXIDI;
891 if (fpscr_vxzdz)
892 env->error_code |= POWERPC_EXCP_FP_VXZDZ;
893 if (fpscr_vximz)
894 env->error_code |= POWERPC_EXCP_FP_VXIMZ;
895 if (fpscr_vxvc)
896 env->error_code |= POWERPC_EXCP_FP_VXVC;
897 if (fpscr_vxsoft)
898 env->error_code |= POWERPC_EXCP_FP_VXSOFT;
899 if (fpscr_vxsqrt)
900 env->error_code |= POWERPC_EXCP_FP_VXSQRT;
901 if (fpscr_vxcvi)
902 env->error_code |= POWERPC_EXCP_FP_VXCVI;
903 goto raise_excp;
905 break;
906 case FPSCR_OE:
907 if (fpscr_ox != 0) {
908 raise_oe:
909 env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_OX;
910 goto raise_excp;
912 break;
913 case FPSCR_UE:
914 if (fpscr_ux != 0) {
915 raise_ue:
916 env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_UX;
917 goto raise_excp;
919 break;
920 case FPSCR_ZE:
921 if (fpscr_zx != 0) {
922 raise_ze:
923 env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_ZX;
924 goto raise_excp;
926 break;
927 case FPSCR_XE:
928 if (fpscr_xx != 0) {
929 raise_xe:
930 env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_XX;
931 goto raise_excp;
933 break;
934 case FPSCR_RN1:
935 case FPSCR_RN:
936 fpscr_set_rounding_mode();
937 break;
938 default:
939 break;
940 raise_excp:
941 /* Update the floating-point enabled exception summary */
942 env->fpscr |= 1 << FPSCR_FEX;
943 /* We have to update Rc1 before raising the exception */
944 env->exception_index = POWERPC_EXCP_PROGRAM;
945 break;
950 void helper_store_fpscr (uint64_t arg, uint32_t mask)
953 * We use only the 32 LSB of the incoming fpr
955 uint32_t prev, new;
956 int i;
958 prev = env->fpscr;
959 new = (uint32_t)arg;
960 new &= ~0x90000000;
961 new |= prev & 0x90000000;
962 for (i = 0; i < 7; i++) {
963 if (mask & (1 << i)) {
964 env->fpscr &= ~(0xF << (4 * i));
965 env->fpscr |= new & (0xF << (4 * i));
968 /* Update VX and FEX */
969 if (fpscr_ix != 0)
970 env->fpscr |= 1 << FPSCR_VX;
971 else
972 env->fpscr &= ~(1 << FPSCR_VX);
973 if ((fpscr_ex & fpscr_eex) != 0) {
974 env->fpscr |= 1 << FPSCR_FEX;
975 env->exception_index = POWERPC_EXCP_PROGRAM;
976 /* XXX: we should compute it properly */
977 env->error_code = POWERPC_EXCP_FP;
979 else
980 env->fpscr &= ~(1 << FPSCR_FEX);
981 fpscr_set_rounding_mode();
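/* Called after a floating-point helper has written its result: convert
 * the flags accumulated by softfloat (overflow, underflow, inexact) into
 * FPSCR updates, and take any program interrupt that was deferred until
 * the target FPR could be updated, provided MSR[FE0] or MSR[FE1] enables
 * it.
 */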
984 void helper_float_check_status (void)
986 #ifdef CONFIG_SOFTFLOAT
987 if (env->exception_index == POWERPC_EXCP_PROGRAM &&
988 (env->error_code & POWERPC_EXCP_FP)) {
989 /* Deferred floating-point exception after target FPR update */
990 if (msr_fe0 != 0 || msr_fe1 != 0)
991 raise_exception_err(env, env->exception_index, env->error_code);
992 } else if (env->fp_status.float_exception_flags & float_flag_overflow) {
993 float_overflow_excp();
994 } else if (env->fp_status.float_exception_flags & float_flag_underflow) {
995 float_underflow_excp();
996 } else if (env->fp_status.float_exception_flags & float_flag_inexact) {
997 float_inexact_excp();
999 #else
1000 if (env->exception_index == POWERPC_EXCP_PROGRAM &&
1001 (env->error_code & POWERPC_EXCP_FP)) {
1002 /* Deferred floating-point exception after target FPR update */
1003 if (msr_fe0 != 0 || msr_fe1 != 0)
1004 raise_exception_err(env, env->exception_index, env->error_code);
1006 #endif
1009 #ifdef CONFIG_SOFTFLOAT
1010 void helper_reset_fpstatus (void)
1012 env->fp_status.float_exception_flags = 0;
1014 #endif
1016 /* fadd - fadd. */
1017 uint64_t helper_fadd (uint64_t arg1, uint64_t arg2)
1019 CPU_DoubleU farg1, farg2;
1021 farg1.ll = arg1;
1022 farg2.ll = arg2;
1023 #if USE_PRECISE_EMULATION
1024 if (unlikely(float64_is_signaling_nan(farg1.d) ||
1025 float64_is_signaling_nan(farg2.d))) {
1026 /* sNaN addition */
1027 farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
1028 } else if (likely(isfinite(farg1.d) || isfinite(farg2.d) ||
1029 fpisneg(farg1.d) == fpisneg(farg2.d))) {
1030 farg1.d = float64_add(farg1.d, farg2.d, &env->fp_status);
1031 } else {
1032 /* Magnitude subtraction of infinities */
1033 farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXISI);
1035 #else
1036 farg1.d = float64_add(farg1.d, farg2.d, &env->fp_status);
1037 #endif
1038 return farg1.ll;
1041 /* fsub - fsub. */
1042 uint64_t helper_fsub (uint64_t arg1, uint64_t arg2)
1044 CPU_DoubleU farg1, farg2;
1046 farg1.ll = arg1;
1047 farg2.ll = arg2;
1048 #if USE_PRECISE_EMULATION
1050 if (unlikely(float64_is_signaling_nan(farg1.d) ||
1051 float64_is_signaling_nan(farg2.d))) {
1052 /* sNaN subtraction */
1053 farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
1054 } else if (likely(isfinite(farg1.d) || isfinite(farg2.d) ||
1055 fpisneg(farg1.d) != fpisneg(farg2.d))) {
1056 farg1.d = float64_sub(farg1.d, farg2.d, &env->fp_status);
1057 } else {
1058 /* Magnitude subtraction of infinities */
1059 farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXISI);
1062 #else
1063 farg1.d = float64_sub(farg1.d, farg2.d, &env->fp_status);
1064 #endif
1065 return farg1.ll;
1068 /* fmul - fmul. */
1069 uint64_t helper_fmul (uint64_t arg1, uint64_t arg2)
1071 CPU_DoubleU farg1, farg2;
1073 farg1.ll = arg1;
1074 farg2.ll = arg2;
1075 #if USE_PRECISE_EMULATION
1076 if (unlikely(float64_is_signaling_nan(farg1.d) ||
1077 float64_is_signaling_nan(farg2.d))) {
1078 /* sNaN multiplication */
1079 farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
1080 } else if (unlikely((isinfinity(farg1.d) && iszero(farg2.d)) ||
1081 (iszero(farg1.d) && isinfinity(farg2.d)))) {
1082 /* Multiplication of zero by infinity */
1083 farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXIMZ);
1084 } else {
1085 farg1.d = float64_mul(farg1.d, farg2.d, &env->fp_status);
1088 #else
1089 farg1.d = float64_mul(farg1.d, farg2.d, &env->fp_status);
1090 #endif
1091 return farg1.ll;
1094 /* fdiv - fdiv. */
1095 uint64_t helper_fdiv (uint64_t arg1, uint64_t arg2)
1097 CPU_DoubleU farg1, farg2;
1099 farg1.ll = arg1;
1100 farg2.ll = arg2;
1101 #if USE_PRECISE_EMULATION
1102 if (unlikely(float64_is_signaling_nan(farg1.d) ||
1103 float64_is_signaling_nan(farg2.d))) {
1104 /* sNaN division */
1105 farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
1106 } else if (unlikely(isinfinity(farg1.d) && isinfinity(farg2.d))) {
1107 /* Division of infinity by infinity */
1108 farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXIDI);
1109 } else if (unlikely(iszero(farg2.d))) {
1110 if (iszero(farg1.d)) {
1111 /* Division of zero by zero */
1112 farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXZDZ);
1113 } else {
1114 /* Division by zero */
1115 farg1.ll = float_zero_divide_excp(farg1.d, farg2.d);
1117 } else {
1118 farg1.d = float64_div(farg1.d, farg2.d, &env->fp_status);
1120 #else
1121 farg1.d = float64_div(farg1.d, farg2.d, &env->fp_status);
1122 #endif
1123 return farg1.ll;
1126 /* fabs */
1127 uint64_t helper_fabs (uint64_t arg)
1129 CPU_DoubleU farg;
1131 farg.ll = arg;
1132 farg.d = float64_abs(farg.d);
1133 return farg.ll;
1136 /* fnabs */
1137 uint64_t helper_fnabs (uint64_t arg)
1139 CPU_DoubleU farg;
1141 farg.ll = arg;
1142 farg.d = float64_abs(farg.d);
1143 farg.d = float64_chs(farg.d);
1144 return farg.ll;
1147 /* fneg */
1148 uint64_t helper_fneg (uint64_t arg)
1150 CPU_DoubleU farg;
1152 farg.ll = arg;
1153 farg.d = float64_chs(farg.d);
1154 return farg.ll;
1157 /* fctiw - fctiw. */
1158 uint64_t helper_fctiw (uint64_t arg)
1160 CPU_DoubleU farg;
1161 farg.ll = arg;
1163 if (unlikely(float64_is_signaling_nan(farg.d))) {
1164 /* sNaN conversion */
1165 farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN | POWERPC_EXCP_FP_VXCVI);
1166 } else if (unlikely(float64_is_nan(farg.d) || isinfinity(farg.d))) {
1167 /* qNan / infinity conversion */
1168 farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXCVI);
1169 } else {
1170 farg.ll = float64_to_int32(farg.d, &env->fp_status);
1171 #if USE_PRECISE_EMULATION
1172 /* XXX: higher bits are not supposed to be significant.
1173 * to make tests easier, return the same as a real PowerPC 750
1175 farg.ll |= 0xFFF80000ULL << 32;
1176 #endif
1178 return farg.ll;
1181 /* fctiwz - fctiwz. */
1182 uint64_t helper_fctiwz (uint64_t arg)
1184 CPU_DoubleU farg;
1185 farg.ll = arg;
1187 if (unlikely(float64_is_signaling_nan(farg.d))) {
1188 /* sNaN conversion */
1189 farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN | POWERPC_EXCP_FP_VXCVI);
1190 } else if (unlikely(float64_is_nan(farg.d) || isinfinity(farg.d))) {
1191 /* qNan / infinity conversion */
1192 farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXCVI);
1193 } else {
1194 farg.ll = float64_to_int32_round_to_zero(farg.d, &env->fp_status);
1195 #if USE_PRECISE_EMULATION
1196 /* XXX: higher bits are not supposed to be significant.
1197 * to make tests easier, return the same as a real PowerPC 750
1199 farg.ll |= 0xFFF80000ULL << 32;
1200 #endif
1202 return farg.ll;
1205 #if defined(TARGET_PPC64)
1206 /* fcfid - fcfid. */
1207 uint64_t helper_fcfid (uint64_t arg)
1209 CPU_DoubleU farg;
1210 farg.d = int64_to_float64(arg, &env->fp_status);
1211 return farg.ll;
1214 /* fctid - fctid. */
1215 uint64_t helper_fctid (uint64_t arg)
1217 CPU_DoubleU farg;
1218 farg.ll = arg;
1220 if (unlikely(float64_is_signaling_nan(farg.d))) {
1221 /* sNaN conversion */
1222 farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN | POWERPC_EXCP_FP_VXCVI);
1223 } else if (unlikely(float64_is_nan(farg.d) || isinfinity(farg.d))) {
1224 /* qNan / infinity conversion */
1225 farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXCVI);
1226 } else {
1227 farg.ll = float64_to_int64(farg.d, &env->fp_status);
1229 return farg.ll;
1232 /* fctidz - fctidz. */
1233 uint64_t helper_fctidz (uint64_t arg)
1235 CPU_DoubleU farg;
1236 farg.ll = arg;
1238 if (unlikely(float64_is_signaling_nan(farg.d))) {
1239 /* sNaN conversion */
1240 farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN | POWERPC_EXCP_FP_VXCVI);
1241 } else if (unlikely(float64_is_nan(farg.d) || isinfinity(farg.d))) {
1242 /* qNan / infinity conversion */
1243 farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXCVI);
1244 } else {
1245 farg.ll = float64_to_int64_round_to_zero(farg.d, &env->fp_status);
1247 return farg.ll;
1250 #endif
1252 static always_inline uint64_t do_fri (uint64_t arg, int rounding_mode)
1254 CPU_DoubleU farg;
1255 farg.ll = arg;
1257 if (unlikely(float64_is_signaling_nan(farg.d))) {
1258 /* sNaN round */
1259 farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN | POWERPC_EXCP_FP_VXCVI);
1260 } else if (unlikely(float64_is_nan(farg.d) || isinfinity(farg.d))) {
1261 /* qNan / infinity round */
1262 farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXCVI);
1263 } else {
1264 set_float_rounding_mode(rounding_mode, &env->fp_status);
1265 farg.ll = float64_round_to_int(farg.d, &env->fp_status);
1266 /* Restore rounding mode from FPSCR */
1267 fpscr_set_rounding_mode();
1269 return farg.ll;
1272 uint64_t helper_frin (uint64_t arg)
1274 return do_fri(arg, float_round_nearest_even);
1277 uint64_t helper_friz (uint64_t arg)
1279 return do_fri(arg, float_round_to_zero);
1282 uint64_t helper_frip (uint64_t arg)
1284 return do_fri(arg, float_round_up);
1287 uint64_t helper_frim (uint64_t arg)
1289 return do_fri(arg, float_round_down);
1292 /* fmadd - fmadd. */
1293 uint64_t helper_fmadd (uint64_t arg1, uint64_t arg2, uint64_t arg3)
1295 CPU_DoubleU farg1, farg2, farg3;
1297 farg1.ll = arg1;
1298 farg2.ll = arg2;
1299 farg3.ll = arg3;
1300 #if USE_PRECISE_EMULATION
1301 if (unlikely(float64_is_signaling_nan(farg1.d) ||
1302 float64_is_signaling_nan(farg2.d) ||
1303 float64_is_signaling_nan(farg3.d))) {
1304 /* sNaN operation */
1305 farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
1306 } else {
1307 #ifdef FLOAT128
1308 /* This is the way the PowerPC specification defines it */
1309 float128 ft0_128, ft1_128;
1311 ft0_128 = float64_to_float128(farg1.d, &env->fp_status);
1312 ft1_128 = float64_to_float128(farg2.d, &env->fp_status);
1313 ft0_128 = float128_mul(ft0_128, ft1_128, &env->fp_status);
1314 ft1_128 = float64_to_float128(farg3.d, &env->fp_status);
1315 ft0_128 = float128_add(ft0_128, ft1_128, &env->fp_status);
1316 farg1.d = float128_to_float64(ft0_128, &env->fp_status);
1317 #else
1318 /* This is OK on x86 hosts */
1319 farg1.d = (farg1.d * farg2.d) + farg3.d;
1320 #endif
1322 #else
1323 farg1.d = float64_mul(farg1.d, farg2.d, &env->fp_status);
1324 farg1.d = float64_add(farg1.d, farg3.d, &env->fp_status);
1325 #endif
1326 return farg1.ll;
1329 /* fmsub - fmsub. */
1330 uint64_t helper_fmsub (uint64_t arg1, uint64_t arg2, uint64_t arg3)
1332 CPU_DoubleU farg1, farg2, farg3;
1334 farg1.ll = arg1;
1335 farg2.ll = arg2;
1336 farg3.ll = arg3;
1337 #if USE_PRECISE_EMULATION
1338 if (unlikely(float64_is_signaling_nan(farg1.d) ||
1339 float64_is_signaling_nan(farg2.d) ||
1340 float64_is_signaling_nan(farg3.d))) {
1341 /* sNaN operation */
1342 farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
1343 } else {
1344 #ifdef FLOAT128
1345 /* This is the way the PowerPC specification defines it */
1346 float128 ft0_128, ft1_128;
1348 ft0_128 = float64_to_float128(farg1.d, &env->fp_status);
1349 ft1_128 = float64_to_float128(farg2.d, &env->fp_status);
1350 ft0_128 = float128_mul(ft0_128, ft1_128, &env->fp_status);
1351 ft1_128 = float64_to_float128(farg3.d, &env->fp_status);
1352 ft0_128 = float128_sub(ft0_128, ft1_128, &env->fp_status);
1353 farg1.d = float128_to_float64(ft0_128, &env->fp_status);
1354 #else
1355 /* This is OK on x86 hosts */
1356 farg1.d = (farg1.d * farg2.d) - farg3.d;
1357 #endif
1359 #else
1360 farg1.d = float64_mul(farg1.d, farg2.d, &env->fp_status);
1361 farg1.d = float64_sub(farg1.d, farg3.d, &env->fp_status);
1362 #endif
1363 return farg1.ll;
1366 /* fnmadd - fnmadd. */
1367 uint64_t helper_fnmadd (uint64_t arg1, uint64_t arg2, uint64_t arg3)
1369 CPU_DoubleU farg1, farg2, farg3;
1371 farg1.ll = arg1;
1372 farg2.ll = arg2;
1373 farg3.ll = arg3;
1375 if (unlikely(float64_is_signaling_nan(farg1.d) ||
1376 float64_is_signaling_nan(farg2.d) ||
1377 float64_is_signaling_nan(farg3.d))) {
1378 /* sNaN operation */
1379 farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
1380 } else {
1381 #if USE_PRECISE_EMULATION
1382 #ifdef FLOAT128
1383 /* This is the way the PowerPC specification defines it */
1384 float128 ft0_128, ft1_128;
1386 ft0_128 = float64_to_float128(farg1.d, &env->fp_status);
1387 ft1_128 = float64_to_float128(farg2.d, &env->fp_status);
1388 ft0_128 = float128_mul(ft0_128, ft1_128, &env->fp_status);
1389 ft1_128 = float64_to_float128(farg3.d, &env->fp_status);
1390 ft0_128 = float128_add(ft0_128, ft1_128, &env->fp_status);
1391 farg1.d = float128_to_float64(ft0_128, &env->fp_status);
1392 #else
1393 /* This is OK on x86 hosts */
1394 farg1.d = (farg1.d * farg2.d) + farg3.d;
1395 #endif
1396 #else
1397 farg1.d = float64_mul(farg1.d, farg2.d, &env->fp_status);
1398 farg1.d = float64_add(farg1.d, farg3.d, &env->fp_status);
1399 #endif
1400 if (likely(!isnan(farg1.d)))
1401 farg1.d = float64_chs(farg1.d);
1403 return farg1.ll;
1406 /* fnmsub - fnmsub. */
1407 uint64_t helper_fnmsub (uint64_t arg1, uint64_t arg2, uint64_t arg3)
1409 CPU_DoubleU farg1, farg2, farg3;
1411 farg1.ll = arg1;
1412 farg2.ll = arg2;
1413 farg3.ll = arg3;
1415 if (unlikely(float64_is_signaling_nan(farg1.d) ||
1416 float64_is_signaling_nan(farg2.d) ||
1417 float64_is_signaling_nan(farg3.d))) {
1418 /* sNaN operation */
1419 farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
1420 } else {
1421 #if USE_PRECISE_EMULATION
1422 #ifdef FLOAT128
1423 /* This is the way the PowerPC specification defines it */
1424 float128 ft0_128, ft1_128;
1426 ft0_128 = float64_to_float128(farg1.d, &env->fp_status);
1427 ft1_128 = float64_to_float128(farg2.d, &env->fp_status);
1428 ft0_128 = float128_mul(ft0_128, ft1_128, &env->fp_status);
1429 ft1_128 = float64_to_float128(farg3.d, &env->fp_status);
1430 ft0_128 = float128_sub(ft0_128, ft1_128, &env->fp_status);
1431 farg1.d = float128_to_float64(ft0_128, &env->fp_status);
1432 #else
1433 /* This is OK on x86 hosts */
1434 farg1.d = (farg1.d * farg2.d) - farg3.d;
1435 #endif
1436 #else
1437 farg1.d = float64_mul(farg1.d, farg2.d, &env->fp_status);
1438 farg1.d = float64_sub(farg1.d, farg3.d, &env->fp_status);
1439 #endif
1440 if (likely(!isnan(farg1.d)))
1441 farg1.d = float64_chs(farg1.d);
1443 return farg1.ll;
1446 /* frsp - frsp. */
1447 uint64_t helper_frsp (uint64_t arg)
1449 CPU_DoubleU farg;
1450 farg.ll = arg;
1452 #if USE_PRECISE_EMULATION
1453 if (unlikely(float64_is_signaling_nan(farg.d))) {
1454 /* sNaN square root */
1455 farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
1456 } else {
1457 farg.d = float64_to_float32(farg.d, &env->fp_status);
1459 #else
1460 farg.d = float64_to_float32(farg.d, &env->fp_status);
1461 #endif
1462 return farg.ll;
1465 /* fsqrt - fsqrt. */
1466 uint64_t helper_fsqrt (uint64_t arg)
1468 CPU_DoubleU farg;
1469 farg.ll = arg;
1471 if (unlikely(float64_is_signaling_nan(farg.d))) {
1472 /* sNaN square root */
1473 farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
1474 } else if (unlikely(fpisneg(farg.d) && !iszero(farg.d))) {
1475 /* Square root of a negative nonzero number */
1476 farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSQRT);
1477 } else {
1478 farg.d = float64_sqrt(farg.d, &env->fp_status);
1480 return farg.ll;
1483 /* fre - fre. */
1484 uint64_t helper_fre (uint64_t arg)
1486 CPU_DoubleU farg;
1487 farg.ll = arg;
1489 if (unlikely(float64_is_signaling_nan(farg.d))) {
1490 /* sNaN reciprocal */
1491 farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
1492 } else if (unlikely(iszero(farg.d))) {
1493 /* Zero reciprocal */
1494 farg.ll = float_zero_divide_excp(1.0, farg.d);
1495 } else if (likely(isnormal(farg.d))) {
1496 farg.d = float64_div(1.0, farg.d, &env->fp_status);
1497 } else {
1498 if (farg.ll == 0x8000000000000000ULL) {
1499 farg.ll = 0xFFF0000000000000ULL;
1500 } else if (farg.ll == 0x0000000000000000ULL) {
1501 farg.ll = 0x7FF0000000000000ULL;
1502 } else if (isnan(farg.d)) {
1503 farg.ll = 0x7FF8000000000000ULL;
1504 } else if (fpisneg(farg.d)) {
1505 farg.ll = 0x8000000000000000ULL;
1506 } else {
1507 farg.ll = 0x0000000000000000ULL;
1510 return farg.ll;
1513 /* fres - fres. */
1514 uint64_t helper_fres (uint64_t arg)
1516 CPU_DoubleU farg;
1517 farg.ll = arg;
1519 if (unlikely(float64_is_signaling_nan(farg.d))) {
1520 /* sNaN reciprocal */
1521 farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
1522 } else if (unlikely(iszero(farg.d))) {
1523 /* Zero reciprocal */
1524 farg.ll = float_zero_divide_excp(1.0, farg.d);
1525 } else if (likely(isnormal(farg.d))) {
1526 #if USE_PRECISE_EMULATION
1527 farg.d = float64_div(1.0, farg.d, &env->fp_status);
1528 farg.d = float64_to_float32(farg.d, &env->fp_status);
1529 #else
1530 farg.d = float32_div(1.0, farg.d, &env->fp_status);
1531 #endif
1532 } else {
1533 if (farg.ll == 0x8000000000000000ULL) {
1534 farg.ll = 0xFFF0000000000000ULL;
1535 } else if (farg.ll == 0x0000000000000000ULL) {
1536 farg.ll = 0x7FF0000000000000ULL;
1537 } else if (isnan(farg.d)) {
1538 farg.ll = 0x7FF8000000000000ULL;
1539 } else if (fpisneg(farg.d)) {
1540 farg.ll = 0x8000000000000000ULL;
1541 } else {
1542 farg.ll = 0x0000000000000000ULL;
1545 return farg.ll;
1548 /* frsqrte - frsqrte. */
1549 uint64_t helper_frsqrte (uint64_t arg)
1551 CPU_DoubleU farg;
1552 farg.ll = arg;
1554 if (unlikely(float64_is_signaling_nan(farg.d))) {
1555 /* sNaN reciprocal square root */
1556 farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
1557 } else if (unlikely(fpisneg(farg.d) && !iszero(farg.d))) {
1558 /* Reciprocal square root of a negative nonzero number */
1559 farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSQRT);
1560 } else if (likely(isnormal(farg.d))) {
1561 farg.d = float64_sqrt(farg.d, &env->fp_status);
1562 farg.d = float32_div(1.0, farg.d, &env->fp_status);
1563 } else {
1564 if (farg.ll == 0x8000000000000000ULL) {
1565 farg.ll = 0xFFF0000000000000ULL;
1566 } else if (farg.ll == 0x0000000000000000ULL) {
1567 farg.ll = 0x7FF0000000000000ULL;
1568 } else if (isnan(farg.d)) {
1569 farg.ll |= 0x000FFFFFFFFFFFFFULL;
1570 } else if (fpisneg(farg.d)) {
1571 farg.ll = 0x7FF8000000000000ULL;
1572 } else {
1573 farg.ll = 0x0000000000000000ULL;
1576 return farg.ll;
1579 /* fsel - fsel. */
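/* fsel: if the first operand is greater than or equal to zero (either
 * zero counts), select the second argument, otherwise the third.  The
 * test here is on the sign bit only, so a NaN first operand follows its
 * sign rather than being treated as unordered.
 */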
1580 uint64_t helper_fsel (uint64_t arg1, uint64_t arg2, uint64_t arg3)
1582 CPU_DoubleU farg1, farg2, farg3;
1584 farg1.ll = arg1;
1585 farg2.ll = arg2;
1586 farg3.ll = arg3;
1588 if (!fpisneg(farg1.d) || iszero(farg1.d))
1589 return farg2.ll;
1590 else
1591 return farg3.ll;
1594 uint32_t helper_fcmpu (uint64_t arg1, uint64_t arg2)
1596 CPU_DoubleU farg1, farg2;
1597 uint32_t ret = 0;
1598 farg1.ll = arg1;
1599 farg2.ll = arg2;
1601 if (unlikely(float64_is_signaling_nan(farg1.d) ||
1602 float64_is_signaling_nan(farg2.d))) {
1603 /* sNaN comparison */
1604 fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
1605 } else {
1606 if (float64_lt(farg1.d, farg2.d, &env->fp_status)) {
1607 ret = 0x08UL;
1608 } else if (!float64_le(farg1.d, farg2.d, &env->fp_status)) {
1609 ret = 0x04UL;
1610 } else {
1611 ret = 0x02UL;
1614 env->fpscr &= ~(0x0F << FPSCR_FPRF);
1615 env->fpscr |= ret << FPSCR_FPRF;
1616 return ret;
1619 uint32_t helper_fcmpo (uint64_t arg1, uint64_t arg2)
1621 CPU_DoubleU farg1, farg2;
1622 uint32_t ret = 0;
1623 farg1.ll = arg1;
1624 farg2.ll = arg2;
1626 if (unlikely(float64_is_nan(farg1.d) ||
1627 float64_is_nan(farg2.d))) {
1628 if (float64_is_signaling_nan(farg1.d) ||
1629 float64_is_signaling_nan(farg2.d)) {
1630 /* sNaN comparison */
1631 fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN |
1632 POWERPC_EXCP_FP_VXVC);
1633 } else {
1634 /* qNaN comparison */
1635 fload_invalid_op_excp(POWERPC_EXCP_FP_VXVC);
1637 } else {
1638 if (float64_lt(farg1.d, farg2.d, &env->fp_status)) {
1639 ret = 0x08UL;
1640 } else if (!float64_le(farg1.d, farg2.d, &env->fp_status)) {
1641 ret = 0x04UL;
1642 } else {
1643 ret = 0x02UL;
1646 env->fpscr &= ~(0x0F << FPSCR_FPRF);
1647 env->fpscr |= ret << FPSCR_FPRF;
1648 return ret;
1651 #if !defined (CONFIG_USER_ONLY)
1652 void helper_store_msr (target_ulong val)
1654 val = hreg_store_msr(env, val, 0);
1655 if (val != 0) {
1656 env->interrupt_request |= CPU_INTERRUPT_EXITTB;
1657 raise_exception(env, val);
1661 void cpu_dump_rfi (target_ulong RA, target_ulong msr);
1663 static always_inline void do_rfi (target_ulong nip, target_ulong msr,
1664 target_ulong msrm, int keep_msrh)
1666 #if defined(TARGET_PPC64)
1667 if (msr & (1ULL << MSR_SF)) {
1668 nip = (uint64_t)nip;
1669 msr &= (uint64_t)msrm;
1670 } else {
1671 nip = (uint32_t)nip;
1672 msr = (uint32_t)(msr & msrm);
1673 if (keep_msrh)
1674 msr |= env->msr & ~((uint64_t)0xFFFFFFFF);
1676 #else
1677 nip = (uint32_t)nip;
1678 msr &= (uint32_t)msrm;
1679 #endif
1680 /* XXX: beware: this is false if VLE is supported */
1681 env->nip = nip & ~((target_ulong)0x00000003);
1682 hreg_store_msr(env, msr, 1);
1683 #if defined (DEBUG_OP)
1684 cpu_dump_rfi(env->nip, env->msr);
1685 #endif
1686 /* No need to raise an exception here,
1687 * as rfi is always the last insn of a TB
1689 env->interrupt_request |= CPU_INTERRUPT_EXITTB;
1692 void helper_rfi (void)
1694 do_rfi(env->spr[SPR_SRR0], env->spr[SPR_SRR1],
1695 ~((target_ulong)0xFFFF0000), 1);
1698 #if defined(TARGET_PPC64)
1699 void helper_rfid (void)
1701 do_rfi(env->spr[SPR_SRR0], env->spr[SPR_SRR1],
1702 ~((target_ulong)0xFFFF0000), 0);
1705 void helper_hrfid (void)
1707 do_rfi(env->spr[SPR_HSRR0], env->spr[SPR_HSRR1],
1708 ~((target_ulong)0xFFFF0000), 0);
1710 #endif
1711 #endif
1713 void helper_tw (target_ulong arg1, target_ulong arg2, uint32_t flags)
1715 if (!likely(!(((int32_t)arg1 < (int32_t)arg2 && (flags & 0x10)) ||
1716 ((int32_t)arg1 > (int32_t)arg2 && (flags & 0x08)) ||
1717 ((int32_t)arg1 == (int32_t)arg2 && (flags & 0x04)) ||
1718 ((uint32_t)arg1 < (uint32_t)arg2 && (flags & 0x02)) ||
1719 ((uint32_t)arg1 > (uint32_t)arg2 && (flags & 0x01))))) {
1720 raise_exception_err(env, POWERPC_EXCP_PROGRAM, POWERPC_EXCP_TRAP);
1724 #if defined(TARGET_PPC64)
1725 void helper_td (target_ulong arg1, target_ulong arg2, uint32_t flags)
1727 if (!likely(!(((int64_t)arg1 < (int64_t)arg2 && (flags & 0x10)) ||
1728 ((int64_t)arg1 > (int64_t)arg2 && (flags & 0x08)) ||
1729 ((int64_t)arg1 == (int64_t)arg2 && (flags & 0x04)) ||
1730 ((uint64_t)arg1 < (uint64_t)arg2 && (flags & 0x02)) ||
1731 ((uint64_t)arg1 > (uint64_t)arg2 && (flags & 0x01)))))
1732 raise_exception_err(env, POWERPC_EXCP_PROGRAM, POWERPC_EXCP_TRAP);
1734 #endif
1736 /*****************************************************************************/
1737 /* PowerPC 601 specific instructions (POWER bridge) */
1739 target_ulong helper_clcs (uint32_t arg)
1741 switch (arg) {
1742 case 0x0CUL:
1743 /* Instruction cache line size */
1744 return env->icache_line_size;
1745 break;
1746 case 0x0DUL:
1747 /* Data cache line size */
1748 return env->dcache_line_size;
1749 break;
1750 case 0x0EUL:
1751 /* Minimum cache line size */
1752 return (env->icache_line_size < env->dcache_line_size) ?
1753 env->icache_line_size : env->dcache_line_size;
1754 break;
1755 case 0x0FUL:
1756 /* Maximum cache line size */
1757 return (env->icache_line_size > env->dcache_line_size) ?
1758 env->icache_line_size : env->dcache_line_size;
1759 break;
1760 default:
1761 /* Undefined */
1762 return 0;
1763 break;
1767 target_ulong helper_div (target_ulong arg1, target_ulong arg2)
1769 uint64_t tmp = (uint64_t)arg1 << 32 | env->spr[SPR_MQ];
1771 if (((int32_t)tmp == INT32_MIN && (int32_t)arg2 == (int32_t)-1) ||
1772 (int32_t)arg2 == 0) {
1773 env->spr[SPR_MQ] = 0;
1774 return INT32_MIN;
1775 } else {
1776 env->spr[SPR_MQ] = tmp % arg2;
1777 return tmp / (int32_t)arg2;
1781 target_ulong helper_divo (target_ulong arg1, target_ulong arg2)
1783 uint64_t tmp = (uint64_t)arg1 << 32 | env->spr[SPR_MQ];
1785 if (((int32_t)tmp == INT32_MIN && (int32_t)arg2 == (int32_t)-1) ||
1786 (int32_t)arg2 == 0) {
1787 env->xer |= (1 << XER_OV) | (1 << XER_SO);
1788 env->spr[SPR_MQ] = 0;
1789 return INT32_MIN;
1790 } else {
1791 env->spr[SPR_MQ] = tmp % arg2;
1792 tmp /= (int32_t)arg2;
1793 if ((int32_t)tmp != tmp) {
1794 env->xer |= (1 << XER_OV) | (1 << XER_SO);
1795 } else {
1796 env->xer &= ~(1 << XER_OV);
1798 return tmp;
1802 target_ulong helper_divs (target_ulong arg1, target_ulong arg2)
1804 if (((int32_t)arg1 == INT32_MIN && (int32_t)arg2 == (int32_t)-1) ||
1805 (int32_t)arg2 == 0) {
1806 env->spr[SPR_MQ] = 0;
1807 return INT32_MIN;
1808 } else {
1809 env->spr[SPR_MQ] = (int32_t)arg1 % (int32_t)arg2;
1810 return (int32_t)arg1 / (int32_t)arg2;
1814 target_ulong helper_divso (target_ulong arg1, target_ulong arg2)
1816 if (((int32_t)arg1 == INT32_MIN && (int32_t)arg2 == (int32_t)-1) ||
1817 (int32_t)arg2 == 0) {
1818 env->xer |= (1 << XER_OV) | (1 << XER_SO);
1819 env->spr[SPR_MQ] = 0;
1820 return INT32_MIN;
1821 } else {
1822 env->xer &= ~(1 << XER_OV);
1823 env->spr[SPR_MQ] = (int32_t)arg1 % (int32_t)arg2;
1824 return (int32_t)arg1 / (int32_t)arg2;
1828 #if !defined (CONFIG_USER_ONLY)
1829 target_ulong helper_rac (target_ulong addr)
1831 mmu_ctx_t ctx;
1832 int nb_BATs;
1833 target_ulong ret = 0;
1835 /* We don't have to generate many instances of this instruction,
1836 * as rac is supervisor only.
1838 /* XXX: FIX THIS: Pretend we have no BAT */
1839 nb_BATs = env->nb_BATs;
1840 env->nb_BATs = 0;
1841 if (get_physical_address(env, &ctx, addr, 0, ACCESS_INT) == 0)
1842 ret = ctx.raddr;
1843 env->nb_BATs = nb_BATs;
1844 return ret;
1847 void helper_rfsvc (void)
1849 do_rfi(env->lr, env->ctr, 0x0000FFFF, 0);
1851 #endif
1853 /*****************************************************************************/
1854 /* 602 specific instructions */
1855 /* mfrom is the craziest instruction ever seen, imho! */
1856 /* Real implementation uses a ROM table. Do the same */
1857 #define USE_MFROM_ROM_TABLE
1858 target_ulong helper_602_mfrom (target_ulong arg)
1860 if (likely(arg < 602)) {
1861 #if defined(USE_MFROM_ROM_TABLE)
1862 #include "mfrom_table.c"
1863 return mfrom_ROM_table[arg];
1864 #else
1865 double d;
1866 /* Extremely decomposed:
1868 * return 256 * log10(10^(-arg / 256) + 1.0) + 0.5
1870 d = arg;
1871 d = float64_div(d, 256, &env->fp_status);
1872 d = float64_chs(d);
1873 d = exp10(d); // XXX: use float emulation function
1874 d = float64_add(d, 1.0, &env->fp_status);
1875 d = log10(d); // XXX: use float emulation function
1876 d = float64_mul(d, 256, &env->fp_status);
1877 d = float64_add(d, 0.5, &env->fp_status);
1878 return float64_round_to_int(d, &env->fp_status);
1879 #endif
1880 } else {
1881 return 0;
1885 /*****************************************************************************/
1886 /* Embedded PowerPC specific helpers */
1888 /* XXX: to be improved to check access rights when in user-mode */
1889 target_ulong helper_load_dcr (target_ulong dcrn)
1891 target_ulong val = 0;
1893 if (unlikely(env->dcr_env == NULL)) {
1894 if (loglevel != 0) {
1895 fprintf(logfile, "No DCR environment\n");
1897 raise_exception_err(env, POWERPC_EXCP_PROGRAM,
1898 POWERPC_EXCP_INVAL | POWERPC_EXCP_INVAL_INVAL);
1899 } else if (unlikely(ppc_dcr_read(env->dcr_env, dcrn, &val) != 0)) {
1900 if (loglevel != 0) {
1901 fprintf(logfile, "DCR read error %d %03x\n", (int)dcrn, (int)dcrn);
1903 raise_exception_err(env, POWERPC_EXCP_PROGRAM,
1904 POWERPC_EXCP_INVAL | POWERPC_EXCP_PRIV_REG);
1906 return val;
1909 void helper_store_dcr (target_ulong dcrn, target_ulong val)
1911 if (unlikely(env->dcr_env == NULL)) {
1912 if (loglevel != 0) {
1913 fprintf(logfile, "No DCR environment\n");
1915 raise_exception_err(env, POWERPC_EXCP_PROGRAM,
1916 POWERPC_EXCP_INVAL | POWERPC_EXCP_INVAL_INVAL);
1917 } else if (unlikely(ppc_dcr_write(env->dcr_env, dcrn, val) != 0)) {
1918 if (loglevel != 0) {
1919 fprintf(logfile, "DCR write error %d %03x\n", (int)dcrn, (int)dcrn);
1921 raise_exception_err(env, POWERPC_EXCP_PROGRAM,
1922 POWERPC_EXCP_INVAL | POWERPC_EXCP_PRIV_REG);
1926 #if !defined(CONFIG_USER_ONLY)
1927 void helper_40x_rfci (void)
1929 do_rfi(env->spr[SPR_40x_SRR2], env->spr[SPR_40x_SRR3],
1930 ~((target_ulong)0xFFFF0000), 0);
1933 void helper_rfci (void)
1935 do_rfi(env->spr[SPR_BOOKE_CSRR0], env->spr[SPR_BOOKE_CSRR1],
1936 ~((target_ulong)0x3FFF0000), 0);
1939 void helper_rfdi (void)
1941 do_rfi(env->spr[SPR_BOOKE_DSRR0], env->spr[SPR_BOOKE_DSRR1],
1942 ~((target_ulong)0x3FFF0000), 0);
1945 void helper_rfmci (void)
1947 do_rfi(env->spr[SPR_BOOKE_MCSRR0], env->spr[SPR_BOOKE_MCSRR1],
1948 ~((target_ulong)0x3FFF0000), 0);
1950 #endif
1952 /* 440 specific */
1953 target_ulong helper_dlmzb (target_ulong high, target_ulong low, uint32_t update_Rc)
1955 target_ulong mask;
1956 int i;
1958 i = 1;
1959 for (mask = 0xFF000000; mask != 0; mask = mask >> 8) {
1960 if ((high & mask) == 0) {
1961 if (update_Rc) {
1962 env->crf[0] = 0x4;
1964 goto done;
1966 i++;
1968 for (mask = 0xFF000000; mask != 0; mask = mask >> 8) {
1969 if ((low & mask) == 0) {
1970 if (update_Rc) {
1971 env->crf[0] = 0x8;
1973 goto done;
1975 i++;
1977 if (update_Rc) {
1978 env->crf[0] = 0x2;
1980 done:
1981 env->xer = (env->xer & ~0x7F) | i;
1982 if (update_Rc) {
1983 env->crf[0] |= xer_so;
1985 return i;
1988 /*****************************************************************************/
1989 /* SPE extension helpers */
1990 /* Use a table to make this quicker */
1991 static uint8_t hbrev[16] = {
1992 0x0, 0x8, 0x4, 0xC, 0x2, 0xA, 0x6, 0xE,
1993 0x1, 0x9, 0x5, 0xD, 0x3, 0xB, 0x7, 0xF,
1996 static always_inline uint8_t byte_reverse (uint8_t val)
1998 return hbrev[val >> 4] | (hbrev[val & 0xF] << 4);
2001 static always_inline uint32_t word_reverse (uint32_t val)
2003 return byte_reverse(val >> 24) | (byte_reverse(val >> 16) << 8) |
2004 (byte_reverse(val >> 8) << 16) | (byte_reverse(val) << 24);
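/* brinc computes the next bit-reversed index for FFT-style addressing:
 * the low MASKBITS bits of arg1 are bit-reversed, incremented, and
 * reversed back, with arg2 selecting which of those bits participate;
 * bits above the mask are passed through from arg1 unchanged.
 */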
2007 #define MASKBITS 16 // Random value - to be fixed (implementation dependent)
2008 target_ulong helper_brinc (target_ulong arg1, target_ulong arg2)
2010 uint32_t a, b, d, mask;
2012 mask = UINT32_MAX >> (32 - MASKBITS);
2013 a = arg1 & mask;
2014 b = arg2 & mask;
2015 d = word_reverse(1 + word_reverse(a | ~b));
2016 return (arg1 & ~mask) | (d & b);
2019 uint32_t helper_cntlsw32 (uint32_t val)
2021 if (val & 0x80000000)
2022 return clz32(~val);
2023 else
2024 return clz32(val);
2027 uint32_t helper_cntlzw32 (uint32_t val)
2029 return clz32(val);
2032 /* Single-precision floating-point conversions */
2033 static always_inline uint32_t efscfsi (uint32_t val)
2035 CPU_FloatU u;
2037 u.f = int32_to_float32(val, &env->spe_status);
2039 return u.l;
2042 static always_inline uint32_t efscfui (uint32_t val)
2044 CPU_FloatU u;
2046 u.f = uint32_to_float32(val, &env->spe_status);
2048 return u.l;
2051 static always_inline int32_t efsctsi (uint32_t val)
2053 CPU_FloatU u;
2055 u.l = val;
2056 /* NaNs are not treated the way IEEE 754 specifies */
2057 if (unlikely(isnan(u.f)))
2058 return 0;
2060 return float32_to_int32(u.f, &env->spe_status);
2063 static always_inline uint32_t efsctui (uint32_t val)
2065 CPU_FloatU u;
2067 u.l = val;
2068 /* NaNs are not treated the way IEEE 754 specifies */
2069 if (unlikely(isnan(u.f)))
2070 return 0;
2072 return float32_to_uint32(u.f, &env->spe_status);
2075 static always_inline uint32_t efsctsiz (uint32_t val)
2077 CPU_FloatU u;
2079 u.l = val;
2080 /* NaNs are not treated the way IEEE 754 specifies */
2081 if (unlikely(isnan(u.f)))
2082 return 0;
2084 return float32_to_int32_round_to_zero(u.f, &env->spe_status);
2087 static always_inline uint32_t efsctuiz (uint32_t val)
2089 CPU_FloatU u;
2091 u.l = val;
2092 /* NaNs are not treated the way IEEE 754 specifies */
2093 if (unlikely(isnan(u.f)))
2094 return 0;
2096 return float32_to_uint32_round_to_zero(u.f, &env->spe_status);
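/* The *sf/*uf conversions below treat the fixed-point operand as a signed
 * or unsigned 32-bit fraction: the value is scaled by 2^-32 after the
 * integer-to-float conversion, and by 2^32 before converting back.
 */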
2099 static always_inline uint32_t efscfsf (uint32_t val)
2101 CPU_FloatU u;
2102 float32 tmp;
2104 u.f = int32_to_float32(val, &env->spe_status);
2105 tmp = int64_to_float32(1ULL << 32, &env->spe_status);
2106 u.f = float32_div(u.f, tmp, &env->spe_status);
2108 return u.l;
2111 static always_inline uint32_t efscfuf (uint32_t val)
2113 CPU_FloatU u;
2114 float32 tmp;
2116 u.f = uint32_to_float32(val, &env->spe_status);
2117 tmp = uint64_to_float32(1ULL << 32, &env->spe_status);
2118 u.f = float32_div(u.f, tmp, &env->spe_status);
2120 return u.l;
2123 static always_inline uint32_t efsctsf (uint32_t val)
2125 CPU_FloatU u;
2126 float32 tmp;
2128 u.l = val;
2129 /* NaNs are not treated the way IEEE 754 specifies */
2130 if (unlikely(isnan(u.f)))
2131 return 0;
2132 tmp = uint64_to_float32(1ULL << 32, &env->spe_status);
2133 u.f = float32_mul(u.f, tmp, &env->spe_status);
2135 return float32_to_int32(u.f, &env->spe_status);
2138 static always_inline uint32_t efsctuf (uint32_t val)
2140 CPU_FloatU u;
2141 float32 tmp;
2143 u.l = val;
2144 /* NaNs are not treated the way IEEE 754 specifies */
2145 if (unlikely(isnan(u.f)))
2146 return 0;
2147 tmp = uint64_to_float32(1ULL << 32, &env->spe_status);
2148 u.f = float32_mul(u.f, tmp, &env->spe_status);
2150 return float32_to_uint32(u.f, &env->spe_status);
2153 #define HELPER_SPE_SINGLE_CONV(name) \
2154 uint32_t helper_e##name (uint32_t val) \
2156 return e##name(val); \
2158 /* efscfsi */
2159 HELPER_SPE_SINGLE_CONV(fscfsi);
2160 /* efscfui */
2161 HELPER_SPE_SINGLE_CONV(fscfui);
2162 /* efscfuf */
2163 HELPER_SPE_SINGLE_CONV(fscfuf);
2164 /* efscfsf */
2165 HELPER_SPE_SINGLE_CONV(fscfsf);
2166 /* efsctsi */
2167 HELPER_SPE_SINGLE_CONV(fsctsi);
2168 /* efsctui */
2169 HELPER_SPE_SINGLE_CONV(fsctui);
2170 /* efsctsiz */
2171 HELPER_SPE_SINGLE_CONV(fsctsiz);
2172 /* efsctuiz */
2173 HELPER_SPE_SINGLE_CONV(fsctuiz);
2174 /* efsctsf */
2175 HELPER_SPE_SINGLE_CONV(fsctsf);
2176 /* efsctuf */
2177 HELPER_SPE_SINGLE_CONV(fsctuf);
2179 #define HELPER_SPE_VECTOR_CONV(name) \
2180 uint64_t helper_ev##name (uint64_t val) \
2182 return ((uint64_t)e##name(val >> 32) << 32) | \
2183 (uint64_t)e##name(val); \
2185 /* evfscfsi */
2186 HELPER_SPE_VECTOR_CONV(fscfsi);
2187 /* evfscfui */
2188 HELPER_SPE_VECTOR_CONV(fscfui);
2189 /* evfscfuf */
2190 HELPER_SPE_VECTOR_CONV(fscfuf);
2191 /* evfscfsf */
2192 HELPER_SPE_VECTOR_CONV(fscfsf);
2193 /* evfsctsi */
2194 HELPER_SPE_VECTOR_CONV(fsctsi);
2195 /* evfsctui */
2196 HELPER_SPE_VECTOR_CONV(fsctui);
2197 /* evfsctsiz */
2198 HELPER_SPE_VECTOR_CONV(fsctsiz);
2199 /* evfsctuiz */
2200 HELPER_SPE_VECTOR_CONV(fsctuiz);
2201 /* evfsctsf */
2202 HELPER_SPE_VECTOR_CONV(fsctsf);
2203 /* evfsctuf */
2204 HELPER_SPE_VECTOR_CONV(fsctuf);
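/* For reference, HELPER_SPE_VECTOR_CONV(fsctsi) expands to a helper that
 * applies the scalar routine to each 32-bit half of the 64-bit operand
 * (the low half relies on the uint64_t argument being truncated to the
 * uint32_t parameter):
 *
 *     uint64_t helper_evfsctsi (uint64_t val)
 *     {
 *         return ((uint64_t)efsctsi(val >> 32) << 32) |
 *                (uint64_t)efsctsi(val);
 *     }
 */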
2206 /* Single-precision floating-point arithmetic */
2207 static always_inline uint32_t efsadd (uint32_t op1, uint32_t op2)
2209 CPU_FloatU u1, u2;
2210 u1.l = op1;
2211 u2.l = op2;
2212 u1.f = float32_add(u1.f, u2.f, &env->spe_status);
2213 return u1.l;
2216 static always_inline uint32_t efssub (uint32_t op1, uint32_t op2)
2218 CPU_FloatU u1, u2;
2219 u1.l = op1;
2220 u2.l = op2;
2221 u1.f = float32_sub(u1.f, u2.f, &env->spe_status);
2222 return u1.l;
2225 static always_inline uint32_t efsmul (uint32_t op1, uint32_t op2)
2227 CPU_FloatU u1, u2;
2228 u1.l = op1;
2229 u2.l = op2;
2230 u1.f = float32_mul(u1.f, u2.f, &env->spe_status);
2231 return u1.l;
2234 static always_inline uint32_t efsdiv (uint32_t op1, uint32_t op2)
2236 CPU_FloatU u1, u2;
2237 u1.l = op1;
2238 u2.l = op2;
2239 u1.f = float32_div(u1.f, u2.f, &env->spe_status);
2240 return u1.l;
2243 #define HELPER_SPE_SINGLE_ARITH(name) \
2244 uint32_t helper_e##name (uint32_t op1, uint32_t op2) \
2246 return e##name(op1, op2); \
2248 /* efsadd */
2249 HELPER_SPE_SINGLE_ARITH(fsadd);
2250 /* efssub */
2251 HELPER_SPE_SINGLE_ARITH(fssub);
2252 /* efsmul */
2253 HELPER_SPE_SINGLE_ARITH(fsmul);
2254 /* efsdiv */
2255 HELPER_SPE_SINGLE_ARITH(fsdiv);
2257 #define HELPER_SPE_VECTOR_ARITH(name) \
2258 uint64_t helper_ev##name (uint64_t op1, uint64_t op2) \
2260 return ((uint64_t)e##name(op1 >> 32, op2 >> 32) << 32) | \
2261 (uint64_t)e##name(op1, op2); \
2263 /* evfsadd */
2264 HELPER_SPE_VECTOR_ARITH(fsadd);
2265 /* evfssub */
2266 HELPER_SPE_VECTOR_ARITH(fssub);
2267 /* evfsmul */
2268 HELPER_SPE_VECTOR_ARITH(fsmul);
2269 /* evfsdiv */
2270 HELPER_SPE_VECTOR_ARITH(fsdiv);
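/* The vector arithmetic helpers operate element-wise on the two IEEE
 * single-precision values packed into one 64-bit quantity (upper and lower
 * 32-bit words).  Illustrative usage, assuming an initialised
 * env->spe_status and the packing helper below (which is not part of this
 * file):
 */
#if 0
#include <stdint.h>
#include <string.h>

static uint64_t pack_f32_pair(float hi, float lo)
{
    uint32_t h, l;
    memcpy(&h, &hi, sizeof(h));
    memcpy(&l, &lo, sizeof(l));
    return ((uint64_t)h << 32) | l;
}

/* helper_evfsadd(pack_f32_pair(1.0f, 2.0f), pack_f32_pair(3.0f, 4.0f))
 * == pack_f32_pair(4.0f, 6.0f) */
#endif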
2272 /* Single-precision floating-point comparisons */
2273 static always_inline uint32_t efststlt (uint32_t op1, uint32_t op2)
2275 CPU_FloatU u1, u2;
2276 u1.l = op1;
2277 u2.l = op2;
2278 return float32_lt(u1.f, u2.f, &env->spe_status) ? 4 : 0;
2281 static always_inline uint32_t efststgt (uint32_t op1, uint32_t op2)
2283 CPU_FloatU u1, u2;
2284 u1.l = op1;
2285 u2.l = op2;
2286 return float32_le(u1.f, u2.f, &env->spe_status) ? 0 : 4;
2289 static always_inline uint32_t efststeq (uint32_t op1, uint32_t op2)
2291 CPU_FloatU u1, u2;
2292 u1.l = op1;
2293 u2.l = op2;
2294 return float32_eq(u1.f, u2.f, &env->spe_status) ? 4 : 0;
2297 static always_inline uint32_t efscmplt (uint32_t op1, uint32_t op2)
2299 /* XXX: TODO: test special values (NaN, infinities, ...) */
2300 return efststlt(op1, op2);
2303 static always_inline uint32_t efscmpgt (uint32_t op1, uint32_t op2)
2305 /* XXX: TODO: test special values (NaN, infinities, ...) */
2306 return efststgt(op1, op2);
2309 static always_inline uint32_t efscmpeq (uint32_t op1, uint32_t op2)
2311 /* XXX: TODO: test special values (NaN, infinities, ...) */
2312 return efststeq(op1, op2);
2315 #define HELPER_SINGLE_SPE_CMP(name) \
2316 uint32_t helper_e##name (uint32_t op1, uint32_t op2) \
2318 return e##name(op1, op2) << 2; \
2320 /* efststlt */
2321 HELPER_SINGLE_SPE_CMP(fststlt);
2322 /* efststgt */
2323 HELPER_SINGLE_SPE_CMP(fststgt);
2324 /* efststeq */
2325 HELPER_SINGLE_SPE_CMP(fststeq);
2326 /* efscmplt */
2327 HELPER_SINGLE_SPE_CMP(fscmplt);
2328 /* efscmpgt */
2329 HELPER_SINGLE_SPE_CMP(fscmpgt);
2330 /* efscmpeq */
2331 HELPER_SINGLE_SPE_CMP(fscmpeq);
2333 static always_inline uint32_t evcmp_merge (int t0, int t1)
2335 return (t0 << 3) | (t1 << 2) | ((t0 | t1) << 1) | (t0 & t1);
2338 #define HELPER_VECTOR_SPE_CMP(name) \
2339 uint32_t helper_ev##name (uint64_t op1, uint64_t op2) \
2341 return evcmp_merge(e##name(op1 >> 32, op2 >> 32), e##name(op1, op2)); \
2343 /* evfststlt */
2344 HELPER_VECTOR_SPE_CMP(fststlt);
2345 /* evfststgt */
2346 HELPER_VECTOR_SPE_CMP(fststgt);
2347 /* evfststeq */
2348 HELPER_VECTOR_SPE_CMP(fststeq);
2349 /* evfscmplt */
2350 HELPER_VECTOR_SPE_CMP(fscmplt);
2351 /* evfscmpgt */
2352 HELPER_VECTOR_SPE_CMP(fscmpgt);
2353 /* evfscmpeq */
2354 HELPER_VECTOR_SPE_CMP(fscmpeq);
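/* evcmp_merge() packs the two per-element predicates into a 4-bit CR field
 * image: bit 3 = upper-word result, bit 2 = lower-word result, bit 1 = set
 * if either is set, bit 0 = set if both are set.  For boolean inputs:
 *
 *     evcmp_merge(1, 0) == 0xA    (8 | 0 | 2 | 0)
 *     evcmp_merge(0, 1) == 0x6    (0 | 4 | 2 | 0)
 *     evcmp_merge(1, 1) == 0xF
 *     evcmp_merge(0, 0) == 0x0
 */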
2356 /* Double-precision floating-point conversion */
2357 uint64_t helper_efdcfsi (uint32_t val)
2359 CPU_DoubleU u;
2361 u.d = int32_to_float64(val, &env->spe_status);
2363 return u.ll;
2366 uint64_t helper_efdcfsid (uint64_t val)
2368 CPU_DoubleU u;
2370 u.d = int64_to_float64(val, &env->spe_status);
2372 return u.ll;
2375 uint64_t helper_efdcfui (uint32_t val)
2377 CPU_DoubleU u;
2379 u.d = uint32_to_float64(val, &env->spe_status);
2381 return u.ll;
2384 uint64_t helper_efdcfuid (uint64_t val)
2386 CPU_DoubleU u;
2388 u.d = uint64_to_float64(val, &env->spe_status);
2390 return u.ll;
2393 uint32_t helper_efdctsi (uint64_t val)
2395 CPU_DoubleU u;
2397 u.ll = val;
2398 /* NaNs are not handled the way IEEE 754 specifies: SPE converts them to 0 */
2399 if (unlikely(isnan(u.d)))
2400 return 0;
2402 return float64_to_int32(u.d, &env->spe_status);
2405 uint32_t helper_efdctui (uint64_t val)
2407 CPU_DoubleU u;
2409 u.ll = val;
2410 /* NaNs are not handled the way IEEE 754 specifies: SPE converts them to 0 */
2411 if (unlikely(isnan(u.d)))
2412 return 0;
2414 return float64_to_uint32(u.d, &env->spe_status);
2417 uint32_t helper_efdctsiz (uint64_t val)
2419 CPU_DoubleU u;
2421 u.ll = val;
2422 /* NaNs are not handled the way IEEE 754 specifies: SPE converts them to 0 */
2423 if (unlikely(isnan(u.d)))
2424 return 0;
2426 return float64_to_int32_round_to_zero(u.d, &env->spe_status);
2429 uint64_t helper_efdctsidz (uint64_t val)
2431 CPU_DoubleU u;
2433 u.ll = val;
2434 /* NaNs are not handled the way IEEE 754 specifies: SPE converts them to 0 */
2435 if (unlikely(isnan(u.d)))
2436 return 0;
2438 return float64_to_int64_round_to_zero(u.d, &env->spe_status);
2441 uint32_t helper_efdctuiz (uint64_t val)
2443 CPU_DoubleU u;
2445 u.ll = val;
2446 /* NaNs are not handled the way IEEE 754 specifies: SPE converts them to 0 */
2447 if (unlikely(isnan(u.d)))
2448 return 0;
2450 return float64_to_uint32_round_to_zero(u.d, &env->spe_status);
2453 uint64_t helper_efdctuidz (uint64_t val)
2455 CPU_DoubleU u;
2457 u.ll = val;
2458 /* NaNs are not handled the way IEEE 754 specifies: SPE converts them to 0 */
2459 if (unlikely(isnan(u.d)))
2460 return 0;
2462 return float64_to_uint64_round_to_zero(u.d, &env->spe_status);
2465 uint64_t helper_efdcfsf (uint32_t val)
2467 CPU_DoubleU u;
2468 float64 tmp;
2470 u.d = int32_to_float64(val, &env->spe_status);
2471 tmp = int64_to_float64(1ULL << 32, &env->spe_status);
2472 u.d = float64_div(u.d, tmp, &env->spe_status);
2474 return u.ll;
2477 uint64_t helper_efdcfuf (uint32_t val)
2479 CPU_DoubleU u;
2480 float64 tmp;
2482 u.d = uint32_to_float64(val, &env->spe_status);
2483 tmp = int64_to_float64(1ULL << 32, &env->spe_status);
2484 u.d = float64_div(u.d, tmp, &env->spe_status);
2486 return u.ll;
2489 uint32_t helper_efdctsf (uint64_t val)
2491 CPU_DoubleU u;
2492 float64 tmp;
2494 u.ll = val;
2495 /* NaNs are not handled the way IEEE 754 specifies: SPE converts them to 0 */
2496 if (unlikely(isnan(u.d)))
2497 return 0;
2498 tmp = uint64_to_float64(1ULL << 32, &env->spe_status);
2499 u.d = float64_mul(u.d, tmp, &env->spe_status);
2501 return float64_to_int32(u.d, &env->spe_status);
2504 uint32_t helper_efdctuf (uint64_t val)
2506 CPU_DoubleU u;
2507 float64 tmp;
2509 u.ll = val;
2510 /* NaNs are not handled the way IEEE 754 specifies: SPE converts them to 0 */
2511 if (unlikely(isnan(u.d)))
2512 return 0;
2513 tmp = uint64_to_float64(1ULL << 32, &env->spe_status);
2514 u.d = float64_mul(u.d, tmp, &env->spe_status);
2516 return float64_to_uint32(u.d, &env->spe_status);
2519 uint32_t helper_efscfd (uint64_t val)
2521 CPU_DoubleU u1;
2522 CPU_FloatU u2;
2524 u1.ll = val;
2525 u2.f = float64_to_float32(u1.d, &env->spe_status);
2527 return u2.l;
2530 uint64_t helper_efdcfs (uint32_t val)
2532 CPU_DoubleU u2;
2533 CPU_FloatU u1;
2535 u1.l = val;
2536 u2.d = float32_to_float64(u1.f, &env->spe_status);
2538 return u2.ll;
2541 /* Double-precision floating-point arithmetic */
2542 uint64_t helper_efdadd (uint64_t op1, uint64_t op2)
2544 CPU_DoubleU u1, u2;
2545 u1.ll = op1;
2546 u2.ll = op2;
2547 u1.d = float64_add(u1.d, u2.d, &env->spe_status);
2548 return u1.ll;
2551 uint64_t helper_efdsub (uint64_t op1, uint64_t op2)
2553 CPU_DoubleU u1, u2;
2554 u1.ll = op1;
2555 u2.ll = op2;
2556 u1.d = float64_sub(u1.d, u2.d, &env->spe_status);
2557 return u1.ll;
2560 uint64_t helper_efdmul (uint64_t op1, uint64_t op2)
2562 CPU_DoubleU u1, u2;
2563 u1.ll = op1;
2564 u2.ll = op2;
2565 u1.d = float64_mul(u1.d, u2.d, &env->spe_status);
2566 return u1.ll;
2569 uint64_t helper_efddiv (uint64_t op1, uint64_t op2)
2571 CPU_DoubleU u1, u2;
2572 u1.ll = op1;
2573 u2.ll = op2;
2574 u1.d = float64_div(u1.d, u2.d, &env->spe_status);
2575 return u1.ll;
2578 /* Double-precision floating-point comparisons */
2579 uint32_t helper_efdtstlt (uint64_t op1, uint64_t op2)
2581 CPU_DoubleU u1, u2;
2582 u1.ll = op1;
2583 u2.ll = op2;
2584 return float64_lt(u1.d, u2.d, &env->spe_status) ? 4 : 0;
2587 uint32_t helper_efdtstgt (uint64_t op1, uint64_t op2)
2589 CPU_DoubleU u1, u2;
2590 u1.ll = op1;
2591 u2.ll = op2;
2592 return float64_le(u1.d, u2.d, &env->spe_status) ? 0 : 4;
2595 uint32_t helper_efdtsteq (uint64_t op1, uint64_t op2)
2597 CPU_DoubleU u1, u2;
2598 u1.ll = op1;
2599 u2.ll = op2;
2600 return float64_eq(u1.d, u2.d, &env->spe_status) ? 4 : 0;
2603 uint32_t helper_efdcmplt (uint64_t op1, uint64_t op2)
2605 /* XXX: TODO: test special values (NaN, infinities, ...) */
2606 return helper_efdtstlt(op1, op2);
2609 uint32_t helper_efdcmpgt (uint64_t op1, uint64_t op2)
2611 /* XXX: TODO: test special values (NaN, infinities, ...) */
2612 return helper_efdtstgt(op1, op2);
2615 uint32_t helper_efdcmpeq (uint64_t op1, uint64_t op2)
2617 /* XXX: TODO: test special values (NaN, infinities, ...) */
2618 return helper_efdtsteq(op1, op2);
2621 /*****************************************************************************/
2622 /* Softmmu support */
2623 #if !defined (CONFIG_USER_ONLY)
2625 #define MMUSUFFIX _mmu
2627 #define SHIFT 0
2628 #include "softmmu_template.h"
2630 #define SHIFT 1
2631 #include "softmmu_template.h"
2633 #define SHIFT 2
2634 #include "softmmu_template.h"
2636 #define SHIFT 3
2637 #include "softmmu_template.h"
2639 /* Try to fill the TLB and raise an exception on failure. If retaddr is
2640 NULL, the function was called from C code (i.e. not from generated
2641 code or from helper.c) */
2642 /* XXX: fix it to restore all registers */
2643 void tlb_fill (target_ulong addr, int is_write, int mmu_idx, void *retaddr)
2645 TranslationBlock *tb;
2646 CPUState *saved_env;
2647 unsigned long pc;
2648 int ret;
2650 /* XXX: hack to restore env in all cases, even if not called from
2651 generated code */
2652 saved_env = env;
2653 env = cpu_single_env;
2654 ret = cpu_ppc_handle_mmu_fault(env, addr, is_write, mmu_idx, 1);
2655 if (unlikely(ret != 0)) {
2656 if (likely(retaddr)) {
2657 /* now we have a real cpu fault */
2658 pc = (unsigned long)retaddr;
2659 tb = tb_find_pc(pc);
2660 if (likely(tb)) {
2661 /* the PC is inside the translated code. It means that we have
2662 a virtual CPU fault */
2663 cpu_restore_state(tb, env, pc, NULL);
2666 raise_exception_err(env, env->exception_index, env->error_code);
2668 env = saved_env;
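/* The four softmmu_template.h inclusions above (SHIFT 0..3) generate the
 * 1-, 2-, 4- and 8-byte load/store slow paths.  On a TLB miss they call
 * tlb_fill(), which either refills the QEMU TLB through
 * cpu_ppc_handle_mmu_fault() or raises the guest MMU exception (the
 * raise_exception_err() call longjmps back to the main loop).  A rough,
 * illustrative model of that control flow; qemu_tlb_hit() and
 * do_mmu_access() below are placeholders, not the real template-generated
 * code:
 */
#if 0
static uint32_t example_slow_load(target_ulong addr, int mmu_idx, void *retaddr)
{
    for (;;) {
        if (qemu_tlb_hit(addr, mmu_idx))         /* placeholder lookup */
            return do_mmu_access(addr, mmu_idx); /* placeholder access */
        /* Miss: refill, or longjmp out via raise_exception_err(). */
        tlb_fill(addr, 0, mmu_idx, retaddr);
    }
}
#endif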
2671 /* Segment registers load and store */
2672 target_ulong helper_load_sr (target_ulong sr_num)
2674 return env->sr[sr_num];
2677 void helper_store_sr (target_ulong sr_num, target_ulong val)
2679 ppc_store_sr(env, sr_num, val);
2682 /* SLB management */
2683 #if defined(TARGET_PPC64)
2684 target_ulong helper_load_slb (target_ulong slb_nr)
2686 return ppc_load_slb(env, slb_nr);
2689 void helper_store_slb (target_ulong slb_nr, target_ulong rs)
2691 ppc_store_slb(env, slb_nr, rs);
2694 void helper_slbia (void)
2696 ppc_slb_invalidate_all(env);
2699 void helper_slbie (target_ulong addr)
2701 ppc_slb_invalidate_one(env, addr);
2704 #endif /* defined(TARGET_PPC64) */
2706 /* TLB management */
2707 void helper_tlbia (void)
2709 ppc_tlb_invalidate_all(env);
2712 void helper_tlbie (target_ulong addr)
2714 ppc_tlb_invalidate_one(env, addr);
2717 /* Software-driven TLB management */
2718 /* PowerPC 602/603 software TLB load instruction helpers */
2719 static void do_6xx_tlb (target_ulong new_EPN, int is_code)
2721 target_ulong RPN, CMP, EPN;
2722 int way;
2724 RPN = env->spr[SPR_RPA];
2725 if (is_code) {
2726 CMP = env->spr[SPR_ICMP];
2727 EPN = env->spr[SPR_IMISS];
2728 } else {
2729 CMP = env->spr[SPR_DCMP];
2730 EPN = env->spr[SPR_DMISS];
2732 way = (env->spr[SPR_SRR1] >> 17) & 1;
2733 #if defined (DEBUG_SOFTWARE_TLB)
2734 if (loglevel != 0) {
2735 fprintf(logfile, "%s: EPN " TDX " " ADDRX " PTE0 " ADDRX
2736 " PTE1 " ADDRX " way %d\n",
2737 __func__, new_EPN, EPN, CMP, RPN, way);
2739 #endif
2740 /* Store this TLB */
2741 ppc6xx_tlb_store(env, (uint32_t)(new_EPN & TARGET_PAGE_MASK),
2742 way, is_code, CMP, RPN);
2745 void helper_6xx_tlbd (target_ulong EPN)
2747 do_6xx_tlb(EPN, 0);
2750 void helper_6xx_tlbi (target_ulong EPN)
2752 do_6xx_tlb(EPN, 1);
2755 /* PowerPC 74xx software TLB load instruction helpers */
2756 static void do_74xx_tlb (target_ulong new_EPN, int is_code)
2758 target_ulong RPN, CMP, EPN;
2759 int way;
2761 RPN = env->spr[SPR_PTELO];
2762 CMP = env->spr[SPR_PTEHI];
2763 EPN = env->spr[SPR_TLBMISS] & ~0x3;
2764 way = env->spr[SPR_TLBMISS] & 0x3;
2765 #if defined (DEBUG_SOFTWARE_TLB)
2766 if (loglevel != 0) {
2767 fprintf(logfile, "%s: EPN " TDX " " ADDRX " PTE0 " ADDRX
2768 " PTE1 " ADDRX " way %d\n",
2769 __func__, new_EPN, EPN, CMP, RPN, way);
2771 #endif
2772 /* Store this TLB */
2773 ppc6xx_tlb_store(env, (uint32_t)(new_EPN & TARGET_PAGE_MASK),
2774 way, is_code, CMP, RPN);
2777 void helper_74xx_tlbd (target_ulong EPN)
2779 do_74xx_tlb(EPN, 0);
2782 void helper_74xx_tlbi (target_ulong EPN)
2784 do_74xx_tlb(EPN, 1);
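/* Both software-TLB reload paths follow the same pattern: the guest miss
 * handler is expected to deposit the translation in SPRs and then issue
 * the TLB load instruction, which lands in one of the helpers above.  The
 * helpers only gather those SPR values and pass them to ppc6xx_tlb_store():
 *
 *   6xx:  RPN from SPR_RPA, CMP/EPN from SPR_{I,D}CMP and SPR_{I,D}MISS,
 *         way from SRR1 bit 17
 *   74xx: RPN from SPR_PTELO, CMP from SPR_PTEHI, EPN and way from
 *         SPR_TLBMISS
 */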
2787 static always_inline target_ulong booke_tlb_to_page_size (int size)
2789 return 1024 << (2 * size);
2792 static always_inline int booke_page_size_to_tlb (target_ulong page_size)
2794 int size;
2796 switch (page_size) {
2797 case 0x00000400UL:
2798 size = 0x0;
2799 break;
2800 case 0x00001000UL:
2801 size = 0x1;
2802 break;
2803 case 0x00004000UL:
2804 size = 0x2;
2805 break;
2806 case 0x00010000UL:
2807 size = 0x3;
2808 break;
2809 case 0x00040000UL:
2810 size = 0x4;
2811 break;
2812 case 0x00100000UL:
2813 size = 0x5;
2814 break;
2815 case 0x00400000UL:
2816 size = 0x6;
2817 break;
2818 case 0x01000000UL:
2819 size = 0x7;
2820 break;
2821 case 0x04000000UL:
2822 size = 0x8;
2823 break;
2824 case 0x10000000UL:
2825 size = 0x9;
2826 break;
2827 case 0x40000000UL:
2828 size = 0xA;
2829 break;
2830 #if defined (TARGET_PPC64)
2831 case 0x000100000000ULL:
2832 size = 0xB;
2833 break;
2834 case 0x000400000000ULL:
2835 size = 0xC;
2836 break;
2837 case 0x001000000000ULL:
2838 size = 0xD;
2839 break;
2840 case 0x004000000000ULL:
2841 size = 0xE;
2842 break;
2843 case 0x010000000000ULL:
2844 size = 0xF;
2845 break;
2846 #endif
2847 default:
2848 size = -1;
2849 break;
2852 return size;
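/* The BookE size field encodes page sizes as powers of four starting at
 * 1 KB: booke_tlb_to_page_size(n) == 1024 << (2 * n), i.e.
 *
 *     n = 0 -> 1 KB      n = 4 -> 256 KB    n = 8 -> 64 MB
 *     n = 1 -> 4 KB      n = 5 -> 1 MB      n = 9 -> 256 MB
 *     n = 2 -> 16 KB     n = 6 -> 4 MB      n = A -> 1 GB
 *     n = 3 -> 64 KB     n = 7 -> 16 MB
 *
 * booke_page_size_to_tlb() is the inverse lookup and returns -1 for any
 * size that is not 1 KB times a power of four.
 */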
2855 /* Helpers for 4xx TLB management */
2856 target_ulong helper_4xx_tlbre_lo (target_ulong entry)
2858 ppcemb_tlb_t *tlb;
2859 target_ulong ret;
2860 int size;
2862 entry &= 0x3F;
2863 tlb = &env->tlb[entry].tlbe;
2864 ret = tlb->EPN;
2865 if (tlb->prot & PAGE_VALID)
2866 ret |= 0x40; /* valid bit, matching the 0x40 test in helper_4xx_tlbwe_hi() */
2867 size = booke_page_size_to_tlb(tlb->size);
2868 if (size < 0 || size > 0x7)
2869 size = 1;
2870 ret |= size << 7;
2871 env->spr[SPR_40x_PID] = tlb->PID;
2872 return ret;
2875 target_ulong helper_4xx_tlbre_hi (target_ulong entry)
2877 ppcemb_tlb_t *tlb;
2878 target_ulong ret;
2880 entry &= 0x3F;
2881 tlb = &env->tlb[entry].tlbe;
2882 ret = tlb->RPN;
2883 if (tlb->prot & PAGE_EXEC)
2884 ret |= 0x200;
2885 if (tlb->prot & PAGE_WRITE)
2886 ret |= 0x100;
2887 return ret;
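/* The same encoding is parsed by helper_4xx_tlbwe_hi/lo below:
 *
 *   EPN word (tlbre_lo / tlbwe_hi): EPN | size << 7 (3-bit BookE size)
 *                                   | 0x40 valid | 0x20 little-endian
 *                                   (rejected below); low byte latched
 *                                   into tlb->attr on write
 *   RPN word (tlbre_hi / tlbwe_lo): RPN | 0x200 execute | 0x100 write
 *
 * Reading the EPN word also copies the entry's PID into SPR_40x_PID, and
 * writing it reloads the PID from SPR_40x_PID.
 */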
2890 void helper_4xx_tlbwe_hi (target_ulong entry, target_ulong val)
2892 ppcemb_tlb_t *tlb;
2893 target_ulong page, end;
2895 #if defined (DEBUG_SOFTWARE_TLB)
2896 if (loglevel != 0) {
2897 fprintf(logfile, "%s entry " TDX " val " TDX "\n", __func__, entry, val);
2899 #endif
2900 entry &= 0x3F;
2901 tlb = &env->tlb[entry].tlbe;
2902 /* Invalidate previous TLB (if it's valid) */
2903 if (tlb->prot & PAGE_VALID) {
2904 end = tlb->EPN + tlb->size;
2905 #if defined (DEBUG_SOFTWARE_TLB)
2906 if (loglevel != 0) {
2907 fprintf(logfile, "%s: invalidate old TLB %d start " ADDRX
2908 " end " ADDRX "\n", __func__, (int)entry, tlb->EPN, end);
2910 #endif
2911 for (page = tlb->EPN; page < end; page += TARGET_PAGE_SIZE)
2912 tlb_flush_page(env, page);
2914 tlb->size = booke_tlb_to_page_size((val >> 7) & 0x7);
2915 /* We cannot handle TLB size < TARGET_PAGE_SIZE.
2916 * If this ever occurs, one should use the ppcemb target instead
2917 * of the ppc or ppc64 one
2919 if ((val & 0x40) && tlb->size < TARGET_PAGE_SIZE) {
2920 cpu_abort(env, "TLB size " TARGET_FMT_lu " < %u "
2921 "are not supported (%d)\n",
2922 tlb->size, TARGET_PAGE_SIZE, (int)((val >> 7) & 0x7));
2924 tlb->EPN = val & ~(tlb->size - 1);
2925 if (val & 0x40)
2926 tlb->prot |= PAGE_VALID;
2927 else
2928 tlb->prot &= ~PAGE_VALID;
2929 if (val & 0x20) {
2930 /* XXX: TO BE FIXED */
2931 cpu_abort(env, "Little-endian TLB entries are not supported by now\n");
2933 tlb->PID = env->spr[SPR_40x_PID]; /* PID */
2934 tlb->attr = val & 0xFF;
2935 #if defined (DEBUG_SOFTWARE_TLB)
2936 if (loglevel != 0) {
2937 fprintf(logfile, "%s: set up TLB %d RPN " PADDRX " EPN " ADDRX
2938 " size " ADDRX " prot %c%c%c%c PID %d\n", __func__,
2939 (int)entry, tlb->RPN, tlb->EPN, tlb->size,
2940 tlb->prot & PAGE_READ ? 'r' : '-',
2941 tlb->prot & PAGE_WRITE ? 'w' : '-',
2942 tlb->prot & PAGE_EXEC ? 'x' : '-',
2943 tlb->prot & PAGE_VALID ? 'v' : '-', (int)tlb->PID);
2945 #endif
2946 /* Invalidate new TLB (if valid) */
2947 if (tlb->prot & PAGE_VALID) {
2948 end = tlb->EPN + tlb->size;
2949 #if defined (DEBUG_SOFTWARE_TLB)
2950 if (loglevel != 0) {
2951 fprintf(logfile, "%s: invalidate TLB %d start " ADDRX
2952 " end " ADDRX "\n", __func__, (int)T0, tlb->EPN, end);
2954 #endif
2955 for (page = tlb->EPN; page < end; page += TARGET_PAGE_SIZE)
2956 tlb_flush_page(env, page);
2960 void helper_4xx_tlbwe_lo (target_ulong entry, target_ulong val)
2962 ppcemb_tlb_t *tlb;
2964 #if defined (DEBUG_SOFTWARE_TLB)
2965 if (loglevel != 0) {
2966 fprintf(logfile, "%s entry " TDX " val " TDX "\n", __func__, entry, val);
2968 #endif
2969 entry &= 0x3F;
2970 tlb = &env->tlb[entry].tlbe;
2971 tlb->RPN = val & 0xFFFFFC00;
2972 tlb->prot = PAGE_READ;
2973 if (val & 0x200)
2974 tlb->prot |= PAGE_EXEC;
2975 if (val & 0x100)
2976 tlb->prot |= PAGE_WRITE;
2977 #if defined (DEBUG_SOFTWARE_TLB)
2978 if (loglevel != 0) {
2979 fprintf(logfile, "%s: set up TLB %d RPN " PADDRX " EPN " ADDRX
2980 " size " ADDRX " prot %c%c%c%c PID %d\n", __func__,
2981 (int)entry, tlb->RPN, tlb->EPN, tlb->size,
2982 tlb->prot & PAGE_READ ? 'r' : '-',
2983 tlb->prot & PAGE_WRITE ? 'w' : '-',
2984 tlb->prot & PAGE_EXEC ? 'x' : '-',
2985 tlb->prot & PAGE_VALID ? 'v' : '-', (int)tlb->PID);
2987 #endif
2990 target_ulong helper_4xx_tlbsx (target_ulong address)
2992 return ppcemb_tlb_search(env, address, env->spr[SPR_40x_PID]);
2995 /* PowerPC 440 TLB management */
2996 void helper_440_tlbwe (uint32_t word, target_ulong entry, target_ulong value)
2998 ppcemb_tlb_t *tlb;
2999 target_ulong EPN, RPN, size;
3000 int do_flush_tlbs;
3002 #if defined (DEBUG_SOFTWARE_TLB)
3003 if (loglevel != 0) {
3004 fprintf(logfile, "%s word %d entry " TDX " value " TDX "\n",
3005 __func__, word, entry, value);
3007 #endif
3008 do_flush_tlbs = 0;
3009 entry &= 0x3F;
3010 tlb = &env->tlb[entry].tlbe;
3011 switch (word) {
3012 default:
3013 /* Just here to please gcc */
3014 case 0:
3015 EPN = value & 0xFFFFFC00;
3016 if ((tlb->prot & PAGE_VALID) && EPN != tlb->EPN)
3017 do_flush_tlbs = 1;
3018 tlb->EPN = EPN;
3019 size = booke_tlb_to_page_size((value >> 4) & 0xF);
3020 if ((tlb->prot & PAGE_VALID) && tlb->size < size)
3021 do_flush_tlbs = 1;
3022 tlb->size = size;
3023 tlb->attr &= ~0x1;
3024 tlb->attr |= (value >> 8) & 1;
3025 if (value & 0x200) {
3026 tlb->prot |= PAGE_VALID;
3027 } else {
3028 if (tlb->prot & PAGE_VALID) {
3029 tlb->prot &= ~PAGE_VALID;
3030 do_flush_tlbs = 1;
3033 tlb->PID = env->spr[SPR_440_MMUCR] & 0x000000FF;
3034 if (do_flush_tlbs)
3035 tlb_flush(env, 1);
3036 break;
3037 case 1:
3038 RPN = value & 0xFFFFFC0F;
3039 if ((tlb->prot & PAGE_VALID) && tlb->RPN != RPN)
3040 tlb_flush(env, 1);
3041 tlb->RPN = RPN;
3042 break;
3043 case 2:
3044 tlb->attr = (tlb->attr & 0x1) | (value & 0x0000FF00);
3045 tlb->prot = tlb->prot & PAGE_VALID;
3046 if (value & 0x1)
3047 tlb->prot |= PAGE_READ << 4;
3048 if (value & 0x2)
3049 tlb->prot |= PAGE_WRITE << 4;
3050 if (value & 0x4)
3051 tlb->prot |= PAGE_EXEC << 4;
3052 if (value & 0x8)
3053 tlb->prot |= PAGE_READ;
3054 if (value & 0x10)
3055 tlb->prot |= PAGE_WRITE;
3056 if (value & 0x20)
3057 tlb->prot |= PAGE_EXEC;
3058 break;
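/* Summary of the three 440 TLB words as decoded above:
 *
 *   word 0: EPN (0xFFFFFC00) | size (bits 4..7, BookE encoding)
 *           | attr bit 0 (bit 8) | valid (0x200); the PID is taken from
 *           the low byte of SPR_440_MMUCR
 *   word 1: RPN (0xFFFFFC0F)
 *   word 2: attr bits 8..15, plus two read/write/execute permission sets:
 *           bits 0x1/0x2/0x4 are stored shifted left by 4 in tlb->prot,
 *           bits 0x8/0x10/0x20 are stored unshifted
 *
 * For example, writing 0x3F as word 2 yields tlb->prot == 0x77 (plus any
 * preserved PAGE_VALID bit), given QEMU's PAGE_READ/WRITE/EXEC values of
 * 1/2/4.
 */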
3062 target_ulong helper_440_tlbre (uint32_t word, target_ulong entry)
3064 ppcemb_tlb_t *tlb;
3065 target_ulong ret;
3066 int size;
3068 entry &= 0x3F;
3069 tlb = &env->tlb[entry].tlbe;
3070 switch (word) {
3071 default:
3072 /* Just here to please gcc */
3073 case 0:
3074 ret = tlb->EPN;
3075 size = booke_page_size_to_tlb(tlb->size);
3076 if (size < 0 || size > 0xF)
3077 size = 1;
3078 ret |= size << 4;
3079 if (tlb->attr & 0x1)
3080 ret |= 0x100;
3081 if (tlb->prot & PAGE_VALID)
3082 ret |= 0x200;
3083 env->spr[SPR_440_MMUCR] &= ~0x000000FF;
3084 env->spr[SPR_440_MMUCR] |= tlb->PID;
3085 break;
3086 case 1:
3087 ret = tlb->RPN;
3088 break;
3089 case 2:
3090 ret = tlb->attr & ~0x1;
3091 if (tlb->prot & (PAGE_READ << 4))
3092 ret |= 0x1;
3093 if (tlb->prot & (PAGE_WRITE << 4))
3094 ret |= 0x2;
3095 if (tlb->prot & (PAGE_EXEC << 4))
3096 ret |= 0x4;
3097 if (tlb->prot & PAGE_READ)
3098 ret |= 0x8;
3099 if (tlb->prot & PAGE_WRITE)
3100 ret |= 0x10;
3101 if (tlb->prot & PAGE_EXEC)
3102 ret |= 0x20;
3103 break;
3105 return ret;
3108 target_ulong helper_440_tlbsx (target_ulong address)
3110 return ppcemb_tlb_search(env, address, env->spr[SPR_440_MMUCR] & 0xFF);
3113 #endif /* !CONFIG_USER_ONLY */