target-ppc: fix fsel instruction
[qemu/mini2440/sniper_sniper_test.git] / target-ppc / op_helper.c
1 /*
2 * PowerPC emulation helpers for qemu.
4 * Copyright (c) 2003-2007 Jocelyn Mayer
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
20 #include "exec.h"
21 #include "host-utils.h"
22 #include "helper.h"
24 #include "helper_regs.h"
26 //#define DEBUG_OP
27 //#define DEBUG_EXCEPTIONS
28 //#define DEBUG_SOFTWARE_TLB
30 /*****************************************************************************/
31 /* Exceptions processing helpers */
33 void helper_raise_exception_err (uint32_t exception, uint32_t error_code)
35 #if 0
36 printf("Raise exception %3x code : %d\n", exception, error_code);
37 #endif
38 env->exception_index = exception;
39 env->error_code = error_code;
40 cpu_loop_exit();
43 void helper_raise_exception (uint32_t exception)
45 helper_raise_exception_err(exception, 0);
48 /*****************************************************************************/
49 /* Registers load and stores */
50 target_ulong helper_load_cr (void)
52 return (env->crf[0] << 28) |
53 (env->crf[1] << 24) |
54 (env->crf[2] << 20) |
55 (env->crf[3] << 16) |
56 (env->crf[4] << 12) |
57 (env->crf[5] << 8) |
58 (env->crf[6] << 4) |
59 (env->crf[7] << 0);
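/* Illustrative note (not part of the original file): helper_load_cr and
 * helper_store_cr treat CR as eight 4-bit crf[] fields, crf[0] in bits
 * 31:28 down to crf[7] in bits 3:0.  A minimal pack sketch under that
 * assumption:
 */
#if 0 /* example only, not built */
static uint32_t example_pack_cr(const uint8_t crf[8])
{
    uint32_t cr = 0;
    int i;
    for (i = 0; i < 8; i++) {
        cr |= (uint32_t)(crf[i] & 0xF) << (4 * (7 - i));
    }
    return cr; /* e.g. crf[0] = 0x8 alone yields 0x80000000 */
}
#endif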
62 void helper_store_cr (target_ulong val, uint32_t mask)
64 int i, sh;
66 for (i = 0, sh = 7; i < 8; i++, sh--) {
67 if (mask & (1 << sh))
68 env->crf[i] = (val >> (sh * 4)) & 0xFUL;
72 /*****************************************************************************/
73 /* SPR accesses */
74 void helper_load_dump_spr (uint32_t sprn)
76 if (loglevel != 0) {
77 fprintf(logfile, "Read SPR %d %03x => " ADDRX "\n",
78 sprn, sprn, env->spr[sprn]);
82 void helper_store_dump_spr (uint32_t sprn)
84 if (loglevel != 0) {
85 fprintf(logfile, "Write SPR %d %03x <= " ADDRX "\n",
86 sprn, sprn, env->spr[sprn]);
90 target_ulong helper_load_tbl (void)
92 return cpu_ppc_load_tbl(env);
95 target_ulong helper_load_tbu (void)
97 return cpu_ppc_load_tbu(env);
100 target_ulong helper_load_atbl (void)
102 return cpu_ppc_load_atbl(env);
105 target_ulong helper_load_atbu (void)
107 return cpu_ppc_load_atbu(env);
110 target_ulong helper_load_601_rtcl (void)
112 return cpu_ppc601_load_rtcl(env);
115 target_ulong helper_load_601_rtcu (void)
117 return cpu_ppc601_load_rtcu(env);
120 #if !defined(CONFIG_USER_ONLY)
121 #if defined (TARGET_PPC64)
122 void helper_store_asr (target_ulong val)
124 ppc_store_asr(env, val);
126 #endif
128 void helper_store_sdr1 (target_ulong val)
130 ppc_store_sdr1(env, val);
133 void helper_store_tbl (target_ulong val)
135 cpu_ppc_store_tbl(env, val);
138 void helper_store_tbu (target_ulong val)
140 cpu_ppc_store_tbu(env, val);
143 void helper_store_atbl (target_ulong val)
145 cpu_ppc_store_atbl(env, val);
148 void helper_store_atbu (target_ulong val)
150 cpu_ppc_store_atbu(env, val);
153 void helper_store_601_rtcl (target_ulong val)
155 cpu_ppc601_store_rtcl(env, val);
158 void helper_store_601_rtcu (target_ulong val)
160 cpu_ppc601_store_rtcu(env, val);
163 target_ulong helper_load_decr (void)
165 return cpu_ppc_load_decr(env);
168 void helper_store_decr (target_ulong val)
170 cpu_ppc_store_decr(env, val);
173 void helper_store_hid0_601 (target_ulong val)
175 target_ulong hid0;
177 hid0 = env->spr[SPR_HID0];
178 if ((val ^ hid0) & 0x00000008) {
179 /* Change current endianness */
180 env->hflags &= ~(1 << MSR_LE);
181 env->hflags_nmsr &= ~(1 << MSR_LE);
182 env->hflags_nmsr |= (1 << MSR_LE) & (((val >> 3) & 1) << MSR_LE);
183 env->hflags |= env->hflags_nmsr;
184 if (loglevel != 0) {
185 fprintf(logfile, "%s: set endianness to %c => " ADDRX "\n",
186 __func__, val & 0x8 ? 'l' : 'b', env->hflags);
189 env->spr[SPR_HID0] = (uint32_t)val;
192 void helper_store_403_pbr (uint32_t num, target_ulong value)
194 if (likely(env->pb[num] != value)) {
195 env->pb[num] = value;
196 /* Should be optimized */
197 tlb_flush(env, 1);
201 target_ulong helper_load_40x_pit (void)
203 return load_40x_pit(env);
206 void helper_store_40x_pit (target_ulong val)
208 store_40x_pit(env, val);
211 void helper_store_40x_dbcr0 (target_ulong val)
213 store_40x_dbcr0(env, val);
216 void helper_store_40x_sler (target_ulong val)
218 store_40x_sler(env, val);
221 void helper_store_booke_tcr (target_ulong val)
223 store_booke_tcr(env, val);
226 void helper_store_booke_tsr (target_ulong val)
228 store_booke_tsr(env, val);
231 void helper_store_ibatu (uint32_t nr, target_ulong val)
233 ppc_store_ibatu(env, nr, val);
236 void helper_store_ibatl (uint32_t nr, target_ulong val)
238 ppc_store_ibatl(env, nr, val);
241 void helper_store_dbatu (uint32_t nr, target_ulong val)
243 ppc_store_dbatu(env, nr, val);
246 void helper_store_dbatl (uint32_t nr, target_ulong val)
248 ppc_store_dbatl(env, nr, val);
251 void helper_store_601_batl (uint32_t nr, target_ulong val)
253 ppc_store_ibatl_601(env, nr, val);
256 void helper_store_601_batu (uint32_t nr, target_ulong val)
258 ppc_store_ibatu_601(env, nr, val);
260 #endif
262 /*****************************************************************************/
263 /* Memory load and stores */
265 static always_inline target_ulong addr_add(target_ulong addr, target_long arg)
267 #if defined(TARGET_PPC64)
268 if (!msr_sf)
269 return (uint32_t)(addr + arg);
270 else
271 #endif
272 return addr + arg;
275 void helper_lmw (target_ulong addr, uint32_t reg)
277 for (; reg < 32; reg++) {
278 if (msr_le)
279 env->gpr[reg] = bswap32(ldl(addr));
280 else
281 env->gpr[reg] = ldl(addr);
282 addr = addr_add(addr, 4);
286 void helper_stmw (target_ulong addr, uint32_t reg)
288 for (; reg < 32; reg++) {
289 if (msr_le)
290 stl(addr, bswap32((uint32_t)env->gpr[reg]));
291 else
292 stl(addr, (uint32_t)env->gpr[reg]);
293 addr = addr_add(addr, 4);
297 void helper_lsw(target_ulong addr, uint32_t nb, uint32_t reg)
299 int sh;
300 for (; nb > 3; nb -= 4) {
301 env->gpr[reg] = ldl(addr);
302 reg = (reg + 1) % 32;
303 addr = addr_add(addr, 4);
305 if (unlikely(nb > 0)) {
306 env->gpr[reg] = 0;
307 for (sh = 24; nb > 0; nb--, sh -= 8) {
308 env->gpr[reg] |= ldub(addr) << sh;
309 addr = addr_add(addr, 1);
313 /* PPC32 specification says we must generate an exception if
314 * rA is in the range of registers to be loaded.
315  * On the other hand, IBM says this is valid, but rA won't be loaded.
316 * For now, I'll follow the spec...
318 void helper_lswx(target_ulong addr, uint32_t reg, uint32_t ra, uint32_t rb)
320 if (likely(xer_bc != 0)) {
321 if (unlikely((ra != 0 && reg < ra && (reg + xer_bc) > ra) ||
322 (reg < rb && (reg + xer_bc) > rb))) {
323 helper_raise_exception_err(POWERPC_EXCP_PROGRAM,
324 POWERPC_EXCP_INVAL |
325 POWERPC_EXCP_INVAL_LSWX);
326 } else {
327 helper_lsw(addr, xer_bc, reg);
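/* Worked example of the invalid-form check above (illustrative values):
 * with reg (rD) = 5, ra = 6 and XER byte count xer_bc = 8, the test
 * "reg < ra && reg + xer_bc > ra" holds (5 < 6 and 13 > 6), so the helper
 * raises POWERPC_EXCP_INVAL_LSWX instead of calling helper_lsw.
 */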
332 void helper_stsw(target_ulong addr, uint32_t nb, uint32_t reg)
334 int sh;
335 for (; nb > 3; nb -= 4) {
336 stl(addr, env->gpr[reg]);
337 reg = (reg + 1) % 32;
338 addr = addr_add(addr, 4);
340 if (unlikely(nb > 0)) {
341 for (sh = 24; nb > 0; nb--, sh -= 8)
342 stb(addr, (env->gpr[reg] >> sh) & 0xFF);
343 addr = addr_add(addr, 1);
347 static void do_dcbz(target_ulong addr, int dcache_line_size)
349 addr &= ~(dcache_line_size - 1);
350 int i;
351 for (i = 0 ; i < dcache_line_size ; i += 4) {
352 stl(addr + i , 0);
354 if (env->reserve == addr)
355 env->reserve = (target_ulong)-1ULL;
358 void helper_dcbz(target_ulong addr)
360 do_dcbz(addr, env->dcache_line_size);
363 void helper_dcbz_970(target_ulong addr)
365 if (((env->spr[SPR_970_HID5] >> 7) & 0x3) == 1)
366 do_dcbz(addr, 32);
367 else
368 do_dcbz(addr, env->dcache_line_size);
371 void helper_icbi(target_ulong addr)
373 uint32_t tmp;
375 addr &= ~(env->dcache_line_size - 1);
376     /* Invalidate one cache line:
377 * PowerPC specification says this is to be treated like a load
378 * (not a fetch) by the MMU. To be sure it will be so,
379 * do the load "by hand".
381 tmp = ldl(addr);
382 tb_invalidate_page_range(addr, addr + env->icache_line_size);
385 // XXX: to be tested
386 target_ulong helper_lscbx (target_ulong addr, uint32_t reg, uint32_t ra, uint32_t rb)
388 int i, c, d;
389 d = 24;
390 for (i = 0; i < xer_bc; i++) {
391 c = ldub(addr);
392 addr = addr_add(addr, 1);
393 /* ra (if not 0) and rb are never modified */
394 if (likely(reg != rb && (ra == 0 || reg != ra))) {
395 env->gpr[reg] = (env->gpr[reg] & ~(0xFF << d)) | (c << d);
397 if (unlikely(c == xer_cmp))
398 break;
399 if (likely(d != 0)) {
400 d -= 8;
401 } else {
402 d = 24;
403 reg++;
404 reg = reg & 0x1F;
407 return i;
410 /*****************************************************************************/
411 /* Fixed point operations helpers */
412 #if defined(TARGET_PPC64)
414 /* multiply high word */
415 uint64_t helper_mulhd (uint64_t arg1, uint64_t arg2)
417 uint64_t tl, th;
419 muls64(&tl, &th, arg1, arg2);
420 return th;
423 /* multiply high word unsigned */
424 uint64_t helper_mulhdu (uint64_t arg1, uint64_t arg2)
426 uint64_t tl, th;
428 mulu64(&tl, &th, arg1, arg2);
429 return th;
432 uint64_t helper_mulldo (uint64_t arg1, uint64_t arg2)
434 int64_t th;
435 uint64_t tl;
437 muls64(&tl, (uint64_t *)&th, arg1, arg2);
438 /* If th != 0 && th != -1, then we had an overflow */
439 if (likely((uint64_t)(th + 1) <= 1)) {
440 env->xer &= ~(1 << XER_OV);
441 } else {
442 env->xer |= (1 << XER_OV) | (1 << XER_SO);
444 return (int64_t)tl;
446 #endif
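/* Note on the overflow test in helper_mulldo above: (uint64_t)(th + 1) <= 1
 * is true exactly when the high half th of the 128-bit product is 0 or -1.
 * Illustrative values: 3 * 5 gives th = 0, so XER[OV] is cleared, while
 * (int64_t)(1ULL << 32) squared gives th = 1, tl = 0, so OV and SO are set.
 */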
448 target_ulong helper_cntlzw (target_ulong t)
450 return clz32(t);
453 #if defined(TARGET_PPC64)
454 target_ulong helper_cntlzd (target_ulong t)
456 return clz64(t);
458 #endif
460 /* shift right arithmetic helper */
461 target_ulong helper_sraw (target_ulong value, target_ulong shift)
463 int32_t ret;
465 if (likely(!(shift & 0x20))) {
466 if (likely((uint32_t)shift != 0)) {
467 shift &= 0x1f;
468 ret = (int32_t)value >> shift;
469 if (likely(ret >= 0 || (value & ((1 << shift) - 1)) == 0)) {
470 env->xer &= ~(1 << XER_CA);
471 } else {
472 env->xer |= (1 << XER_CA);
474 } else {
475 ret = (int32_t)value;
476 env->xer &= ~(1 << XER_CA);
478 } else {
479 ret = (int32_t)value >> 31;
480 if (ret) {
481 env->xer |= (1 << XER_CA);
482 } else {
483 env->xer &= ~(1 << XER_CA);
486 return (target_long)ret;
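/* Worked example of the carry logic above (illustrative values): sraw of
 * value = 0xFFFFFFF5 (-11) by shift = 2 gives ret = -3; ret is negative and
 * (value & 0x3) == 1, i.e. one bits were shifted out, so XER[CA] is set.
 * Shifting +11 right by 2 gives 2 and clears CA.
 */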
489 #if defined(TARGET_PPC64)
490 target_ulong helper_srad (target_ulong value, target_ulong shift)
492 int64_t ret;
494 if (likely(!(shift & 0x40))) {
495 if (likely((uint64_t)shift != 0)) {
496 shift &= 0x3f;
497 ret = (int64_t)value >> shift;
498 if (likely(ret >= 0 || (value & ((1 << shift) - 1)) == 0)) {
499 env->xer &= ~(1 << XER_CA);
500 } else {
501 env->xer |= (1 << XER_CA);
503 } else {
504 ret = (int64_t)value;
505 env->xer &= ~(1 << XER_CA);
507 } else {
508 ret = (int64_t)value >> 63;
509 if (ret) {
510 env->xer |= (1 << XER_CA);
511 } else {
512 env->xer &= ~(1 << XER_CA);
515 return ret;
517 #endif
519 target_ulong helper_popcntb (target_ulong val)
521 val = (val & 0x55555555) + ((val >> 1) & 0x55555555);
522 val = (val & 0x33333333) + ((val >> 2) & 0x33333333);
523 val = (val & 0x0f0f0f0f) + ((val >> 4) & 0x0f0f0f0f);
524 return val;
527 #if defined(TARGET_PPC64)
528 target_ulong helper_popcntb_64 (target_ulong val)
530 val = (val & 0x5555555555555555ULL) + ((val >> 1) & 0x5555555555555555ULL);
531 val = (val & 0x3333333333333333ULL) + ((val >> 2) & 0x3333333333333333ULL);
532 val = (val & 0x0f0f0f0f0f0f0f0fULL) + ((val >> 4) & 0x0f0f0f0f0f0f0f0fULL);
533 return val;
535 #endif
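/* popcntb produces a population count per byte (SWAR), not a single total:
 * each result byte holds the number of set bits of the corresponding input
 * byte, e.g. helper_popcntb(0x0103070F) == 0x01020304.  A straightforward
 * per-byte reference, as an illustrative sketch only:
 */
#if 0 /* example only, not built */
static uint32_t example_popcntb_ref(uint32_t val)
{
    uint32_t res = 0;
    int i, j;
    for (i = 0; i < 4; i++) {
        uint32_t byte = (val >> (8 * i)) & 0xFF, cnt = 0;
        for (j = 0; j < 8; j++) {
            cnt += (byte >> j) & 1;
        }
        res |= cnt << (8 * i);
    }
    return res;
}
#endif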
537 /*****************************************************************************/
538 /* Floating point operations helpers */
539 uint64_t helper_float32_to_float64(uint32_t arg)
541 CPU_FloatU f;
542 CPU_DoubleU d;
543 f.l = arg;
544 d.d = float32_to_float64(f.f, &env->fp_status);
545 return d.ll;
548 uint32_t helper_float64_to_float32(uint64_t arg)
550 CPU_FloatU f;
551 CPU_DoubleU d;
552 d.ll = arg;
553 f.f = float64_to_float32(d.d, &env->fp_status);
554 return f.l;
557 static always_inline int fpisneg (float64 d)
559 CPU_DoubleU u;
561 u.d = d;
563 return u.ll >> 63 != 0;
566 static always_inline int isden (float64 d)
568 CPU_DoubleU u;
570 u.d = d;
572 return ((u.ll >> 52) & 0x7FF) == 0;
575 static always_inline int iszero (float64 d)
577 CPU_DoubleU u;
579 u.d = d;
581 return (u.ll & ~0x8000000000000000ULL) == 0;
584 static always_inline int isinfinity (float64 d)
586 CPU_DoubleU u;
588 u.d = d;
590 return ((u.ll >> 52) & 0x7FF) == 0x7FF &&
591 (u.ll & 0x000FFFFFFFFFFFFFULL) == 0;
594 #ifdef CONFIG_SOFTFLOAT
595 static always_inline int isfinite (float64 d)
597 CPU_DoubleU u;
599 u.d = d;
601 return (((u.ll >> 52) & 0x7FF) != 0x7FF);
604 static always_inline int isnormal (float64 d)
606 CPU_DoubleU u;
608 u.d = d;
610 uint32_t exp = (u.ll >> 52) & 0x7FF;
611 return ((0 < exp) && (exp < 0x7FF));
613 #endif
615 uint32_t helper_compute_fprf (uint64_t arg, uint32_t set_fprf)
617 CPU_DoubleU farg;
618 int isneg;
619 int ret;
620 farg.ll = arg;
621 isneg = fpisneg(farg.d);
622 if (unlikely(float64_is_nan(farg.d))) {
623 if (float64_is_signaling_nan(farg.d)) {
624 /* Signaling NaN: flags are undefined */
625 ret = 0x00;
626 } else {
627 /* Quiet NaN */
628 ret = 0x11;
630 } else if (unlikely(isinfinity(farg.d))) {
631 /* +/- infinity */
632 if (isneg)
633 ret = 0x09;
634 else
635 ret = 0x05;
636 } else {
637 if (iszero(farg.d)) {
638 /* +/- zero */
639 if (isneg)
640 ret = 0x12;
641 else
642 ret = 0x02;
643 } else {
644 if (isden(farg.d)) {
645 /* Denormalized numbers */
646 ret = 0x10;
647 } else {
648 /* Normalized numbers */
649 ret = 0x00;
651 if (isneg) {
652 ret |= 0x08;
653 } else {
654 ret |= 0x04;
658 if (set_fprf) {
659 /* We update FPSCR_FPRF */
660 env->fpscr &= ~(0x1F << FPSCR_FPRF);
661 env->fpscr |= ret << FPSCR_FPRF;
663 /* We just need fpcc to update Rc1 */
664 return ret & 0xF;
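/* FPRF class codes produced by the helper above (C bit plus FPCC):
 * 0x11 quiet NaN, 0x09 -infinity, 0x05 +infinity, 0x12 -zero, 0x02 +zero,
 * 0x18 -denormal, 0x14 +denormal, 0x08 -normal and 0x04 +normal
 * (signaling NaNs leave the flags undefined, encoded as 0x00 here).
 */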
667 /* Floating-point invalid operations exception */
668 static always_inline uint64_t fload_invalid_op_excp (int op)
670 uint64_t ret = 0;
671 int ve;
673 ve = fpscr_ve;
674 if (op & POWERPC_EXCP_FP_VXSNAN) {
675 /* Operation on signaling NaN */
676 env->fpscr |= 1 << FPSCR_VXSNAN;
678 if (op & POWERPC_EXCP_FP_VXSOFT) {
679 /* Software-defined condition */
680 env->fpscr |= 1 << FPSCR_VXSOFT;
682 switch (op & ~(POWERPC_EXCP_FP_VXSOFT | POWERPC_EXCP_FP_VXSNAN)) {
683 case POWERPC_EXCP_FP_VXISI:
684 /* Magnitude subtraction of infinities */
685 env->fpscr |= 1 << FPSCR_VXISI;
686 goto update_arith;
687 case POWERPC_EXCP_FP_VXIDI:
688 /* Division of infinity by infinity */
689 env->fpscr |= 1 << FPSCR_VXIDI;
690 goto update_arith;
691 case POWERPC_EXCP_FP_VXZDZ:
692 /* Division of zero by zero */
693 env->fpscr |= 1 << FPSCR_VXZDZ;
694 goto update_arith;
695 case POWERPC_EXCP_FP_VXIMZ:
696 /* Multiplication of zero by infinity */
697 env->fpscr |= 1 << FPSCR_VXIMZ;
698 goto update_arith;
699 case POWERPC_EXCP_FP_VXVC:
700 /* Ordered comparison of NaN */
701 env->fpscr |= 1 << FPSCR_VXVC;
702 env->fpscr &= ~(0xF << FPSCR_FPCC);
703 env->fpscr |= 0x11 << FPSCR_FPCC;
704 /* We must update the target FPR before raising the exception */
705 if (ve != 0) {
706 env->exception_index = POWERPC_EXCP_PROGRAM;
707 env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_VXVC;
708 /* Update the floating-point enabled exception summary */
709 env->fpscr |= 1 << FPSCR_FEX;
710             /* Exception is deferred */
711 ve = 0;
713 break;
714 case POWERPC_EXCP_FP_VXSQRT:
715 /* Square root of a negative number */
716 env->fpscr |= 1 << FPSCR_VXSQRT;
717 update_arith:
718 env->fpscr &= ~((1 << FPSCR_FR) | (1 << FPSCR_FI));
719 if (ve == 0) {
720 /* Set the result to quiet NaN */
721 ret = UINT64_MAX;
722 env->fpscr &= ~(0xF << FPSCR_FPCC);
723 env->fpscr |= 0x11 << FPSCR_FPCC;
725 break;
726 case POWERPC_EXCP_FP_VXCVI:
727 /* Invalid conversion */
728 env->fpscr |= 1 << FPSCR_VXCVI;
729 env->fpscr &= ~((1 << FPSCR_FR) | (1 << FPSCR_FI));
730 if (ve == 0) {
731 /* Set the result to quiet NaN */
732 ret = UINT64_MAX;
733 env->fpscr &= ~(0xF << FPSCR_FPCC);
734 env->fpscr |= 0x11 << FPSCR_FPCC;
736 break;
738 /* Update the floating-point invalid operation summary */
739 env->fpscr |= 1 << FPSCR_VX;
740 /* Update the floating-point exception summary */
741 env->fpscr |= 1 << FPSCR_FX;
742 if (ve != 0) {
743 /* Update the floating-point enabled exception summary */
744 env->fpscr |= 1 << FPSCR_FEX;
745 if (msr_fe0 != 0 || msr_fe1 != 0)
746 helper_raise_exception_err(POWERPC_EXCP_PROGRAM, POWERPC_EXCP_FP | op);
748 return ret;
751 static always_inline uint64_t float_zero_divide_excp (uint64_t arg1, uint64_t arg2)
753 env->fpscr |= 1 << FPSCR_ZX;
754 env->fpscr &= ~((1 << FPSCR_FR) | (1 << FPSCR_FI));
755 /* Update the floating-point exception summary */
756 env->fpscr |= 1 << FPSCR_FX;
757 if (fpscr_ze != 0) {
758 /* Update the floating-point enabled exception summary */
759 env->fpscr |= 1 << FPSCR_FEX;
760 if (msr_fe0 != 0 || msr_fe1 != 0) {
761 helper_raise_exception_err(POWERPC_EXCP_PROGRAM,
762 POWERPC_EXCP_FP | POWERPC_EXCP_FP_ZX);
764 } else {
765 /* Set the result to infinity */
766 arg1 = ((arg1 ^ arg2) & 0x8000000000000000ULL);
767 arg1 |= 0x7FFULL << 52;
769 return arg1;
772 static always_inline void float_overflow_excp (void)
774 env->fpscr |= 1 << FPSCR_OX;
775 /* Update the floating-point exception summary */
776 env->fpscr |= 1 << FPSCR_FX;
777 if (fpscr_oe != 0) {
778 /* XXX: should adjust the result */
779 /* Update the floating-point enabled exception summary */
780 env->fpscr |= 1 << FPSCR_FEX;
781 /* We must update the target FPR before raising the exception */
782 env->exception_index = POWERPC_EXCP_PROGRAM;
783 env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_OX;
784 } else {
785 env->fpscr |= 1 << FPSCR_XX;
786 env->fpscr |= 1 << FPSCR_FI;
790 static always_inline void float_underflow_excp (void)
792 env->fpscr |= 1 << FPSCR_UX;
793 /* Update the floating-point exception summary */
794 env->fpscr |= 1 << FPSCR_FX;
795 if (fpscr_ue != 0) {
796 /* XXX: should adjust the result */
797 /* Update the floating-point enabled exception summary */
798 env->fpscr |= 1 << FPSCR_FEX;
799 /* We must update the target FPR before raising the exception */
800 env->exception_index = POWERPC_EXCP_PROGRAM;
801 env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_UX;
805 static always_inline void float_inexact_excp (void)
807 env->fpscr |= 1 << FPSCR_XX;
808 /* Update the floating-point exception summary */
809 env->fpscr |= 1 << FPSCR_FX;
810 if (fpscr_xe != 0) {
811 /* Update the floating-point enabled exception summary */
812 env->fpscr |= 1 << FPSCR_FEX;
813 /* We must update the target FPR before raising the exception */
814 env->exception_index = POWERPC_EXCP_PROGRAM;
815 env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_XX;
819 static always_inline void fpscr_set_rounding_mode (void)
821 int rnd_type;
823 /* Set rounding mode */
824 switch (fpscr_rn) {
825 case 0:
826 /* Best approximation (round to nearest) */
827 rnd_type = float_round_nearest_even;
828 break;
829 case 1:
830 /* Smaller magnitude (round toward zero) */
831 rnd_type = float_round_to_zero;
832 break;
833 case 2:
834         /* Round toward +infinity */
835 rnd_type = float_round_up;
836 break;
837 default:
838 case 3:
839         /* Round toward -infinity */
840 rnd_type = float_round_down;
841 break;
843 set_float_rounding_mode(rnd_type, &env->fp_status);
846 void helper_fpscr_setbit (uint32_t bit)
848 int prev;
850 prev = (env->fpscr >> bit) & 1;
851 env->fpscr |= 1 << bit;
852 if (prev == 0) {
853 switch (bit) {
854 case FPSCR_VX:
855 env->fpscr |= 1 << FPSCR_FX;
856 if (fpscr_ve)
857 goto raise_ve;
858 case FPSCR_OX:
859 env->fpscr |= 1 << FPSCR_FX;
860 if (fpscr_oe)
861 goto raise_oe;
862 break;
863 case FPSCR_UX:
864 env->fpscr |= 1 << FPSCR_FX;
865 if (fpscr_ue)
866 goto raise_ue;
867 break;
868 case FPSCR_ZX:
869 env->fpscr |= 1 << FPSCR_FX;
870 if (fpscr_ze)
871 goto raise_ze;
872 break;
873 case FPSCR_XX:
874 env->fpscr |= 1 << FPSCR_FX;
875 if (fpscr_xe)
876 goto raise_xe;
877 break;
878 case FPSCR_VXSNAN:
879 case FPSCR_VXISI:
880 case FPSCR_VXIDI:
881 case FPSCR_VXZDZ:
882 case FPSCR_VXIMZ:
883 case FPSCR_VXVC:
884 case FPSCR_VXSOFT:
885 case FPSCR_VXSQRT:
886 case FPSCR_VXCVI:
887 env->fpscr |= 1 << FPSCR_VX;
888 env->fpscr |= 1 << FPSCR_FX;
889 if (fpscr_ve != 0)
890 goto raise_ve;
891 break;
892 case FPSCR_VE:
893 if (fpscr_vx != 0) {
894 raise_ve:
895 env->error_code = POWERPC_EXCP_FP;
896 if (fpscr_vxsnan)
897 env->error_code |= POWERPC_EXCP_FP_VXSNAN;
898 if (fpscr_vxisi)
899 env->error_code |= POWERPC_EXCP_FP_VXISI;
900 if (fpscr_vxidi)
901 env->error_code |= POWERPC_EXCP_FP_VXIDI;
902 if (fpscr_vxzdz)
903 env->error_code |= POWERPC_EXCP_FP_VXZDZ;
904 if (fpscr_vximz)
905 env->error_code |= POWERPC_EXCP_FP_VXIMZ;
906 if (fpscr_vxvc)
907 env->error_code |= POWERPC_EXCP_FP_VXVC;
908 if (fpscr_vxsoft)
909 env->error_code |= POWERPC_EXCP_FP_VXSOFT;
910 if (fpscr_vxsqrt)
911 env->error_code |= POWERPC_EXCP_FP_VXSQRT;
912 if (fpscr_vxcvi)
913 env->error_code |= POWERPC_EXCP_FP_VXCVI;
914 goto raise_excp;
916 break;
917 case FPSCR_OE:
918 if (fpscr_ox != 0) {
919 raise_oe:
920 env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_OX;
921 goto raise_excp;
923 break;
924 case FPSCR_UE:
925 if (fpscr_ux != 0) {
926 raise_ue:
927 env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_UX;
928 goto raise_excp;
930 break;
931 case FPSCR_ZE:
932 if (fpscr_zx != 0) {
933 raise_ze:
934 env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_ZX;
935 goto raise_excp;
937 break;
938 case FPSCR_XE:
939 if (fpscr_xx != 0) {
940 raise_xe:
941 env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_XX;
942 goto raise_excp;
944 break;
945 case FPSCR_RN1:
946 case FPSCR_RN:
947 fpscr_set_rounding_mode();
948 break;
949 default:
950 break;
951 raise_excp:
952 /* Update the floating-point enabled exception summary */
953 env->fpscr |= 1 << FPSCR_FEX;
954 /* We have to update Rc1 before raising the exception */
955 env->exception_index = POWERPC_EXCP_PROGRAM;
956 break;
961 void helper_store_fpscr (uint64_t arg, uint32_t mask)
964  * We use only the 32 LSBs of the incoming FPR
966 uint32_t prev, new;
967 int i;
969 prev = env->fpscr;
970 new = (uint32_t)arg;
971 new &= ~0x90000000;
972 new |= prev & 0x90000000;
973 for (i = 0; i < 7; i++) {
974 if (mask & (1 << i)) {
975 env->fpscr &= ~(0xF << (4 * i));
976 env->fpscr |= new & (0xF << (4 * i));
979 /* Update VX and FEX */
980 if (fpscr_ix != 0)
981 env->fpscr |= 1 << FPSCR_VX;
982 else
983 env->fpscr &= ~(1 << FPSCR_VX);
984 if ((fpscr_ex & fpscr_eex) != 0) {
985 env->fpscr |= 1 << FPSCR_FEX;
986 env->exception_index = POWERPC_EXCP_PROGRAM;
987 /* XXX: we should compute it properly */
988 env->error_code = POWERPC_EXCP_FP;
990 else
991 env->fpscr &= ~(1 << FPSCR_FEX);
992 fpscr_set_rounding_mode();
995 void helper_float_check_status (void)
997 #ifdef CONFIG_SOFTFLOAT
998 if (env->exception_index == POWERPC_EXCP_PROGRAM &&
999 (env->error_code & POWERPC_EXCP_FP)) {
1000         /* Deferred floating-point exception after target FPR update */
1001 if (msr_fe0 != 0 || msr_fe1 != 0)
1002 helper_raise_exception_err(env->exception_index, env->error_code);
1003 } else {
1004 int status = get_float_exception_flags(&env->fp_status);
1005 if (status & float_flag_overflow) {
1006 float_overflow_excp();
1007 } else if (status & float_flag_underflow) {
1008 float_underflow_excp();
1009 } else if (status & float_flag_inexact) {
1010 float_inexact_excp();
1013 #else
1014 if (env->exception_index == POWERPC_EXCP_PROGRAM &&
1015 (env->error_code & POWERPC_EXCP_FP)) {
1016         /* Deferred floating-point exception after target FPR update */
1017 if (msr_fe0 != 0 || msr_fe1 != 0)
1018 helper_raise_exception_err(env->exception_index, env->error_code);
1020 #endif
1023 #ifdef CONFIG_SOFTFLOAT
1024 void helper_reset_fpstatus (void)
1026 set_float_exception_flags(0, &env->fp_status);
1028 #endif
1030 /* fadd - fadd. */
1031 uint64_t helper_fadd (uint64_t arg1, uint64_t arg2)
1033 CPU_DoubleU farg1, farg2;
1035 farg1.ll = arg1;
1036 farg2.ll = arg2;
1037 #if USE_PRECISE_EMULATION
1038 if (unlikely(float64_is_signaling_nan(farg1.d) ||
1039 float64_is_signaling_nan(farg2.d))) {
1040 /* sNaN addition */
1041 farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
1042 } else if (likely(isfinite(farg1.d) || isfinite(farg2.d) ||
1043 fpisneg(farg1.d) == fpisneg(farg2.d))) {
1044 farg1.d = float64_add(farg1.d, farg2.d, &env->fp_status);
1045 } else {
1046 /* Magnitude subtraction of infinities */
1047 farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXISI);
1049 #else
1050 farg1.d = float64_add(farg1.d, farg2.d, &env->fp_status);
1051 #endif
1052 return farg1.ll;
1055 /* fsub - fsub. */
1056 uint64_t helper_fsub (uint64_t arg1, uint64_t arg2)
1058 CPU_DoubleU farg1, farg2;
1060 farg1.ll = arg1;
1061 farg2.ll = arg2;
1062 #if USE_PRECISE_EMULATION
1064 if (unlikely(float64_is_signaling_nan(farg1.d) ||
1065 float64_is_signaling_nan(farg2.d))) {
1066 /* sNaN subtraction */
1067 farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
1068 } else if (likely(isfinite(farg1.d) || isfinite(farg2.d) ||
1069 fpisneg(farg1.d) != fpisneg(farg2.d))) {
1070 farg1.d = float64_sub(farg1.d, farg2.d, &env->fp_status);
1071 } else {
1072 /* Magnitude subtraction of infinities */
1073 farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXISI);
1076 #else
1077 farg1.d = float64_sub(farg1.d, farg2.d, &env->fp_status);
1078 #endif
1079 return farg1.ll;
1082 /* fmul - fmul. */
1083 uint64_t helper_fmul (uint64_t arg1, uint64_t arg2)
1085 CPU_DoubleU farg1, farg2;
1087 farg1.ll = arg1;
1088 farg2.ll = arg2;
1089 #if USE_PRECISE_EMULATION
1090 if (unlikely(float64_is_signaling_nan(farg1.d) ||
1091 float64_is_signaling_nan(farg2.d))) {
1092 /* sNaN multiplication */
1093 farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
1094 } else if (unlikely((isinfinity(farg1.d) && iszero(farg2.d)) ||
1095 (iszero(farg1.d) && isinfinity(farg2.d)))) {
1096 /* Multiplication of zero by infinity */
1097 farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXIMZ);
1098 } else {
1099 farg1.d = float64_mul(farg1.d, farg2.d, &env->fp_status);
1101 #else
1102 farg1.d = float64_mul(farg1.d, farg2.d, &env->fp_status);
1103 #endif
1104 return farg1.ll;
1107 /* fdiv - fdiv. */
1108 uint64_t helper_fdiv (uint64_t arg1, uint64_t arg2)
1110 CPU_DoubleU farg1, farg2;
1112 farg1.ll = arg1;
1113 farg2.ll = arg2;
1114 #if USE_PRECISE_EMULATION
1115 if (unlikely(float64_is_signaling_nan(farg1.d) ||
1116 float64_is_signaling_nan(farg2.d))) {
1117 /* sNaN division */
1118 farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
1119 } else if (unlikely(isinfinity(farg1.d) && isinfinity(farg2.d))) {
1120 /* Division of infinity by infinity */
1121 farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXIDI);
1122 } else if (unlikely(iszero(farg2.d))) {
1123 if (iszero(farg1.d)) {
1124 /* Division of zero by zero */
1125 farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXZDZ);
1126 } else {
1127 /* Division by zero */
1128 farg1.ll = float_zero_divide_excp(farg1.d, farg2.d);
1130 } else {
1131 farg1.d = float64_div(farg1.d, farg2.d, &env->fp_status);
1133 #else
1134 farg1.d = float64_div(farg1.d, farg2.d, &env->fp_status);
1135 #endif
1136 return farg1.ll;
1139 /* fabs */
1140 uint64_t helper_fabs (uint64_t arg)
1142 CPU_DoubleU farg;
1144 farg.ll = arg;
1145 farg.d = float64_abs(farg.d);
1146 return farg.ll;
1149 /* fnabs */
1150 uint64_t helper_fnabs (uint64_t arg)
1152 CPU_DoubleU farg;
1154 farg.ll = arg;
1155 farg.d = float64_abs(farg.d);
1156 farg.d = float64_chs(farg.d);
1157 return farg.ll;
1160 /* fneg */
1161 uint64_t helper_fneg (uint64_t arg)
1163 CPU_DoubleU farg;
1165 farg.ll = arg;
1166 farg.d = float64_chs(farg.d);
1167 return farg.ll;
1170 /* fctiw - fctiw. */
1171 uint64_t helper_fctiw (uint64_t arg)
1173 CPU_DoubleU farg;
1174 farg.ll = arg;
1176 if (unlikely(float64_is_signaling_nan(farg.d))) {
1177 /* sNaN conversion */
1178 farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN | POWERPC_EXCP_FP_VXCVI);
1179 } else if (unlikely(float64_is_nan(farg.d) || isinfinity(farg.d))) {
1180 /* qNan / infinity conversion */
1181 farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXCVI);
1182 } else {
1183 farg.ll = float64_to_int32(farg.d, &env->fp_status);
1184 #if USE_PRECISE_EMULATION
1185         /* XXX: higher bits are not supposed to be significant.
1186          * To make tests easier, return the same value as a real PowerPC 750 does.
1188 farg.ll |= 0xFFF80000ULL << 32;
1189 #endif
1191 return farg.ll;
1194 /* fctiwz - fctiwz. */
1195 uint64_t helper_fctiwz (uint64_t arg)
1197 CPU_DoubleU farg;
1198 farg.ll = arg;
1200 if (unlikely(float64_is_signaling_nan(farg.d))) {
1201 /* sNaN conversion */
1202 farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN | POWERPC_EXCP_FP_VXCVI);
1203 } else if (unlikely(float64_is_nan(farg.d) || isinfinity(farg.d))) {
1204 /* qNan / infinity conversion */
1205 farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXCVI);
1206 } else {
1207 farg.ll = float64_to_int32_round_to_zero(farg.d, &env->fp_status);
1208 #if USE_PRECISE_EMULATION
1209         /* XXX: higher bits are not supposed to be significant.
1210          * To make tests easier, return the same value as a real PowerPC 750 does.
1212 farg.ll |= 0xFFF80000ULL << 32;
1213 #endif
1215 return farg.ll;
1218 #if defined(TARGET_PPC64)
1219 /* fcfid - fcfid. */
1220 uint64_t helper_fcfid (uint64_t arg)
1222 CPU_DoubleU farg;
1223 farg.d = int64_to_float64(arg, &env->fp_status);
1224 return farg.ll;
1227 /* fctid - fctid. */
1228 uint64_t helper_fctid (uint64_t arg)
1230 CPU_DoubleU farg;
1231 farg.ll = arg;
1233 if (unlikely(float64_is_signaling_nan(farg.d))) {
1234 /* sNaN conversion */
1235 farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN | POWERPC_EXCP_FP_VXCVI);
1236 } else if (unlikely(float64_is_nan(farg.d) || isinfinity(farg.d))) {
1237 /* qNan / infinity conversion */
1238 farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXCVI);
1239 } else {
1240 farg.ll = float64_to_int64(farg.d, &env->fp_status);
1242 return farg.ll;
1245 /* fctidz - fctidz. */
1246 uint64_t helper_fctidz (uint64_t arg)
1248 CPU_DoubleU farg;
1249 farg.ll = arg;
1251 if (unlikely(float64_is_signaling_nan(farg.d))) {
1252 /* sNaN conversion */
1253 farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN | POWERPC_EXCP_FP_VXCVI);
1254 } else if (unlikely(float64_is_nan(farg.d) || isinfinity(farg.d))) {
1255 /* qNan / infinity conversion */
1256 farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXCVI);
1257 } else {
1258 farg.ll = float64_to_int64_round_to_zero(farg.d, &env->fp_status);
1260 return farg.ll;
1263 #endif
1265 static always_inline uint64_t do_fri (uint64_t arg, int rounding_mode)
1267 CPU_DoubleU farg;
1268 farg.ll = arg;
1270 if (unlikely(float64_is_signaling_nan(farg.d))) {
1271 /* sNaN round */
1272 farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN | POWERPC_EXCP_FP_VXCVI);
1273 } else if (unlikely(float64_is_nan(farg.d) || isinfinity(farg.d))) {
1274 /* qNan / infinity round */
1275 farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXCVI);
1276 } else {
1277 set_float_rounding_mode(rounding_mode, &env->fp_status);
1278 farg.ll = float64_round_to_int(farg.d, &env->fp_status);
1279 /* Restore rounding mode from FPSCR */
1280 fpscr_set_rounding_mode();
1282 return farg.ll;
1285 uint64_t helper_frin (uint64_t arg)
1287 return do_fri(arg, float_round_nearest_even);
1290 uint64_t helper_friz (uint64_t arg)
1292 return do_fri(arg, float_round_to_zero);
1295 uint64_t helper_frip (uint64_t arg)
1297 return do_fri(arg, float_round_up);
1300 uint64_t helper_frim (uint64_t arg)
1302 return do_fri(arg, float_round_down);
1305 /* fmadd - fmadd. */
1306 uint64_t helper_fmadd (uint64_t arg1, uint64_t arg2, uint64_t arg3)
1308 CPU_DoubleU farg1, farg2, farg3;
1310 farg1.ll = arg1;
1311 farg2.ll = arg2;
1312 farg3.ll = arg3;
1313 #if USE_PRECISE_EMULATION
1314 if (unlikely(float64_is_signaling_nan(farg1.d) ||
1315 float64_is_signaling_nan(farg2.d) ||
1316 float64_is_signaling_nan(farg3.d))) {
1317 /* sNaN operation */
1318 farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
1319 } else {
1320 #ifdef FLOAT128
1321 /* This is the way the PowerPC specification defines it */
1322 float128 ft0_128, ft1_128;
1324 ft0_128 = float64_to_float128(farg1.d, &env->fp_status);
1325 ft1_128 = float64_to_float128(farg2.d, &env->fp_status);
1326 ft0_128 = float128_mul(ft0_128, ft1_128, &env->fp_status);
1327 ft1_128 = float64_to_float128(farg3.d, &env->fp_status);
1328 ft0_128 = float128_add(ft0_128, ft1_128, &env->fp_status);
1329 farg1.d = float128_to_float64(ft0_128, &env->fp_status);
1330 #else
1331 /* This is OK on x86 hosts */
1332 farg1.d = (farg1.d * farg2.d) + farg3.d;
1333 #endif
1335 #else
1336 farg1.d = float64_mul(farg1.d, farg2.d, &env->fp_status);
1337 farg1.d = float64_add(farg1.d, farg3.d, &env->fp_status);
1338 #endif
1339 return farg1.ll;
1342 /* fmsub - fmsub. */
1343 uint64_t helper_fmsub (uint64_t arg1, uint64_t arg2, uint64_t arg3)
1345 CPU_DoubleU farg1, farg2, farg3;
1347 farg1.ll = arg1;
1348 farg2.ll = arg2;
1349 farg3.ll = arg3;
1350 #if USE_PRECISE_EMULATION
1351 if (unlikely(float64_is_signaling_nan(farg1.d) ||
1352 float64_is_signaling_nan(farg2.d) ||
1353 float64_is_signaling_nan(farg3.d))) {
1354 /* sNaN operation */
1355 farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
1356 } else {
1357 #ifdef FLOAT128
1358 /* This is the way the PowerPC specification defines it */
1359 float128 ft0_128, ft1_128;
1361 ft0_128 = float64_to_float128(farg1.d, &env->fp_status);
1362 ft1_128 = float64_to_float128(farg2.d, &env->fp_status);
1363 ft0_128 = float128_mul(ft0_128, ft1_128, &env->fp_status);
1364 ft1_128 = float64_to_float128(farg3.d, &env->fp_status);
1365 ft0_128 = float128_sub(ft0_128, ft1_128, &env->fp_status);
1366 farg1.d = float128_to_float64(ft0_128, &env->fp_status);
1367 #else
1368 /* This is OK on x86 hosts */
1369 farg1.d = (farg1.d * farg2.d) - farg3.d;
1370 #endif
1372 #else
1373 farg1.d = float64_mul(farg1.d, farg2.d, &env->fp_status);
1374 farg1.d = float64_sub(farg1.d, farg3.d, &env->fp_status);
1375 #endif
1376 return farg1.ll;
1379 /* fnmadd - fnmadd. */
1380 uint64_t helper_fnmadd (uint64_t arg1, uint64_t arg2, uint64_t arg3)
1382 CPU_DoubleU farg1, farg2, farg3;
1384 farg1.ll = arg1;
1385 farg2.ll = arg2;
1386 farg3.ll = arg3;
1388 if (unlikely(float64_is_signaling_nan(farg1.d) ||
1389 float64_is_signaling_nan(farg2.d) ||
1390 float64_is_signaling_nan(farg3.d))) {
1391 /* sNaN operation */
1392 farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
1393 } else {
1394 #if USE_PRECISE_EMULATION
1395 #ifdef FLOAT128
1396 /* This is the way the PowerPC specification defines it */
1397 float128 ft0_128, ft1_128;
1399 ft0_128 = float64_to_float128(farg1.d, &env->fp_status);
1400 ft1_128 = float64_to_float128(farg2.d, &env->fp_status);
1401 ft0_128 = float128_mul(ft0_128, ft1_128, &env->fp_status);
1402 ft1_128 = float64_to_float128(farg3.d, &env->fp_status);
1403 ft0_128 = float128_add(ft0_128, ft1_128, &env->fp_status);
1404 farg1.d= float128_to_float64(ft0_128, &env->fp_status);
1405 #else
1406 /* This is OK on x86 hosts */
1407 farg1.d = (farg1.d * farg2.d) + farg3.d;
1408 #endif
1409 #else
1410 farg1.d = float64_mul(farg1.d, farg2.d, &env->fp_status);
1411 farg1.d = float64_add(farg1.d, farg3.d, &env->fp_status);
1412 #endif
1413 if (likely(!float64_is_nan(farg1.d)))
1414 farg1.d = float64_chs(farg1.d);
1416 return farg1.ll;
1419 /* fnmsub - fnmsub. */
1420 uint64_t helper_fnmsub (uint64_t arg1, uint64_t arg2, uint64_t arg3)
1422 CPU_DoubleU farg1, farg2, farg3;
1424 farg1.ll = arg1;
1425 farg2.ll = arg2;
1426 farg3.ll = arg3;
1428 if (unlikely(float64_is_signaling_nan(farg1.d) ||
1429 float64_is_signaling_nan(farg2.d) ||
1430 float64_is_signaling_nan(farg3.d))) {
1431 /* sNaN operation */
1432 farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
1433 } else {
1434 #if USE_PRECISE_EMULATION
1435 #ifdef FLOAT128
1436 /* This is the way the PowerPC specification defines it */
1437 float128 ft0_128, ft1_128;
1439 ft0_128 = float64_to_float128(farg1.d, &env->fp_status);
1440 ft1_128 = float64_to_float128(farg2.d, &env->fp_status);
1441 ft0_128 = float128_mul(ft0_128, ft1_128, &env->fp_status);
1442 ft1_128 = float64_to_float128(farg3.d, &env->fp_status);
1443 ft0_128 = float128_sub(ft0_128, ft1_128, &env->fp_status);
1444 farg1.d = float128_to_float64(ft0_128, &env->fp_status);
1445 #else
1446 /* This is OK on x86 hosts */
1447 farg1.d = (farg1.d * farg2.d) - farg3.d;
1448 #endif
1449 #else
1450 farg1.d = float64_mul(farg1.d, farg2.d, &env->fp_status);
1451 farg1.d = float64_sub(farg1.d, farg3.d, &env->fp_status);
1452 #endif
1453 if (likely(!float64_is_nan(farg1.d)))
1454 farg1.d = float64_chs(farg1.d);
1456 return farg1.ll;
1459 /* frsp - frsp. */
1460 uint64_t helper_frsp (uint64_t arg)
1462 CPU_DoubleU farg;
1463 farg.ll = arg;
1465 #if USE_PRECISE_EMULATION
1466 if (unlikely(float64_is_signaling_nan(farg.d))) {
1467         /* sNaN round to single precision */
1468 farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
1469 } else {
1470 farg.d = float64_to_float32(farg.d, &env->fp_status);
1472 #else
1473 farg.d = float64_to_float32(farg.d, &env->fp_status);
1474 #endif
1475 return farg.ll;
1478 /* fsqrt - fsqrt. */
1479 uint64_t helper_fsqrt (uint64_t arg)
1481 CPU_DoubleU farg;
1482 farg.ll = arg;
1484 if (unlikely(float64_is_signaling_nan(farg.d))) {
1485 /* sNaN square root */
1486 farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
1487 } else if (unlikely(fpisneg(farg.d) && !iszero(farg.d))) {
1488 /* Square root of a negative nonzero number */
1489 farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSQRT);
1490 } else {
1491 farg.d = float64_sqrt(farg.d, &env->fp_status);
1493 return farg.ll;
1496 /* fre - fre. */
1497 uint64_t helper_fre (uint64_t arg)
1499 CPU_DoubleU farg;
1500 farg.ll = arg;
1502 if (unlikely(float64_is_signaling_nan(farg.d))) {
1503 /* sNaN reciprocal */
1504 farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
1505 } else if (unlikely(iszero(farg.d))) {
1506 /* Zero reciprocal */
1507 farg.ll = float_zero_divide_excp(1.0, farg.d);
1508 } else if (likely(isnormal(farg.d))) {
1509 farg.d = float64_div(1.0, farg.d, &env->fp_status);
1510 } else {
1511 if (farg.ll == 0x8000000000000000ULL) {
1512 farg.ll = 0xFFF0000000000000ULL;
1513 } else if (farg.ll == 0x0000000000000000ULL) {
1514 farg.ll = 0x7FF0000000000000ULL;
1515 } else if (float64_is_nan(farg.d)) {
1516 farg.ll = 0x7FF8000000000000ULL;
1517 } else if (fpisneg(farg.d)) {
1518 farg.ll = 0x8000000000000000ULL;
1519 } else {
1520 farg.ll = 0x0000000000000000ULL;
1523     return farg.ll;
1526 /* fres - fres. */
1527 uint64_t helper_fres (uint64_t arg)
1529 CPU_DoubleU farg;
1530 farg.ll = arg;
1532 if (unlikely(float64_is_signaling_nan(farg.d))) {
1533 /* sNaN reciprocal */
1534 farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
1535 } else if (unlikely(iszero(farg.d))) {
1536 /* Zero reciprocal */
1537 farg.ll = float_zero_divide_excp(1.0, farg.d);
1538 } else if (likely(isnormal(farg.d))) {
1539 #if USE_PRECISE_EMULATION
1540 farg.d = float64_div(1.0, farg.d, &env->fp_status);
1541 farg.d = float64_to_float32(farg.d, &env->fp_status);
1542 #else
1543 farg.d = float32_div(1.0, farg.d, &env->fp_status);
1544 #endif
1545 } else {
1546 if (farg.ll == 0x8000000000000000ULL) {
1547 farg.ll = 0xFFF0000000000000ULL;
1548 } else if (farg.ll == 0x0000000000000000ULL) {
1549 farg.ll = 0x7FF0000000000000ULL;
1550 } else if (float64_is_nan(farg.d)) {
1551 farg.ll = 0x7FF8000000000000ULL;
1552 } else if (fpisneg(farg.d)) {
1553 farg.ll = 0x8000000000000000ULL;
1554 } else {
1555 farg.ll = 0x0000000000000000ULL;
1558 return farg.ll;
1561 /* frsqrte - frsqrte. */
1562 uint64_t helper_frsqrte (uint64_t arg)
1564 CPU_DoubleU farg;
1565 farg.ll = arg;
1567 if (unlikely(float64_is_signaling_nan(farg.d))) {
1568 /* sNaN reciprocal square root */
1569 farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
1570 } else if (unlikely(fpisneg(farg.d) && !iszero(farg.d))) {
1571 /* Reciprocal square root of a negative nonzero number */
1572 farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSQRT);
1573 } else if (likely(isnormal(farg.d))) {
1574 farg.d = float64_sqrt(farg.d, &env->fp_status);
1575 farg.d = float32_div(1.0, farg.d, &env->fp_status);
1576 } else {
1577 if (farg.ll == 0x8000000000000000ULL) {
1578 farg.ll = 0xFFF0000000000000ULL;
1579 } else if (farg.ll == 0x0000000000000000ULL) {
1580 farg.ll = 0x7FF0000000000000ULL;
1581 } else if (float64_is_nan(farg.d)) {
1582 farg.ll |= 0x000FFFFFFFFFFFFFULL;
1583 } else if (fpisneg(farg.d)) {
1584 farg.ll = 0x7FF8000000000000ULL;
1585 } else {
1586 farg.ll = 0x0000000000000000ULL;
1589 return farg.ll;
1592 /* fsel - fsel. */
1593 uint64_t helper_fsel (uint64_t arg1, uint64_t arg2, uint64_t arg3)
1595 CPU_DoubleU farg1;
1597 farg1.ll = arg1;
1599 if (!fpisneg(farg1.d) || iszero(farg1.d))
1600 return arg2;
1601 else
1602 return arg3;
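/* Per the PowerPC ISA, fsel selects its second operand when the first is
 * greater than or equal to 0.0 and its third operand otherwise, with a NaN
 * first operand also selecting the third.  A NaN-aware variant might look
 * like the sketch below (illustrative only; not necessarily the exact
 * change referred to by the commit title above):
 */
#if 0 /* example only, not built */
uint64_t example_fsel(uint64_t arg1, uint64_t arg2, uint64_t arg3)
{
    CPU_DoubleU farg1;

    farg1.ll = arg1;
    if ((!fpisneg(farg1.d) || iszero(farg1.d)) && !float64_is_nan(farg1.d))
        return arg2; /* arg1 >= 0.0 and not a NaN */
    else
        return arg3; /* arg1 < 0.0 or a NaN */
}
#endif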
1605 uint32_t helper_fcmpu (uint64_t arg1, uint64_t arg2)
1607 CPU_DoubleU farg1, farg2;
1608 uint32_t ret = 0;
1609 farg1.ll = arg1;
1610 farg2.ll = arg2;
1612 if (unlikely(float64_is_signaling_nan(farg1.d) ||
1613 float64_is_signaling_nan(farg2.d))) {
1614 /* sNaN comparison */
1615 fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
1616 } else {
1617 if (float64_lt(farg1.d, farg2.d, &env->fp_status)) {
1618 ret = 0x08UL;
1619 } else if (!float64_le(farg1.d, farg2.d, &env->fp_status)) {
1620 ret = 0x04UL;
1621 } else {
1622 ret = 0x02UL;
1625 env->fpscr &= ~(0x0F << FPSCR_FPRF);
1626 env->fpscr |= ret << FPSCR_FPRF;
1627 return ret;
1630 uint32_t helper_fcmpo (uint64_t arg1, uint64_t arg2)
1632 CPU_DoubleU farg1, farg2;
1633 uint32_t ret = 0;
1634 farg1.ll = arg1;
1635 farg2.ll = arg2;
1637 if (unlikely(float64_is_nan(farg1.d) ||
1638 float64_is_nan(farg2.d))) {
1639 if (float64_is_signaling_nan(farg1.d) ||
1640 float64_is_signaling_nan(farg2.d)) {
1641 /* sNaN comparison */
1642 fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN |
1643 POWERPC_EXCP_FP_VXVC);
1644 } else {
1645 /* qNaN comparison */
1646 fload_invalid_op_excp(POWERPC_EXCP_FP_VXVC);
1648 } else {
1649 if (float64_lt(farg1.d, farg2.d, &env->fp_status)) {
1650 ret = 0x08UL;
1651 } else if (!float64_le(farg1.d, farg2.d, &env->fp_status)) {
1652 ret = 0x04UL;
1653 } else {
1654 ret = 0x02UL;
1657 env->fpscr &= ~(0x0F << FPSCR_FPRF);
1658 env->fpscr |= ret << FPSCR_FPRF;
1659 return ret;
1662 #if !defined (CONFIG_USER_ONLY)
1663 void helper_store_msr (target_ulong val)
1665 val = hreg_store_msr(env, val, 0);
1666 if (val != 0) {
1667 env->interrupt_request |= CPU_INTERRUPT_EXITTB;
1668 helper_raise_exception(val);
1672 static always_inline void do_rfi (target_ulong nip, target_ulong msr,
1673 target_ulong msrm, int keep_msrh)
1675 #if defined(TARGET_PPC64)
1676 if (msr & (1ULL << MSR_SF)) {
1677 nip = (uint64_t)nip;
1678 msr &= (uint64_t)msrm;
1679 } else {
1680 nip = (uint32_t)nip;
1681 msr = (uint32_t)(msr & msrm);
1682 if (keep_msrh)
1683 msr |= env->msr & ~((uint64_t)0xFFFFFFFF);
1685 #else
1686 nip = (uint32_t)nip;
1687 msr &= (uint32_t)msrm;
1688 #endif
1689 /* XXX: beware: this is false if VLE is supported */
1690 env->nip = nip & ~((target_ulong)0x00000003);
1691 hreg_store_msr(env, msr, 1);
1692 #if defined (DEBUG_OP)
1693 cpu_dump_rfi(env->nip, env->msr);
1694 #endif
1695 /* No need to raise an exception here,
1696 * as rfi is always the last insn of a TB
1698 env->interrupt_request |= CPU_INTERRUPT_EXITTB;
1701 void helper_rfi (void)
1703 do_rfi(env->spr[SPR_SRR0], env->spr[SPR_SRR1],
1704 ~((target_ulong)0xFFFF0000), 1);
1707 #if defined(TARGET_PPC64)
1708 void helper_rfid (void)
1710 do_rfi(env->spr[SPR_SRR0], env->spr[SPR_SRR1],
1711 ~((target_ulong)0xFFFF0000), 0);
1714 void helper_hrfid (void)
1716 do_rfi(env->spr[SPR_HSRR0], env->spr[SPR_HSRR1],
1717 ~((target_ulong)0xFFFF0000), 0);
1719 #endif
1720 #endif
1722 void helper_tw (target_ulong arg1, target_ulong arg2, uint32_t flags)
1724 if (!likely(!(((int32_t)arg1 < (int32_t)arg2 && (flags & 0x10)) ||
1725 ((int32_t)arg1 > (int32_t)arg2 && (flags & 0x08)) ||
1726 ((int32_t)arg1 == (int32_t)arg2 && (flags & 0x04)) ||
1727 ((uint32_t)arg1 < (uint32_t)arg2 && (flags & 0x02)) ||
1728 ((uint32_t)arg1 > (uint32_t)arg2 && (flags & 0x01))))) {
1729 helper_raise_exception_err(POWERPC_EXCP_PROGRAM, POWERPC_EXCP_TRAP);
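/* The flags operand above is the TO field of tw/twi: 0x10 traps on signed
 * less-than, 0x08 on signed greater-than, 0x04 on equality, 0x02 on
 * unsigned less-than and 0x01 on unsigned greater-than.  For instance
 * (illustrative), flags == 0x04 with arg1 == arg2 raises the TRAP program
 * exception, i.e. the "trap if equal" form.
 */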
1733 #if defined(TARGET_PPC64)
1734 void helper_td (target_ulong arg1, target_ulong arg2, uint32_t flags)
1736 if (!likely(!(((int64_t)arg1 < (int64_t)arg2 && (flags & 0x10)) ||
1737 ((int64_t)arg1 > (int64_t)arg2 && (flags & 0x08)) ||
1738 ((int64_t)arg1 == (int64_t)arg2 && (flags & 0x04)) ||
1739 ((uint64_t)arg1 < (uint64_t)arg2 && (flags & 0x02)) ||
1740 ((uint64_t)arg1 > (uint64_t)arg2 && (flags & 0x01)))))
1741 helper_raise_exception_err(POWERPC_EXCP_PROGRAM, POWERPC_EXCP_TRAP);
1743 #endif
1745 /*****************************************************************************/
1746 /* PowerPC 601 specific instructions (POWER bridge) */
1748 target_ulong helper_clcs (uint32_t arg)
1750 switch (arg) {
1751 case 0x0CUL:
1752 /* Instruction cache line size */
1753 return env->icache_line_size;
1754 break;
1755 case 0x0DUL:
1756 /* Data cache line size */
1757 return env->dcache_line_size;
1758 break;
1759 case 0x0EUL:
1760 /* Minimum cache line size */
1761 return (env->icache_line_size < env->dcache_line_size) ?
1762 env->icache_line_size : env->dcache_line_size;
1763 break;
1764 case 0x0FUL:
1765 /* Maximum cache line size */
1766 return (env->icache_line_size > env->dcache_line_size) ?
1767 env->icache_line_size : env->dcache_line_size;
1768 break;
1769 default:
1770 /* Undefined */
1771 return 0;
1772 break;
1776 target_ulong helper_div (target_ulong arg1, target_ulong arg2)
1778 uint64_t tmp = (uint64_t)arg1 << 32 | env->spr[SPR_MQ];
1780 if (((int32_t)tmp == INT32_MIN && (int32_t)arg2 == (int32_t)-1) ||
1781 (int32_t)arg2 == 0) {
1782 env->spr[SPR_MQ] = 0;
1783 return INT32_MIN;
1784 } else {
1785 env->spr[SPR_MQ] = tmp % arg2;
1786 return tmp / (int32_t)arg2;
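/* Worked example for the POWER divide above (illustrative values): with
 * arg1 = 0 and SPR_MQ = 100, the 64-bit dividend is 100; dividing by
 * arg2 = 7 returns a quotient of 14 and leaves the remainder 2 in MQ.
 */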
1790 target_ulong helper_divo (target_ulong arg1, target_ulong arg2)
1792 uint64_t tmp = (uint64_t)arg1 << 32 | env->spr[SPR_MQ];
1794 if (((int32_t)tmp == INT32_MIN && (int32_t)arg2 == (int32_t)-1) ||
1795 (int32_t)arg2 == 0) {
1796 env->xer |= (1 << XER_OV) | (1 << XER_SO);
1797 env->spr[SPR_MQ] = 0;
1798 return INT32_MIN;
1799 } else {
1800 env->spr[SPR_MQ] = tmp % arg2;
1801 tmp /= (int32_t)arg2;
1802 if ((int32_t)tmp != tmp) {
1803 env->xer |= (1 << XER_OV) | (1 << XER_SO);
1804 } else {
1805 env->xer &= ~(1 << XER_OV);
1807 return tmp;
1811 target_ulong helper_divs (target_ulong arg1, target_ulong arg2)
1813 if (((int32_t)arg1 == INT32_MIN && (int32_t)arg2 == (int32_t)-1) ||
1814 (int32_t)arg2 == 0) {
1815 env->spr[SPR_MQ] = 0;
1816 return INT32_MIN;
1817 } else {
1818 env->spr[SPR_MQ] = (int32_t)arg1 % (int32_t)arg2;
1819 return (int32_t)arg1 / (int32_t)arg2;
1823 target_ulong helper_divso (target_ulong arg1, target_ulong arg2)
1825 if (((int32_t)arg1 == INT32_MIN && (int32_t)arg2 == (int32_t)-1) ||
1826 (int32_t)arg2 == 0) {
1827 env->xer |= (1 << XER_OV) | (1 << XER_SO);
1828 env->spr[SPR_MQ] = 0;
1829 return INT32_MIN;
1830 } else {
1831 env->xer &= ~(1 << XER_OV);
1832 env->spr[SPR_MQ] = (int32_t)arg1 % (int32_t)arg2;
1833 return (int32_t)arg1 / (int32_t)arg2;
1837 #if !defined (CONFIG_USER_ONLY)
1838 target_ulong helper_rac (target_ulong addr)
1840 mmu_ctx_t ctx;
1841 int nb_BATs;
1842 target_ulong ret = 0;
1844 /* We don't have to generate many instances of this instruction,
1845 * as rac is supervisor only.
1847 /* XXX: FIX THIS: Pretend we have no BAT */
1848 nb_BATs = env->nb_BATs;
1849 env->nb_BATs = 0;
1850 if (get_physical_address(env, &ctx, addr, 0, ACCESS_INT) == 0)
1851 ret = ctx.raddr;
1852 env->nb_BATs = nb_BATs;
1853 return ret;
1856 void helper_rfsvc (void)
1858 do_rfi(env->lr, env->ctr, 0x0000FFFF, 0);
1860 #endif
1862 /*****************************************************************************/
1863 /* 602 specific instructions */
1864 /* mfrom is the craziest instruction ever seen, imho! */
1865 /* The real implementation uses a ROM table; do the same here. */
1866 /* Extremely decomposed:
1867  * return 256 * log10(10^(-arg / 256) + 1.0) + 0.5
1870 #if !defined (CONFIG_USER_ONLY)
1871 target_ulong helper_602_mfrom (target_ulong arg)
1873 if (likely(arg < 602)) {
1874 #include "mfrom_table.c"
1875 return mfrom_ROM_table[arg];
1876 } else {
1877 return 0;
1880 #endif
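/* A generator matching the formula above could look like the sketch below
 * (illustrative only; mfrom_table.c is pre-generated and its element type
 * is assumed to be uint8_t here):
 */
#if 0 /* example only, not built */
#include <math.h>
static void example_gen_mfrom_table(uint8_t table[602])
{
    int i;
    for (i = 0; i < 602; i++) {
        /* 256 * log10(10^(-i / 256) + 1.0) + 0.5, truncated to an integer */
        double d = pow(10.0, -i / 256.0) + 1.0;
        table[i] = (uint8_t)(256.0 * log10(d) + 0.5);
    }
}
#endif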
1882 /*****************************************************************************/
1883 /* Embedded PowerPC specific helpers */
1885 /* XXX: to be improved to check access rights when in user-mode */
1886 target_ulong helper_load_dcr (target_ulong dcrn)
1888 target_ulong val = 0;
1890 if (unlikely(env->dcr_env == NULL)) {
1891 if (loglevel != 0) {
1892 fprintf(logfile, "No DCR environment\n");
1894 helper_raise_exception_err(POWERPC_EXCP_PROGRAM,
1895 POWERPC_EXCP_INVAL | POWERPC_EXCP_INVAL_INVAL);
1896 } else if (unlikely(ppc_dcr_read(env->dcr_env, dcrn, &val) != 0)) {
1897 if (loglevel != 0) {
1898 fprintf(logfile, "DCR read error %d %03x\n", (int)dcrn, (int)dcrn);
1900 helper_raise_exception_err(POWERPC_EXCP_PROGRAM,
1901 POWERPC_EXCP_INVAL | POWERPC_EXCP_PRIV_REG);
1903 return val;
1906 void helper_store_dcr (target_ulong dcrn, target_ulong val)
1908 if (unlikely(env->dcr_env == NULL)) {
1909 if (loglevel != 0) {
1910 fprintf(logfile, "No DCR environment\n");
1912 helper_raise_exception_err(POWERPC_EXCP_PROGRAM,
1913 POWERPC_EXCP_INVAL | POWERPC_EXCP_INVAL_INVAL);
1914 } else if (unlikely(ppc_dcr_write(env->dcr_env, dcrn, val) != 0)) {
1915 if (loglevel != 0) {
1916 fprintf(logfile, "DCR write error %d %03x\n", (int)dcrn, (int)dcrn);
1918 helper_raise_exception_err(POWERPC_EXCP_PROGRAM,
1919 POWERPC_EXCP_INVAL | POWERPC_EXCP_PRIV_REG);
1923 #if !defined(CONFIG_USER_ONLY)
1924 void helper_40x_rfci (void)
1926 do_rfi(env->spr[SPR_40x_SRR2], env->spr[SPR_40x_SRR3],
1927 ~((target_ulong)0xFFFF0000), 0);
1930 void helper_rfci (void)
1932     do_rfi(env->spr[SPR_BOOKE_CSRR0], env->spr[SPR_BOOKE_CSRR1],
1933 ~((target_ulong)0x3FFF0000), 0);
1936 void helper_rfdi (void)
1938     do_rfi(env->spr[SPR_BOOKE_DSRR0], env->spr[SPR_BOOKE_DSRR1],
1939 ~((target_ulong)0x3FFF0000), 0);
1942 void helper_rfmci (void)
1944     do_rfi(env->spr[SPR_BOOKE_MCSRR0], env->spr[SPR_BOOKE_MCSRR1],
1945 ~((target_ulong)0x3FFF0000), 0);
1947 #endif
1949 /* 440 specific */
1950 target_ulong helper_dlmzb (target_ulong high, target_ulong low, uint32_t update_Rc)
1952 target_ulong mask;
1953 int i;
1955 i = 1;
1956 for (mask = 0xFF000000; mask != 0; mask = mask >> 8) {
1957 if ((high & mask) == 0) {
1958 if (update_Rc) {
1959 env->crf[0] = 0x4;
1961 goto done;
1963 i++;
1965 for (mask = 0xFF000000; mask != 0; mask = mask >> 8) {
1966 if ((low & mask) == 0) {
1967 if (update_Rc) {
1968 env->crf[0] = 0x8;
1970 goto done;
1972 i++;
1974 if (update_Rc) {
1975 env->crf[0] = 0x2;
1977 done:
1978 env->xer = (env->xer & ~0x7F) | i;
1979 if (update_Rc) {
1980 env->crf[0] |= xer_so;
1982 return i;
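/* Worked example for dlmzb above (illustrative values): with
 * high = 0x61626300 the first zero byte is byte 4 of the high word, so the
 * first loop stops there, i == 4 is written to the low 7 bits of XER and
 * returned, and CR0 is set to 0x4 (ORed with SO) when update_Rc is set.
 */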
1985 /*****************************************************************************/
1986 /* SPE extension helpers */
1987 /* Use a table to make this quicker */
1988 static uint8_t hbrev[16] = {
1989 0x0, 0x8, 0x4, 0xC, 0x2, 0xA, 0x6, 0xE,
1990 0x1, 0x9, 0x5, 0xD, 0x3, 0xB, 0x7, 0xF,
1993 static always_inline uint8_t byte_reverse (uint8_t val)
1995 return hbrev[val >> 4] | (hbrev[val & 0xF] << 4);
1998 static always_inline uint32_t word_reverse (uint32_t val)
2000 return byte_reverse(val >> 24) | (byte_reverse(val >> 16) << 8) |
2001 (byte_reverse(val >> 8) << 16) | (byte_reverse(val) << 24);
2004 #define MASKBITS 16 // Random value - to be fixed (implementation dependent)
2005 target_ulong helper_brinc (target_ulong arg1, target_ulong arg2)
2007 uint32_t a, b, d, mask;
2009 mask = UINT32_MAX >> (32 - MASKBITS);
2010 a = arg1 & mask;
2011 b = arg2 & mask;
2012 d = word_reverse(1 + word_reverse(a | ~b));
2013 return (arg1 & ~mask) | (d & b);
2016 uint32_t helper_cntlsw32 (uint32_t val)
2018 if (val & 0x80000000)
2019 return clz32(~val);
2020 else
2021 return clz32(val);
2024 uint32_t helper_cntlzw32 (uint32_t val)
2026 return clz32(val);
2029 /* Single-precision floating-point conversions */
2030 static always_inline uint32_t efscfsi (uint32_t val)
2032 CPU_FloatU u;
2034 u.f = int32_to_float32(val, &env->spe_status);
2036 return u.l;
2039 static always_inline uint32_t efscfui (uint32_t val)
2041 CPU_FloatU u;
2043 u.f = uint32_to_float32(val, &env->spe_status);
2045 return u.l;
2048 static always_inline int32_t efsctsi (uint32_t val)
2050 CPU_FloatU u;
2052 u.l = val;
2053     /* NaNs are not treated the way IEEE 754 specifies */
2054 if (unlikely(float32_is_nan(u.f)))
2055 return 0;
2057 return float32_to_int32(u.f, &env->spe_status);
2060 static always_inline uint32_t efsctui (uint32_t val)
2062 CPU_FloatU u;
2064 u.l = val;
2065     /* NaNs are not treated the way IEEE 754 specifies */
2066 if (unlikely(float32_is_nan(u.f)))
2067 return 0;
2069 return float32_to_uint32(u.f, &env->spe_status);
2072 static always_inline uint32_t efsctsiz (uint32_t val)
2074 CPU_FloatU u;
2076 u.l = val;
2077     /* NaNs are not treated the way IEEE 754 specifies */
2078 if (unlikely(float32_is_nan(u.f)))
2079 return 0;
2081 return float32_to_int32_round_to_zero(u.f, &env->spe_status);
2084 static always_inline uint32_t efsctuiz (uint32_t val)
2086 CPU_FloatU u;
2088 u.l = val;
2089     /* NaNs are not treated the way IEEE 754 specifies */
2090 if (unlikely(float32_is_nan(u.f)))
2091 return 0;
2093 return float32_to_uint32_round_to_zero(u.f, &env->spe_status);
2096 static always_inline uint32_t efscfsf (uint32_t val)
2098 CPU_FloatU u;
2099 float32 tmp;
2101 u.f = int32_to_float32(val, &env->spe_status);
2102 tmp = int64_to_float32(1ULL << 32, &env->spe_status);
2103 u.f = float32_div(u.f, tmp, &env->spe_status);
2105 return u.l;
2108 static always_inline uint32_t efscfuf (uint32_t val)
2110 CPU_FloatU u;
2111 float32 tmp;
2113 u.f = uint32_to_float32(val, &env->spe_status);
2114 tmp = uint64_to_float32(1ULL << 32, &env->spe_status);
2115 u.f = float32_div(u.f, tmp, &env->spe_status);
2117 return u.l;
2120 static always_inline uint32_t efsctsf (uint32_t val)
2122 CPU_FloatU u;
2123 float32 tmp;
2125 u.l = val;
2126     /* NaNs are not treated the way IEEE 754 specifies */
2127 if (unlikely(float32_is_nan(u.f)))
2128 return 0;
2129 tmp = uint64_to_float32(1ULL << 32, &env->spe_status);
2130 u.f = float32_mul(u.f, tmp, &env->spe_status);
2132 return float32_to_int32(u.f, &env->spe_status);
2135 static always_inline uint32_t efsctuf (uint32_t val)
2137 CPU_FloatU u;
2138 float32 tmp;
2140 u.l = val;
2141     /* NaNs are not treated the way IEEE 754 specifies */
2142 if (unlikely(float32_is_nan(u.f)))
2143 return 0;
2144 tmp = uint64_to_float32(1ULL << 32, &env->spe_status);
2145 u.f = float32_mul(u.f, tmp, &env->spe_status);
2147 return float32_to_uint32(u.f, &env->spe_status);
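/* The 1ULL << 32 scale factor used above converts between 32-bit fractional
 * values and floats by dividing or multiplying by 2^32.  Illustrative
 * example: efscfsf(0x40000000) computes 1073741824 / 2^32 and therefore
 * returns the float32 encoding of 0.25.
 */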
2150 #define HELPER_SPE_SINGLE_CONV(name) \
2151 uint32_t helper_e##name (uint32_t val) \
2153 return e##name(val); \
2155 /* efscfsi */
2156 HELPER_SPE_SINGLE_CONV(fscfsi);
2157 /* efscfui */
2158 HELPER_SPE_SINGLE_CONV(fscfui);
2159 /* efscfuf */
2160 HELPER_SPE_SINGLE_CONV(fscfuf);
2161 /* efscfsf */
2162 HELPER_SPE_SINGLE_CONV(fscfsf);
2163 /* efsctsi */
2164 HELPER_SPE_SINGLE_CONV(fsctsi);
2165 /* efsctui */
2166 HELPER_SPE_SINGLE_CONV(fsctui);
2167 /* efsctsiz */
2168 HELPER_SPE_SINGLE_CONV(fsctsiz);
2169 /* efsctuiz */
2170 HELPER_SPE_SINGLE_CONV(fsctuiz);
2171 /* efsctsf */
2172 HELPER_SPE_SINGLE_CONV(fsctsf);
2173 /* efsctuf */
2174 HELPER_SPE_SINGLE_CONV(fsctuf);
2176 #define HELPER_SPE_VECTOR_CONV(name) \
2177 uint64_t helper_ev##name (uint64_t val) \
2179 return ((uint64_t)e##name(val >> 32) << 32) | \
2180 (uint64_t)e##name(val); \
2182 /* evfscfsi */
2183 HELPER_SPE_VECTOR_CONV(fscfsi);
2184 /* evfscfui */
2185 HELPER_SPE_VECTOR_CONV(fscfui);
2186 /* evfscfuf */
2187 HELPER_SPE_VECTOR_CONV(fscfuf);
2188 /* evfscfsf */
2189 HELPER_SPE_VECTOR_CONV(fscfsf);
2190 /* evfsctsi */
2191 HELPER_SPE_VECTOR_CONV(fsctsi);
2192 /* evfsctui */
2193 HELPER_SPE_VECTOR_CONV(fsctui);
2194 /* evfsctsiz */
2195 HELPER_SPE_VECTOR_CONV(fsctsiz);
2196 /* evfsctuiz */
2197 HELPER_SPE_VECTOR_CONV(fsctuiz);
2198 /* evfsctsf */
2199 HELPER_SPE_VECTOR_CONV(fsctsf);
2200 /* evfsctuf */
2201 HELPER_SPE_VECTOR_CONV(fsctuf);
/* Single-precision floating-point arithmetic */
static always_inline uint32_t efsadd (uint32_t op1, uint32_t op2)
{
    CPU_FloatU u1, u2;
    u1.l = op1;
    u2.l = op2;
    u1.f = float32_add(u1.f, u2.f, &env->spe_status);
    return u1.l;
}

static always_inline uint32_t efssub (uint32_t op1, uint32_t op2)
{
    CPU_FloatU u1, u2;
    u1.l = op1;
    u2.l = op2;
    u1.f = float32_sub(u1.f, u2.f, &env->spe_status);
    return u1.l;
}

static always_inline uint32_t efsmul (uint32_t op1, uint32_t op2)
{
    CPU_FloatU u1, u2;
    u1.l = op1;
    u2.l = op2;
    u1.f = float32_mul(u1.f, u2.f, &env->spe_status);
    return u1.l;
}

static always_inline uint32_t efsdiv (uint32_t op1, uint32_t op2)
{
    CPU_FloatU u1, u2;
    u1.l = op1;
    u2.l = op2;
    u1.f = float32_div(u1.f, u2.f, &env->spe_status);
    return u1.l;
}

#define HELPER_SPE_SINGLE_ARITH(name)                                         \
uint32_t helper_e##name (uint32_t op1, uint32_t op2)                          \
{                                                                             \
    return e##name(op1, op2);                                                 \
}
/* efsadd */
HELPER_SPE_SINGLE_ARITH(fsadd);
/* efssub */
HELPER_SPE_SINGLE_ARITH(fssub);
/* efsmul */
HELPER_SPE_SINGLE_ARITH(fsmul);
/* efsdiv */
HELPER_SPE_SINGLE_ARITH(fsdiv);

#define HELPER_SPE_VECTOR_ARITH(name)                                         \
uint64_t helper_ev##name (uint64_t op1, uint64_t op2)                         \
{                                                                             \
    return ((uint64_t)e##name(op1 >> 32, op2 >> 32) << 32) |                  \
           (uint64_t)e##name(op1, op2);                                       \
}
/* evfsadd */
HELPER_SPE_VECTOR_ARITH(fsadd);
/* evfssub */
HELPER_SPE_VECTOR_ARITH(fssub);
/* evfsmul */
HELPER_SPE_VECTOR_ARITH(fsmul);
/* evfsdiv */
HELPER_SPE_VECTOR_ARITH(fsdiv);

/* Single-precision floating-point comparisons */
static always_inline uint32_t efststlt (uint32_t op1, uint32_t op2)
{
    CPU_FloatU u1, u2;
    u1.l = op1;
    u2.l = op2;
    return float32_lt(u1.f, u2.f, &env->spe_status) ? 4 : 0;
}

static always_inline uint32_t efststgt (uint32_t op1, uint32_t op2)
{
    CPU_FloatU u1, u2;
    u1.l = op1;
    u2.l = op2;
    return float32_le(u1.f, u2.f, &env->spe_status) ? 0 : 4;
}

static always_inline uint32_t efststeq (uint32_t op1, uint32_t op2)
{
    CPU_FloatU u1, u2;
    u1.l = op1;
    u2.l = op2;
    return float32_eq(u1.f, u2.f, &env->spe_status) ? 4 : 0;
}

static always_inline uint32_t efscmplt (uint32_t op1, uint32_t op2)
{
    /* XXX: TODO: test special values (NaN, infinities, ...) */
    return efststlt(op1, op2);
}

static always_inline uint32_t efscmpgt (uint32_t op1, uint32_t op2)
{
    /* XXX: TODO: test special values (NaN, infinities, ...) */
    return efststgt(op1, op2);
}

static always_inline uint32_t efscmpeq (uint32_t op1, uint32_t op2)
{
    /* XXX: TODO: test special values (NaN, infinities, ...) */
    return efststeq(op1, op2);
}

#define HELPER_SINGLE_SPE_CMP(name)                                           \
uint32_t helper_e##name (uint32_t op1, uint32_t op2)                          \
{                                                                             \
    return e##name(op1, op2) << 2;                                            \
}
/* efststlt */
HELPER_SINGLE_SPE_CMP(fststlt);
/* efststgt */
HELPER_SINGLE_SPE_CMP(fststgt);
/* efststeq */
HELPER_SINGLE_SPE_CMP(fststeq);
/* efscmplt */
HELPER_SINGLE_SPE_CMP(fscmplt);
/* efscmpgt */
HELPER_SINGLE_SPE_CMP(fscmpgt);
/* efscmpeq */
HELPER_SINGLE_SPE_CMP(fscmpeq);

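/* evcmp_merge packs the high-element result, the low-element result, their
 * OR ("any") and their AND ("all") into one value, as used by the vector
 * compare helpers below.
 */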
static always_inline uint32_t evcmp_merge (int t0, int t1)
{
    return (t0 << 3) | (t1 << 2) | ((t0 | t1) << 1) | (t0 & t1);
}

#define HELPER_VECTOR_SPE_CMP(name)                                           \
uint32_t helper_ev##name (uint64_t op1, uint64_t op2)                         \
{                                                                             \
    return evcmp_merge(e##name(op1 >> 32, op2 >> 32), e##name(op1, op2));     \
}
/* evfststlt */
HELPER_VECTOR_SPE_CMP(fststlt);
/* evfststgt */
HELPER_VECTOR_SPE_CMP(fststgt);
/* evfststeq */
HELPER_VECTOR_SPE_CMP(fststeq);
/* evfscmplt */
HELPER_VECTOR_SPE_CMP(fscmplt);
/* evfscmpgt */
HELPER_VECTOR_SPE_CMP(fscmpgt);
/* evfscmpeq */
HELPER_VECTOR_SPE_CMP(fscmpeq);

/* Double-precision floating-point conversion */
uint64_t helper_efdcfsi (uint32_t val)
{
    CPU_DoubleU u;

    u.d = int32_to_float64(val, &env->spe_status);

    return u.ll;
}

uint64_t helper_efdcfsid (uint64_t val)
{
    CPU_DoubleU u;

    u.d = int64_to_float64(val, &env->spe_status);

    return u.ll;
}

uint64_t helper_efdcfui (uint32_t val)
{
    CPU_DoubleU u;

    u.d = uint32_to_float64(val, &env->spe_status);

    return u.ll;
}

uint64_t helper_efdcfuid (uint64_t val)
{
    CPU_DoubleU u;

    u.d = uint64_to_float64(val, &env->spe_status);

    return u.ll;
}

uint32_t helper_efdctsi (uint64_t val)
{
    CPU_DoubleU u;

    u.ll = val;
    /* NaNs are not treated the way IEEE 754 specifies */
    if (unlikely(float64_is_nan(u.d)))
        return 0;

    return float64_to_int32(u.d, &env->spe_status);
}

uint32_t helper_efdctui (uint64_t val)
{
    CPU_DoubleU u;

    u.ll = val;
    /* NaNs are not treated the way IEEE 754 specifies */
    if (unlikely(float64_is_nan(u.d)))
        return 0;

    return float64_to_uint32(u.d, &env->spe_status);
}

uint32_t helper_efdctsiz (uint64_t val)
{
    CPU_DoubleU u;

    u.ll = val;
    /* NaNs are not treated the way IEEE 754 specifies */
    if (unlikely(float64_is_nan(u.d)))
        return 0;

    return float64_to_int32_round_to_zero(u.d, &env->spe_status);
}

uint64_t helper_efdctsidz (uint64_t val)
{
    CPU_DoubleU u;

    u.ll = val;
    /* NaNs are not treated the way IEEE 754 specifies */
    if (unlikely(float64_is_nan(u.d)))
        return 0;

    return float64_to_int64_round_to_zero(u.d, &env->spe_status);
}

uint32_t helper_efdctuiz (uint64_t val)
{
    CPU_DoubleU u;

    u.ll = val;
    /* NaNs are not treated the way IEEE 754 specifies */
    if (unlikely(float64_is_nan(u.d)))
        return 0;

    return float64_to_uint32_round_to_zero(u.d, &env->spe_status);
}

uint64_t helper_efdctuidz (uint64_t val)
{
    CPU_DoubleU u;

    u.ll = val;
    /* NaNs are not treated the way IEEE 754 specifies */
    if (unlikely(float64_is_nan(u.d)))
        return 0;

    return float64_to_uint64_round_to_zero(u.d, &env->spe_status);
}

uint64_t helper_efdcfsf (uint32_t val)
{
    CPU_DoubleU u;
    float64 tmp;

    u.d = int32_to_float64(val, &env->spe_status);
    tmp = int64_to_float64(1ULL << 32, &env->spe_status);
    u.d = float64_div(u.d, tmp, &env->spe_status);

    return u.ll;
}

uint64_t helper_efdcfuf (uint32_t val)
{
    CPU_DoubleU u;
    float64 tmp;

    u.d = uint32_to_float64(val, &env->spe_status);
    tmp = int64_to_float64(1ULL << 32, &env->spe_status);
    u.d = float64_div(u.d, tmp, &env->spe_status);

    return u.ll;
}

uint32_t helper_efdctsf (uint64_t val)
{
    CPU_DoubleU u;
    float64 tmp;

    u.ll = val;
    /* NaNs are not treated the way IEEE 754 specifies */
    if (unlikely(float64_is_nan(u.d)))
        return 0;
    tmp = uint64_to_float64(1ULL << 32, &env->spe_status);
    u.d = float64_mul(u.d, tmp, &env->spe_status);

    return float64_to_int32(u.d, &env->spe_status);
}

uint32_t helper_efdctuf (uint64_t val)
{
    CPU_DoubleU u;
    float64 tmp;

    u.ll = val;
    /* NaNs are not treated the way IEEE 754 specifies */
    if (unlikely(float64_is_nan(u.d)))
        return 0;
    tmp = uint64_to_float64(1ULL << 32, &env->spe_status);
    u.d = float64_mul(u.d, tmp, &env->spe_status);

    return float64_to_uint32(u.d, &env->spe_status);
}

uint32_t helper_efscfd (uint64_t val)
{
    CPU_DoubleU u1;
    CPU_FloatU u2;

    u1.ll = val;
    u2.f = float64_to_float32(u1.d, &env->spe_status);

    return u2.l;
}

uint64_t helper_efdcfs (uint32_t val)
{
    CPU_DoubleU u2;
    CPU_FloatU u1;

    u1.l = val;
    u2.d = float32_to_float64(u1.f, &env->spe_status);

    return u2.ll;
}

/* Double-precision floating-point arithmetic */
uint64_t helper_efdadd (uint64_t op1, uint64_t op2)
{
    CPU_DoubleU u1, u2;
    u1.ll = op1;
    u2.ll = op2;
    u1.d = float64_add(u1.d, u2.d, &env->spe_status);
    return u1.ll;
}

uint64_t helper_efdsub (uint64_t op1, uint64_t op2)
{
    CPU_DoubleU u1, u2;
    u1.ll = op1;
    u2.ll = op2;
    u1.d = float64_sub(u1.d, u2.d, &env->spe_status);
    return u1.ll;
}

uint64_t helper_efdmul (uint64_t op1, uint64_t op2)
{
    CPU_DoubleU u1, u2;
    u1.ll = op1;
    u2.ll = op2;
    u1.d = float64_mul(u1.d, u2.d, &env->spe_status);
    return u1.ll;
}

uint64_t helper_efddiv (uint64_t op1, uint64_t op2)
{
    CPU_DoubleU u1, u2;
    u1.ll = op1;
    u2.ll = op2;
    u1.d = float64_div(u1.d, u2.d, &env->spe_status);
    return u1.ll;
}

/* Double-precision floating-point comparisons */
uint32_t helper_efdtstlt (uint64_t op1, uint64_t op2)
{
    CPU_DoubleU u1, u2;
    u1.ll = op1;
    u2.ll = op2;
    return float64_lt(u1.d, u2.d, &env->spe_status) ? 4 : 0;
}

uint32_t helper_efdtstgt (uint64_t op1, uint64_t op2)
{
    CPU_DoubleU u1, u2;
    u1.ll = op1;
    u2.ll = op2;
    return float64_le(u1.d, u2.d, &env->spe_status) ? 0 : 4;
}

uint32_t helper_efdtsteq (uint64_t op1, uint64_t op2)
{
    CPU_DoubleU u1, u2;
    u1.ll = op1;
    u2.ll = op2;
    return float64_eq(u1.d, u2.d, &env->spe_status) ? 4 : 0;
}

uint32_t helper_efdcmplt (uint64_t op1, uint64_t op2)
{
    /* XXX: TODO: test special values (NaN, infinities, ...) */
    return helper_efdtstlt(op1, op2);
}

uint32_t helper_efdcmpgt (uint64_t op1, uint64_t op2)
{
    /* XXX: TODO: test special values (NaN, infinities, ...) */
    return helper_efdtstgt(op1, op2);
}

uint32_t helper_efdcmpeq (uint64_t op1, uint64_t op2)
{
    /* XXX: TODO: test special values (NaN, infinities, ...) */
    return helper_efdtsteq(op1, op2);
}

/*****************************************************************************/
/* Softmmu support */
#if !defined (CONFIG_USER_ONLY)

#define MMUSUFFIX _mmu

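/* Instantiate the softmmu load/store helpers for 1, 2, 4 and 8 byte
 * accesses (the access size is 1 << SHIFT).
 */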
#define SHIFT 0
#include "softmmu_template.h"

#define SHIFT 1
#include "softmmu_template.h"

#define SHIFT 2
#include "softmmu_template.h"

#define SHIFT 3
#include "softmmu_template.h"

/* Try to fill the TLB and raise an exception on error. If retaddr is
   NULL, it means that the function was called in C code (i.e. not
   from generated code or from helper.c) */
/* XXX: fix it to restore all registers */
void tlb_fill (target_ulong addr, int is_write, int mmu_idx, void *retaddr)
{
    TranslationBlock *tb;
    CPUState *saved_env;
    unsigned long pc;
    int ret;

    /* XXX: hack to restore env in all cases, even if not called from
       generated code */
    saved_env = env;
    env = cpu_single_env;
    ret = cpu_ppc_handle_mmu_fault(env, addr, is_write, mmu_idx, 1);
    if (unlikely(ret != 0)) {
        if (likely(retaddr)) {
            /* now we have a real cpu fault */
            pc = (unsigned long)retaddr;
            tb = tb_find_pc(pc);
            if (likely(tb)) {
                /* the PC is inside the translated code. It means that we have
                   a virtual CPU fault */
                cpu_restore_state(tb, env, pc, NULL);
            }
        }
        helper_raise_exception_err(env->exception_index, env->error_code);
    }
    env = saved_env;
}

/* Segment registers load and store */
target_ulong helper_load_sr (target_ulong sr_num)
{
    return env->sr[sr_num];
}

void helper_store_sr (target_ulong sr_num, target_ulong val)
{
    ppc_store_sr(env, sr_num, val);
}

/* SLB management */
#if defined(TARGET_PPC64)
target_ulong helper_load_slb (target_ulong slb_nr)
{
    return ppc_load_slb(env, slb_nr);
}

void helper_store_slb (target_ulong slb_nr, target_ulong rs)
{
    ppc_store_slb(env, slb_nr, rs);
}

void helper_slbia (void)
{
    ppc_slb_invalidate_all(env);
}

void helper_slbie (target_ulong addr)
{
    ppc_slb_invalidate_one(env, addr);
}

#endif /* defined(TARGET_PPC64) */

/* TLB management */
void helper_tlbia (void)
{
    ppc_tlb_invalidate_all(env);
}

void helper_tlbie (target_ulong addr)
{
    ppc_tlb_invalidate_one(env, addr);
}

/* Software-driven TLB management */
/* PowerPC 602/603 software TLB load instruction helpers */
static void do_6xx_tlb (target_ulong new_EPN, int is_code)
{
    target_ulong RPN, CMP, EPN;
    int way;

    RPN = env->spr[SPR_RPA];
    if (is_code) {
        CMP = env->spr[SPR_ICMP];
        EPN = env->spr[SPR_IMISS];
    } else {
        CMP = env->spr[SPR_DCMP];
        EPN = env->spr[SPR_DMISS];
    }
    way = (env->spr[SPR_SRR1] >> 17) & 1;
#if defined (DEBUG_SOFTWARE_TLB)
    if (loglevel != 0) {
        fprintf(logfile, "%s: EPN " ADDRX " " ADDRX " PTE0 " ADDRX
                " PTE1 " ADDRX " way %d\n",
                __func__, new_EPN, EPN, CMP, RPN, way);
    }
#endif
    /* Store this TLB */
    ppc6xx_tlb_store(env, (uint32_t)(new_EPN & TARGET_PAGE_MASK),
                     way, is_code, CMP, RPN);
}

void helper_6xx_tlbd (target_ulong EPN)
{
    do_6xx_tlb(EPN, 0);
}

void helper_6xx_tlbi (target_ulong EPN)
{
    do_6xx_tlb(EPN, 1);
}

/* PowerPC 74xx software TLB load instruction helpers */
static void do_74xx_tlb (target_ulong new_EPN, int is_code)
{
    target_ulong RPN, CMP, EPN;
    int way;

    RPN = env->spr[SPR_PTELO];
    CMP = env->spr[SPR_PTEHI];
    EPN = env->spr[SPR_TLBMISS] & ~0x3;
    way = env->spr[SPR_TLBMISS] & 0x3;
#if defined (DEBUG_SOFTWARE_TLB)
    if (loglevel != 0) {
        fprintf(logfile, "%s: EPN " ADDRX " " ADDRX " PTE0 " ADDRX
                " PTE1 " ADDRX " way %d\n",
                __func__, new_EPN, EPN, CMP, RPN, way);
    }
#endif
    /* Store this TLB */
    ppc6xx_tlb_store(env, (uint32_t)(new_EPN & TARGET_PAGE_MASK),
                     way, is_code, CMP, RPN);
}

void helper_74xx_tlbd (target_ulong EPN)
{
    do_74xx_tlb(EPN, 0);
}

void helper_74xx_tlbi (target_ulong EPN)
{
    do_74xx_tlb(EPN, 1);
}

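/* BookE TLB size encoding: a size field of n selects a page of
 * 1024 << (2 * n) bytes, i.e. 1KB, 4KB, 16KB and so on.
 */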
static always_inline target_ulong booke_tlb_to_page_size (int size)
{
    return 1024 << (2 * size);
}

static always_inline int booke_page_size_to_tlb (target_ulong page_size)
{
    int size;

    switch (page_size) {
    case 0x00000400UL:
        size = 0x0;
        break;
    case 0x00001000UL:
        size = 0x1;
        break;
    case 0x00004000UL:
        size = 0x2;
        break;
    case 0x00010000UL:
        size = 0x3;
        break;
    case 0x00040000UL:
        size = 0x4;
        break;
    case 0x00100000UL:
        size = 0x5;
        break;
    case 0x00400000UL:
        size = 0x6;
        break;
    case 0x01000000UL:
        size = 0x7;
        break;
    case 0x04000000UL:
        size = 0x8;
        break;
    case 0x10000000UL:
        size = 0x9;
        break;
    case 0x40000000UL:
        size = 0xA;
        break;
#if defined (TARGET_PPC64)
    case 0x000100000000ULL:
        size = 0xB;
        break;
    case 0x000400000000ULL:
        size = 0xC;
        break;
    case 0x001000000000ULL:
        size = 0xD;
        break;
    case 0x004000000000ULL:
        size = 0xE;
        break;
    case 0x010000000000ULL:
        size = 0xF;
        break;
#endif
    default:
        size = -1;
        break;
    }

    return size;
}

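/* The 4xx TLB has 64 entries, so the entry index is masked with 0x3F below. */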
/* Helpers for 4xx TLB management */
target_ulong helper_4xx_tlbre_lo (target_ulong entry)
{
    ppcemb_tlb_t *tlb;
    target_ulong ret;
    int size;

    entry &= 0x3F;
    tlb = &env->tlb[entry].tlbe;
    ret = tlb->EPN;
    if (tlb->prot & PAGE_VALID)
        ret |= 0x400;
    size = booke_page_size_to_tlb(tlb->size);
    if (size < 0 || size > 0x7)
        size = 1;
    ret |= size << 7;
    env->spr[SPR_40x_PID] = tlb->PID;
    return ret;
}

target_ulong helper_4xx_tlbre_hi (target_ulong entry)
{
    ppcemb_tlb_t *tlb;
    target_ulong ret;

    entry &= 0x3F;
    tlb = &env->tlb[entry].tlbe;
    ret = tlb->RPN;
    if (tlb->prot & PAGE_EXEC)
        ret |= 0x200;
    if (tlb->prot & PAGE_WRITE)
        ret |= 0x100;
    return ret;
}

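/* In the value written below, (val >> 7) & 0x7 selects the page size,
 * 0x40 is the valid bit and 0x20 requests a little-endian mapping
 * (which is rejected).
 */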
void helper_4xx_tlbwe_hi (target_ulong entry, target_ulong val)
{
    ppcemb_tlb_t *tlb;
    target_ulong page, end;

#if defined (DEBUG_SOFTWARE_TLB)
    if (loglevel != 0) {
        fprintf(logfile, "%s entry %d val " ADDRX "\n", __func__, (int)entry, val);
    }
#endif
    entry &= 0x3F;
    tlb = &env->tlb[entry].tlbe;
    /* Invalidate previous TLB (if it's valid) */
    if (tlb->prot & PAGE_VALID) {
        end = tlb->EPN + tlb->size;
#if defined (DEBUG_SOFTWARE_TLB)
        if (loglevel != 0) {
            fprintf(logfile, "%s: invalidate old TLB %d start " ADDRX
                    " end " ADDRX "\n", __func__, (int)entry, tlb->EPN, end);
        }
#endif
        for (page = tlb->EPN; page < end; page += TARGET_PAGE_SIZE)
            tlb_flush_page(env, page);
    }
    tlb->size = booke_tlb_to_page_size((val >> 7) & 0x7);
    /* We cannot handle TLB sizes < TARGET_PAGE_SIZE.
     * If this ever occurs, one should use the ppcemb target instead
     * of the ppc or ppc64 one.
     */
    if ((val & 0x40) && tlb->size < TARGET_PAGE_SIZE) {
        cpu_abort(env, "TLB size " TARGET_FMT_lu " < %u "
                  "is not supported (%d)\n",
                  tlb->size, TARGET_PAGE_SIZE, (int)((val >> 7) & 0x7));
    }
    tlb->EPN = val & ~(tlb->size - 1);
    if (val & 0x40)
        tlb->prot |= PAGE_VALID;
    else
        tlb->prot &= ~PAGE_VALID;
    if (val & 0x20) {
        /* XXX: TO BE FIXED */
        cpu_abort(env, "Little-endian TLB entries are not supported for now\n");
    }
    tlb->PID = env->spr[SPR_40x_PID]; /* PID */
    tlb->attr = val & 0xFF;
#if defined (DEBUG_SOFTWARE_TLB)
    if (loglevel != 0) {
        fprintf(logfile, "%s: set up TLB %d RPN " PADDRX " EPN " ADDRX
                " size " ADDRX " prot %c%c%c%c PID %d\n", __func__,
                (int)entry, tlb->RPN, tlb->EPN, tlb->size,
                tlb->prot & PAGE_READ ? 'r' : '-',
                tlb->prot & PAGE_WRITE ? 'w' : '-',
                tlb->prot & PAGE_EXEC ? 'x' : '-',
                tlb->prot & PAGE_VALID ? 'v' : '-', (int)tlb->PID);
    }
#endif
    /* Invalidate new TLB (if valid) */
    if (tlb->prot & PAGE_VALID) {
        end = tlb->EPN + tlb->size;
#if defined (DEBUG_SOFTWARE_TLB)
        if (loglevel != 0) {
            fprintf(logfile, "%s: invalidate TLB %d start " ADDRX
                    " end " ADDRX "\n", __func__, (int)entry, tlb->EPN, end);
        }
#endif
        for (page = tlb->EPN; page < end; page += TARGET_PAGE_SIZE)
            tlb_flush_page(env, page);
    }
}

void helper_4xx_tlbwe_lo (target_ulong entry, target_ulong val)
{
    ppcemb_tlb_t *tlb;

#if defined (DEBUG_SOFTWARE_TLB)
    if (loglevel != 0) {
        fprintf(logfile, "%s entry %i val " ADDRX "\n", __func__, (int)entry, val);
    }
#endif
    entry &= 0x3F;
    tlb = &env->tlb[entry].tlbe;
    tlb->RPN = val & 0xFFFFFC00;
    tlb->prot = PAGE_READ;
    if (val & 0x200)
        tlb->prot |= PAGE_EXEC;
    if (val & 0x100)
        tlb->prot |= PAGE_WRITE;
#if defined (DEBUG_SOFTWARE_TLB)
    if (loglevel != 0) {
        fprintf(logfile, "%s: set up TLB %d RPN " PADDRX " EPN " ADDRX
                " size " ADDRX " prot %c%c%c%c PID %d\n", __func__,
                (int)entry, tlb->RPN, tlb->EPN, tlb->size,
                tlb->prot & PAGE_READ ? 'r' : '-',
                tlb->prot & PAGE_WRITE ? 'w' : '-',
                tlb->prot & PAGE_EXEC ? 'x' : '-',
                tlb->prot & PAGE_VALID ? 'v' : '-', (int)tlb->PID);
    }
#endif
}

target_ulong helper_4xx_tlbsx (target_ulong address)
{
    return ppcemb_tlb_search(env, address, env->spr[SPR_40x_PID]);
}

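/* The 440 tlbwe/tlbre helpers access one of the three words of a TLB entry:
 * word 0 holds the EPN, page size and valid bit, word 1 holds the RPN, and
 * word 2 holds the storage attributes and access permissions.
 */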
/* PowerPC 440 TLB management */
void helper_440_tlbwe (uint32_t word, target_ulong entry, target_ulong value)
{
    ppcemb_tlb_t *tlb;
    target_ulong EPN, RPN, size;
    int do_flush_tlbs;

#if defined (DEBUG_SOFTWARE_TLB)
    if (loglevel != 0) {
        fprintf(logfile, "%s word %d entry %d value " ADDRX "\n",
                __func__, word, (int)entry, value);
    }
#endif
    do_flush_tlbs = 0;
    entry &= 0x3F;
    tlb = &env->tlb[entry].tlbe;
    switch (word) {
    default:
        /* Just here to please gcc */
    case 0:
        EPN = value & 0xFFFFFC00;
        if ((tlb->prot & PAGE_VALID) && EPN != tlb->EPN)
            do_flush_tlbs = 1;
        tlb->EPN = EPN;
        size = booke_tlb_to_page_size((value >> 4) & 0xF);
        if ((tlb->prot & PAGE_VALID) && tlb->size < size)
            do_flush_tlbs = 1;
        tlb->size = size;
        tlb->attr &= ~0x1;
        tlb->attr |= (value >> 8) & 1;
        if (value & 0x200) {
            tlb->prot |= PAGE_VALID;
        } else {
            if (tlb->prot & PAGE_VALID) {
                tlb->prot &= ~PAGE_VALID;
                do_flush_tlbs = 1;
            }
        }
        tlb->PID = env->spr[SPR_440_MMUCR] & 0x000000FF;
        if (do_flush_tlbs)
            tlb_flush(env, 1);
        break;
    case 1:
        RPN = value & 0xFFFFFC0F;
        if ((tlb->prot & PAGE_VALID) && tlb->RPN != RPN)
            tlb_flush(env, 1);
        tlb->RPN = RPN;
        break;
    case 2:
        tlb->attr = (tlb->attr & 0x1) | (value & 0x0000FF00);
        tlb->prot = tlb->prot & PAGE_VALID;
        if (value & 0x1)
            tlb->prot |= PAGE_READ << 4;
        if (value & 0x2)
            tlb->prot |= PAGE_WRITE << 4;
        if (value & 0x4)
            tlb->prot |= PAGE_EXEC << 4;
        if (value & 0x8)
            tlb->prot |= PAGE_READ;
        if (value & 0x10)
            tlb->prot |= PAGE_WRITE;
        if (value & 0x20)
            tlb->prot |= PAGE_EXEC;
        break;
    }
}

target_ulong helper_440_tlbre (uint32_t word, target_ulong entry)
{
    ppcemb_tlb_t *tlb;
    target_ulong ret;
    int size;

    entry &= 0x3F;
    tlb = &env->tlb[entry].tlbe;
    switch (word) {
    default:
        /* Just here to please gcc */
    case 0:
        ret = tlb->EPN;
        size = booke_page_size_to_tlb(tlb->size);
        if (size < 0 || size > 0xF)
            size = 1;
        ret |= size << 4;
        if (tlb->attr & 0x1)
            ret |= 0x100;
        if (tlb->prot & PAGE_VALID)
            ret |= 0x200;
        env->spr[SPR_440_MMUCR] &= ~0x000000FF;
        env->spr[SPR_440_MMUCR] |= tlb->PID;
        break;
    case 1:
        ret = tlb->RPN;
        break;
    case 2:
        ret = tlb->attr & ~0x1;
        if (tlb->prot & (PAGE_READ << 4))
            ret |= 0x1;
        if (tlb->prot & (PAGE_WRITE << 4))
            ret |= 0x2;
        if (tlb->prot & (PAGE_EXEC << 4))
            ret |= 0x4;
        if (tlb->prot & PAGE_READ)
            ret |= 0x8;
        if (tlb->prot & PAGE_WRITE)
            ret |= 0x10;
        if (tlb->prot & PAGE_EXEC)
            ret |= 0x20;
        break;
    }
    return ret;
}

target_ulong helper_440_tlbsx (target_ulong address)
{
    return ppcemb_tlb_search(env, address, env->spr[SPR_440_MMUCR] & 0xFF);
}

#endif /* !CONFIG_USER_ONLY */