2 * PowerPC emulation helpers for qemu.
4 * Copyright (c) 2003-2007 Jocelyn Mayer
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
21 #include "host-utils.h"
24 #include "helper_regs.h"
27 //#define DEBUG_EXCEPTIONS
28 //#define DEBUG_SOFTWARE_TLB
30 /*****************************************************************************/
31 /* Exceptions processing helpers */
/* Raise a PowerPC exception with an associated error code.
 * Thin wrapper so generated code can reach raise_exception_err()
 * with the global CPU state `env`. Does not return normally. */
void helper_raise_exception_err (uint32_t exception, uint32_t error_code)
{
    raise_exception_err(env, exception, error_code);
}
/* Raise the (QEMU-internal) debug exception EXCP_DEBUG.
 * Used to drop back to the debugger/gdbstub; does not return normally. */
void helper_raise_debug (void)
{
    raise_exception(env, EXCP_DEBUG);
}
43 /*****************************************************************************/
44 /* Registers load and stores */
45 target_ulong
helper_load_cr (void)
47 return (env
->crf
[0] << 28) |
57 void helper_store_cr (target_ulong val
, uint32_t mask
)
61 for (i
= 0, sh
= 7; i
< 8; i
++, sh
--) {
63 env
->crf
[i
] = (val
>> (sh
* 4)) & 0xFUL
;
67 /*****************************************************************************/
69 void helper_load_dump_spr (uint32_t sprn
)
72 fprintf(logfile
, "Read SPR %d %03x => " ADDRX
"\n",
73 sprn
, sprn
, env
->spr
[sprn
]);
77 void helper_store_dump_spr (uint32_t sprn
)
80 fprintf(logfile
, "Write SPR %d %03x <= " ADDRX
"\n",
81 sprn
, sprn
, env
->spr
[sprn
]);
/* Read the time base, low part (TBL): delegated to the timer model. */
target_ulong helper_load_tbl (void)
{
    return cpu_ppc_load_tbl(env);
}
/* Read the time base, upper part (TBU): delegated to the timer model. */
target_ulong helper_load_tbu (void)
{
    return cpu_ppc_load_tbu(env);
}
/* Read the alternate time base, low part: delegated to the timer model. */
target_ulong helper_load_atbl (void)
{
    return cpu_ppc_load_atbl(env);
}
/* Read the alternate time base, upper part: delegated to the timer model. */
target_ulong helper_load_atbu (void)
{
    return cpu_ppc_load_atbu(env);
}
/* PowerPC 601: read the real-time clock lower register (RTCL). */
target_ulong helper_load_601_rtcl (void)
{
    return cpu_ppc601_load_rtcl(env);
}
/* PowerPC 601: read the real-time clock upper register (RTCU). */
target_ulong helper_load_601_rtcu (void)
{
    return cpu_ppc601_load_rtcu(env);
}
115 #if !defined(CONFIG_USER_ONLY)
116 #if defined (TARGET_PPC64)
/* Write the ASR SPR (64-bit CPUs only, per the surrounding #if).
 * ppc_store_asr() owns any MMU-related side effects of the update. */
void helper_store_asr (target_ulong val)
{
    ppc_store_asr(env, val);
}
/* Write the SDR1 SPR; ppc_store_sdr1() owns any MMU side effects. */
void helper_store_sdr1 (target_ulong val)
{
    ppc_store_sdr1(env, val);
}
/* Write the time base, low part: delegated to the timer model. */
void helper_store_tbl (target_ulong val)
{
    cpu_ppc_store_tbl(env, val);
}
/* Write the time base, upper part: delegated to the timer model. */
void helper_store_tbu (target_ulong val)
{
    cpu_ppc_store_tbu(env, val);
}
/* Write the alternate time base, low part: delegated to the timer model. */
void helper_store_atbl (target_ulong val)
{
    cpu_ppc_store_atbl(env, val);
}
/* Write the alternate time base, upper part: delegated to the timer model. */
void helper_store_atbu (target_ulong val)
{
    cpu_ppc_store_atbu(env, val);
}
/* PowerPC 601: write the real-time clock lower register (RTCL). */
void helper_store_601_rtcl (target_ulong val)
{
    cpu_ppc601_store_rtcl(env, val);
}
/* PowerPC 601: write the real-time clock upper register (RTCU). */
void helper_store_601_rtcu (target_ulong val)
{
    cpu_ppc601_store_rtcu(env, val);
}
/* Read the decrementer (DECR): delegated to the timer model, which
 * derives the current value from the time base. */
target_ulong helper_load_decr (void)
{
    return cpu_ppc_load_decr(env);
}
/* Write the decrementer (DECR): delegated to the timer model. */
void helper_store_decr (target_ulong val)
{
    cpu_ppc_store_decr(env, val);
}
168 void helper_store_hid0_601 (target_ulong val
)
172 hid0
= env
->spr
[SPR_HID0
];
173 if ((val
^ hid0
) & 0x00000008) {
174 /* Change current endianness */
175 env
->hflags
&= ~(1 << MSR_LE
);
176 env
->hflags_nmsr
&= ~(1 << MSR_LE
);
177 env
->hflags_nmsr
|= (1 << MSR_LE
) & (((val
>> 3) & 1) << MSR_LE
);
178 env
->hflags
|= env
->hflags_nmsr
;
180 fprintf(logfile
, "%s: set endianness to %c => " ADDRX
"\n",
181 __func__
, val
& 0x8 ? 'l' : 'b', env
->hflags
);
184 env
->spr
[SPR_HID0
] = (uint32_t)val
;
187 void helper_store_403_pbr (uint32_t num
, target_ulong value
)
189 if (likely(env
->pb
[num
] != value
)) {
190 env
->pb
[num
] = value
;
191 /* Should be optimized */
/* PowerPC 40x: read the programmable interval timer (PIT). */
target_ulong helper_load_40x_pit (void)
{
    return load_40x_pit(env);
}
/* PowerPC 40x: write the programmable interval timer (PIT). */
void helper_store_40x_pit (target_ulong val)
{
    store_40x_pit(env, val);
}
/* PowerPC 40x: write debug control register 0 (DBCR0). */
void helper_store_40x_dbcr0 (target_ulong val)
{
    store_40x_dbcr0(env, val);
}
/* PowerPC 40x: write the storage little-endian register (SLER). */
void helper_store_40x_sler (target_ulong val)
{
    store_40x_sler(env, val);
}
/* BookE: write the timer control register (TCR). */
void helper_store_booke_tcr (target_ulong val)
{
    store_booke_tcr(env, val);
}
/* BookE: write the timer status register (TSR). */
void helper_store_booke_tsr (target_ulong val)
{
    store_booke_tsr(env, val);
}
/* Write IBAT upper register `nr`; ppc_store_ibatu() owns MMU side effects. */
void helper_store_ibatu (uint32_t nr, target_ulong val)
{
    ppc_store_ibatu(env, nr, val);
}
/* Write IBAT lower register `nr`; ppc_store_ibatl() owns MMU side effects. */
void helper_store_ibatl (uint32_t nr, target_ulong val)
{
    ppc_store_ibatl(env, nr, val);
}
/* Write DBAT upper register `nr`; ppc_store_dbatu() owns MMU side effects. */
void helper_store_dbatu (uint32_t nr, target_ulong val)
{
    ppc_store_dbatu(env, nr, val);
}
/* Write DBAT lower register `nr`; ppc_store_dbatl() owns MMU side effects. */
void helper_store_dbatl (uint32_t nr, target_ulong val)
{
    ppc_store_dbatl(env, nr, val);
}
/* PowerPC 601: write BAT lower register `nr` (601-specific BAT layout,
 * hence the dedicated ppc_store_ibatl_601() entry point). */
void helper_store_601_batl (uint32_t nr, target_ulong val)
{
    ppc_store_ibatl_601(env, nr, val);
}
/* PowerPC 601: write BAT upper register `nr` (601-specific BAT layout). */
void helper_store_601_batu (uint32_t nr, target_ulong val)
{
    ppc_store_ibatu_601(env, nr, val);
}
257 /*****************************************************************************/
258 /* Memory load and stores */
260 static always_inline target_ulong
get_addr(target_ulong addr
)
262 #if defined(TARGET_PPC64)
267 return (uint32_t)addr
;
270 void helper_lmw (target_ulong addr
, uint32_t reg
)
272 for (; reg
< 32; reg
++, addr
+= 4) {
274 env
->gpr
[reg
] = bswap32(ldl(get_addr(addr
)));
276 env
->gpr
[reg
] = ldl(get_addr(addr
));
280 void helper_stmw (target_ulong addr
, uint32_t reg
)
282 for (; reg
< 32; reg
++, addr
+= 4) {
284 stl(get_addr(addr
), bswap32((uint32_t)env
->gpr
[reg
]));
286 stl(get_addr(addr
), (uint32_t)env
->gpr
[reg
]);
290 void helper_lsw(target_ulong addr
, uint32_t nb
, uint32_t reg
)
293 for (; nb
> 3; nb
-= 4, addr
+= 4) {
294 env
->gpr
[reg
] = ldl(get_addr(addr
));
295 reg
= (reg
+ 1) % 32;
297 if (unlikely(nb
> 0)) {
299 for (sh
= 24; nb
> 0; nb
--, addr
++, sh
-= 8) {
300 env
->gpr
[reg
] |= ldub(get_addr(addr
)) << sh
;
304 /* PPC32 specification says we must generate an exception if
305 * rA is in the range of registers to be loaded.
306 * On the other hand, IBM says this is valid, but rA won't be loaded.
307 * For now, I'll follow the spec...
309 void helper_lswx(target_ulong addr
, uint32_t reg
, uint32_t ra
, uint32_t rb
)
311 if (likely(xer_bc
!= 0)) {
312 if (unlikely((ra
!= 0 && reg
< ra
&& (reg
+ xer_bc
) > ra
) ||
313 (reg
< rb
&& (reg
+ xer_bc
) > rb
))) {
314 raise_exception_err(env
, POWERPC_EXCP_PROGRAM
,
316 POWERPC_EXCP_INVAL_LSWX
);
318 helper_lsw(addr
, xer_bc
, reg
);
323 void helper_stsw(target_ulong addr
, uint32_t nb
, uint32_t reg
)
326 for (; nb
> 3; nb
-= 4, addr
+= 4) {
327 stl(get_addr(addr
), env
->gpr
[reg
]);
328 reg
= (reg
+ 1) % 32;
330 if (unlikely(nb
> 0)) {
331 for (sh
= 24; nb
> 0; nb
--, addr
++, sh
-= 8)
332 stb(get_addr(addr
), (env
->gpr
[reg
] >> sh
) & 0xFF);
336 static void do_dcbz(target_ulong addr
, int dcache_line_size
)
338 target_long mask
= get_addr(~(dcache_line_size
- 1));
341 for (i
= 0 ; i
< dcache_line_size
; i
+= 4) {
344 if ((env
->reserve
& mask
) == addr
)
345 env
->reserve
= (target_ulong
)-1ULL;
/* dcbz: zero one data cache block at `addr`, using the line size
 * configured for this CPU model; the actual work is in do_dcbz(). */
void helper_dcbz(target_ulong addr)
{
    do_dcbz(addr, env->dcache_line_size);
}
353 void helper_dcbz_970(target_ulong addr
)
355 if (((env
->spr
[SPR_970_HID5
] >> 7) & 0x3) == 1)
358 do_dcbz(addr
, env
->dcache_line_size
);
361 void helper_icbi(target_ulong addr
)
365 addr
= get_addr(addr
& ~(env
->dcache_line_size
- 1));
366 /* Invalidate one cache line :
367 * PowerPC specification says this is to be treated like a load
368 * (not a fetch) by the MMU. To be sure it will be so,
369 * do the load "by hand".
372 tb_invalidate_page_range(addr
, addr
+ env
->icache_line_size
);
376 target_ulong
helper_lscbx (target_ulong addr
, uint32_t reg
, uint32_t ra
, uint32_t rb
)
380 for (i
= 0; i
< xer_bc
; i
++) {
381 c
= ldub((uint32_t)addr
++);
382 /* ra (if not 0) and rb are never modified */
383 if (likely(reg
!= rb
&& (ra
== 0 || reg
!= ra
))) {
384 env
->gpr
[reg
] = (env
->gpr
[reg
] & ~(0xFF << d
)) | (c
<< d
);
386 if (unlikely(c
== xer_cmp
))
388 if (likely(d
!= 0)) {
399 /*****************************************************************************/
400 /* Fixed point operations helpers */
401 #if defined(TARGET_PPC64)
403 /* multiply high word */
404 uint64_t helper_mulhd (uint64_t arg1
, uint64_t arg2
)
408 muls64(&tl
, &th
, arg1
, arg2
);
412 /* multiply high word unsigned */
413 uint64_t helper_mulhdu (uint64_t arg1
, uint64_t arg2
)
417 mulu64(&tl
, &th
, arg1
, arg2
);
421 uint64_t helper_mulldo (uint64_t arg1
, uint64_t arg2
)
426 muls64(&tl
, (uint64_t *)&th
, arg1
, arg2
);
427 /* If th != 0 && th != -1, then we had an overflow */
428 if (likely((uint64_t)(th
+ 1) <= 1)) {
429 env
->xer
&= ~(1 << XER_OV
);
431 env
->xer
|= (1 << XER_OV
) | (1 << XER_SO
);
437 target_ulong
helper_cntlzw (target_ulong t
)
442 #if defined(TARGET_PPC64)
443 target_ulong
helper_cntlzd (target_ulong t
)
449 /* shift right arithmetic helper */
450 target_ulong
helper_sraw (target_ulong value
, target_ulong shift
)
454 if (likely(!(shift
& 0x20))) {
455 if (likely((uint32_t)shift
!= 0)) {
457 ret
= (int32_t)value
>> shift
;
458 if (likely(ret
>= 0 || (value
& ((1 << shift
) - 1)) == 0)) {
459 env
->xer
&= ~(1 << XER_CA
);
461 env
->xer
|= (1 << XER_CA
);
464 ret
= (int32_t)value
;
465 env
->xer
&= ~(1 << XER_CA
);
468 ret
= (int32_t)value
>> 31;
470 env
->xer
|= (1 << XER_CA
);
472 env
->xer
&= ~(1 << XER_CA
);
475 return (target_long
)ret
;
478 #if defined(TARGET_PPC64)
479 target_ulong
helper_srad (target_ulong value
, target_ulong shift
)
483 if (likely(!(shift
& 0x40))) {
484 if (likely((uint64_t)shift
!= 0)) {
486 ret
= (int64_t)value
>> shift
;
487 if (likely(ret
>= 0 || (value
& ((1 << shift
) - 1)) == 0)) {
488 env
->xer
&= ~(1 << XER_CA
);
490 env
->xer
|= (1 << XER_CA
);
493 ret
= (int64_t)value
;
494 env
->xer
&= ~(1 << XER_CA
);
497 ret
= (int64_t)value
>> 63;
499 env
->xer
|= (1 << XER_CA
);
501 env
->xer
&= ~(1 << XER_CA
);
508 target_ulong
helper_popcntb (target_ulong val
)
510 val
= (val
& 0x55555555) + ((val
>> 1) & 0x55555555);
511 val
= (val
& 0x33333333) + ((val
>> 2) & 0x33333333);
512 val
= (val
& 0x0f0f0f0f) + ((val
>> 4) & 0x0f0f0f0f);
516 #if defined(TARGET_PPC64)
517 target_ulong
helper_popcntb_64 (target_ulong val
)
519 val
= (val
& 0x5555555555555555ULL
) + ((val
>> 1) & 0x5555555555555555ULL
);
520 val
= (val
& 0x3333333333333333ULL
) + ((val
>> 2) & 0x3333333333333333ULL
);
521 val
= (val
& 0x0f0f0f0f0f0f0f0fULL
) + ((val
>> 4) & 0x0f0f0f0f0f0f0f0fULL
);
526 /*****************************************************************************/
527 /* Floating point operations helpers */
528 uint64_t helper_float32_to_float64(uint32_t arg
)
533 d
.d
= float32_to_float64(f
.f
, &env
->fp_status
);
537 uint32_t helper_float64_to_float32(uint64_t arg
)
542 f
.f
= float64_to_float32(d
.d
, &env
->fp_status
);
546 static always_inline
int fpisneg (float64 d
)
552 return u
.ll
>> 63 != 0;
555 static always_inline
int isden (float64 d
)
561 return ((u
.ll
>> 52) & 0x7FF) == 0;
564 static always_inline
int iszero (float64 d
)
570 return (u
.ll
& ~0x8000000000000000ULL
) == 0;
573 static always_inline
int isinfinity (float64 d
)
579 return ((u
.ll
>> 52) & 0x7FF) == 0x7FF &&
580 (u
.ll
& 0x000FFFFFFFFFFFFFULL
) == 0;
583 #ifdef CONFIG_SOFTFLOAT
584 static always_inline
int isfinite (float64 d
)
590 return (((u
.ll
>> 52) & 0x7FF) != 0x7FF);
593 static always_inline
int isnormal (float64 d
)
599 uint32_t exp
= (u
.ll
>> 52) & 0x7FF;
600 return ((0 < exp
) && (exp
< 0x7FF));
604 uint32_t helper_compute_fprf (uint64_t arg
, uint32_t set_fprf
)
610 isneg
= fpisneg(farg
.d
);
611 if (unlikely(float64_is_nan(farg
.d
))) {
612 if (float64_is_signaling_nan(farg
.d
)) {
613 /* Signaling NaN: flags are undefined */
619 } else if (unlikely(isinfinity(farg
.d
))) {
626 if (iszero(farg
.d
)) {
634 /* Denormalized numbers */
637 /* Normalized numbers */
648 /* We update FPSCR_FPRF */
649 env
->fpscr
&= ~(0x1F << FPSCR_FPRF
);
650 env
->fpscr
|= ret
<< FPSCR_FPRF
;
652 /* We just need fpcc to update Rc1 */
656 /* Floating-point invalid operations exception */
657 static always_inline
uint64_t fload_invalid_op_excp (int op
)
663 if (op
& POWERPC_EXCP_FP_VXSNAN
) {
664 /* Operation on signaling NaN */
665 env
->fpscr
|= 1 << FPSCR_VXSNAN
;
667 if (op
& POWERPC_EXCP_FP_VXSOFT
) {
668 /* Software-defined condition */
669 env
->fpscr
|= 1 << FPSCR_VXSOFT
;
671 switch (op
& ~(POWERPC_EXCP_FP_VXSOFT
| POWERPC_EXCP_FP_VXSNAN
)) {
672 case POWERPC_EXCP_FP_VXISI
:
673 /* Magnitude subtraction of infinities */
674 env
->fpscr
|= 1 << FPSCR_VXISI
;
676 case POWERPC_EXCP_FP_VXIDI
:
677 /* Division of infinity by infinity */
678 env
->fpscr
|= 1 << FPSCR_VXIDI
;
680 case POWERPC_EXCP_FP_VXZDZ
:
681 /* Division of zero by zero */
682 env
->fpscr
|= 1 << FPSCR_VXZDZ
;
684 case POWERPC_EXCP_FP_VXIMZ
:
685 /* Multiplication of zero by infinity */
686 env
->fpscr
|= 1 << FPSCR_VXIMZ
;
688 case POWERPC_EXCP_FP_VXVC
:
689 /* Ordered comparison of NaN */
690 env
->fpscr
|= 1 << FPSCR_VXVC
;
691 env
->fpscr
&= ~(0xF << FPSCR_FPCC
);
692 env
->fpscr
|= 0x11 << FPSCR_FPCC
;
693 /* We must update the target FPR before raising the exception */
695 env
->exception_index
= POWERPC_EXCP_PROGRAM
;
696 env
->error_code
= POWERPC_EXCP_FP
| POWERPC_EXCP_FP_VXVC
;
697 /* Update the floating-point enabled exception summary */
698 env
->fpscr
|= 1 << FPSCR_FEX
;
699 /* Exception is deferred */
703 case POWERPC_EXCP_FP_VXSQRT
:
704 /* Square root of a negative number */
705 env
->fpscr
|= 1 << FPSCR_VXSQRT
;
707 env
->fpscr
&= ~((1 << FPSCR_FR
) | (1 << FPSCR_FI
));
709 /* Set the result to quiet NaN */
711 env
->fpscr
&= ~(0xF << FPSCR_FPCC
);
712 env
->fpscr
|= 0x11 << FPSCR_FPCC
;
715 case POWERPC_EXCP_FP_VXCVI
:
716 /* Invalid conversion */
717 env
->fpscr
|= 1 << FPSCR_VXCVI
;
718 env
->fpscr
&= ~((1 << FPSCR_FR
) | (1 << FPSCR_FI
));
720 /* Set the result to quiet NaN */
722 env
->fpscr
&= ~(0xF << FPSCR_FPCC
);
723 env
->fpscr
|= 0x11 << FPSCR_FPCC
;
727 /* Update the floating-point invalid operation summary */
728 env
->fpscr
|= 1 << FPSCR_VX
;
729 /* Update the floating-point exception summary */
730 env
->fpscr
|= 1 << FPSCR_FX
;
732 /* Update the floating-point enabled exception summary */
733 env
->fpscr
|= 1 << FPSCR_FEX
;
734 if (msr_fe0
!= 0 || msr_fe1
!= 0)
735 raise_exception_err(env
, POWERPC_EXCP_PROGRAM
, POWERPC_EXCP_FP
| op
);
740 static always_inline
uint64_t float_zero_divide_excp (uint64_t arg1
, uint64_t arg2
)
742 env
->fpscr
|= 1 << FPSCR_ZX
;
743 env
->fpscr
&= ~((1 << FPSCR_FR
) | (1 << FPSCR_FI
));
744 /* Update the floating-point exception summary */
745 env
->fpscr
|= 1 << FPSCR_FX
;
747 /* Update the floating-point enabled exception summary */
748 env
->fpscr
|= 1 << FPSCR_FEX
;
749 if (msr_fe0
!= 0 || msr_fe1
!= 0) {
750 raise_exception_err(env
, POWERPC_EXCP_PROGRAM
,
751 POWERPC_EXCP_FP
| POWERPC_EXCP_FP_ZX
);
754 /* Set the result to infinity */
755 arg1
= ((arg1
^ arg2
) & 0x8000000000000000ULL
);
756 arg1
|= 0x7FFULL
<< 52;
761 static always_inline
void float_overflow_excp (void)
763 env
->fpscr
|= 1 << FPSCR_OX
;
764 /* Update the floating-point exception summary */
765 env
->fpscr
|= 1 << FPSCR_FX
;
767 /* XXX: should adjust the result */
768 /* Update the floating-point enabled exception summary */
769 env
->fpscr
|= 1 << FPSCR_FEX
;
770 /* We must update the target FPR before raising the exception */
771 env
->exception_index
= POWERPC_EXCP_PROGRAM
;
772 env
->error_code
= POWERPC_EXCP_FP
| POWERPC_EXCP_FP_OX
;
774 env
->fpscr
|= 1 << FPSCR_XX
;
775 env
->fpscr
|= 1 << FPSCR_FI
;
779 static always_inline
void float_underflow_excp (void)
781 env
->fpscr
|= 1 << FPSCR_UX
;
782 /* Update the floating-point exception summary */
783 env
->fpscr
|= 1 << FPSCR_FX
;
785 /* XXX: should adjust the result */
786 /* Update the floating-point enabled exception summary */
787 env
->fpscr
|= 1 << FPSCR_FEX
;
788 /* We must update the target FPR before raising the exception */
789 env
->exception_index
= POWERPC_EXCP_PROGRAM
;
790 env
->error_code
= POWERPC_EXCP_FP
| POWERPC_EXCP_FP_UX
;
794 static always_inline
void float_inexact_excp (void)
796 env
->fpscr
|= 1 << FPSCR_XX
;
797 /* Update the floating-point exception summary */
798 env
->fpscr
|= 1 << FPSCR_FX
;
800 /* Update the floating-point enabled exception summary */
801 env
->fpscr
|= 1 << FPSCR_FEX
;
802 /* We must update the target FPR before raising the exception */
803 env
->exception_index
= POWERPC_EXCP_PROGRAM
;
804 env
->error_code
= POWERPC_EXCP_FP
| POWERPC_EXCP_FP_XX
;
808 static always_inline
void fpscr_set_rounding_mode (void)
812 /* Set rounding mode */
815 /* Best approximation (round to nearest) */
816 rnd_type
= float_round_nearest_even
;
819 /* Smaller magnitude (round toward zero) */
820 rnd_type
= float_round_to_zero
;
823 /* Round toward +infinite */
824 rnd_type
= float_round_up
;
828 /* Round toward -infinite */
829 rnd_type
= float_round_down
;
832 set_float_rounding_mode(rnd_type
, &env
->fp_status
);
835 void helper_fpscr_setbit (uint32_t bit
)
839 prev
= (env
->fpscr
>> bit
) & 1;
840 env
->fpscr
|= 1 << bit
;
844 env
->fpscr
|= 1 << FPSCR_FX
;
848 env
->fpscr
|= 1 << FPSCR_FX
;
853 env
->fpscr
|= 1 << FPSCR_FX
;
858 env
->fpscr
|= 1 << FPSCR_FX
;
863 env
->fpscr
|= 1 << FPSCR_FX
;
876 env
->fpscr
|= 1 << FPSCR_VX
;
877 env
->fpscr
|= 1 << FPSCR_FX
;
884 env
->error_code
= POWERPC_EXCP_FP
;
886 env
->error_code
|= POWERPC_EXCP_FP_VXSNAN
;
888 env
->error_code
|= POWERPC_EXCP_FP_VXISI
;
890 env
->error_code
|= POWERPC_EXCP_FP_VXIDI
;
892 env
->error_code
|= POWERPC_EXCP_FP_VXZDZ
;
894 env
->error_code
|= POWERPC_EXCP_FP_VXIMZ
;
896 env
->error_code
|= POWERPC_EXCP_FP_VXVC
;
898 env
->error_code
|= POWERPC_EXCP_FP_VXSOFT
;
900 env
->error_code
|= POWERPC_EXCP_FP_VXSQRT
;
902 env
->error_code
|= POWERPC_EXCP_FP_VXCVI
;
909 env
->error_code
= POWERPC_EXCP_FP
| POWERPC_EXCP_FP_OX
;
916 env
->error_code
= POWERPC_EXCP_FP
| POWERPC_EXCP_FP_UX
;
923 env
->error_code
= POWERPC_EXCP_FP
| POWERPC_EXCP_FP_ZX
;
930 env
->error_code
= POWERPC_EXCP_FP
| POWERPC_EXCP_FP_XX
;
936 fpscr_set_rounding_mode();
941 /* Update the floating-point enabled exception summary */
942 env
->fpscr
|= 1 << FPSCR_FEX
;
943 /* We have to update Rc1 before raising the exception */
944 env
->exception_index
= POWERPC_EXCP_PROGRAM
;
950 void helper_store_fpscr (uint64_t arg
, uint32_t mask
)
953 * We use only the 32 LSB of the incoming fpr
961 new |= prev
& 0x90000000;
962 for (i
= 0; i
< 7; i
++) {
963 if (mask
& (1 << i
)) {
964 env
->fpscr
&= ~(0xF << (4 * i
));
965 env
->fpscr
|= new & (0xF << (4 * i
));
968 /* Update VX and FEX */
970 env
->fpscr
|= 1 << FPSCR_VX
;
972 env
->fpscr
&= ~(1 << FPSCR_VX
);
973 if ((fpscr_ex
& fpscr_eex
) != 0) {
974 env
->fpscr
|= 1 << FPSCR_FEX
;
975 env
->exception_index
= POWERPC_EXCP_PROGRAM
;
976 /* XXX: we should compute it properly */
977 env
->error_code
= POWERPC_EXCP_FP
;
980 env
->fpscr
&= ~(1 << FPSCR_FEX
);
981 fpscr_set_rounding_mode();
984 void helper_float_check_status (void)
986 #ifdef CONFIG_SOFTFLOAT
987 if (env
->exception_index
== POWERPC_EXCP_PROGRAM
&&
988 (env
->error_code
& POWERPC_EXCP_FP
)) {
989 /* Deferred floating-point exception after target FPR update */
990 if (msr_fe0
!= 0 || msr_fe1
!= 0)
991 raise_exception_err(env
, env
->exception_index
, env
->error_code
);
992 } else if (env
->fp_status
.float_exception_flags
& float_flag_overflow
) {
993 float_overflow_excp();
994 } else if (env
->fp_status
.float_exception_flags
& float_flag_underflow
) {
995 float_underflow_excp();
996 } else if (env
->fp_status
.float_exception_flags
& float_flag_inexact
) {
997 float_inexact_excp();
1000 if (env
->exception_index
== POWERPC_EXCP_PROGRAM
&&
1001 (env
->error_code
& POWERPC_EXCP_FP
)) {
1002 /* Deferred floating-point exception after target FPR update */
1003 if (msr_fe0
!= 0 || msr_fe1
!= 0)
1004 raise_exception_err(env
, env
->exception_index
, env
->error_code
);
1009 #ifdef CONFIG_SOFTFLOAT
1010 void helper_reset_fpstatus (void)
1012 env
->fp_status
.float_exception_flags
= 0;
1017 uint64_t helper_fadd (uint64_t arg1
, uint64_t arg2
)
1019 CPU_DoubleU farg1
, farg2
;
1023 #if USE_PRECISE_EMULATION
1024 if (unlikely(float64_is_signaling_nan(farg1
.d
) ||
1025 float64_is_signaling_nan(farg2
.d
))) {
1027 farg1
.ll
= fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN
);
1028 } else if (likely(isfinite(farg1
.d
) || isfinite(farg2
.d
) ||
1029 fpisneg(farg1
.d
) == fpisneg(farg2
.d
))) {
1030 farg1
.d
= float64_add(farg1
.d
, farg2
.d
, &env
->fp_status
);
1032 /* Magnitude subtraction of infinities */
1033 farg1
.ll
== fload_invalid_op_excp(POWERPC_EXCP_FP_VXISI
);
1036 farg1
.d
= float64_add(farg1
.d
, farg2
.d
, &env
->fp_status
);
1042 uint64_t helper_fsub (uint64_t arg1
, uint64_t arg2
)
1044 CPU_DoubleU farg1
, farg2
;
1048 #if USE_PRECISE_EMULATION
1050 if (unlikely(float64_is_signaling_nan(farg1
.d
) ||
1051 float64_is_signaling_nan(farg2
.d
))) {
1052 /* sNaN subtraction */
1053 farg1
.ll
= fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN
);
1054 } else if (likely(isfinite(farg1
.d
) || isfinite(farg2
.d
) ||
1055 fpisneg(farg1
.d
) != fpisneg(farg2
.d
))) {
1056 farg1
.d
= float64_sub(farg1
.d
, farg2
.d
, &env
->fp_status
);
1058 /* Magnitude subtraction of infinities */
1059 farg1
.ll
= fload_invalid_op_excp(POWERPC_EXCP_FP_VXISI
);
1063 farg1
.d
= float64_sub(farg1
.d
, farg2
.d
, &env
->fp_status
);
1069 uint64_t helper_fmul (uint64_t arg1
, uint64_t arg2
)
1071 CPU_DoubleU farg1
, farg2
;
1075 #if USE_PRECISE_EMULATION
1076 if (unlikely(float64_is_signaling_nan(farg1
.d
) ||
1077 float64_is_signaling_nan(farg2
.d
))) {
1078 /* sNaN multiplication */
1079 farg1
.ll
= fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN
);
1080 } else if (unlikely((isinfinity(farg1
.d
) && iszero(farg2
.d
)) ||
1081 (iszero(farg1
.d
) && isinfinity(farg2
.d
)))) {
1082 /* Multiplication of zero by infinity */
1083 farg1
.ll
= fload_invalid_op_excp(POWERPC_EXCP_FP_VXIMZ
);
1085 farg1
.d
= float64_mul(farg1
.d
, farg2
.d
, &env
->fp_status
);
1089 farg1
.d
= float64_mul(farg1
.d
, farg2
.d
, &env
->fp_status
);
1095 uint64_t helper_fdiv (uint64_t arg1
, uint64_t arg2
)
1097 CPU_DoubleU farg1
, farg2
;
1101 #if USE_PRECISE_EMULATION
1102 if (unlikely(float64_is_signaling_nan(farg1
.d
) ||
1103 float64_is_signaling_nan(farg2
.d
))) {
1105 farg1
.ll
= fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN
);
1106 } else if (unlikely(isinfinity(farg1
.d
) && isinfinity(farg2
.d
))) {
1107 /* Division of infinity by infinity */
1108 farg1
.ll
= fload_invalid_op_excp(POWERPC_EXCP_FP_VXIDI
);
1109 } else if (unlikely(iszero(farg2
.d
))) {
1110 if (iszero(farg1
.d
)) {
1111 /* Division of zero by zero */
1112 farg1
.ll
fload_invalid_op_excp(POWERPC_EXCP_FP_VXZDZ
);
1114 /* Division by zero */
1115 farg1
.ll
= float_zero_divide_excp(farg1
.d
, farg2
.d
);
1118 farg1
.d
= float64_div(farg1
.d
, farg2
.d
, &env
->fp_status
);
1121 farg1
.d
= float64_div(farg1
.d
, farg2
.d
, &env
->fp_status
);
1127 uint64_t helper_fabs (uint64_t arg
)
1132 farg
.d
= float64_abs(farg
.d
);
1137 uint64_t helper_fnabs (uint64_t arg
)
1142 farg
.d
= float64_abs(farg
.d
);
1143 farg
.d
= float64_chs(farg
.d
);
1148 uint64_t helper_fneg (uint64_t arg
)
1153 farg
.d
= float64_chs(farg
.d
);
1157 /* fctiw - fctiw. */
1158 uint64_t helper_fctiw (uint64_t arg
)
1163 if (unlikely(float64_is_signaling_nan(farg
.d
))) {
1164 /* sNaN conversion */
1165 farg
.ll
= fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN
| POWERPC_EXCP_FP_VXCVI
);
1166 } else if (unlikely(float64_is_nan(farg
.d
) || isinfinity(farg
.d
))) {
1167 /* qNan / infinity conversion */
1168 farg
.ll
= fload_invalid_op_excp(POWERPC_EXCP_FP_VXCVI
);
1170 farg
.ll
= float64_to_int32(farg
.d
, &env
->fp_status
);
1171 #if USE_PRECISE_EMULATION
1172 /* XXX: higher bits are not supposed to be significant.
1173 * to make tests easier, return the same as a real PowerPC 750
1175 farg
.ll
|= 0xFFF80000ULL
<< 32;
1181 /* fctiwz - fctiwz. */
1182 uint64_t helper_fctiwz (uint64_t arg
)
1187 if (unlikely(float64_is_signaling_nan(farg
.d
))) {
1188 /* sNaN conversion */
1189 farg
.ll
= fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN
| POWERPC_EXCP_FP_VXCVI
);
1190 } else if (unlikely(float64_is_nan(farg
.d
) || isinfinity(farg
.d
))) {
1191 /* qNan / infinity conversion */
1192 farg
.ll
= fload_invalid_op_excp(POWERPC_EXCP_FP_VXCVI
);
1194 farg
.ll
= float64_to_int32_round_to_zero(farg
.d
, &env
->fp_status
);
1195 #if USE_PRECISE_EMULATION
1196 /* XXX: higher bits are not supposed to be significant.
1197 * to make tests easier, return the same as a real PowerPC 750
1199 farg
.ll
|= 0xFFF80000ULL
<< 32;
1205 #if defined(TARGET_PPC64)
1206 /* fcfid - fcfid. */
1207 uint64_t helper_fcfid (uint64_t arg
)
1210 farg
.d
= int64_to_float64(arg
, &env
->fp_status
);
1214 /* fctid - fctid. */
1215 uint64_t helper_fctid (uint64_t arg
)
1220 if (unlikely(float64_is_signaling_nan(farg
.d
))) {
1221 /* sNaN conversion */
1222 farg
.ll
= fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN
| POWERPC_EXCP_FP_VXCVI
);
1223 } else if (unlikely(float64_is_nan(farg
.d
) || isinfinity(farg
.d
))) {
1224 /* qNan / infinity conversion */
1225 farg
.ll
= fload_invalid_op_excp(POWERPC_EXCP_FP_VXCVI
);
1227 farg
.ll
= float64_to_int64(farg
.d
, &env
->fp_status
);
1232 /* fctidz - fctidz. */
1233 uint64_t helper_fctidz (uint64_t arg
)
1238 if (unlikely(float64_is_signaling_nan(farg
.d
))) {
1239 /* sNaN conversion */
1240 farg
.ll
= fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN
| POWERPC_EXCP_FP_VXCVI
);
1241 } else if (unlikely(float64_is_nan(farg
.d
) || isinfinity(farg
.d
))) {
1242 /* qNan / infinity conversion */
1243 farg
.ll
= fload_invalid_op_excp(POWERPC_EXCP_FP_VXCVI
);
1245 farg
.ll
= float64_to_int64_round_to_zero(farg
.d
, &env
->fp_status
);
1252 static always_inline
uint64_t do_fri (uint64_t arg
, int rounding_mode
)
1257 if (unlikely(float64_is_signaling_nan(farg
.d
))) {
1259 farg
.ll
= fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN
| POWERPC_EXCP_FP_VXCVI
);
1260 } else if (unlikely(float64_is_nan(farg
.d
) || isinfinity(farg
.d
))) {
1261 /* qNan / infinity round */
1262 farg
.ll
= fload_invalid_op_excp(POWERPC_EXCP_FP_VXCVI
);
1264 set_float_rounding_mode(rounding_mode
, &env
->fp_status
);
1265 farg
.ll
= float64_round_to_int(farg
.d
, &env
->fp_status
);
1266 /* Restore rounding mode from FPSCR */
1267 fpscr_set_rounding_mode();
/* frin: round to FP integer, round-to-nearest-even; shares do_fri(). */
uint64_t helper_frin (uint64_t arg)
{
    return do_fri(arg, float_round_nearest_even);
}
/* friz: round to FP integer, round-toward-zero; shares do_fri(). */
uint64_t helper_friz (uint64_t arg)
{
    return do_fri(arg, float_round_to_zero);
}
/* frip: round to FP integer, round-toward-+infinity; shares do_fri(). */
uint64_t helper_frip (uint64_t arg)
{
    return do_fri(arg, float_round_up);
}
/* frim: round to FP integer, round-toward--infinity; shares do_fri(). */
uint64_t helper_frim (uint64_t arg)
{
    return do_fri(arg, float_round_down);
}
1292 /* fmadd - fmadd. */
1293 uint64_t helper_fmadd (uint64_t arg1
, uint64_t arg2
, uint64_t arg3
)
1295 CPU_DoubleU farg1
, farg2
, farg3
;
1300 #if USE_PRECISE_EMULATION
1301 if (unlikely(float64_is_signaling_nan(farg1
.d
) ||
1302 float64_is_signaling_nan(farg2
.d
) ||
1303 float64_is_signaling_nan(farg3
.d
))) {
1304 /* sNaN operation */
1305 farg1
.ll
= fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN
);
1308 /* This is the way the PowerPC specification defines it */
1309 float128 ft0_128
, ft1_128
;
1311 ft0_128
= float64_to_float128(farg1
.d
, &env
->fp_status
);
1312 ft1_128
= float64_to_float128(farg2
.d
, &env
->fp_status
);
1313 ft0_128
= float128_mul(ft0_128
, ft1_128
, &env
->fp_status
);
1314 ft1_128
= float64_to_float128(farg3
.d
, &env
->fp_status
);
1315 ft0_128
= float128_add(ft0_128
, ft1_128
, &env
->fp_status
);
1316 farg1
.d
= float128_to_float64(ft0_128
, &env
->fp_status
);
1318 /* This is OK on x86 hosts */
1319 farg1
.d
= (farg1
.d
* farg2
.d
) + farg3
.d
;
1323 farg1
.d
= float64_mul(farg1
.d
, farg2
.d
, &env
->fp_status
);
1324 farg1
.d
= float64_add(farg1
.d
, farg3
.d
, &env
->fp_status
);
1329 /* fmsub - fmsub. */
1330 uint64_t helper_fmsub (uint64_t arg1
, uint64_t arg2
, uint64_t arg3
)
1332 CPU_DoubleU farg1
, farg2
, farg3
;
1337 #if USE_PRECISE_EMULATION
1338 if (unlikely(float64_is_signaling_nan(farg1
.d
) ||
1339 float64_is_signaling_nan(farg2
.d
) ||
1340 float64_is_signaling_nan(farg3
.d
))) {
1341 /* sNaN operation */
1342 farg1
.ll
= fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN
);
1345 /* This is the way the PowerPC specification defines it */
1346 float128 ft0_128
, ft1_128
;
1348 ft0_128
= float64_to_float128(farg1
.d
, &env
->fp_status
);
1349 ft1_128
= float64_to_float128(farg2
.d
, &env
->fp_status
);
1350 ft0_128
= float128_mul(ft0_128
, ft1_128
, &env
->fp_status
);
1351 ft1_128
= float64_to_float128(farg3
.d
, &env
->fp_status
);
1352 ft0_128
= float128_sub(ft0_128
, ft1_128
, &env
->fp_status
);
1353 farg1
.d
= float128_to_float64(ft0_128
, &env
->fp_status
);
1355 /* This is OK on x86 hosts */
1356 farg1
.d
= (farg1
.d
* farg2
.d
) - farg3
.d
;
1360 farg1
.d
= float64_mul(farg1
.d
, farg2
.d
, &env
->fp_status
);
1361 farg1
.d
= float64_sub(farg1
.d
, farg3
.d
, &env
->fp_status
);
1366 /* fnmadd - fnmadd. */
1367 uint64_t helper_fnmadd (uint64_t arg1
, uint64_t arg2
, uint64_t arg3
)
1369 CPU_DoubleU farg1
, farg2
, farg3
;
1375 if (unlikely(float64_is_signaling_nan(farg1
.d
) ||
1376 float64_is_signaling_nan(farg2
.d
) ||
1377 float64_is_signaling_nan(farg3
.d
))) {
1378 /* sNaN operation */
1379 farg1
.ll
= fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN
);
1381 #if USE_PRECISE_EMULATION
1383 /* This is the way the PowerPC specification defines it */
1384 float128 ft0_128
, ft1_128
;
1386 ft0_128
= float64_to_float128(farg1
.d
, &env
->fp_status
);
1387 ft1_128
= float64_to_float128(farg2
.d
, &env
->fp_status
);
1388 ft0_128
= float128_mul(ft0_128
, ft1_128
, &env
->fp_status
);
1389 ft1_128
= float64_to_float128(farg3
.d
, &env
->fp_status
);
1390 ft0_128
= float128_add(ft0_128
, ft1_128
, &env
->fp_status
);
1391 farg1
.d
= float128_to_float64(ft0_128
, &env
->fp_status
);
1393 /* This is OK on x86 hosts */
1394 farg1
.d
= (farg1
.d
* farg2
.d
) + farg3
.d
;
1397 farg1
.d
= float64_mul(farg1
.d
, farg2
.d
, &env
->fp_status
);
1398 farg1
.d
= float64_add(farg1
.d
, farg3
.d
, &env
->fp_status
);
1400 if (likely(!isnan(farg1
.d
)))
1401 farg1
.d
= float64_chs(farg1
.d
);
1406 /* fnmsub - fnmsub. */
1407 uint64_t helper_fnmsub (uint64_t arg1
, uint64_t arg2
, uint64_t arg3
)
1409 CPU_DoubleU farg1
, farg2
, farg3
;
1415 if (unlikely(float64_is_signaling_nan(farg1
.d
) ||
1416 float64_is_signaling_nan(farg2
.d
) ||
1417 float64_is_signaling_nan(farg3
.d
))) {
1418 /* sNaN operation */
1419 farg1
.ll
= fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN
);
1421 #if USE_PRECISE_EMULATION
1423 /* This is the way the PowerPC specification defines it */
1424 float128 ft0_128
, ft1_128
;
1426 ft0_128
= float64_to_float128(farg1
.d
, &env
->fp_status
);
1427 ft1_128
= float64_to_float128(farg2
.d
, &env
->fp_status
);
1428 ft0_128
= float128_mul(ft0_128
, ft1_128
, &env
->fp_status
);
1429 ft1_128
= float64_to_float128(farg3
.d
, &env
->fp_status
);
1430 ft0_128
= float128_sub(ft0_128
, ft1_128
, &env
->fp_status
);
1431 farg1
.d
= float128_to_float64(ft0_128
, &env
->fp_status
);
1433 /* This is OK on x86 hosts */
1434 farg1
.d
= (farg1
.d
* farg2
.d
) - farg3
.d
;
1437 farg1
.d
= float64_mul(farg1
.d
, farg2
.d
, &env
->fp_status
);
1438 farg1
.d
= float64_sub(farg1
.d
, farg3
.d
, &env
->fp_status
);
1440 if (likely(!isnan(farg1
.d
)))
1441 farg1
.d
= float64_chs(farg1
.d
);
1447 uint64_t helper_frsp (uint64_t arg
)
1452 #if USE_PRECISE_EMULATION
1453 if (unlikely(float64_is_signaling_nan(farg
.d
))) {
1454 /* sNaN square root */
1455 farg
.ll
= fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN
);
1457 fard
.d
= float64_to_float32(farg
.d
, &env
->fp_status
);
1460 farg
.d
= float64_to_float32(farg
.d
, &env
->fp_status
);
1465 /* fsqrt - fsqrt. */
1466 uint64_t helper_fsqrt (uint64_t arg
)
1471 if (unlikely(float64_is_signaling_nan(farg
.d
))) {
1472 /* sNaN square root */
1473 farg
.ll
= fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN
);
1474 } else if (unlikely(fpisneg(farg
.d
) && !iszero(farg
.d
))) {
1475 /* Square root of a negative nonzero number */
1476 farg
.ll
= fload_invalid_op_excp(POWERPC_EXCP_FP_VXSQRT
);
1478 farg
.d
= float64_sqrt(farg
.d
, &env
->fp_status
);
1484 uint64_t helper_fre (uint64_t arg
)
1489 if (unlikely(float64_is_signaling_nan(farg
.d
))) {
1490 /* sNaN reciprocal */
1491 farg
.ll
= fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN
);
1492 } else if (unlikely(iszero(farg
.d
))) {
1493 /* Zero reciprocal */
1494 farg
.ll
= float_zero_divide_excp(1.0, farg
.d
);
1495 } else if (likely(isnormal(farg
.d
))) {
1496 farg
.d
= float64_div(1.0, farg
.d
, &env
->fp_status
);
1498 if (farg
.ll
== 0x8000000000000000ULL
) {
1499 farg
.ll
= 0xFFF0000000000000ULL
;
1500 } else if (farg
.ll
== 0x0000000000000000ULL
) {
1501 farg
.ll
= 0x7FF0000000000000ULL
;
1502 } else if (isnan(farg
.d
)) {
1503 farg
.ll
= 0x7FF8000000000000ULL
;
1504 } else if (fpisneg(farg
.d
)) {
1505 farg
.ll
= 0x8000000000000000ULL
;
1507 farg
.ll
= 0x0000000000000000ULL
;
1514 uint64_t helper_fres (uint64_t arg
)
1519 if (unlikely(float64_is_signaling_nan(farg
.d
))) {
1520 /* sNaN reciprocal */
1521 farg
.ll
= fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN
);
1522 } else if (unlikely(iszero(farg
.d
))) {
1523 /* Zero reciprocal */
1524 farg
.ll
= float_zero_divide_excp(1.0, farg
.d
);
1525 } else if (likely(isnormal(farg
.d
))) {
1526 #if USE_PRECISE_EMULATION
1527 farg
.d
= float64_div(1.0, farg
.d
, &env
->fp_status
);
1528 farg
.d
= float64_to_float32(farg
.d
, &env
->fp_status
);
1530 farg
.d
= float32_div(1.0, farg
.d
, &env
->fp_status
);
1533 if (farg
.ll
== 0x8000000000000000ULL
) {
1534 farg
.ll
= 0xFFF0000000000000ULL
;
1535 } else if (farg
.ll
== 0x0000000000000000ULL
) {
1536 farg
.ll
= 0x7FF0000000000000ULL
;
1537 } else if (isnan(farg
.d
)) {
1538 farg
.ll
= 0x7FF8000000000000ULL
;
1539 } else if (fpisneg(farg
.d
)) {
1540 farg
.ll
= 0x8000000000000000ULL
;
1542 farg
.ll
= 0x0000000000000000ULL
;
1548 /* frsqrte - frsqrte. */
1549 uint64_t helper_frsqrte (uint64_t arg
)
1554 if (unlikely(float64_is_signaling_nan(farg
.d
))) {
1555 /* sNaN reciprocal square root */
1556 farg
.ll
= fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN
);
1557 } else if (unlikely(fpisneg(farg
.d
) && !iszero(farg
.d
))) {
1558 /* Reciprocal square root of a negative nonzero number */
1559 farg
.ll
= fload_invalid_op_excp(POWERPC_EXCP_FP_VXSQRT
);
1560 } else if (likely(isnormal(farg
.d
))) {
1561 farg
.d
= float64_sqrt(farg
.d
, &env
->fp_status
);
1562 farg
.d
= float32_div(1.0, farg
.d
, &env
->fp_status
);
1564 if (farg
.ll
== 0x8000000000000000ULL
) {
1565 farg
.ll
= 0xFFF0000000000000ULL
;
1566 } else if (farg
.ll
== 0x0000000000000000ULL
) {
1567 farg
.ll
= 0x7FF0000000000000ULL
;
1568 } else if (isnan(farg
.d
)) {
1569 farg
.ll
|= 0x000FFFFFFFFFFFFFULL
;
1570 } else if (fpisneg(farg
.d
)) {
1571 farg
.ll
= 0x7FF8000000000000ULL
;
1573 farg
.ll
= 0x0000000000000000ULL
;
1580 uint64_t helper_fsel (uint64_t arg1
, uint64_t arg2
, uint64_t arg3
)
1582 CPU_DoubleU farg1
, farg2
, farg3
;
1588 if (!fpisneg(farg1
.d
) || iszero(farg1
.d
))
1594 uint32_t helper_fcmpu (uint64_t arg1
, uint64_t arg2
)
1596 CPU_DoubleU farg1
, farg2
;
1601 if (unlikely(float64_is_signaling_nan(farg1
.d
) ||
1602 float64_is_signaling_nan(farg2
.d
))) {
1603 /* sNaN comparison */
1604 fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN
);
1606 if (float64_lt(farg1
.d
, farg2
.d
, &env
->fp_status
)) {
1608 } else if (!float64_le(farg1
.d
, farg2
.d
, &env
->fp_status
)) {
1614 env
->fpscr
&= ~(0x0F << FPSCR_FPRF
);
1615 env
->fpscr
|= ret
<< FPSCR_FPRF
;
1619 uint32_t helper_fcmpo (uint64_t arg1
, uint64_t arg2
)
1621 CPU_DoubleU farg1
, farg2
;
1626 if (unlikely(float64_is_nan(farg1
.d
) ||
1627 float64_is_nan(farg2
.d
))) {
1628 if (float64_is_signaling_nan(farg1
.d
) ||
1629 float64_is_signaling_nan(farg2
.d
)) {
1630 /* sNaN comparison */
1631 fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN
|
1632 POWERPC_EXCP_FP_VXVC
);
1634 /* qNaN comparison */
1635 fload_invalid_op_excp(POWERPC_EXCP_FP_VXVC
);
1638 if (float64_lt(farg1
.d
, farg2
.d
, &env
->fp_status
)) {
1640 } else if (!float64_le(farg1
.d
, farg2
.d
, &env
->fp_status
)) {
1646 env
->fpscr
&= ~(0x0F << FPSCR_FPRF
);
1647 env
->fpscr
|= ret
<< FPSCR_FPRF
;
1651 #if !defined (CONFIG_USER_ONLY)
1652 void helper_store_msr (target_ulong val
)
1654 val
= hreg_store_msr(env
, val
, 0);
1656 env
->interrupt_request
|= CPU_INTERRUPT_EXITTB
;
1657 raise_exception(env
, val
);
1661 void cpu_dump_rfi (target_ulong RA
, target_ulong msr
);
1663 static always_inline
void do_rfi (target_ulong nip
, target_ulong msr
,
1664 target_ulong msrm
, int keep_msrh
)
1666 #if defined(TARGET_PPC64)
1667 if (msr
& (1ULL << MSR_SF
)) {
1668 nip
= (uint64_t)nip
;
1669 msr
&= (uint64_t)msrm
;
1671 nip
= (uint32_t)nip
;
1672 msr
= (uint32_t)(msr
& msrm
);
1674 msr
|= env
->msr
& ~((uint64_t)0xFFFFFFFF);
1677 nip
= (uint32_t)nip
;
1678 msr
&= (uint32_t)msrm
;
1680 /* XXX: beware: this is false if VLE is supported */
1681 env
->nip
= nip
& ~((target_ulong
)0x00000003);
1682 hreg_store_msr(env
, msr
, 1);
1683 #if defined (DEBUG_OP)
1684 cpu_dump_rfi(env
->nip
, env
->msr
);
1686 /* No need to raise an exception here,
1687 * as rfi is always the last insn of a TB
1689 env
->interrupt_request
|= CPU_INTERRUPT_EXITTB
;
1692 void helper_rfi (void)
1694 do_rfi(env
->spr
[SPR_SRR0
], env
->spr
[SPR_SRR1
],
1695 ~((target_ulong
)0xFFFF0000), 1);
1698 #if defined(TARGET_PPC64)
1699 void helper_rfid (void)
1701 do_rfi(env
->spr
[SPR_SRR0
], env
->spr
[SPR_SRR1
],
1702 ~((target_ulong
)0xFFFF0000), 0);
1705 void helper_hrfid (void)
1707 do_rfi(env
->spr
[SPR_HSRR0
], env
->spr
[SPR_HSRR1
],
1708 ~((target_ulong
)0xFFFF0000), 0);
1713 void helper_tw (target_ulong arg1
, target_ulong arg2
, uint32_t flags
)
1715 if (!likely(!(((int32_t)arg1
< (int32_t)arg2
&& (flags
& 0x10)) ||
1716 ((int32_t)arg1
> (int32_t)arg2
&& (flags
& 0x08)) ||
1717 ((int32_t)arg1
== (int32_t)arg2
&& (flags
& 0x04)) ||
1718 ((uint32_t)arg1
< (uint32_t)arg2
&& (flags
& 0x02)) ||
1719 ((uint32_t)arg1
> (uint32_t)arg2
&& (flags
& 0x01))))) {
1720 raise_exception_err(env
, POWERPC_EXCP_PROGRAM
, POWERPC_EXCP_TRAP
);
1724 #if defined(TARGET_PPC64)
1725 void helper_td (target_ulong arg1
, target_ulong arg2
, uint32_t flags
)
1727 if (!likely(!(((int64_t)arg1
< (int64_t)arg2
&& (flags
& 0x10)) ||
1728 ((int64_t)arg1
> (int64_t)arg2
&& (flags
& 0x08)) ||
1729 ((int64_t)arg1
== (int64_t)arg2
&& (flags
& 0x04)) ||
1730 ((uint64_t)arg1
< (uint64_t)arg2
&& (flags
& 0x02)) ||
1731 ((uint64_t)arg1
> (uint64_t)arg2
&& (flags
& 0x01)))))
1732 raise_exception_err(env
, POWERPC_EXCP_PROGRAM
, POWERPC_EXCP_TRAP
);
1736 /*****************************************************************************/
1737 /* PowerPC 601 specific instructions (POWER bridge) */
1739 target_ulong
helper_clcs (uint32_t arg
)
1743 /* Instruction cache line size */
1744 return env
->icache_line_size
;
1747 /* Data cache line size */
1748 return env
->dcache_line_size
;
1751 /* Minimum cache line size */
1752 return (env
->icache_line_size
< env
->dcache_line_size
) ?
1753 env
->icache_line_size
: env
->dcache_line_size
;
1756 /* Maximum cache line size */
1757 return (env
->icache_line_size
> env
->dcache_line_size
) ?
1758 env
->icache_line_size
: env
->dcache_line_size
;
1767 target_ulong
helper_div (target_ulong arg1
, target_ulong arg2
)
1769 uint64_t tmp
= (uint64_t)arg1
<< 32 | env
->spr
[SPR_MQ
];
1771 if (((int32_t)tmp
== INT32_MIN
&& (int32_t)arg2
== (int32_t)-1) ||
1772 (int32_t)arg2
== 0) {
1773 env
->spr
[SPR_MQ
] = 0;
1776 env
->spr
[SPR_MQ
] = tmp
% arg2
;
1777 return tmp
/ (int32_t)arg2
;
1781 target_ulong
helper_divo (target_ulong arg1
, target_ulong arg2
)
1783 uint64_t tmp
= (uint64_t)arg1
<< 32 | env
->spr
[SPR_MQ
];
1785 if (((int32_t)tmp
== INT32_MIN
&& (int32_t)arg2
== (int32_t)-1) ||
1786 (int32_t)arg2
== 0) {
1787 env
->xer
|= (1 << XER_OV
) | (1 << XER_SO
);
1788 env
->spr
[SPR_MQ
] = 0;
1791 env
->spr
[SPR_MQ
] = tmp
% arg2
;
1792 tmp
/= (int32_t)arg2
;
1793 if ((int32_t)tmp
!= tmp
) {
1794 env
->xer
|= (1 << XER_OV
) | (1 << XER_SO
);
1796 env
->xer
&= ~(1 << XER_OV
);
1802 target_ulong
helper_divs (target_ulong arg1
, target_ulong arg2
)
1804 if (((int32_t)arg1
== INT32_MIN
&& (int32_t)arg2
== (int32_t)-1) ||
1805 (int32_t)arg2
== 0) {
1806 env
->spr
[SPR_MQ
] = 0;
1809 env
->spr
[SPR_MQ
] = (int32_t)arg1
% (int32_t)arg2
;
1810 return (int32_t)arg1
/ (int32_t)arg2
;
1814 target_ulong
helper_divso (target_ulong arg1
, target_ulong arg2
)
1816 if (((int32_t)arg1
== INT32_MIN
&& (int32_t)arg2
== (int32_t)-1) ||
1817 (int32_t)arg2
== 0) {
1818 env
->xer
|= (1 << XER_OV
) | (1 << XER_SO
);
1819 env
->spr
[SPR_MQ
] = 0;
1822 env
->xer
&= ~(1 << XER_OV
);
1823 env
->spr
[SPR_MQ
] = (int32_t)arg1
% (int32_t)arg2
;
1824 return (int32_t)arg1
/ (int32_t)arg2
;
1828 #if !defined (CONFIG_USER_ONLY)
1829 target_ulong
helper_rac (target_ulong addr
)
1833 target_ulong ret
= 0;
1835 /* We don't have to generate many instances of this instruction,
1836 * as rac is supervisor only.
1838 /* XXX: FIX THIS: Pretend we have no BAT */
1839 nb_BATs
= env
->nb_BATs
;
1841 if (get_physical_address(env
, &ctx
, addr
, 0, ACCESS_INT
) == 0)
1843 env
->nb_BATs
= nb_BATs
;
1847 void helper_rfsvc (void)
1849 do_rfi(env
->lr
, env
->ctr
, 0x0000FFFF, 0);
1853 /*****************************************************************************/
1854 /* 602 specific instructions */
1855 /* mfrom is the most crazy instruction ever seen, imho ! */
1856 /* Real implementation uses a ROM table. Do the same */
1857 #define USE_MFROM_ROM_TABLE
1858 target_ulong
helper_602_mfrom (target_ulong arg
)
1860 if (likely(arg
< 602)) {
1861 #if defined(USE_MFROM_ROM_TABLE)
1862 #include "mfrom_table.c"
1863 return mfrom_ROM_table
[arg
];
1866 /* Extremly decomposed:
1868 * return 256 * log10(10 + 1.0) + 0.5
1871 d
= float64_div(d
, 256, &env
->fp_status
);
1873 d
= exp10(d
); // XXX: use float emulation function
1874 d
= float64_add(d
, 1.0, &env
->fp_status
);
1875 d
= log10(d
); // XXX: use float emulation function
1876 d
= float64_mul(d
, 256, &env
->fp_status
);
1877 d
= float64_add(d
, 0.5, &env
->fp_status
);
1878 return float64_round_to_int(d
, &env
->fp_status
);
1885 /*****************************************************************************/
1886 /* Embedded PowerPC specific helpers */
1888 /* XXX: to be improved to check access rights when in user-mode */
1889 target_ulong
helper_load_dcr (target_ulong dcrn
)
1891 target_ulong val
= 0;
1893 if (unlikely(env
->dcr_env
== NULL
)) {
1894 if (loglevel
!= 0) {
1895 fprintf(logfile
, "No DCR environment\n");
1897 raise_exception_err(env
, POWERPC_EXCP_PROGRAM
,
1898 POWERPC_EXCP_INVAL
| POWERPC_EXCP_INVAL_INVAL
);
1899 } else if (unlikely(ppc_dcr_read(env
->dcr_env
, dcrn
, &val
) != 0)) {
1900 if (loglevel
!= 0) {
1901 fprintf(logfile
, "DCR read error %d %03x\n", (int)dcrn
, (int)dcrn
);
1903 raise_exception_err(env
, POWERPC_EXCP_PROGRAM
,
1904 POWERPC_EXCP_INVAL
| POWERPC_EXCP_PRIV_REG
);
1909 void helper_store_dcr (target_ulong dcrn
, target_ulong val
)
1911 if (unlikely(env
->dcr_env
== NULL
)) {
1912 if (loglevel
!= 0) {
1913 fprintf(logfile
, "No DCR environment\n");
1915 raise_exception_err(env
, POWERPC_EXCP_PROGRAM
,
1916 POWERPC_EXCP_INVAL
| POWERPC_EXCP_INVAL_INVAL
);
1917 } else if (unlikely(ppc_dcr_write(env
->dcr_env
, dcrn
, val
) != 0)) {
1918 if (loglevel
!= 0) {
1919 fprintf(logfile
, "DCR write error %d %03x\n", (int)dcrn
, (int)dcrn
);
1921 raise_exception_err(env
, POWERPC_EXCP_PROGRAM
,
1922 POWERPC_EXCP_INVAL
| POWERPC_EXCP_PRIV_REG
);
1926 #if !defined(CONFIG_USER_ONLY)
1927 void helper_40x_rfci (void)
1929 do_rfi(env
->spr
[SPR_40x_SRR2
], env
->spr
[SPR_40x_SRR3
],
1930 ~((target_ulong
)0xFFFF0000), 0);
1933 void helper_rfci (void)
1935 do_rfi(env
->spr
[SPR_BOOKE_CSRR0
], SPR_BOOKE_CSRR1
,
1936 ~((target_ulong
)0x3FFF0000), 0);
1939 void helper_rfdi (void)
1941 do_rfi(env
->spr
[SPR_BOOKE_DSRR0
], SPR_BOOKE_DSRR1
,
1942 ~((target_ulong
)0x3FFF0000), 0);
1945 void helper_rfmci (void)
1947 do_rfi(env
->spr
[SPR_BOOKE_MCSRR0
], SPR_BOOKE_MCSRR1
,
1948 ~((target_ulong
)0x3FFF0000), 0);
1953 target_ulong
helper_dlmzb (target_ulong high
, target_ulong low
, uint32_t update_Rc
)
1959 for (mask
= 0xFF000000; mask
!= 0; mask
= mask
>> 8) {
1960 if ((high
& mask
) == 0) {
1968 for (mask
= 0xFF000000; mask
!= 0; mask
= mask
>> 8) {
1969 if ((low
& mask
) == 0) {
1981 env
->xer
= (env
->xer
& ~0x7F) | i
;
1983 env
->crf
[0] |= xer_so
;
1988 /*****************************************************************************/
1989 /* SPE extension helpers */
1990 /* Use a table to make this quicker */
1991 static uint8_t hbrev
[16] = {
1992 0x0, 0x8, 0x4, 0xC, 0x2, 0xA, 0x6, 0xE,
1993 0x1, 0x9, 0x5, 0xD, 0x3, 0xB, 0x7, 0xF,
1996 static always_inline
uint8_t byte_reverse (uint8_t val
)
1998 return hbrev
[val
>> 4] | (hbrev
[val
& 0xF] << 4);
2001 static always_inline
uint32_t word_reverse (uint32_t val
)
2003 return byte_reverse(val
>> 24) | (byte_reverse(val
>> 16) << 8) |
2004 (byte_reverse(val
>> 8) << 16) | (byte_reverse(val
) << 24);
2007 #define MASKBITS 16 // Random value - to be fixed (implementation dependant)
2008 target_ulong
helper_brinc (target_ulong arg1
, target_ulong arg2
)
2010 uint32_t a
, b
, d
, mask
;
2012 mask
= UINT32_MAX
>> (32 - MASKBITS
);
2015 d
= word_reverse(1 + word_reverse(a
| ~b
));
2016 return (arg1
& ~mask
) | (d
& b
);
2019 uint32_t helper_cntlsw32 (uint32_t val
)
2021 if (val
& 0x80000000)
2027 uint32_t helper_cntlzw32 (uint32_t val
)
2032 /* Single-precision floating-point conversions */
2033 static always_inline
uint32_t efscfsi (uint32_t val
)
2037 u
.f
= int32_to_float32(val
, &env
->spe_status
);
2042 static always_inline
uint32_t efscfui (uint32_t val
)
2046 u
.f
= uint32_to_float32(val
, &env
->spe_status
);
2051 static always_inline
int32_t efsctsi (uint32_t val
)
2056 /* NaN are not treated the same way IEEE 754 does */
2057 if (unlikely(isnan(u
.f
)))
2060 return float32_to_int32(u
.f
, &env
->spe_status
);
2063 static always_inline
uint32_t efsctui (uint32_t val
)
2068 /* NaN are not treated the same way IEEE 754 does */
2069 if (unlikely(isnan(u
.f
)))
2072 return float32_to_uint32(u
.f
, &env
->spe_status
);
2075 static always_inline
uint32_t efsctsiz (uint32_t val
)
2080 /* NaN are not treated the same way IEEE 754 does */
2081 if (unlikely(isnan(u
.f
)))
2084 return float32_to_int32_round_to_zero(u
.f
, &env
->spe_status
);
2087 static always_inline
uint32_t efsctuiz (uint32_t val
)
2092 /* NaN are not treated the same way IEEE 754 does */
2093 if (unlikely(isnan(u
.f
)))
2096 return float32_to_uint32_round_to_zero(u
.f
, &env
->spe_status
);
2099 static always_inline
uint32_t efscfsf (uint32_t val
)
2104 u
.f
= int32_to_float32(val
, &env
->spe_status
);
2105 tmp
= int64_to_float32(1ULL << 32, &env
->spe_status
);
2106 u
.f
= float32_div(u
.f
, tmp
, &env
->spe_status
);
2111 static always_inline
uint32_t efscfuf (uint32_t val
)
2116 u
.f
= uint32_to_float32(val
, &env
->spe_status
);
2117 tmp
= uint64_to_float32(1ULL << 32, &env
->spe_status
);
2118 u
.f
= float32_div(u
.f
, tmp
, &env
->spe_status
);
2123 static always_inline
uint32_t efsctsf (uint32_t val
)
2129 /* NaN are not treated the same way IEEE 754 does */
2130 if (unlikely(isnan(u
.f
)))
2132 tmp
= uint64_to_float32(1ULL << 32, &env
->spe_status
);
2133 u
.f
= float32_mul(u
.f
, tmp
, &env
->spe_status
);
2135 return float32_to_int32(u
.f
, &env
->spe_status
);
2138 static always_inline
uint32_t efsctuf (uint32_t val
)
2144 /* NaN are not treated the same way IEEE 754 does */
2145 if (unlikely(isnan(u
.f
)))
2147 tmp
= uint64_to_float32(1ULL << 32, &env
->spe_status
);
2148 u
.f
= float32_mul(u
.f
, tmp
, &env
->spe_status
);
2150 return float32_to_uint32(u
.f
, &env
->spe_status
);
2153 #define HELPER_SPE_SINGLE_CONV(name) \
2154 uint32_t helper_e##name (uint32_t val) \
2156 return e##name(val); \
2159 HELPER_SPE_SINGLE_CONV(fscfsi
);
2161 HELPER_SPE_SINGLE_CONV(fscfui
);
2163 HELPER_SPE_SINGLE_CONV(fscfuf
);
2165 HELPER_SPE_SINGLE_CONV(fscfsf
);
2167 HELPER_SPE_SINGLE_CONV(fsctsi
);
2169 HELPER_SPE_SINGLE_CONV(fsctui
);
2171 HELPER_SPE_SINGLE_CONV(fsctsiz
);
2173 HELPER_SPE_SINGLE_CONV(fsctuiz
);
2175 HELPER_SPE_SINGLE_CONV(fsctsf
);
2177 HELPER_SPE_SINGLE_CONV(fsctuf
);
2179 #define HELPER_SPE_VECTOR_CONV(name) \
2180 uint64_t helper_ev##name (uint64_t val) \
2182 return ((uint64_t)e##name(val >> 32) << 32) | \
2183 (uint64_t)e##name(val); \
2186 HELPER_SPE_VECTOR_CONV(fscfsi
);
2188 HELPER_SPE_VECTOR_CONV(fscfui
);
2190 HELPER_SPE_VECTOR_CONV(fscfuf
);
2192 HELPER_SPE_VECTOR_CONV(fscfsf
);
2194 HELPER_SPE_VECTOR_CONV(fsctsi
);
2196 HELPER_SPE_VECTOR_CONV(fsctui
);
2198 HELPER_SPE_VECTOR_CONV(fsctsiz
);
2200 HELPER_SPE_VECTOR_CONV(fsctuiz
);
2202 HELPER_SPE_VECTOR_CONV(fsctsf
);
2204 HELPER_SPE_VECTOR_CONV(fsctuf
);
2206 /* Single-precision floating-point arithmetic */
2207 static always_inline
uint32_t efsadd (uint32_t op1
, uint32_t op2
)
2212 u1
.f
= float32_add(u1
.f
, u2
.f
, &env
->spe_status
);
2216 static always_inline
uint32_t efssub (uint32_t op1
, uint32_t op2
)
2221 u1
.f
= float32_sub(u1
.f
, u2
.f
, &env
->spe_status
);
2225 static always_inline
uint32_t efsmul (uint32_t op1
, uint32_t op2
)
2230 u1
.f
= float32_mul(u1
.f
, u2
.f
, &env
->spe_status
);
2234 static always_inline
uint32_t efsdiv (uint32_t op1
, uint32_t op2
)
2239 u1
.f
= float32_div(u1
.f
, u2
.f
, &env
->spe_status
);
2243 #define HELPER_SPE_SINGLE_ARITH(name) \
2244 uint32_t helper_e##name (uint32_t op1, uint32_t op2) \
2246 return e##name(op1, op2); \
2249 HELPER_SPE_SINGLE_ARITH(fsadd
);
2251 HELPER_SPE_SINGLE_ARITH(fssub
);
2253 HELPER_SPE_SINGLE_ARITH(fsmul
);
2255 HELPER_SPE_SINGLE_ARITH(fsdiv
);
2257 #define HELPER_SPE_VECTOR_ARITH(name) \
2258 uint64_t helper_ev##name (uint64_t op1, uint64_t op2) \
2260 return ((uint64_t)e##name(op1 >> 32, op2 >> 32) << 32) | \
2261 (uint64_t)e##name(op1, op2); \
2264 HELPER_SPE_VECTOR_ARITH(fsadd
);
2266 HELPER_SPE_VECTOR_ARITH(fssub
);
2268 HELPER_SPE_VECTOR_ARITH(fsmul
);
2270 HELPER_SPE_VECTOR_ARITH(fsdiv
);
2272 /* Single-precision floating-point comparisons */
2273 static always_inline
uint32_t efststlt (uint32_t op1
, uint32_t op2
)
2278 return float32_lt(u1
.f
, u2
.f
, &env
->spe_status
) ? 4 : 0;
2281 static always_inline
uint32_t efststgt (uint32_t op1
, uint32_t op2
)
2286 return float32_le(u1
.f
, u2
.f
, &env
->spe_status
) ? 0 : 4;
2289 static always_inline
uint32_t efststeq (uint32_t op1
, uint32_t op2
)
2294 return float32_eq(u1
.f
, u2
.f
, &env
->spe_status
) ? 4 : 0;
2297 static always_inline
uint32_t efscmplt (uint32_t op1
, uint32_t op2
)
2299 /* XXX: TODO: test special values (NaN, infinites, ...) */
2300 return efststlt(op1
, op2
);
2303 static always_inline
uint32_t efscmpgt (uint32_t op1
, uint32_t op2
)
2305 /* XXX: TODO: test special values (NaN, infinites, ...) */
2306 return efststgt(op1
, op2
);
2309 static always_inline
uint32_t efscmpeq (uint32_t op1
, uint32_t op2
)
2311 /* XXX: TODO: test special values (NaN, infinites, ...) */
2312 return efststeq(op1
, op2
);
2315 #define HELPER_SINGLE_SPE_CMP(name) \
2316 uint32_t helper_e##name (uint32_t op1, uint32_t op2) \
2318 return e##name(op1, op2) << 2; \
2321 HELPER_SINGLE_SPE_CMP(fststlt
);
2323 HELPER_SINGLE_SPE_CMP(fststgt
);
2325 HELPER_SINGLE_SPE_CMP(fststeq
);
2327 HELPER_SINGLE_SPE_CMP(fscmplt
);
2329 HELPER_SINGLE_SPE_CMP(fscmpgt
);
2331 HELPER_SINGLE_SPE_CMP(fscmpeq
);
2333 static always_inline
uint32_t evcmp_merge (int t0
, int t1
)
2335 return (t0
<< 3) | (t1
<< 2) | ((t0
| t1
) << 1) | (t0
& t1
);
2338 #define HELPER_VECTOR_SPE_CMP(name) \
2339 uint32_t helper_ev##name (uint64_t op1, uint64_t op2) \
2341 return evcmp_merge(e##name(op1 >> 32, op2 >> 32), e##name(op1, op2)); \
2344 HELPER_VECTOR_SPE_CMP(fststlt
);
2346 HELPER_VECTOR_SPE_CMP(fststgt
);
2348 HELPER_VECTOR_SPE_CMP(fststeq
);
2350 HELPER_VECTOR_SPE_CMP(fscmplt
);
2352 HELPER_VECTOR_SPE_CMP(fscmpgt
);
2354 HELPER_VECTOR_SPE_CMP(fscmpeq
);
2356 /* Double-precision floating-point conversion */
2357 uint64_t helper_efdcfsi (uint32_t val
)
2361 u
.d
= int32_to_float64(val
, &env
->spe_status
);
2366 uint64_t helper_efdcfsid (uint64_t val
)
2370 u
.d
= int64_to_float64(val
, &env
->spe_status
);
2375 uint64_t helper_efdcfui (uint32_t val
)
2379 u
.d
= uint32_to_float64(val
, &env
->spe_status
);
2384 uint64_t helper_efdcfuid (uint64_t val
)
2388 u
.d
= uint64_to_float64(val
, &env
->spe_status
);
2393 uint32_t helper_efdctsi (uint64_t val
)
2398 /* NaN are not treated the same way IEEE 754 does */
2399 if (unlikely(isnan(u
.d
)))
2402 return float64_to_int32(u
.d
, &env
->spe_status
);
2405 uint32_t helper_efdctui (uint64_t val
)
2410 /* NaN are not treated the same way IEEE 754 does */
2411 if (unlikely(isnan(u
.d
)))
2414 return float64_to_uint32(u
.d
, &env
->spe_status
);
2417 uint32_t helper_efdctsiz (uint64_t val
)
2422 /* NaN are not treated the same way IEEE 754 does */
2423 if (unlikely(isnan(u
.d
)))
2426 return float64_to_int32_round_to_zero(u
.d
, &env
->spe_status
);
2429 uint64_t helper_efdctsidz (uint64_t val
)
2434 /* NaN are not treated the same way IEEE 754 does */
2435 if (unlikely(isnan(u
.d
)))
2438 return float64_to_int64_round_to_zero(u
.d
, &env
->spe_status
);
2441 uint32_t helper_efdctuiz (uint64_t val
)
2446 /* NaN are not treated the same way IEEE 754 does */
2447 if (unlikely(isnan(u
.d
)))
2450 return float64_to_uint32_round_to_zero(u
.d
, &env
->spe_status
);
2453 uint64_t helper_efdctuidz (uint64_t val
)
2458 /* NaN are not treated the same way IEEE 754 does */
2459 if (unlikely(isnan(u
.d
)))
2462 return float64_to_uint64_round_to_zero(u
.d
, &env
->spe_status
);
2465 uint64_t helper_efdcfsf (uint32_t val
)
2470 u
.d
= int32_to_float64(val
, &env
->spe_status
);
2471 tmp
= int64_to_float64(1ULL << 32, &env
->spe_status
);
2472 u
.d
= float64_div(u
.d
, tmp
, &env
->spe_status
);
2477 uint64_t helper_efdcfuf (uint32_t val
)
2482 u
.d
= uint32_to_float64(val
, &env
->spe_status
);
2483 tmp
= int64_to_float64(1ULL << 32, &env
->spe_status
);
2484 u
.d
= float64_div(u
.d
, tmp
, &env
->spe_status
);
2489 uint32_t helper_efdctsf (uint64_t val
)
2495 /* NaN are not treated the same way IEEE 754 does */
2496 if (unlikely(isnan(u
.d
)))
2498 tmp
= uint64_to_float64(1ULL << 32, &env
->spe_status
);
2499 u
.d
= float64_mul(u
.d
, tmp
, &env
->spe_status
);
2501 return float64_to_int32(u
.d
, &env
->spe_status
);
2504 uint32_t helper_efdctuf (uint64_t val
)
2510 /* NaN are not treated the same way IEEE 754 does */
2511 if (unlikely(isnan(u
.d
)))
2513 tmp
= uint64_to_float64(1ULL << 32, &env
->spe_status
);
2514 u
.d
= float64_mul(u
.d
, tmp
, &env
->spe_status
);
2516 return float64_to_uint32(u
.d
, &env
->spe_status
);
2519 uint32_t helper_efscfd (uint64_t val
)
2525 u2
.f
= float64_to_float32(u1
.d
, &env
->spe_status
);
2530 uint64_t helper_efdcfs (uint32_t val
)
2536 u2
.d
= float32_to_float64(u1
.f
, &env
->spe_status
);
2541 /* Double precision fixed-point arithmetic */
2542 uint64_t helper_efdadd (uint64_t op1
, uint64_t op2
)
2547 u1
.d
= float64_add(u1
.d
, u2
.d
, &env
->spe_status
);
2551 uint64_t helper_efdsub (uint64_t op1
, uint64_t op2
)
2556 u1
.d
= float64_sub(u1
.d
, u2
.d
, &env
->spe_status
);
2560 uint64_t helper_efdmul (uint64_t op1
, uint64_t op2
)
2565 u1
.d
= float64_mul(u1
.d
, u2
.d
, &env
->spe_status
);
2569 uint64_t helper_efddiv (uint64_t op1
, uint64_t op2
)
2574 u1
.d
= float64_div(u1
.d
, u2
.d
, &env
->spe_status
);
2578 /* Double precision floating point helpers */
2579 uint32_t helper_efdtstlt (uint64_t op1
, uint64_t op2
)
2584 return float64_lt(u1
.d
, u2
.d
, &env
->spe_status
) ? 4 : 0;
2587 uint32_t helper_efdtstgt (uint64_t op1
, uint64_t op2
)
2592 return float64_le(u1
.d
, u2
.d
, &env
->spe_status
) ? 0 : 4;
2595 uint32_t helper_efdtsteq (uint64_t op1
, uint64_t op2
)
2600 return float64_eq(u1
.d
, u2
.d
, &env
->spe_status
) ? 4 : 0;
2603 uint32_t helper_efdcmplt (uint64_t op1
, uint64_t op2
)
2605 /* XXX: TODO: test special values (NaN, infinites, ...) */
2606 return helper_efdtstlt(op1
, op2
);
2609 uint32_t helper_efdcmpgt (uint64_t op1
, uint64_t op2
)
2611 /* XXX: TODO: test special values (NaN, infinites, ...) */
2612 return helper_efdtstgt(op1
, op2
);
2615 uint32_t helper_efdcmpeq (uint64_t op1
, uint64_t op2
)
2617 /* XXX: TODO: test special values (NaN, infinites, ...) */
2618 return helper_efdtsteq(op1
, op2
);
2621 /*****************************************************************************/
2622 /* Softmmu support */
2623 #if !defined (CONFIG_USER_ONLY)
2625 #define MMUSUFFIX _mmu
2628 #include "softmmu_template.h"
2631 #include "softmmu_template.h"
2634 #include "softmmu_template.h"
2637 #include "softmmu_template.h"
2639 /* try to fill the TLB and return an exception if error. If retaddr is
2640 NULL, it means that the function was called in C code (i.e. not
2641 from generated code or from helper.c) */
2642 /* XXX: fix it to restore all registers */
2643 void tlb_fill (target_ulong addr
, int is_write
, int mmu_idx
, void *retaddr
)
2645 TranslationBlock
*tb
;
2646 CPUState
*saved_env
;
2650 /* XXX: hack to restore env in all cases, even if not called from
2653 env
= cpu_single_env
;
2654 ret
= cpu_ppc_handle_mmu_fault(env
, addr
, is_write
, mmu_idx
, 1);
2655 if (unlikely(ret
!= 0)) {
2656 if (likely(retaddr
)) {
2657 /* now we have a real cpu fault */
2658 pc
= (unsigned long)retaddr
;
2659 tb
= tb_find_pc(pc
);
2661 /* the PC is inside the translated code. It means that we have
2662 a virtual CPU fault */
2663 cpu_restore_state(tb
, env
, pc
, NULL
);
2666 raise_exception_err(env
, env
->exception_index
, env
->error_code
);
2671 /* Segment registers load and store */
2672 target_ulong
helper_load_sr (target_ulong sr_num
)
2674 return env
->sr
[sr_num
];
2677 void helper_store_sr (target_ulong sr_num
, target_ulong val
)
2679 ppc_store_sr(env
, sr_num
, val
);
2682 /* SLB management */
2683 #if defined(TARGET_PPC64)
2684 target_ulong
helper_load_slb (target_ulong slb_nr
)
2686 return ppc_load_slb(env
, slb_nr
);
2689 void helper_store_slb (target_ulong slb_nr
, target_ulong rs
)
2691 ppc_store_slb(env
, slb_nr
, rs
);
2694 void helper_slbia (void)
2696 ppc_slb_invalidate_all(env
);
2699 void helper_slbie (target_ulong addr
)
2701 ppc_slb_invalidate_one(env
, addr
);
2704 #endif /* defined(TARGET_PPC64) */
2706 /* TLB management */
2707 void helper_tlbia (void)
2709 ppc_tlb_invalidate_all(env
);
2712 void helper_tlbie (target_ulong addr
)
2714 ppc_tlb_invalidate_one(env
, addr
);
2717 /* Software driven TLBs management */
2718 /* PowerPC 602/603 software TLB load instructions helpers */
2719 static void do_6xx_tlb (target_ulong new_EPN
, int is_code
)
2721 target_ulong RPN
, CMP
, EPN
;
2724 RPN
= env
->spr
[SPR_RPA
];
2726 CMP
= env
->spr
[SPR_ICMP
];
2727 EPN
= env
->spr
[SPR_IMISS
];
2729 CMP
= env
->spr
[SPR_DCMP
];
2730 EPN
= env
->spr
[SPR_DMISS
];
2732 way
= (env
->spr
[SPR_SRR1
] >> 17) & 1;
2733 #if defined (DEBUG_SOFTWARE_TLB)
2734 if (loglevel
!= 0) {
2735 fprintf(logfile
, "%s: EPN " TDX
" " ADDRX
" PTE0 " ADDRX
2736 " PTE1 " ADDRX
" way %d\n",
2737 __func__
, T0
, EPN
, CMP
, RPN
, way
);
2740 /* Store this TLB */
2741 ppc6xx_tlb_store(env
, (uint32_t)(new_EPN
& TARGET_PAGE_MASK
),
2742 way
, is_code
, CMP
, RPN
);
2745 void helper_6xx_tlbd (target_ulong EPN
)
2750 void helper_6xx_tlbi (target_ulong EPN
)
2755 /* PowerPC 74xx software TLB load instructions helpers */
2756 static void do_74xx_tlb (target_ulong new_EPN
, int is_code
)
2758 target_ulong RPN
, CMP
, EPN
;
2761 RPN
= env
->spr
[SPR_PTELO
];
2762 CMP
= env
->spr
[SPR_PTEHI
];
2763 EPN
= env
->spr
[SPR_TLBMISS
] & ~0x3;
2764 way
= env
->spr
[SPR_TLBMISS
] & 0x3;
2765 #if defined (DEBUG_SOFTWARE_TLB)
2766 if (loglevel
!= 0) {
2767 fprintf(logfile
, "%s: EPN " TDX
" " ADDRX
" PTE0 " ADDRX
2768 " PTE1 " ADDRX
" way %d\n",
2769 __func__
, T0
, EPN
, CMP
, RPN
, way
);
2772 /* Store this TLB */
2773 ppc6xx_tlb_store(env
, (uint32_t)(new_EPN
& TARGET_PAGE_MASK
),
2774 way
, is_code
, CMP
, RPN
);
2777 void helper_74xx_tlbd (target_ulong EPN
)
2779 do_74xx_tlb(EPN
, 0);
2782 void helper_74xx_tlbi (target_ulong EPN
)
2784 do_74xx_tlb(EPN
, 1);
2787 static always_inline target_ulong
booke_tlb_to_page_size (int size
)
2789 return 1024 << (2 * size
);
2792 static always_inline
int booke_page_size_to_tlb (target_ulong page_size
)
2796 switch (page_size
) {
2830 #if defined (TARGET_PPC64)
2831 case 0x000100000000ULL
:
2834 case 0x000400000000ULL
:
2837 case 0x001000000000ULL
:
2840 case 0x004000000000ULL
:
2843 case 0x010000000000ULL
:
2855 /* Helpers for 4xx TLB management */
2856 target_ulong
helper_4xx_tlbre_lo (target_ulong entry
)
2863 tlb
= &env
->tlb
[entry
].tlbe
;
2865 if (tlb
->prot
& PAGE_VALID
)
2867 size
= booke_page_size_to_tlb(tlb
->size
);
2868 if (size
< 0 || size
> 0x7)
2871 env
->spr
[SPR_40x_PID
] = tlb
->PID
;
2875 target_ulong
helper_4xx_tlbre_hi (target_ulong entry
)
2881 tlb
= &env
->tlb
[entry
].tlbe
;
2883 if (tlb
->prot
& PAGE_EXEC
)
2885 if (tlb
->prot
& PAGE_WRITE
)
2890 void helper_4xx_tlbwe_hi (target_ulong entry
, target_ulong val
)
2893 target_ulong page
, end
;
2895 #if defined (DEBUG_SOFTWARE_TLB)
2896 if (loglevel
!= 0) {
2897 fprintf(logfile
, "%s entry " TDX
" val " TDX
"\n", __func__
, entry
, val
);
2901 tlb
= &env
->tlb
[entry
].tlbe
;
2902 /* Invalidate previous TLB (if it's valid) */
2903 if (tlb
->prot
& PAGE_VALID
) {
2904 end
= tlb
->EPN
+ tlb
->size
;
2905 #if defined (DEBUG_SOFTWARE_TLB)
2906 if (loglevel
!= 0) {
2907 fprintf(logfile
, "%s: invalidate old TLB %d start " ADDRX
2908 " end " ADDRX
"\n", __func__
, (int)entry
, tlb
->EPN
, end
);
2911 for (page
= tlb
->EPN
; page
< end
; page
+= TARGET_PAGE_SIZE
)
2912 tlb_flush_page(env
, page
);
2914 tlb
->size
= booke_tlb_to_page_size((val
>> 7) & 0x7);
2915 /* We cannot handle TLB size < TARGET_PAGE_SIZE.
2916 * If this ever occurs, one should use the ppcemb target instead
2917 * of the ppc or ppc64 one
2919 if ((val
& 0x40) && tlb
->size
< TARGET_PAGE_SIZE
) {
2920 cpu_abort(env
, "TLB size " TARGET_FMT_lu
" < %u "
2921 "are not supported (%d)\n",
2922 tlb
->size
, TARGET_PAGE_SIZE
, (int)((val
>> 7) & 0x7));
2924 tlb
->EPN
= val
& ~(tlb
->size
- 1);
2926 tlb
->prot
|= PAGE_VALID
;
2928 tlb
->prot
&= ~PAGE_VALID
;
2930 /* XXX: TO BE FIXED */
2931 cpu_abort(env
, "Little-endian TLB entries are not supported by now\n");
2933 tlb
->PID
= env
->spr
[SPR_40x_PID
]; /* PID */
2934 tlb
->attr
= val
& 0xFF;
2935 #if defined (DEBUG_SOFTWARE_TLB)
2936 if (loglevel
!= 0) {
2937 fprintf(logfile
, "%s: set up TLB %d RPN " PADDRX
" EPN " ADDRX
2938 " size " ADDRX
" prot %c%c%c%c PID %d\n", __func__
,
2939 (int)T0
, tlb
->RPN
, tlb
->EPN
, tlb
->size
,
2940 tlb
->prot
& PAGE_READ
? 'r' : '-',
2941 tlb
->prot
& PAGE_WRITE
? 'w' : '-',
2942 tlb
->prot
& PAGE_EXEC
? 'x' : '-',
2943 tlb
->prot
& PAGE_VALID
? 'v' : '-', (int)tlb
->PID
);
2946 /* Invalidate new TLB (if valid) */
2947 if (tlb
->prot
& PAGE_VALID
) {
2948 end
= tlb
->EPN
+ tlb
->size
;
2949 #if defined (DEBUG_SOFTWARE_TLB)
2950 if (loglevel
!= 0) {
2951 fprintf(logfile
, "%s: invalidate TLB %d start " ADDRX
2952 " end " ADDRX
"\n", __func__
, (int)T0
, tlb
->EPN
, end
);
2955 for (page
= tlb
->EPN
; page
< end
; page
+= TARGET_PAGE_SIZE
)
2956 tlb_flush_page(env
, page
);
2960 void helper_4xx_tlbwe_lo (target_ulong entry
, target_ulong val
)
2964 #if defined (DEBUG_SOFTWARE_TLB)
2965 if (loglevel
!= 0) {
2966 fprintf(logfile
, "%s entry " TDX
" val " TDX
"\n", __func__
, entry
, val
);
2970 tlb
= &env
->tlb
[entry
].tlbe
;
2971 tlb
->RPN
= val
& 0xFFFFFC00;
2972 tlb
->prot
= PAGE_READ
;
2974 tlb
->prot
|= PAGE_EXEC
;
2976 tlb
->prot
|= PAGE_WRITE
;
2977 #if defined (DEBUG_SOFTWARE_TLB)
2978 if (loglevel
!= 0) {
2979 fprintf(logfile
, "%s: set up TLB %d RPN " PADDRX
" EPN " ADDRX
2980 " size " ADDRX
" prot %c%c%c%c PID %d\n", __func__
,
2981 (int)entry
, tlb
->RPN
, tlb
->EPN
, tlb
->size
,
2982 tlb
->prot
& PAGE_READ
? 'r' : '-',
2983 tlb
->prot
& PAGE_WRITE
? 'w' : '-',
2984 tlb
->prot
& PAGE_EXEC
? 'x' : '-',
2985 tlb
->prot
& PAGE_VALID
? 'v' : '-', (int)tlb
->PID
);
2990 target_ulong
helper_4xx_tlbsx (target_ulong address
)
2992 return ppcemb_tlb_search(env
, address
, env
->spr
[SPR_40x_PID
]);
2995 /* PowerPC 440 TLB management */
2996 void helper_440_tlbwe (uint32_t word
, target_ulong entry
, target_ulong value
)
2999 target_ulong EPN
, RPN
, size
;
3002 #if defined (DEBUG_SOFTWARE_TLB)
3003 if (loglevel
!= 0) {
3004 fprintf(logfile
, "%s word %d entry " TDX
" value " TDX
"\n",
3005 __func__
, word
, entry
, value
);
3010 tlb
= &env
->tlb
[entry
].tlbe
;
3013 /* Just here to please gcc */
3015 EPN
= value
& 0xFFFFFC00;
3016 if ((tlb
->prot
& PAGE_VALID
) && EPN
!= tlb
->EPN
)
3019 size
= booke_tlb_to_page_size((value
>> 4) & 0xF);
3020 if ((tlb
->prot
& PAGE_VALID
) && tlb
->size
< size
)
3024 tlb
->attr
|= (value
>> 8) & 1;
3025 if (value
& 0x200) {
3026 tlb
->prot
|= PAGE_VALID
;
3028 if (tlb
->prot
& PAGE_VALID
) {
3029 tlb
->prot
&= ~PAGE_VALID
;
3033 tlb
->PID
= env
->spr
[SPR_440_MMUCR
] & 0x000000FF;
3038 RPN
= value
& 0xFFFFFC0F;
3039 if ((tlb
->prot
& PAGE_VALID
) && tlb
->RPN
!= RPN
)
3044 tlb
->attr
= (tlb
->attr
& 0x1) | (value
& 0x0000FF00);
3045 tlb
->prot
= tlb
->prot
& PAGE_VALID
;
3047 tlb
->prot
|= PAGE_READ
<< 4;
3049 tlb
->prot
|= PAGE_WRITE
<< 4;
3051 tlb
->prot
|= PAGE_EXEC
<< 4;
3053 tlb
->prot
|= PAGE_READ
;
3055 tlb
->prot
|= PAGE_WRITE
;
3057 tlb
->prot
|= PAGE_EXEC
;
3062 target_ulong
helper_440_tlbre (uint32_t word
, target_ulong entry
)
3069 tlb
= &env
->tlb
[entry
].tlbe
;
3072 /* Just here to please gcc */
3075 size
= booke_page_size_to_tlb(tlb
->size
);
3076 if (size
< 0 || size
> 0xF)
3079 if (tlb
->attr
& 0x1)
3081 if (tlb
->prot
& PAGE_VALID
)
3083 env
->spr
[SPR_440_MMUCR
] &= ~0x000000FF;
3084 env
->spr
[SPR_440_MMUCR
] |= tlb
->PID
;
3090 ret
= tlb
->attr
& ~0x1;
3091 if (tlb
->prot
& (PAGE_READ
<< 4))
3093 if (tlb
->prot
& (PAGE_WRITE
<< 4))
3095 if (tlb
->prot
& (PAGE_EXEC
<< 4))
3097 if (tlb
->prot
& PAGE_READ
)
3099 if (tlb
->prot
& PAGE_WRITE
)
3101 if (tlb
->prot
& PAGE_EXEC
)
3108 target_ulong
helper_440_tlbsx (target_ulong address
)
3110 return ppcemb_tlb_search(env
, address
, env
->spr
[SPR_440_MMUCR
] & 0xFF);
3113 #endif /* !CONFIG_USER_ONLY */