/*
 *  PowerPC emulation helpers for qemu.
 *
 *  Copyright (c) 2003-2007 Jocelyn Mayer
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */
#include "exec.h"
#include "host-utils.h"
#include "helper.h"

#include "helper_regs.h"

//#define DEBUG_OP
//#define DEBUG_EXCEPTIONS
//#define DEBUG_SOFTWARE_TLB

#ifdef DEBUG_SOFTWARE_TLB
#  define LOG_SWTLB(...) qemu_log(__VA_ARGS__)
#else
#  define LOG_SWTLB(...) do { } while (0)
#endif
/*****************************************************************************/
/* Exceptions processing helpers */

void helper_raise_exception_err (uint32_t exception, uint32_t error_code)
{
#if 0
    printf("Raise exception %3x code : %d\n", exception, error_code);
#endif
    env->exception_index = exception;
    env->error_code = error_code;
    cpu_loop_exit();
}

void helper_raise_exception (uint32_t exception)
{
    helper_raise_exception_err(exception, 0);
}
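/* Note (added): usage sketch, not part of the original file.  A translated
 * instruction that must trap with an invalid-operation program exception
 * would call the helper above roughly like this, using the POWERPC_EXCP_*
 * constants assumed to come from cpu.h:
 *
 *     helper_raise_exception_err(POWERPC_EXCP_PROGRAM,
 *                                POWERPC_EXCP_INVAL | POWERPC_EXCP_INVAL_INVAL);
 */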
/*****************************************************************************/
/* SPR accesses */
void helper_load_dump_spr (uint32_t sprn)
{
    qemu_log("Read SPR %d %03x => " ADDRX "\n",
             sprn, sprn, env->spr[sprn]);
}

void helper_store_dump_spr (uint32_t sprn)
{
    qemu_log("Write SPR %d %03x <= " ADDRX "\n",
             sprn, sprn, env->spr[sprn]);
}

target_ulong helper_load_tbl (void)
{
    return cpu_ppc_load_tbl(env);
}

target_ulong helper_load_tbu (void)
{
    return cpu_ppc_load_tbu(env);
}

target_ulong helper_load_atbl (void)
{
    return cpu_ppc_load_atbl(env);
}

target_ulong helper_load_atbu (void)
{
    return cpu_ppc_load_atbu(env);
}

target_ulong helper_load_601_rtcl (void)
{
    return cpu_ppc601_load_rtcl(env);
}

target_ulong helper_load_601_rtcu (void)
{
    return cpu_ppc601_load_rtcu(env);
}
#if !defined(CONFIG_USER_ONLY)
#if defined (TARGET_PPC64)
void helper_store_asr (target_ulong val)
{
    ppc_store_asr(env, val);
}
#endif

void helper_store_sdr1 (target_ulong val)
{
    ppc_store_sdr1(env, val);
}

void helper_store_tbl (target_ulong val)
{
    cpu_ppc_store_tbl(env, val);
}

void helper_store_tbu (target_ulong val)
{
    cpu_ppc_store_tbu(env, val);
}

void helper_store_atbl (target_ulong val)
{
    cpu_ppc_store_atbl(env, val);
}

void helper_store_atbu (target_ulong val)
{
    cpu_ppc_store_atbu(env, val);
}

void helper_store_601_rtcl (target_ulong val)
{
    cpu_ppc601_store_rtcl(env, val);
}

void helper_store_601_rtcu (target_ulong val)
{
    cpu_ppc601_store_rtcu(env, val);
}

target_ulong helper_load_decr (void)
{
    return cpu_ppc_load_decr(env);
}

void helper_store_decr (target_ulong val)
{
    cpu_ppc_store_decr(env, val);
}
void helper_store_hid0_601 (target_ulong val)
{
    target_ulong hid0;

    hid0 = env->spr[SPR_HID0];
    if ((val ^ hid0) & 0x00000008) {
        /* Change current endianness */
        env->hflags &= ~(1 << MSR_LE);
        env->hflags_nmsr &= ~(1 << MSR_LE);
        env->hflags_nmsr |= (1 << MSR_LE) & (((val >> 3) & 1) << MSR_LE);
        env->hflags |= env->hflags_nmsr;
        qemu_log("%s: set endianness to %c => " ADDRX "\n",
                 __func__, val & 0x8 ? 'l' : 'b', env->hflags);
    }
    env->spr[SPR_HID0] = (uint32_t)val;
}

void helper_store_403_pbr (uint32_t num, target_ulong value)
{
    if (likely(env->pb[num] != value)) {
        env->pb[num] = value;
        /* Should be optimized */
        tlb_flush(env, 1);
    }
}

target_ulong helper_load_40x_pit (void)
{
    return load_40x_pit(env);
}

void helper_store_40x_pit (target_ulong val)
{
    store_40x_pit(env, val);
}

void helper_store_40x_dbcr0 (target_ulong val)
{
    store_40x_dbcr0(env, val);
}

void helper_store_40x_sler (target_ulong val)
{
    store_40x_sler(env, val);
}

void helper_store_booke_tcr (target_ulong val)
{
    store_booke_tcr(env, val);
}

void helper_store_booke_tsr (target_ulong val)
{
    store_booke_tsr(env, val);
}

void helper_store_ibatu (uint32_t nr, target_ulong val)
{
    ppc_store_ibatu(env, nr, val);
}

void helper_store_ibatl (uint32_t nr, target_ulong val)
{
    ppc_store_ibatl(env, nr, val);
}

void helper_store_dbatu (uint32_t nr, target_ulong val)
{
    ppc_store_dbatu(env, nr, val);
}

void helper_store_dbatl (uint32_t nr, target_ulong val)
{
    ppc_store_dbatl(env, nr, val);
}

void helper_store_601_batl (uint32_t nr, target_ulong val)
{
    ppc_store_ibatl_601(env, nr, val);
}

void helper_store_601_batu (uint32_t nr, target_ulong val)
{
    ppc_store_ibatu_601(env, nr, val);
}
#endif
/*****************************************************************************/
/* Memory load and stores */

static always_inline target_ulong addr_add(target_ulong addr, target_long arg)
{
#if defined(TARGET_PPC64)
    if (!msr_sf)
        return (uint32_t)(addr + arg);
    else
#endif
        return addr + arg;
}
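/* Note (added): in 32-bit mode on a 64-bit CPU (MSR[SF] clear), effective
 * addresses wrap at 4 GB, which is why addr_add() truncates the sum to
 * 32 bits.  Illustrative values only, not part of the original file:
 *
 *     addr_add(0xFFFFFFFCUL, 8)   -- yields 0x00000004 when !msr_sf
 *                                 -- yields 0x100000004 when msr_sf is set
 */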
void helper_lmw (target_ulong addr, uint32_t reg)
{
    for (; reg < 32; reg++) {
        if (msr_le)
            env->gpr[reg] = bswap32(ldl(addr));
        else
            env->gpr[reg] = ldl(addr);
        addr = addr_add(addr, 4);
    }
}

void helper_stmw (target_ulong addr, uint32_t reg)
{
    for (; reg < 32; reg++) {
        if (msr_le)
            stl(addr, bswap32((uint32_t)env->gpr[reg]));
        else
            stl(addr, (uint32_t)env->gpr[reg]);
        addr = addr_add(addr, 4);
    }
}

void helper_lsw(target_ulong addr, uint32_t nb, uint32_t reg)
{
    int sh;

    for (; nb > 3; nb -= 4) {
        env->gpr[reg] = ldl(addr);
        reg = (reg + 1) % 32;
        addr = addr_add(addr, 4);
    }
    if (unlikely(nb > 0)) {
        env->gpr[reg] = 0;
        for (sh = 24; nb > 0; nb--, sh -= 8) {
            env->gpr[reg] |= ldub(addr) << sh;
            addr = addr_add(addr, 1);
        }
    }
}
/* PPC32 specification says we must generate an exception if
 * rA is in the range of registers to be loaded.
 * On the other hand, IBM says this is valid, but rA won't be loaded.
 * For now, I'll follow the spec...
 */
void helper_lswx(target_ulong addr, uint32_t reg, uint32_t ra, uint32_t rb)
{
    if (likely(xer_bc != 0)) {
        if (unlikely((ra != 0 && reg < ra && (reg + xer_bc) > ra) ||
                     (reg < rb && (reg + xer_bc) > rb))) {
            helper_raise_exception_err(POWERPC_EXCP_PROGRAM,
                                       POWERPC_EXCP_INVAL |
                                       POWERPC_EXCP_INVAL_LSWX);
        } else {
            helper_lsw(addr, xer_bc, reg);
        }
    }
}
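/* Note (added): xer_bc is the string byte count taken from XER, so lswx can
 * move up to 127 bytes through up to 32 GPRs, wrapping from r31 back to r0.
 * Illustrative values only: with a byte count of 9 and reg = 30, the bytes
 * land in r30, r31 and the top byte of r0 -- hence the overlap checks on
 * ra and rb above.
 */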
void helper_stsw(target_ulong addr, uint32_t nb, uint32_t reg)
{
    int sh;

    for (; nb > 3; nb -= 4) {
        stl(addr, env->gpr[reg]);
        reg = (reg + 1) % 32;
        addr = addr_add(addr, 4);
    }
    if (unlikely(nb > 0)) {
        for (sh = 24; nb > 0; nb--, sh -= 8) {
            stb(addr, (env->gpr[reg] >> sh) & 0xFF);
            addr = addr_add(addr, 1);
        }
    }
}

static void do_dcbz(target_ulong addr, int dcache_line_size)
{
    int i;

    addr &= ~(dcache_line_size - 1);
    for (i = 0 ; i < dcache_line_size ; i += 4) {
        stl(addr + i, 0);
    }
    if (env->reserve == addr)
        env->reserve = (target_ulong)-1ULL;
}

void helper_dcbz(target_ulong addr)
{
    do_dcbz(addr, env->dcache_line_size);
}

void helper_dcbz_970(target_ulong addr)
{
    if (((env->spr[SPR_970_HID5] >> 7) & 0x3) == 1)
        do_dcbz(addr, 32);
    else
        do_dcbz(addr, env->dcache_line_size);
}

void helper_icbi(target_ulong addr)
{
    uint32_t tmp;

    addr &= ~(env->dcache_line_size - 1);
    /* Invalidate one cache line :
     * PowerPC specification says this is to be treated like a load
     * (not a fetch) by the MMU. To be sure it will be so,
     * do the load "by hand".
     */
    tmp = ldl(addr);
    tb_invalidate_page_range(addr, addr + env->icache_line_size);
}
target_ulong helper_lscbx (target_ulong addr, uint32_t reg, uint32_t ra, uint32_t rb)
{
    int i, c, d;

    d = 24;
    for (i = 0; i < xer_bc; i++) {
        c = ldub(addr);
        addr = addr_add(addr, 1);
        /* ra (if not 0) and rb are never modified */
        if (likely(reg != rb && (ra == 0 || reg != ra))) {
            env->gpr[reg] = (env->gpr[reg] & ~(0xFF << d)) | (c << d);
        }
        if (unlikely(c == xer_cmp))
            break;
        if (likely(d != 0)) {
            d -= 8;
        } else {
            d = 24;
            reg++;
            reg = reg & 0x1F;
        }
    }
    return i;
}
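/* Note (added): lscbx ("load string and compare byte indexed", a POWER
 * bridge instruction) moves at most xer_bc bytes into successive GPRs and
 * stops early when a byte equal to xer_cmp is encountered; the caller
 * stores the returned byte count back into XER.  ra (if non-zero) and rb
 * are skipped as noted above, so the address operands are never clobbered.
 */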
/*****************************************************************************/
/* Fixed point operations helpers */
#if defined(TARGET_PPC64)

/* multiply high word */
uint64_t helper_mulhd (uint64_t arg1, uint64_t arg2)
{
    uint64_t tl, th;

    muls64(&tl, &th, arg1, arg2);
    return th;
}

/* multiply high word unsigned */
uint64_t helper_mulhdu (uint64_t arg1, uint64_t arg2)
{
    uint64_t tl, th;

    mulu64(&tl, &th, arg1, arg2);
    return th;
}

uint64_t helper_mulldo (uint64_t arg1, uint64_t arg2)
{
    int64_t th;
    uint64_t tl;

    muls64(&tl, (uint64_t *)&th, arg1, arg2);
    /* If th != 0 && th != -1, then we had an overflow */
    if (likely((uint64_t)(th + 1) <= 1)) {
        env->xer &= ~(1 << XER_OV);
    } else {
        env->xer |= (1 << XER_OV) | (1 << XER_SO);
    }
    return (int64_t)tl;
}
#endif

target_ulong helper_cntlzw (target_ulong t)
{
    return clz32(t);
}

#if defined(TARGET_PPC64)
target_ulong helper_cntlzd (target_ulong t)
{
    return clz64(t);
}
#endif
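/* Note (added): clz32()/clz64() come from host-utils.h and return the full
 * bit width (32 or 64) for a zero input, which matches the cntlzw/cntlzd
 * architectural definition.  Illustrative values only:
 *
 *     clz32(0x00008000) == 16,    clz32(0) == 32
 */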
/* shift right arithmetic helper */
target_ulong helper_sraw (target_ulong value, target_ulong shift)
{
    int32_t ret;

    if (likely(!(shift & 0x20))) {
        if (likely((uint32_t)shift != 0)) {
            shift &= 0x1f;
            ret = (int32_t)value >> shift;
            if (likely(ret >= 0 || (value & ((1 << shift) - 1)) == 0)) {
                env->xer &= ~(1 << XER_CA);
            } else {
                env->xer |= (1 << XER_CA);
            }
        } else {
            ret = (int32_t)value;
            env->xer &= ~(1 << XER_CA);
        }
    } else {
        ret = (int32_t)value >> 31;
        if (ret) {
            env->xer |= (1 << XER_CA);
        } else {
            env->xer &= ~(1 << XER_CA);
        }
    }
    return (target_long)ret;
}
#if defined(TARGET_PPC64)
target_ulong helper_srad (target_ulong value, target_ulong shift)
{
    int64_t ret;

    if (likely(!(shift & 0x40))) {
        if (likely((uint64_t)shift != 0)) {
            shift &= 0x3f;
            ret = (int64_t)value >> shift;
            /* The mask must be built in 64-bit arithmetic: a plain
             * "1 << shift" overflows int for shifts of 31 or more.
             */
            if (likely(ret >= 0 ||
                       (value & (((uint64_t)1 << shift) - 1)) == 0)) {
                env->xer &= ~(1 << XER_CA);
            } else {
                env->xer |= (1 << XER_CA);
            }
        } else {
            ret = (int64_t)value;
            env->xer &= ~(1 << XER_CA);
        }
    } else {
        ret = (int64_t)value >> 63;
        if (ret) {
            env->xer |= (1 << XER_CA);
        } else {
            env->xer &= ~(1 << XER_CA);
        }
    }
    return ret;
}
#endif
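/* Note (added): XER[CA] after sraw/srad means "the result is negative and
 * at least one 1 bit was shifted out", i.e. the truncation rounded away
 * from zero.  Illustrative values only:
 *
 *     sraw(-5, 1) -> result -3, CA = 1   (a 1 bit was dropped)
 *     sraw(-4, 1) -> result -2, CA = 0
 *     sraw( 5, 1) -> result  2, CA = 0   (positive values never set CA)
 */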
target_ulong helper_popcntb (target_ulong val)
{
    val = (val & 0x55555555) + ((val >> 1) & 0x55555555);
    val = (val & 0x33333333) + ((val >> 2) & 0x33333333);
    val = (val & 0x0f0f0f0f) + ((val >> 4) & 0x0f0f0f0f);
    return val;
}

#if defined(TARGET_PPC64)
target_ulong helper_popcntb_64 (target_ulong val)
{
    val = (val & 0x5555555555555555ULL) + ((val >> 1) & 0x5555555555555555ULL);
    val = (val & 0x3333333333333333ULL) + ((val >> 2) & 0x3333333333333333ULL);
    val = (val & 0x0f0f0f0f0f0f0f0fULL) + ((val >> 4) & 0x0f0f0f0f0f0f0f0fULL);
    return val;
}
#endif
/*****************************************************************************/
/* Floating point operations helpers */
uint64_t helper_float32_to_float64(uint32_t arg)
{
    CPU_FloatU f;
    CPU_DoubleU d;

    f.l = arg;
    d.d = float32_to_float64(f.f, &env->fp_status);
    return d.ll;
}

uint32_t helper_float64_to_float32(uint64_t arg)
{
    CPU_FloatU f;
    CPU_DoubleU d;

    d.ll = arg;
    f.f = float64_to_float32(d.d, &env->fp_status);
    return f.l;
}

static always_inline int isden (float64 d)
{
    CPU_DoubleU u;

    u.d = d;

    return ((u.ll >> 52) & 0x7FF) == 0;
}

uint32_t helper_compute_fprf (uint64_t arg, uint32_t set_fprf)
{
    CPU_DoubleU farg;
    int isneg;
    int ret;

    farg.ll = arg;
    isneg = float64_is_neg(farg.d);
    if (unlikely(float64_is_nan(farg.d))) {
        if (float64_is_signaling_nan(farg.d)) {
            /* Signaling NaN: flags are undefined */
            ret = 0x00;
        } else {
            /* Quiet NaN */
            ret = 0x11;
        }
    } else if (unlikely(float64_is_infinity(farg.d))) {
        /* +/- infinity */
        if (isneg)
            ret = 0x09;
        else
            ret = 0x05;
    } else {
        if (float64_is_zero(farg.d)) {
            /* +/- zero */
            if (isneg)
                ret = 0x12;
            else
                ret = 0x02;
        } else {
            if (isden(farg.d)) {
                /* Denormalized numbers */
                ret = 0x10;
            } else {
                /* Normalized numbers */
                ret = 0x00;
            }
            if (isneg) {
                ret |= 0x08;
            } else {
                ret |= 0x04;
            }
        }
    }
    if (set_fprf) {
        /* We update FPSCR_FPRF */
        env->fpscr &= ~(0x1F << FPSCR_FPRF);
        env->fpscr |= ret << FPSCR_FPRF;
    }
    /* We just need fpcc to update Rc1 */
    return ret & 0xF;
}
/* Floating-point invalid operations exception */
static always_inline uint64_t fload_invalid_op_excp (int op)
{
    uint64_t ret = 0;
    int ve;

    ve = fpscr_ve;
    switch (op) {
    case POWERPC_EXCP_FP_VXSNAN:
        env->fpscr |= 1 << FPSCR_VXSNAN;
        break;
    case POWERPC_EXCP_FP_VXSOFT:
        env->fpscr |= 1 << FPSCR_VXSOFT;
        break;
    case POWERPC_EXCP_FP_VXISI:
        /* Magnitude subtraction of infinities */
        env->fpscr |= 1 << FPSCR_VXISI;
        goto update_arith;
    case POWERPC_EXCP_FP_VXIDI:
        /* Division of infinity by infinity */
        env->fpscr |= 1 << FPSCR_VXIDI;
        goto update_arith;
    case POWERPC_EXCP_FP_VXZDZ:
        /* Division of zero by zero */
        env->fpscr |= 1 << FPSCR_VXZDZ;
        goto update_arith;
    case POWERPC_EXCP_FP_VXIMZ:
        /* Multiplication of zero by infinity */
        env->fpscr |= 1 << FPSCR_VXIMZ;
        goto update_arith;
    case POWERPC_EXCP_FP_VXVC:
        /* Ordered comparison of NaN */
        env->fpscr |= 1 << FPSCR_VXVC;
        env->fpscr &= ~(0xF << FPSCR_FPCC);
        env->fpscr |= 0x11 << FPSCR_FPCC;
        /* We must update the target FPR before raising the exception */
        if (ve != 0) {
            env->exception_index = POWERPC_EXCP_PROGRAM;
            env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_VXVC;
            /* Update the floating-point enabled exception summary */
            env->fpscr |= 1 << FPSCR_FEX;
            /* Exception is deferred */
        }
        break;
    case POWERPC_EXCP_FP_VXSQRT:
        /* Square root of a negative number */
        env->fpscr |= 1 << FPSCR_VXSQRT;
    update_arith:
        env->fpscr &= ~((1 << FPSCR_FR) | (1 << FPSCR_FI));
        if (ve == 0) {
            /* Set the result to quiet NaN */
            ret = 0xFFF8000000000000ULL;
            env->fpscr &= ~(0xF << FPSCR_FPCC);
            env->fpscr |= 0x11 << FPSCR_FPCC;
        }
        break;
    case POWERPC_EXCP_FP_VXCVI:
        /* Invalid conversion */
        env->fpscr |= 1 << FPSCR_VXCVI;
        env->fpscr &= ~((1 << FPSCR_FR) | (1 << FPSCR_FI));
        if (ve == 0) {
            /* Set the result to quiet NaN */
            ret = 0xFFF8000000000000ULL;
            env->fpscr &= ~(0xF << FPSCR_FPCC);
            env->fpscr |= 0x11 << FPSCR_FPCC;
        }
        break;
    }
    /* Update the floating-point invalid operation summary */
    env->fpscr |= 1 << FPSCR_VX;
    /* Update the floating-point exception summary */
    env->fpscr |= 1 << FPSCR_FX;
    if (ve != 0) {
        /* Update the floating-point enabled exception summary */
        env->fpscr |= 1 << FPSCR_FEX;
        if (msr_fe0 != 0 || msr_fe1 != 0)
            helper_raise_exception_err(POWERPC_EXCP_PROGRAM, POWERPC_EXCP_FP | op);
    }
    return ret;
}
static always_inline void float_zero_divide_excp (void)
{
    env->fpscr |= 1 << FPSCR_ZX;
    env->fpscr &= ~((1 << FPSCR_FR) | (1 << FPSCR_FI));
    /* Update the floating-point exception summary */
    env->fpscr |= 1 << FPSCR_FX;
    if (fpscr_ze != 0) {
        /* Update the floating-point enabled exception summary */
        env->fpscr |= 1 << FPSCR_FEX;
        if (msr_fe0 != 0 || msr_fe1 != 0) {
            helper_raise_exception_err(POWERPC_EXCP_PROGRAM,
                                       POWERPC_EXCP_FP | POWERPC_EXCP_FP_ZX);
        }
    }
}

static always_inline void float_overflow_excp (void)
{
    env->fpscr |= 1 << FPSCR_OX;
    /* Update the floating-point exception summary */
    env->fpscr |= 1 << FPSCR_FX;
    if (fpscr_oe != 0) {
        /* XXX: should adjust the result */
        /* Update the floating-point enabled exception summary */
        env->fpscr |= 1 << FPSCR_FEX;
        /* We must update the target FPR before raising the exception */
        env->exception_index = POWERPC_EXCP_PROGRAM;
        env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_OX;
    } else {
        env->fpscr |= 1 << FPSCR_XX;
        env->fpscr |= 1 << FPSCR_FI;
    }
}

static always_inline void float_underflow_excp (void)
{
    env->fpscr |= 1 << FPSCR_UX;
    /* Update the floating-point exception summary */
    env->fpscr |= 1 << FPSCR_FX;
    if (fpscr_ue != 0) {
        /* XXX: should adjust the result */
        /* Update the floating-point enabled exception summary */
        env->fpscr |= 1 << FPSCR_FEX;
        /* We must update the target FPR before raising the exception */
        env->exception_index = POWERPC_EXCP_PROGRAM;
        env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_UX;
    }
}

static always_inline void float_inexact_excp (void)
{
    env->fpscr |= 1 << FPSCR_XX;
    /* Update the floating-point exception summary */
    env->fpscr |= 1 << FPSCR_FX;
    if (fpscr_xe != 0) {
        /* Update the floating-point enabled exception summary */
        env->fpscr |= 1 << FPSCR_FEX;
        /* We must update the target FPR before raising the exception */
        env->exception_index = POWERPC_EXCP_PROGRAM;
        env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_XX;
    }
}
static always_inline void fpscr_set_rounding_mode (void)
{
    int rnd_type;

    /* Set rounding mode */
    switch (fpscr_rn) {
    case 0:
        /* Best approximation (round to nearest) */
        rnd_type = float_round_nearest_even;
        break;
    case 1:
        /* Smaller magnitude (round toward zero) */
        rnd_type = float_round_to_zero;
        break;
    case 2:
        /* Round toward +infinity */
        rnd_type = float_round_up;
        break;
    default:
    case 3:
        /* Round toward -infinity */
        rnd_type = float_round_down;
        break;
    }
    set_float_rounding_mode(rnd_type, &env->fp_status);
}
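/* Note (added): fpscr_rn above is the 2-bit FPSCR[RN] field; the encoding
 * (00 nearest, 01 toward zero, 10 toward +infinity, 11 toward -infinity)
 * is mapped one-to-one onto the softfloat rounding modes.
 */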
void helper_fpscr_clrbit (uint32_t bit)
{
    int prev;

    prev = (env->fpscr >> bit) & 1;
    env->fpscr &= ~(1 << bit);
    if (prev == 1) {
        switch (bit) {
        case FPSCR_RN1:
        case FPSCR_RN:
            fpscr_set_rounding_mode();
            break;
        default:
            break;
        }
    }
}

void helper_fpscr_setbit (uint32_t bit)
{
    int prev;

    prev = (env->fpscr >> bit) & 1;
    env->fpscr |= 1 << bit;
    if (prev == 0) {
        switch (bit) {
        case FPSCR_VX:
            env->fpscr |= 1 << FPSCR_FX;
            if (fpscr_ve)
                goto raise_ve;
            break;
        case FPSCR_OX:
            env->fpscr |= 1 << FPSCR_FX;
            if (fpscr_oe)
                goto raise_oe;
            break;
        case FPSCR_UX:
            env->fpscr |= 1 << FPSCR_FX;
            if (fpscr_ue)
                goto raise_ue;
            break;
        case FPSCR_ZX:
            env->fpscr |= 1 << FPSCR_FX;
            if (fpscr_ze)
                goto raise_ze;
            break;
        case FPSCR_XX:
            env->fpscr |= 1 << FPSCR_FX;
            if (fpscr_xe)
                goto raise_xe;
            break;
        case FPSCR_VXSNAN:
        case FPSCR_VXISI:
        case FPSCR_VXIDI:
        case FPSCR_VXZDZ:
        case FPSCR_VXIMZ:
        case FPSCR_VXVC:
        case FPSCR_VXSOFT:
        case FPSCR_VXSQRT:
        case FPSCR_VXCVI:
            env->fpscr |= 1 << FPSCR_VX;
            env->fpscr |= 1 << FPSCR_FX;
            if (fpscr_ve != 0)
                goto raise_ve;
            break;
        case FPSCR_VE:
            if (fpscr_vx != 0) {
            raise_ve:
                env->error_code = POWERPC_EXCP_FP;
                if (fpscr_vxsnan)
                    env->error_code |= POWERPC_EXCP_FP_VXSNAN;
                if (fpscr_vxisi)
                    env->error_code |= POWERPC_EXCP_FP_VXISI;
                if (fpscr_vxidi)
                    env->error_code |= POWERPC_EXCP_FP_VXIDI;
                if (fpscr_vxzdz)
                    env->error_code |= POWERPC_EXCP_FP_VXZDZ;
                if (fpscr_vximz)
                    env->error_code |= POWERPC_EXCP_FP_VXIMZ;
                if (fpscr_vxvc)
                    env->error_code |= POWERPC_EXCP_FP_VXVC;
                if (fpscr_vxsoft)
                    env->error_code |= POWERPC_EXCP_FP_VXSOFT;
                if (fpscr_vxsqrt)
                    env->error_code |= POWERPC_EXCP_FP_VXSQRT;
                if (fpscr_vxcvi)
                    env->error_code |= POWERPC_EXCP_FP_VXCVI;
                goto raise_excp;
            }
            break;
        case FPSCR_OE:
            if (fpscr_ox != 0) {
            raise_oe:
                env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_OX;
                goto raise_excp;
            }
            break;
        case FPSCR_UE:
            if (fpscr_ux != 0) {
            raise_ue:
                env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_UX;
                goto raise_excp;
            }
            break;
        case FPSCR_ZE:
            if (fpscr_zx != 0) {
            raise_ze:
                env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_ZX;
                goto raise_excp;
            }
            break;
        case FPSCR_XE:
            if (fpscr_xx != 0) {
            raise_xe:
                env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_XX;
                goto raise_excp;
            }
            break;
        case FPSCR_RN1:
        case FPSCR_RN:
            fpscr_set_rounding_mode();
            break;
        default:
            break;
        raise_excp:
            /* Update the floating-point enabled exception summary */
            env->fpscr |= 1 << FPSCR_FEX;
            /* We have to update Rc1 before raising the exception */
            env->exception_index = POWERPC_EXCP_PROGRAM;
            break;
        }
    }
}
void helper_store_fpscr (uint64_t arg, uint32_t mask)
{
    /*
     * We use only the 32 LSB of the incoming fpr
     */
    uint32_t prev, new;
    int i;

    prev = env->fpscr;
    new = (uint32_t)arg;
    new &= ~0x60000000;
    new |= prev & 0x60000000;
    for (i = 0; i < 8; i++) {
        if (mask & (1 << i)) {
            env->fpscr &= ~(0xF << (4 * i));
            env->fpscr |= new & (0xF << (4 * i));
        }
    }
    /* Update VX and FEX */
    if (fpscr_ix != 0)
        env->fpscr |= 1 << FPSCR_VX;
    else
        env->fpscr &= ~(1 << FPSCR_VX);
    if ((fpscr_ex & fpscr_eex) != 0) {
        env->fpscr |= 1 << FPSCR_FEX;
        env->exception_index = POWERPC_EXCP_PROGRAM;
        /* XXX: we should compute it properly */
        env->error_code = POWERPC_EXCP_FP;
    } else {
        env->fpscr &= ~(1 << FPSCR_FEX);
    }
    fpscr_set_rounding_mode();
}
void helper_float_check_status (void)
{
#ifdef CONFIG_SOFTFLOAT
    if (env->exception_index == POWERPC_EXCP_PROGRAM &&
        (env->error_code & POWERPC_EXCP_FP)) {
        /* Deferred floating-point exception after target FPR update */
        if (msr_fe0 != 0 || msr_fe1 != 0)
            helper_raise_exception_err(env->exception_index, env->error_code);
    } else {
        int status = get_float_exception_flags(&env->fp_status);
        if (status & float_flag_divbyzero) {
            float_zero_divide_excp();
        } else if (status & float_flag_overflow) {
            float_overflow_excp();
        } else if (status & float_flag_underflow) {
            float_underflow_excp();
        } else if (status & float_flag_inexact) {
            float_inexact_excp();
        }
    }
#else
    if (env->exception_index == POWERPC_EXCP_PROGRAM &&
        (env->error_code & POWERPC_EXCP_FP)) {
        /* Deferred floating-point exception after target FPR update */
        if (msr_fe0 != 0 || msr_fe1 != 0)
            helper_raise_exception_err(env->exception_index, env->error_code);
    }
#endif
}

#ifdef CONFIG_SOFTFLOAT
void helper_reset_fpstatus (void)
{
    set_float_exception_flags(0, &env->fp_status);
}
#endif
/* fadd - fadd. */
uint64_t helper_fadd (uint64_t arg1, uint64_t arg2)
{
    CPU_DoubleU farg1, farg2;

    farg1.ll = arg1;
    farg2.ll = arg2;
#if USE_PRECISE_EMULATION
    if (unlikely(float64_is_signaling_nan(farg1.d) ||
                 float64_is_signaling_nan(farg2.d))) {
        /* sNaN addition */
        farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
    } else if (unlikely(float64_is_infinity(farg1.d) && float64_is_infinity(farg2.d) &&
                        float64_is_neg(farg1.d) != float64_is_neg(farg2.d))) {
        /* Magnitude subtraction of infinities */
        farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXISI);
    } else {
        farg1.d = float64_add(farg1.d, farg2.d, &env->fp_status);
    }
#else
    farg1.d = float64_add(farg1.d, farg2.d, &env->fp_status);
#endif
    return farg1.ll;
}

/* fsub - fsub. */
uint64_t helper_fsub (uint64_t arg1, uint64_t arg2)
{
    CPU_DoubleU farg1, farg2;

    farg1.ll = arg1;
    farg2.ll = arg2;
#if USE_PRECISE_EMULATION
    if (unlikely(float64_is_signaling_nan(farg1.d) ||
                 float64_is_signaling_nan(farg2.d))) {
        /* sNaN subtraction */
        farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
    } else if (unlikely(float64_is_infinity(farg1.d) && float64_is_infinity(farg2.d) &&
                        float64_is_neg(farg1.d) == float64_is_neg(farg2.d))) {
        /* Magnitude subtraction of infinities */
        farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXISI);
    } else {
        farg1.d = float64_sub(farg1.d, farg2.d, &env->fp_status);
    }
#else
    farg1.d = float64_sub(farg1.d, farg2.d, &env->fp_status);
#endif
    return farg1.ll;
}

/* fmul - fmul. */
uint64_t helper_fmul (uint64_t arg1, uint64_t arg2)
{
    CPU_DoubleU farg1, farg2;

    farg1.ll = arg1;
    farg2.ll = arg2;
#if USE_PRECISE_EMULATION
    if (unlikely(float64_is_signaling_nan(farg1.d) ||
                 float64_is_signaling_nan(farg2.d))) {
        /* sNaN multiplication */
        farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
    } else if (unlikely((float64_is_infinity(farg1.d) && float64_is_zero(farg2.d)) ||
                        (float64_is_zero(farg1.d) && float64_is_infinity(farg2.d)))) {
        /* Multiplication of zero by infinity */
        farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXIMZ);
    } else {
        farg1.d = float64_mul(farg1.d, farg2.d, &env->fp_status);
    }
#else
    farg1.d = float64_mul(farg1.d, farg2.d, &env->fp_status);
#endif
    return farg1.ll;
}

/* fdiv - fdiv. */
uint64_t helper_fdiv (uint64_t arg1, uint64_t arg2)
{
    CPU_DoubleU farg1, farg2;

    farg1.ll = arg1;
    farg2.ll = arg2;
#if USE_PRECISE_EMULATION
    if (unlikely(float64_is_signaling_nan(farg1.d) ||
                 float64_is_signaling_nan(farg2.d))) {
        /* sNaN division */
        farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
    } else if (unlikely(float64_is_infinity(farg1.d) && float64_is_infinity(farg2.d))) {
        /* Division of infinity by infinity */
        farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXIDI);
    } else if (unlikely(float64_is_zero(farg1.d) && float64_is_zero(farg2.d))) {
        /* Division of zero by zero */
        farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXZDZ);
    } else {
        farg1.d = float64_div(farg1.d, farg2.d, &env->fp_status);
    }
#else
    farg1.d = float64_div(farg1.d, farg2.d, &env->fp_status);
#endif
    return farg1.ll;
}
/* fabs */
uint64_t helper_fabs (uint64_t arg)
{
    CPU_DoubleU farg;

    farg.ll = arg;
    farg.d = float64_abs(farg.d);
    return farg.ll;
}

/* fnabs */
uint64_t helper_fnabs (uint64_t arg)
{
    CPU_DoubleU farg;

    farg.ll = arg;
    farg.d = float64_abs(farg.d);
    farg.d = float64_chs(farg.d);
    return farg.ll;
}

/* fneg */
uint64_t helper_fneg (uint64_t arg)
{
    CPU_DoubleU farg;

    farg.ll = arg;
    farg.d = float64_chs(farg.d);
    return farg.ll;
}
/* fctiw - fctiw. */
uint64_t helper_fctiw (uint64_t arg)
{
    CPU_DoubleU farg;
    farg.ll = arg;

    if (unlikely(float64_is_signaling_nan(farg.d))) {
        /* sNaN conversion */
        farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN | POWERPC_EXCP_FP_VXCVI);
    } else if (unlikely(float64_is_nan(farg.d) || float64_is_infinity(farg.d))) {
        /* qNan / infinity conversion */
        farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXCVI);
    } else {
        farg.ll = float64_to_int32(farg.d, &env->fp_status);
#if USE_PRECISE_EMULATION
        /* XXX: higher bits are not supposed to be significant.
         *     to make tests easier, return the same as a real PowerPC 750
         */
        farg.ll |= 0xFFF80000ULL << 32;
#endif
    }
    return farg.ll;
}

/* fctiwz - fctiwz. */
uint64_t helper_fctiwz (uint64_t arg)
{
    CPU_DoubleU farg;
    farg.ll = arg;

    if (unlikely(float64_is_signaling_nan(farg.d))) {
        /* sNaN conversion */
        farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN | POWERPC_EXCP_FP_VXCVI);
    } else if (unlikely(float64_is_nan(farg.d) || float64_is_infinity(farg.d))) {
        /* qNan / infinity conversion */
        farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXCVI);
    } else {
        farg.ll = float64_to_int32_round_to_zero(farg.d, &env->fp_status);
#if USE_PRECISE_EMULATION
        /* XXX: higher bits are not supposed to be significant.
         *     to make tests easier, return the same as a real PowerPC 750
         */
        farg.ll |= 0xFFF80000ULL << 32;
#endif
    }
    return farg.ll;
}
#if defined(TARGET_PPC64)
/* fcfid - fcfid. */
uint64_t helper_fcfid (uint64_t arg)
{
    CPU_DoubleU farg;

    farg.d = int64_to_float64(arg, &env->fp_status);
    return farg.ll;
}

/* fctid - fctid. */
uint64_t helper_fctid (uint64_t arg)
{
    CPU_DoubleU farg;
    farg.ll = arg;

    if (unlikely(float64_is_signaling_nan(farg.d))) {
        /* sNaN conversion */
        farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN | POWERPC_EXCP_FP_VXCVI);
    } else if (unlikely(float64_is_nan(farg.d) || float64_is_infinity(farg.d))) {
        /* qNan / infinity conversion */
        farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXCVI);
    } else {
        farg.ll = float64_to_int64(farg.d, &env->fp_status);
    }
    return farg.ll;
}

/* fctidz - fctidz. */
uint64_t helper_fctidz (uint64_t arg)
{
    CPU_DoubleU farg;
    farg.ll = arg;

    if (unlikely(float64_is_signaling_nan(farg.d))) {
        /* sNaN conversion */
        farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN | POWERPC_EXCP_FP_VXCVI);
    } else if (unlikely(float64_is_nan(farg.d) || float64_is_infinity(farg.d))) {
        /* qNan / infinity conversion */
        farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXCVI);
    } else {
        farg.ll = float64_to_int64_round_to_zero(farg.d, &env->fp_status);
    }
    return farg.ll;
}
#endif
static always_inline uint64_t do_fri (uint64_t arg, int rounding_mode)
{
    CPU_DoubleU farg;
    farg.ll = arg;

    if (unlikely(float64_is_signaling_nan(farg.d))) {
        /* sNaN round */
        farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN | POWERPC_EXCP_FP_VXCVI);
    } else if (unlikely(float64_is_nan(farg.d) || float64_is_infinity(farg.d))) {
        /* qNan / infinity round */
        farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXCVI);
    } else {
        set_float_rounding_mode(rounding_mode, &env->fp_status);
        farg.ll = float64_round_to_int(farg.d, &env->fp_status);
        /* Restore rounding mode from FPSCR */
        fpscr_set_rounding_mode();
    }
    return farg.ll;
}

uint64_t helper_frin (uint64_t arg)
{
    return do_fri(arg, float_round_nearest_even);
}

uint64_t helper_friz (uint64_t arg)
{
    return do_fri(arg, float_round_to_zero);
}

uint64_t helper_frip (uint64_t arg)
{
    return do_fri(arg, float_round_up);
}

uint64_t helper_frim (uint64_t arg)
{
    return do_fri(arg, float_round_down);
}
/* fmadd - fmadd. */
uint64_t helper_fmadd (uint64_t arg1, uint64_t arg2, uint64_t arg3)
{
    CPU_DoubleU farg1, farg2, farg3;

    farg1.ll = arg1;
    farg2.ll = arg2;
    farg3.ll = arg3;
#if USE_PRECISE_EMULATION
    if (unlikely(float64_is_signaling_nan(farg1.d) ||
                 float64_is_signaling_nan(farg2.d) ||
                 float64_is_signaling_nan(farg3.d))) {
        /* sNaN operation */
        farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
    } else if (unlikely((float64_is_infinity(farg1.d) && float64_is_zero(farg2.d)) ||
                        (float64_is_zero(farg1.d) && float64_is_infinity(farg2.d)))) {
        /* Multiplication of zero by infinity */
        farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXIMZ);
    } else {
#ifdef FLOAT128
        /* This is the way the PowerPC specification defines it */
        float128 ft0_128, ft1_128;

        ft0_128 = float64_to_float128(farg1.d, &env->fp_status);
        ft1_128 = float64_to_float128(farg2.d, &env->fp_status);
        ft0_128 = float128_mul(ft0_128, ft1_128, &env->fp_status);
        if (unlikely(float128_is_infinity(ft0_128) && float64_is_infinity(farg3.d) &&
                     float128_is_neg(ft0_128) != float64_is_neg(farg3.d))) {
            /* Magnitude subtraction of infinities */
            farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXISI);
        } else {
            ft1_128 = float64_to_float128(farg3.d, &env->fp_status);
            ft0_128 = float128_add(ft0_128, ft1_128, &env->fp_status);
            farg1.d = float128_to_float64(ft0_128, &env->fp_status);
        }
#else
        /* This is OK on x86 hosts */
        farg1.d = (farg1.d * farg2.d) + farg3.d;
#endif
    }
#else
    farg1.d = float64_mul(farg1.d, farg2.d, &env->fp_status);
    farg1.d = float64_add(farg1.d, farg3.d, &env->fp_status);
#endif
    return farg1.ll;
}
/* fmsub - fmsub. */
uint64_t helper_fmsub (uint64_t arg1, uint64_t arg2, uint64_t arg3)
{
    CPU_DoubleU farg1, farg2, farg3;

    farg1.ll = arg1;
    farg2.ll = arg2;
    farg3.ll = arg3;
#if USE_PRECISE_EMULATION
    if (unlikely(float64_is_signaling_nan(farg1.d) ||
                 float64_is_signaling_nan(farg2.d) ||
                 float64_is_signaling_nan(farg3.d))) {
        /* sNaN operation */
        farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
    } else if (unlikely((float64_is_infinity(farg1.d) && float64_is_zero(farg2.d)) ||
                        (float64_is_zero(farg1.d) && float64_is_infinity(farg2.d)))) {
        /* Multiplication of zero by infinity */
        farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXIMZ);
    } else {
#ifdef FLOAT128
        /* This is the way the PowerPC specification defines it */
        float128 ft0_128, ft1_128;

        ft0_128 = float64_to_float128(farg1.d, &env->fp_status);
        ft1_128 = float64_to_float128(farg2.d, &env->fp_status);
        ft0_128 = float128_mul(ft0_128, ft1_128, &env->fp_status);
        if (unlikely(float128_is_infinity(ft0_128) && float64_is_infinity(farg3.d) &&
                     float128_is_neg(ft0_128) == float64_is_neg(farg3.d))) {
            /* Magnitude subtraction of infinities */
            farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXISI);
        } else {
            ft1_128 = float64_to_float128(farg3.d, &env->fp_status);
            ft0_128 = float128_sub(ft0_128, ft1_128, &env->fp_status);
            farg1.d = float128_to_float64(ft0_128, &env->fp_status);
        }
#else
        /* This is OK on x86 hosts */
        farg1.d = (farg1.d * farg2.d) - farg3.d;
#endif
    }
#else
    farg1.d = float64_mul(farg1.d, farg2.d, &env->fp_status);
    farg1.d = float64_sub(farg1.d, farg3.d, &env->fp_status);
#endif
    return farg1.ll;
}
/* fnmadd - fnmadd. */
uint64_t helper_fnmadd (uint64_t arg1, uint64_t arg2, uint64_t arg3)
{
    CPU_DoubleU farg1, farg2, farg3;

    farg1.ll = arg1;
    farg2.ll = arg2;
    farg3.ll = arg3;

    if (unlikely(float64_is_signaling_nan(farg1.d) ||
                 float64_is_signaling_nan(farg2.d) ||
                 float64_is_signaling_nan(farg3.d))) {
        /* sNaN operation */
        farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
    } else if (unlikely((float64_is_infinity(farg1.d) && float64_is_zero(farg2.d)) ||
                        (float64_is_zero(farg1.d) && float64_is_infinity(farg2.d)))) {
        /* Multiplication of zero by infinity */
        farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXIMZ);
    } else {
#if USE_PRECISE_EMULATION
#ifdef FLOAT128
        /* This is the way the PowerPC specification defines it */
        float128 ft0_128, ft1_128;

        ft0_128 = float64_to_float128(farg1.d, &env->fp_status);
        ft1_128 = float64_to_float128(farg2.d, &env->fp_status);
        ft0_128 = float128_mul(ft0_128, ft1_128, &env->fp_status);
        if (unlikely(float128_is_infinity(ft0_128) && float64_is_infinity(farg3.d) &&
                     float128_is_neg(ft0_128) != float64_is_neg(farg3.d))) {
            /* Magnitude subtraction of infinities */
            farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXISI);
        } else {
            ft1_128 = float64_to_float128(farg3.d, &env->fp_status);
            ft0_128 = float128_add(ft0_128, ft1_128, &env->fp_status);
            farg1.d = float128_to_float64(ft0_128, &env->fp_status);
        }
#else
        /* This is OK on x86 hosts */
        farg1.d = (farg1.d * farg2.d) + farg3.d;
#endif
#else
        farg1.d = float64_mul(farg1.d, farg2.d, &env->fp_status);
        farg1.d = float64_add(farg1.d, farg3.d, &env->fp_status);
#endif
        if (likely(!float64_is_nan(farg1.d)))
            farg1.d = float64_chs(farg1.d);
    }
    return farg1.ll;
}
/* fnmsub - fnmsub. */
uint64_t helper_fnmsub (uint64_t arg1, uint64_t arg2, uint64_t arg3)
{
    CPU_DoubleU farg1, farg2, farg3;

    farg1.ll = arg1;
    farg2.ll = arg2;
    farg3.ll = arg3;

    if (unlikely(float64_is_signaling_nan(farg1.d) ||
                 float64_is_signaling_nan(farg2.d) ||
                 float64_is_signaling_nan(farg3.d))) {
        /* sNaN operation */
        farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
    } else if (unlikely((float64_is_infinity(farg1.d) && float64_is_zero(farg2.d)) ||
                        (float64_is_zero(farg1.d) && float64_is_infinity(farg2.d)))) {
        /* Multiplication of zero by infinity */
        farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXIMZ);
    } else {
#if USE_PRECISE_EMULATION
#ifdef FLOAT128
        /* This is the way the PowerPC specification defines it */
        float128 ft0_128, ft1_128;

        ft0_128 = float64_to_float128(farg1.d, &env->fp_status);
        ft1_128 = float64_to_float128(farg2.d, &env->fp_status);
        ft0_128 = float128_mul(ft0_128, ft1_128, &env->fp_status);
        if (unlikely(float128_is_infinity(ft0_128) && float64_is_infinity(farg3.d) &&
                     float128_is_neg(ft0_128) == float64_is_neg(farg3.d))) {
            /* Magnitude subtraction of infinities */
            farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXISI);
        } else {
            ft1_128 = float64_to_float128(farg3.d, &env->fp_status);
            ft0_128 = float128_sub(ft0_128, ft1_128, &env->fp_status);
            farg1.d = float128_to_float64(ft0_128, &env->fp_status);
        }
#else
        /* This is OK on x86 hosts */
        farg1.d = (farg1.d * farg2.d) - farg3.d;
#endif
#else
        farg1.d = float64_mul(farg1.d, farg2.d, &env->fp_status);
        farg1.d = float64_sub(farg1.d, farg3.d, &env->fp_status);
#endif
        if (likely(!float64_is_nan(farg1.d)))
            farg1.d = float64_chs(farg1.d);
    }
    return farg1.ll;
}
/* frsp - frsp. */
uint64_t helper_frsp (uint64_t arg)
{
    CPU_DoubleU farg;
    float32 f32;
    farg.ll = arg;

#if USE_PRECISE_EMULATION
    if (unlikely(float64_is_signaling_nan(farg.d))) {
        /* sNaN conversion */
        farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
    } else {
        f32 = float64_to_float32(farg.d, &env->fp_status);
        farg.d = float32_to_float64(f32, &env->fp_status);
    }
#else
    f32 = float64_to_float32(farg.d, &env->fp_status);
    farg.d = float32_to_float64(f32, &env->fp_status);
#endif
    return farg.ll;
}
/* fsqrt - fsqrt. */
uint64_t helper_fsqrt (uint64_t arg)
{
    CPU_DoubleU farg;
    farg.ll = arg;

    if (unlikely(float64_is_signaling_nan(farg.d))) {
        /* sNaN square root */
        farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
    } else if (unlikely(float64_is_neg(farg.d) && !float64_is_zero(farg.d))) {
        /* Square root of a negative nonzero number */
        farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSQRT);
    } else {
        farg.d = float64_sqrt(farg.d, &env->fp_status);
    }
    return farg.ll;
}

/* fre - fre. */
uint64_t helper_fre (uint64_t arg)
{
    CPU_DoubleU farg;
    farg.ll = arg;

    if (unlikely(float64_is_signaling_nan(farg.d))) {
        /* sNaN reciprocal */
        farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
    } else {
        farg.d = float64_div(float64_one, farg.d, &env->fp_status);
    }
    return farg.ll;
}

/* fres - fres. */
uint64_t helper_fres (uint64_t arg)
{
    CPU_DoubleU farg;
    float32 f32;
    farg.ll = arg;

    if (unlikely(float64_is_signaling_nan(farg.d))) {
        /* sNaN reciprocal */
        farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
    } else {
        farg.d = float64_div(float64_one, farg.d, &env->fp_status);
        f32 = float64_to_float32(farg.d, &env->fp_status);
        farg.d = float32_to_float64(f32, &env->fp_status);
    }
    return farg.ll;
}

/* frsqrte - frsqrte. */
uint64_t helper_frsqrte (uint64_t arg)
{
    CPU_DoubleU farg;
    float32 f32;
    farg.ll = arg;

    if (unlikely(float64_is_signaling_nan(farg.d))) {
        /* sNaN reciprocal square root */
        farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
    } else if (unlikely(float64_is_neg(farg.d) && !float64_is_zero(farg.d))) {
        /* Reciprocal square root of a negative nonzero number */
        farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSQRT);
    } else {
        farg.d = float64_sqrt(farg.d, &env->fp_status);
        farg.d = float64_div(float64_one, farg.d, &env->fp_status);
        f32 = float64_to_float32(farg.d, &env->fp_status);
        farg.d = float32_to_float64(f32, &env->fp_status);
    }
    return farg.ll;
}
/* fsel - fsel. */
uint64_t helper_fsel (uint64_t arg1, uint64_t arg2, uint64_t arg3)
{
    CPU_DoubleU farg1;

    farg1.ll = arg1;

    if ((!float64_is_neg(farg1.d) || float64_is_zero(farg1.d)) && !float64_is_nan(farg1.d))
        return arg2;
    else
        return arg3;
}
void helper_fcmpu (uint64_t arg1, uint64_t arg2, uint32_t crfD)
{
    CPU_DoubleU farg1, farg2;
    uint32_t ret = 0;
    farg1.ll = arg1;
    farg2.ll = arg2;

    if (unlikely(float64_is_nan(farg1.d) ||
                 float64_is_nan(farg2.d))) {
        ret = 0x01UL;
    } else if (float64_lt(farg1.d, farg2.d, &env->fp_status)) {
        ret = 0x08UL;
    } else if (!float64_le(farg1.d, farg2.d, &env->fp_status)) {
        ret = 0x04UL;
    } else {
        ret = 0x02UL;
    }

    env->fpscr &= ~(0x0F << FPSCR_FPRF);
    env->fpscr |= ret << FPSCR_FPRF;
    env->crf[crfD] = ret;
    if (unlikely(ret == 0x01UL
                 && (float64_is_signaling_nan(farg1.d) ||
                     float64_is_signaling_nan(farg2.d)))) {
        /* sNaN comparison */
        fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
    }
}

void helper_fcmpo (uint64_t arg1, uint64_t arg2, uint32_t crfD)
{
    CPU_DoubleU farg1, farg2;
    uint32_t ret = 0;
    farg1.ll = arg1;
    farg2.ll = arg2;

    if (unlikely(float64_is_nan(farg1.d) ||
                 float64_is_nan(farg2.d))) {
        ret = 0x01UL;
    } else if (float64_lt(farg1.d, farg2.d, &env->fp_status)) {
        ret = 0x08UL;
    } else if (!float64_le(farg1.d, farg2.d, &env->fp_status)) {
        ret = 0x04UL;
    } else {
        ret = 0x02UL;
    }

    env->fpscr &= ~(0x0F << FPSCR_FPRF);
    env->fpscr |= ret << FPSCR_FPRF;
    env->crf[crfD] = ret;
    if (unlikely (ret == 0x01UL)) {
        if (float64_is_signaling_nan(farg1.d) ||
            float64_is_signaling_nan(farg2.d)) {
            /* sNaN comparison */
            fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN |
                                  POWERPC_EXCP_FP_VXVC);
        } else {
            /* qNaN comparison */
            fload_invalid_op_excp(POWERPC_EXCP_FP_VXVC);
        }
    }
}
#if !defined (CONFIG_USER_ONLY)
void helper_store_msr (target_ulong val)
{
    val = hreg_store_msr(env, val, 0);
    if (val != 0) {
        env->interrupt_request |= CPU_INTERRUPT_EXITTB;
        helper_raise_exception(val);
    }
}
static always_inline void do_rfi (target_ulong nip, target_ulong msr,
                                  target_ulong msrm, int keep_msrh)
{
#if defined(TARGET_PPC64)
    if (msr & (1ULL << MSR_SF)) {
        nip = (uint64_t)nip;
        msr &= (uint64_t)msrm;
    } else {
        nip = (uint32_t)nip;
        msr = (uint32_t)(msr & msrm);
        if (keep_msrh)
            msr |= env->msr & ~((uint64_t)0xFFFFFFFF);
    }
#else
    nip = (uint32_t)nip;
    msr &= (uint32_t)msrm;
#endif
    /* XXX: beware: this is false if VLE is supported */
    env->nip = nip & ~((target_ulong)0x00000003);
    hreg_store_msr(env, msr, 1);
#if defined (DEBUG_OP)
    cpu_dump_rfi(env->nip, env->msr);
#endif
    /* No need to raise an exception here,
     * as rfi is always the last insn of a TB
     */
    env->interrupt_request |= CPU_INTERRUPT_EXITTB;
}

void helper_rfi (void)
{
    do_rfi(env->spr[SPR_SRR0], env->spr[SPR_SRR1],
           ~((target_ulong)0x0), 1);
}

#if defined(TARGET_PPC64)
void helper_rfid (void)
{
    do_rfi(env->spr[SPR_SRR0], env->spr[SPR_SRR1],
           ~((target_ulong)0x0), 0);
}

void helper_hrfid (void)
{
    do_rfi(env->spr[SPR_HSRR0], env->spr[SPR_HSRR1],
           ~((target_ulong)0x0), 0);
}
#endif
#endif
void helper_tw (target_ulong arg1, target_ulong arg2, uint32_t flags)
{
    if (!likely(!(((int32_t)arg1 < (int32_t)arg2 && (flags & 0x10)) ||
                  ((int32_t)arg1 > (int32_t)arg2 && (flags & 0x08)) ||
                  ((int32_t)arg1 == (int32_t)arg2 && (flags & 0x04)) ||
                  ((uint32_t)arg1 < (uint32_t)arg2 && (flags & 0x02)) ||
                  ((uint32_t)arg1 > (uint32_t)arg2 && (flags & 0x01))))) {
        helper_raise_exception_err(POWERPC_EXCP_PROGRAM, POWERPC_EXCP_TRAP);
    }
}

#if defined(TARGET_PPC64)
void helper_td (target_ulong arg1, target_ulong arg2, uint32_t flags)
{
    if (!likely(!(((int64_t)arg1 < (int64_t)arg2 && (flags & 0x10)) ||
                  ((int64_t)arg1 > (int64_t)arg2 && (flags & 0x08)) ||
                  ((int64_t)arg1 == (int64_t)arg2 && (flags & 0x04)) ||
                  ((uint64_t)arg1 < (uint64_t)arg2 && (flags & 0x02)) ||
                  ((uint64_t)arg1 > (uint64_t)arg2 && (flags & 0x01)))))
        helper_raise_exception_err(POWERPC_EXCP_PROGRAM, POWERPC_EXCP_TRAP);
}
#endif
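/* Note (added): flags is the TO field of tw/td.  Bits 0x10 .. 0x01 select
 * signed-less-than, signed-greater-than, equal, unsigned-less-than and
 * unsigned-greater-than respectively, so TO = 31 (all bits set) is the
 * unconditional trap.  Illustrative value only:
 *
 *     helper_tw(r1, r2, 0x04);   -- traps only when r1 == r2 (tweq)
 */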
/*****************************************************************************/
/* PowerPC 601 specific instructions (POWER bridge) */

target_ulong helper_clcs (uint32_t arg)
{
    switch (arg) {
    case 0x0CUL:
        /* Instruction cache line size */
        return env->icache_line_size;
        break;
    case 0x0DUL:
        /* Data cache line size */
        return env->dcache_line_size;
        break;
    case 0x0EUL:
        /* Minimum cache line size */
        return (env->icache_line_size < env->dcache_line_size) ?
                env->icache_line_size : env->dcache_line_size;
        break;
    case 0x0FUL:
        /* Maximum cache line size */
        return (env->icache_line_size > env->dcache_line_size) ?
                env->icache_line_size : env->dcache_line_size;
        break;
    default:
        /* Undefined */
        return 0;
        break;
    }
}
target_ulong helper_div (target_ulong arg1, target_ulong arg2)
{
    uint64_t tmp = (uint64_t)arg1 << 32 | env->spr[SPR_MQ];

    if (((int32_t)tmp == INT32_MIN && (int32_t)arg2 == (int32_t)-1) ||
        (int32_t)arg2 == 0) {
        env->spr[SPR_MQ] = 0;
        return INT32_MIN;
    } else {
        env->spr[SPR_MQ] = tmp % arg2;
        return tmp / (int32_t)arg2;
    }
}

target_ulong helper_divo (target_ulong arg1, target_ulong arg2)
{
    uint64_t tmp = (uint64_t)arg1 << 32 | env->spr[SPR_MQ];

    if (((int32_t)tmp == INT32_MIN && (int32_t)arg2 == (int32_t)-1) ||
        (int32_t)arg2 == 0) {
        env->xer |= (1 << XER_OV) | (1 << XER_SO);
        env->spr[SPR_MQ] = 0;
        return INT32_MIN;
    } else {
        env->spr[SPR_MQ] = tmp % arg2;
        tmp /= (int32_t)arg2;
        if ((int32_t)tmp != tmp) {
            env->xer |= (1 << XER_OV) | (1 << XER_SO);
        } else {
            env->xer &= ~(1 << XER_OV);
        }
        return tmp;
    }
}

target_ulong helper_divs (target_ulong arg1, target_ulong arg2)
{
    if (((int32_t)arg1 == INT32_MIN && (int32_t)arg2 == (int32_t)-1) ||
        (int32_t)arg2 == 0) {
        env->spr[SPR_MQ] = 0;
        return INT32_MIN;
    } else {
        env->spr[SPR_MQ] = (int32_t)arg1 % (int32_t)arg2;
        return (int32_t)arg1 / (int32_t)arg2;
    }
}

target_ulong helper_divso (target_ulong arg1, target_ulong arg2)
{
    if (((int32_t)arg1 == INT32_MIN && (int32_t)arg2 == (int32_t)-1) ||
        (int32_t)arg2 == 0) {
        env->xer |= (1 << XER_OV) | (1 << XER_SO);
        env->spr[SPR_MQ] = 0;
        return INT32_MIN;
    } else {
        env->xer &= ~(1 << XER_OV);
        env->spr[SPR_MQ] = (int32_t)arg1 % (int32_t)arg2;
        return (int32_t)arg1 / (int32_t)arg2;
    }
}
#if !defined (CONFIG_USER_ONLY)
target_ulong helper_rac (target_ulong addr)
{
    mmu_ctx_t ctx;
    int nb_BATs;
    target_ulong ret = 0;

    /* We don't have to generate many instances of this instruction,
     * as rac is supervisor only.
     */
    /* XXX: FIX THIS: Pretend we have no BAT */
    nb_BATs = env->nb_BATs;
    env->nb_BATs = 0;
    if (get_physical_address(env, &ctx, addr, 0, ACCESS_INT) == 0)
        ret = ctx.raddr;
    env->nb_BATs = nb_BATs;
    return ret;
}

void helper_rfsvc (void)
{
    do_rfi(env->lr, env->ctr, 0x0000FFFF, 0);
}
#endif
/*****************************************************************************/
/* 602 specific instructions */
/* mfrom is the most crazy instruction ever seen, imho ! */
/* Real implementation uses a ROM table. Do the same */
/* Extremely decomposed:
 *                      -arg / 256
 * return 256 * log10(10           + 1.0) + 0.5
 */
#if !defined (CONFIG_USER_ONLY)
target_ulong helper_602_mfrom (target_ulong arg)
{
    if (likely(arg < 602)) {
#include "mfrom_table.c"
        return mfrom_ROM_table[arg];
    } else {
        return 0;
    }
}
#endif
/*****************************************************************************/
/* Embedded PowerPC specific helpers */

/* XXX: to be improved to check access rights when in user-mode */
target_ulong helper_load_dcr (target_ulong dcrn)
{
    target_ulong val = 0;

    if (unlikely(env->dcr_env == NULL)) {
        qemu_log("No DCR environment\n");
        helper_raise_exception_err(POWERPC_EXCP_PROGRAM,
                                   POWERPC_EXCP_INVAL | POWERPC_EXCP_INVAL_INVAL);
    } else if (unlikely(ppc_dcr_read(env->dcr_env, dcrn, &val) != 0)) {
        qemu_log("DCR read error %d %03x\n", (int)dcrn, (int)dcrn);
        helper_raise_exception_err(POWERPC_EXCP_PROGRAM,
                                   POWERPC_EXCP_INVAL | POWERPC_EXCP_PRIV_REG);
    }
    return val;
}

void helper_store_dcr (target_ulong dcrn, target_ulong val)
{
    if (unlikely(env->dcr_env == NULL)) {
        qemu_log("No DCR environment\n");
        helper_raise_exception_err(POWERPC_EXCP_PROGRAM,
                                   POWERPC_EXCP_INVAL | POWERPC_EXCP_INVAL_INVAL);
    } else if (unlikely(ppc_dcr_write(env->dcr_env, dcrn, val) != 0)) {
        qemu_log("DCR write error %d %03x\n", (int)dcrn, (int)dcrn);
        helper_raise_exception_err(POWERPC_EXCP_PROGRAM,
                                   POWERPC_EXCP_INVAL | POWERPC_EXCP_PRIV_REG);
    }
}
#if !defined(CONFIG_USER_ONLY)
void helper_40x_rfci (void)
{
    do_rfi(env->spr[SPR_40x_SRR2], env->spr[SPR_40x_SRR3],
           ~((target_ulong)0xFFFF0000), 0);
}

/* The second argument to do_rfi() must be the saved MSR *value*, read from
 * the SPR array, not the SPR number itself.
 */
void helper_rfci (void)
{
    do_rfi(env->spr[SPR_BOOKE_CSRR0], env->spr[SPR_BOOKE_CSRR1],
           ~((target_ulong)0x3FFF0000), 0);
}

void helper_rfdi (void)
{
    do_rfi(env->spr[SPR_BOOKE_DSRR0], env->spr[SPR_BOOKE_DSRR1],
           ~((target_ulong)0x3FFF0000), 0);
}

void helper_rfmci (void)
{
    do_rfi(env->spr[SPR_BOOKE_MCSRR0], env->spr[SPR_BOOKE_MCSRR1],
           ~((target_ulong)0x3FFF0000), 0);
}
#endif
target_ulong helper_dlmzb (target_ulong high, target_ulong low, uint32_t update_Rc)
{
    target_ulong mask;
    int i;

    i = 1;
    for (mask = 0xFF000000; mask != 0; mask = mask >> 8) {
        if ((high & mask) == 0) {
            if (update_Rc) {
                env->crf[0] = 0x4;
            }
            goto done;
        }
        i++;
    }
    for (mask = 0xFF000000; mask != 0; mask = mask >> 8) {
        if ((low & mask) == 0) {
            if (update_Rc) {
                env->crf[0] = 0x8;
            }
            goto done;
        }
        i++;
    }
    if (update_Rc) {
        env->crf[0] = 0x2;
    }
    i = 8;
 done:
    env->xer = (env->xer & ~0x7F) | i;
    if (update_Rc) {
        env->crf[0] |= xer_so;
    }
    return i;
}
/*****************************************************************************/
/* Altivec extension helpers */
#if defined(WORDS_BIGENDIAN)
#define HI_IDX 0
#define LO_IDX 1
#else
#define HI_IDX 1
#define LO_IDX 0
#endif

#if defined(WORDS_BIGENDIAN)
#define VECTOR_FOR_INORDER_I(index, element)            \
    for (index = 0; index < ARRAY_SIZE(r->element); index++)
#else
#define VECTOR_FOR_INORDER_I(index, element)            \
    for (index = ARRAY_SIZE(r->element)-1; index >= 0; index--)
#endif

/* If X is a NaN, store the corresponding QNaN into RESULT.  Otherwise,
 * execute the following block.  */
#define DO_HANDLE_NAN(result, x)                                \
    if (float32_is_nan(x) || float32_is_signaling_nan(x)) {     \
        CPU_FloatU __f;                                         \
        __f.f = x;                                              \
        __f.l = __f.l | (1 << 22);  /* Set QNaN bit. */         \
        result = __f.f;                                         \
    } else

#define HANDLE_NAN1(result, x)                  \
    DO_HANDLE_NAN(result, x)
#define HANDLE_NAN2(result, x, y)               \
    DO_HANDLE_NAN(result, x) DO_HANDLE_NAN(result, y)
#define HANDLE_NAN3(result, x, y, z)            \
    DO_HANDLE_NAN(result, x) DO_HANDLE_NAN(result, y) DO_HANDLE_NAN(result, z)
/* Saturating arithmetic helpers.  */
#define SATCVT(from, to, from_type, to_type, min, max, use_min, use_max) \
    static always_inline to_type cvt##from##to (from_type x, int *sat)  \
    {                                                                   \
        to_type r;                                                      \
        if (use_min && x < min) {                                       \
            r = min;                                                    \
            *sat = 1;                                                   \
        } else if (use_max && x > max) {                                \
            r = max;                                                    \
            *sat = 1;                                                   \
        } else {                                                        \
            r = x;                                                      \
        }                                                               \
        return r;                                                       \
    }
SATCVT(sh, sb, int16_t, int8_t, INT8_MIN, INT8_MAX, 1, 1)
SATCVT(sw, sh, int32_t, int16_t, INT16_MIN, INT16_MAX, 1, 1)
SATCVT(sd, sw, int64_t, int32_t, INT32_MIN, INT32_MAX, 1, 1)
SATCVT(uh, ub, uint16_t, uint8_t, 0, UINT8_MAX, 0, 1)
SATCVT(uw, uh, uint32_t, uint16_t, 0, UINT16_MAX, 0, 1)
SATCVT(ud, uw, uint64_t, uint32_t, 0, UINT32_MAX, 0, 1)
SATCVT(sh, ub, int16_t, uint8_t, 0, UINT8_MAX, 1, 1)
SATCVT(sw, uh, int32_t, uint16_t, 0, UINT16_MAX, 1, 1)
SATCVT(sd, uw, int64_t, uint32_t, 0, UINT32_MAX, 1, 1)
#undef SATCVT
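/* Note (added): the generated converters clamp out-of-range values and
 * flag saturation through *sat.  Illustrative values only:
 *
 *     int sat = 0;
 *     cvtshsb(300, &sat);   -- returns 127 (INT8_MAX) and sets sat = 1
 *     cvtshsb(-5, &sat);    -- returns -5, sat unchanged
 */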
#define LVE(name, access, swap, element)                        \
    void helper_##name (ppc_avr_t *r, target_ulong addr)        \
    {                                                           \
        size_t n_elems = ARRAY_SIZE(r->element);                \
        int adjust = HI_IDX*(n_elems-1);                        \
        int sh = sizeof(r->element[0]) >> 1;                    \
        int index = (addr & 0xf) >> sh;                         \
        if (msr_le) {                                           \
            r->element[LO_IDX ? index : (adjust - index)] = swap(access(addr)); \
        } else {                                                        \
            r->element[LO_IDX ? index : (adjust - index)] = access(addr); \
        }                                                               \
    }
#define I(x) (x)
LVE(lvebx, ldub, I, u8)
LVE(lvehx, lduw, bswap16, u16)
LVE(lvewx, ldl, bswap32, u32)
#undef I
#undef LVE
void helper_lvsl (ppc_avr_t *r, target_ulong sh)
{
    int i, j = (sh & 0xf);

    VECTOR_FOR_INORDER_I (i, u8) {
        r->u8[i] = j++;
    }
}

void helper_lvsr (ppc_avr_t *r, target_ulong sh)
{
    int i, j = 0x10 - (sh & 0xf);

    VECTOR_FOR_INORDER_I (i, u8) {
        r->u8[i] = j++;
    }
}
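/* Note (added): lvsl/lvsr produce the permute-control vector that vperm
 * uses to realign unaligned vector loads; for lvsl the bytes are sh, sh+1,
 * ..., sh+15, where sh is the low nibble of the address.  Illustrative
 * value only: sh = 3 gives { 0x03, 0x04, ..., 0x12 }.
 */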
#define STVE(name, access, swap, element)                       \
    void helper_##name (ppc_avr_t *r, target_ulong addr)        \
    {                                                           \
        size_t n_elems = ARRAY_SIZE(r->element);                \
        int adjust = HI_IDX*(n_elems-1);                        \
        int sh = sizeof(r->element[0]) >> 1;                    \
        int index = (addr & 0xf) >> sh;                         \
        if (msr_le) {                                           \
            access(addr, swap(r->element[LO_IDX ? index : (adjust - index)])); \
        } else {                                                        \
            access(addr, r->element[LO_IDX ? index : (adjust - index)]); \
        }                                                               \
    }
#define I(x) (x)
STVE(stvebx, stb, I, u8)
STVE(stvehx, stw, bswap16, u16)
STVE(stvewx, stl, bswap32, u32)
#undef I
#undef STVE
void helper_mtvscr (ppc_avr_t *r)
{
#if defined(WORDS_BIGENDIAN)
    env->vscr = r->u32[3];
#else
    env->vscr = r->u32[0];
#endif
    set_flush_to_zero(vscr_nj, &env->vec_status);
}
void helper_vaddcuw (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
{
    int i;

    for (i = 0; i < ARRAY_SIZE(r->u32); i++) {
        r->u32[i] = ~a->u32[i] < b->u32[i];
    }
}
#define VARITH_DO(name, op, element)                                    \
void helper_v##name (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)          \
{                                                                       \
    int i;                                                              \
    for (i = 0; i < ARRAY_SIZE(r->element); i++) {                      \
        r->element[i] = a->element[i] op b->element[i];                 \
    }                                                                   \
}
#define VARITH(suffix, element)                 \
  VARITH_DO(add##suffix, +, element)            \
  VARITH_DO(sub##suffix, -, element)
VARITH(ubm, u8)
VARITH(uhm, u16)
VARITH(uwm, u32)
#undef VARITH_DO
#undef VARITH

#define VARITHFP(suffix, func)                                          \
    void helper_v##suffix (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)    \
    {                                                                   \
        int i;                                                          \
        for (i = 0; i < ARRAY_SIZE(r->f); i++) {                        \
            HANDLE_NAN2(r->f[i], a->f[i], b->f[i]) {                    \
                r->f[i] = func(a->f[i], b->f[i], &env->vec_status);     \
            }                                                           \
        }                                                               \
    }
VARITHFP(addfp, float32_add)
VARITHFP(subfp, float32_sub)
#undef VARITHFP
#define VARITHSAT_CASE(type, op, cvt, element)                          \
    {                                                                   \
        type result = (type)a->element[i] op (type)b->element[i];       \
        r->element[i] = cvt(result, &sat);                              \
    }

#define VARITHSAT_DO(name, op, optype, cvt, element)                    \
    void helper_v##name (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)      \
    {                                                                   \
        int sat = 0;                                                    \
        int i;                                                          \
        for (i = 0; i < ARRAY_SIZE(r->element); i++) {                  \
            switch (sizeof(r->element[0])) {                            \
            case 1: VARITHSAT_CASE(optype, op, cvt, element); break;    \
            case 2: VARITHSAT_CASE(optype, op, cvt, element); break;    \
            case 4: VARITHSAT_CASE(optype, op, cvt, element); break;    \
            }                                                           \
        }                                                               \
        if (sat) {                                                      \
            env->vscr |= (1 << VSCR_SAT);                               \
        }                                                               \
    }
#define VARITHSAT_SIGNED(suffix, element, optype, cvt)        \
    VARITHSAT_DO(adds##suffix##s, +, optype, cvt, element)    \
    VARITHSAT_DO(subs##suffix##s, -, optype, cvt, element)
#define VARITHSAT_UNSIGNED(suffix, element, optype, cvt)      \
    VARITHSAT_DO(addu##suffix##s, +, optype, cvt, element)    \
    VARITHSAT_DO(subu##suffix##s, -, optype, cvt, element)
VARITHSAT_SIGNED(b, s8, int16_t, cvtshsb)
VARITHSAT_SIGNED(h, s16, int32_t, cvtswsh)
VARITHSAT_SIGNED(w, s32, int64_t, cvtsdsw)
VARITHSAT_UNSIGNED(b, u8, uint16_t, cvtshub)
VARITHSAT_UNSIGNED(h, u16, uint32_t, cvtswuh)
VARITHSAT_UNSIGNED(w, u32, uint64_t, cvtsduw)
#undef VARITHSAT_CASE
#undef VARITHSAT_DO
#undef VARITHSAT_SIGNED
#undef VARITHSAT_UNSIGNED
#define VAVG_DO(name, element, etype)                                   \
    void helper_v##name (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)      \
    {                                                                   \
        int i;                                                          \
        for (i = 0; i < ARRAY_SIZE(r->element); i++) {                  \
            etype x = (etype)a->element[i] + (etype)b->element[i] + 1;  \
            r->element[i] = x >> 1;                                     \
        }                                                               \
    }

#define VAVG(type, signed_element, signed_type, unsigned_element, unsigned_type) \
    VAVG_DO(avgs##type, signed_element, signed_type)                    \
    VAVG_DO(avgu##type, unsigned_element, unsigned_type)
VAVG(b, s8, int16_t, u8, uint16_t)
VAVG(h, s16, int32_t, u16, uint32_t)
VAVG(w, s32, int64_t, u32, uint64_t)
#undef VAVG_DO
#undef VAVG
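/* Note (added): the "+ 1" before the shift makes vavg* round half-way
 * cases up, as the architecture requires; the arithmetic is done in the
 * wider etype so the intermediate sum cannot overflow.  Illustrative
 * values only:
 *
 *     avgub: (1 + 2 + 1) >> 1 == 2,    (255 + 255 + 1) >> 1 == 255
 */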
#define VCF(suffix, cvt, element)                                       \
    void helper_vcf##suffix (ppc_avr_t *r, ppc_avr_t *b, uint32_t uim)  \
    {                                                                   \
        int i;                                                          \
        for (i = 0; i < ARRAY_SIZE(r->f); i++) {                        \
            float32 t = cvt(b->element[i], &env->vec_status);           \
            r->f[i] = float32_scalbn (t, -uim, &env->vec_status);       \
        }                                                               \
    }
VCF(ux, uint32_to_float32, u32)
VCF(sx, int32_to_float32, s32)
#undef VCF
#define VCMP_DO(suffix, compare, element, record)                       \
    void helper_vcmp##suffix (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b) \
    {                                                                   \
        uint32_t ones = (uint32_t)-1;                                   \
        uint32_t all = ones;                                            \
        uint32_t none = 0;                                              \
        int i;                                                          \
        for (i = 0; i < ARRAY_SIZE(r->element); i++) {                  \
            uint32_t result = (a->element[i] compare b->element[i] ? ones : 0x0); \
            switch (sizeof (a->element[0])) {                           \
            case 4: r->u32[i] = result; break;                          \
            case 2: r->u16[i] = result; break;                          \
            case 1: r->u8[i] = result; break;                           \
            }                                                           \
            all &= result;                                              \
            none |= result;                                             \
        }                                                               \
        if (record) {                                                   \
            env->crf[6] = ((all != 0) << 3) | ((none == 0) << 1);       \
        }                                                               \
    }
#define VCMP(suffix, compare, element)          \
    VCMP_DO(suffix, compare, element, 0)        \
    VCMP_DO(suffix##_dot, compare, element, 1)
VCMP(equb, ==, u8)
VCMP(equh, ==, u16)
VCMP(equw, ==, u32)
VCMP(gtub, >, u8)
VCMP(gtuh, >, u16)
VCMP(gtuw, >, u32)
VCMP(gtsb, >, s8)
VCMP(gtsh, >, s16)
VCMP(gtsw, >, s32)
#undef VCMP_DO
#undef VCMP

#define VCMPFP_DO(suffix, compare, order, record)                       \
    void helper_vcmp##suffix (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b) \
    {                                                                   \
        uint32_t ones = (uint32_t)-1;                                   \
        uint32_t all = ones;                                            \
        uint32_t none = 0;                                              \
        int i;                                                          \
        for (i = 0; i < ARRAY_SIZE(r->f); i++) {                        \
            uint32_t result;                                            \
            int rel = float32_compare_quiet(a->f[i], b->f[i], &env->vec_status); \
            if (rel == float_relation_unordered) {                      \
                result = 0;                                             \
            } else if (rel compare order) {                             \
                result = ones;                                          \
            } else {                                                    \
                result = 0;                                             \
            }                                                           \
            r->u32[i] = result;                                         \
            all &= result;                                              \
            none |= result;                                             \
        }                                                               \
        if (record) {                                                   \
            env->crf[6] = ((all != 0) << 3) | ((none == 0) << 1);       \
        }                                                               \
    }
#define VCMPFP(suffix, compare, order)          \
    VCMPFP_DO(suffix, compare, order, 0)        \
    VCMPFP_DO(suffix##_dot, compare, order, 1)
VCMPFP(eqfp, ==, float_relation_equal)
VCMPFP(gefp, !=, float_relation_less)
VCMPFP(gtfp, ==, float_relation_greater)
#undef VCMPFP_DO
#undef VCMPFP
static always_inline void vcmpbfp_internal (ppc_avr_t *r, ppc_avr_t *a,
                                            ppc_avr_t *b, int record)
{
    int i;
    int all_in = 0;

    for (i = 0; i < ARRAY_SIZE(r->f); i++) {
        int le_rel = float32_compare_quiet(a->f[i], b->f[i], &env->vec_status);
        if (le_rel == float_relation_unordered) {
            r->u32[i] = 0xc0000000;
            /* ALL_IN does not need to be updated here. */
        } else {
            float32 bneg = float32_chs(b->f[i]);
            int ge_rel = float32_compare_quiet(a->f[i], bneg, &env->vec_status);
            int le = le_rel != float_relation_greater;
            int ge = ge_rel != float_relation_less;
            r->u32[i] = ((!le) << 31) | ((!ge) << 30);
            all_in |= (!le | !ge);
        }
    }
    if (record) {
        env->crf[6] = (all_in == 0) << 1;
    }
}

void helper_vcmpbfp (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
{
    vcmpbfp_internal(r, a, b, 0);
}

void helper_vcmpbfp_dot (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
{
    vcmpbfp_internal(r, a, b, 1);
}
#define VCT(suffix, satcvt, element)                                    \
    void helper_vct##suffix (ppc_avr_t *r, ppc_avr_t *b, uint32_t uim)  \
    {                                                                   \
        int i;                                                          \
        int sat = 0;                                                    \
        float_status s = env->vec_status;                               \
        set_float_rounding_mode(float_round_to_zero, &s);               \
        for (i = 0; i < ARRAY_SIZE(r->f); i++) {                        \
            if (float32_is_nan(b->f[i]) ||                              \
                float32_is_signaling_nan(b->f[i])) {                    \
                r->element[i] = 0;                                      \
            } else {                                                    \
                float64 t = float32_to_float64(b->f[i], &s);            \
                int64_t j;                                              \
                t = float64_scalbn(t, uim, &s);                         \
                j = float64_to_int64(t, &s);                            \
                r->element[i] = satcvt(j, &sat);                        \
            }                                                           \
        }                                                               \
        if (sat) {                                                      \
            env->vscr |= (1 << VSCR_SAT);                               \
        }                                                               \
    }
VCT(uxs, cvtsduw, u32)
VCT(sxs, cvtsdsw, s32)
#undef VCT
void helper_vmaddfp (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, ppc_avr_t *c)
{
    int i;
    for (i = 0; i < ARRAY_SIZE(r->f); i++) {
        HANDLE_NAN3(r->f[i], a->f[i], b->f[i], c->f[i]) {
            /* Need to do the computation in higher precision and round
             * once at the end. */
            float64 af, bf, cf, t;
            af = float32_to_float64(a->f[i], &env->vec_status);
            bf = float32_to_float64(b->f[i], &env->vec_status);
            cf = float32_to_float64(c->f[i], &env->vec_status);
            t = float64_mul(af, cf, &env->vec_status);
            t = float64_add(t, bf, &env->vec_status);
            r->f[i] = float64_to_float32(t, &env->vec_status);
        }
    }
}
void helper_vmhaddshs (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, ppc_avr_t *c)
{
    int sat = 0;
    int i;

    for (i = 0; i < ARRAY_SIZE(r->s16); i++) {
        int32_t prod = a->s16[i] * b->s16[i];
        int32_t t = (int32_t)c->s16[i] + (prod >> 15);
        r->s16[i] = cvtswsh (t, &sat);
    }

    if (sat) {
        env->vscr |= (1 << VSCR_SAT);
    }
}

void helper_vmhraddshs (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, ppc_avr_t *c)
{
    int sat = 0;
    int i;

    for (i = 0; i < ARRAY_SIZE(r->s16); i++) {
        int32_t prod = a->s16[i] * b->s16[i] + 0x00004000;
        int32_t t = (int32_t)c->s16[i] + (prod >> 15);
        r->s16[i] = cvtswsh (t, &sat);
    }

    if (sat) {
        env->vscr |= (1 << VSCR_SAT);
    }
}
#define VMINMAX_DO(name, compare, element)                              \
    void helper_v##name (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)      \
    {                                                                   \
        int i;                                                          \
        for (i = 0; i < ARRAY_SIZE(r->element); i++) {                  \
            if (a->element[i] compare b->element[i]) {                  \
                r->element[i] = b->element[i];                          \
            } else {                                                    \
                r->element[i] = a->element[i];                          \
            }                                                           \
        }                                                               \
    }
#define VMINMAX(suffix, element)                \
  VMINMAX_DO(min##suffix, >, element)           \
  VMINMAX_DO(max##suffix, <, element)
VMINMAX(sb, s8)
VMINMAX(sh, s16)
VMINMAX(sw, s32)
VMINMAX(ub, u8)
VMINMAX(uh, u16)
VMINMAX(uw, u32)
#undef VMINMAX_DO
#undef VMINMAX

#define VMINMAXFP(suffix, rT, rF)                                       \
    void helper_v##suffix (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)    \
    {                                                                   \
        int i;                                                          \
        for (i = 0; i < ARRAY_SIZE(r->f); i++) {                        \
            HANDLE_NAN2(r->f[i], a->f[i], b->f[i]) {                    \
                if (float32_lt_quiet(a->f[i], b->f[i], &env->vec_status)) { \
                    r->f[i] = rT->f[i];                                 \
                } else {                                                \
                    r->f[i] = rF->f[i];                                 \
                }                                                       \
            }                                                           \
        }                                                               \
    }
VMINMAXFP(minfp, a, b)
VMINMAXFP(maxfp, b, a)
#undef VMINMAXFP
void helper_vmladduhm (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, ppc_avr_t *c)
{
    int i;
    for (i = 0; i < ARRAY_SIZE(r->s16); i++) {
        int32_t prod = a->s16[i] * b->s16[i];
        r->s16[i] = (int16_t) (prod + c->s16[i]);
    }
}
#define VMRG_DO(name, element, highp)                                   \
    void helper_v##name (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)      \
    {                                                                   \
        ppc_avr_t result;                                               \
        int i;                                                          \
        size_t n_elems = ARRAY_SIZE(r->element);                        \
        for (i = 0; i < n_elems/2; i++) {                               \
            if (highp) {                                                \
                result.element[i*2+HI_IDX] = a->element[i];             \
                result.element[i*2+LO_IDX] = b->element[i];             \
            } else {                                                    \
                result.element[n_elems - i*2 - (1+HI_IDX)] = b->element[n_elems - i - 1]; \
                result.element[n_elems - i*2 - (1+LO_IDX)] = a->element[n_elems - i - 1]; \
            }                                                           \
        }                                                               \
        *r = result;                                                    \
    }
#if defined(WORDS_BIGENDIAN)
#define MRGHI 0
#define MRGLO 1
#else
#define MRGHI 1
#define MRGLO 0
#endif
#define VMRG(suffix, element)                   \
  VMRG_DO(mrgl##suffix, element, MRGHI)         \
  VMRG_DO(mrgh##suffix, element, MRGLO)
VMRG(b, u8)
VMRG(h, u16)
VMRG(w, u32)
#undef VMRG_DO
#undef VMRG
#undef MRGHI
#undef MRGLO

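/* Note the apparent swap above: the mrgl helpers are instantiated with
 * MRGHI and the mrgh helpers with MRGLO, and the two values flip with host
 * byte order, because element 0 of the in-memory arrays is the
 * highest-numbered architectural element on little-endian hosts. */
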
void helper_vmsummbm (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, ppc_avr_t *c)
{
    int32_t prod[16];
    int i;

    for (i = 0; i < ARRAY_SIZE(r->s8); i++) {
        prod[i] = (int32_t)a->s8[i] * b->u8[i];
    }

    VECTOR_FOR_INORDER_I(i, s32) {
        r->s32[i] = c->s32[i] + prod[4*i] + prod[4*i+1] + prod[4*i+2] + prod[4*i+3];
    }
}

void helper_vmsumshm (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, ppc_avr_t *c)
{
    int32_t prod[8];
    int i;

    for (i = 0; i < ARRAY_SIZE(r->s16); i++) {
        prod[i] = a->s16[i] * b->s16[i];
    }

    VECTOR_FOR_INORDER_I(i, s32) {
        r->s32[i] = c->s32[i] + prod[2*i] + prod[2*i+1];
    }
}

void helper_vmsumshs (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, ppc_avr_t *c)
{
    int32_t prod[8];
    int i;
    int sat = 0;

    for (i = 0; i < ARRAY_SIZE(r->s16); i++) {
        prod[i] = (int32_t)a->s16[i] * b->s16[i];
    }

    VECTOR_FOR_INORDER_I (i, s32) {
        int64_t t = (int64_t)c->s32[i] + prod[2*i] + prod[2*i+1];
        r->u32[i] = cvtsdsw(t, &sat);
    }

    if (sat) {
        env->vscr |= (1 << VSCR_SAT);
    }
}

void helper_vmsumubm (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, ppc_avr_t *c)
{
    uint16_t prod[16];
    int i;

    for (i = 0; i < ARRAY_SIZE(r->u8); i++) {
        prod[i] = a->u8[i] * b->u8[i];
    }

    VECTOR_FOR_INORDER_I(i, u32) {
        r->u32[i] = c->u32[i] + prod[4*i] + prod[4*i+1] + prod[4*i+2] + prod[4*i+3];
    }
}

void helper_vmsumuhm (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, ppc_avr_t *c)
{
    uint32_t prod[8];
    int i;

    for (i = 0; i < ARRAY_SIZE(r->u16); i++) {
        prod[i] = a->u16[i] * b->u16[i];
    }

    VECTOR_FOR_INORDER_I(i, u32) {
        r->u32[i] = c->u32[i] + prod[2*i] + prod[2*i+1];
    }
}

void helper_vmsumuhs (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, ppc_avr_t *c)
{
    uint32_t prod[8];
    int i;
    int sat = 0;

    for (i = 0; i < ARRAY_SIZE(r->u16); i++) {
        prod[i] = a->u16[i] * b->u16[i];
    }

    VECTOR_FOR_INORDER_I (i, s32) {
        uint64_t t = (uint64_t)c->u32[i] + prod[2*i] + prod[2*i+1];
        r->u32[i] = cvtuduw(t, &sat);
    }

    if (sat) {
        env->vscr |= (1 << VSCR_SAT);
    }
}

#define VMUL_DO(name, mul_element, prod_element, evenp)                 \
    void helper_v##name (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)      \
    {                                                                   \
        int i;                                                          \
        VECTOR_FOR_INORDER_I(i, prod_element) {                         \
            if (evenp) {                                                \
                r->prod_element[i] = a->mul_element[i*2+HI_IDX] * b->mul_element[i*2+HI_IDX]; \
            } else {                                                    \
                r->prod_element[i] = a->mul_element[i*2+LO_IDX] * b->mul_element[i*2+LO_IDX]; \
            }                                                           \
        }                                                               \
    }
#define VMUL(suffix, mul_element, prod_element)       \
  VMUL_DO(mule##suffix, mul_element, prod_element, 1) \
  VMUL_DO(mulo##suffix, mul_element, prod_element, 0)
VMUL(sb, s8, s16)
VMUL(sh, s16, s32)
VMUL(ub, u8, u16)
VMUL(uh, u16, u32)
#undef VMUL_DO
#undef VMUL

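/* The mule and mulo helpers multiply the even- or odd-numbered narrow
 * elements of a and b and store the full double-width products; the
 * HI_IDX/LO_IDX pairing keeps "even" referring to the architectural
 * element numbering on both host byte orders. */
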
void helper_vnmsubfp (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, ppc_avr_t *c)
{
    int i;

    for (i = 0; i < ARRAY_SIZE(r->f); i++) {
        HANDLE_NAN3(r->f[i], a->f[i], b->f[i], c->f[i]) {
            /* Need to do the computation in higher precision and round
             * once at the end.  */
            float64 af, bf, cf, t;
            af = float32_to_float64(a->f[i], &env->vec_status);
            bf = float32_to_float64(b->f[i], &env->vec_status);
            cf = float32_to_float64(c->f[i], &env->vec_status);
            t = float64_mul(af, cf, &env->vec_status);
            t = float64_sub(t, bf, &env->vec_status);
            t = float64_chs(t);
            r->f[i] = float64_to_float32(t, &env->vec_status);
        }
    }
}

void helper_vperm (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, ppc_avr_t *c)
{
    ppc_avr_t result;
    int i;

    VECTOR_FOR_INORDER_I (i, u8) {
        int s = c->u8[i] & 0x1f;
#if defined(WORDS_BIGENDIAN)
        int index = s & 0xf;
#else
        int index = 15 - (s & 0xf);
#endif
        if (s & 0x10) {
            result.u8[i] = b->u8[index];
        } else {
            result.u8[i] = a->u8[index];
        }
    }
    *r = result;
}

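/* vperm treats c as 16 byte selectors: the low 5 bits of each selector
 * index into the 32-byte concatenation of a and b, so architecturally
 * selector 0x13 picks byte 3 of b. The result is built in a temporary so
 * that r may alias a, b or c. */
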
#if defined(WORDS_BIGENDIAN)
#define PKBIG 1
#else
#define PKBIG 0
#endif

void helper_vpkpx (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
{
    int i, j;
    ppc_avr_t result;
#if defined(WORDS_BIGENDIAN)
    const ppc_avr_t *x[2] = { a, b };
#else
    const ppc_avr_t *x[2] = { b, a };
#endif

    VECTOR_FOR_INORDER_I (i, u64) {
        VECTOR_FOR_INORDER_I (j, u32) {
            uint32_t e = x[i]->u32[j];
            result.u16[4*i+j] = (((e >> 9) & 0xfc00) |
                                 ((e >> 6) & 0x3e0) |
                                 ((e >> 3) & 0x1f));
        }
    }
    *r = result;
}

#define VPK(suffix, from, to, cvt, dosat)                               \
    void helper_vpk##suffix (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)  \
    {                                                                   \
        int i;                                                          \
        int sat = 0;                                                    \
        ppc_avr_t result;                                               \
        ppc_avr_t *a0 = PKBIG ? a : b;                                  \
        ppc_avr_t *a1 = PKBIG ? b : a;                                  \
        VECTOR_FOR_INORDER_I (i, from) {                                \
            result.to[i] = cvt(a0->from[i], &sat);                      \
            result.to[i+ARRAY_SIZE(r->from)] = cvt(a1->from[i], &sat);  \
        }                                                               \
        *r = result;                                                    \
        if (dosat && sat) {                                             \
            env->vscr |= (1 << VSCR_SAT);                               \
        }                                                               \
    }
#define I(x, y) (x)
VPK(shss, s16, s8, cvtshsb, 1)
VPK(shus, s16, u8, cvtshub, 1)
VPK(swss, s32, s16, cvtswsh, 1)
VPK(swus, s32, u16, cvtswuh, 1)
VPK(uhus, u16, u8, cvtuhub, 1)
VPK(uwus, u32, u16, cvtuwuh, 1)
VPK(uhum, u16, u8, I, 0)
VPK(uwum, u32, u16, I, 0)
#undef I
#undef VPK
#undef PKBIG

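/* The modulo (non-saturating) packs reuse the VPK template with I() as a
 * conversion that simply truncates and never touches the sat flag, so
 * vpkuhum/vpkuwum fall out of the same code as the saturating forms. */
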
void helper_vrefp (ppc_avr_t *r, ppc_avr_t *b)
{
    int i;

    for (i = 0; i < ARRAY_SIZE(r->f); i++) {
        HANDLE_NAN1(r->f[i], b->f[i]) {
            r->f[i] = float32_div(float32_one, b->f[i], &env->vec_status);
        }
    }
}

#define VRFI(suffix, rounding)                                          \
    void helper_vrfi##suffix (ppc_avr_t *r, ppc_avr_t *b)               \
    {                                                                   \
        int i;                                                          \
        float_status s = env->vec_status;                               \
        set_float_rounding_mode(rounding, &s);                          \
        for (i = 0; i < ARRAY_SIZE(r->f); i++) {                        \
            HANDLE_NAN1(r->f[i], b->f[i]) {                             \
                r->f[i] = float32_round_to_int (b->f[i], &s);           \
            }                                                           \
        }                                                               \
    }
VRFI(n, float_round_nearest_even)
VRFI(m, float_round_down)
VRFI(p, float_round_up)
VRFI(z, float_round_to_zero)
#undef VRFI

#define VROTATE(suffix, element)                                        \
    void helper_vrl##suffix (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)  \
    {                                                                   \
        int i;                                                          \
        for (i = 0; i < ARRAY_SIZE(r->element); i++) {                  \
            unsigned int mask = ((1 << (3 + (sizeof (a->element[0]) >> 1))) - 1); \
            unsigned int shift = b->element[i] & mask;                  \
            r->element[i] = (a->element[i] << shift) | (a->element[i] >> (sizeof(a->element[0]) * 8 - shift)); \
        }                                                               \
    }
VROTATE(b, u8)
VROTATE(h, u16)
VROTATE(w, u32)
#undef VROTATE

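/* The mask computation reduces the rotate count modulo the element width:
 * element sizes of 1/2/4 bytes give masks of 0x7/0xf/0x1f respectively,
 * matching vrlb/vrlh/vrlw. */
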
void helper_vrsqrtefp (ppc_avr_t *r, ppc_avr_t *b)
{
    int i;

    for (i = 0; i < ARRAY_SIZE(r->f); i++) {
        HANDLE_NAN1(r->f[i], b->f[i]) {
            float32 t = float32_sqrt(b->f[i], &env->vec_status);
            r->f[i] = float32_div(float32_one, t, &env->vec_status);
        }
    }
}

void helper_vsel (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, ppc_avr_t *c)
{
    r->u64[0] = (a->u64[0] & ~c->u64[0]) | (b->u64[0] & c->u64[0]);
    r->u64[1] = (a->u64[1] & ~c->u64[1]) | (b->u64[1] & c->u64[1]);
}

void helper_vlogefp (ppc_avr_t *r, ppc_avr_t *b)
{
    int i;

    for (i = 0; i < ARRAY_SIZE(r->f); i++) {
        HANDLE_NAN1(r->f[i], b->f[i]) {
            r->f[i] = float32_log2(b->f[i], &env->vec_status);
        }
    }
}

#if defined(WORDS_BIGENDIAN)
#define LEFT 0
#define RIGHT 1
#else
#define LEFT 1
#define RIGHT 0
#endif
/* The specification says that the results are undefined if all of the
 * shift counts are not identical.  We check to make sure that they are
 * to conform to what real hardware appears to do.  */
#define VSHIFT(suffix, leftp)                                           \
    void helper_vs##suffix (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)   \
    {                                                                   \
        int shift = b->u8[LO_IDX*0xf] & 0x7;                            \
        int doit = 1;                                                   \
        int i;                                                          \
        for (i = 0; i < ARRAY_SIZE(r->u8); i++) {                       \
            doit = doit && ((b->u8[i] & 0x7) == shift);                 \
        }                                                               \
        if (doit) {                                                     \
            if (shift == 0) {                                           \
                *r = *a;                                                \
            } else if (leftp) {                                         \
                uint64_t carry = a->u64[LO_IDX] >> (64 - shift);        \
                r->u64[HI_IDX] = (a->u64[HI_IDX] << shift) | carry;     \
                r->u64[LO_IDX] = a->u64[LO_IDX] << shift;               \
            } else {                                                    \
                uint64_t carry = a->u64[HI_IDX] << (64 - shift);        \
                r->u64[LO_IDX] = (a->u64[LO_IDX] >> shift) | carry;     \
                r->u64[HI_IDX] = a->u64[HI_IDX] >> shift;               \
            }                                                           \
        }                                                               \
    }
VSHIFT(l, LEFT)
VSHIFT(r, RIGHT)
#undef VSHIFT
#undef LEFT
#undef RIGHT

#define VSL(suffix, element)                                            \
    void helper_vsl##suffix (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)  \
    {                                                                   \
        int i;                                                          \
        for (i = 0; i < ARRAY_SIZE(r->element); i++) {                  \
            unsigned int mask = ((1 << (3 + (sizeof (a->element[0]) >> 1))) - 1); \
            unsigned int shift = b->element[i] & mask;                  \
            r->element[i] = a->element[i] << shift;                     \
        }                                                               \
    }
VSL(b, u8)
VSL(h, u16)
VSL(w, u32)
#undef VSL

void helper_vsldoi (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, uint32_t shift)
{
    int sh = shift & 0xf;
    int i;
    ppc_avr_t result;

#if defined(WORDS_BIGENDIAN)
    for (i = 0; i < ARRAY_SIZE(r->u8); i++) {
        int index = sh + i;
        if (index > 0xf) {
            result.u8[i] = b->u8[index-0x10];
        } else {
            result.u8[i] = a->u8[index];
        }
    }
#else
    for (i = 0; i < ARRAY_SIZE(r->u8); i++) {
        int index = (16 - sh) + i;
        if (index > 0xf) {
            result.u8[i] = a->u8[index-0x10];
        } else {
            result.u8[i] = b->u8[index];
        }
    }
#endif
    *r = result;
}

void helper_vslo (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
{
    int sh = (b->u8[LO_IDX*0xf] >> 3) & 0xf;

#if defined (WORDS_BIGENDIAN)
    memmove (&r->u8[0], &a->u8[sh], 16-sh);
    memset (&r->u8[16-sh], 0, sh);
#else
    memmove (&r->u8[sh], &a->u8[0], 16-sh);
    memset (&r->u8[0], 0, sh);
#endif
}

/* Experimental testing shows that hardware masks the immediate.  */
#define _SPLAT_MASKED(element) (splat & (ARRAY_SIZE(r->element) - 1))
#if defined(WORDS_BIGENDIAN)
#define SPLAT_ELEMENT(element) _SPLAT_MASKED(element)
#else
#define SPLAT_ELEMENT(element) (ARRAY_SIZE(r->element)-1 - _SPLAT_MASKED(element))
#endif
#define VSPLT(suffix, element)                                          \
    void helper_vsplt##suffix (ppc_avr_t *r, ppc_avr_t *b, uint32_t splat) \
    {                                                                   \
        uint32_t s = b->element[SPLAT_ELEMENT(element)];                \
        int i;                                                          \
        for (i = 0; i < ARRAY_SIZE(r->element); i++) {                  \
            r->element[i] = s;                                          \
        }                                                               \
    }
VSPLT(b, u8)
VSPLT(h, u16)
VSPLT(w, u32)
#undef VSPLT
#undef SPLAT_ELEMENT
#undef _SPLAT_MASKED

#define VSPLTI(suffix, element, splat_type)                     \
    void helper_vspltis##suffix (ppc_avr_t *r, uint32_t splat)  \
    {                                                           \
        splat_type x = (int8_t)(splat << 3) >> 3;               \
        int i;                                                  \
        for (i = 0; i < ARRAY_SIZE(r->element); i++) {          \
            r->element[i] = x;                                  \
        }                                                       \
    }
VSPLTI(b, s8, int8_t)
VSPLTI(h, s16, int16_t)
VSPLTI(w, s32, int32_t)
#undef VSPLTI

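/* The splat immediate is a 5-bit field sign-extended by the shift pair
 * above; e.g. vspltisb with SIMM = 31 writes (int8_t)(31 << 3) >> 3 = -1,
 * i.e. 0xFF, into every byte of the destination. */
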
#define VSR(suffix, element)                                            \
    void helper_vsr##suffix (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)  \
    {                                                                   \
        int i;                                                          \
        for (i = 0; i < ARRAY_SIZE(r->element); i++) {                  \
            unsigned int mask = ((1 << (3 + (sizeof (a->element[0]) >> 1))) - 1); \
            unsigned int shift = b->element[i] & mask;                  \
            r->element[i] = a->element[i] >> shift;                     \
        }                                                               \
    }
VSR(ab, s8)
VSR(ah, s16)
VSR(aw, s32)
VSR(b, u8)
VSR(h, u16)
VSR(w, u32)
#undef VSR

void helper_vsro (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
{
    int sh = (b->u8[LO_IDX*0xf] >> 3) & 0xf;

#if defined (WORDS_BIGENDIAN)
    memmove (&r->u8[sh], &a->u8[0], 16-sh);
    memset (&r->u8[0], 0, sh);
#else
    memmove (&r->u8[0], &a->u8[sh], 16-sh);
    memset (&r->u8[16-sh], 0, sh);
#endif
}

void helper_vsubcuw (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
{
    int i;

    for (i = 0; i < ARRAY_SIZE(r->u32); i++) {
        r->u32[i] = a->u32[i] >= b->u32[i];
    }
}

void helper_vsumsws (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
{
    int64_t t;
    int i, upper;
    ppc_avr_t result;
    int sat = 0;

#if defined(WORDS_BIGENDIAN)
    upper = ARRAY_SIZE(r->s32)-1;
#else
    upper = 0;
#endif
    t = (int64_t)b->s32[upper];
    for (i = 0; i < ARRAY_SIZE(r->s32); i++) {
        t += a->s32[i];
        result.s32[i] = 0;
    }
    result.s32[upper] = cvtsdsw(t, &sat);
    *r = result;

    if (sat) {
        env->vscr |= (1 << VSCR_SAT);
    }
}

void helper_vsum2sws (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
{
    int i, j, upper;
    ppc_avr_t result;
    int sat = 0;

#if defined(WORDS_BIGENDIAN)
    upper = 1;
#else
    upper = 0;
#endif
    for (i = 0; i < ARRAY_SIZE(r->u64); i++) {
        int64_t t = (int64_t)b->s32[upper+i*2];
        result.u64[i] = 0;
        for (j = 0; j < ARRAY_SIZE(r->u64); j++) {
            t += a->s32[2*i+j];
        }
        result.s32[upper+i*2] = cvtsdsw(t, &sat);
    }

    *r = result;
    if (sat) {
        env->vscr |= (1 << VSCR_SAT);
    }
}

void helper_vsum4sbs (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
{
    int i, j;
    int sat = 0;

    for (i = 0; i < ARRAY_SIZE(r->s32); i++) {
        int64_t t = (int64_t)b->s32[i];
        for (j = 0; j < ARRAY_SIZE(r->s32); j++) {
            t += a->s8[4*i+j];
        }
        r->s32[i] = cvtsdsw(t, &sat);
    }

    if (sat) {
        env->vscr |= (1 << VSCR_SAT);
    }
}

void helper_vsum4shs (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
{
    int sat = 0;
    int i;

    for (i = 0; i < ARRAY_SIZE(r->s32); i++) {
        int64_t t = (int64_t)b->s32[i];
        t += a->s16[2*i] + a->s16[2*i+1];
        r->s32[i] = cvtsdsw(t, &sat);
    }

    if (sat) {
        env->vscr |= (1 << VSCR_SAT);
    }
}

void helper_vsum4ubs (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
{
    int i, j;
    int sat = 0;

    for (i = 0; i < ARRAY_SIZE(r->u32); i++) {
        uint64_t t = (uint64_t)b->u32[i];
        for (j = 0; j < ARRAY_SIZE(r->u32); j++) {
            t += a->u8[4*i+j];
        }
        r->u32[i] = cvtuduw(t, &sat);
    }

    if (sat) {
        env->vscr |= (1 << VSCR_SAT);
    }
}

#if defined(WORDS_BIGENDIAN)
#define UPKHI 1
#define UPKLO 0
#else
#define UPKHI 0
#define UPKLO 1
#endif
#define VUPKPX(suffix, hi)                                      \
    void helper_vupk##suffix (ppc_avr_t *r, ppc_avr_t *b)       \
    {                                                           \
        int i;                                                  \
        ppc_avr_t result;                                       \
        for (i = 0; i < ARRAY_SIZE(r->u32); i++) {              \
            uint16_t e = b->u16[hi ? i : i+4];                  \
            uint8_t a = (e >> 15) ? 0xff : 0;                   \
            uint8_t r = (e >> 10) & 0x1f;                       \
            uint8_t g = (e >> 5) & 0x1f;                        \
            uint8_t b = e & 0x1f;                               \
            result.u32[i] = (a << 24) | (r << 16) | (g << 8) | b; \
        }                                                       \
        *r = result;                                            \
    }
VUPKPX(lpx, UPKLO)
VUPKPX(hpx, UPKHI)

#define VUPK(suffix, unpacked, packee, hi)                      \
    void helper_vupk##suffix (ppc_avr_t *r, ppc_avr_t *b)       \
    {                                                           \
        int i;                                                  \
        ppc_avr_t result;                                       \
        if (hi) {                                               \
            for (i = 0; i < ARRAY_SIZE(r->unpacked); i++) {     \
                result.unpacked[i] = b->packee[i];              \
            }                                                   \
        } else {                                                \
            for (i = ARRAY_SIZE(r->unpacked); i < ARRAY_SIZE(r->packee); i++) { \
                result.unpacked[i-ARRAY_SIZE(r->unpacked)] = b->packee[i]; \
            }                                                   \
        }                                                       \
        *r = result;                                            \
    }
VUPK(hsb, s16, s8, UPKHI)
VUPK(hsh, s32, s16, UPKHI)
VUPK(lsb, s16, s8, UPKLO)
VUPK(lsh, s32, s16, UPKLO)
#undef VUPK
#undef UPKHI
#undef UPKLO

#undef DO_HANDLE_NAN
#undef HANDLE_NAN1
#undef HANDLE_NAN2
#undef HANDLE_NAN3
#undef VECTOR_FOR_INORDER_I
#undef HI_IDX
#undef LO_IDX

/*****************************************************************************/
/* SPE extension helpers */
/* Use a table to make this quicker */
static uint8_t hbrev[16] = {
    0x0, 0x8, 0x4, 0xC, 0x2, 0xA, 0x6, 0xE,
    0x1, 0x9, 0x5, 0xD, 0x3, 0xB, 0x7, 0xF,
};

static always_inline uint8_t byte_reverse (uint8_t val)
{
    return hbrev[val >> 4] | (hbrev[val & 0xF] << 4);
}

static always_inline uint32_t word_reverse (uint32_t val)
{
    return byte_reverse(val >> 24) | (byte_reverse(val >> 16) << 8) |
        (byte_reverse(val >> 8) << 16) | (byte_reverse(val) << 24);
}

#define MASKBITS 16 // Random value - to be fixed (implementation dependent)
target_ulong helper_brinc (target_ulong arg1, target_ulong arg2)
{
    uint32_t a, b, d, mask;

    mask = UINT32_MAX >> (32 - MASKBITS);
    a = arg1 & mask;
    b = arg2 & mask;
    d = word_reverse(1 + word_reverse(a | ~b));
    return (arg1 & ~mask) | (d & b);
}

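/* brinc computes the next index of a bit-reversed address sequence (as
 * used for FFT butterflies): reversing the masked bits, adding 1, and
 * reversing back is an increment carried out in bit-reversed order.
 * MASKBITS should really come from the implementation-dependent mask
 * width; 16 is a placeholder, as the comment above notes. */
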
uint32_t helper_cntlsw32 (uint32_t val)
{
    if (val & 0x80000000)
        return clz32(~val);
    else
        return clz32(val);
}

uint32_t helper_cntlzw32 (uint32_t val)
{
    return clz32(val);
}

/* Single-precision floating-point conversions */
static always_inline uint32_t efscfsi (uint32_t val)
{
    CPU_FloatU u;

    u.f = int32_to_float32(val, &env->vec_status);

    return u.l;
}

static always_inline uint32_t efscfui (uint32_t val)
{
    CPU_FloatU u;

    u.f = uint32_to_float32(val, &env->vec_status);

    return u.l;
}

static always_inline int32_t efsctsi (uint32_t val)
{
    CPU_FloatU u;

    u.l = val;
    /* NaNs are not handled the way IEEE 754 prescribes */
    if (unlikely(float32_is_nan(u.f)))
        return 0;

    return float32_to_int32(u.f, &env->vec_status);
}

static always_inline uint32_t efsctui (uint32_t val)
{
    CPU_FloatU u;

    u.l = val;
    /* NaNs are not handled the way IEEE 754 prescribes */
    if (unlikely(float32_is_nan(u.f)))
        return 0;

    return float32_to_uint32(u.f, &env->vec_status);
}

static always_inline uint32_t efsctsiz (uint32_t val)
{
    CPU_FloatU u;

    u.l = val;
    /* NaNs are not handled the way IEEE 754 prescribes */
    if (unlikely(float32_is_nan(u.f)))
        return 0;

    return float32_to_int32_round_to_zero(u.f, &env->vec_status);
}

static always_inline uint32_t efsctuiz (uint32_t val)
{
    CPU_FloatU u;

    u.l = val;
    /* NaNs are not handled the way IEEE 754 prescribes */
    if (unlikely(float32_is_nan(u.f)))
        return 0;

    return float32_to_uint32_round_to_zero(u.f, &env->vec_status);
}

static always_inline uint32_t efscfsf (uint32_t val)
{
    CPU_FloatU u;
    float32 tmp;

    u.f = int32_to_float32(val, &env->vec_status);
    tmp = int64_to_float32(1ULL << 32, &env->vec_status);
    u.f = float32_div(u.f, tmp, &env->vec_status);

    return u.l;
}

static always_inline uint32_t efscfuf (uint32_t val)
{
    CPU_FloatU u;
    float32 tmp;

    u.f = uint32_to_float32(val, &env->vec_status);
    tmp = uint64_to_float32(1ULL << 32, &env->vec_status);
    u.f = float32_div(u.f, tmp, &env->vec_status);

    return u.l;
}

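/* efscfsf/efscfuf interpret the 32-bit input as a fractional fixed-point
 * value: converting the raw integer and then dividing by 2**32 (the
 * 1ULL << 32 constant) places the binary point above the most-significant
 * bit. The efsctsf/efsctuf helpers below invert this by multiplying by
 * 2**32 before the float-to-integer conversion. */
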
static always_inline uint32_t efsctsf (uint32_t val)
{
    CPU_FloatU u;
    float32 tmp;

    u.l = val;
    /* NaNs are not handled the way IEEE 754 prescribes */
    if (unlikely(float32_is_nan(u.f)))
        return 0;
    tmp = uint64_to_float32(1ULL << 32, &env->vec_status);
    u.f = float32_mul(u.f, tmp, &env->vec_status);

    return float32_to_int32(u.f, &env->vec_status);
}

static always_inline uint32_t efsctuf (uint32_t val)
{
    CPU_FloatU u;
    float32 tmp;

    u.l = val;
    /* NaNs are not handled the way IEEE 754 prescribes */
    if (unlikely(float32_is_nan(u.f)))
        return 0;
    tmp = uint64_to_float32(1ULL << 32, &env->vec_status);
    u.f = float32_mul(u.f, tmp, &env->vec_status);

    return float32_to_uint32(u.f, &env->vec_status);
}

#define HELPER_SPE_SINGLE_CONV(name)            \
uint32_t helper_e##name (uint32_t val)          \
{                                               \
    return e##name(val);                        \
}
/* efscfsi */
HELPER_SPE_SINGLE_CONV(fscfsi);
/* efscfui */
HELPER_SPE_SINGLE_CONV(fscfui);
/* efscfuf */
HELPER_SPE_SINGLE_CONV(fscfuf);
/* efscfsf */
HELPER_SPE_SINGLE_CONV(fscfsf);
/* efsctsi */
HELPER_SPE_SINGLE_CONV(fsctsi);
/* efsctui */
HELPER_SPE_SINGLE_CONV(fsctui);
/* efsctsiz */
HELPER_SPE_SINGLE_CONV(fsctsiz);
/* efsctuiz */
HELPER_SPE_SINGLE_CONV(fsctuiz);
/* efsctsf */
HELPER_SPE_SINGLE_CONV(fsctsf);
/* efsctuf */
HELPER_SPE_SINGLE_CONV(fsctuf);

#define HELPER_SPE_VECTOR_CONV(name)                    \
uint64_t helper_ev##name (uint64_t val)                 \
{                                                       \
    return ((uint64_t)e##name(val >> 32) << 32) |       \
           (uint64_t)e##name(val);                      \
}
/* evfscfsi */
HELPER_SPE_VECTOR_CONV(fscfsi);
/* evfscfui */
HELPER_SPE_VECTOR_CONV(fscfui);
/* evfscfuf */
HELPER_SPE_VECTOR_CONV(fscfuf);
/* evfscfsf */
HELPER_SPE_VECTOR_CONV(fscfsf);
/* evfsctsi */
HELPER_SPE_VECTOR_CONV(fsctsi);
/* evfsctui */
HELPER_SPE_VECTOR_CONV(fsctui);
/* evfsctsiz */
HELPER_SPE_VECTOR_CONV(fsctsiz);
/* evfsctuiz */
HELPER_SPE_VECTOR_CONV(fsctuiz);
/* evfsctsf */
HELPER_SPE_VECTOR_CONV(fsctsf);
/* evfsctuf */
HELPER_SPE_VECTOR_CONV(fsctuf);

/* Single-precision floating-point arithmetic */
static always_inline uint32_t efsadd (uint32_t op1, uint32_t op2)
{
    CPU_FloatU u1, u2;

    u1.l = op1;
    u2.l = op2;
    u1.f = float32_add(u1.f, u2.f, &env->vec_status);
    return u1.l;
}

static always_inline uint32_t efssub (uint32_t op1, uint32_t op2)
{
    CPU_FloatU u1, u2;

    u1.l = op1;
    u2.l = op2;
    u1.f = float32_sub(u1.f, u2.f, &env->vec_status);
    return u1.l;
}

static always_inline uint32_t efsmul (uint32_t op1, uint32_t op2)
{
    CPU_FloatU u1, u2;

    u1.l = op1;
    u2.l = op2;
    u1.f = float32_mul(u1.f, u2.f, &env->vec_status);
    return u1.l;
}

static always_inline uint32_t efsdiv (uint32_t op1, uint32_t op2)
{
    CPU_FloatU u1, u2;

    u1.l = op1;
    u2.l = op2;
    u1.f = float32_div(u1.f, u2.f, &env->vec_status);
    return u1.l;
}

#define HELPER_SPE_SINGLE_ARITH(name)                   \
uint32_t helper_e##name (uint32_t op1, uint32_t op2)    \
{                                                       \
    return e##name(op1, op2);                           \
}
/* efsadd */
HELPER_SPE_SINGLE_ARITH(fsadd);
/* efssub */
HELPER_SPE_SINGLE_ARITH(fssub);
/* efsmul */
HELPER_SPE_SINGLE_ARITH(fsmul);
/* efsdiv */
HELPER_SPE_SINGLE_ARITH(fsdiv);

#define HELPER_SPE_VECTOR_ARITH(name)                                   \
uint64_t helper_ev##name (uint64_t op1, uint64_t op2)                   \
{                                                                       \
    return ((uint64_t)e##name(op1 >> 32, op2 >> 32) << 32) |            \
           (uint64_t)e##name(op1, op2);                                 \
}
/* evfsadd */
HELPER_SPE_VECTOR_ARITH(fsadd);
/* evfssub */
HELPER_SPE_VECTOR_ARITH(fssub);
/* evfsmul */
HELPER_SPE_VECTOR_ARITH(fsmul);
/* evfsdiv */
HELPER_SPE_VECTOR_ARITH(fsdiv);

/* Single-precision floating-point comparisons */
static always_inline uint32_t efststlt (uint32_t op1, uint32_t op2)
{
    CPU_FloatU u1, u2;

    u1.l = op1;
    u2.l = op2;
    return float32_lt(u1.f, u2.f, &env->vec_status) ? 4 : 0;
}

static always_inline uint32_t efststgt (uint32_t op1, uint32_t op2)
{
    CPU_FloatU u1, u2;

    u1.l = op1;
    u2.l = op2;
    return float32_le(u1.f, u2.f, &env->vec_status) ? 0 : 4;
}

static always_inline uint32_t efststeq (uint32_t op1, uint32_t op2)
{
    CPU_FloatU u1, u2;

    u1.l = op1;
    u2.l = op2;
    return float32_eq(u1.f, u2.f, &env->vec_status) ? 4 : 0;
}

static always_inline uint32_t efscmplt (uint32_t op1, uint32_t op2)
{
    /* XXX: TODO: test special values (NaN, infinities, ...) */
    return efststlt(op1, op2);
}

static always_inline uint32_t efscmpgt (uint32_t op1, uint32_t op2)
{
    /* XXX: TODO: test special values (NaN, infinities, ...) */
    return efststgt(op1, op2);
}

static always_inline uint32_t efscmpeq (uint32_t op1, uint32_t op2)
{
    /* XXX: TODO: test special values (NaN, infinities, ...) */
    return efststeq(op1, op2);
}

#define HELPER_SINGLE_SPE_CMP(name)                     \
uint32_t helper_e##name (uint32_t op1, uint32_t op2)    \
{                                                       \
    return e##name(op1, op2) << 2;                      \
}
/* efststlt */
HELPER_SINGLE_SPE_CMP(fststlt);
/* efststgt */
HELPER_SINGLE_SPE_CMP(fststgt);
/* efststeq */
HELPER_SINGLE_SPE_CMP(fststeq);
/* efscmplt */
HELPER_SINGLE_SPE_CMP(fscmplt);
/* efscmpgt */
HELPER_SINGLE_SPE_CMP(fscmpgt);
/* efscmpeq */
HELPER_SINGLE_SPE_CMP(fscmpeq);

static always_inline uint32_t evcmp_merge (int t0, int t1)
{
    return (t0 << 3) | (t1 << 2) | ((t0 | t1) << 1) | (t0 & t1);
}

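/* evcmp_merge packs the per-half comparison results into one CR field:
 * bit 3 is the high-half result, bit 2 the low half, bit 1 their OR and
 * bit 0 their AND, which is the SPE vector-compare CR encoding. */
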
#define HELPER_VECTOR_SPE_CMP(name)                                     \
uint32_t helper_ev##name (uint64_t op1, uint64_t op2)                   \
{                                                                       \
    return evcmp_merge(e##name(op1 >> 32, op2 >> 32), e##name(op1, op2)); \
}
/* evfststlt */
HELPER_VECTOR_SPE_CMP(fststlt);
/* evfststgt */
HELPER_VECTOR_SPE_CMP(fststgt);
/* evfststeq */
HELPER_VECTOR_SPE_CMP(fststeq);
/* evfscmplt */
HELPER_VECTOR_SPE_CMP(fscmplt);
/* evfscmpgt */
HELPER_VECTOR_SPE_CMP(fscmpgt);
/* evfscmpeq */
HELPER_VECTOR_SPE_CMP(fscmpeq);

/* Double-precision floating-point conversions */
uint64_t helper_efdcfsi (uint32_t val)
{
    CPU_DoubleU u;

    u.d = int32_to_float64(val, &env->vec_status);

    return u.ll;
}

uint64_t helper_efdcfsid (uint64_t val)
{
    CPU_DoubleU u;

    u.d = int64_to_float64(val, &env->vec_status);

    return u.ll;
}

uint64_t helper_efdcfui (uint32_t val)
{
    CPU_DoubleU u;

    u.d = uint32_to_float64(val, &env->vec_status);

    return u.ll;
}

uint64_t helper_efdcfuid (uint64_t val)
{
    CPU_DoubleU u;

    u.d = uint64_to_float64(val, &env->vec_status);

    return u.ll;
}

uint32_t helper_efdctsi (uint64_t val)
{
    CPU_DoubleU u;

    u.ll = val;
    /* NaNs are not handled the way IEEE 754 prescribes */
    if (unlikely(float64_is_nan(u.d)))
        return 0;

    return float64_to_int32(u.d, &env->vec_status);
}

uint32_t helper_efdctui (uint64_t val)
{
    CPU_DoubleU u;

    u.ll = val;
    /* NaNs are not handled the way IEEE 754 prescribes */
    if (unlikely(float64_is_nan(u.d)))
        return 0;

    return float64_to_uint32(u.d, &env->vec_status);
}

uint32_t helper_efdctsiz (uint64_t val)
{
    CPU_DoubleU u;

    u.ll = val;
    /* NaNs are not handled the way IEEE 754 prescribes */
    if (unlikely(float64_is_nan(u.d)))
        return 0;

    return float64_to_int32_round_to_zero(u.d, &env->vec_status);
}

uint64_t helper_efdctsidz (uint64_t val)
{
    CPU_DoubleU u;

    u.ll = val;
    /* NaNs are not handled the way IEEE 754 prescribes */
    if (unlikely(float64_is_nan(u.d)))
        return 0;

    return float64_to_int64_round_to_zero(u.d, &env->vec_status);
}

uint32_t helper_efdctuiz (uint64_t val)
{
    CPU_DoubleU u;

    u.ll = val;
    /* NaNs are not handled the way IEEE 754 prescribes */
    if (unlikely(float64_is_nan(u.d)))
        return 0;

    return float64_to_uint32_round_to_zero(u.d, &env->vec_status);
}

uint64_t helper_efdctuidz (uint64_t val)
{
    CPU_DoubleU u;

    u.ll = val;
    /* NaNs are not handled the way IEEE 754 prescribes */
    if (unlikely(float64_is_nan(u.d)))
        return 0;

    return float64_to_uint64_round_to_zero(u.d, &env->vec_status);
}

uint64_t helper_efdcfsf (uint32_t val)
{
    CPU_DoubleU u;
    float64 tmp;

    u.d = int32_to_float64(val, &env->vec_status);
    tmp = int64_to_float64(1ULL << 32, &env->vec_status);
    u.d = float64_div(u.d, tmp, &env->vec_status);

    return u.ll;
}

uint64_t helper_efdcfuf (uint32_t val)
{
    CPU_DoubleU u;
    float64 tmp;

    u.d = uint32_to_float64(val, &env->vec_status);
    tmp = int64_to_float64(1ULL << 32, &env->vec_status);
    u.d = float64_div(u.d, tmp, &env->vec_status);

    return u.ll;
}

uint32_t helper_efdctsf (uint64_t val)
{
    CPU_DoubleU u;
    float64 tmp;

    u.ll = val;
    /* NaNs are not handled the way IEEE 754 prescribes */
    if (unlikely(float64_is_nan(u.d)))
        return 0;
    tmp = uint64_to_float64(1ULL << 32, &env->vec_status);
    u.d = float64_mul(u.d, tmp, &env->vec_status);

    return float64_to_int32(u.d, &env->vec_status);
}

uint32_t helper_efdctuf (uint64_t val)
{
    CPU_DoubleU u;
    float64 tmp;

    u.ll = val;
    /* NaNs are not handled the way IEEE 754 prescribes */
    if (unlikely(float64_is_nan(u.d)))
        return 0;
    tmp = uint64_to_float64(1ULL << 32, &env->vec_status);
    u.d = float64_mul(u.d, tmp, &env->vec_status);

    return float64_to_uint32(u.d, &env->vec_status);
}

uint32_t helper_efscfd (uint64_t val)
{
    CPU_DoubleU u1;
    CPU_FloatU u2;

    u1.ll = val;
    u2.f = float64_to_float32(u1.d, &env->vec_status);

    return u2.l;
}

uint64_t helper_efdcfs (uint32_t val)
{
    CPU_DoubleU u2;
    CPU_FloatU u1;

    u1.l = val;
    u2.d = float32_to_float64(u1.f, &env->vec_status);

    return u2.ll;
}

/* Double-precision floating-point arithmetic */
uint64_t helper_efdadd (uint64_t op1, uint64_t op2)
{
    CPU_DoubleU u1, u2;

    u1.ll = op1;
    u2.ll = op2;
    u1.d = float64_add(u1.d, u2.d, &env->vec_status);
    return u1.ll;
}

uint64_t helper_efdsub (uint64_t op1, uint64_t op2)
{
    CPU_DoubleU u1, u2;

    u1.ll = op1;
    u2.ll = op2;
    u1.d = float64_sub(u1.d, u2.d, &env->vec_status);
    return u1.ll;
}

uint64_t helper_efdmul (uint64_t op1, uint64_t op2)
{
    CPU_DoubleU u1, u2;

    u1.ll = op1;
    u2.ll = op2;
    u1.d = float64_mul(u1.d, u2.d, &env->vec_status);
    return u1.ll;
}

uint64_t helper_efddiv (uint64_t op1, uint64_t op2)
{
    CPU_DoubleU u1, u2;

    u1.ll = op1;
    u2.ll = op2;
    u1.d = float64_div(u1.d, u2.d, &env->vec_status);
    return u1.ll;
}

/* Double-precision floating-point comparison helpers */
uint32_t helper_efdtstlt (uint64_t op1, uint64_t op2)
{
    CPU_DoubleU u1, u2;

    u1.ll = op1;
    u2.ll = op2;
    return float64_lt(u1.d, u2.d, &env->vec_status) ? 4 : 0;
}

uint32_t helper_efdtstgt (uint64_t op1, uint64_t op2)
{
    CPU_DoubleU u1, u2;

    u1.ll = op1;
    u2.ll = op2;
    return float64_le(u1.d, u2.d, &env->vec_status) ? 0 : 4;
}

uint32_t helper_efdtsteq (uint64_t op1, uint64_t op2)
{
    CPU_DoubleU u1, u2;

    u1.ll = op1;
    u2.ll = op2;
    return float64_eq(u1.d, u2.d, &env->vec_status) ? 4 : 0;
}

uint32_t helper_efdcmplt (uint64_t op1, uint64_t op2)
{
    /* XXX: TODO: test special values (NaN, infinities, ...) */
    return helper_efdtstlt(op1, op2);
}

uint32_t helper_efdcmpgt (uint64_t op1, uint64_t op2)
{
    /* XXX: TODO: test special values (NaN, infinities, ...) */
    return helper_efdtstgt(op1, op2);
}

uint32_t helper_efdcmpeq (uint64_t op1, uint64_t op2)
{
    /* XXX: TODO: test special values (NaN, infinities, ...) */
    return helper_efdtsteq(op1, op2);
}

/*****************************************************************************/
/* Softmmu support */
#if !defined (CONFIG_USER_ONLY)

#define MMUSUFFIX _mmu

#define SHIFT 0
#include "softmmu_template.h"

#define SHIFT 1
#include "softmmu_template.h"

#define SHIFT 2
#include "softmmu_template.h"

#define SHIFT 3
#include "softmmu_template.h"

/* Try to fill the TLB and raise an exception on error. If retaddr is
   NULL, it means that the function was called in C code (i.e. not
   from generated code or from helper.c) */
/* XXX: fix it to restore all registers */
void tlb_fill (target_ulong addr, int is_write, int mmu_idx, void *retaddr)
{
    TranslationBlock *tb;
    CPUState *saved_env;
    unsigned long pc;
    int ret;

    /* XXX: hack to restore env in all cases, even if not called from
       generated code */
    saved_env = env;
    env = cpu_single_env;
    ret = cpu_ppc_handle_mmu_fault(env, addr, is_write, mmu_idx, 1);
    if (unlikely(ret != 0)) {
        if (likely(retaddr)) {
            /* now we have a real cpu fault */
            pc = (unsigned long)retaddr;
            tb = tb_find_pc(pc);
            if (likely(tb)) {
                /* the PC is inside the translated code. It means that we have
                   a virtual CPU fault */
                cpu_restore_state(tb, env, pc, NULL);
            }
        }
        helper_raise_exception_err(env->exception_index, env->error_code);
    }
    env = saved_env;
}

/* Segment registers load and store */
target_ulong helper_load_sr (target_ulong sr_num)
{
#if defined(TARGET_PPC64)
    if (env->mmu_model & POWERPC_MMU_64)
        return ppc_load_sr(env, sr_num);
#endif
    return env->sr[sr_num];
}

void helper_store_sr (target_ulong sr_num, target_ulong val)
{
    ppc_store_sr(env, sr_num, val);
}

/* SLB management */
#if defined(TARGET_PPC64)
target_ulong helper_load_slb (target_ulong slb_nr)
{
    return ppc_load_slb(env, slb_nr);
}

void helper_store_slb (target_ulong rb, target_ulong rs)
{
    ppc_store_slb(env, rb, rs);
}

void helper_slbia (void)
{
    ppc_slb_invalidate_all(env);
}

void helper_slbie (target_ulong addr)
{
    ppc_slb_invalidate_one(env, addr);
}
#endif /* defined(TARGET_PPC64) */

/* TLB management */
void helper_tlbia (void)
{
    ppc_tlb_invalidate_all(env);
}

void helper_tlbie (target_ulong addr)
{
    ppc_tlb_invalidate_one(env, addr);
}

/* Software-driven TLB management */
/* PowerPC 602/603 software TLB load instructions helpers */
static void do_6xx_tlb (target_ulong new_EPN, int is_code)
{
    target_ulong RPN, CMP, EPN;
    int way;

    RPN = env->spr[SPR_RPA];
    if (is_code) {
        CMP = env->spr[SPR_ICMP];
        EPN = env->spr[SPR_IMISS];
    } else {
        CMP = env->spr[SPR_DCMP];
        EPN = env->spr[SPR_DMISS];
    }
    way = (env->spr[SPR_SRR1] >> 17) & 1;
    LOG_SWTLB("%s: EPN " ADDRX " " ADDRX " PTE0 " ADDRX
              " PTE1 " ADDRX " way %d\n",
              __func__, new_EPN, EPN, CMP, RPN, way);
    /* Store this TLB */
    ppc6xx_tlb_store(env, (uint32_t)(new_EPN & TARGET_PAGE_MASK),
                     way, is_code, CMP, RPN);
}

void helper_6xx_tlbd (target_ulong EPN)
{
    do_6xx_tlb(EPN, 0);
}

void helper_6xx_tlbi (target_ulong EPN)
{
    do_6xx_tlb(EPN, 1);
}

/* PowerPC 74xx software TLB load instructions helpers */
static void do_74xx_tlb (target_ulong new_EPN, int is_code)
{
    target_ulong RPN, CMP, EPN;
    int way;

    RPN = env->spr[SPR_PTELO];
    CMP = env->spr[SPR_PTEHI];
    EPN = env->spr[SPR_TLBMISS] & ~0x3;
    way = env->spr[SPR_TLBMISS] & 0x3;
    LOG_SWTLB("%s: EPN " ADDRX " " ADDRX " PTE0 " ADDRX
              " PTE1 " ADDRX " way %d\n",
              __func__, new_EPN, EPN, CMP, RPN, way);
    /* Store this TLB */
    ppc6xx_tlb_store(env, (uint32_t)(new_EPN & TARGET_PAGE_MASK),
                     way, is_code, CMP, RPN);
}

void helper_74xx_tlbd (target_ulong EPN)
{
    do_74xx_tlb(EPN, 0);
}

void helper_74xx_tlbi (target_ulong EPN)
{
    do_74xx_tlb(EPN, 1);
}

static always_inline target_ulong booke_tlb_to_page_size (int size)
{
    return 1024 << (2 * size);
}

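/* TLB size field to byte count: 1 KB << (2 * size), so 0 -> 1 KB,
 * 1 -> 4 KB, 2 -> 16 KB, ... up to 9 -> 256 MB and beyond.
 * booke_page_size_to_tlb below is its inverse and returns -1 for page
 * sizes that have no encoding. */
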
static always_inline int booke_page_size_to_tlb (target_ulong page_size)
{
    int size;

    switch (page_size) {
    case 0x00000400UL: size = 0x0; break;
    case 0x00001000UL: size = 0x1; break;
    case 0x00004000UL: size = 0x2; break;
    case 0x00010000UL: size = 0x3; break;
    case 0x00040000UL: size = 0x4; break;
    case 0x00100000UL: size = 0x5; break;
    case 0x00400000UL: size = 0x6; break;
    case 0x01000000UL: size = 0x7; break;
    case 0x04000000UL: size = 0x8; break;
    case 0x10000000UL: size = 0x9; break;
    case 0x40000000UL: size = 0xA; break;
#if defined (TARGET_PPC64)
    case 0x000100000000ULL: size = 0xB; break;
    case 0x000400000000ULL: size = 0xC; break;
    case 0x001000000000ULL: size = 0xD; break;
    case 0x004000000000ULL: size = 0xE; break;
    case 0x010000000000ULL: size = 0xF; break;
#endif
    default:
        size = -1;
        break;
    }

    return size;
}

/* Helpers for 4xx TLB management */
target_ulong helper_4xx_tlbre_lo (target_ulong entry)
{
    ppcemb_tlb_t *tlb;
    target_ulong ret;
    int size;

    entry &= 0x3F;
    tlb = &env->tlb[entry].tlbe;
    ret = tlb->EPN;
    if (tlb->prot & PAGE_VALID)
        ret |= 0x400;
    size = booke_page_size_to_tlb(tlb->size);
    if (size < 0 || size > 0x7)
        size = 1;
    ret |= size << 7;
    env->spr[SPR_40x_PID] = tlb->PID;
    return ret;
}

target_ulong helper_4xx_tlbre_hi (target_ulong entry)
{
    ppcemb_tlb_t *tlb;
    target_ulong ret;

    entry &= 0x3F;
    tlb = &env->tlb[entry].tlbe;
    ret = tlb->RPN;
    if (tlb->prot & PAGE_EXEC)
        ret |= 0x200;
    if (tlb->prot & PAGE_WRITE)
        ret |= 0x100;
    return ret;
}

void helper_4xx_tlbwe_hi (target_ulong entry, target_ulong val)
{
    ppcemb_tlb_t *tlb;
    target_ulong page, end;

    LOG_SWTLB("%s entry %d val " ADDRX "\n", __func__, (int)entry, val);
    entry &= 0x3F;
    tlb = &env->tlb[entry].tlbe;
    /* Invalidate previous TLB (if it's valid) */
    if (tlb->prot & PAGE_VALID) {
        end = tlb->EPN + tlb->size;
        LOG_SWTLB("%s: invalidate old TLB %d start " ADDRX
                  " end " ADDRX "\n", __func__, (int)entry, tlb->EPN, end);
        for (page = tlb->EPN; page < end; page += TARGET_PAGE_SIZE)
            tlb_flush_page(env, page);
    }
    tlb->size = booke_tlb_to_page_size((val >> 7) & 0x7);
    /* We cannot handle TLB size < TARGET_PAGE_SIZE.
     * If this ever occurs, one should use the ppcemb target instead
     * of the ppc or ppc64 one
     */
    if ((val & 0x40) && tlb->size < TARGET_PAGE_SIZE) {
        cpu_abort(env, "TLB size " TARGET_FMT_lu " < %u "
                  "are not supported (%d)\n",
                  tlb->size, TARGET_PAGE_SIZE, (int)((val >> 7) & 0x7));
    }
    tlb->EPN = val & ~(tlb->size - 1);
    if (val & 0x40)
        tlb->prot |= PAGE_VALID;
    else
        tlb->prot &= ~PAGE_VALID;
    if (val & 0x20) {
        /* XXX: TO BE FIXED */
        cpu_abort(env, "Little-endian TLB entries are not supported by now\n");
    }
    tlb->PID = env->spr[SPR_40x_PID]; /* PID */
    tlb->attr = val & 0xFF;
    LOG_SWTLB("%s: set up TLB %d RPN " PADDRX " EPN " ADDRX
              " size " ADDRX " prot %c%c%c%c PID %d\n", __func__,
              (int)entry, tlb->RPN, tlb->EPN, tlb->size,
              tlb->prot & PAGE_READ ? 'r' : '-',
              tlb->prot & PAGE_WRITE ? 'w' : '-',
              tlb->prot & PAGE_EXEC ? 'x' : '-',
              tlb->prot & PAGE_VALID ? 'v' : '-', (int)tlb->PID);
    /* Invalidate new TLB (if valid) */
    if (tlb->prot & PAGE_VALID) {
        end = tlb->EPN + tlb->size;
        LOG_SWTLB("%s: invalidate TLB %d start " ADDRX
                  " end " ADDRX "\n", __func__, (int)entry, tlb->EPN, end);
        for (page = tlb->EPN; page < end; page += TARGET_PAGE_SIZE)
            tlb_flush_page(env, page);
    }
}

void helper_4xx_tlbwe_lo (target_ulong entry, target_ulong val)
{
    ppcemb_tlb_t *tlb;

    LOG_SWTLB("%s entry %i val " ADDRX "\n", __func__, (int)entry, val);
    entry &= 0x3F;
    tlb = &env->tlb[entry].tlbe;
    tlb->RPN = val & 0xFFFFFC00;
    tlb->prot = PAGE_READ;
    if (val & 0x200)
        tlb->prot |= PAGE_EXEC;
    if (val & 0x100)
        tlb->prot |= PAGE_WRITE;
    LOG_SWTLB("%s: set up TLB %d RPN " PADDRX " EPN " ADDRX
              " size " ADDRX " prot %c%c%c%c PID %d\n", __func__,
              (int)entry, tlb->RPN, tlb->EPN, tlb->size,
              tlb->prot & PAGE_READ ? 'r' : '-',
              tlb->prot & PAGE_WRITE ? 'w' : '-',
              tlb->prot & PAGE_EXEC ? 'x' : '-',
              tlb->prot & PAGE_VALID ? 'v' : '-', (int)tlb->PID);
}

target_ulong helper_4xx_tlbsx (target_ulong address)
{
    return ppcemb_tlb_search(env, address, env->spr[SPR_40x_PID]);
}

/* PowerPC 440 TLB management */
void helper_440_tlbwe (uint32_t word, target_ulong entry, target_ulong value)
{
    ppcemb_tlb_t *tlb;
    target_ulong EPN, RPN, size;
    int do_flush_tlbs;

    LOG_SWTLB("%s word %d entry %d value " ADDRX "\n",
              __func__, word, (int)entry, value);
    do_flush_tlbs = 0;
    entry &= 0x3F;
    tlb = &env->tlb[entry].tlbe;
    switch (word) {
    default:
        /* Just here to please gcc */
    case 0:
        EPN = value & 0xFFFFFC00;
        if ((tlb->prot & PAGE_VALID) && EPN != tlb->EPN)
            do_flush_tlbs = 1;
        tlb->EPN = EPN;
        size = booke_tlb_to_page_size((value >> 4) & 0xF);
        if ((tlb->prot & PAGE_VALID) && tlb->size < size)
            do_flush_tlbs = 1;
        tlb->size = size;
        tlb->attr &= ~0x1;
        tlb->attr |= (value >> 8) & 1;
        if (value & 0x200) {
            tlb->prot |= PAGE_VALID;
        } else {
            if (tlb->prot & PAGE_VALID) {
                tlb->prot &= ~PAGE_VALID;
                do_flush_tlbs = 1;
            }
        }
        tlb->PID = env->spr[SPR_440_MMUCR] & 0x000000FF;
        if (do_flush_tlbs)
            tlb_flush(env, 1);
        break;
    case 1:
        RPN = value & 0xFFFFFC0F;
        if ((tlb->prot & PAGE_VALID) && tlb->RPN != RPN)
            tlb_flush(env, 1);
        tlb->RPN = RPN;
        break;
    case 2:
        tlb->attr = (tlb->attr & 0x1) | (value & 0x0000FF00);
        tlb->prot = tlb->prot & PAGE_VALID;
        if (value & 0x1)
            tlb->prot |= PAGE_READ << 4;
        if (value & 0x2)
            tlb->prot |= PAGE_WRITE << 4;
        if (value & 0x4)
            tlb->prot |= PAGE_EXEC << 4;
        if (value & 0x8)
            tlb->prot |= PAGE_READ;
        if (value & 0x10)
            tlb->prot |= PAGE_WRITE;
        if (value & 0x20)
            tlb->prot |= PAGE_EXEC;
        break;
    }
}

target_ulong helper_440_tlbre (uint32_t word, target_ulong entry)
{
    ppcemb_tlb_t *tlb;
    target_ulong ret;
    int size;

    entry &= 0x3F;
    tlb = &env->tlb[entry].tlbe;
    switch (word) {
    default:
        /* Just here to please gcc */
    case 0:
        ret = tlb->EPN;
        size = booke_page_size_to_tlb(tlb->size);
        if (size < 0 || size > 0xF)
            size = 1;
        ret |= size << 4;
        if (tlb->attr & 0x1)
            ret |= 0x100;
        if (tlb->prot & PAGE_VALID)
            ret |= 0x200;
        env->spr[SPR_440_MMUCR] &= ~0x000000FF;
        env->spr[SPR_440_MMUCR] |= tlb->PID;
        break;
    case 1:
        ret = tlb->RPN;
        break;
    case 2:
        ret = tlb->attr & ~0x1;
        if (tlb->prot & (PAGE_READ << 4))
            ret |= 0x1;
        if (tlb->prot & (PAGE_WRITE << 4))
            ret |= 0x2;
        if (tlb->prot & (PAGE_EXEC << 4))
            ret |= 0x4;
        if (tlb->prot & PAGE_READ)
            ret |= 0x8;
        if (tlb->prot & PAGE_WRITE)
            ret |= 0x10;
        if (tlb->prot & PAGE_EXEC)
            ret |= 0x20;
        break;
    }
    return ret;
}

target_ulong helper_440_tlbsx (target_ulong address)
{
    return ppcemb_tlb_search(env, address, env->spr[SPR_440_MMUCR] & 0xFF);
}

#endif /* !CONFIG_USER_ONLY */