/*
 *  PowerPC emulation helpers for qemu.
 *
 *  Copyright (c) 2003-2007 Jocelyn Mayer
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301 USA
 */
#include "exec.h"
#include "host-utils.h"
#include "helper.h"

#include "helper_regs.h"
#include "op_helper.h"

//#define DEBUG_EXCEPTIONS
//#define DEBUG_SOFTWARE_TLB

#ifdef DEBUG_SOFTWARE_TLB
#  define LOG_SWTLB(...) qemu_log(__VA_ARGS__)
#else
#  define LOG_SWTLB(...) do { } while (0)
#endif

/*****************************************************************************/
/* Exceptions processing helpers */

void helper_raise_exception_err (uint32_t exception, uint32_t error_code)
{
#if 0
    printf("Raise exception %3x code : %d\n", exception, error_code);
#endif
    env->exception_index = exception;
    env->error_code = error_code;
    cpu_loop_exit();
}

void helper_raise_exception (uint32_t exception)
{
    helper_raise_exception_err(exception, 0);
}

/*****************************************************************************/
/* Registers load and stores */
target_ulong helper_load_cr (void)
{
    return (env->crf[0] << 28) |
           (env->crf[1] << 24) |
           (env->crf[2] << 20) |
           (env->crf[3] << 16) |
           (env->crf[4] << 12) |
           (env->crf[5] << 8) |
           (env->crf[6] << 4) |
           (env->crf[7] << 0);
}

void helper_store_cr (target_ulong val, uint32_t mask)
{
    int i, sh;

    for (i = 0, sh = 7; i < 8; i++, sh--) {
        if (mask & (1 << sh))
            env->crf[i] = (val >> (sh * 4)) & 0xFUL;
    }
}

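/* Editorial example (not in the original source): with mask 0x81 only CR0
 * and CR7 are written, so helper_store_cr(0x2000000F, 0x81) sets
 * crf[0] = 0x2 and crf[7] = 0xF and leaves crf[1]..crf[6] untouched.
 */
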
/*****************************************************************************/
/* SPR accesses */
void helper_load_dump_spr (uint32_t sprn)
{
    qemu_log("Read SPR %d %03x => " ADDRX "\n",
             sprn, sprn, env->spr[sprn]);
}

void helper_store_dump_spr (uint32_t sprn)
{
    qemu_log("Write SPR %d %03x <= " ADDRX "\n",
             sprn, sprn, env->spr[sprn]);
}

target_ulong helper_load_tbl (void)
{
    return cpu_ppc_load_tbl(env);
}

target_ulong helper_load_tbu (void)
{
    return cpu_ppc_load_tbu(env);
}

target_ulong helper_load_atbl (void)
{
    return cpu_ppc_load_atbl(env);
}

target_ulong helper_load_atbu (void)
{
    return cpu_ppc_load_atbu(env);
}

target_ulong helper_load_601_rtcl (void)
{
    return cpu_ppc601_load_rtcl(env);
}

target_ulong helper_load_601_rtcu (void)
{
    return cpu_ppc601_load_rtcu(env);
}

#if !defined(CONFIG_USER_ONLY)
#if defined(TARGET_PPC64)
void helper_store_asr (target_ulong val)
{
    ppc_store_asr(env, val);
}
#endif

void helper_store_sdr1 (target_ulong val)
{
    ppc_store_sdr1(env, val);
}

void helper_store_tbl (target_ulong val)
{
    cpu_ppc_store_tbl(env, val);
}

void helper_store_tbu (target_ulong val)
{
    cpu_ppc_store_tbu(env, val);
}

void helper_store_atbl (target_ulong val)
{
    cpu_ppc_store_atbl(env, val);
}

void helper_store_atbu (target_ulong val)
{
    cpu_ppc_store_atbu(env, val);
}

void helper_store_601_rtcl (target_ulong val)
{
    cpu_ppc601_store_rtcl(env, val);
}

void helper_store_601_rtcu (target_ulong val)
{
    cpu_ppc601_store_rtcu(env, val);
}

target_ulong helper_load_decr (void)
{
    return cpu_ppc_load_decr(env);
}

void helper_store_decr (target_ulong val)
{
    cpu_ppc_store_decr(env, val);
}

void helper_store_hid0_601 (target_ulong val)
{
    target_ulong hid0;

    hid0 = env->spr[SPR_HID0];
    if ((val ^ hid0) & 0x00000008) {
        /* Change current endianness */
        env->hflags &= ~(1 << MSR_LE);
        env->hflags_nmsr &= ~(1 << MSR_LE);
        env->hflags_nmsr |= (1 << MSR_LE) & (((val >> 3) & 1) << MSR_LE);
        env->hflags |= env->hflags_nmsr;
        qemu_log("%s: set endianness to %c => " ADDRX "\n",
                 __func__, val & 0x8 ? 'l' : 'b', env->hflags);
    }
    env->spr[SPR_HID0] = (uint32_t)val;
}

void helper_store_403_pbr (uint32_t num, target_ulong value)
{
    if (likely(env->pb[num] != value)) {
        env->pb[num] = value;
        /* Should be optimized */
        tlb_flush(env, 1);
    }
}

target_ulong helper_load_40x_pit (void)
{
    return load_40x_pit(env);
}

void helper_store_40x_pit (target_ulong val)
{
    store_40x_pit(env, val);
}

void helper_store_40x_dbcr0 (target_ulong val)
{
    store_40x_dbcr0(env, val);
}

void helper_store_40x_sler (target_ulong val)
{
    store_40x_sler(env, val);
}

void helper_store_booke_tcr (target_ulong val)
{
    store_booke_tcr(env, val);
}

void helper_store_booke_tsr (target_ulong val)
{
    store_booke_tsr(env, val);
}

void helper_store_ibatu (uint32_t nr, target_ulong val)
{
    ppc_store_ibatu(env, nr, val);
}

void helper_store_ibatl (uint32_t nr, target_ulong val)
{
    ppc_store_ibatl(env, nr, val);
}

void helper_store_dbatu (uint32_t nr, target_ulong val)
{
    ppc_store_dbatu(env, nr, val);
}

void helper_store_dbatl (uint32_t nr, target_ulong val)
{
    ppc_store_dbatl(env, nr, val);
}

void helper_store_601_batl (uint32_t nr, target_ulong val)
{
    ppc_store_ibatl_601(env, nr, val);
}

void helper_store_601_batu (uint32_t nr, target_ulong val)
{
    ppc_store_ibatu_601(env, nr, val);
}
#endif

/*****************************************************************************/
/* Memory load and stores */

static always_inline target_ulong addr_add(target_ulong addr, target_long arg)
{
#if defined(TARGET_PPC64)
    if (!msr_sf)
        return (uint32_t)(addr + arg);
    else
#endif
        return addr + arg;
}

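/* Editorial note (not in the original source): in 32-bit mode (msr_sf
 * clear on a 64-bit CPU) effective addresses wrap at 4 GB, so e.g.
 * addr_add(0xFFFFFFFC, 8) yields 0x00000004 rather than 0x100000004.
 */
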
void helper_lmw (target_ulong addr, uint32_t reg)
{
    for (; reg < 32; reg++) {
        if (msr_le)
            env->gpr[reg] = bswap32(ldl(addr));
        else
            env->gpr[reg] = ldl(addr);
        addr = addr_add(addr, 4);
    }
}

void helper_stmw (target_ulong addr, uint32_t reg)
{
    for (; reg < 32; reg++) {
        if (msr_le)
            stl(addr, bswap32((uint32_t)env->gpr[reg]));
        else
            stl(addr, (uint32_t)env->gpr[reg]);
        addr = addr_add(addr, 4);
    }
}

void helper_lsw(target_ulong addr, uint32_t nb, uint32_t reg)
{
    int sh;

    for (; nb > 3; nb -= 4) {
        env->gpr[reg] = ldl(addr);
        reg = (reg + 1) % 32;
        addr = addr_add(addr, 4);
    }
    if (unlikely(nb > 0)) {
        env->gpr[reg] = 0;
        for (sh = 24; nb > 0; nb--, sh -= 8) {
            env->gpr[reg] |= ldub(addr) << sh;
            addr = addr_add(addr, 1);
        }
    }
}

/* PPC32 specification says we must generate an exception if
 * rA is in the range of registers to be loaded.
 * On the other hand, IBM says this is valid, but rA won't be loaded.
 * For now, I'll follow the spec...
 */
void helper_lswx(target_ulong addr, uint32_t reg, uint32_t ra, uint32_t rb)
{
    if (likely(xer_bc != 0)) {
        if (unlikely((ra != 0 && reg < ra && (reg + xer_bc) > ra) ||
                     (reg < rb && (reg + xer_bc) > rb))) {
            helper_raise_exception_err(POWERPC_EXCP_PROGRAM,
                                       POWERPC_EXCP_INVAL |
                                       POWERPC_EXCP_INVAL_LSWX);
        } else {
            helper_lsw(addr, xer_bc, reg);
        }
    }
}

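/* Worked example (editorial, not in the original source): with reg = 3,
 * ra = 4 and xer_bc = 8, the load would touch r3 and r4, so
 * "reg < ra && (reg + xer_bc) > ra" holds and the program exception is
 * raised instead of silently skipping rA.
 */
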
void helper_stsw(target_ulong addr, uint32_t nb, uint32_t reg)
{
    int sh;

    for (; nb > 3; nb -= 4) {
        stl(addr, env->gpr[reg]);
        reg = (reg + 1) % 32;
        addr = addr_add(addr, 4);
    }
    if (unlikely(nb > 0)) {
        for (sh = 24; nb > 0; nb--, sh -= 8) {
            stb(addr, (env->gpr[reg] >> sh) & 0xFF);
            addr = addr_add(addr, 1);
        }
    }
}

static void do_dcbz(target_ulong addr, int dcache_line_size)
{
    int i;

    addr &= ~(dcache_line_size - 1);
    for (i = 0; i < dcache_line_size; i += 4) {
        stl(addr + i, 0);
    }
    if (env->reserve == addr)
        env->reserve = (target_ulong)-1ULL;
}

void helper_dcbz(target_ulong addr)
{
    do_dcbz(addr, env->dcache_line_size);
}

void helper_dcbz_970(target_ulong addr)
{
    /* HID5[56:57] == 0b01 selects the 970's 32-byte dcbz mode */
    if (((env->spr[SPR_970_HID5] >> 7) & 0x3) == 1)
        do_dcbz(addr, 32);
    else
        do_dcbz(addr, env->dcache_line_size);
}

void helper_icbi(target_ulong addr)
{
    uint32_t tmp;

    addr &= ~(env->dcache_line_size - 1);
    /* Invalidate one cache line :
     * PowerPC specification says this is to be treated like a load
     * (not a fetch) by the MMU. To be sure it will be so,
     * do the load "by hand".
     */
    tmp = ldl(addr);
    tb_invalidate_page_range(addr, addr + env->icache_line_size);
}

target_ulong helper_lscbx (target_ulong addr, uint32_t reg, uint32_t ra, uint32_t rb)
{
    int i, c, d;

    d = 24;
    for (i = 0; i < xer_bc; i++) {
        c = ldub(addr);
        addr = addr_add(addr, 1);
        /* ra (if not 0) and rb are never modified */
        if (likely(reg != rb && (ra == 0 || reg != ra))) {
            env->gpr[reg] = (env->gpr[reg] & ~(0xFF << d)) | (c << d);
        }
        if (unlikely(c == xer_cmp))
            break;
        if (likely(d != 0)) {
            d -= 8;
        } else {
            d = 24;
            reg++;
            reg = reg & 0x1F;
        }
    }
    return i;
}

/*****************************************************************************/
/* Fixed point operations helpers */
#if defined(TARGET_PPC64)

/* multiply high word */
uint64_t helper_mulhd (uint64_t arg1, uint64_t arg2)
{
    uint64_t tl, th;

    muls64(&tl, &th, arg1, arg2);
    return th;
}

/* multiply high word unsigned */
uint64_t helper_mulhdu (uint64_t arg1, uint64_t arg2)
{
    uint64_t tl, th;

    mulu64(&tl, &th, arg1, arg2);
    return th;
}

uint64_t helper_mulldo (uint64_t arg1, uint64_t arg2)
{
    int64_t th;
    uint64_t tl;

    muls64(&tl, (uint64_t *)&th, arg1, arg2);
    /* If th != 0 && th != -1, then we had an overflow */
    if (likely((uint64_t)(th + 1) <= 1)) {
        env->xer &= ~(1 << XER_OV);
    } else {
        env->xer |= (1 << XER_OV) | (1 << XER_SO);
    }
    return (int64_t)tl;
}
#endif

target_ulong helper_cntlzw (target_ulong t)
{
    return clz32(t);
}

#if defined(TARGET_PPC64)
target_ulong helper_cntlzd (target_ulong t)
{
    return clz64(t);
}
#endif

/* shift right arithmetic helper */
target_ulong helper_sraw (target_ulong value, target_ulong shift)
{
    int32_t ret;

    if (likely(!(shift & 0x20))) {
        if (likely((uint32_t)shift != 0)) {
            shift &= 0x1f;
            ret = (int32_t)value >> shift;
            if (likely(ret >= 0 || (value & ((1 << shift) - 1)) == 0)) {
                env->xer &= ~(1 << XER_CA);
            } else {
                env->xer |= (1 << XER_CA);
            }
        } else {
            ret = (int32_t)value;
            env->xer &= ~(1 << XER_CA);
        }
    } else {
        ret = (int32_t)value >> 31;
        if (ret) {
            env->xer |= (1 << XER_CA);
        } else {
            env->xer &= ~(1 << XER_CA);
        }
    }
    return (target_long)ret;
}

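/* Editorial example (not in the original source): XER_CA is set only when
 * a negative value loses a 1 bit to the shift, e.g.
 * helper_sraw(0xFFFFFFFD, 1) returns 0xFFFFFFFE with CA set, while
 * helper_sraw(0xFFFFFFFC, 1) returns 0xFFFFFFFE with CA clear.
 */
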
#if defined(TARGET_PPC64)
target_ulong helper_srad (target_ulong value, target_ulong shift)
{
    int64_t ret;

    if (likely(!(shift & 0x40))) {
        if (likely((uint64_t)shift != 0)) {
            shift &= 0x3f;
            ret = (int64_t)value >> shift;
            if (likely(ret >= 0 || (value & (((uint64_t)1 << shift) - 1)) == 0)) {
                env->xer &= ~(1 << XER_CA);
            } else {
                env->xer |= (1 << XER_CA);
            }
        } else {
            ret = (int64_t)value;
            env->xer &= ~(1 << XER_CA);
        }
    } else {
        ret = (int64_t)value >> 63;
        if (ret) {
            env->xer |= (1 << XER_CA);
        } else {
            env->xer &= ~(1 << XER_CA);
        }
    }
    return ret;
}
#endif

target_ulong helper_popcntb (target_ulong val)
{
    val = (val & 0x55555555) + ((val >>  1) & 0x55555555);
    val = (val & 0x33333333) + ((val >>  2) & 0x33333333);
    val = (val & 0x0f0f0f0f) + ((val >>  4) & 0x0f0f0f0f);
    return val;
}

#if defined(TARGET_PPC64)
target_ulong helper_popcntb_64 (target_ulong val)
{
    val = (val & 0x5555555555555555ULL) + ((val >>  1) & 0x5555555555555555ULL);
    val = (val & 0x3333333333333333ULL) + ((val >>  2) & 0x3333333333333333ULL);
    val = (val & 0x0f0f0f0f0f0f0f0fULL) + ((val >>  4) & 0x0f0f0f0f0f0f0f0fULL);
    return val;
}
#endif

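/* Worked example (editorial, not in the original source): the three masked
 * add steps form a per-byte population count.  For val = 0x000000FF the
 * intermediate values are 0xAA, then 0x44, then 0x08, i.e. each byte ends
 * up holding the number of 1 bits it contained.
 */
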
/*****************************************************************************/
/* Floating point operations helpers */
uint64_t helper_float32_to_float64(uint32_t arg)
{
    CPU_FloatU f;
    CPU_DoubleU d;

    f.l = arg;
    d.d = float32_to_float64(f.f, &env->fp_status);
    return d.ll;
}

uint32_t helper_float64_to_float32(uint64_t arg)
{
    CPU_FloatU f;
    CPU_DoubleU d;

    d.ll = arg;
    f.f = float64_to_float32(d.d, &env->fp_status);
    return f.l;
}

static always_inline int isden (float64 d)
{
    CPU_DoubleU u;

    u.d = d;

    return ((u.ll >> 52) & 0x7FF) == 0;
}

uint32_t helper_compute_fprf (uint64_t arg, uint32_t set_fprf)
{
    CPU_DoubleU farg;
    int isneg;
    int ret;

    farg.ll = arg;
    isneg = float64_is_neg(farg.d);
    if (unlikely(float64_is_nan(farg.d))) {
        if (float64_is_signaling_nan(farg.d)) {
            /* Signaling NaN: flags are undefined */
            ret = 0x00;
        } else {
            /* Quiet NaN */
            ret = 0x11;
        }
    } else if (unlikely(float64_is_infinity(farg.d))) {
        /* +/- infinity */
        if (isneg)
            ret = 0x09;
        else
            ret = 0x05;
    } else {
        if (float64_is_zero(farg.d)) {
            /* +/- zero */
            if (isneg)
                ret = 0x12;
            else
                ret = 0x02;
        } else {
            if (isden(farg.d)) {
                /* Denormalized numbers */
                ret = 0x10;
            } else {
                /* Normalized numbers */
                ret = 0x00;
            }
            if (isneg) {
                ret |= 0x08;
            } else {
                ret |= 0x04;
            }
        }
    }
    if (set_fprf) {
        /* We update FPSCR_FPRF */
        env->fpscr &= ~(0x1F << FPSCR_FPRF);
        env->fpscr |= ret << FPSCR_FPRF;
    }
    /* We just need fpcc to update Rc1 */
    return ret & 0xF;
}

/* Floating-point invalid operations exception */
static always_inline uint64_t fload_invalid_op_excp (int op)
{
    uint64_t ret = 0;

    switch (op) {
    case POWERPC_EXCP_FP_VXSNAN:
        env->fpscr |= 1 << FPSCR_VXSNAN;
        break;
    case POWERPC_EXCP_FP_VXSOFT:
        env->fpscr |= 1 << FPSCR_VXSOFT;
        break;
    case POWERPC_EXCP_FP_VXISI:
        /* Magnitude subtraction of infinities */
        env->fpscr |= 1 << FPSCR_VXISI;
        goto update_arith;
    case POWERPC_EXCP_FP_VXIDI:
        /* Division of infinity by infinity */
        env->fpscr |= 1 << FPSCR_VXIDI;
        goto update_arith;
    case POWERPC_EXCP_FP_VXZDZ:
        /* Division of zero by zero */
        env->fpscr |= 1 << FPSCR_VXZDZ;
        goto update_arith;
    case POWERPC_EXCP_FP_VXIMZ:
        /* Multiplication of zero by infinity */
        env->fpscr |= 1 << FPSCR_VXIMZ;
        goto update_arith;
    case POWERPC_EXCP_FP_VXVC:
        /* Ordered comparison of NaN */
        env->fpscr |= 1 << FPSCR_VXVC;
        env->fpscr &= ~(0xF << FPSCR_FPCC);
        env->fpscr |= 0x11 << FPSCR_FPCC;
        /* We must update the target FPR before raising the exception */
        if (fpscr_ve != 0) {
            env->exception_index = POWERPC_EXCP_PROGRAM;
            env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_VXVC;
            /* Update the floating-point enabled exception summary */
            env->fpscr |= 1 << FPSCR_FEX;
            /* Exception is deferred */
        }
        break;
    case POWERPC_EXCP_FP_VXSQRT:
        /* Square root of a negative number */
        env->fpscr |= 1 << FPSCR_VXSQRT;
    update_arith:
        env->fpscr &= ~((1 << FPSCR_FR) | (1 << FPSCR_FI));
        if (fpscr_ve == 0) {
            /* Set the result to quiet NaN */
            ret = 0xFFF8000000000000ULL;
            env->fpscr &= ~(0xF << FPSCR_FPCC);
            env->fpscr |= 0x11 << FPSCR_FPCC;
        }
        break;
    case POWERPC_EXCP_FP_VXCVI:
        /* Invalid conversion */
        env->fpscr |= 1 << FPSCR_VXCVI;
        env->fpscr &= ~((1 << FPSCR_FR) | (1 << FPSCR_FI));
        if (fpscr_ve == 0) {
            /* Set the result to quiet NaN */
            ret = 0xFFF8000000000000ULL;
            env->fpscr &= ~(0xF << FPSCR_FPCC);
            env->fpscr |= 0x11 << FPSCR_FPCC;
        }
        break;
    }
    /* Update the floating-point invalid operation summary */
    env->fpscr |= 1 << FPSCR_VX;
    /* Update the floating-point exception summary */
    env->fpscr |= 1 << FPSCR_FX;
    if (fpscr_ve != 0) {
        /* Update the floating-point enabled exception summary */
        env->fpscr |= 1 << FPSCR_FEX;
        if (msr_fe0 != 0 || msr_fe1 != 0)
            helper_raise_exception_err(POWERPC_EXCP_PROGRAM, POWERPC_EXCP_FP | op);
    }
    return ret;
}

static always_inline void float_zero_divide_excp (void)
{
    env->fpscr |= 1 << FPSCR_ZX;
    env->fpscr &= ~((1 << FPSCR_FR) | (1 << FPSCR_FI));
    /* Update the floating-point exception summary */
    env->fpscr |= 1 << FPSCR_FX;
    if (fpscr_ze != 0) {
        /* Update the floating-point enabled exception summary */
        env->fpscr |= 1 << FPSCR_FEX;
        if (msr_fe0 != 0 || msr_fe1 != 0) {
            helper_raise_exception_err(POWERPC_EXCP_PROGRAM,
                                       POWERPC_EXCP_FP | POWERPC_EXCP_FP_ZX);
        }
    }
}

static always_inline void float_overflow_excp (void)
{
    env->fpscr |= 1 << FPSCR_OX;
    /* Update the floating-point exception summary */
    env->fpscr |= 1 << FPSCR_FX;
    if (fpscr_oe != 0) {
        /* XXX: should adjust the result */
        /* Update the floating-point enabled exception summary */
        env->fpscr |= 1 << FPSCR_FEX;
        /* We must update the target FPR before raising the exception */
        env->exception_index = POWERPC_EXCP_PROGRAM;
        env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_OX;
    } else {
        env->fpscr |= 1 << FPSCR_XX;
        env->fpscr |= 1 << FPSCR_FI;
    }
}

static always_inline void float_underflow_excp (void)
{
    env->fpscr |= 1 << FPSCR_UX;
    /* Update the floating-point exception summary */
    env->fpscr |= 1 << FPSCR_FX;
    if (fpscr_ue != 0) {
        /* XXX: should adjust the result */
        /* Update the floating-point enabled exception summary */
        env->fpscr |= 1 << FPSCR_FEX;
        /* We must update the target FPR before raising the exception */
        env->exception_index = POWERPC_EXCP_PROGRAM;
        env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_UX;
    }
}

static always_inline void float_inexact_excp (void)
{
    env->fpscr |= 1 << FPSCR_XX;
    /* Update the floating-point exception summary */
    env->fpscr |= 1 << FPSCR_FX;
    if (fpscr_xe != 0) {
        /* Update the floating-point enabled exception summary */
        env->fpscr |= 1 << FPSCR_FEX;
        /* We must update the target FPR before raising the exception */
        env->exception_index = POWERPC_EXCP_PROGRAM;
        env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_XX;
    }
}

static always_inline void fpscr_set_rounding_mode (void)
{
    int rnd_type;

    /* Set rounding mode */
    switch (fpscr_rn) {
    case 0:
        /* Best approximation (round to nearest) */
        rnd_type = float_round_nearest_even;
        break;
    case 1:
        /* Smaller magnitude (round toward zero) */
        rnd_type = float_round_to_zero;
        break;
    case 2:
        /* Round toward +infinity */
        rnd_type = float_round_up;
        break;
    default:
    case 3:
        /* Round toward -infinity */
        rnd_type = float_round_down;
        break;
    }
    set_float_rounding_mode(rnd_type, &env->fp_status);
}

void helper_fpscr_clrbit (uint32_t bit)
{
    int prev;

    prev = (env->fpscr >> bit) & 1;
    env->fpscr &= ~(1 << bit);
    if (prev == 1) {
        switch (bit) {
        case FPSCR_RN1:
        case FPSCR_RN:
            fpscr_set_rounding_mode();
            break;
        default:
            break;
        }
    }
}

void helper_fpscr_setbit (uint32_t bit)
{
    int prev;

    prev = (env->fpscr >> bit) & 1;
    env->fpscr |= 1 << bit;
    if (prev == 0) {
        switch (bit) {
        case FPSCR_VX:
            env->fpscr |= 1 << FPSCR_FX;
            if (fpscr_ve)
                goto raise_ve;
            break;
        case FPSCR_OX:
            env->fpscr |= 1 << FPSCR_FX;
            if (fpscr_oe)
                goto raise_oe;
            break;
        case FPSCR_UX:
            env->fpscr |= 1 << FPSCR_FX;
            if (fpscr_ue)
                goto raise_ue;
            break;
        case FPSCR_ZX:
            env->fpscr |= 1 << FPSCR_FX;
            if (fpscr_ze)
                goto raise_ze;
            break;
        case FPSCR_XX:
            env->fpscr |= 1 << FPSCR_FX;
            if (fpscr_xe)
                goto raise_xe;
            break;
        case FPSCR_VXSNAN:
        case FPSCR_VXISI:
        case FPSCR_VXIDI:
        case FPSCR_VXZDZ:
        case FPSCR_VXIMZ:
        case FPSCR_VXVC:
        case FPSCR_VXSOFT:
        case FPSCR_VXSQRT:
        case FPSCR_VXCVI:
            env->fpscr |= 1 << FPSCR_VX;
            env->fpscr |= 1 << FPSCR_FX;
            if (fpscr_ve != 0)
                goto raise_ve;
            break;
        case FPSCR_VE:
            if (fpscr_vx != 0) {
            raise_ve:
                env->error_code = POWERPC_EXCP_FP;
                if (fpscr_vxsnan)
                    env->error_code |= POWERPC_EXCP_FP_VXSNAN;
                if (fpscr_vxisi)
                    env->error_code |= POWERPC_EXCP_FP_VXISI;
                if (fpscr_vxidi)
                    env->error_code |= POWERPC_EXCP_FP_VXIDI;
                if (fpscr_vxzdz)
                    env->error_code |= POWERPC_EXCP_FP_VXZDZ;
                if (fpscr_vximz)
                    env->error_code |= POWERPC_EXCP_FP_VXIMZ;
                if (fpscr_vxvc)
                    env->error_code |= POWERPC_EXCP_FP_VXVC;
                if (fpscr_vxsoft)
                    env->error_code |= POWERPC_EXCP_FP_VXSOFT;
                if (fpscr_vxsqrt)
                    env->error_code |= POWERPC_EXCP_FP_VXSQRT;
                if (fpscr_vxcvi)
                    env->error_code |= POWERPC_EXCP_FP_VXCVI;
                goto raise_excp;
            }
            break;
        case FPSCR_OE:
            if (fpscr_ox != 0) {
            raise_oe:
                env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_OX;
                goto raise_excp;
            }
            break;
        case FPSCR_UE:
            if (fpscr_ux != 0) {
            raise_ue:
                env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_UX;
                goto raise_excp;
            }
            break;
        case FPSCR_ZE:
            if (fpscr_zx != 0) {
            raise_ze:
                env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_ZX;
                goto raise_excp;
            }
            break;
        case FPSCR_XE:
            if (fpscr_xx != 0) {
            raise_xe:
                env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_XX;
                goto raise_excp;
            }
            break;
        case FPSCR_RN1:
        case FPSCR_RN:
            fpscr_set_rounding_mode();
            break;
        default:
            break;
        raise_excp:
            /* Update the floating-point enabled exception summary */
            env->fpscr |= 1 << FPSCR_FEX;
            /* We have to update Rc1 before raising the exception */
            env->exception_index = POWERPC_EXCP_PROGRAM;
            break;
        }
    }
}

void helper_store_fpscr (uint64_t arg, uint32_t mask)
{
    /*
     * We use only the 32 LSB of the incoming fpr
     */
    uint32_t prev, new;
    int i;

    prev = env->fpscr;
    new = (uint32_t)arg;
    new &= ~0x60000000;
    new |= prev & 0x60000000;
    for (i = 0; i < 8; i++) {
        if (mask & (1 << i)) {
            env->fpscr &= ~(0xF << (4 * i));
            env->fpscr |= new & (0xF << (4 * i));
        }
    }
    /* Update VX and FEX */
    if (fpscr_ix != 0)
        env->fpscr |= 1 << FPSCR_VX;
    else
        env->fpscr &= ~(1 << FPSCR_VX);
    if ((fpscr_ex & fpscr_eex) != 0) {
        env->fpscr |= 1 << FPSCR_FEX;
        env->exception_index = POWERPC_EXCP_PROGRAM;
        /* XXX: we should compute it properly */
        env->error_code = POWERPC_EXCP_FP;
    } else {
        env->fpscr &= ~(1 << FPSCR_FEX);
    }
    fpscr_set_rounding_mode();
}

void helper_float_check_status (void)
{
#ifdef CONFIG_SOFTFLOAT
    if (env->exception_index == POWERPC_EXCP_PROGRAM &&
        (env->error_code & POWERPC_EXCP_FP)) {
        /* Deferred floating-point exception after target FPR update */
        if (msr_fe0 != 0 || msr_fe1 != 0)
            helper_raise_exception_err(env->exception_index, env->error_code);
    } else {
        int status = get_float_exception_flags(&env->fp_status);
        if (status & float_flag_divbyzero) {
            float_zero_divide_excp();
        } else if (status & float_flag_overflow) {
            float_overflow_excp();
        } else if (status & float_flag_underflow) {
            float_underflow_excp();
        } else if (status & float_flag_inexact) {
            float_inexact_excp();
        }
    }
#else
    if (env->exception_index == POWERPC_EXCP_PROGRAM &&
        (env->error_code & POWERPC_EXCP_FP)) {
        /* Deferred floating-point exception after target FPR update */
        if (msr_fe0 != 0 || msr_fe1 != 0)
            helper_raise_exception_err(env->exception_index, env->error_code);
    }
#endif
}

#ifdef CONFIG_SOFTFLOAT
void helper_reset_fpstatus (void)
{
    set_float_exception_flags(0, &env->fp_status);
}
#endif

/* fadd - fadd. */
uint64_t helper_fadd (uint64_t arg1, uint64_t arg2)
{
    CPU_DoubleU farg1, farg2;

    farg1.ll = arg1;
    farg2.ll = arg2;
#if USE_PRECISE_EMULATION
    if (unlikely(float64_is_signaling_nan(farg1.d) ||
                 float64_is_signaling_nan(farg2.d))) {
        /* sNaN addition */
        farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
    } else if (unlikely(float64_is_infinity(farg1.d) && float64_is_infinity(farg2.d) &&
                        float64_is_neg(farg1.d) != float64_is_neg(farg2.d))) {
        /* Magnitude subtraction of infinities */
        farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXISI);
    } else {
        farg1.d = float64_add(farg1.d, farg2.d, &env->fp_status);
    }
#else
    farg1.d = float64_add(farg1.d, farg2.d, &env->fp_status);
#endif
    return farg1.ll;
}

/* fsub - fsub. */
uint64_t helper_fsub (uint64_t arg1, uint64_t arg2)
{
    CPU_DoubleU farg1, farg2;

    farg1.ll = arg1;
    farg2.ll = arg2;
#if USE_PRECISE_EMULATION
    if (unlikely(float64_is_signaling_nan(farg1.d) ||
                 float64_is_signaling_nan(farg2.d))) {
        /* sNaN subtraction */
        farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
    } else if (unlikely(float64_is_infinity(farg1.d) && float64_is_infinity(farg2.d) &&
                        float64_is_neg(farg1.d) == float64_is_neg(farg2.d))) {
        /* Magnitude subtraction of infinities */
        farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXISI);
    } else {
        farg1.d = float64_sub(farg1.d, farg2.d, &env->fp_status);
    }
#else
    farg1.d = float64_sub(farg1.d, farg2.d, &env->fp_status);
#endif
    return farg1.ll;
}

/* fmul - fmul. */
uint64_t helper_fmul (uint64_t arg1, uint64_t arg2)
{
    CPU_DoubleU farg1, farg2;

    farg1.ll = arg1;
    farg2.ll = arg2;
#if USE_PRECISE_EMULATION
    if (unlikely(float64_is_signaling_nan(farg1.d) ||
                 float64_is_signaling_nan(farg2.d))) {
        /* sNaN multiplication */
        farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
    } else if (unlikely((float64_is_infinity(farg1.d) && float64_is_zero(farg2.d)) ||
                        (float64_is_zero(farg1.d) && float64_is_infinity(farg2.d)))) {
        /* Multiplication of zero by infinity */
        farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXIMZ);
    } else {
        farg1.d = float64_mul(farg1.d, farg2.d, &env->fp_status);
    }
#else
    farg1.d = float64_mul(farg1.d, farg2.d, &env->fp_status);
#endif
    return farg1.ll;
}

/* fdiv - fdiv. */
uint64_t helper_fdiv (uint64_t arg1, uint64_t arg2)
{
    CPU_DoubleU farg1, farg2;

    farg1.ll = arg1;
    farg2.ll = arg2;
#if USE_PRECISE_EMULATION
    if (unlikely(float64_is_signaling_nan(farg1.d) ||
                 float64_is_signaling_nan(farg2.d))) {
        /* sNaN division */
        farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
    } else if (unlikely(float64_is_infinity(farg1.d) && float64_is_infinity(farg2.d))) {
        /* Division of infinity by infinity */
        farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXIDI);
    } else if (unlikely(float64_is_zero(farg1.d) && float64_is_zero(farg2.d))) {
        /* Division of zero by zero */
        farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXZDZ);
    } else {
        farg1.d = float64_div(farg1.d, farg2.d, &env->fp_status);
    }
#else
    farg1.d = float64_div(farg1.d, farg2.d, &env->fp_status);
#endif
    return farg1.ll;
}

/* fabs */
uint64_t helper_fabs (uint64_t arg)
{
    CPU_DoubleU farg;

    farg.ll = arg;
    farg.d = float64_abs(farg.d);
    return farg.ll;
}

/* fnabs */
uint64_t helper_fnabs (uint64_t arg)
{
    CPU_DoubleU farg;

    farg.ll = arg;
    farg.d = float64_abs(farg.d);
    farg.d = float64_chs(farg.d);
    return farg.ll;
}

/* fneg */
uint64_t helper_fneg (uint64_t arg)
{
    CPU_DoubleU farg;

    farg.ll = arg;
    farg.d = float64_chs(farg.d);
    return farg.ll;
}

/* fctiw - fctiw. */
uint64_t helper_fctiw (uint64_t arg)
{
    CPU_DoubleU farg;
    farg.ll = arg;

    if (unlikely(float64_is_signaling_nan(farg.d))) {
        /* sNaN conversion */
        farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN | POWERPC_EXCP_FP_VXCVI);
    } else if (unlikely(float64_is_nan(farg.d) || float64_is_infinity(farg.d))) {
        /* qNan / infinity conversion */
        farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXCVI);
    } else {
        farg.ll = float64_to_int32(farg.d, &env->fp_status);
#if USE_PRECISE_EMULATION
        /* XXX: higher bits are not supposed to be significant.
         *      to make tests easier, return the same as a real PowerPC 750
         */
        farg.ll |= 0xFFF80000ULL << 32;
#endif
    }
    return farg.ll;
}

/* fctiwz - fctiwz. */
uint64_t helper_fctiwz (uint64_t arg)
{
    CPU_DoubleU farg;
    farg.ll = arg;

    if (unlikely(float64_is_signaling_nan(farg.d))) {
        /* sNaN conversion */
        farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN | POWERPC_EXCP_FP_VXCVI);
    } else if (unlikely(float64_is_nan(farg.d) || float64_is_infinity(farg.d))) {
        /* qNan / infinity conversion */
        farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXCVI);
    } else {
        farg.ll = float64_to_int32_round_to_zero(farg.d, &env->fp_status);
#if USE_PRECISE_EMULATION
        /* XXX: higher bits are not supposed to be significant.
         *      to make tests easier, return the same as a real PowerPC 750
         */
        farg.ll |= 0xFFF80000ULL << 32;
#endif
    }
    return farg.ll;
}

#if defined(TARGET_PPC64)
/* fcfid - fcfid. */
uint64_t helper_fcfid (uint64_t arg)
{
    CPU_DoubleU farg;

    farg.d = int64_to_float64(arg, &env->fp_status);
    return farg.ll;
}

/* fctid - fctid. */
uint64_t helper_fctid (uint64_t arg)
{
    CPU_DoubleU farg;
    farg.ll = arg;

    if (unlikely(float64_is_signaling_nan(farg.d))) {
        /* sNaN conversion */
        farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN | POWERPC_EXCP_FP_VXCVI);
    } else if (unlikely(float64_is_nan(farg.d) || float64_is_infinity(farg.d))) {
        /* qNan / infinity conversion */
        farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXCVI);
    } else {
        farg.ll = float64_to_int64(farg.d, &env->fp_status);
    }
    return farg.ll;
}

/* fctidz - fctidz. */
uint64_t helper_fctidz (uint64_t arg)
{
    CPU_DoubleU farg;
    farg.ll = arg;

    if (unlikely(float64_is_signaling_nan(farg.d))) {
        /* sNaN conversion */
        farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN | POWERPC_EXCP_FP_VXCVI);
    } else if (unlikely(float64_is_nan(farg.d) || float64_is_infinity(farg.d))) {
        /* qNan / infinity conversion */
        farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXCVI);
    } else {
        farg.ll = float64_to_int64_round_to_zero(farg.d, &env->fp_status);
    }
    return farg.ll;
}
#endif

static always_inline uint64_t do_fri (uint64_t arg, int rounding_mode)
{
    CPU_DoubleU farg;
    farg.ll = arg;

    if (unlikely(float64_is_signaling_nan(farg.d))) {
        /* sNaN round */
        farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN | POWERPC_EXCP_FP_VXCVI);
    } else if (unlikely(float64_is_nan(farg.d) || float64_is_infinity(farg.d))) {
        /* qNan / infinity round */
        farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXCVI);
    } else {
        set_float_rounding_mode(rounding_mode, &env->fp_status);
        farg.ll = float64_round_to_int(farg.d, &env->fp_status);
        /* Restore rounding mode from FPSCR */
        fpscr_set_rounding_mode();
    }
    return farg.ll;
}

uint64_t helper_frin (uint64_t arg)
{
    return do_fri(arg, float_round_nearest_even);
}

uint64_t helper_friz (uint64_t arg)
{
    return do_fri(arg, float_round_to_zero);
}

uint64_t helper_frip (uint64_t arg)
{
    return do_fri(arg, float_round_up);
}

uint64_t helper_frim (uint64_t arg)
{
    return do_fri(arg, float_round_down);
}

/* fmadd - fmadd. */
uint64_t helper_fmadd (uint64_t arg1, uint64_t arg2, uint64_t arg3)
{
    CPU_DoubleU farg1, farg2, farg3;

    farg1.ll = arg1;
    farg2.ll = arg2;
    farg3.ll = arg3;
#if USE_PRECISE_EMULATION
    if (unlikely(float64_is_signaling_nan(farg1.d) ||
                 float64_is_signaling_nan(farg2.d) ||
                 float64_is_signaling_nan(farg3.d))) {
        /* sNaN operation */
        farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
    } else if (unlikely((float64_is_infinity(farg1.d) && float64_is_zero(farg2.d)) ||
                        (float64_is_zero(farg1.d) && float64_is_infinity(farg2.d)))) {
        /* Multiplication of zero by infinity */
        farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXIMZ);
    } else {
#ifdef FLOAT128
        /* This is the way the PowerPC specification defines it */
        float128 ft0_128, ft1_128;

        ft0_128 = float64_to_float128(farg1.d, &env->fp_status);
        ft1_128 = float64_to_float128(farg2.d, &env->fp_status);
        ft0_128 = float128_mul(ft0_128, ft1_128, &env->fp_status);
        if (unlikely(float128_is_infinity(ft0_128) && float64_is_infinity(farg3.d) &&
                     float128_is_neg(ft0_128) != float64_is_neg(farg3.d))) {
            /* Magnitude subtraction of infinities */
            farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXISI);
        } else {
            ft1_128 = float64_to_float128(farg3.d, &env->fp_status);
            ft0_128 = float128_add(ft0_128, ft1_128, &env->fp_status);
            farg1.d = float128_to_float64(ft0_128, &env->fp_status);
        }
#else
        /* This is OK on x86 hosts */
        farg1.d = (farg1.d * farg2.d) + farg3.d;
#endif
    }
#else
    farg1.d = float64_mul(farg1.d, farg2.d, &env->fp_status);
    farg1.d = float64_add(farg1.d, farg3.d, &env->fp_status);
#endif
    return farg1.ll;
}

/* fmsub - fmsub. */
uint64_t helper_fmsub (uint64_t arg1, uint64_t arg2, uint64_t arg3)
{
    CPU_DoubleU farg1, farg2, farg3;

    farg1.ll = arg1;
    farg2.ll = arg2;
    farg3.ll = arg3;
#if USE_PRECISE_EMULATION
    if (unlikely(float64_is_signaling_nan(farg1.d) ||
                 float64_is_signaling_nan(farg2.d) ||
                 float64_is_signaling_nan(farg3.d))) {
        /* sNaN operation */
        farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
    } else if (unlikely((float64_is_infinity(farg1.d) && float64_is_zero(farg2.d)) ||
                        (float64_is_zero(farg1.d) && float64_is_infinity(farg2.d)))) {
        /* Multiplication of zero by infinity */
        farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXIMZ);
    } else {
#ifdef FLOAT128
        /* This is the way the PowerPC specification defines it */
        float128 ft0_128, ft1_128;

        ft0_128 = float64_to_float128(farg1.d, &env->fp_status);
        ft1_128 = float64_to_float128(farg2.d, &env->fp_status);
        ft0_128 = float128_mul(ft0_128, ft1_128, &env->fp_status);
        if (unlikely(float128_is_infinity(ft0_128) && float64_is_infinity(farg3.d) &&
                     float128_is_neg(ft0_128) == float64_is_neg(farg3.d))) {
            /* Magnitude subtraction of infinities */
            farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXISI);
        } else {
            ft1_128 = float64_to_float128(farg3.d, &env->fp_status);
            ft0_128 = float128_sub(ft0_128, ft1_128, &env->fp_status);
            farg1.d = float128_to_float64(ft0_128, &env->fp_status);
        }
#else
        /* This is OK on x86 hosts */
        farg1.d = (farg1.d * farg2.d) - farg3.d;
#endif
    }
#else
    farg1.d = float64_mul(farg1.d, farg2.d, &env->fp_status);
    farg1.d = float64_sub(farg1.d, farg3.d, &env->fp_status);
#endif
    return farg1.ll;
}

/* fnmadd - fnmadd. */
uint64_t helper_fnmadd (uint64_t arg1, uint64_t arg2, uint64_t arg3)
{
    CPU_DoubleU farg1, farg2, farg3;

    farg1.ll = arg1;
    farg2.ll = arg2;
    farg3.ll = arg3;

    if (unlikely(float64_is_signaling_nan(farg1.d) ||
                 float64_is_signaling_nan(farg2.d) ||
                 float64_is_signaling_nan(farg3.d))) {
        /* sNaN operation */
        farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
    } else if (unlikely((float64_is_infinity(farg1.d) && float64_is_zero(farg2.d)) ||
                        (float64_is_zero(farg1.d) && float64_is_infinity(farg2.d)))) {
        /* Multiplication of zero by infinity */
        farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXIMZ);
    } else {
#if USE_PRECISE_EMULATION
#ifdef FLOAT128
        /* This is the way the PowerPC specification defines it */
        float128 ft0_128, ft1_128;

        ft0_128 = float64_to_float128(farg1.d, &env->fp_status);
        ft1_128 = float64_to_float128(farg2.d, &env->fp_status);
        ft0_128 = float128_mul(ft0_128, ft1_128, &env->fp_status);
        if (unlikely(float128_is_infinity(ft0_128) && float64_is_infinity(farg3.d) &&
                     float128_is_neg(ft0_128) != float64_is_neg(farg3.d))) {
            /* Magnitude subtraction of infinities */
            farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXISI);
        } else {
            ft1_128 = float64_to_float128(farg3.d, &env->fp_status);
            ft0_128 = float128_add(ft0_128, ft1_128, &env->fp_status);
            farg1.d = float128_to_float64(ft0_128, &env->fp_status);
        }
#else
        /* This is OK on x86 hosts */
        farg1.d = (farg1.d * farg2.d) + farg3.d;
#endif
#else
        farg1.d = float64_mul(farg1.d, farg2.d, &env->fp_status);
        farg1.d = float64_add(farg1.d, farg3.d, &env->fp_status);
#endif
        if (likely(!float64_is_nan(farg1.d)))
            farg1.d = float64_chs(farg1.d);
    }
    return farg1.ll;
}

/* fnmsub - fnmsub. */
uint64_t helper_fnmsub (uint64_t arg1, uint64_t arg2, uint64_t arg3)
{
    CPU_DoubleU farg1, farg2, farg3;

    farg1.ll = arg1;
    farg2.ll = arg2;
    farg3.ll = arg3;

    if (unlikely(float64_is_signaling_nan(farg1.d) ||
                 float64_is_signaling_nan(farg2.d) ||
                 float64_is_signaling_nan(farg3.d))) {
        /* sNaN operation */
        farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
    } else if (unlikely((float64_is_infinity(farg1.d) && float64_is_zero(farg2.d)) ||
                        (float64_is_zero(farg1.d) && float64_is_infinity(farg2.d)))) {
        /* Multiplication of zero by infinity */
        farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXIMZ);
    } else {
#if USE_PRECISE_EMULATION
#ifdef FLOAT128
        /* This is the way the PowerPC specification defines it */
        float128 ft0_128, ft1_128;

        ft0_128 = float64_to_float128(farg1.d, &env->fp_status);
        ft1_128 = float64_to_float128(farg2.d, &env->fp_status);
        ft0_128 = float128_mul(ft0_128, ft1_128, &env->fp_status);
        if (unlikely(float128_is_infinity(ft0_128) && float64_is_infinity(farg3.d) &&
                     float128_is_neg(ft0_128) == float64_is_neg(farg3.d))) {
            /* Magnitude subtraction of infinities */
            farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXISI);
        } else {
            ft1_128 = float64_to_float128(farg3.d, &env->fp_status);
            ft0_128 = float128_sub(ft0_128, ft1_128, &env->fp_status);
            farg1.d = float128_to_float64(ft0_128, &env->fp_status);
        }
#else
        /* This is OK on x86 hosts */
        farg1.d = (farg1.d * farg2.d) - farg3.d;
#endif
#else
        farg1.d = float64_mul(farg1.d, farg2.d, &env->fp_status);
        farg1.d = float64_sub(farg1.d, farg3.d, &env->fp_status);
#endif
        if (likely(!float64_is_nan(farg1.d)))
            farg1.d = float64_chs(farg1.d);
    }
    return farg1.ll;
}

/* frsp - frsp. */
uint64_t helper_frsp (uint64_t arg)
{
    CPU_DoubleU farg;
    float32 f32;
    farg.ll = arg;

#if USE_PRECISE_EMULATION
    if (unlikely(float64_is_signaling_nan(farg.d))) {
        /* sNaN round to single precision */
        farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
    } else {
        f32 = float64_to_float32(farg.d, &env->fp_status);
        farg.d = float32_to_float64(f32, &env->fp_status);
    }
#else
    f32 = float64_to_float32(farg.d, &env->fp_status);
    farg.d = float32_to_float64(f32, &env->fp_status);
#endif
    return farg.ll;
}

/* fsqrt - fsqrt. */
uint64_t helper_fsqrt (uint64_t arg)
{
    CPU_DoubleU farg;
    farg.ll = arg;

    if (unlikely(float64_is_signaling_nan(farg.d))) {
        /* sNaN square root */
        farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
    } else if (unlikely(float64_is_neg(farg.d) && !float64_is_zero(farg.d))) {
        /* Square root of a negative nonzero number */
        farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSQRT);
    } else {
        farg.d = float64_sqrt(farg.d, &env->fp_status);
    }
    return farg.ll;
}

/* fre - fre. */
uint64_t helper_fre (uint64_t arg)
{
    CPU_DoubleU farg;
    farg.ll = arg;

    if (unlikely(float64_is_signaling_nan(farg.d))) {
        /* sNaN reciprocal */
        farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
    } else {
        farg.d = float64_div(float64_one, farg.d, &env->fp_status);
    }
    return farg.ll;
}

/* fres - fres. */
uint64_t helper_fres (uint64_t arg)
{
    CPU_DoubleU farg;
    float32 f32;
    farg.ll = arg;

    if (unlikely(float64_is_signaling_nan(farg.d))) {
        /* sNaN reciprocal */
        farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
    } else {
        farg.d = float64_div(float64_one, farg.d, &env->fp_status);
        f32 = float64_to_float32(farg.d, &env->fp_status);
        farg.d = float32_to_float64(f32, &env->fp_status);
    }
    return farg.ll;
}

/* frsqrte - frsqrte. */
uint64_t helper_frsqrte (uint64_t arg)
{
    CPU_DoubleU farg;
    float32 f32;
    farg.ll = arg;

    if (unlikely(float64_is_signaling_nan(farg.d))) {
        /* sNaN reciprocal square root */
        farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
    } else if (unlikely(float64_is_neg(farg.d) && !float64_is_zero(farg.d))) {
        /* Reciprocal square root of a negative nonzero number */
        farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSQRT);
    } else {
        farg.d = float64_sqrt(farg.d, &env->fp_status);
        farg.d = float64_div(float64_one, farg.d, &env->fp_status);
        f32 = float64_to_float32(farg.d, &env->fp_status);
        farg.d = float32_to_float64(f32, &env->fp_status);
    }
    return farg.ll;
}

/* fsel - fsel. */
uint64_t helper_fsel (uint64_t arg1, uint64_t arg2, uint64_t arg3)
{
    CPU_DoubleU farg1;

    farg1.ll = arg1;

    if ((!float64_is_neg(farg1.d) || float64_is_zero(farg1.d)) && !float64_is_nan(farg1.d))
        return arg2;
    else
        return arg3;
}

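/* Editorial example (not in the original source): fsel implements
 * "arg1 >= 0.0 ? arg2 : arg3" with -0.0 treated as >= 0 and any NaN
 * selecting arg3; e.g. helper_fsel(-0.0, x, y) returns x, while
 * helper_fsel(NaN, x, y) returns y.
 */
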
void helper_fcmpu (uint64_t arg1, uint64_t arg2, uint32_t crfD)
{
    CPU_DoubleU farg1, farg2;
    uint32_t ret = 0;
    farg1.ll = arg1;
    farg2.ll = arg2;

    if (unlikely(float64_is_nan(farg1.d) ||
                 float64_is_nan(farg2.d))) {
        ret = 0x01UL;
    } else if (float64_lt(farg1.d, farg2.d, &env->fp_status)) {
        ret = 0x08UL;
    } else if (!float64_le(farg1.d, farg2.d, &env->fp_status)) {
        ret = 0x04UL;
    } else {
        ret = 0x02UL;
    }

    env->fpscr &= ~(0x0F << FPSCR_FPRF);
    env->fpscr |= ret << FPSCR_FPRF;
    env->crf[crfD] = ret;
    if (unlikely(ret == 0x01UL
                 && (float64_is_signaling_nan(farg1.d) ||
                     float64_is_signaling_nan(farg2.d)))) {
        /* sNaN comparison */
        fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
    }
}

void helper_fcmpo (uint64_t arg1, uint64_t arg2, uint32_t crfD)
{
    CPU_DoubleU farg1, farg2;
    uint32_t ret = 0;
    farg1.ll = arg1;
    farg2.ll = arg2;

    if (unlikely(float64_is_nan(farg1.d) ||
                 float64_is_nan(farg2.d))) {
        ret = 0x01UL;
    } else if (float64_lt(farg1.d, farg2.d, &env->fp_status)) {
        ret = 0x08UL;
    } else if (!float64_le(farg1.d, farg2.d, &env->fp_status)) {
        ret = 0x04UL;
    } else {
        ret = 0x02UL;
    }

    env->fpscr &= ~(0x0F << FPSCR_FPRF);
    env->fpscr |= ret << FPSCR_FPRF;
    env->crf[crfD] = ret;
    if (unlikely (ret == 0x01UL)) {
        if (float64_is_signaling_nan(farg1.d) ||
            float64_is_signaling_nan(farg2.d)) {
            /* sNaN comparison */
            fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN |
                                  POWERPC_EXCP_FP_VXVC);
        } else {
            /* qNaN comparison */
            fload_invalid_op_excp(POWERPC_EXCP_FP_VXVC);
        }
    }
}

#if !defined (CONFIG_USER_ONLY)
void helper_store_msr (target_ulong val)
{
    val = hreg_store_msr(env, val, 0);
    if (val != 0) {
        env->interrupt_request |= CPU_INTERRUPT_EXITTB;
        helper_raise_exception(val);
    }
}

static always_inline void do_rfi (target_ulong nip, target_ulong msr,
                                  target_ulong msrm, int keep_msrh)
{
#if defined(TARGET_PPC64)
    if (msr & (1ULL << MSR_SF)) {
        nip = (uint64_t)nip;
        msr &= (uint64_t)msrm;
    } else {
        nip = (uint32_t)nip;
        msr = (uint32_t)(msr & msrm);
        if (keep_msrh)
            msr |= env->msr & ~((uint64_t)0xFFFFFFFF);
    }
#else
    nip = (uint32_t)nip;
    msr &= (uint32_t)msrm;
#endif
    /* XXX: beware: this is false if VLE is supported */
    env->nip = nip & ~((target_ulong)0x00000003);
    hreg_store_msr(env, msr, 1);
#if defined (DEBUG_OP)
    cpu_dump_rfi(env->nip, env->msr);
#endif
    /* No need to raise an exception here,
     * as rfi is always the last insn of a TB
     */
    env->interrupt_request |= CPU_INTERRUPT_EXITTB;
}

void helper_rfi (void)
{
    do_rfi(env->spr[SPR_SRR0], env->spr[SPR_SRR1],
           ~((target_ulong)0xFFFF0000), 1);
}

#if defined(TARGET_PPC64)
void helper_rfid (void)
{
    do_rfi(env->spr[SPR_SRR0], env->spr[SPR_SRR1],
           ~((target_ulong)0xFFFF0000), 0);
}

void helper_hrfid (void)
{
    do_rfi(env->spr[SPR_HSRR0], env->spr[SPR_HSRR1],
           ~((target_ulong)0xFFFF0000), 0);
}
#endif
#endif

void helper_tw (target_ulong arg1, target_ulong arg2, uint32_t flags)
{
    if (!likely(!(((int32_t)arg1 < (int32_t)arg2 && (flags & 0x10)) ||
                  ((int32_t)arg1 > (int32_t)arg2 && (flags & 0x08)) ||
                  ((int32_t)arg1 == (int32_t)arg2 && (flags & 0x04)) ||
                  ((uint32_t)arg1 < (uint32_t)arg2 && (flags & 0x02)) ||
                  ((uint32_t)arg1 > (uint32_t)arg2 && (flags & 0x01))))) {
        helper_raise_exception_err(POWERPC_EXCP_PROGRAM, POWERPC_EXCP_TRAP);
    }
}

#if defined(TARGET_PPC64)
void helper_td (target_ulong arg1, target_ulong arg2, uint32_t flags)
{
    if (!likely(!(((int64_t)arg1 < (int64_t)arg2 && (flags & 0x10)) ||
                  ((int64_t)arg1 > (int64_t)arg2 && (flags & 0x08)) ||
                  ((int64_t)arg1 == (int64_t)arg2 && (flags & 0x04)) ||
                  ((uint64_t)arg1 < (uint64_t)arg2 && (flags & 0x02)) ||
                  ((uint64_t)arg1 > (uint64_t)arg2 && (flags & 0x01)))))
        helper_raise_exception_err(POWERPC_EXCP_PROGRAM, POWERPC_EXCP_TRAP);
}
#endif

/*****************************************************************************/
/* PowerPC 601 specific instructions (POWER bridge) */

target_ulong helper_clcs (uint32_t arg)
{
    switch (arg) {
    case 0x0CUL:
        /* Instruction cache line size */
        return env->icache_line_size;
        break;
    case 0x0DUL:
        /* Data cache line size */
        return env->dcache_line_size;
        break;
    case 0x0EUL:
        /* Minimum cache line size */
        return (env->icache_line_size < env->dcache_line_size) ?
                env->icache_line_size : env->dcache_line_size;
        break;
    case 0x0FUL:
        /* Maximum cache line size */
        return (env->icache_line_size > env->dcache_line_size) ?
                env->icache_line_size : env->dcache_line_size;
        break;
    default:
        /* Undefined */
        return 0;
        break;
    }
}

target_ulong helper_div (target_ulong arg1, target_ulong arg2)
{
    uint64_t tmp = (uint64_t)arg1 << 32 | env->spr[SPR_MQ];

    if (((int32_t)tmp == INT32_MIN && (int32_t)arg2 == (int32_t)-1) ||
        (int32_t)arg2 == 0) {
        env->spr[SPR_MQ] = 0;
        return INT32_MIN;
    } else {
        env->spr[SPR_MQ] = tmp % arg2;
        return tmp / (int32_t)arg2;
    }
}

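/* Worked example (editorial, not in the original source): POWER div treats
 * MQ as the low 32 bits of a 64-bit dividend.  With arg1 = 0, MQ = 100 and
 * arg2 = 7, tmp = 100, so the quotient 14 is returned and MQ is left
 * holding the remainder 2.
 */
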
target_ulong helper_divo (target_ulong arg1, target_ulong arg2)
{
    uint64_t tmp = (uint64_t)arg1 << 32 | env->spr[SPR_MQ];

    if (((int32_t)tmp == INT32_MIN && (int32_t)arg2 == (int32_t)-1) ||
        (int32_t)arg2 == 0) {
        env->xer |= (1 << XER_OV) | (1 << XER_SO);
        env->spr[SPR_MQ] = 0;
        return INT32_MIN;
    } else {
        env->spr[SPR_MQ] = tmp % arg2;
        tmp /= (int32_t)arg2;
        if ((int32_t)tmp != tmp) {
            env->xer |= (1 << XER_OV) | (1 << XER_SO);
        } else {
            env->xer &= ~(1 << XER_OV);
        }
        return tmp;
    }
}

target_ulong helper_divs (target_ulong arg1, target_ulong arg2)
{
    if (((int32_t)arg1 == INT32_MIN && (int32_t)arg2 == (int32_t)-1) ||
        (int32_t)arg2 == 0) {
        env->spr[SPR_MQ] = 0;
        return INT32_MIN;
    } else {
        env->spr[SPR_MQ] = (int32_t)arg1 % (int32_t)arg2;
        return (int32_t)arg1 / (int32_t)arg2;
    }
}

target_ulong helper_divso (target_ulong arg1, target_ulong arg2)
{
    if (((int32_t)arg1 == INT32_MIN && (int32_t)arg2 == (int32_t)-1) ||
        (int32_t)arg2 == 0) {
        env->xer |= (1 << XER_OV) | (1 << XER_SO);
        env->spr[SPR_MQ] = 0;
        return INT32_MIN;
    } else {
        env->xer &= ~(1 << XER_OV);
        env->spr[SPR_MQ] = (int32_t)arg1 % (int32_t)arg2;
        return (int32_t)arg1 / (int32_t)arg2;
    }
}

#if !defined (CONFIG_USER_ONLY)
target_ulong helper_rac (target_ulong addr)
{
    mmu_ctx_t ctx;
    int nb_BATs;
    target_ulong ret = 0;

    /* We don't have to generate many instances of this instruction,
     * as rac is supervisor only.
     */
    /* XXX: FIX THIS: Pretend we have no BAT */
    nb_BATs = env->nb_BATs;
    env->nb_BATs = 0;
    if (get_physical_address(env, &ctx, addr, 0, ACCESS_INT) == 0)
        ret = ctx.raddr;
    env->nb_BATs = nb_BATs;
    return ret;
}

void helper_rfsvc (void)
{
    do_rfi(env->lr, env->ctr, 0x0000FFFF, 0);
}
#endif

/*****************************************************************************/
/* 602 specific instructions */
/* mfrom is the most crazy instruction ever seen, imho ! */
/* Real implementation uses a ROM table. Do the same */
/* Extremely decomposed:
 * return 256 * log10(10^(-arg / 256) + 1.0) + 0.5
 */
#if !defined (CONFIG_USER_ONLY)
target_ulong helper_602_mfrom (target_ulong arg)
{
    if (likely(arg < 602)) {
#include "mfrom_table.c"
        return mfrom_ROM_table[arg];
    } else {
        return 0;
    }
}
#endif

/*****************************************************************************/
/* Embedded PowerPC specific helpers */

/* XXX: to be improved to check access rights when in user-mode */
target_ulong helper_load_dcr (target_ulong dcrn)
{
    target_ulong val = 0;

    if (unlikely(env->dcr_env == NULL)) {
        qemu_log("No DCR environment\n");
        helper_raise_exception_err(POWERPC_EXCP_PROGRAM,
                                   POWERPC_EXCP_INVAL | POWERPC_EXCP_INVAL_INVAL);
    } else if (unlikely(ppc_dcr_read(env->dcr_env, dcrn, &val) != 0)) {
        qemu_log("DCR read error %d %03x\n", (int)dcrn, (int)dcrn);
        helper_raise_exception_err(POWERPC_EXCP_PROGRAM,
                                   POWERPC_EXCP_INVAL | POWERPC_EXCP_PRIV_REG);
    }
    return val;
}

void helper_store_dcr (target_ulong dcrn, target_ulong val)
{
    if (unlikely(env->dcr_env == NULL)) {
        qemu_log("No DCR environment\n");
        helper_raise_exception_err(POWERPC_EXCP_PROGRAM,
                                   POWERPC_EXCP_INVAL | POWERPC_EXCP_INVAL_INVAL);
    } else if (unlikely(ppc_dcr_write(env->dcr_env, dcrn, val) != 0)) {
        qemu_log("DCR write error %d %03x\n", (int)dcrn, (int)dcrn);
        helper_raise_exception_err(POWERPC_EXCP_PROGRAM,
                                   POWERPC_EXCP_INVAL | POWERPC_EXCP_PRIV_REG);
    }
}

#if !defined(CONFIG_USER_ONLY)
void helper_40x_rfci (void)
{
    do_rfi(env->spr[SPR_40x_SRR2], env->spr[SPR_40x_SRR3],
           ~((target_ulong)0xFFFF0000), 0);
}

void helper_rfci (void)
{
    do_rfi(env->spr[SPR_BOOKE_CSRR0], env->spr[SPR_BOOKE_CSRR1],
           ~((target_ulong)0x3FFF0000), 0);
}

void helper_rfdi (void)
{
    do_rfi(env->spr[SPR_BOOKE_DSRR0], env->spr[SPR_BOOKE_DSRR1],
           ~((target_ulong)0x3FFF0000), 0);
}

void helper_rfmci (void)
{
    do_rfi(env->spr[SPR_BOOKE_MCSRR0], env->spr[SPR_BOOKE_MCSRR1],
           ~((target_ulong)0x3FFF0000), 0);
}
#endif

/* 440 specific */
target_ulong helper_dlmzb (target_ulong high, target_ulong low, uint32_t update_Rc)
{
    target_ulong mask;
    int i;

    i = 1;
    for (mask = 0xFF000000; mask != 0; mask = mask >> 8) {
        if ((high & mask) == 0) {
            if (update_Rc) {
                env->crf[0] = 0x4;
            }
            goto done;
        }
        i++;
    }
    for (mask = 0xFF000000; mask != 0; mask = mask >> 8) {
        if ((low & mask) == 0) {
            if (update_Rc) {
                env->crf[0] = 0x8;
            }
            goto done;
        }
        i++;
    }
    if (update_Rc) {
        env->crf[0] = 0x2;
    }
 done:
    env->xer = (env->xer & ~0x7F) | i;
    if (update_Rc) {
        env->crf[0] |= xer_so;
    }
    return i;
}

/*****************************************************************************/
/* Altivec extension helpers */
#if defined(WORDS_BIGENDIAN)
#define HI_IDX 0
#define LO_IDX 1
#else
#define HI_IDX 1
#define LO_IDX 0
#endif

#if defined(WORDS_BIGENDIAN)
#define VECTOR_FOR_INORDER_I(index, element)            \
    for (index = 0; index < ARRAY_SIZE(r->element); index++)
#else
#define VECTOR_FOR_INORDER_I(index, element)            \
    for (index = ARRAY_SIZE(r->element)-1; index >= 0; index--)
#endif

/* If X is a NaN, store the corresponding QNaN into RESULT.  Otherwise,
 * execute the following block.  */
#define DO_HANDLE_NAN(result, x)                                \
    if (float32_is_nan(x) || float32_is_signaling_nan(x)) {     \
        CPU_FloatU __f;                                         \
        __f.f = x;                                              \
        __f.l = __f.l | (1 << 22);  /* Set QNaN bit. */         \
        result = __f.f;                                         \
    } else

#define HANDLE_NAN1(result, x)                  \
    DO_HANDLE_NAN(result, x)
#define HANDLE_NAN2(result, x, y)               \
    DO_HANDLE_NAN(result, x) DO_HANDLE_NAN(result, y)
#define HANDLE_NAN3(result, x, y, z)            \
    DO_HANDLE_NAN(result, x) DO_HANDLE_NAN(result, y) DO_HANDLE_NAN(result, z)

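/* Editorial note (not in the original source): the trailing "else" is what
 * makes these macros act as statement prefixes; a typical use is
 *     HANDLE_NAN2(r->f[i], a->f[i], b->f[i]) {
 *         r->f[i] = float32_add(a->f[i], b->f[i], &env->vec_status);
 *     }
 * which only runs the block when neither input is a NaN.
 */
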
/* Saturating arithmetic helpers.  */
#define SATCVT(from, to, from_type, to_type, min, max, use_min, use_max) \
    static always_inline to_type cvt##from##to (from_type x, int *sat)  \
    {                                                                   \
        to_type r;                                                      \
        if (use_min && x < min) {                                       \
            r = min;                                                    \
            *sat = 1;                                                   \
        } else if (use_max && x > max) {                                \
            r = max;                                                    \
            *sat = 1;                                                   \
        } else {                                                        \
            r = x;                                                      \
        }                                                               \
        return r;                                                       \
    }
SATCVT(sh, sb, int16_t, int8_t, INT8_MIN, INT8_MAX, 1, 1)
SATCVT(sw, sh, int32_t, int16_t, INT16_MIN, INT16_MAX, 1, 1)
SATCVT(sd, sw, int64_t, int32_t, INT32_MIN, INT32_MAX, 1, 1)
SATCVT(uh, ub, uint16_t, uint8_t, 0, UINT8_MAX, 0, 1)
SATCVT(uw, uh, uint32_t, uint16_t, 0, UINT16_MAX, 0, 1)
SATCVT(ud, uw, uint64_t, uint32_t, 0, UINT32_MAX, 0, 1)
SATCVT(sh, ub, int16_t, uint8_t, 0, UINT8_MAX, 1, 1)
SATCVT(sw, uh, int32_t, uint16_t, 0, UINT16_MAX, 1, 1)
SATCVT(sd, uw, int64_t, uint32_t, 0, UINT32_MAX, 1, 1)
#undef SATCVT

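/* Editorial example (not in the original source): SATCVT(sh, sb, ...)
 * above expands to a helper cvtshsb() that clamps an int16_t into int8_t
 * range, so cvtshsb(300, &sat) returns 127 and sets *sat = 1.
 */
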
#define LVE(name, access, swap, element)                        \
    void helper_##name (ppc_avr_t *r, target_ulong addr)        \
    {                                                           \
        size_t n_elems = ARRAY_SIZE(r->element);                \
        int adjust = HI_IDX*(n_elems-1);                        \
        int sh = sizeof(r->element[0]) >> 1;                    \
        int index = (addr & 0xf) >> sh;                         \
        if (msr_le) {                                           \
            r->element[LO_IDX ? index : (adjust - index)] = swap(access(addr)); \
        } else {                                                        \
            r->element[LO_IDX ? index : (adjust - index)] = access(addr); \
        }                                                               \
    }
#define I(x) (x)
LVE(lvebx, ldub, I, u8)
LVE(lvehx, lduw, bswap16, u16)
LVE(lvewx, ldl, bswap32, u32)
#undef I
#undef LVE

void helper_lvsl (ppc_avr_t *r, target_ulong sh)
{
    int i, j = (sh & 0xf);

    VECTOR_FOR_INORDER_I (i, u8) {
        r->u8[i] = j++;
    }
}

void helper_lvsr (ppc_avr_t *r, target_ulong sh)
{
    int i, j = 0x10 - (sh & 0xf);

    VECTOR_FOR_INORDER_I (i, u8) {
        r->u8[i] = j++;
    }
}

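/* Editorial example (not in the original source): lvsl builds the permute
 * control vector used with vperm for unaligned accesses; for sh & 0xf == 3
 * it yields the byte sequence 0x03 0x04 ... 0x12, while lvsr yields
 * 0x0D 0x0E ... 0x1C.
 */
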
#define STVE(name, access, swap, element)                       \
    void helper_##name (ppc_avr_t *r, target_ulong addr)        \
    {                                                           \
        size_t n_elems = ARRAY_SIZE(r->element);                \
        int adjust = HI_IDX*(n_elems-1);                        \
        int sh = sizeof(r->element[0]) >> 1;                    \
        int index = (addr & 0xf) >> sh;                         \
        if (msr_le) {                                           \
            access(addr, swap(r->element[LO_IDX ? index : (adjust - index)])); \
        } else {                                                        \
            access(addr, r->element[LO_IDX ? index : (adjust - index)]); \
        }                                                               \
    }
#define I(x) (x)
STVE(stvebx, stb, I, u8)
STVE(stvehx, stw, bswap16, u16)
STVE(stvewx, stl, bswap32, u32)
#undef I
#undef STVE

void helper_mtvscr (ppc_avr_t *r)
{
#if defined(WORDS_BIGENDIAN)
    env->vscr = r->u32[3];
#else
    env->vscr = r->u32[0];
#endif
    set_flush_to_zero(vscr_nj, &env->vec_status);
}

void helper_vaddcuw (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
{
    int i;

    for (i = 0; i < ARRAY_SIZE(r->u32); i++) {
        r->u32[i] = ~a->u32[i] < b->u32[i];
    }
}

#define VARITH_DO(name, op, element)                                    \
    void helper_v##name (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)      \
    {                                                                   \
        int i;                                                          \
        for (i = 0; i < ARRAY_SIZE(r->element); i++) {                  \
            r->element[i] = a->element[i] op b->element[i];             \
        }                                                               \
    }
#define VARITH(suffix, element)                 \
    VARITH_DO(add##suffix, +, element)          \
    VARITH_DO(sub##suffix, -, element)
VARITH(ubm, u8)
VARITH(uhm, u16)
VARITH(uwm, u32)
#undef VARITH_DO
#undef VARITH

#define VARITHSAT_CASE(type, op, cvt, element)                          \
    {                                                                   \
        type result = (type)a->element[i] op (type)b->element[i];       \
        r->element[i] = cvt(result, &sat);                              \
    }

#define VARITHSAT_DO(name, op, optype, cvt, element)                    \
    void helper_v##name (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)      \
    {                                                                   \
        int sat = 0;                                                    \
        int i;                                                          \
        for (i = 0; i < ARRAY_SIZE(r->element); i++) {                  \
            switch (sizeof(r->element[0])) {                            \
            case 1: VARITHSAT_CASE(optype, op, cvt, element); break;    \
            case 2: VARITHSAT_CASE(optype, op, cvt, element); break;    \
            case 4: VARITHSAT_CASE(optype, op, cvt, element); break;    \
            }                                                           \
        }                                                               \
        if (sat) {                                                      \
            env->vscr |= (1 << VSCR_SAT);                               \
        }                                                               \
    }
#define VARITHSAT_SIGNED(suffix, element, optype, cvt)        \
    VARITHSAT_DO(adds##suffix##s, +, optype, cvt, element)    \
    VARITHSAT_DO(subs##suffix##s, -, optype, cvt, element)
#define VARITHSAT_UNSIGNED(suffix, element, optype, cvt)      \
    VARITHSAT_DO(addu##suffix##s, +, optype, cvt, element)    \
    VARITHSAT_DO(subu##suffix##s, -, optype, cvt, element)
VARITHSAT_SIGNED(b, s8, int16_t, cvtshsb)
VARITHSAT_SIGNED(h, s16, int32_t, cvtswsh)
VARITHSAT_SIGNED(w, s32, int64_t, cvtsdsw)
VARITHSAT_UNSIGNED(b, u8, uint16_t, cvtshub)
VARITHSAT_UNSIGNED(h, u16, uint32_t, cvtswuh)
VARITHSAT_UNSIGNED(w, u32, uint64_t, cvtsduw)
#undef VARITHSAT_CASE
#undef VARITHSAT_DO
#undef VARITHSAT_SIGNED
#undef VARITHSAT_UNSIGNED

#define VAVG_DO(name, element, etype)                                   \
    void helper_v##name (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)      \
    {                                                                   \
        int i;                                                          \
        for (i = 0; i < ARRAY_SIZE(r->element); i++) {                  \
            etype x = (etype)a->element[i] + (etype)b->element[i] + 1;  \
            r->element[i] = x >> 1;                                     \
        }                                                               \
    }

#define VAVG(type, signed_element, signed_type, unsigned_element, unsigned_type) \
    VAVG_DO(avgs##type, signed_element, signed_type)                    \
    VAVG_DO(avgu##type, unsigned_element, unsigned_type)
VAVG(b, s8, int16_t, u8, uint16_t)
VAVG(h, s16, int32_t, u16, uint32_t)
VAVG(w, s32, int64_t, u32, uint64_t)
#undef VAVG_DO
#undef VAVG

#define VCF(suffix, cvt, element)                                       \
    void helper_vcf##suffix (ppc_avr_t *r, ppc_avr_t *b, uint32_t uim)  \
    {                                                                   \
        int i;                                                          \
        for (i = 0; i < ARRAY_SIZE(r->f); i++) {                        \
            float32 t = cvt(b->element[i], &env->vec_status);           \
            r->f[i] = float32_scalbn (t, -uim, &env->vec_status);       \
        }                                                               \
    }
VCF(ux, uint32_to_float32, u32)
VCF(sx, int32_to_float32, s32)
#undef VCF

#define VCMP_DO(suffix, compare, element, record)                       \
    void helper_vcmp##suffix (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b) \
    {                                                                   \
        uint32_t ones = (uint32_t)-1;                                   \
        uint32_t all = ones;                                            \
        uint32_t none = 0;                                              \
        int i;                                                          \
        for (i = 0; i < ARRAY_SIZE(r->element); i++) {                  \
            uint32_t result = (a->element[i] compare b->element[i] ? ones : 0x0); \
            switch (sizeof (a->element[0])) {                           \
            case 4: r->u32[i] = result; break;                          \
            case 2: r->u16[i] = result; break;                          \
            case 1: r->u8[i] = result; break;                           \
            }                                                           \
            all &= result;                                              \
            none |= result;                                             \
        }                                                               \
        if (record) {                                                   \
            env->crf[6] = ((all != 0) << 3) | ((none == 0) << 1);       \
        }                                                               \
    }
#define VCMP(suffix, compare, element)                  \
    VCMP_DO(suffix, compare, element, 0)                \
    VCMP_DO(suffix##_dot, compare, element, 1)
VCMP(equb, ==, u8)
VCMP(equh, ==, u16)
VCMP(equw, ==, u32)
VCMP(gtub, >, u8)
VCMP(gtuh, >, u16)
VCMP(gtuw, >, u32)
VCMP(gtsb, >, s8)
VCMP(gtsh, >, s16)
VCMP(gtsw, >, s32)
#undef VCMP_DO
#undef VCMP

void helper_vmhaddshs (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, ppc_avr_t *c)
{
    int sat = 0;
    int i;

    for (i = 0; i < ARRAY_SIZE(r->s16); i++) {
        int32_t prod = a->s16[i] * b->s16[i];
        int32_t t = (int32_t)c->s16[i] + (prod >> 15);
        r->s16[i] = cvtswsh (t, &sat);
    }

    if (sat) {
        env->vscr |= (1 << VSCR_SAT);
    }
}

void helper_vmhraddshs (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, ppc_avr_t *c)
{
    int sat = 0;
    int i;

    for (i = 0; i < ARRAY_SIZE(r->s16); i++) {
        int32_t prod = a->s16[i] * b->s16[i] + 0x00004000;
        int32_t t = (int32_t)c->s16[i] + (prod >> 15);
        r->s16[i] = cvtswsh (t, &sat);
    }

    if (sat) {
        env->vscr |= (1 << VSCR_SAT);
    }
}

#define VMINMAX_DO(name, compare, element)                              \
    void helper_v##name (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)      \
    {                                                                   \
        int i;                                                          \
        for (i = 0; i < ARRAY_SIZE(r->element); i++) {                  \
            if (a->element[i] compare b->element[i]) {                  \
                r->element[i] = b->element[i];                          \
            } else {                                                    \
                r->element[i] = a->element[i];                          \
            }                                                           \
        }                                                               \
    }
#define VMINMAX(suffix, element)                \
    VMINMAX_DO(min##suffix, >, element)         \
    VMINMAX_DO(max##suffix, <, element)
VMINMAX(sb, s8)
VMINMAX(sh, s16)
VMINMAX(sw, s32)
VMINMAX(ub, u8)
VMINMAX(uh, u16)
VMINMAX(uw, u32)
#undef VMINMAX_DO
#undef VMINMAX

void helper_vmladduhm (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, ppc_avr_t *c)
{
    int i;

    for (i = 0; i < ARRAY_SIZE(r->s16); i++) {
        int32_t prod = a->s16[i] * b->s16[i];
        r->s16[i] = (int16_t) (prod + c->s16[i]);
    }
}

#define VMRG_DO(name, element, highp)                                   \
    void helper_v##name (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)      \
    {                                                                   \
        ppc_avr_t result;                                               \
        int i;                                                          \
        size_t n_elems = ARRAY_SIZE(r->element);                        \
        for (i = 0; i < n_elems/2; i++) {                               \
            if (highp) {                                                \
                result.element[i*2+HI_IDX] = a->element[i];             \
                result.element[i*2+LO_IDX] = b->element[i];             \
            } else {                                                    \
                result.element[n_elems - i*2 - (1+HI_IDX)] = b->element[n_elems - i - 1]; \
                result.element[n_elems - i*2 - (1+LO_IDX)] = a->element[n_elems - i - 1]; \
            }                                                           \
        }                                                               \
        *r = result;                                                    \
    }
#if defined(WORDS_BIGENDIAN)
#define MRGHI 0
#define MRGLO 1
#else
#define MRGHI 1
#define MRGLO 0
#endif
#define VMRG(suffix, element)                   \
    VMRG_DO(mrgl##suffix, element, MRGHI)       \
    VMRG_DO(mrgh##suffix, element, MRGLO)
VMRG(b, u8)
VMRG(h, u16)
VMRG(w, u32)
#undef VMRG_DO
#undef VMRG
#undef MRGHI
#undef MRGLO

void helper_vmsummbm (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, ppc_avr_t *c)
{
    int32_t prod[16];
    int i;

    for (i = 0; i < ARRAY_SIZE(r->s8); i++) {
        prod[i] = (int32_t)a->s8[i] * b->u8[i];
    }

    VECTOR_FOR_INORDER_I(i, s32) {
        r->s32[i] = c->s32[i] + prod[4*i] + prod[4*i+1] + prod[4*i+2] + prod[4*i+3];
    }
}

void helper_vmsumshm (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, ppc_avr_t *c)
{
    int32_t prod[8];
    int i;

    for (i = 0; i < ARRAY_SIZE(r->s16); i++) {
        prod[i] = a->s16[i] * b->s16[i];
    }

    VECTOR_FOR_INORDER_I(i, s32) {
        r->s32[i] = c->s32[i] + prod[2*i] + prod[2*i+1];
    }
}

void helper_vmsumshs (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, ppc_avr_t *c)
{
    int32_t prod[8];
    int i;
    int sat = 0;

    for (i = 0; i < ARRAY_SIZE(r->s16); i++) {
        prod[i] = (int32_t)a->s16[i] * b->s16[i];
    }

    VECTOR_FOR_INORDER_I (i, s32) {
        int64_t t = (int64_t)c->s32[i] + prod[2*i] + prod[2*i+1];
        r->u32[i] = cvtsdsw(t, &sat);
    }

    if (sat) {
        env->vscr |= (1 << VSCR_SAT);
    }
}

void helper_vmsumubm (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, ppc_avr_t *c)
{
    uint16_t prod[16];
    int i;

    for (i = 0; i < ARRAY_SIZE(r->u8); i++) {
        prod[i] = a->u8[i] * b->u8[i];
    }

    VECTOR_FOR_INORDER_I(i, u32) {
        r->u32[i] = c->u32[i] + prod[4*i] + prod[4*i+1] + prod[4*i+2] + prod[4*i+3];
    }
}

void helper_vmsumuhm (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, ppc_avr_t *c)
{
    uint32_t prod[8];
    int i;

    for (i = 0; i < ARRAY_SIZE(r->u16); i++) {
        prod[i] = a->u16[i] * b->u16[i];
    }

    VECTOR_FOR_INORDER_I(i, u32) {
        r->u32[i] = c->u32[i] + prod[2*i] + prod[2*i+1];
    }
}

void helper_vmsumuhs (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, ppc_avr_t *c)
{
    uint32_t prod[8];
    int i;
    int sat = 0;

    for (i = 0; i < ARRAY_SIZE(r->u16); i++) {
        prod[i] = a->u16[i] * b->u16[i];
    }

    VECTOR_FOR_INORDER_I (i, s32) {
        uint64_t t = (uint64_t)c->u32[i] + prod[2*i] + prod[2*i+1];
        r->u32[i] = cvtuduw(t, &sat);
    }

    if (sat) {
        env->vscr |= (1 << VSCR_SAT);
    }
}

#define VMUL_DO(name, mul_element, prod_element, evenp)                 \
    void helper_v##name (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)      \
    {                                                                   \
        int i;                                                          \
        VECTOR_FOR_INORDER_I(i, prod_element) {                         \
            if (evenp) {                                                \
                r->prod_element[i] = a->mul_element[i*2+HI_IDX] * b->mul_element[i*2+HI_IDX]; \
            } else {                                                    \
                r->prod_element[i] = a->mul_element[i*2+LO_IDX] * b->mul_element[i*2+LO_IDX]; \
            }                                                           \
        }                                                               \
    }
#define VMUL(suffix, mul_element, prod_element)         \
    VMUL_DO(mule##suffix, mul_element, prod_element, 1) \
    VMUL_DO(mulo##suffix, mul_element, prod_element, 0)
VMUL(sb, s8, s16)
VMUL(sh, s16, s32)
VMUL(ub, u8, u16)
VMUL(uh, u16, u32)
#undef VMUL_DO
#undef VMUL

void helper_vperm (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, ppc_avr_t *c)
{
    ppc_avr_t result;
    int i;

    VECTOR_FOR_INORDER_I (i, u8) {
        int s = c->u8[i] & 0x1f;
#if defined(WORDS_BIGENDIAN)
        int index = s & 0xf;
#else
        int index = 15 - (s & 0xf);
#endif
        if (s & 0x10) {
            result.u8[i] = b->u8[index];
        } else {
            result.u8[i] = a->u8[index];
        }
    }
    *r = result;
}

#if defined(WORDS_BIGENDIAN)
#define PKBIG 1
#else
#define PKBIG 0
#endif
void helper_vpkpx (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
{
    int i, j;
    ppc_avr_t result;
#if defined(WORDS_BIGENDIAN)
    const ppc_avr_t *x[2] = { a, b };
#else
    const ppc_avr_t *x[2] = { b, a };
#endif

    VECTOR_FOR_INORDER_I (i, u64) {
        VECTOR_FOR_INORDER_I (j, u32){
            uint32_t e = x[i]->u32[j];
            result.u16[4*i+j] = (((e >> 9) & 0xfc00) |
                                 ((e >> 6) & 0x3e0) |
                                 ((e >> 3) & 0x1f));
        }
    }
    *r = result;
}

#define VPK(suffix, from, to, cvt, dosat)       \
    void helper_vpk##suffix (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b) \
    {                                                                   \
        int i;                                                          \
        int sat = 0;                                                    \
        ppc_avr_t result;                                               \
        ppc_avr_t *a0 = PKBIG ? a : b;                                  \
        ppc_avr_t *a1 = PKBIG ? b : a;                                  \
        VECTOR_FOR_INORDER_I (i, from) {                                \
            result.to[i] = cvt(a0->from[i], &sat);                      \
            result.to[i+ARRAY_SIZE(r->from)] = cvt(a1->from[i], &sat);  \
        }                                                               \
        *r = result;                                                    \
        if (dosat && sat) {                                             \
            env->vscr |= (1 << VSCR_SAT);                               \
        }                                                               \
    }
#define I(x, y) (x)
VPK(shss, s16, s8, cvtshsb, 1)
VPK(shus, s16, u8, cvtshub, 1)
VPK(swss, s32, s16, cvtswsh, 1)
VPK(swus, s32, u16, cvtswuh, 1)
VPK(uhus, u16, u8, cvtuhub, 1)
VPK(uwus, u32, u16, cvtuwuh, 1)
VPK(uhum, u16, u8, I, 0)
VPK(uwum, u32, u16, I, 0)
#undef I
#undef VPK
#undef PKBIG

#define VRFI(suffix, rounding)                                          \
    void helper_vrfi##suffix (ppc_avr_t *r, ppc_avr_t *b)               \
    {                                                                   \
        int i;                                                          \
        float_status s = env->vec_status;                               \
        set_float_rounding_mode(rounding, &s);                          \
        for (i = 0; i < ARRAY_SIZE(r->f); i++) {                        \
            HANDLE_NAN1(r->f[i], b->f[i]) {                             \
                r->f[i] = float32_round_to_int (b->f[i], &s);           \
            }                                                           \
        }                                                               \
    }
VRFI(n, float_round_nearest_even)
VRFI(m, float_round_down)
VRFI(p, float_round_up)
VRFI(z, float_round_to_zero)
#undef VRFI

#define VROTATE(suffix, element)                                        \
    void helper_vrl##suffix (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)  \
    {                                                                   \
        int i;                                                          \
        for (i = 0; i < ARRAY_SIZE(r->element); i++) {                  \
            unsigned int mask = ((1 << (3 + (sizeof (a->element[0]) >> 1))) - 1); \
            unsigned int shift = b->element[i] & mask;                  \
            r->element[i] = (a->element[i] << shift) | (a->element[i] >> (sizeof(a->element[0]) * 8 - shift)); \
        }                                                               \
    }
VROTATE(b, u8)
VROTATE(h, u16)
VROTATE(w, u32)
#undef VROTATE

void helper_vsel (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, ppc_avr_t *c)
{
    r->u64[0] = (a->u64[0] & ~c->u64[0]) | (b->u64[0] & c->u64[0]);
    r->u64[1] = (a->u64[1] & ~c->u64[1]) | (b->u64[1] & c->u64[1]);
}

void helper_vlogefp (ppc_avr_t *r, ppc_avr_t *b)
{
    int i;

    for (i = 0; i < ARRAY_SIZE(r->f); i++) {
        HANDLE_NAN1(r->f[i], b->f[i]) {
            r->f[i] = float32_log2(b->f[i], &env->vec_status);
        }
    }
}

#if defined(WORDS_BIGENDIAN)
#define LEFT 0
#define RIGHT 1
#else
#define LEFT 1
#define RIGHT 0
#endif
/* The specification says that the results are undefined if all of the
 * shift counts are not identical.  We check to make sure that they are
 * to conform to what real hardware appears to do.  */
#define VSHIFT(suffix, leftp)                                           \
    void helper_vs##suffix (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)   \
    {                                                                   \
        int shift = b->u8[LO_IDX*0x15] & 0x7;                           \
        int doit = 1;                                                   \
        int i;                                                          \
        for (i = 0; i < ARRAY_SIZE(r->u8); i++) {                       \
            doit = doit && ((b->u8[i] & 0x7) == shift);                 \
        }                                                               \
        if (doit) {                                                     \
            if (shift == 0) {                                           \
                *r = *a;                                                \
            } else if (leftp) {                                         \
                uint64_t carry = a->u64[LO_IDX] >> (64 - shift);        \
                r->u64[HI_IDX] = (a->u64[HI_IDX] << shift) | carry;     \
                r->u64[LO_IDX] = a->u64[LO_IDX] << shift;               \
            } else {                                                    \
                uint64_t carry = a->u64[HI_IDX] << (64 - shift);        \
                r->u64[LO_IDX] = (a->u64[LO_IDX] >> shift) | carry;     \
                r->u64[HI_IDX] = a->u64[HI_IDX] >> shift;               \
            }                                                           \
        }                                                               \
    }
VSHIFT(l, LEFT)
VSHIFT(r, RIGHT)
#undef VSHIFT
#undef LEFT
#undef RIGHT

#define VSL(suffix, element)                                            \
    void helper_vsl##suffix (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)  \
    {                                                                   \
        int i;                                                          \
        for (i = 0; i < ARRAY_SIZE(r->element); i++) {                  \
            unsigned int mask = ((1 << (3 + (sizeof (a->element[0]) >> 1))) - 1); \
            unsigned int shift = b->element[i] & mask;                  \
            r->element[i] = a->element[i] << shift;                     \
        }                                                               \
    }
VSL(b, u8)
VSL(h, u16)
VSL(w, u32)
#undef VSL

void helper_vsldoi (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, uint32_t shift)
{
    int sh = shift & 0xf;
    int i;
    ppc_avr_t result;

#if defined(WORDS_BIGENDIAN)
    for (i = 0; i < ARRAY_SIZE(r->u8); i++) {
        int index = sh + i;
        if (index > 0xf) {
            result.u8[i] = b->u8[index-0x10];
        } else {
            result.u8[i] = a->u8[index];
        }
    }
#else
    for (i = 0; i < ARRAY_SIZE(r->u8); i++) {
        int index = (16 - sh) + i;
        if (index > 0xf) {
            result.u8[i] = a->u8[index-0x10];
        } else {
            result.u8[i] = b->u8[index];
        }
    }
#endif
    *r = result;
}

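/* Conceptually (example values only): vsldoi treats a:b as one 32-byte
 * value in guest byte order and extracts 16 bytes starting at offset sh.
 * With sh = 3, r = {a[3],...,a[15],b[0],b[1],b[2]}. */
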
void helper_vslo (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
{
    int sh = (b->u8[LO_IDX*0xf] >> 3) & 0xf;

#if defined (WORDS_BIGENDIAN)
    memmove (&r->u8[0], &a->u8[sh], 16-sh);
    memset (&r->u8[16-sh], 0, sh);
#else
    memmove (&r->u8[sh], &a->u8[0], 16-sh);
    memset (&r->u8[0], 0, sh);
#endif
}

/* Experimental testing shows that hardware masks the immediate.  */
#define _SPLAT_MASKED(element) (splat & (ARRAY_SIZE(r->element) - 1))
#if defined(WORDS_BIGENDIAN)
#define SPLAT_ELEMENT(element) _SPLAT_MASKED(element)
#else
#define SPLAT_ELEMENT(element) (ARRAY_SIZE(r->element)-1 - _SPLAT_MASKED(element))
#endif
#define VSPLT(suffix, element)                                          \
    void helper_vsplt##suffix (ppc_avr_t *r, ppc_avr_t *b, uint32_t splat) \
    {                                                                   \
        uint32_t s = b->element[SPLAT_ELEMENT(element)];                \
        int i;                                                          \
        for (i = 0; i < ARRAY_SIZE(r->element); i++) {                  \
            r->element[i] = s;                                          \
        }                                                               \
    }
VSPLT(b, u8)
VSPLT(h, u16)
VSPLT(w, u32)
#undef VSPLT
#undef SPLAT_ELEMENT
#undef _SPLAT_MASKED

#define VSPLTI(suffix, element, splat_type)                     \
    void helper_vspltis##suffix (ppc_avr_t *r, uint32_t splat)  \
    {                                                           \
        splat_type x = (int8_t)(splat << 3) >> 3;               \
        int i;                                                  \
        for (i = 0; i < ARRAY_SIZE(r->element); i++) {          \
            r->element[i] = x;                                  \
        }                                                       \
    }
VSPLTI(b, s8, int8_t)
VSPLTI(h, s16, int16_t)
VSPLTI(w, s32, int32_t)
#undef VSPLTI

#define VSR(suffix, element)                                            \
    void helper_vsr##suffix (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)  \
    {                                                                   \
        int i;                                                          \
        for (i = 0; i < ARRAY_SIZE(r->element); i++) {                  \
            unsigned int mask = ((1 << (3 + (sizeof (a->element[0]) >> 1))) - 1); \
            unsigned int shift = b->element[i] & mask;                  \
            r->element[i] = a->element[i] >> shift;                     \
        }                                                               \
    }
VSR(ab, s8)
VSR(ah, s16)
VSR(aw, s32)
VSR(b, u8)
VSR(h, u16)
VSR(w, u32)
#undef VSR

void helper_vsro (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
{
    int sh = (b->u8[LO_IDX*0xf] >> 3) & 0xf;

#if defined (WORDS_BIGENDIAN)
    memmove (&r->u8[sh], &a->u8[0], 16-sh);
    memset (&r->u8[0], 0, sh);
#else
    memmove (&r->u8[0], &a->u8[sh], 16-sh);
    memset (&r->u8[16-sh], 0, sh);
#endif
}

void helper_vsubcuw (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
{
    int i;

    for (i = 0; i < ARRAY_SIZE(r->u32); i++) {
        r->u32[i] = a->u32[i] >= b->u32[i];
    }
}

void helper_vsumsws (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
{
    int64_t t;
    int i, upper;
    ppc_avr_t result;
    int sat = 0;

#if defined(WORDS_BIGENDIAN)
    upper = ARRAY_SIZE(r->s32)-1;
#else
    upper = 0;
#endif
    t = (int64_t)b->s32[upper];
    for (i = 0; i < ARRAY_SIZE(r->s32); i++) {
        t += a->s32[i];
        result.s32[i] = 0;
    }
    result.s32[upper] = cvtsdsw(t, &sat);
    *r = result;

    if (sat) {
        env->vscr |= (1 << VSCR_SAT);
    }
}

void helper_vsum2sws (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
{
    int i, j, upper;
    ppc_avr_t result;
    int sat = 0;

#if defined(WORDS_BIGENDIAN)
    upper = 1;
#else
    upper = 0;
#endif
    for (i = 0; i < ARRAY_SIZE(r->u64); i++) {
        int64_t t = (int64_t)b->s32[upper+i*2];
        result.u64[i] = 0;
        for (j = 0; j < ARRAY_SIZE(r->u64); j++) {
            t += a->s32[2*i+j];
        }
        result.s32[upper+i*2] = cvtsdsw(t, &sat);
    }

    *r = result;
    if (sat) {
        env->vscr |= (1 << VSCR_SAT);
    }
}

void helper_vsum4sbs (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
{
    int i, j;
    int sat = 0;

    for (i = 0; i < ARRAY_SIZE(r->s32); i++) {
        int64_t t = (int64_t)b->s32[i];
        for (j = 0; j < ARRAY_SIZE(r->s32); j++) {
            t += a->s8[4*i+j];
        }
        r->s32[i] = cvtsdsw(t, &sat);
    }

    if (sat) {
        env->vscr |= (1 << VSCR_SAT);
    }
}

void helper_vsum4shs (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
{
    int sat = 0;
    int i;

    for (i = 0; i < ARRAY_SIZE(r->s32); i++) {
        int64_t t = (int64_t)b->s32[i];
        t += a->s16[2*i] + a->s16[2*i+1];
        r->s32[i] = cvtsdsw(t, &sat);
    }

    if (sat) {
        env->vscr |= (1 << VSCR_SAT);
    }
}

void helper_vsum4ubs (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
{
    int i, j;
    int sat = 0;

    for (i = 0; i < ARRAY_SIZE(r->u32); i++) {
        uint64_t t = (uint64_t)b->u32[i];
        for (j = 0; j < ARRAY_SIZE(r->u32); j++) {
            t += a->u8[4*i+j];
        }
        r->u32[i] = cvtuduw(t, &sat);
    }

    if (sat) {
        env->vscr |= (1 << VSCR_SAT);
    }
}

#if defined(WORDS_BIGENDIAN)
#define UPKHI 1
#define UPKLO 0
#else
#define UPKHI 0
#define UPKLO 1
#endif
#define VUPKPX(suffix, hi)                                      \
    void helper_vupk##suffix (ppc_avr_t *r, ppc_avr_t *b)       \
    {                                                           \
        int i;                                                  \
        ppc_avr_t result;                                       \
        for (i = 0; i < ARRAY_SIZE(r->u32); i++) {              \
            uint16_t e = b->u16[hi ? i : i+4];                  \
            uint8_t a = (e >> 15) ? 0xff : 0;                   \
            uint8_t r = (e >> 10) & 0x1f;                       \
            uint8_t g = (e >> 5) & 0x1f;                        \
            uint8_t b = e & 0x1f;                               \
            result.u32[i] = (a << 24) | (r << 16) | (g << 8) | b; \
        }                                                       \
        *r = result;                                            \
    }
VUPKPX(lpx, UPKLO)
VUPKPX(hpx, UPKHI)
#undef VUPKPX

#define VUPK(suffix, unpacked, packee, hi)                      \
    void helper_vupk##suffix (ppc_avr_t *r, ppc_avr_t *b)       \
    {                                                           \
        int i;                                                  \
        ppc_avr_t result;                                       \
        if (hi) {                                               \
            for (i = 0; i < ARRAY_SIZE(r->unpacked); i++) {     \
                result.unpacked[i] = b->packee[i];              \
            }                                                   \
        } else {                                                \
            for (i = ARRAY_SIZE(r->unpacked); i < ARRAY_SIZE(r->packee); i++) { \
                result.unpacked[i-ARRAY_SIZE(r->unpacked)] = b->packee[i]; \
            }                                                   \
        }                                                       \
        *r = result;                                            \
    }
VUPK(hsb, s16, s8, UPKHI)
VUPK(hsh, s32, s16, UPKHI)
VUPK(lsb, s16, s8, UPKLO)
VUPK(lsh, s32, s16, UPKLO)
#undef VUPK
#undef UPKHI
#undef UPKLO

#undef DO_HANDLE_NAN
#undef HANDLE_NAN1
#undef HANDLE_NAN2
#undef HANDLE_NAN3
#undef VECTOR_FOR_INORDER_I
#undef HI_IDX
#undef LO_IDX

/*****************************************************************************/
/* SPE extension helpers */
/* Use a table to make this quicker */
static uint8_t hbrev[16] = {
    0x0, 0x8, 0x4, 0xC, 0x2, 0xA, 0x6, 0xE,
    0x1, 0x9, 0x5, 0xD, 0x3, 0xB, 0x7, 0xF,
};

static always_inline uint8_t byte_reverse (uint8_t val)
{
    return hbrev[val >> 4] | (hbrev[val & 0xF] << 4);
}

static always_inline uint32_t word_reverse (uint32_t val)
{
    return byte_reverse(val >> 24) | (byte_reverse(val >> 16) << 8) |
        (byte_reverse(val >> 8) << 16) | (byte_reverse(val) << 24);
}

#define MASKBITS 16 // Random value - to be fixed (implementation dependent)
target_ulong helper_brinc (target_ulong arg1, target_ulong arg2)
{
    uint32_t a, b, d, mask;

    mask = UINT32_MAX >> (32 - MASKBITS);
    a = arg1 & mask;
    b = arg2 & mask;
    d = word_reverse(1 + word_reverse(a | ~b));
    return (arg1 & ~mask) | (d & b);
}

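/* Worked example (illustrative): brinc steps an index through bit-reversed
 * counting order, as used for FFT addressing. With a 3-bit mask the masked
 * index follows 000, 100, 010, 110, 001, 101, 011, 111; reversing, adding 1
 * and reversing back is what makes the carry propagate from the MSB end,
 * and the a | ~b term fills the non-mask bits so the carry skips them. */
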
uint32_t helper_cntlsw32 (uint32_t val)
{
    if (val & 0x80000000)
        return clz32(~val);
    else
        return clz32(val);
}

uint32_t helper_cntlzw32 (uint32_t val)
{
    return clz32(val);
}

/* Single-precision floating-point conversions */
static always_inline uint32_t efscfsi (uint32_t val)
{
    CPU_FloatU u;

    u.f = int32_to_float32(val, &env->vec_status);

    return u.l;
}

static always_inline uint32_t efscfui (uint32_t val)
{
    CPU_FloatU u;

    u.f = uint32_to_float32(val, &env->vec_status);

    return u.l;
}

static always_inline int32_t efsctsi (uint32_t val)
{
    CPU_FloatU u;

    u.l = val;
    /* NaNs are not treated the same way IEEE 754 does */
    if (unlikely(float32_is_nan(u.f)))
        return 0;

    return float32_to_int32(u.f, &env->vec_status);
}

static always_inline uint32_t efsctui (uint32_t val)
{
    CPU_FloatU u;

    u.l = val;
    /* NaNs are not treated the same way IEEE 754 does */
    if (unlikely(float32_is_nan(u.f)))
        return 0;

    return float32_to_uint32(u.f, &env->vec_status);
}

static always_inline uint32_t efsctsiz (uint32_t val)
{
    CPU_FloatU u;

    u.l = val;
    /* NaNs are not treated the same way IEEE 754 does */
    if (unlikely(float32_is_nan(u.f)))
        return 0;

    return float32_to_int32_round_to_zero(u.f, &env->vec_status);
}

static always_inline uint32_t efsctuiz (uint32_t val)
{
    CPU_FloatU u;

    u.l = val;
    /* NaNs are not treated the same way IEEE 754 does */
    if (unlikely(float32_is_nan(u.f)))
        return 0;

    return float32_to_uint32_round_to_zero(u.f, &env->vec_status);
}

static always_inline uint32_t efscfsf (uint32_t val)
{
    CPU_FloatU u;
    float32 tmp;

    u.f = int32_to_float32(val, &env->vec_status);
    tmp = int64_to_float32(1ULL << 32, &env->vec_status);
    u.f = float32_div(u.f, tmp, &env->vec_status);

    return u.l;
}

static always_inline uint32_t efscfuf (uint32_t val)
{
    CPU_FloatU u;
    float32 tmp;

    u.f = uint32_to_float32(val, &env->vec_status);
    tmp = uint64_to_float32(1ULL << 32, &env->vec_status);
    u.f = float32_div(u.f, tmp, &env->vec_status);

    return u.l;
}

static always_inline uint32_t efsctsf (uint32_t val)
{
    CPU_FloatU u;
    float32 tmp;

    u.l = val;
    /* NaNs are not treated the same way IEEE 754 does */
    if (unlikely(float32_is_nan(u.f)))
        return 0;
    tmp = uint64_to_float32(1ULL << 32, &env->vec_status);
    u.f = float32_mul(u.f, tmp, &env->vec_status);

    return float32_to_int32(u.f, &env->vec_status);
}

static always_inline uint32_t efsctuf (uint32_t val)
{
    CPU_FloatU u;
    float32 tmp;

    u.l = val;
    /* NaNs are not treated the same way IEEE 754 does */
    if (unlikely(float32_is_nan(u.f)))
        return 0;
    tmp = uint64_to_float32(1ULL << 32, &env->vec_status);
    u.f = float32_mul(u.f, tmp, &env->vec_status);

    return float32_to_uint32(u.f, &env->vec_status);
}

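/* As implemented above, the *sf/*uf variants treat the 32-bit integer as a
 * fixed-point fraction scaled by 2^32: efscfuf(0x80000000) yields 0.5f, and
 * efsctuf performs the reverse multiplication before converting back.
 * (This describes the behaviour of this code, not the SPE manual's wording.) */
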
#define HELPER_SPE_SINGLE_CONV(name)                                    \
uint32_t helper_e##name (uint32_t val)                                  \
{                                                                       \
    return e##name(val);                                                \
}
HELPER_SPE_SINGLE_CONV(fscfsi);
HELPER_SPE_SINGLE_CONV(fscfui);
HELPER_SPE_SINGLE_CONV(fscfuf);
HELPER_SPE_SINGLE_CONV(fscfsf);
HELPER_SPE_SINGLE_CONV(fsctsi);
HELPER_SPE_SINGLE_CONV(fsctui);
HELPER_SPE_SINGLE_CONV(fsctsiz);
HELPER_SPE_SINGLE_CONV(fsctuiz);
HELPER_SPE_SINGLE_CONV(fsctsf);
HELPER_SPE_SINGLE_CONV(fsctuf);

#define HELPER_SPE_VECTOR_CONV(name)                                    \
uint64_t helper_ev##name (uint64_t val)                                 \
{                                                                       \
    return ((uint64_t)e##name(val >> 32) << 32) |                       \
            (uint64_t)e##name(val);                                     \
}
HELPER_SPE_VECTOR_CONV(fscfsi);
HELPER_SPE_VECTOR_CONV(fscfui);
HELPER_SPE_VECTOR_CONV(fscfuf);
HELPER_SPE_VECTOR_CONV(fscfsf);
HELPER_SPE_VECTOR_CONV(fsctsi);
HELPER_SPE_VECTOR_CONV(fsctui);
HELPER_SPE_VECTOR_CONV(fsctsiz);
HELPER_SPE_VECTOR_CONV(fsctuiz);
HELPER_SPE_VECTOR_CONV(fsctsf);
HELPER_SPE_VECTOR_CONV(fsctuf);

/* Single-precision floating-point arithmetic */
static always_inline uint32_t efsadd (uint32_t op1, uint32_t op2)
{
    CPU_FloatU u1, u2;

    u1.l = op1;
    u2.l = op2;
    u1.f = float32_add(u1.f, u2.f, &env->vec_status);

    return u1.l;
}

static always_inline uint32_t efssub (uint32_t op1, uint32_t op2)
{
    CPU_FloatU u1, u2;

    u1.l = op1;
    u2.l = op2;
    u1.f = float32_sub(u1.f, u2.f, &env->vec_status);

    return u1.l;
}

static always_inline uint32_t efsmul (uint32_t op1, uint32_t op2)
{
    CPU_FloatU u1, u2;

    u1.l = op1;
    u2.l = op2;
    u1.f = float32_mul(u1.f, u2.f, &env->vec_status);

    return u1.l;
}

static always_inline uint32_t efsdiv (uint32_t op1, uint32_t op2)
{
    CPU_FloatU u1, u2;

    u1.l = op1;
    u2.l = op2;
    u1.f = float32_div(u1.f, u2.f, &env->vec_status);

    return u1.l;
}

#define HELPER_SPE_SINGLE_ARITH(name)                                   \
uint32_t helper_e##name (uint32_t op1, uint32_t op2)                    \
{                                                                       \
    return e##name(op1, op2);                                           \
}
HELPER_SPE_SINGLE_ARITH(fsadd);
HELPER_SPE_SINGLE_ARITH(fssub);
HELPER_SPE_SINGLE_ARITH(fsmul);
HELPER_SPE_SINGLE_ARITH(fsdiv);

#define HELPER_SPE_VECTOR_ARITH(name)                                   \
uint64_t helper_ev##name (uint64_t op1, uint64_t op2)                   \
{                                                                       \
    return ((uint64_t)e##name(op1 >> 32, op2 >> 32) << 32) |            \
            (uint64_t)e##name(op1, op2);                                \
}
HELPER_SPE_VECTOR_ARITH(fsadd);
HELPER_SPE_VECTOR_ARITH(fssub);
HELPER_SPE_VECTOR_ARITH(fsmul);
HELPER_SPE_VECTOR_ARITH(fsdiv);

/* Single-precision floating-point comparisons */
static always_inline uint32_t efststlt (uint32_t op1, uint32_t op2)
{
    CPU_FloatU u1, u2;

    u1.l = op1;
    u2.l = op2;
    return float32_lt(u1.f, u2.f, &env->vec_status) ? 4 : 0;
}

static always_inline uint32_t efststgt (uint32_t op1, uint32_t op2)
{
    CPU_FloatU u1, u2;

    u1.l = op1;
    u2.l = op2;
    return float32_le(u1.f, u2.f, &env->vec_status) ? 0 : 4;
}

static always_inline uint32_t efststeq (uint32_t op1, uint32_t op2)
{
    CPU_FloatU u1, u2;

    u1.l = op1;
    u2.l = op2;
    return float32_eq(u1.f, u2.f, &env->vec_status) ? 4 : 0;
}

static always_inline uint32_t efscmplt (uint32_t op1, uint32_t op2)
{
    /* XXX: TODO: test special values (NaN, infinities, ...) */
    return efststlt(op1, op2);
}

static always_inline uint32_t efscmpgt (uint32_t op1, uint32_t op2)
{
    /* XXX: TODO: test special values (NaN, infinities, ...) */
    return efststgt(op1, op2);
}

static always_inline uint32_t efscmpeq (uint32_t op1, uint32_t op2)
{
    /* XXX: TODO: test special values (NaN, infinities, ...) */
    return efststeq(op1, op2);
}

#define HELPER_SINGLE_SPE_CMP(name)                                     \
uint32_t helper_e##name (uint32_t op1, uint32_t op2)                    \
{                                                                       \
    return e##name(op1, op2) << 2;                                      \
}
HELPER_SINGLE_SPE_CMP(fststlt);
HELPER_SINGLE_SPE_CMP(fststgt);
HELPER_SINGLE_SPE_CMP(fststeq);
HELPER_SINGLE_SPE_CMP(fscmplt);
HELPER_SINGLE_SPE_CMP(fscmpgt);
HELPER_SINGLE_SPE_CMP(fscmpeq);

static always_inline uint32_t evcmp_merge (int t0, int t1)
{
    return (t0 << 3) | (t1 << 2) | ((t0 | t1) << 1) | (t0 & t1);
}

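/* The merged CR field layout (for illustration): bit 3 = high-word result,
 * bit 2 = low-word result, bit 1 = OR of both, bit 0 = AND of both; e.g.
 * t0 = 1, t1 = 0 gives 0b1010. */
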
#define HELPER_VECTOR_SPE_CMP(name)                                     \
uint32_t helper_ev##name (uint64_t op1, uint64_t op2)                   \
{                                                                       \
    return evcmp_merge(e##name(op1 >> 32, op2 >> 32), e##name(op1, op2)); \
}
HELPER_VECTOR_SPE_CMP(fststlt);
HELPER_VECTOR_SPE_CMP(fststgt);
HELPER_VECTOR_SPE_CMP(fststeq);
HELPER_VECTOR_SPE_CMP(fscmplt);
HELPER_VECTOR_SPE_CMP(fscmpgt);
HELPER_VECTOR_SPE_CMP(fscmpeq);

/* Double-precision floating-point conversion */
uint64_t helper_efdcfsi (uint32_t val)
{
    CPU_DoubleU u;

    u.d = int32_to_float64(val, &env->vec_status);

    return u.ll;
}

uint64_t helper_efdcfsid (uint64_t val)
{
    CPU_DoubleU u;

    u.d = int64_to_float64(val, &env->vec_status);

    return u.ll;
}

uint64_t helper_efdcfui (uint32_t val)
{
    CPU_DoubleU u;

    u.d = uint32_to_float64(val, &env->vec_status);

    return u.ll;
}

uint64_t helper_efdcfuid (uint64_t val)
{
    CPU_DoubleU u;

    u.d = uint64_to_float64(val, &env->vec_status);

    return u.ll;
}

uint32_t helper_efdctsi (uint64_t val)
{
    CPU_DoubleU u;

    u.ll = val;
    /* NaNs are not treated the same way IEEE 754 does */
    if (unlikely(float64_is_nan(u.d)))
        return 0;

    return float64_to_int32(u.d, &env->vec_status);
}

uint32_t helper_efdctui (uint64_t val)
{
    CPU_DoubleU u;

    u.ll = val;
    /* NaNs are not treated the same way IEEE 754 does */
    if (unlikely(float64_is_nan(u.d)))
        return 0;

    return float64_to_uint32(u.d, &env->vec_status);
}

uint32_t helper_efdctsiz (uint64_t val)
{
    CPU_DoubleU u;

    u.ll = val;
    /* NaNs are not treated the same way IEEE 754 does */
    if (unlikely(float64_is_nan(u.d)))
        return 0;

    return float64_to_int32_round_to_zero(u.d, &env->vec_status);
}

uint64_t helper_efdctsidz (uint64_t val)
{
    CPU_DoubleU u;

    u.ll = val;
    /* NaNs are not treated the same way IEEE 754 does */
    if (unlikely(float64_is_nan(u.d)))
        return 0;

    return float64_to_int64_round_to_zero(u.d, &env->vec_status);
}

uint32_t helper_efdctuiz (uint64_t val)
{
    CPU_DoubleU u;

    u.ll = val;
    /* NaNs are not treated the same way IEEE 754 does */
    if (unlikely(float64_is_nan(u.d)))
        return 0;

    return float64_to_uint32_round_to_zero(u.d, &env->vec_status);
}

uint64_t helper_efdctuidz (uint64_t val)
{
    CPU_DoubleU u;

    u.ll = val;
    /* NaNs are not treated the same way IEEE 754 does */
    if (unlikely(float64_is_nan(u.d)))
        return 0;

    return float64_to_uint64_round_to_zero(u.d, &env->vec_status);
}

uint64_t helper_efdcfsf (uint32_t val)
{
    CPU_DoubleU u;
    float64 tmp;

    u.d = int32_to_float64(val, &env->vec_status);
    tmp = int64_to_float64(1ULL << 32, &env->vec_status);
    u.d = float64_div(u.d, tmp, &env->vec_status);

    return u.ll;
}

uint64_t helper_efdcfuf (uint32_t val)
{
    CPU_DoubleU u;
    float64 tmp;

    u.d = uint32_to_float64(val, &env->vec_status);
    tmp = int64_to_float64(1ULL << 32, &env->vec_status);
    u.d = float64_div(u.d, tmp, &env->vec_status);

    return u.ll;
}

uint32_t helper_efdctsf (uint64_t val)
{
    CPU_DoubleU u;
    float64 tmp;

    u.ll = val;
    /* NaNs are not treated the same way IEEE 754 does */
    if (unlikely(float64_is_nan(u.d)))
        return 0;
    tmp = uint64_to_float64(1ULL << 32, &env->vec_status);
    u.d = float64_mul(u.d, tmp, &env->vec_status);

    return float64_to_int32(u.d, &env->vec_status);
}

uint32_t helper_efdctuf (uint64_t val)
{
    CPU_DoubleU u;
    float64 tmp;

    u.ll = val;
    /* NaNs are not treated the same way IEEE 754 does */
    if (unlikely(float64_is_nan(u.d)))
        return 0;
    tmp = uint64_to_float64(1ULL << 32, &env->vec_status);
    u.d = float64_mul(u.d, tmp, &env->vec_status);

    return float64_to_uint32(u.d, &env->vec_status);
}

uint32_t helper_efscfd (uint64_t val)
{
    CPU_DoubleU u1;
    CPU_FloatU u2;

    u1.ll = val;
    u2.f = float64_to_float32(u1.d, &env->vec_status);

    return u2.l;
}

uint64_t helper_efdcfs (uint32_t val)
{
    CPU_DoubleU u2;
    CPU_FloatU u1;

    u1.l = val;
    u2.d = float32_to_float64(u1.f, &env->vec_status);

    return u2.ll;
}

/* Double-precision floating-point arithmetic */
uint64_t helper_efdadd (uint64_t op1, uint64_t op2)
{
    CPU_DoubleU u1, u2;

    u1.ll = op1;
    u2.ll = op2;
    u1.d = float64_add(u1.d, u2.d, &env->vec_status);

    return u1.ll;
}

uint64_t helper_efdsub (uint64_t op1, uint64_t op2)
{
    CPU_DoubleU u1, u2;

    u1.ll = op1;
    u2.ll = op2;
    u1.d = float64_sub(u1.d, u2.d, &env->vec_status);

    return u1.ll;
}

uint64_t helper_efdmul (uint64_t op1, uint64_t op2)
{
    CPU_DoubleU u1, u2;

    u1.ll = op1;
    u2.ll = op2;
    u1.d = float64_mul(u1.d, u2.d, &env->vec_status);

    return u1.ll;
}

uint64_t helper_efddiv (uint64_t op1, uint64_t op2)
{
    CPU_DoubleU u1, u2;

    u1.ll = op1;
    u2.ll = op2;
    u1.d = float64_div(u1.d, u2.d, &env->vec_status);

    return u1.ll;
}

/* Double-precision floating-point comparisons */
uint32_t helper_efdtstlt (uint64_t op1, uint64_t op2)
{
    CPU_DoubleU u1, u2;

    u1.ll = op1;
    u2.ll = op2;
    return float64_lt(u1.d, u2.d, &env->vec_status) ? 4 : 0;
}

uint32_t helper_efdtstgt (uint64_t op1, uint64_t op2)
{
    CPU_DoubleU u1, u2;

    u1.ll = op1;
    u2.ll = op2;
    return float64_le(u1.d, u2.d, &env->vec_status) ? 0 : 4;
}

uint32_t helper_efdtsteq (uint64_t op1, uint64_t op2)
{
    CPU_DoubleU u1, u2;

    u1.ll = op1;
    u2.ll = op2;
    return float64_eq(u1.d, u2.d, &env->vec_status) ? 4 : 0;
}

uint32_t helper_efdcmplt (uint64_t op1, uint64_t op2)
{
    /* XXX: TODO: test special values (NaN, infinities, ...) */
    return helper_efdtstlt(op1, op2);
}

uint32_t helper_efdcmpgt (uint64_t op1, uint64_t op2)
{
    /* XXX: TODO: test special values (NaN, infinities, ...) */
    return helper_efdtstgt(op1, op2);
}

uint32_t helper_efdcmpeq (uint64_t op1, uint64_t op2)
{
    /* XXX: TODO: test special values (NaN, infinities, ...) */
    return helper_efdtsteq(op1, op2);
}

/*****************************************************************************/
/* Softmmu support */
#if !defined (CONFIG_USER_ONLY)

#define MMUSUFFIX _mmu

#define SHIFT 0
#include "softmmu_template.h"

#define SHIFT 1
#include "softmmu_template.h"

#define SHIFT 2
#include "softmmu_template.h"

#define SHIFT 3
#include "softmmu_template.h"

/* Try to fill the TLB and return an exception if an error occurs. If retaddr
   is NULL, it means that the function was called in C code (i.e. not
   from generated code or from helper.c) */
/* XXX: fix it to restore all registers */
void tlb_fill (target_ulong addr, int is_write, int mmu_idx, void *retaddr)
{
    TranslationBlock *tb;
    CPUState *saved_env;
    unsigned long pc;
    int ret;

    /* XXX: hack to restore env in all cases, even if not called from
       generated code */
    saved_env = env;
    env = cpu_single_env;
    ret = cpu_ppc_handle_mmu_fault(env, addr, is_write, mmu_idx, 1);
    if (unlikely(ret != 0)) {
        if (likely(retaddr)) {
            /* now we have a real cpu fault */
            pc = (unsigned long)retaddr;
            tb = tb_find_pc(pc);
            if (likely(tb)) {
                /* the PC is inside the translated code. It means that we have
                   a virtual CPU fault */
                cpu_restore_state(tb, env, pc, NULL);
            }
        }
        helper_raise_exception_err(env->exception_index, env->error_code);
    }
    env = saved_env;
}

/* Segment registers load and store */
target_ulong helper_load_sr (target_ulong sr_num)
{
    return env->sr[sr_num];
}

void helper_store_sr (target_ulong sr_num, target_ulong val)
{
    ppc_store_sr(env, sr_num, val);
}

/* SLB management */
#if defined(TARGET_PPC64)
target_ulong helper_load_slb (target_ulong slb_nr)
{
    return ppc_load_slb(env, slb_nr);
}

void helper_store_slb (target_ulong slb_nr, target_ulong rs)
{
    ppc_store_slb(env, slb_nr, rs);
}

void helper_slbia (void)
{
    ppc_slb_invalidate_all(env);
}

void helper_slbie (target_ulong addr)
{
    ppc_slb_invalidate_one(env, addr);
}

#endif /* defined(TARGET_PPC64) */

/* TLB management */
void helper_tlbia (void)
{
    ppc_tlb_invalidate_all(env);
}

void helper_tlbie (target_ulong addr)
{
    ppc_tlb_invalidate_one(env, addr);
}

/* Software-driven TLB management */
/* PowerPC 602/603 software TLB load instructions helpers */
static void do_6xx_tlb (target_ulong new_EPN, int is_code)
{
    target_ulong RPN, CMP, EPN;
    int way;

    RPN = env->spr[SPR_RPA];
    if (is_code) {
        CMP = env->spr[SPR_ICMP];
        EPN = env->spr[SPR_IMISS];
    } else {
        CMP = env->spr[SPR_DCMP];
        EPN = env->spr[SPR_DMISS];
    }
    way = (env->spr[SPR_SRR1] >> 17) & 1;
    LOG_SWTLB("%s: EPN " ADDRX " " ADDRX " PTE0 " ADDRX
              " PTE1 " ADDRX " way %d\n",
              __func__, new_EPN, EPN, CMP, RPN, way);
    /* Store this TLB */
    ppc6xx_tlb_store(env, (uint32_t)(new_EPN & TARGET_PAGE_MASK),
                     way, is_code, CMP, RPN);
}

void helper_6xx_tlbd (target_ulong EPN)
{
    do_6xx_tlb(EPN, 0);
}

void helper_6xx_tlbi (target_ulong EPN)
{
    do_6xx_tlb(EPN, 1);
}

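/* For context (a sketch of the guest-side flow, not code in this file): on a
 * 603-style software TLB miss the CPU traps with IMISS/DMISS and ICMP/DCMP
 * preloaded and the way selector in SRR1; the miss handler stores the
 * matching PTE word into RPA and issues tlbld/tlbli, which land in the
 * helper_6xx_tlbd/helper_6xx_tlbi entry points above. */
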
/* PowerPC 74xx software TLB load instructions helpers */
static void do_74xx_tlb (target_ulong new_EPN, int is_code)
{
    target_ulong RPN, CMP, EPN;
    int way;

    RPN = env->spr[SPR_PTELO];
    CMP = env->spr[SPR_PTEHI];
    EPN = env->spr[SPR_TLBMISS] & ~0x3;
    way = env->spr[SPR_TLBMISS] & 0x3;
    LOG_SWTLB("%s: EPN " ADDRX " " ADDRX " PTE0 " ADDRX
              " PTE1 " ADDRX " way %d\n",
              __func__, new_EPN, EPN, CMP, RPN, way);
    /* Store this TLB */
    ppc6xx_tlb_store(env, (uint32_t)(new_EPN & TARGET_PAGE_MASK),
                     way, is_code, CMP, RPN);
}

void helper_74xx_tlbd (target_ulong EPN)
{
    do_74xx_tlb(EPN, 0);
}

void helper_74xx_tlbi (target_ulong EPN)
{
    do_74xx_tlb(EPN, 1);
}

static always_inline target_ulong booke_tlb_to_page_size (int size)
{
    return 1024 << (2 * size);
}

static always_inline int booke_page_size_to_tlb (target_ulong page_size)
{
    int size;

    switch (page_size) {
    case 0x00000400UL:
        size = 0x0;
        break;
    case 0x00001000UL:
        size = 0x1;
        break;
    case 0x00004000UL:
        size = 0x2;
        break;
    case 0x00010000UL:
        size = 0x3;
        break;
    case 0x00040000UL:
        size = 0x4;
        break;
    case 0x00100000UL:
        size = 0x5;
        break;
    case 0x00400000UL:
        size = 0x6;
        break;
    case 0x01000000UL:
        size = 0x7;
        break;
    case 0x04000000UL:
        size = 0x8;
        break;
    case 0x10000000UL:
        size = 0x9;
        break;
    case 0x40000000UL:
        size = 0xA;
        break;
#if defined (TARGET_PPC64)
    case 0x000100000000ULL:
        size = 0xB;
        break;
    case 0x000400000000ULL:
        size = 0xC;
        break;
    case 0x001000000000ULL:
        size = 0xD;
        break;
    case 0x004000000000ULL:
        size = 0xE;
        break;
    case 0x010000000000ULL:
        size = 0xF;
        break;
#endif
    default:
        size = -1;
        break;
    }
    return size;
}

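/* The BookE size field thus encodes page sizes of 1KB << (2 * size):
 * 0 -> 1KB, 1 -> 4KB, 2 -> 16KB, 3 -> 64KB, ..., 9 -> 256MB, with the
 * TARGET_PPC64-only encodings above covering 4GB through 1TB. */
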
/* Helpers for 4xx TLB management */
target_ulong helper_4xx_tlbre_lo (target_ulong entry)
{
    ppcemb_tlb_t *tlb;
    target_ulong ret;
    int size;

    entry &= 0x3F;
    tlb = &env->tlb[entry].tlbe;
    ret = tlb->EPN;
    if (tlb->prot & PAGE_VALID)
        ret |= 0x400;
    size = booke_page_size_to_tlb(tlb->size);
    if (size < 0 || size > 0x7)
        size = 1;
    ret |= size << 7;
    env->spr[SPR_40x_PID] = tlb->PID;
    return ret;
}

target_ulong helper_4xx_tlbre_hi (target_ulong entry)
{
    ppcemb_tlb_t *tlb;
    target_ulong ret;

    entry &= 0x3F;
    tlb = &env->tlb[entry].tlbe;
    ret = tlb->RPN;
    if (tlb->prot & PAGE_EXEC)
        ret |= 0x200;
    if (tlb->prot & PAGE_WRITE)
        ret |= 0x100;
    return ret;
}

void helper_4xx_tlbwe_hi (target_ulong entry, target_ulong val)
{
    ppcemb_tlb_t *tlb;
    target_ulong page, end;

    LOG_SWTLB("%s entry %d val " ADDRX "\n", __func__, (int)entry, val);
    entry &= 0x3F;
    tlb = &env->tlb[entry].tlbe;
    /* Invalidate previous TLB (if it's valid) */
    if (tlb->prot & PAGE_VALID) {
        end = tlb->EPN + tlb->size;
        LOG_SWTLB("%s: invalidate old TLB %d start " ADDRX
                  " end " ADDRX "\n", __func__, (int)entry, tlb->EPN, end);
        for (page = tlb->EPN; page < end; page += TARGET_PAGE_SIZE)
            tlb_flush_page(env, page);
    }
    tlb->size = booke_tlb_to_page_size((val >> 7) & 0x7);
    /* We cannot handle TLB size < TARGET_PAGE_SIZE.
     * If this ever occurs, one should use the ppcemb target instead
     * of the ppc or ppc64 one
     */
    if ((val & 0x40) && tlb->size < TARGET_PAGE_SIZE) {
        cpu_abort(env, "TLB size " TARGET_FMT_lu " < %u "
                  "are not supported (%d)\n",
                  tlb->size, TARGET_PAGE_SIZE, (int)((val >> 7) & 0x7));
    }
    tlb->EPN = val & ~(tlb->size - 1);
    if (val & 0x40)
        tlb->prot |= PAGE_VALID;
    else
        tlb->prot &= ~PAGE_VALID;
    if (val & 0x20) {
        /* XXX: TO BE FIXED */
        cpu_abort(env, "Little-endian TLB entries are not supported yet\n");
    }
    tlb->PID = env->spr[SPR_40x_PID]; /* PID */
    tlb->attr = val & 0xFF;
    LOG_SWTLB("%s: set up TLB %d RPN " PADDRX " EPN " ADDRX
              " size " ADDRX " prot %c%c%c%c PID %d\n", __func__,
              (int)entry, tlb->RPN, tlb->EPN, tlb->size,
              tlb->prot & PAGE_READ ? 'r' : '-',
              tlb->prot & PAGE_WRITE ? 'w' : '-',
              tlb->prot & PAGE_EXEC ? 'x' : '-',
              tlb->prot & PAGE_VALID ? 'v' : '-', (int)tlb->PID);
    /* Invalidate new TLB (if valid) */
    if (tlb->prot & PAGE_VALID) {
        end = tlb->EPN + tlb->size;
        LOG_SWTLB("%s: invalidate TLB %d start " ADDRX
                  " end " ADDRX "\n", __func__, (int)entry, tlb->EPN, end);
        for (page = tlb->EPN; page < end; page += TARGET_PAGE_SIZE)
            tlb_flush_page(env, page);
    }
}

void helper_4xx_tlbwe_lo (target_ulong entry, target_ulong val)
{
    ppcemb_tlb_t *tlb;

    LOG_SWTLB("%s entry %i val " ADDRX "\n", __func__, (int)entry, val);
    entry &= 0x3F;
    tlb = &env->tlb[entry].tlbe;
    tlb->RPN = val & 0xFFFFFC00;
    tlb->prot = PAGE_READ;
    if (val & 0x200)
        tlb->prot |= PAGE_EXEC;
    if (val & 0x100)
        tlb->prot |= PAGE_WRITE;
    LOG_SWTLB("%s: set up TLB %d RPN " PADDRX " EPN " ADDRX
              " size " ADDRX " prot %c%c%c%c PID %d\n", __func__,
              (int)entry, tlb->RPN, tlb->EPN, tlb->size,
              tlb->prot & PAGE_READ ? 'r' : '-',
              tlb->prot & PAGE_WRITE ? 'w' : '-',
              tlb->prot & PAGE_EXEC ? 'x' : '-',
              tlb->prot & PAGE_VALID ? 'v' : '-', (int)tlb->PID);
}

target_ulong helper_4xx_tlbsx (target_ulong address)
{
    return ppcemb_tlb_search(env, address, env->spr[SPR_40x_PID]);
}

/* PowerPC 440 TLB management */
void helper_440_tlbwe (uint32_t word, target_ulong entry, target_ulong value)
{
    ppcemb_tlb_t *tlb;
    target_ulong EPN, RPN, size;
    int do_flush_tlbs;

    LOG_SWTLB("%s word %d entry %d value " ADDRX "\n",
              __func__, word, (int)entry, value);
    do_flush_tlbs = 0;
    entry &= 0x3F;
    tlb = &env->tlb[entry].tlbe;
    switch (word) {
    default:
        /* Just here to please gcc */
    case 0:
        EPN = value & 0xFFFFFC00;
        if ((tlb->prot & PAGE_VALID) && EPN != tlb->EPN)
            do_flush_tlbs = 1;
        tlb->EPN = EPN;
        size = booke_tlb_to_page_size((value >> 4) & 0xF);
        if ((tlb->prot & PAGE_VALID) && tlb->size < size)
            do_flush_tlbs = 1;
        tlb->size = size;
        tlb->attr &= ~0x1;
        tlb->attr |= (value >> 8) & 1;
        if (value & 0x200) {
            tlb->prot |= PAGE_VALID;
        } else {
            if (tlb->prot & PAGE_VALID) {
                tlb->prot &= ~PAGE_VALID;
                do_flush_tlbs = 1;
            }
        }
        tlb->PID = env->spr[SPR_440_MMUCR] & 0x000000FF;
        if (do_flush_tlbs)
            tlb_flush(env, 1);
        break;
    case 1:
        RPN = value & 0xFFFFFC0F;
        if ((tlb->prot & PAGE_VALID) && tlb->RPN != RPN)
            tlb_flush(env, 1);
        tlb->RPN = RPN;
        break;
    case 2:
        tlb->attr = (tlb->attr & 0x1) | (value & 0x0000FF00);
        tlb->prot = tlb->prot & PAGE_VALID;
        if (value & 0x1)
            tlb->prot |= PAGE_READ << 4;
        if (value & 0x2)
            tlb->prot |= PAGE_WRITE << 4;
        if (value & 0x4)
            tlb->prot |= PAGE_EXEC << 4;
        if (value & 0x8)
            tlb->prot |= PAGE_READ;
        if (value & 0x10)
            tlb->prot |= PAGE_WRITE;
        if (value & 0x20)
            tlb->prot |= PAGE_EXEC;
        break;
    }
}

target_ulong helper_440_tlbre (uint32_t word, target_ulong entry)
{
    ppcemb_tlb_t *tlb;
    target_ulong ret;
    int size;

    entry &= 0x3F;
    tlb = &env->tlb[entry].tlbe;
    switch (word) {
    default:
        /* Just here to please gcc */
    case 0:
        ret = tlb->EPN;
        size = booke_page_size_to_tlb(tlb->size);
        if (size < 0 || size > 0xF)
            size = 1;
        ret |= size << 4;
        if (tlb->attr & 0x1)
            ret |= 0x100;
        if (tlb->prot & PAGE_VALID)
            ret |= 0x200;
        env->spr[SPR_440_MMUCR] &= ~0x000000FF;
        env->spr[SPR_440_MMUCR] |= tlb->PID;
        break;
    case 1:
        ret = tlb->RPN;
        break;
    case 2:
        ret = tlb->attr & ~0x1;
        if (tlb->prot & (PAGE_READ << 4))
            ret |= 0x1;
        if (tlb->prot & (PAGE_WRITE << 4))
            ret |= 0x2;
        if (tlb->prot & (PAGE_EXEC << 4))
            ret |= 0x4;
        if (tlb->prot & PAGE_READ)
            ret |= 0x8;
        if (tlb->prot & PAGE_WRITE)
            ret |= 0x10;
        if (tlb->prot & PAGE_EXEC)
            ret |= 0x20;
        break;
    }
    return ret;
}

target_ulong helper_440_tlbsx (target_ulong address)
{
    return ppcemb_tlb_search(env, address, env->spr[SPR_440_MMUCR] & 0xFF);
}

#endif /* !CONFIG_USER_ONLY */