/*
 *  PowerPC emulation helpers for qemu.
 *
 *  Copyright (c) 2003-2007 Jocelyn Mayer
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include <string.h>
#include "exec.h"
#include "host-utils.h"
#include "helper.h"

#include "helper_regs.h"

//#define DEBUG_EXCEPTIONS
//#define DEBUG_SOFTWARE_TLB

#ifdef DEBUG_SOFTWARE_TLB
#  define LOG_SWTLB(...) qemu_log(__VA_ARGS__)
#else
#  define LOG_SWTLB(...) do { } while (0)
#endif

/*****************************************************************************/
/* Exceptions processing helpers */

void helper_raise_exception_err (uint32_t exception, uint32_t error_code)
{
#if 0
    printf("Raise exception %3x code : %d\n", exception, error_code);
#endif
    env->exception_index = exception;
    env->error_code = error_code;
    cpu_loop_exit();
}

void helper_raise_exception (uint32_t exception)
{
    helper_raise_exception_err(exception, 0);
}

/*****************************************************************************/
/* SPR accesses */

void helper_load_dump_spr (uint32_t sprn)
{
    qemu_log("Read SPR %d %03x => " TARGET_FMT_lx "\n", sprn, sprn,
             env->spr[sprn]);
}

void helper_store_dump_spr (uint32_t sprn)
{
    qemu_log("Write SPR %d %03x <= " TARGET_FMT_lx "\n", sprn, sprn,
             env->spr[sprn]);
}

target_ulong helper_load_tbl (void)
{
    return (target_ulong)cpu_ppc_load_tbl(env);
}

target_ulong helper_load_tbu (void)
{
    return cpu_ppc_load_tbu(env);
}

target_ulong helper_load_atbl (void)
{
    return (target_ulong)cpu_ppc_load_atbl(env);
}

target_ulong helper_load_atbu (void)
{
    return cpu_ppc_load_atbu(env);
}

target_ulong helper_load_601_rtcl (void)
{
    return cpu_ppc601_load_rtcl(env);
}

target_ulong helper_load_601_rtcu (void)
{
    return cpu_ppc601_load_rtcu(env);
}

#if !defined(CONFIG_USER_ONLY)
#if defined (TARGET_PPC64)
void helper_store_asr (target_ulong val)
{
    ppc_store_asr(env, val);
}
#endif

void helper_store_sdr1 (target_ulong val)
{
    ppc_store_sdr1(env, val);
}

void helper_store_tbl (target_ulong val)
{
    cpu_ppc_store_tbl(env, val);
}

void helper_store_tbu (target_ulong val)
{
    cpu_ppc_store_tbu(env, val);
}

void helper_store_atbl (target_ulong val)
{
    cpu_ppc_store_atbl(env, val);
}

void helper_store_atbu (target_ulong val)
{
    cpu_ppc_store_atbu(env, val);
}

void helper_store_601_rtcl (target_ulong val)
{
    cpu_ppc601_store_rtcl(env, val);
}

void helper_store_601_rtcu (target_ulong val)
{
    cpu_ppc601_store_rtcu(env, val);
}

target_ulong helper_load_decr (void)
{
    return cpu_ppc_load_decr(env);
}

void helper_store_decr (target_ulong val)
{
    cpu_ppc_store_decr(env, val);
}

void helper_store_hid0_601 (target_ulong val)
{
    target_ulong hid0;

    hid0 = env->spr[SPR_HID0];
    if ((val ^ hid0) & 0x00000008) {
        /* Change current endianness */
        env->hflags &= ~(1 << MSR_LE);
        env->hflags_nmsr &= ~(1 << MSR_LE);
        env->hflags_nmsr |= (1 << MSR_LE) & (((val >> 3) & 1) << MSR_LE);
        env->hflags |= env->hflags_nmsr;
        qemu_log("%s: set endianness to %c => " TARGET_FMT_lx "\n", __func__,
                 val & 0x8 ? 'l' : 'b', env->hflags);
    }
    env->spr[SPR_HID0] = (uint32_t)val;
}

void helper_store_403_pbr (uint32_t num, target_ulong value)
{
    if (likely(env->pb[num] != value)) {
        env->pb[num] = value;
        /* Should be optimized */
        tlb_flush(env, 1);
    }
}

target_ulong helper_load_40x_pit (void)
{
    return load_40x_pit(env);
}

void helper_store_40x_pit (target_ulong val)
{
    store_40x_pit(env, val);
}

void helper_store_40x_dbcr0 (target_ulong val)
{
    store_40x_dbcr0(env, val);
}

void helper_store_40x_sler (target_ulong val)
{
    store_40x_sler(env, val);
}

void helper_store_booke_tcr (target_ulong val)
{
    store_booke_tcr(env, val);
}

void helper_store_booke_tsr (target_ulong val)
{
    store_booke_tsr(env, val);
}

void helper_store_ibatu (uint32_t nr, target_ulong val)
{
    ppc_store_ibatu(env, nr, val);
}

void helper_store_ibatl (uint32_t nr, target_ulong val)
{
    ppc_store_ibatl(env, nr, val);
}

void helper_store_dbatu (uint32_t nr, target_ulong val)
{
    ppc_store_dbatu(env, nr, val);
}

void helper_store_dbatl (uint32_t nr, target_ulong val)
{
    ppc_store_dbatl(env, nr, val);
}

void helper_store_601_batl (uint32_t nr, target_ulong val)
{
    ppc_store_ibatl_601(env, nr, val);
}

void helper_store_601_batu (uint32_t nr, target_ulong val)
{
    ppc_store_ibatu_601(env, nr, val);
}
#endif

/*****************************************************************************/
/* Memory load and stores */

static inline target_ulong addr_add(target_ulong addr, target_long arg)
{
#if defined(TARGET_PPC64)
    if (!msr_sf)
        return (uint32_t)(addr + arg);
    else
#endif
        return addr + arg;
}
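
/* Illustrative note (not in the original source): with MSR[SF] clear on a
 * 64-bit CPU, effective addresses wrap at 32 bits, e.g.
 *   addr_add(0xFFFFFFFFUL, 4) == 0x00000003
 * whereas in 64-bit mode the full-width sum is kept.
 */
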
void helper_lmw (target_ulong addr, uint32_t reg)
{
    for (; reg < 32; reg++) {
        if (msr_le)
            env->gpr[reg] = bswap32(ldl(addr));
        else
            env->gpr[reg] = ldl(addr);
        addr = addr_add(addr, 4);
    }
}

void helper_stmw (target_ulong addr, uint32_t reg)
{
    for (; reg < 32; reg++) {
        if (msr_le)
            stl(addr, bswap32((uint32_t)env->gpr[reg]));
        else
            stl(addr, (uint32_t)env->gpr[reg]);
        addr = addr_add(addr, 4);
    }
}

void helper_lsw(target_ulong addr, uint32_t nb, uint32_t reg)
{
    int sh;

    for (; nb > 3; nb -= 4) {
        env->gpr[reg] = ldl(addr);
        reg = (reg + 1) % 32;
        addr = addr_add(addr, 4);
    }
    if (unlikely(nb > 0)) {
        env->gpr[reg] = 0;
        for (sh = 24; nb > 0; nb--, sh -= 8) {
            env->gpr[reg] |= ldub(addr) << sh;
            addr = addr_add(addr, 1);
        }
    }
}

/* PPC32 specification says we must generate an exception if
 * rA is in the range of registers to be loaded.
 * On the other hand, IBM says this is valid, but rA won't be loaded.
 * For now, I'll follow the spec...
 */
void helper_lswx(target_ulong addr, uint32_t reg, uint32_t ra, uint32_t rb)
{
    if (likely(xer_bc != 0)) {
        if (unlikely((ra != 0 && reg < ra && (reg + xer_bc) > ra) ||
                     (reg < rb && (reg + xer_bc) > rb))) {
            helper_raise_exception_err(POWERPC_EXCP_PROGRAM,
                                       POWERPC_EXCP_INVAL |
                                       POWERPC_EXCP_INVAL_LSWX);
        } else {
            helper_lsw(addr, xer_bc, reg);
        }
    }
}

void helper_stsw(target_ulong addr, uint32_t nb, uint32_t reg)
{
    int sh;

    for (; nb > 3; nb -= 4) {
        stl(addr, env->gpr[reg]);
        reg = (reg + 1) % 32;
        addr = addr_add(addr, 4);
    }
    if (unlikely(nb > 0)) {
        for (sh = 24; nb > 0; nb--, sh -= 8) {
            stb(addr, (env->gpr[reg] >> sh) & 0xFF);
            addr = addr_add(addr, 1);
        }
    }
}

static void do_dcbz(target_ulong addr, int dcache_line_size)
{
    int i;

    addr &= ~(dcache_line_size - 1);
    for (i = 0 ; i < dcache_line_size ; i += 4) {
        stl(addr + i, 0);
    }
    if (env->reserve_addr == addr)
        env->reserve_addr = (target_ulong)-1ULL;
}

void helper_dcbz(target_ulong addr)
{
    do_dcbz(addr, env->dcache_line_size);
}

void helper_dcbz_970(target_ulong addr)
{
    if (((env->spr[SPR_970_HID5] >> 7) & 0x3) == 1)
        do_dcbz(addr, 32);
    else
        do_dcbz(addr, env->dcache_line_size);
}

void helper_icbi(target_ulong addr)
{
    addr &= ~(env->dcache_line_size - 1);
    /* Invalidate one cache line :
     * PowerPC specification says this is to be treated like a load
     * (not a fetch) by the MMU. To be sure it will be so,
     * do the load "by hand".
     */
    ldl(addr);
    tb_invalidate_page_range(addr, addr + env->icache_line_size);
}

// XXX: to be tested
target_ulong helper_lscbx (target_ulong addr, uint32_t reg, uint32_t ra,
                           uint32_t rb)
{
    int i, c, d;

    d = 24;
    for (i = 0; i < xer_bc; i++) {
        c = ldub(addr);
        addr = addr_add(addr, 1);
        /* ra (if not 0) and rb are never modified */
        if (likely(reg != rb && (ra == 0 || reg != ra))) {
            env->gpr[reg] = (env->gpr[reg] & ~(0xFF << d)) | (c << d);
        }
        if (unlikely(c == xer_cmp))
            break;
        if (likely(d != 0)) {
            d -= 8;
        } else {
            d = 24;
            reg++;
            reg = reg & 0x1F;
        }
    }
    return i;
}

/*****************************************************************************/
/* Fixed point operations helpers */
#if defined(TARGET_PPC64)

/* multiply high word */
uint64_t helper_mulhd (uint64_t arg1, uint64_t arg2)
{
    uint64_t tl, th;

    muls64(&tl, &th, arg1, arg2);
    return th;
}

/* multiply high word unsigned */
uint64_t helper_mulhdu (uint64_t arg1, uint64_t arg2)
{
    uint64_t tl, th;

    mulu64(&tl, &th, arg1, arg2);
    return th;
}

uint64_t helper_mulldo (uint64_t arg1, uint64_t arg2)
{
    int64_t th;
    uint64_t tl;

    muls64(&tl, (uint64_t *)&th, arg1, arg2);
    /* If th != 0 && th != -1, then we had an overflow */
    if (likely((uint64_t)(th + 1) <= 1)) {
        env->xer &= ~(1 << XER_OV);
    } else {
        env->xer |= (1 << XER_OV) | (1 << XER_SO);
    }
    return (int64_t)tl;
}
#endif

target_ulong helper_cntlzw (target_ulong t)
{
    return clz32(t);
}

#if defined(TARGET_PPC64)
target_ulong helper_cntlzd (target_ulong t)
{
    return clz64(t);
}
#endif

/* shift right arithmetic helper */
target_ulong helper_sraw (target_ulong value, target_ulong shift)
{
    int32_t ret;

    if (likely(!(shift & 0x20))) {
        if (likely((uint32_t)shift != 0)) {
            shift &= 0x1f;
            ret = (int32_t)value >> shift;
            if (likely(ret >= 0 || (value & ((1 << shift) - 1)) == 0)) {
                env->xer &= ~(1 << XER_CA);
            } else {
                env->xer |= (1 << XER_CA);
            }
        } else {
            ret = (int32_t)value;
            env->xer &= ~(1 << XER_CA);
        }
    } else {
        ret = (int32_t)value >> 31;
        if (ret) {
            env->xer |= (1 << XER_CA);
        } else {
            env->xer &= ~(1 << XER_CA);
        }
    }
    return (target_long)ret;
}

#if defined(TARGET_PPC64)
target_ulong helper_srad (target_ulong value, target_ulong shift)
{
    int64_t ret;

    if (likely(!(shift & 0x40))) {
        if (likely((uint64_t)shift != 0)) {
            shift &= 0x3f;
            ret = (int64_t)value >> shift;
            if (likely(ret >= 0 ||
                       (value & (((uint64_t)1 << shift) - 1)) == 0)) {
                env->xer &= ~(1 << XER_CA);
            } else {
                env->xer |= (1 << XER_CA);
            }
        } else {
            ret = (int64_t)value;
            env->xer &= ~(1 << XER_CA);
        }
    } else {
        ret = (int64_t)value >> 63;
        if (ret) {
            env->xer |= (1 << XER_CA);
        } else {
            env->xer &= ~(1 << XER_CA);
        }
    }
    return ret;
}
#endif

target_ulong helper_popcntb (target_ulong val)
{
    val = (val & 0x55555555) + ((val >>  1) & 0x55555555);
    val = (val & 0x33333333) + ((val >>  2) & 0x33333333);
    val = (val & 0x0f0f0f0f) + ((val >>  4) & 0x0f0f0f0f);
    return val;
}

#if defined(TARGET_PPC64)
target_ulong helper_popcntb_64 (target_ulong val)
{
    val = (val & 0x5555555555555555ULL) + ((val >>  1) & 0x5555555555555555ULL);
    val = (val & 0x3333333333333333ULL) + ((val >>  2) & 0x3333333333333333ULL);
    val = (val & 0x0f0f0f0f0f0f0f0fULL) + ((val >>  4) & 0x0f0f0f0f0f0f0f0fULL);
    return val;
}
#endif
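
/* Worked example of the bit-slice reduction above (byte 0xB7 = 10110111):
 *   pass 1 (pairs):   10 11 01 11 -> 01 10 01 10  (counts 1,2,1,2)
 *   pass 2 (nibbles): 0110 0110   -> 0011 0011    (counts 3,3)
 *   pass 3 (byte):    00110011    -> 00000110     (6 bits set)
 * popcntb keeps one count per byte, as the instruction requires.
 */
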
/*****************************************************************************/
/* Floating point operations helpers */
uint64_t helper_float32_to_float64(uint32_t arg)
{
    CPU_FloatU f;
    CPU_DoubleU d;

    f.l = arg;
    d.d = float32_to_float64(f.f, &env->fp_status);
    return d.ll;
}

uint32_t helper_float64_to_float32(uint64_t arg)
{
    CPU_FloatU f;
    CPU_DoubleU d;

    d.ll = arg;
    f.f = float64_to_float32(d.d, &env->fp_status);
    return f.l;
}

static inline int isden(float64 d)
{
    CPU_DoubleU u;

    u.d = d;

    return ((u.ll >> 52) & 0x7FF) == 0;
}
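
/* Example for the test above: an IEEE754 double is denormalized when its 11
 * exponent bits are all zero, so isden() returns 1 for
 * 0x0008000000000000ULL (exponent field 0) and 0 for 1.0,
 * i.e. 0x3FF0000000000000ULL (exponent field 0x3FF).
 */
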
uint32_t helper_compute_fprf (uint64_t arg, uint32_t set_fprf)
{
    CPU_DoubleU farg;
    int isneg;
    int ret;

    farg.ll = arg;
    isneg = float64_is_neg(farg.d);
    if (unlikely(float64_is_nan(farg.d))) {
        if (float64_is_signaling_nan(farg.d)) {
            /* Signaling NaN: flags are undefined */
            ret = 0x00;
        } else {
            /* Quiet NaN */
            ret = 0x11;
        }
    } else if (unlikely(float64_is_infinity(farg.d))) {
        /* +/- infinity */
        if (isneg)
            ret = 0x09;
        else
            ret = 0x05;
    } else {
        if (float64_is_zero(farg.d)) {
            /* +/- zero */
            if (isneg)
                ret = 0x12;
            else
                ret = 0x02;
        } else {
            if (isden(farg.d)) {
                /* Denormalized numbers */
                ret = 0x10;
            } else {
                /* Normalized numbers */
                ret = 0x00;
            }
            if (isneg) {
                ret |= 0x08;
            } else {
                ret |= 0x04;
            }
        }
    }
    if (set_fprf) {
        /* We update FPSCR_FPRF */
        env->fpscr &= ~(0x1F << FPSCR_FPRF);
        env->fpscr |= ret << FPSCR_FPRF;
    }
    /* We just need fpcc to update Rc1 */
    return ret & 0xF;
}

/* Floating-point invalid operations exception */
static inline uint64_t fload_invalid_op_excp(int op)
{
    uint64_t ret = 0;
    int ve;

    ve = fpscr_ve;
    switch (op) {
    case POWERPC_EXCP_FP_VXSNAN:
        env->fpscr |= 1 << FPSCR_VXSNAN;
        break;
    case POWERPC_EXCP_FP_VXSOFT:
        env->fpscr |= 1 << FPSCR_VXSOFT;
        break;
    case POWERPC_EXCP_FP_VXISI:
        /* Magnitude subtraction of infinities */
        env->fpscr |= 1 << FPSCR_VXISI;
        goto update_arith;
    case POWERPC_EXCP_FP_VXIDI:
        /* Division of infinity by infinity */
        env->fpscr |= 1 << FPSCR_VXIDI;
        goto update_arith;
    case POWERPC_EXCP_FP_VXZDZ:
        /* Division of zero by zero */
        env->fpscr |= 1 << FPSCR_VXZDZ;
        goto update_arith;
    case POWERPC_EXCP_FP_VXIMZ:
        /* Multiplication of zero by infinity */
        env->fpscr |= 1 << FPSCR_VXIMZ;
        goto update_arith;
    case POWERPC_EXCP_FP_VXVC:
        /* Ordered comparison of NaN */
        env->fpscr |= 1 << FPSCR_VXVC;
        env->fpscr &= ~(0xF << FPSCR_FPCC);
        env->fpscr |= 0x11 << FPSCR_FPCC;
        /* We must update the target FPR before raising the exception */
        if (ve != 0) {
            env->exception_index = POWERPC_EXCP_PROGRAM;
            env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_VXVC;
            /* Update the floating-point enabled exception summary */
            env->fpscr |= 1 << FPSCR_FEX;
            /* Exception is deferred */
        }
        break;
    case POWERPC_EXCP_FP_VXSQRT:
        /* Square root of a negative number */
        env->fpscr |= 1 << FPSCR_VXSQRT;
    update_arith:
        env->fpscr &= ~((1 << FPSCR_FR) | (1 << FPSCR_FI));
        if (ve == 0) {
            /* Set the result to quiet NaN */
            ret = 0xFFF8000000000000ULL;
            env->fpscr &= ~(0xF << FPSCR_FPCC);
            env->fpscr |= 0x11 << FPSCR_FPCC;
        }
        break;
    case POWERPC_EXCP_FP_VXCVI:
        /* Invalid conversion */
        env->fpscr |= 1 << FPSCR_VXCVI;
        env->fpscr &= ~((1 << FPSCR_FR) | (1 << FPSCR_FI));
        if (ve == 0) {
            /* Set the result to quiet NaN */
            ret = 0xFFF8000000000000ULL;
            env->fpscr &= ~(0xF << FPSCR_FPCC);
            env->fpscr |= 0x11 << FPSCR_FPCC;
        }
        break;
    }
    /* Update the floating-point invalid operation summary */
    env->fpscr |= 1 << FPSCR_VX;
    /* Update the floating-point exception summary */
    env->fpscr |= 1 << FPSCR_FX;
    if (ve != 0) {
        /* Update the floating-point enabled exception summary */
        env->fpscr |= 1 << FPSCR_FEX;
        if (msr_fe0 != 0 || msr_fe1 != 0)
            helper_raise_exception_err(POWERPC_EXCP_PROGRAM, POWERPC_EXCP_FP | op);
    }
    return ret;
}

static inline void float_zero_divide_excp(void)
{
    env->fpscr |= 1 << FPSCR_ZX;
    env->fpscr &= ~((1 << FPSCR_FR) | (1 << FPSCR_FI));
    /* Update the floating-point exception summary */
    env->fpscr |= 1 << FPSCR_FX;
    if (fpscr_ze != 0) {
        /* Update the floating-point enabled exception summary */
        env->fpscr |= 1 << FPSCR_FEX;
        if (msr_fe0 != 0 || msr_fe1 != 0) {
            helper_raise_exception_err(POWERPC_EXCP_PROGRAM,
                                       POWERPC_EXCP_FP | POWERPC_EXCP_FP_ZX);
        }
    }
}

static inline void float_overflow_excp(void)
{
    env->fpscr |= 1 << FPSCR_OX;
    /* Update the floating-point exception summary */
    env->fpscr |= 1 << FPSCR_FX;
    if (fpscr_oe != 0) {
        /* XXX: should adjust the result */
        /* Update the floating-point enabled exception summary */
        env->fpscr |= 1 << FPSCR_FEX;
        /* We must update the target FPR before raising the exception */
        env->exception_index = POWERPC_EXCP_PROGRAM;
        env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_OX;
    } else {
        env->fpscr |= 1 << FPSCR_XX;
        env->fpscr |= 1 << FPSCR_FI;
    }
}

static inline void float_underflow_excp(void)
{
    env->fpscr |= 1 << FPSCR_UX;
    /* Update the floating-point exception summary */
    env->fpscr |= 1 << FPSCR_FX;
    if (fpscr_ue != 0) {
        /* XXX: should adjust the result */
        /* Update the floating-point enabled exception summary */
        env->fpscr |= 1 << FPSCR_FEX;
        /* We must update the target FPR before raising the exception */
        env->exception_index = POWERPC_EXCP_PROGRAM;
        env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_UX;
    }
}

static inline void float_inexact_excp(void)
{
    env->fpscr |= 1 << FPSCR_XX;
    /* Update the floating-point exception summary */
    env->fpscr |= 1 << FPSCR_FX;
    if (fpscr_xe != 0) {
        /* Update the floating-point enabled exception summary */
        env->fpscr |= 1 << FPSCR_FEX;
        /* We must update the target FPR before raising the exception */
        env->exception_index = POWERPC_EXCP_PROGRAM;
        env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_XX;
    }
}

static inline void fpscr_set_rounding_mode(void)
{
    int rnd_type;

    /* Set rounding mode */
    switch (fpscr_rn) {
    case 0:
        /* Best approximation (round to nearest) */
        rnd_type = float_round_nearest_even;
        break;
    case 1:
        /* Smaller magnitude (round toward zero) */
        rnd_type = float_round_to_zero;
        break;
    case 2:
        /* Round toward +infinity */
        rnd_type = float_round_up;
        break;
    default:
    case 3:
        /* Round toward -infinity */
        rnd_type = float_round_down;
        break;
    }
    set_float_rounding_mode(rnd_type, &env->fp_status);
}

void helper_fpscr_clrbit (uint32_t bit)
{
    int prev;

    prev = (env->fpscr >> bit) & 1;
    env->fpscr &= ~(1 << bit);
    if (prev == 1) {
        switch (bit) {
        case FPSCR_RN1:
        case FPSCR_RN:
            fpscr_set_rounding_mode();
            break;
        default:
            break;
        }
    }
}

void helper_fpscr_setbit (uint32_t bit)
{
    int prev;

    prev = (env->fpscr >> bit) & 1;
    env->fpscr |= 1 << bit;
    if (prev == 0) {
        switch (bit) {
        case FPSCR_VX:
            env->fpscr |= 1 << FPSCR_FX;
            if (fpscr_ve)
                goto raise_ve;
            break;
        case FPSCR_OX:
            env->fpscr |= 1 << FPSCR_FX;
            if (fpscr_oe)
                goto raise_oe;
            break;
        case FPSCR_UX:
            env->fpscr |= 1 << FPSCR_FX;
            if (fpscr_ue)
                goto raise_ue;
            break;
        case FPSCR_ZX:
            env->fpscr |= 1 << FPSCR_FX;
            if (fpscr_ze)
                goto raise_ze;
            break;
        case FPSCR_XX:
            env->fpscr |= 1 << FPSCR_FX;
            if (fpscr_xe)
                goto raise_xe;
            break;
        case FPSCR_VXSNAN:
        case FPSCR_VXISI:
        case FPSCR_VXIDI:
        case FPSCR_VXZDZ:
        case FPSCR_VXIMZ:
        case FPSCR_VXVC:
        case FPSCR_VXSOFT:
        case FPSCR_VXSQRT:
        case FPSCR_VXCVI:
            env->fpscr |= 1 << FPSCR_VX;
            env->fpscr |= 1 << FPSCR_FX;
            if (fpscr_ve != 0)
                goto raise_ve;
            break;
        case FPSCR_VE:
            if (fpscr_vx != 0) {
            raise_ve:
                env->error_code = POWERPC_EXCP_FP;
                if (fpscr_vxsnan)
                    env->error_code |= POWERPC_EXCP_FP_VXSNAN;
                if (fpscr_vxisi)
                    env->error_code |= POWERPC_EXCP_FP_VXISI;
                if (fpscr_vxidi)
                    env->error_code |= POWERPC_EXCP_FP_VXIDI;
                if (fpscr_vxzdz)
                    env->error_code |= POWERPC_EXCP_FP_VXZDZ;
                if (fpscr_vximz)
                    env->error_code |= POWERPC_EXCP_FP_VXIMZ;
                if (fpscr_vxvc)
                    env->error_code |= POWERPC_EXCP_FP_VXVC;
                if (fpscr_vxsoft)
                    env->error_code |= POWERPC_EXCP_FP_VXSOFT;
                if (fpscr_vxsqrt)
                    env->error_code |= POWERPC_EXCP_FP_VXSQRT;
                if (fpscr_vxcvi)
                    env->error_code |= POWERPC_EXCP_FP_VXCVI;
                goto raise_excp;
            }
            break;
        case FPSCR_OE:
            if (fpscr_ox != 0) {
            raise_oe:
                env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_OX;
                goto raise_excp;
            }
            break;
        case FPSCR_UE:
            if (fpscr_ux != 0) {
            raise_ue:
                env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_UX;
                goto raise_excp;
            }
            break;
        case FPSCR_ZE:
            if (fpscr_zx != 0) {
            raise_ze:
                env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_ZX;
                goto raise_excp;
            }
            break;
        case FPSCR_XE:
            if (fpscr_xx != 0) {
            raise_xe:
                env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_XX;
                goto raise_excp;
            }
            break;
        case FPSCR_RN1:
        case FPSCR_RN:
            fpscr_set_rounding_mode();
            break;
        default:
            break;
        raise_excp:
            /* Update the floating-point enabled exception summary */
            env->fpscr |= 1 << FPSCR_FEX;
            /* We have to update Rc1 before raising the exception */
            env->exception_index = POWERPC_EXCP_PROGRAM;
            break;
        }
    }
}

void helper_store_fpscr (uint64_t arg, uint32_t mask)
{
    /*
     * We use only the 32 LSB of the incoming fpr
     */
    uint32_t prev, new;
    int i;

    prev = env->fpscr;
    new = (uint32_t)arg;
    new &= ~0x60000000;
    new |= prev & 0x60000000;
    for (i = 0; i < 8; i++) {
        if (mask & (1 << i)) {
            env->fpscr &= ~(0xF << (4 * i));
            env->fpscr |= new & (0xF << (4 * i));
        }
    }
    /* Update VX and FEX */
    if (fpscr_ix != 0)
        env->fpscr |= 1 << FPSCR_VX;
    else
        env->fpscr &= ~(1 << FPSCR_VX);
    if ((fpscr_ex & fpscr_eex) != 0) {
        env->fpscr |= 1 << FPSCR_FEX;
        env->exception_index = POWERPC_EXCP_PROGRAM;
        /* XXX: we should compute it properly */
        env->error_code = POWERPC_EXCP_FP;
    }
    else
        env->fpscr &= ~(1 << FPSCR_FEX);
    fpscr_set_rounding_mode();
}

void helper_float_check_status (void)
{
#ifdef CONFIG_SOFTFLOAT
    if (env->exception_index == POWERPC_EXCP_PROGRAM &&
        (env->error_code & POWERPC_EXCP_FP)) {
        /* Deferred floating-point exception after target FPR update */
        if (msr_fe0 != 0 || msr_fe1 != 0)
            helper_raise_exception_err(env->exception_index, env->error_code);
    } else {
        int status = get_float_exception_flags(&env->fp_status);
        if (status & float_flag_divbyzero) {
            float_zero_divide_excp();
        } else if (status & float_flag_overflow) {
            float_overflow_excp();
        } else if (status & float_flag_underflow) {
            float_underflow_excp();
        } else if (status & float_flag_inexact) {
            float_inexact_excp();
        }
    }
#else
    if (env->exception_index == POWERPC_EXCP_PROGRAM &&
        (env->error_code & POWERPC_EXCP_FP)) {
        /* Deferred floating-point exception after target FPR update */
        if (msr_fe0 != 0 || msr_fe1 != 0)
            helper_raise_exception_err(env->exception_index, env->error_code);
    }
#endif
}

#ifdef CONFIG_SOFTFLOAT
void helper_reset_fpstatus (void)
{
    set_float_exception_flags(0, &env->fp_status);
}
#endif

/* fadd - fadd. */
uint64_t helper_fadd (uint64_t arg1, uint64_t arg2)
{
    CPU_DoubleU farg1, farg2;

    farg1.ll = arg1;
    farg2.ll = arg2;
#if USE_PRECISE_EMULATION
    if (unlikely(float64_is_signaling_nan(farg1.d) ||
                 float64_is_signaling_nan(farg2.d))) {
        /* sNaN addition */
        farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
    } else if (unlikely(float64_is_infinity(farg1.d) && float64_is_infinity(farg2.d) &&
                        float64_is_neg(farg1.d) != float64_is_neg(farg2.d))) {
        /* Magnitude subtraction of infinities */
        farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXISI);
    } else {
        farg1.d = float64_add(farg1.d, farg2.d, &env->fp_status);
    }
#else
    farg1.d = float64_add(farg1.d, farg2.d, &env->fp_status);
#endif
    return farg1.ll;
}

/* fsub - fsub. */
uint64_t helper_fsub (uint64_t arg1, uint64_t arg2)
{
    CPU_DoubleU farg1, farg2;

    farg1.ll = arg1;
    farg2.ll = arg2;
#if USE_PRECISE_EMULATION
    if (unlikely(float64_is_signaling_nan(farg1.d) ||
                 float64_is_signaling_nan(farg2.d))) {
        /* sNaN subtraction */
        farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
    } else if (unlikely(float64_is_infinity(farg1.d) && float64_is_infinity(farg2.d) &&
                        float64_is_neg(farg1.d) == float64_is_neg(farg2.d))) {
        /* Magnitude subtraction of infinities */
        farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXISI);
    } else {
        farg1.d = float64_sub(farg1.d, farg2.d, &env->fp_status);
    }
#else
    farg1.d = float64_sub(farg1.d, farg2.d, &env->fp_status);
#endif
    return farg1.ll;
}

/* fmul - fmul. */
uint64_t helper_fmul (uint64_t arg1, uint64_t arg2)
{
    CPU_DoubleU farg1, farg2;

    farg1.ll = arg1;
    farg2.ll = arg2;
#if USE_PRECISE_EMULATION
    if (unlikely(float64_is_signaling_nan(farg1.d) ||
                 float64_is_signaling_nan(farg2.d))) {
        /* sNaN multiplication */
        farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
    } else if (unlikely((float64_is_infinity(farg1.d) && float64_is_zero(farg2.d)) ||
                        (float64_is_zero(farg1.d) && float64_is_infinity(farg2.d)))) {
        /* Multiplication of zero by infinity */
        farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXIMZ);
    } else {
        farg1.d = float64_mul(farg1.d, farg2.d, &env->fp_status);
    }
#else
    farg1.d = float64_mul(farg1.d, farg2.d, &env->fp_status);
#endif
    return farg1.ll;
}

/* fdiv - fdiv. */
uint64_t helper_fdiv (uint64_t arg1, uint64_t arg2)
{
    CPU_DoubleU farg1, farg2;

    farg1.ll = arg1;
    farg2.ll = arg2;
#if USE_PRECISE_EMULATION
    if (unlikely(float64_is_signaling_nan(farg1.d) ||
                 float64_is_signaling_nan(farg2.d))) {
        /* sNaN division */
        farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
    } else if (unlikely(float64_is_infinity(farg1.d) && float64_is_infinity(farg2.d))) {
        /* Division of infinity by infinity */
        farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXIDI);
    } else if (unlikely(float64_is_zero(farg1.d) && float64_is_zero(farg2.d))) {
        /* Division of zero by zero */
        farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXZDZ);
    } else {
        farg1.d = float64_div(farg1.d, farg2.d, &env->fp_status);
    }
#else
    farg1.d = float64_div(farg1.d, farg2.d, &env->fp_status);
#endif
    return farg1.ll;
}

/* fabs */
uint64_t helper_fabs (uint64_t arg)
{
    CPU_DoubleU farg;

    farg.ll = arg;
    farg.d = float64_abs(farg.d);
    return farg.ll;
}

/* fnabs */
uint64_t helper_fnabs (uint64_t arg)
{
    CPU_DoubleU farg;

    farg.ll = arg;
    farg.d = float64_abs(farg.d);
    farg.d = float64_chs(farg.d);
    return farg.ll;
}

/* fneg */
uint64_t helper_fneg (uint64_t arg)
{
    CPU_DoubleU farg;

    farg.ll = arg;
    farg.d = float64_chs(farg.d);
    return farg.ll;
}

/* fctiw - fctiw. */
uint64_t helper_fctiw (uint64_t arg)
{
    CPU_DoubleU farg;

    farg.ll = arg;

    if (unlikely(float64_is_signaling_nan(farg.d))) {
        /* sNaN conversion */
        farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN | POWERPC_EXCP_FP_VXCVI);
    } else if (unlikely(float64_is_nan(farg.d) || float64_is_infinity(farg.d))) {
        /* qNan / infinity conversion */
        farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXCVI);
    } else {
        farg.ll = float64_to_int32(farg.d, &env->fp_status);
#if USE_PRECISE_EMULATION
        /* XXX: higher bits are not supposed to be significant.
         *     to make tests easier, return the same as a real PowerPC 750
         */
        farg.ll |= 0xFFF80000ULL << 32;
#endif
    }
    return farg.ll;
}

/* fctiwz - fctiwz. */
uint64_t helper_fctiwz (uint64_t arg)
{
    CPU_DoubleU farg;

    farg.ll = arg;

    if (unlikely(float64_is_signaling_nan(farg.d))) {
        /* sNaN conversion */
        farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN | POWERPC_EXCP_FP_VXCVI);
    } else if (unlikely(float64_is_nan(farg.d) || float64_is_infinity(farg.d))) {
        /* qNan / infinity conversion */
        farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXCVI);
    } else {
        farg.ll = float64_to_int32_round_to_zero(farg.d, &env->fp_status);
#if USE_PRECISE_EMULATION
        /* XXX: higher bits are not supposed to be significant.
         *     to make tests easier, return the same as a real PowerPC 750
         */
        farg.ll |= 0xFFF80000ULL << 32;
#endif
    }
    return farg.ll;
}

#if defined(TARGET_PPC64)
/* fcfid - fcfid. */
uint64_t helper_fcfid (uint64_t arg)
{
    CPU_DoubleU farg;

    farg.d = int64_to_float64(arg, &env->fp_status);
    return farg.ll;
}

/* fctid - fctid. */
uint64_t helper_fctid (uint64_t arg)
{
    CPU_DoubleU farg;

    farg.ll = arg;

    if (unlikely(float64_is_signaling_nan(farg.d))) {
        /* sNaN conversion */
        farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN | POWERPC_EXCP_FP_VXCVI);
    } else if (unlikely(float64_is_nan(farg.d) || float64_is_infinity(farg.d))) {
        /* qNan / infinity conversion */
        farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXCVI);
    } else {
        farg.ll = float64_to_int64(farg.d, &env->fp_status);
    }
    return farg.ll;
}

/* fctidz - fctidz. */
uint64_t helper_fctidz (uint64_t arg)
{
    CPU_DoubleU farg;

    farg.ll = arg;

    if (unlikely(float64_is_signaling_nan(farg.d))) {
        /* sNaN conversion */
        farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN | POWERPC_EXCP_FP_VXCVI);
    } else if (unlikely(float64_is_nan(farg.d) || float64_is_infinity(farg.d))) {
        /* qNan / infinity conversion */
        farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXCVI);
    } else {
        farg.ll = float64_to_int64_round_to_zero(farg.d, &env->fp_status);
    }
    return farg.ll;
}
#endif

static inline uint64_t do_fri(uint64_t arg, int rounding_mode)
{
    CPU_DoubleU farg;

    farg.ll = arg;

    if (unlikely(float64_is_signaling_nan(farg.d))) {
        /* sNaN round */
        farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN | POWERPC_EXCP_FP_VXCVI);
    } else if (unlikely(float64_is_nan(farg.d) || float64_is_infinity(farg.d))) {
        /* qNan / infinity round */
        farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXCVI);
    } else {
        set_float_rounding_mode(rounding_mode, &env->fp_status);
        farg.ll = float64_round_to_int(farg.d, &env->fp_status);
        /* Restore rounding mode from FPSCR */
        fpscr_set_rounding_mode();
    }
    return farg.ll;
}

uint64_t helper_frin (uint64_t arg)
{
    return do_fri(arg, float_round_nearest_even);
}

uint64_t helper_friz (uint64_t arg)
{
    return do_fri(arg, float_round_to_zero);
}

uint64_t helper_frip (uint64_t arg)
{
    return do_fri(arg, float_round_up);
}

uint64_t helper_frim (uint64_t arg)
{
    return do_fri(arg, float_round_down);
}

/* fmadd - fmadd. */
uint64_t helper_fmadd (uint64_t arg1, uint64_t arg2, uint64_t arg3)
{
    CPU_DoubleU farg1, farg2, farg3;

    farg1.ll = arg1;
    farg2.ll = arg2;
    farg3.ll = arg3;
#if USE_PRECISE_EMULATION
    if (unlikely(float64_is_signaling_nan(farg1.d) ||
                 float64_is_signaling_nan(farg2.d) ||
                 float64_is_signaling_nan(farg3.d))) {
        /* sNaN operation */
        farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
    } else if (unlikely((float64_is_infinity(farg1.d) && float64_is_zero(farg2.d)) ||
                        (float64_is_zero(farg1.d) && float64_is_infinity(farg2.d)))) {
        /* Multiplication of zero by infinity */
        farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXIMZ);
    } else {
#ifdef FLOAT128
        /* This is the way the PowerPC specification defines it */
        float128 ft0_128, ft1_128;

        ft0_128 = float64_to_float128(farg1.d, &env->fp_status);
        ft1_128 = float64_to_float128(farg2.d, &env->fp_status);
        ft0_128 = float128_mul(ft0_128, ft1_128, &env->fp_status);
        if (unlikely(float128_is_infinity(ft0_128) && float64_is_infinity(farg3.d) &&
                     float128_is_neg(ft0_128) != float64_is_neg(farg3.d))) {
            /* Magnitude subtraction of infinities */
            farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXISI);
        } else {
            ft1_128 = float64_to_float128(farg3.d, &env->fp_status);
            ft0_128 = float128_add(ft0_128, ft1_128, &env->fp_status);
            farg1.d = float128_to_float64(ft0_128, &env->fp_status);
        }
#else
        /* This is OK on x86 hosts */
        farg1.d = (farg1.d * farg2.d) + farg3.d;
#endif
    }
#else
    farg1.d = float64_mul(farg1.d, farg2.d, &env->fp_status);
    farg1.d = float64_add(farg1.d, farg3.d, &env->fp_status);
#endif
    return farg1.ll;
}

/* fmsub - fmsub. */
uint64_t helper_fmsub (uint64_t arg1, uint64_t arg2, uint64_t arg3)
{
    CPU_DoubleU farg1, farg2, farg3;

    farg1.ll = arg1;
    farg2.ll = arg2;
    farg3.ll = arg3;
#if USE_PRECISE_EMULATION
    if (unlikely(float64_is_signaling_nan(farg1.d) ||
                 float64_is_signaling_nan(farg2.d) ||
                 float64_is_signaling_nan(farg3.d))) {
        /* sNaN operation */
        farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
    } else if (unlikely((float64_is_infinity(farg1.d) && float64_is_zero(farg2.d)) ||
                        (float64_is_zero(farg1.d) && float64_is_infinity(farg2.d)))) {
        /* Multiplication of zero by infinity */
        farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXIMZ);
    } else {
#ifdef FLOAT128
        /* This is the way the PowerPC specification defines it */
        float128 ft0_128, ft1_128;

        ft0_128 = float64_to_float128(farg1.d, &env->fp_status);
        ft1_128 = float64_to_float128(farg2.d, &env->fp_status);
        ft0_128 = float128_mul(ft0_128, ft1_128, &env->fp_status);
        if (unlikely(float128_is_infinity(ft0_128) && float64_is_infinity(farg3.d) &&
                     float128_is_neg(ft0_128) == float64_is_neg(farg3.d))) {
            /* Magnitude subtraction of infinities */
            farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXISI);
        } else {
            ft1_128 = float64_to_float128(farg3.d, &env->fp_status);
            ft0_128 = float128_sub(ft0_128, ft1_128, &env->fp_status);
            farg1.d = float128_to_float64(ft0_128, &env->fp_status);
        }
#else
        /* This is OK on x86 hosts */
        farg1.d = (farg1.d * farg2.d) - farg3.d;
#endif
    }
#else
    farg1.d = float64_mul(farg1.d, farg2.d, &env->fp_status);
    farg1.d = float64_sub(farg1.d, farg3.d, &env->fp_status);
#endif
    return farg1.ll;
}

/* fnmadd - fnmadd. */
uint64_t helper_fnmadd (uint64_t arg1, uint64_t arg2, uint64_t arg3)
{
    CPU_DoubleU farg1, farg2, farg3;

    farg1.ll = arg1;
    farg2.ll = arg2;
    farg3.ll = arg3;

    if (unlikely(float64_is_signaling_nan(farg1.d) ||
                 float64_is_signaling_nan(farg2.d) ||
                 float64_is_signaling_nan(farg3.d))) {
        /* sNaN operation */
        farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
    } else if (unlikely((float64_is_infinity(farg1.d) && float64_is_zero(farg2.d)) ||
                        (float64_is_zero(farg1.d) && float64_is_infinity(farg2.d)))) {
        /* Multiplication of zero by infinity */
        farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXIMZ);
    } else {
#if USE_PRECISE_EMULATION
#ifdef FLOAT128
        /* This is the way the PowerPC specification defines it */
        float128 ft0_128, ft1_128;

        ft0_128 = float64_to_float128(farg1.d, &env->fp_status);
        ft1_128 = float64_to_float128(farg2.d, &env->fp_status);
        ft0_128 = float128_mul(ft0_128, ft1_128, &env->fp_status);
        if (unlikely(float128_is_infinity(ft0_128) && float64_is_infinity(farg3.d) &&
                     float128_is_neg(ft0_128) != float64_is_neg(farg3.d))) {
            /* Magnitude subtraction of infinities */
            farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXISI);
        } else {
            ft1_128 = float64_to_float128(farg3.d, &env->fp_status);
            ft0_128 = float128_add(ft0_128, ft1_128, &env->fp_status);
            farg1.d = float128_to_float64(ft0_128, &env->fp_status);
        }
#else
        /* This is OK on x86 hosts */
        farg1.d = (farg1.d * farg2.d) + farg3.d;
#endif
#else
        farg1.d = float64_mul(farg1.d, farg2.d, &env->fp_status);
        farg1.d = float64_add(farg1.d, farg3.d, &env->fp_status);
#endif
        if (likely(!float64_is_nan(farg1.d)))
            farg1.d = float64_chs(farg1.d);
    }
    return farg1.ll;
}

/* fnmsub - fnmsub. */
uint64_t helper_fnmsub (uint64_t arg1, uint64_t arg2, uint64_t arg3)
{
    CPU_DoubleU farg1, farg2, farg3;

    farg1.ll = arg1;
    farg2.ll = arg2;
    farg3.ll = arg3;

    if (unlikely(float64_is_signaling_nan(farg1.d) ||
                 float64_is_signaling_nan(farg2.d) ||
                 float64_is_signaling_nan(farg3.d))) {
        /* sNaN operation */
        farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
    } else if (unlikely((float64_is_infinity(farg1.d) && float64_is_zero(farg2.d)) ||
                        (float64_is_zero(farg1.d) && float64_is_infinity(farg2.d)))) {
        /* Multiplication of zero by infinity */
        farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXIMZ);
    } else {
#if USE_PRECISE_EMULATION
#ifdef FLOAT128
        /* This is the way the PowerPC specification defines it */
        float128 ft0_128, ft1_128;

        ft0_128 = float64_to_float128(farg1.d, &env->fp_status);
        ft1_128 = float64_to_float128(farg2.d, &env->fp_status);
        ft0_128 = float128_mul(ft0_128, ft1_128, &env->fp_status);
        if (unlikely(float128_is_infinity(ft0_128) && float64_is_infinity(farg3.d) &&
                     float128_is_neg(ft0_128) == float64_is_neg(farg3.d))) {
            /* Magnitude subtraction of infinities */
            farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXISI);
        } else {
            ft1_128 = float64_to_float128(farg3.d, &env->fp_status);
            ft0_128 = float128_sub(ft0_128, ft1_128, &env->fp_status);
            farg1.d = float128_to_float64(ft0_128, &env->fp_status);
        }
#else
        /* This is OK on x86 hosts */
        farg1.d = (farg1.d * farg2.d) - farg3.d;
#endif
#else
        farg1.d = float64_mul(farg1.d, farg2.d, &env->fp_status);
        farg1.d = float64_sub(farg1.d, farg3.d, &env->fp_status);
#endif
        if (likely(!float64_is_nan(farg1.d)))
            farg1.d = float64_chs(farg1.d);
    }
    return farg1.ll;
}

/* frsp - frsp. */
uint64_t helper_frsp (uint64_t arg)
{
    CPU_DoubleU farg;
    float32 f32;

    farg.ll = arg;

#if USE_PRECISE_EMULATION
    if (unlikely(float64_is_signaling_nan(farg.d))) {
        /* sNaN round to single */
        farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
    } else {
        f32 = float64_to_float32(farg.d, &env->fp_status);
        farg.d = float32_to_float64(f32, &env->fp_status);
    }
#else
    f32 = float64_to_float32(farg.d, &env->fp_status);
    farg.d = float32_to_float64(f32, &env->fp_status);
#endif
    return farg.ll;
}

/* fsqrt - fsqrt. */
uint64_t helper_fsqrt (uint64_t arg)
{
    CPU_DoubleU farg;

    farg.ll = arg;

    if (unlikely(float64_is_signaling_nan(farg.d))) {
        /* sNaN square root */
        farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
    } else if (unlikely(float64_is_neg(farg.d) && !float64_is_zero(farg.d))) {
        /* Square root of a negative nonzero number */
        farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSQRT);
    } else {
        farg.d = float64_sqrt(farg.d, &env->fp_status);
    }
    return farg.ll;
}

/* fre - fre. */
uint64_t helper_fre (uint64_t arg)
{
    CPU_DoubleU farg;

    farg.ll = arg;

    if (unlikely(float64_is_signaling_nan(farg.d))) {
        /* sNaN reciprocal */
        farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
    } else {
        farg.d = float64_div(float64_one, farg.d, &env->fp_status);
    }
    return farg.ll;
}

/* fres - fres. */
uint64_t helper_fres (uint64_t arg)
{
    CPU_DoubleU farg;
    float32 f32;

    farg.ll = arg;

    if (unlikely(float64_is_signaling_nan(farg.d))) {
        /* sNaN reciprocal */
        farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
    } else {
        farg.d = float64_div(float64_one, farg.d, &env->fp_status);
        f32 = float64_to_float32(farg.d, &env->fp_status);
        farg.d = float32_to_float64(f32, &env->fp_status);
    }
    return farg.ll;
}

/* frsqrte - frsqrte. */
uint64_t helper_frsqrte (uint64_t arg)
{
    CPU_DoubleU farg;
    float32 f32;

    farg.ll = arg;

    if (unlikely(float64_is_signaling_nan(farg.d))) {
        /* sNaN reciprocal square root */
        farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
    } else if (unlikely(float64_is_neg(farg.d) && !float64_is_zero(farg.d))) {
        /* Reciprocal square root of a negative nonzero number */
        farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSQRT);
    } else {
        farg.d = float64_sqrt(farg.d, &env->fp_status);
        farg.d = float64_div(float64_one, farg.d, &env->fp_status);
        f32 = float64_to_float32(farg.d, &env->fp_status);
        farg.d = float32_to_float64(f32, &env->fp_status);
    }
    return farg.ll;
}

/* fsel - fsel. */
uint64_t helper_fsel (uint64_t arg1, uint64_t arg2, uint64_t arg3)
{
    CPU_DoubleU farg1;

    farg1.ll = arg1;

    if ((!float64_is_neg(farg1.d) || float64_is_zero(farg1.d)) && !float64_is_nan(farg1.d))
        return arg2;
    else
        return arg3;
}

void helper_fcmpu (uint64_t arg1, uint64_t arg2, uint32_t crfD)
{
    CPU_DoubleU farg1, farg2;
    uint32_t ret = 0;

    farg1.ll = arg1;
    farg2.ll = arg2;

    if (unlikely(float64_is_nan(farg1.d) ||
                 float64_is_nan(farg2.d))) {
        ret = 0x01UL;
    } else if (float64_lt(farg1.d, farg2.d, &env->fp_status)) {
        ret = 0x08UL;
    } else if (!float64_le(farg1.d, farg2.d, &env->fp_status)) {
        ret = 0x04UL;
    } else {
        ret = 0x02UL;
    }

    env->fpscr &= ~(0x0F << FPSCR_FPRF);
    env->fpscr |= ret << FPSCR_FPRF;
    env->crf[crfD] = ret;
    if (unlikely(ret == 0x01UL
                 && (float64_is_signaling_nan(farg1.d) ||
                     float64_is_signaling_nan(farg2.d)))) {
        /* sNaN comparison */
        fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
    }
}

void helper_fcmpo (uint64_t arg1, uint64_t arg2, uint32_t crfD)
{
    CPU_DoubleU farg1, farg2;
    uint32_t ret = 0;

    farg1.ll = arg1;
    farg2.ll = arg2;

    if (unlikely(float64_is_nan(farg1.d) ||
                 float64_is_nan(farg2.d))) {
        ret = 0x01UL;
    } else if (float64_lt(farg1.d, farg2.d, &env->fp_status)) {
        ret = 0x08UL;
    } else if (!float64_le(farg1.d, farg2.d, &env->fp_status)) {
        ret = 0x04UL;
    } else {
        ret = 0x02UL;
    }

    env->fpscr &= ~(0x0F << FPSCR_FPRF);
    env->fpscr |= ret << FPSCR_FPRF;
    env->crf[crfD] = ret;
    if (unlikely (ret == 0x01UL)) {
        if (float64_is_signaling_nan(farg1.d) ||
            float64_is_signaling_nan(farg2.d)) {
            /* sNaN comparison */
            fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN |
                                  POWERPC_EXCP_FP_VXVC);
        } else {
            /* qNaN comparison */
            fload_invalid_op_excp(POWERPC_EXCP_FP_VXVC);
        }
    }
}

#if !defined (CONFIG_USER_ONLY)
void helper_store_msr (target_ulong val)
{
    val = hreg_store_msr(env, val, 0);
    if (val != 0) {
        env->interrupt_request |= CPU_INTERRUPT_EXITTB;
        helper_raise_exception(val);
    }
}

static inline void do_rfi(target_ulong nip, target_ulong msr,
                          target_ulong msrm, int keep_msrh)
{
#if defined(TARGET_PPC64)
    if (msr & (1ULL << MSR_SF)) {
        nip = (uint64_t)nip;
        msr &= (uint64_t)msrm;
    } else {
        nip = (uint32_t)nip;
        msr = (uint32_t)(msr & msrm);
        if (keep_msrh)
            msr |= env->msr & ~((uint64_t)0xFFFFFFFF);
    }
#else
    nip = (uint32_t)nip;
    msr &= (uint32_t)msrm;
#endif
    /* XXX: beware: this is false if VLE is supported */
    env->nip = nip & ~((target_ulong)0x00000003);
    hreg_store_msr(env, msr, 1);
#if defined (DEBUG_OP)
    cpu_dump_rfi(env->nip, env->msr);
#endif
    /* No need to raise an exception here,
     * as rfi is always the last insn of a TB
     */
    env->interrupt_request |= CPU_INTERRUPT_EXITTB;
}

void helper_rfi (void)
{
    do_rfi(env->spr[SPR_SRR0], env->spr[SPR_SRR1],
           ~((target_ulong)0x783F0000), 1);
}

#if defined(TARGET_PPC64)
void helper_rfid (void)
{
    do_rfi(env->spr[SPR_SRR0], env->spr[SPR_SRR1],
           ~((target_ulong)0x783F0000), 0);
}

void helper_hrfid (void)
{
    do_rfi(env->spr[SPR_HSRR0], env->spr[SPR_HSRR1],
           ~((target_ulong)0x783F0000), 0);
}
#endif
#endif

void helper_tw (target_ulong arg1, target_ulong arg2, uint32_t flags)
{
    if (!likely(!(((int32_t)arg1 < (int32_t)arg2 && (flags & 0x10)) ||
                  ((int32_t)arg1 > (int32_t)arg2 && (flags & 0x08)) ||
                  ((int32_t)arg1 == (int32_t)arg2 && (flags & 0x04)) ||
                  ((uint32_t)arg1 < (uint32_t)arg2 && (flags & 0x02)) ||
                  ((uint32_t)arg1 > (uint32_t)arg2 && (flags & 0x01))))) {
        helper_raise_exception_err(POWERPC_EXCP_PROGRAM, POWERPC_EXCP_TRAP);
    }
}

#if defined(TARGET_PPC64)
void helper_td (target_ulong arg1, target_ulong arg2, uint32_t flags)
{
    if (!likely(!(((int64_t)arg1 < (int64_t)arg2 && (flags & 0x10)) ||
                  ((int64_t)arg1 > (int64_t)arg2 && (flags & 0x08)) ||
                  ((int64_t)arg1 == (int64_t)arg2 && (flags & 0x04)) ||
                  ((uint64_t)arg1 < (uint64_t)arg2 && (flags & 0x02)) ||
                  ((uint64_t)arg1 > (uint64_t)arg2 && (flags & 0x01)))))
        helper_raise_exception_err(POWERPC_EXCP_PROGRAM, POWERPC_EXCP_TRAP);
}
#endif

/*****************************************************************************/
/* PowerPC 601 specific instructions (POWER bridge) */

target_ulong helper_clcs (uint32_t arg)
{
    switch (arg) {
    case 0x0CUL:
        /* Instruction cache line size */
        return env->icache_line_size;
        break;
    case 0x0DUL:
        /* Data cache line size */
        return env->dcache_line_size;
        break;
    case 0x0EUL:
        /* Minimum cache line size */
        return (env->icache_line_size < env->dcache_line_size) ?
                env->icache_line_size : env->dcache_line_size;
        break;
    case 0x0FUL:
        /* Maximum cache line size */
        return (env->icache_line_size > env->dcache_line_size) ?
                env->icache_line_size : env->dcache_line_size;
        break;
    default:
        /* Undefined */
        return 0;
        break;
    }
}

target_ulong helper_div (target_ulong arg1, target_ulong arg2)
{
    uint64_t tmp = (uint64_t)arg1 << 32 | env->spr[SPR_MQ];

    if (((int32_t)tmp == INT32_MIN && (int32_t)arg2 == (int32_t)-1) ||
        (int32_t)arg2 == 0) {
        env->spr[SPR_MQ] = 0;
        return INT32_MIN;
    } else {
        env->spr[SPR_MQ] = tmp % arg2;
        return tmp / (int32_t)arg2;
    }
}

target_ulong helper_divo (target_ulong arg1, target_ulong arg2)
{
    uint64_t tmp = (uint64_t)arg1 << 32 | env->spr[SPR_MQ];

    if (((int32_t)tmp == INT32_MIN && (int32_t)arg2 == (int32_t)-1) ||
        (int32_t)arg2 == 0) {
        env->xer |= (1 << XER_OV) | (1 << XER_SO);
        env->spr[SPR_MQ] = 0;
        return INT32_MIN;
    } else {
        env->spr[SPR_MQ] = tmp % arg2;
        tmp /= (int32_t)arg2;
        if ((int32_t)tmp != tmp) {
            env->xer |= (1 << XER_OV) | (1 << XER_SO);
        } else {
            env->xer &= ~(1 << XER_OV);
        }
        return tmp;
    }
}

target_ulong helper_divs (target_ulong arg1, target_ulong arg2)
{
    if (((int32_t)arg1 == INT32_MIN && (int32_t)arg2 == (int32_t)-1) ||
        (int32_t)arg2 == 0) {
        env->spr[SPR_MQ] = 0;
        return INT32_MIN;
    } else {
        env->spr[SPR_MQ] = (int32_t)arg1 % (int32_t)arg2;
        return (int32_t)arg1 / (int32_t)arg2;
    }
}

target_ulong helper_divso (target_ulong arg1, target_ulong arg2)
{
    if (((int32_t)arg1 == INT32_MIN && (int32_t)arg2 == (int32_t)-1) ||
        (int32_t)arg2 == 0) {
        env->xer |= (1 << XER_OV) | (1 << XER_SO);
        env->spr[SPR_MQ] = 0;
        return INT32_MIN;
    } else {
        env->xer &= ~(1 << XER_OV);
        env->spr[SPR_MQ] = (int32_t)arg1 % (int32_t)arg2;
        return (int32_t)arg1 / (int32_t)arg2;
    }
}
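
/* Worked example for the POWER divide pair (illustrative, not from the
 * original source): with arg1 = 0 and MQ = 10, helper_div forms the 64-bit
 * dividend (0 << 32) | 10 = 10, so dividing by 3 returns quotient 3 and
 * leaves the remainder 1 in MQ.  helper_divs uses only the 32-bit arg1 but
 * stores the remainder in MQ the same way.
 */
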
#if !defined (CONFIG_USER_ONLY)
target_ulong helper_rac (target_ulong addr)
{
    mmu_ctx_t ctx;
    int nb_BATs;
    target_ulong ret = 0;

    /* We don't have to generate many instances of this instruction,
     * as rac is supervisor only.
     */
    /* XXX: FIX THIS: Pretend we have no BAT */
    nb_BATs = env->nb_BATs;
    env->nb_BATs = 0;
    if (get_physical_address(env, &ctx, addr, 0, ACCESS_INT) == 0)
        ret = ctx.raddr;
    env->nb_BATs = nb_BATs;
    return ret;
}

void helper_rfsvc (void)
{
    do_rfi(env->lr, env->ctr, 0x0000FFFF, 0);
}
#endif

/*****************************************************************************/
/* 602 specific instructions */
/* mfrom is the most crazy instruction ever seen, imho ! */
/* Real implementation uses a ROM table. Do the same */
/* Extremely decomposed:
 *                      -arg / 256
 * return 256 * log10(10           + 1.0) + 0.5
 */
#if !defined (CONFIG_USER_ONLY)
target_ulong helper_602_mfrom (target_ulong arg)
{
    if (likely(arg < 602)) {
#include "mfrom_table.c"
        return mfrom_ROM_table[arg];
    } else {
        return 0;
    }
}
#endif
1828 /* Embedded PowerPC specific helpers */
1830 /* XXX: to be improved to check access rights when in user-mode */
1831 target_ulong
helper_load_dcr (target_ulong dcrn
)
1835 if (unlikely(env
->dcr_env
== NULL
)) {
1836 qemu_log("No DCR environment\n");
1837 helper_raise_exception_err(POWERPC_EXCP_PROGRAM
,
1838 POWERPC_EXCP_INVAL
| POWERPC_EXCP_INVAL_INVAL
);
1839 } else if (unlikely(ppc_dcr_read(env
->dcr_env
, (uint32_t)dcrn
, &val
) != 0)) {
1840 qemu_log("DCR read error %d %03x\n", (uint32_t)dcrn
, (uint32_t)dcrn
);
1841 helper_raise_exception_err(POWERPC_EXCP_PROGRAM
,
1842 POWERPC_EXCP_INVAL
| POWERPC_EXCP_PRIV_REG
);
1847 void helper_store_dcr (target_ulong dcrn
, target_ulong val
)
1849 if (unlikely(env
->dcr_env
== NULL
)) {
1850 qemu_log("No DCR environment\n");
1851 helper_raise_exception_err(POWERPC_EXCP_PROGRAM
,
1852 POWERPC_EXCP_INVAL
| POWERPC_EXCP_INVAL_INVAL
);
1853 } else if (unlikely(ppc_dcr_write(env
->dcr_env
, (uint32_t)dcrn
, (uint32_t)val
) != 0)) {
1854 qemu_log("DCR write error %d %03x\n", (uint32_t)dcrn
, (uint32_t)dcrn
);
1855 helper_raise_exception_err(POWERPC_EXCP_PROGRAM
,
1856 POWERPC_EXCP_INVAL
| POWERPC_EXCP_PRIV_REG
);
1860 #if !defined(CONFIG_USER_ONLY)
1861 void helper_40x_rfci (void)
1863 do_rfi(env
->spr
[SPR_40x_SRR2
], env
->spr
[SPR_40x_SRR3
],
1864 ~((target_ulong
)0xFFFF0000), 0);
1867 void helper_rfci (void)
1869 do_rfi(env
->spr
[SPR_BOOKE_CSRR0
], SPR_BOOKE_CSRR1
,
1870 ~((target_ulong
)0x3FFF0000), 0);
1873 void helper_rfdi (void)
1875 do_rfi(env
->spr
[SPR_BOOKE_DSRR0
], SPR_BOOKE_DSRR1
,
1876 ~((target_ulong
)0x3FFF0000), 0);
1879 void helper_rfmci (void)
1881 do_rfi(env
->spr
[SPR_BOOKE_MCSRR0
], SPR_BOOKE_MCSRR1
,
1882 ~((target_ulong
)0x3FFF0000), 0);
/* 440 specific */
target_ulong helper_dlmzb (target_ulong high, target_ulong low, uint32_t update_Rc)
{
    target_ulong mask;
    int i;

    i = 1;
    for (mask = 0xFF000000; mask != 0; mask = mask >> 8) {
        if ((high & mask) == 0) {
            if (update_Rc) {
                env->crf[0] = 0x4;
            }
            goto done;
        }
        i++;
    }
    for (mask = 0xFF000000; mask != 0; mask = mask >> 8) {
        if ((low & mask) == 0) {
            if (update_Rc) {
                env->crf[0] = 0x8;
            }
            goto done;
        }
        i++;
    }
    if (update_Rc) {
        env->crf[0] = 0x2;
    }
 done:
    env->xer = (env->xer & ~0x7F) | i;
    if (update_Rc) {
        env->crf[0] |= xer_so;
    }
    return i;
}
/*****************************************************************************/
/* Altivec extension helpers */
#if defined(HOST_WORDS_BIGENDIAN)
#define HI_IDX 0
#define LO_IDX 1
#else
#define HI_IDX 1
#define LO_IDX 0
#endif

#if defined(HOST_WORDS_BIGENDIAN)
#define VECTOR_FOR_INORDER_I(index, element)            \
    for (index = 0; index < ARRAY_SIZE(r->element); index++)
#else
#define VECTOR_FOR_INORDER_I(index, element)            \
    for (index = ARRAY_SIZE(r->element)-1; index >= 0; index--)
#endif

/* If X is a NaN, store the corresponding QNaN into RESULT.  Otherwise,
 * execute the following block.  */
#define DO_HANDLE_NAN(result, x)                                \
    if (float32_is_nan(x) || float32_is_signaling_nan(x)) {     \
        CPU_FloatU __f;                                         \
        __f.f = x;                                              \
        __f.l = __f.l | (1 << 22);  /* Set QNaN bit. */         \
        result = __f.f;                                         \
    } else

#define HANDLE_NAN1(result, x)                  \
    DO_HANDLE_NAN(result, x)
#define HANDLE_NAN2(result, x, y)               \
    DO_HANDLE_NAN(result, x) DO_HANDLE_NAN(result, y)
#define HANDLE_NAN3(result, x, y, z)            \
    DO_HANDLE_NAN(result, x) DO_HANDLE_NAN(result, y) DO_HANDLE_NAN(result, z)
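
/* Expansion example (illustrative): HANDLE_NAN2(r, x, y) { body; } chains
 * two if/else statements, so 'body' runs only when neither x nor y is a
 * NaN; a NaN input is quieted (bit 22 set) and copied into the result
 * instead.
 */
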
/* Saturating arithmetic helpers.  */
#define SATCVT(from, to, from_type, to_type, min, max, use_min, use_max) \
    static inline to_type cvt##from##to(from_type x, int *sat)          \
    {                                                                   \
        to_type r;                                                      \
        if (use_min && x < min) {                                       \
            r = min;                                                    \
            *sat = 1;                                                   \
        } else if (use_max && x > max) {                                \
            r = max;                                                    \
            *sat = 1;                                                   \
        } else {                                                        \
            r = x;                                                      \
        }                                                               \
        return r;                                                       \
    }
SATCVT(sh, sb, int16_t, int8_t, INT8_MIN, INT8_MAX, 1, 1)
SATCVT(sw, sh, int32_t, int16_t, INT16_MIN, INT16_MAX, 1, 1)
SATCVT(sd, sw, int64_t, int32_t, INT32_MIN, INT32_MAX, 1, 1)

/* Work around gcc problems with the macro version */
static inline uint8_t cvtuhub(uint16_t x, int *sat)
{
    uint8_t r;

    if (x > UINT8_MAX) {
        r = UINT8_MAX;
        *sat = 1;
    } else {
        r = x;
    }
    return r;
}
//SATCVT(uh, ub, uint16_t, uint8_t, 0, UINT8_MAX, 0, 1)
SATCVT(uw, uh, uint32_t, uint16_t, 0, UINT16_MAX, 0, 1)
SATCVT(ud, uw, uint64_t, uint32_t, 0, UINT32_MAX, 0, 1)
SATCVT(sh, ub, int16_t, uint8_t, 0, UINT8_MAX, 1, 1)
SATCVT(sw, uh, int32_t, uint16_t, 0, UINT16_MAX, 1, 1)
SATCVT(sd, uw, int64_t, uint32_t, 0, UINT32_MAX, 1, 1)
#undef SATCVT
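
/* Example (illustrative): cvtshub() converts int16_t to uint8_t with
 * saturation, so cvtshub(-5, &sat) yields 0 and cvtshub(300, &sat) yields
 * 255, setting *sat = 1 in both cases; in-range values pass through and
 * leave *sat untouched.
 */
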
#define LVE(name, access, swap, element)                        \
    void helper_##name (ppc_avr_t *r, target_ulong addr)        \
    {                                                           \
        size_t n_elems = ARRAY_SIZE(r->element);                \
        int adjust = HI_IDX*(n_elems-1);                        \
        int sh = sizeof(r->element[0]) >> 1;                    \
        int index = (addr & 0xf) >> sh;                         \
        if (msr_le) {                                           \
            r->element[LO_IDX ? index : (adjust - index)] = swap(access(addr)); \
        } else {                                                        \
            r->element[LO_IDX ? index : (adjust - index)] = access(addr); \
        }                                                               \
    }
#define I(x) (x)
LVE(lvebx, ldub, I, u8)
LVE(lvehx, lduw, bswap16, u16)
LVE(lvewx, ldl, bswap32, u32)
#undef I
#undef LVE

void helper_lvsl (ppc_avr_t *r, target_ulong sh)
{
    int i, j = (sh & 0xf);

    VECTOR_FOR_INORDER_I (i, u8) {
        r->u8[i] = j++;
    }
}

void helper_lvsr (ppc_avr_t *r, target_ulong sh)
{
    int i, j = 0x10 - (sh & 0xf);

    VECTOR_FOR_INORDER_I (i, u8) {
        r->u8[i] = j++;
    }
}
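
/* Example (illustrative): lvsl with an effective address whose low nibble
 * is 3 fills the permute vector with 0x03,0x04,...,0x12, while lvsr with
 * the same address starts at 0x10 - 3 = 0x0D and yields 0x0D,...,0x1C.
 */
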
#define STVE(name, access, swap, element)                       \
    void helper_##name (ppc_avr_t *r, target_ulong addr)        \
    {                                                           \
        size_t n_elems = ARRAY_SIZE(r->element);                \
        int adjust = HI_IDX*(n_elems-1);                        \
        int sh = sizeof(r->element[0]) >> 1;                    \
        int index = (addr & 0xf) >> sh;                         \
        if (msr_le) {                                           \
            access(addr, swap(r->element[LO_IDX ? index : (adjust - index)])); \
        } else {                                                        \
            access(addr, r->element[LO_IDX ? index : (adjust - index)]); \
        }                                                               \
    }
#define I(x) (x)
STVE(stvebx, stb, I, u8)
STVE(stvehx, stw, bswap16, u16)
STVE(stvewx, stl, bswap32, u32)
#undef I
#undef STVE

void helper_mtvscr (ppc_avr_t *r)
{
#if defined(HOST_WORDS_BIGENDIAN)
    env->vscr = r->u32[3];
#else
    env->vscr = r->u32[0];
#endif
    set_flush_to_zero(vscr_nj, &env->vec_status);
}

void helper_vaddcuw (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
{
    int i;

    for (i = 0; i < ARRAY_SIZE(r->u32); i++) {
        r->u32[i] = ~a->u32[i] < b->u32[i];
    }
}
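
/* Note on the carry-out trick above (illustrative): the 32-bit sum a + b
 * carries out exactly when b > ~a, i.e. when ~a->u32[i] < b->u32[i], so the
 * comparison alone produces the 0/1 carry vector without a wider addition;
 * e.g. a = 0xFFFFFFFF, b = 1 gives ~a = 0 < 1, carry = 1.
 */
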
#define VARITH_DO(name, op, element)                                    \
    void helper_v##name (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)      \
    {                                                                   \
        int i;                                                          \
        for (i = 0; i < ARRAY_SIZE(r->element); i++) {                  \
            r->element[i] = a->element[i] op b->element[i];             \
        }                                                               \
    }
#define VARITH(suffix, element)                 \
  VARITH_DO(add##suffix, +, element)            \
  VARITH_DO(sub##suffix, -, element)
VARITH(ubm, u8)
VARITH(uhm, u16)
VARITH(uwm, u32)
#undef VARITH_DO
#undef VARITH

#define VARITHFP(suffix, func)                                          \
    void helper_v##suffix (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)    \
    {                                                                   \
        int i;                                                          \
        for (i = 0; i < ARRAY_SIZE(r->f); i++) {                        \
            HANDLE_NAN2(r->f[i], a->f[i], b->f[i]) {                    \
                r->f[i] = func(a->f[i], b->f[i], &env->vec_status);     \
            }                                                           \
        }                                                               \
    }
VARITHFP(addfp, float32_add)
VARITHFP(subfp, float32_sub)
#undef VARITHFP

#define VARITHSAT_CASE(type, op, cvt, element)                          \
    {                                                                   \
        type result = (type)a->element[i] op (type)b->element[i];       \
        r->element[i] = cvt(result, &sat);                              \
    }

#define VARITHSAT_DO(name, op, optype, cvt, element)                    \
    void helper_v##name (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)      \
    {                                                                   \
        int sat = 0;                                                    \
        int i;                                                          \
        for (i = 0; i < ARRAY_SIZE(r->element); i++) {                  \
            switch (sizeof(r->element[0])) {                            \
            case 1: VARITHSAT_CASE(optype, op, cvt, element); break;    \
            case 2: VARITHSAT_CASE(optype, op, cvt, element); break;    \
            case 4: VARITHSAT_CASE(optype, op, cvt, element); break;    \
            }                                                           \
        }                                                               \
        if (sat) {                                                      \
            env->vscr |= (1 << VSCR_SAT);                               \
        }                                                               \
    }
#define VARITHSAT_SIGNED(suffix, element, optype, cvt)          \
    VARITHSAT_DO(adds##suffix##s, +, optype, cvt, element)      \
    VARITHSAT_DO(subs##suffix##s, -, optype, cvt, element)
#define VARITHSAT_UNSIGNED(suffix, element, optype, cvt)        \
    VARITHSAT_DO(addu##suffix##s, +, optype, cvt, element)      \
    VARITHSAT_DO(subu##suffix##s, -, optype, cvt, element)
VARITHSAT_SIGNED(b, s8, int16_t, cvtshsb)
VARITHSAT_SIGNED(h, s16, int32_t, cvtswsh)
VARITHSAT_SIGNED(w, s32, int64_t, cvtsdsw)
VARITHSAT_UNSIGNED(b, u8, uint16_t, cvtshub)
VARITHSAT_UNSIGNED(h, u16, uint32_t, cvtswuh)
VARITHSAT_UNSIGNED(w, u32, uint64_t, cvtsduw)
#undef VARITHSAT_CASE
#undef VARITHSAT_DO
#undef VARITHSAT_SIGNED
#undef VARITHSAT_UNSIGNED

#define VAVG_DO(name, element, etype)                                   \
    void helper_v##name (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)      \
    {                                                                   \
        int i;                                                          \
        for (i = 0; i < ARRAY_SIZE(r->element); i++) {                  \
            etype x = (etype)a->element[i] + (etype)b->element[i] + 1;  \
            r->element[i] = x >> 1;                                     \
        }                                                               \
    }

#define VAVG(type, signed_element, signed_type, unsigned_element, unsigned_type) \
    VAVG_DO(avgs##type, signed_element, signed_type)                    \
    VAVG_DO(avgu##type, unsigned_element, unsigned_type)
VAVG(b, s8, int16_t, u8, uint16_t)
VAVG(h, s16, int32_t, u16, uint32_t)
VAVG(w, s32, int64_t, u32, uint64_t)
#undef VAVG_DO
#undef VAVG
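
/* Example (illustrative): the +1 before the shift rounds the average up,
 * so vavgsb on elements 1 and 2 computes (1 + 2 + 1) >> 1 = 2, and the
 * widened accumulator type keeps the intermediate sum from overflowing.
 */
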
#define VCF(suffix, cvt, element)                                       \
    void helper_vcf##suffix (ppc_avr_t *r, ppc_avr_t *b, uint32_t uim)  \
    {                                                                   \
        int i;                                                          \
        for (i = 0; i < ARRAY_SIZE(r->f); i++) {                        \
            float32 t = cvt(b->element[i], &env->vec_status);           \
            r->f[i] = float32_scalbn (t, -uim, &env->vec_status);       \
        }                                                               \
    }
VCF(ux, uint32_to_float32, u32)
VCF(sx, int32_to_float32, s32)
#undef VCF

#define VCMP_DO(suffix, compare, element, record)                       \
    void helper_vcmp##suffix (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b) \
    {                                                                   \
        uint32_t ones = (uint32_t)-1;                                   \
        uint32_t all = ones;                                            \
        uint32_t none = 0;                                              \
        int i;                                                          \
        for (i = 0; i < ARRAY_SIZE(r->element); i++) {                  \
            uint32_t result = (a->element[i] compare b->element[i] ? ones : 0x0); \
            switch (sizeof (a->element[0])) {                           \
            case 4: r->u32[i] = result; break;                          \
            case 2: r->u16[i] = result; break;                          \
            case 1: r->u8[i] = result; break;                           \
            }                                                           \
            all &= result;                                              \
            none |= result;                                             \
        }                                                               \
        if (record) {                                                   \
            env->crf[6] = ((all != 0) << 3) | ((none == 0) << 1);       \
        }                                                               \
    }
#define VCMP(suffix, compare, element)          \
    VCMP_DO(suffix, compare, element, 0)        \
    VCMP_DO(suffix##_dot, compare, element, 1)
VCMP(equb, ==, u8)
VCMP(equh, ==, u16)
VCMP(equw, ==, u32)
VCMP(gtub, >, u8)
VCMP(gtuh, >, u16)
VCMP(gtuw, >, u32)
VCMP(gtsb, >, s8)
VCMP(gtsh, >, s16)
VCMP(gtsw, >, s32)
#undef VCMP_DO
#undef VCMP

#define VCMPFP_DO(suffix, compare, order, record)                       \
    void helper_vcmp##suffix (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b) \
    {                                                                   \
        uint32_t ones = (uint32_t)-1;                                   \
        uint32_t all = ones;                                            \
        uint32_t none = 0;                                              \
        int i;                                                          \
        for (i = 0; i < ARRAY_SIZE(r->f); i++) {                        \
            uint32_t result;                                            \
            int rel = float32_compare_quiet(a->f[i], b->f[i], &env->vec_status); \
            if (rel == float_relation_unordered) {                      \
                result = 0;                                             \
            } else if (rel compare order) {                             \
                result = ones;                                          \
            } else {                                                    \
                result = 0;                                             \
            }                                                           \
            r->u32[i] = result;                                         \
            all &= result;                                              \
            none |= result;                                             \
        }                                                               \
        if (record) {                                                   \
            env->crf[6] = ((all != 0) << 3) | ((none == 0) << 1);       \
        }                                                               \
    }
#define VCMPFP(suffix, compare, order)          \
    VCMPFP_DO(suffix, compare, order, 0)        \
    VCMPFP_DO(suffix##_dot, compare, order, 1)
VCMPFP(eqfp, ==, float_relation_equal)
VCMPFP(gefp, !=, float_relation_less)
VCMPFP(gtfp, ==, float_relation_greater)
#undef VCMPFP_DO
#undef VCMPFP

static inline void vcmpbfp_internal(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b,
                                    int record)
{
    int i;
    int all_in = 0;

    for (i = 0; i < ARRAY_SIZE(r->f); i++) {
        int le_rel = float32_compare_quiet(a->f[i], b->f[i], &env->vec_status);
        if (le_rel == float_relation_unordered) {
            r->u32[i] = 0xc0000000;
            /* ALL_IN does not need to be updated here. */
        } else {
            float32 bneg = float32_chs(b->f[i]);
            int ge_rel = float32_compare_quiet(a->f[i], bneg, &env->vec_status);
            int le = le_rel != float_relation_greater;
            int ge = ge_rel != float_relation_less;
            r->u32[i] = ((!le) << 31) | ((!ge) << 30);
            all_in |= (!le | !ge);
        }
    }
    if (record) {
        env->crf[6] = (all_in == 0) << 1;
    }
}

void helper_vcmpbfp (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
{
    vcmpbfp_internal(r, a, b, 0);
}

void helper_vcmpbfp_dot (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
{
    vcmpbfp_internal(r, a, b, 1);
}

#define VCT(suffix, satcvt, element)                                    \
    void helper_vct##suffix (ppc_avr_t *r, ppc_avr_t *b, uint32_t uim)  \
    {                                                                   \
        int i;                                                          \
        int sat = 0;                                                    \
        float_status s = env->vec_status;                               \
        set_float_rounding_mode(float_round_to_zero, &s);               \
        for (i = 0; i < ARRAY_SIZE(r->f); i++) {                        \
            if (float32_is_nan(b->f[i]) ||                              \
                float32_is_signaling_nan(b->f[i])) {                    \
                r->element[i] = 0;                                      \
            } else {                                                    \
                float64 t = float32_to_float64(b->f[i], &s);            \
                int64_t j;                                              \
                t = float64_scalbn(t, uim, &s);                         \
                j = float64_to_int64(t, &s);                            \
                r->element[i] = satcvt(j, &sat);                        \
            }                                                           \
        }                                                               \
        if (sat) {                                                      \
            env->vscr |= (1 << VSCR_SAT);                               \
        }                                                               \
    }
VCT(uxs, cvtsduw, u32)
VCT(sxs, cvtsdsw, s32)
#undef VCT

void helper_vmaddfp (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, ppc_avr_t *c)
{
    int i;

    for (i = 0; i < ARRAY_SIZE(r->f); i++) {
        HANDLE_NAN3(r->f[i], a->f[i], b->f[i], c->f[i]) {
            /* Need to do the computation in higher precision and round
             * once at the end. */
            float64 af, bf, cf, t;
            af = float32_to_float64(a->f[i], &env->vec_status);
            bf = float32_to_float64(b->f[i], &env->vec_status);
            cf = float32_to_float64(c->f[i], &env->vec_status);
            t = float64_mul(af, cf, &env->vec_status);
            t = float64_add(t, bf, &env->vec_status);
            r->f[i] = float64_to_float32(t, &env->vec_status);
        }
    }
}

void helper_vmhaddshs (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, ppc_avr_t *c)
{
    int sat = 0;
    int i;

    for (i = 0; i < ARRAY_SIZE(r->s16); i++) {
        int32_t prod = a->s16[i] * b->s16[i];
        int32_t t = (int32_t)c->s16[i] + (prod >> 15);
        r->s16[i] = cvtswsh (t, &sat);
    }

    if (sat) {
        env->vscr |= (1 << VSCR_SAT);
    }
}

void helper_vmhraddshs (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, ppc_avr_t *c)
{
    int sat = 0;
    int i;

    for (i = 0; i < ARRAY_SIZE(r->s16); i++) {
        int32_t prod = a->s16[i] * b->s16[i] + 0x00004000;
        int32_t t = (int32_t)c->s16[i] + (prod >> 15);
        r->s16[i] = cvtswsh (t, &sat);
    }

    if (sat) {
        env->vscr |= (1 << VSCR_SAT);
    }
}

#define VMINMAX_DO(name, compare, element)                              \
    void helper_v##name (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)      \
    {                                                                   \
        int i;                                                          \
        for (i = 0; i < ARRAY_SIZE(r->element); i++) {                  \
            if (a->element[i] compare b->element[i]) {                  \
                r->element[i] = b->element[i];                          \
            } else {                                                    \
                r->element[i] = a->element[i];                          \
            }                                                           \
        }                                                               \
    }
#define VMINMAX(suffix, element)                \
  VMINMAX_DO(min##suffix, >, element)           \
  VMINMAX_DO(max##suffix, <, element)
VMINMAX(sb, s8)
VMINMAX(sh, s16)
VMINMAX(sw, s32)
VMINMAX(ub, u8)
VMINMAX(uh, u16)
VMINMAX(uw, u32)
#undef VMINMAX_DO
#undef VMINMAX

#define VMINMAXFP(suffix, rT, rF)                                       \
    void helper_v##suffix (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)    \
    {                                                                   \
        int i;                                                          \
        for (i = 0; i < ARRAY_SIZE(r->f); i++) {                        \
            HANDLE_NAN2(r->f[i], a->f[i], b->f[i]) {                    \
                if (float32_lt_quiet(a->f[i], b->f[i], &env->vec_status)) { \
                    r->f[i] = rT->f[i];                                 \
                } else {                                                \
                    r->f[i] = rF->f[i];                                 \
                }                                                       \
            }                                                           \
        }                                                               \
    }
VMINMAXFP(minfp, a, b)
VMINMAXFP(maxfp, b, a)
#undef VMINMAXFP

void helper_vmladduhm (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, ppc_avr_t *c)
{
    int i;

    for (i = 0; i < ARRAY_SIZE(r->s16); i++) {
        int32_t prod = a->s16[i] * b->s16[i];
        r->s16[i] = (int16_t) (prod + c->s16[i]);
    }
}

#define VMRG_DO(name, element, highp)                                   \
    void helper_v##name (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)      \
    {                                                                   \
        ppc_avr_t result;                                               \
        int i;                                                          \
        size_t n_elems = ARRAY_SIZE(r->element);                        \
        for (i = 0; i < n_elems/2; i++) {                               \
            if (highp) {                                                \
                result.element[i*2+HI_IDX] = a->element[i];             \
                result.element[i*2+LO_IDX] = b->element[i];             \
            } else {                                                    \
                result.element[n_elems - i*2 - (1+HI_IDX)] = b->element[n_elems - i - 1]; \
                result.element[n_elems - i*2 - (1+LO_IDX)] = a->element[n_elems - i - 1]; \
            }                                                           \
        }                                                               \
        *r = result;                                                    \
    }
#if defined(HOST_WORDS_BIGENDIAN)
#define MRGHI 0
#define MRGLO 1
#else
#define MRGHI 1
#define MRGLO 0
#endif
#define VMRG(suffix, element)                   \
  VMRG_DO(mrgl##suffix, element, MRGHI)         \
  VMRG_DO(mrgh##suffix, element, MRGLO)
VMRG(b, u8)
VMRG(h, u16)
VMRG(w, u32)
#undef VMRG_DO
#undef VMRG
#undef MRGHI
#undef MRGLO
void helper_vmsummbm (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, ppc_avr_t *c)
{
    int32_t prod[16];
    int i;

    for (i = 0; i < ARRAY_SIZE(r->s8); i++) {
        prod[i] = (int32_t)a->s8[i] * b->u8[i];
    }

    VECTOR_FOR_INORDER_I(i, s32) {
        r->s32[i] = c->s32[i] + prod[4*i] + prod[4*i+1] + prod[4*i+2] + prod[4*i+3];
    }
}

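/* Illustrative example: vmsummbm mixes signedness (a signed, b unsigned).
 * If every a->s8[i] is -1 and every b->u8[i] is 2, each prod is -2 and each
 * result word is c->s32[i] - 8. */
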
void helper_vmsumshm (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, ppc_avr_t *c)
{
    int32_t prod[8];
    int i;

    for (i = 0; i < ARRAY_SIZE(r->s16); i++) {
        prod[i] = a->s16[i] * b->s16[i];
    }

    VECTOR_FOR_INORDER_I(i, s32) {
        r->s32[i] = c->s32[i] + prod[2*i] + prod[2*i+1];
    }
}

void helper_vmsumshs (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, ppc_avr_t *c)
{
    int32_t prod[8];
    int i;
    int sat = 0;

    for (i = 0; i < ARRAY_SIZE(r->s16); i++) {
        prod[i] = (int32_t)a->s16[i] * b->s16[i];
    }

    VECTOR_FOR_INORDER_I (i, s32) {
        int64_t t = (int64_t)c->s32[i] + prod[2*i] + prod[2*i+1];
        r->u32[i] = cvtsdsw(t, &sat);
    }

    if (sat) {
        env->vscr |= (1 << VSCR_SAT);
    }
}

void helper_vmsumubm (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, ppc_avr_t *c)
{
    uint16_t prod[16];
    int i;

    for (i = 0; i < ARRAY_SIZE(r->u8); i++) {
        prod[i] = a->u8[i] * b->u8[i];
    }

    VECTOR_FOR_INORDER_I(i, u32) {
        r->u32[i] = c->u32[i] + prod[4*i] + prod[4*i+1] + prod[4*i+2] + prod[4*i+3];
    }
}

void helper_vmsumuhm (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, ppc_avr_t *c)
{
    uint32_t prod[8];
    int i;

    for (i = 0; i < ARRAY_SIZE(r->u16); i++) {
        prod[i] = a->u16[i] * b->u16[i];
    }

    VECTOR_FOR_INORDER_I(i, u32) {
        r->u32[i] = c->u32[i] + prod[2*i] + prod[2*i+1];
    }
}

void helper_vmsumuhs (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, ppc_avr_t *c)
{
    uint32_t prod[8];
    int i;
    int sat = 0;

    for (i = 0; i < ARRAY_SIZE(r->u16); i++) {
        prod[i] = a->u16[i] * b->u16[i];
    }

    VECTOR_FOR_INORDER_I (i, s32) {
        uint64_t t = (uint64_t)c->u32[i] + prod[2*i] + prod[2*i+1];
        r->u32[i] = cvtuduw(t, &sat);
    }

    if (sat) {
        env->vscr |= (1 << VSCR_SAT);
    }
}

#define VMUL_DO(name, mul_element, prod_element, evenp)                 \
    void helper_v##name (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)      \
    {                                                                   \
        int i;                                                          \
        VECTOR_FOR_INORDER_I(i, prod_element) {                         \
            if (evenp) {                                                \
                r->prod_element[i] = a->mul_element[i*2+HI_IDX] * b->mul_element[i*2+HI_IDX]; \
            } else {                                                    \
                r->prod_element[i] = a->mul_element[i*2+LO_IDX] * b->mul_element[i*2+LO_IDX]; \
            }                                                           \
        }                                                               \
    }
#define VMUL(suffix, mul_element, prod_element) \
  VMUL_DO(mule##suffix, mul_element, prod_element, 1) \
  VMUL_DO(mulo##suffix, mul_element, prod_element, 0)
VMUL(sb, s8, s16)
VMUL(sh, s16, s32)
VMUL(ub, u8, u16)
VMUL(uh, u16, u32)
#undef VMUL_DO
#undef VMUL

void helper_vnmsubfp (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, ppc_avr_t *c)
{
    int i;
    for (i = 0; i < ARRAY_SIZE(r->f); i++) {
        HANDLE_NAN3(r->f[i], a->f[i], b->f[i], c->f[i]) {
            /* Need to do the computation in higher precision and round
             * once at the end.  */
            float64 af, bf, cf, t;
            af = float32_to_float64(a->f[i], &env->vec_status);
            bf = float32_to_float64(b->f[i], &env->vec_status);
            cf = float32_to_float64(c->f[i], &env->vec_status);
            t = float64_mul(af, cf, &env->vec_status);
            t = float64_sub(t, bf, &env->vec_status);
            t = float64_chs(t);
            r->f[i] = float64_to_float32(t, &env->vec_status);
        }
    }
}

void helper_vperm (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, ppc_avr_t *c)
{
    ppc_avr_t result;
    int i;

    VECTOR_FOR_INORDER_I (i, u8) {
        int s = c->u8[i] & 0x1f;
#if defined(HOST_WORDS_BIGENDIAN)
        int index = s & 0xf;
#else
        int index = 15 - (s & 0xf);
#endif
        if (s & 0x10) {
            result.u8[i] = b->u8[index];
        } else {
            result.u8[i] = a->u8[index];
        }
    }
    *r = result;
}

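/* Illustrative example: a permute control byte of 0x13 has bit 0x10 set and
 * index 3, so the result byte comes from byte 3 of b (after the host
 * endianness fixup above); 0x03 would select byte 3 of a instead. */
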
#if defined(HOST_WORDS_BIGENDIAN)
#define PKBIG 1
#else
#define PKBIG 0
#endif
void helper_vpkpx (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
{
    int i, j;
    ppc_avr_t result;
#if defined(HOST_WORDS_BIGENDIAN)
    const ppc_avr_t *x[2] = { a, b };
#else
    const ppc_avr_t *x[2] = { b, a };
#endif

    VECTOR_FOR_INORDER_I (i, u64) {
        VECTOR_FOR_INORDER_I (j, u32){
            uint32_t e = x[i]->u32[j];
            result.u16[4*i+j] = (((e >> 9) & 0xfc00) |
                                 ((e >> 6) & 0x3e0) |
                                 ((e >> 3) & 0x1f));
        }
    }
    *r = result;
}

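/* Layout note: each 32-bit 0xAARRGGBB source pixel is packed into a 1:5:5:5
 * halfword built from the least-significant bit of the alpha byte and the
 * top five bits of each of the R, G and B bytes. */
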
#define VPK(suffix, from, to, cvt, dosat)                               \
    void helper_vpk##suffix (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)  \
    {                                                                   \
        int i;                                                          \
        int sat = 0;                                                    \
        ppc_avr_t result;                                               \
        ppc_avr_t *a0 = PKBIG ? a : b;                                  \
        ppc_avr_t *a1 = PKBIG ? b : a;                                  \
        VECTOR_FOR_INORDER_I (i, from) {                                \
            result.to[i] = cvt(a0->from[i], &sat);                      \
            result.to[i+ARRAY_SIZE(r->from)] = cvt(a1->from[i], &sat);  \
        }                                                               \
        *r = result;                                                    \
        if (dosat && sat) {                                             \
            env->vscr |= (1 << VSCR_SAT);                               \
        }                                                               \
    }
#define I(x, y) (x)
VPK(shss, s16, s8, cvtshsb, 1)
VPK(shus, s16, u8, cvtshub, 1)
VPK(swss, s32, s16, cvtswsh, 1)
VPK(swus, s32, u16, cvtswuh, 1)
VPK(uhus, u16, u8, cvtuhub, 1)
VPK(uwus, u32, u16, cvtuwuh, 1)
VPK(uhum, u16, u8, I, 0)
VPK(uwum, u32, u16, I, 0)
#undef I
#undef VPK
#undef PKBIG

void helper_vrefp (ppc_avr_t *r, ppc_avr_t *b)
{
    int i;
    for (i = 0; i < ARRAY_SIZE(r->f); i++) {
        HANDLE_NAN1(r->f[i], b->f[i]) {
            r->f[i] = float32_div(float32_one, b->f[i], &env->vec_status);
        }
    }
}

#define VRFI(suffix, rounding)                                          \
    void helper_vrfi##suffix (ppc_avr_t *r, ppc_avr_t *b)               \
    {                                                                   \
        int i;                                                          \
        float_status s = env->vec_status;                               \
        set_float_rounding_mode(rounding, &s);                          \
        for (i = 0; i < ARRAY_SIZE(r->f); i++) {                        \
            HANDLE_NAN1(r->f[i], b->f[i]) {                             \
                r->f[i] = float32_round_to_int (b->f[i], &s);           \
            }                                                           \
        }                                                               \
    }
VRFI(n, float_round_nearest_even)
VRFI(m, float_round_down)
VRFI(p, float_round_up)
VRFI(z, float_round_to_zero)
#undef VRFI

#define VROTATE(suffix, element)                                        \
    void helper_vrl##suffix (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)  \
    {                                                                   \
        int i;                                                          \
        for (i = 0; i < ARRAY_SIZE(r->element); i++) {                  \
            unsigned int mask = ((1 << (3 + (sizeof (a->element[0]) >> 1))) - 1); \
            unsigned int shift = b->element[i] & mask;                  \
            r->element[i] = (a->element[i] << shift) |                  \
                            (a->element[i] >> (sizeof(a->element[0]) * 8 - shift)); \
        }                                                               \
    }
VROTATE(b, u8)
VROTATE(h, u16)
VROTATE(w, u32)
#undef VROTATE

void helper_vrsqrtefp (ppc_avr_t *r, ppc_avr_t *b)
{
    int i;
    for (i = 0; i < ARRAY_SIZE(r->f); i++) {
        HANDLE_NAN1(r->f[i], b->f[i]) {
            float32 t = float32_sqrt(b->f[i], &env->vec_status);
            r->f[i] = float32_div(float32_one, t, &env->vec_status);
        }
    }
}

void helper_vsel (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, ppc_avr_t *c)
{
    r->u64[0] = (a->u64[0] & ~c->u64[0]) | (b->u64[0] & c->u64[0]);
    r->u64[1] = (a->u64[1] & ~c->u64[1]) | (b->u64[1] & c->u64[1]);
}

void helper_vlogefp (ppc_avr_t *r, ppc_avr_t *b)
{
    int i;
    for (i = 0; i < ARRAY_SIZE(r->f); i++) {
        HANDLE_NAN1(r->f[i], b->f[i]) {
            r->f[i] = float32_log2(b->f[i], &env->vec_status);
        }
    }
}

#if defined(HOST_WORDS_BIGENDIAN)
#define LEFT 0
#define RIGHT 1
#else
#define LEFT 1
#define RIGHT 0
#endif
/* The specification says that the results are undefined unless all of the
 * shift counts are identical.  We check to make sure that they are
 * to conform to what real hardware appears to do.  */
#define VSHIFT(suffix, leftp)                                           \
    void helper_vs##suffix (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)   \
    {                                                                   \
        int shift = b->u8[LO_IDX*15] & 0x7;                             \
        int doit = 1;                                                   \
        int i;                                                          \
        for (i = 0; i < ARRAY_SIZE(r->u8); i++) {                       \
            doit = doit && ((b->u8[i] & 0x7) == shift);                 \
        }                                                               \
        if (doit) {                                                     \
            if (shift == 0) {                                           \
                *r = *a;                                                \
            } else if (leftp) {                                         \
                uint64_t carry = a->u64[LO_IDX] >> (64 - shift);        \
                r->u64[HI_IDX] = (a->u64[HI_IDX] << shift) | carry;     \
                r->u64[LO_IDX] = a->u64[LO_IDX] << shift;               \
            } else {                                                    \
                uint64_t carry = a->u64[HI_IDX] << (64 - shift);        \
                r->u64[LO_IDX] = (a->u64[LO_IDX] >> shift) | carry;     \
                r->u64[HI_IDX] = a->u64[HI_IDX] >> shift;               \
            }                                                           \
        }                                                               \
    }
VSHIFT(l, LEFT)
VSHIFT(r, RIGHT)
#undef VSHIFT
#undef LEFT
#undef RIGHT

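/* Illustrative example: vsl/vsr treat the register as one 128-bit value.
 * If the low three bits of every byte of b equal 3, the value shifts by
 * 3 bits; if the per-byte counts disagree, the result is architecturally
 * undefined and this implementation leaves r unchanged. */
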
#define VSL(suffix, element)                                            \
    void helper_vsl##suffix (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)  \
    {                                                                   \
        int i;                                                          \
        for (i = 0; i < ARRAY_SIZE(r->element); i++) {                  \
            unsigned int mask = ((1 << (3 + (sizeof (a->element[0]) >> 1))) - 1); \
            unsigned int shift = b->element[i] & mask;                  \
            r->element[i] = a->element[i] << shift;                     \
        }                                                               \
    }
VSL(b, u8)
VSL(h, u16)
VSL(w, u32)
#undef VSL

void helper_vsldoi (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, uint32_t shift)
{
    int sh = shift & 0xf;
    int i;
    ppc_avr_t result;

#if defined(HOST_WORDS_BIGENDIAN)
    for (i = 0; i < ARRAY_SIZE(r->u8); i++) {
        int index = sh + i;
        if (index > 0xf) {
            result.u8[i] = b->u8[index-0x10];
        } else {
            result.u8[i] = a->u8[index];
        }
    }
#else
    for (i = 0; i < ARRAY_SIZE(r->u8); i++) {
        int index = (16 - sh) + i;
        if (index > 0xf) {
            result.u8[i] = a->u8[index-0x10];
        } else {
            result.u8[i] = b->u8[index];
        }
    }
#endif
    *r = result;
}

void helper_vslo (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
{
    int sh = (b->u8[LO_IDX*0xf] >> 3) & 0xf;

#if defined (HOST_WORDS_BIGENDIAN)
    memmove (&r->u8[0], &a->u8[sh], 16-sh);
    memset (&r->u8[16-sh], 0, sh);
#else
    memmove (&r->u8[sh], &a->u8[0], 16-sh);
    memset (&r->u8[0], 0, sh);
#endif
}

/* Experimental testing shows that hardware masks the immediate.  */
#define _SPLAT_MASKED(element) (splat & (ARRAY_SIZE(r->element) - 1))
#if defined(HOST_WORDS_BIGENDIAN)
#define SPLAT_ELEMENT(element) _SPLAT_MASKED(element)
#else
#define SPLAT_ELEMENT(element) (ARRAY_SIZE(r->element)-1 - _SPLAT_MASKED(element))
#endif
#define VSPLT(suffix, element)                                          \
    void helper_vsplt##suffix (ppc_avr_t *r, ppc_avr_t *b, uint32_t splat) \
    {                                                                   \
        uint32_t s = b->element[SPLAT_ELEMENT(element)];                \
        int i;                                                          \
        for (i = 0; i < ARRAY_SIZE(r->element); i++) {                  \
            r->element[i] = s;                                          \
        }                                                               \
    }
VSPLT(b, u8)
VSPLT(h, u16)
VSPLT(w, u32)
#undef VSPLT
#undef SPLAT_ELEMENT
#undef _SPLAT_MASKED

#define VSPLTI(suffix, element, splat_type)                     \
    void helper_vspltis##suffix (ppc_avr_t *r, uint32_t splat)  \
    {                                                           \
        splat_type x = (int8_t)(splat << 3) >> 3;               \
        int i;                                                  \
        for (i = 0; i < ARRAY_SIZE(r->element); i++) {          \
            r->element[i] = x;                                  \
        }                                                       \
    }
VSPLTI(b, s8, int8_t)
VSPLTI(h, s16, int16_t)
VSPLTI(w, s32, int32_t)
#undef VSPLTI

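/* Illustrative example: for vspltb the index is masked with
 * ARRAY_SIZE(r->u8) - 1 = 15, so splat = 17 selects the same byte as
 * splat = 1 instead of reading out of range. */
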
#define VSR(suffix, element)                                            \
    void helper_vsr##suffix (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)  \
    {                                                                   \
        int i;                                                          \
        for (i = 0; i < ARRAY_SIZE(r->element); i++) {                  \
            unsigned int mask = ((1 << (3 + (sizeof (a->element[0]) >> 1))) - 1); \
            unsigned int shift = b->element[i] & mask;                  \
            r->element[i] = a->element[i] >> shift;                     \
        }                                                               \
    }
VSR(ab, s8)
VSR(ah, s16)
VSR(aw, s32)
VSR(b, u8)
VSR(h, u16)
VSR(w, u32)
#undef VSR

void helper_vsro (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
{
    int sh = (b->u8[LO_IDX*0xf] >> 3) & 0xf;

#if defined (HOST_WORDS_BIGENDIAN)
    memmove (&r->u8[sh], &a->u8[0], 16-sh);
    memset (&r->u8[0], 0, sh);
#else
    memmove (&r->u8[0], &a->u8[sh], 16-sh);
    memset (&r->u8[16-sh], 0, sh);
#endif
}

void helper_vsubcuw (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
{
    int i;
    for (i = 0; i < ARRAY_SIZE(r->u32); i++) {
        r->u32[i] = a->u32[i] >= b->u32[i];
    }
}

void helper_vsumsws (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
{
    int64_t t;
    int i, upper;
    ppc_avr_t result;
    int sat = 0;

#if defined(HOST_WORDS_BIGENDIAN)
    upper = ARRAY_SIZE(r->s32)-1;
#else
    upper = 0;
#endif
    t = (int64_t)b->s32[upper];
    for (i = 0; i < ARRAY_SIZE(r->s32); i++) {
        t += a->s32[i];
        result.s32[i] = 0;
    }
    result.s32[upper] = cvtsdsw(t, &sat);
    *r = result;

    if (sat) {
        env->vscr |= (1 << VSCR_SAT);
    }
}

void helper_vsum2sws (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
{
    int i, j, upper;
    ppc_avr_t result;
    int sat = 0;

#if defined(HOST_WORDS_BIGENDIAN)
    upper = 1;
#else
    upper = 0;
#endif
    for (i = 0; i < ARRAY_SIZE(r->u64); i++) {
        int64_t t = (int64_t)b->s32[upper+i*2];
        result.u64[i] = 0;
        for (j = 0; j < ARRAY_SIZE(r->u64); j++) {
            t += a->s32[2*i+j];
        }
        result.s32[upper+i*2] = cvtsdsw(t, &sat);
    }

    *r = result;
    if (sat) {
        env->vscr |= (1 << VSCR_SAT);
    }
}

void helper_vsum4sbs (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
{
    int i, j;
    int sat = 0;

    for (i = 0; i < ARRAY_SIZE(r->s32); i++) {
        int64_t t = (int64_t)b->s32[i];
        for (j = 0; j < ARRAY_SIZE(r->s32); j++) {
            t += a->s8[4*i+j];
        }
        r->s32[i] = cvtsdsw(t, &sat);
    }

    if (sat) {
        env->vscr |= (1 << VSCR_SAT);
    }
}

void helper_vsum4shs (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
{
    int i;
    int sat = 0;

    for (i = 0; i < ARRAY_SIZE(r->s32); i++) {
        int64_t t = (int64_t)b->s32[i];
        t += a->s16[2*i] + a->s16[2*i+1];
        r->s32[i] = cvtsdsw(t, &sat);
    }

    if (sat) {
        env->vscr |= (1 << VSCR_SAT);
    }
}

void helper_vsum4ubs (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
{
    int i, j;
    int sat = 0;

    for (i = 0; i < ARRAY_SIZE(r->u32); i++) {
        uint64_t t = (uint64_t)b->u32[i];
        for (j = 0; j < ARRAY_SIZE(r->u32); j++) {
            t += a->u8[4*i+j];
        }
        r->u32[i] = cvtuduw(t, &sat);
    }

    if (sat) {
        env->vscr |= (1 << VSCR_SAT);
    }
}

#if defined(HOST_WORDS_BIGENDIAN)
#define UPKHI 1
#define UPKLO 0
#else
#define UPKHI 0
#define UPKLO 1
#endif
#define VUPKPX(suffix, hi)                                      \
    void helper_vupk##suffix (ppc_avr_t *r, ppc_avr_t *b)       \
    {                                                           \
        int i;                                                  \
        ppc_avr_t result;                                       \
        for (i = 0; i < ARRAY_SIZE(r->u32); i++) {              \
            uint16_t e = b->u16[hi ? i : i+4];                  \
            uint8_t a = (e >> 15) ? 0xff : 0;                   \
            uint8_t r = (e >> 10) & 0x1f;                       \
            uint8_t g = (e >> 5) & 0x1f;                        \
            uint8_t b = e & 0x1f;                               \
            result.u32[i] = (a << 24) | (r << 16) | (g << 8) | b; \
        }                                                       \
        *r = result;                                            \
    }
VUPKPX(lpx, UPKLO)
VUPKPX(hpx, UPKHI)
#undef VUPKPX

#define VUPK(suffix, unpacked, packee, hi)                              \
    void helper_vupk##suffix (ppc_avr_t *r, ppc_avr_t *b)               \
    {                                                                   \
        int i;                                                          \
        ppc_avr_t result;                                               \
        if (hi) {                                                       \
            for (i = 0; i < ARRAY_SIZE(r->unpacked); i++) {             \
                result.unpacked[i] = b->packee[i];                      \
            }                                                           \
        } else {                                                        \
            for (i = ARRAY_SIZE(r->unpacked); i < ARRAY_SIZE(r->packee); i++) { \
                result.unpacked[i-ARRAY_SIZE(r->unpacked)] = b->packee[i]; \
            }                                                           \
        }                                                               \
        *r = result;                                                    \
    }
VUPK(hsb, s16, s8, UPKHI)
VUPK(hsh, s32, s16, UPKHI)
VUPK(lsb, s16, s8, UPKLO)
VUPK(lsh, s32, s16, UPKLO)
#undef VUPK
#undef UPKHI
#undef UPKLO

#undef DO_HANDLE_NAN
#undef HANDLE_NAN1
#undef HANDLE_NAN2
#undef HANDLE_NAN3
#undef VECTOR_FOR_INORDER_I
#undef HI_IDX
#undef LO_IDX

/*****************************************************************************/
/* SPE extension helpers */
/* Use a table to make this quicker */
static uint8_t hbrev[16] = {
    0x0, 0x8, 0x4, 0xC, 0x2, 0xA, 0x6, 0xE,
    0x1, 0x9, 0x5, 0xD, 0x3, 0xB, 0x7, 0xF,
};

static inline uint8_t byte_reverse(uint8_t val)
{
    return hbrev[val >> 4] | (hbrev[val & 0xF] << 4);
}

static inline uint32_t word_reverse(uint32_t val)
{
    return byte_reverse(val >> 24) | (byte_reverse(val >> 16) << 8) |
        (byte_reverse(val >> 8) << 16) | (byte_reverse(val) << 24);
}

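/* Worked example: byte_reverse(0xA1) is hbrev[0xA] | (hbrev[0x1] << 4)
 * = 0x05 | 0x80 = 0x85, i.e. 10100001 reversed to 10000101. */
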
#define MASKBITS 16 // Random value - to be fixed (implementation dependent)
target_ulong helper_brinc (target_ulong arg1, target_ulong arg2)
{
    uint32_t a, b, d, mask;

    mask = UINT32_MAX >> (32 - MASKBITS);
    a = arg1 & mask;
    b = arg2 & mask;
    d = word_reverse(1 + word_reverse(a | ~b));
    return (arg1 & ~mask) | (d & b);
}

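/* Usage note: brinc produces the bit-reversed counting order used for FFT
 * addressing.  Illustrative sequence: within a 3-bit wide mask, repeated
 * increments starting from 0 visit 0, 4, 2, 6, 1, 5, 3, 7. */
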
uint32_t helper_cntlsw32 (uint32_t val)
{
    if (val & 0x80000000)
        return clz32(~val);
    else
        return clz32(val);
}

uint32_t helper_cntlzw32 (uint32_t val)
{
    return clz32(val);
}

/* Single-precision floating-point conversions */
static inline uint32_t efscfsi(uint32_t val)
{
    CPU_FloatU u;

    u.f = int32_to_float32(val, &env->vec_status);

    return u.l;
}

static inline uint32_t efscfui(uint32_t val)
{
    CPU_FloatU u;

    u.f = uint32_to_float32(val, &env->vec_status);

    return u.l;
}

static inline int32_t efsctsi(uint32_t val)
{
    CPU_FloatU u;

    u.l = val;
    /* NaN are not treated the same way IEEE 754 does */
    if (unlikely(float32_is_nan(u.f)))
        return 0;

    return float32_to_int32(u.f, &env->vec_status);
}

static inline uint32_t efsctui(uint32_t val)
{
    CPU_FloatU u;

    u.l = val;
    /* NaN are not treated the same way IEEE 754 does */
    if (unlikely(float32_is_nan(u.f)))
        return 0;

    return float32_to_uint32(u.f, &env->vec_status);
}

static inline uint32_t efsctsiz(uint32_t val)
{
    CPU_FloatU u;

    u.l = val;
    /* NaN are not treated the same way IEEE 754 does */
    if (unlikely(float32_is_nan(u.f)))
        return 0;

    return float32_to_int32_round_to_zero(u.f, &env->vec_status);
}

static inline uint32_t efsctuiz(uint32_t val)
{
    CPU_FloatU u;

    u.l = val;
    /* NaN are not treated the same way IEEE 754 does */
    if (unlikely(float32_is_nan(u.f)))
        return 0;

    return float32_to_uint32_round_to_zero(u.f, &env->vec_status);
}

static inline uint32_t efscfsf(uint32_t val)
{
    CPU_FloatU u;
    float32 tmp;

    u.f = int32_to_float32(val, &env->vec_status);
    tmp = int64_to_float32(1ULL << 32, &env->vec_status);
    u.f = float32_div(u.f, tmp, &env->vec_status);

    return u.l;
}

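/* Worked example: efscfsf interprets val as a signed 32-bit fraction and
 * divides by 2**32, so 0x40000000 (2**30) converts to 0.25. */
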
static inline uint32_t efscfuf(uint32_t val)
{
    CPU_FloatU u;
    float32 tmp;

    u.f = uint32_to_float32(val, &env->vec_status);
    tmp = uint64_to_float32(1ULL << 32, &env->vec_status);
    u.f = float32_div(u.f, tmp, &env->vec_status);

    return u.l;
}

static inline uint32_t efsctsf(uint32_t val)
{
    CPU_FloatU u;
    float32 tmp;

    u.l = val;
    /* NaN are not treated the same way IEEE 754 does */
    if (unlikely(float32_is_nan(u.f)))
        return 0;
    tmp = uint64_to_float32(1ULL << 32, &env->vec_status);
    u.f = float32_mul(u.f, tmp, &env->vec_status);

    return float32_to_int32(u.f, &env->vec_status);
}

static inline uint32_t efsctuf(uint32_t val)
{
    CPU_FloatU u;
    float32 tmp;

    u.l = val;
    /* NaN are not treated the same way IEEE 754 does */
    if (unlikely(float32_is_nan(u.f)))
        return 0;
    tmp = uint64_to_float32(1ULL << 32, &env->vec_status);
    u.f = float32_mul(u.f, tmp, &env->vec_status);

    return float32_to_uint32(u.f, &env->vec_status);
}

#define HELPER_SPE_SINGLE_CONV(name)                                    \
uint32_t helper_e##name (uint32_t val)                                  \
{                                                                       \
    return e##name(val);                                                \
}
HELPER_SPE_SINGLE_CONV(fscfsi);
HELPER_SPE_SINGLE_CONV(fscfui);
HELPER_SPE_SINGLE_CONV(fscfuf);
HELPER_SPE_SINGLE_CONV(fscfsf);
HELPER_SPE_SINGLE_CONV(fsctsi);
HELPER_SPE_SINGLE_CONV(fsctui);
HELPER_SPE_SINGLE_CONV(fsctsiz);
HELPER_SPE_SINGLE_CONV(fsctuiz);
HELPER_SPE_SINGLE_CONV(fsctsf);
HELPER_SPE_SINGLE_CONV(fsctuf);

#define HELPER_SPE_VECTOR_CONV(name)                                    \
uint64_t helper_ev##name (uint64_t val)                                 \
{                                                                       \
    return ((uint64_t)e##name(val >> 32) << 32) |                       \
            (uint64_t)e##name(val);                                     \
}
HELPER_SPE_VECTOR_CONV(fscfsi);
HELPER_SPE_VECTOR_CONV(fscfui);
HELPER_SPE_VECTOR_CONV(fscfuf);
HELPER_SPE_VECTOR_CONV(fscfsf);
HELPER_SPE_VECTOR_CONV(fsctsi);
HELPER_SPE_VECTOR_CONV(fsctui);
HELPER_SPE_VECTOR_CONV(fsctsiz);
HELPER_SPE_VECTOR_CONV(fsctuiz);
HELPER_SPE_VECTOR_CONV(fsctsf);
HELPER_SPE_VECTOR_CONV(fsctuf);

/* Single-precision floating-point arithmetic */
static inline uint32_t efsadd(uint32_t op1, uint32_t op2)
{
    CPU_FloatU u1, u2;
    u1.l = op1;
    u2.l = op2;
    u1.f = float32_add(u1.f, u2.f, &env->vec_status);
    return u1.l;
}

static inline uint32_t efssub(uint32_t op1, uint32_t op2)
{
    CPU_FloatU u1, u2;
    u1.l = op1;
    u2.l = op2;
    u1.f = float32_sub(u1.f, u2.f, &env->vec_status);
    return u1.l;
}

static inline uint32_t efsmul(uint32_t op1, uint32_t op2)
{
    CPU_FloatU u1, u2;
    u1.l = op1;
    u2.l = op2;
    u1.f = float32_mul(u1.f, u2.f, &env->vec_status);
    return u1.l;
}

static inline uint32_t efsdiv(uint32_t op1, uint32_t op2)
{
    CPU_FloatU u1, u2;
    u1.l = op1;
    u2.l = op2;
    u1.f = float32_div(u1.f, u2.f, &env->vec_status);
    return u1.l;
}

#define HELPER_SPE_SINGLE_ARITH(name)                                   \
uint32_t helper_e##name (uint32_t op1, uint32_t op2)                    \
{                                                                       \
    return e##name(op1, op2);                                           \
}
HELPER_SPE_SINGLE_ARITH(fsadd);
HELPER_SPE_SINGLE_ARITH(fssub);
HELPER_SPE_SINGLE_ARITH(fsmul);
HELPER_SPE_SINGLE_ARITH(fsdiv);

#define HELPER_SPE_VECTOR_ARITH(name)                                   \
uint64_t helper_ev##name (uint64_t op1, uint64_t op2)                   \
{                                                                       \
    return ((uint64_t)e##name(op1 >> 32, op2 >> 32) << 32) |            \
            (uint64_t)e##name(op1, op2);                                \
}
HELPER_SPE_VECTOR_ARITH(fsadd);
HELPER_SPE_VECTOR_ARITH(fssub);
HELPER_SPE_VECTOR_ARITH(fsmul);
HELPER_SPE_VECTOR_ARITH(fsdiv);

/* Single-precision floating-point comparisons */
static inline uint32_t efststlt(uint32_t op1, uint32_t op2)
{
    CPU_FloatU u1, u2;
    u1.l = op1;
    u2.l = op2;
    return float32_lt(u1.f, u2.f, &env->vec_status) ? 4 : 0;
}

static inline uint32_t efststgt(uint32_t op1, uint32_t op2)
{
    CPU_FloatU u1, u2;
    u1.l = op1;
    u2.l = op2;
    return float32_le(u1.f, u2.f, &env->vec_status) ? 0 : 4;
}

static inline uint32_t efststeq(uint32_t op1, uint32_t op2)
{
    CPU_FloatU u1, u2;
    u1.l = op1;
    u2.l = op2;
    return float32_eq(u1.f, u2.f, &env->vec_status) ? 4 : 0;
}

static inline uint32_t efscmplt(uint32_t op1, uint32_t op2)
{
    /* XXX: TODO: test special values (NaN, infinites, ...) */
    return efststlt(op1, op2);
}

static inline uint32_t efscmpgt(uint32_t op1, uint32_t op2)
{
    /* XXX: TODO: test special values (NaN, infinites, ...) */
    return efststgt(op1, op2);
}

static inline uint32_t efscmpeq(uint32_t op1, uint32_t op2)
{
    /* XXX: TODO: test special values (NaN, infinites, ...) */
    return efststeq(op1, op2);
}

#define HELPER_SINGLE_SPE_CMP(name)                                     \
uint32_t helper_e##name (uint32_t op1, uint32_t op2)                    \
{                                                                       \
    return e##name(op1, op2) << 2;                                      \
}
HELPER_SINGLE_SPE_CMP(fststlt);
HELPER_SINGLE_SPE_CMP(fststgt);
HELPER_SINGLE_SPE_CMP(fststeq);
HELPER_SINGLE_SPE_CMP(fscmplt);
HELPER_SINGLE_SPE_CMP(fscmpgt);
HELPER_SINGLE_SPE_CMP(fscmpeq);

static inline uint32_t evcmp_merge(int t0, int t1)
{
    return (t0 << 3) | (t1 << 2) | ((t0 | t1) << 1) | (t0 & t1);
}

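/* Worked example: evcmp_merge(1, 0) returns 0b1010; bit 3 is the high-word
 * result, bit 2 the low-word result, bit 1 their OR and bit 0 their AND,
 * matching the CR field layout of the SPE vector compares. */
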
#define HELPER_VECTOR_SPE_CMP(name)                                     \
uint32_t helper_ev##name (uint64_t op1, uint64_t op2)                   \
{                                                                       \
    return evcmp_merge(e##name(op1 >> 32, op2 >> 32), e##name(op1, op2)); \
}
HELPER_VECTOR_SPE_CMP(fststlt);
HELPER_VECTOR_SPE_CMP(fststgt);
HELPER_VECTOR_SPE_CMP(fststeq);
HELPER_VECTOR_SPE_CMP(fscmplt);
HELPER_VECTOR_SPE_CMP(fscmpgt);
HELPER_VECTOR_SPE_CMP(fscmpeq);

/* Double-precision floating-point conversion */
uint64_t helper_efdcfsi (uint32_t val)
{
    CPU_DoubleU u;

    u.d = int32_to_float64(val, &env->vec_status);

    return u.ll;
}

uint64_t helper_efdcfsid (uint64_t val)
{
    CPU_DoubleU u;

    u.d = int64_to_float64(val, &env->vec_status);

    return u.ll;
}

uint64_t helper_efdcfui (uint32_t val)
{
    CPU_DoubleU u;

    u.d = uint32_to_float64(val, &env->vec_status);

    return u.ll;
}

uint64_t helper_efdcfuid (uint64_t val)
{
    CPU_DoubleU u;

    u.d = uint64_to_float64(val, &env->vec_status);

    return u.ll;
}

uint32_t helper_efdctsi (uint64_t val)
{
    CPU_DoubleU u;

    u.ll = val;
    /* NaN are not treated the same way IEEE 754 does */
    if (unlikely(float64_is_nan(u.d)))
        return 0;

    return float64_to_int32(u.d, &env->vec_status);
}

uint32_t helper_efdctui (uint64_t val)
{
    CPU_DoubleU u;

    u.ll = val;
    /* NaN are not treated the same way IEEE 754 does */
    if (unlikely(float64_is_nan(u.d)))
        return 0;

    return float64_to_uint32(u.d, &env->vec_status);
}

uint32_t helper_efdctsiz (uint64_t val)
{
    CPU_DoubleU u;

    u.ll = val;
    /* NaN are not treated the same way IEEE 754 does */
    if (unlikely(float64_is_nan(u.d)))
        return 0;

    return float64_to_int32_round_to_zero(u.d, &env->vec_status);
}

uint64_t helper_efdctsidz (uint64_t val)
{
    CPU_DoubleU u;

    u.ll = val;
    /* NaN are not treated the same way IEEE 754 does */
    if (unlikely(float64_is_nan(u.d)))
        return 0;

    return float64_to_int64_round_to_zero(u.d, &env->vec_status);
}

uint32_t helper_efdctuiz (uint64_t val)
{
    CPU_DoubleU u;

    u.ll = val;
    /* NaN are not treated the same way IEEE 754 does */
    if (unlikely(float64_is_nan(u.d)))
        return 0;

    return float64_to_uint32_round_to_zero(u.d, &env->vec_status);
}

uint64_t helper_efdctuidz (uint64_t val)
{
    CPU_DoubleU u;

    u.ll = val;
    /* NaN are not treated the same way IEEE 754 does */
    if (unlikely(float64_is_nan(u.d)))
        return 0;

    return float64_to_uint64_round_to_zero(u.d, &env->vec_status);
}

uint64_t helper_efdcfsf (uint32_t val)
{
    CPU_DoubleU u;
    float64 tmp;

    u.d = int32_to_float64(val, &env->vec_status);
    tmp = int64_to_float64(1ULL << 32, &env->vec_status);
    u.d = float64_div(u.d, tmp, &env->vec_status);

    return u.ll;
}

uint64_t helper_efdcfuf (uint32_t val)
{
    CPU_DoubleU u;
    float64 tmp;

    u.d = uint32_to_float64(val, &env->vec_status);
    tmp = int64_to_float64(1ULL << 32, &env->vec_status);
    u.d = float64_div(u.d, tmp, &env->vec_status);

    return u.ll;
}

uint32_t helper_efdctsf (uint64_t val)
{
    CPU_DoubleU u;
    float64 tmp;

    u.ll = val;
    /* NaN are not treated the same way IEEE 754 does */
    if (unlikely(float64_is_nan(u.d)))
        return 0;
    tmp = uint64_to_float64(1ULL << 32, &env->vec_status);
    u.d = float64_mul(u.d, tmp, &env->vec_status);

    return float64_to_int32(u.d, &env->vec_status);
}

uint32_t helper_efdctuf (uint64_t val)
{
    CPU_DoubleU u;
    float64 tmp;

    u.ll = val;
    /* NaN are not treated the same way IEEE 754 does */
    if (unlikely(float64_is_nan(u.d)))
        return 0;
    tmp = uint64_to_float64(1ULL << 32, &env->vec_status);
    u.d = float64_mul(u.d, tmp, &env->vec_status);

    return float64_to_uint32(u.d, &env->vec_status);
}

uint32_t helper_efscfd (uint64_t val)
{
    CPU_DoubleU u1;
    CPU_FloatU u2;

    u1.ll = val;
    u2.f = float64_to_float32(u1.d, &env->vec_status);

    return u2.l;
}

uint64_t helper_efdcfs (uint32_t val)
{
    CPU_DoubleU u2;
    CPU_FloatU u1;

    u1.l = val;
    u2.d = float32_to_float64(u1.f, &env->vec_status);

    return u2.ll;
}

/* Double-precision floating-point arithmetic */
uint64_t helper_efdadd (uint64_t op1, uint64_t op2)
{
    CPU_DoubleU u1, u2;
    u1.ll = op1;
    u2.ll = op2;
    u1.d = float64_add(u1.d, u2.d, &env->vec_status);
    return u1.ll;
}

uint64_t helper_efdsub (uint64_t op1, uint64_t op2)
{
    CPU_DoubleU u1, u2;
    u1.ll = op1;
    u2.ll = op2;
    u1.d = float64_sub(u1.d, u2.d, &env->vec_status);
    return u1.ll;
}

uint64_t helper_efdmul (uint64_t op1, uint64_t op2)
{
    CPU_DoubleU u1, u2;
    u1.ll = op1;
    u2.ll = op2;
    u1.d = float64_mul(u1.d, u2.d, &env->vec_status);
    return u1.ll;
}

uint64_t helper_efddiv (uint64_t op1, uint64_t op2)
{
    CPU_DoubleU u1, u2;
    u1.ll = op1;
    u2.ll = op2;
    u1.d = float64_div(u1.d, u2.d, &env->vec_status);
    return u1.ll;
}

/* Double-precision floating-point comparisons */
uint32_t helper_efdtstlt (uint64_t op1, uint64_t op2)
{
    CPU_DoubleU u1, u2;
    u1.ll = op1;
    u2.ll = op2;
    return float64_lt(u1.d, u2.d, &env->vec_status) ? 4 : 0;
}

uint32_t helper_efdtstgt (uint64_t op1, uint64_t op2)
{
    CPU_DoubleU u1, u2;
    u1.ll = op1;
    u2.ll = op2;
    return float64_le(u1.d, u2.d, &env->vec_status) ? 0 : 4;
}

uint32_t helper_efdtsteq (uint64_t op1, uint64_t op2)
{
    CPU_DoubleU u1, u2;
    u1.ll = op1;
    u2.ll = op2;
    return float64_eq(u1.d, u2.d, &env->vec_status) ? 4 : 0;
}

uint32_t helper_efdcmplt (uint64_t op1, uint64_t op2)
{
    /* XXX: TODO: test special values (NaN, infinites, ...) */
    return helper_efdtstlt(op1, op2);
}

uint32_t helper_efdcmpgt (uint64_t op1, uint64_t op2)
{
    /* XXX: TODO: test special values (NaN, infinites, ...) */
    return helper_efdtstgt(op1, op2);
}

uint32_t helper_efdcmpeq (uint64_t op1, uint64_t op2)
{
    /* XXX: TODO: test special values (NaN, infinites, ...) */
    return helper_efdtsteq(op1, op2);
}

/*****************************************************************************/
/* Softmmu support */
#if !defined (CONFIG_USER_ONLY)

#define MMUSUFFIX _mmu

#define SHIFT 0
#include "softmmu_template.h"

#define SHIFT 1
#include "softmmu_template.h"

#define SHIFT 2
#include "softmmu_template.h"

#define SHIFT 3
#include "softmmu_template.h"

/* try to fill the TLB and return an exception if error. If retaddr is
   NULL, it means that the function was called in C code (i.e. not
   from generated code or from helper.c) */
/* XXX: fix it to restore all registers */
void tlb_fill (target_ulong addr, int is_write, int mmu_idx, void *retaddr)
{
    TranslationBlock *tb;
    CPUState *saved_env;
    unsigned long pc;
    int ret;

    /* XXX: hack to restore env in all cases, even if not called from
       generated code */
    saved_env = env;
    env = cpu_single_env;
    ret = cpu_ppc_handle_mmu_fault(env, addr, is_write, mmu_idx, 1);
    if (unlikely(ret != 0)) {
        if (likely(retaddr)) {
            /* now we have a real cpu fault */
            pc = (unsigned long)retaddr;
            tb = tb_find_pc(pc);
            if (likely(tb)) {
                /* the PC is inside the translated code. It means that we have
                   a virtual CPU fault */
                cpu_restore_state(tb, env, pc, NULL);
            }
        }
        helper_raise_exception_err(env->exception_index, env->error_code);
    }
    env = saved_env;
}

/* Segment registers load and store */
target_ulong helper_load_sr (target_ulong sr_num)
{
#if defined(TARGET_PPC64)
    if (env->mmu_model & POWERPC_MMU_64)
        return ppc_load_sr(env, sr_num);
#endif
    return env->sr[sr_num];
}

void helper_store_sr (target_ulong sr_num, target_ulong val)
{
    ppc_store_sr(env, sr_num, val);
}

/* SLB management */
#if defined(TARGET_PPC64)
target_ulong helper_load_slb (target_ulong slb_nr)
{
    return ppc_load_slb(env, slb_nr);
}

void helper_store_slb (target_ulong rb, target_ulong rs)
{
    ppc_store_slb(env, rb, rs);
}

void helper_slbia (void)
{
    ppc_slb_invalidate_all(env);
}

void helper_slbie (target_ulong addr)
{
    ppc_slb_invalidate_one(env, addr);
}
#endif /* defined(TARGET_PPC64) */

/* TLB management */
void helper_tlbia (void)
{
    ppc_tlb_invalidate_all(env);
}

void helper_tlbie (target_ulong addr)
{
    ppc_tlb_invalidate_one(env, addr);
}

/* Software driven TLB management */
/* PowerPC 602/603 software TLB load instructions helpers */
static void do_6xx_tlb (target_ulong new_EPN, int is_code)
{
    target_ulong RPN, CMP, EPN;
    int way;

    RPN = env->spr[SPR_RPA];
    if (is_code) {
        CMP = env->spr[SPR_ICMP];
        EPN = env->spr[SPR_IMISS];
    } else {
        CMP = env->spr[SPR_DCMP];
        EPN = env->spr[SPR_DMISS];
    }
    way = (env->spr[SPR_SRR1] >> 17) & 1;
    LOG_SWTLB("%s: EPN " TARGET_FMT_lx " " TARGET_FMT_lx " PTE0 " TARGET_FMT_lx
              " PTE1 " TARGET_FMT_lx " way %d\n", __func__, new_EPN, EPN, CMP,
              RPN, way);
    /* Store this TLB */
    ppc6xx_tlb_store(env, (uint32_t)(new_EPN & TARGET_PAGE_MASK),
                     way, is_code, CMP, RPN);
}

void helper_6xx_tlbd (target_ulong EPN)
{
    do_6xx_tlb(EPN, 0);
}

void helper_6xx_tlbi (target_ulong EPN)
{
    do_6xx_tlb(EPN, 1);
}

/* PowerPC 74xx software TLB load instructions helpers */
static void do_74xx_tlb (target_ulong new_EPN, int is_code)
{
    target_ulong RPN, CMP, EPN;
    int way;

    RPN = env->spr[SPR_PTELO];
    CMP = env->spr[SPR_PTEHI];
    EPN = env->spr[SPR_TLBMISS] & ~0x3;
    way = env->spr[SPR_TLBMISS] & 0x3;
    LOG_SWTLB("%s: EPN " TARGET_FMT_lx " " TARGET_FMT_lx " PTE0 " TARGET_FMT_lx
              " PTE1 " TARGET_FMT_lx " way %d\n", __func__, new_EPN, EPN, CMP,
              RPN, way);
    /* Store this TLB */
    ppc6xx_tlb_store(env, (uint32_t)(new_EPN & TARGET_PAGE_MASK),
                     way, is_code, CMP, RPN);
}

void helper_74xx_tlbd (target_ulong EPN)
{
    do_74xx_tlb(EPN, 0);
}

void helper_74xx_tlbi (target_ulong EPN)
{
    do_74xx_tlb(EPN, 1);
}

static inline target_ulong booke_tlb_to_page_size(int size)
{
    return 1024 << (2 * size);
}

static inline int booke_page_size_to_tlb(target_ulong page_size)
{
    int size;

    switch (page_size) {
    case 0x00000400UL:
        size = 0x0;
        break;
    case 0x00001000UL:
        size = 0x1;
        break;
    case 0x00004000UL:
        size = 0x2;
        break;
    case 0x00010000UL:
        size = 0x3;
        break;
    case 0x00040000UL:
        size = 0x4;
        break;
    case 0x00100000UL:
        size = 0x5;
        break;
    case 0x00400000UL:
        size = 0x6;
        break;
    case 0x01000000UL:
        size = 0x7;
        break;
    case 0x04000000UL:
        size = 0x8;
        break;
    case 0x10000000UL:
        size = 0x9;
        break;
    case 0x40000000UL:
        size = 0xA;
        break;
#if defined (TARGET_PPC64)
    case 0x000100000000ULL:
        size = 0xB;
        break;
    case 0x000400000000ULL:
        size = 0xC;
        break;
    case 0x001000000000ULL:
        size = 0xD;
        break;
    case 0x004000000000ULL:
        size = 0xE;
        break;
    case 0x010000000000ULL:
        size = 0xF;
        break;
#endif
    default:
        size = -1;
        break;
    }

    return size;
}

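/* Worked example: the size field encodes 4**size KiB pages, so
 * booke_tlb_to_page_size(0) = 1 KiB, (3) = 64 KiB and (7) = 16 MiB;
 * booke_page_size_to_tlb is its inverse and yields -1 when the page size
 * has no encoding. */
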
/* Helpers for 4xx TLB management */
target_ulong helper_4xx_tlbre_lo (target_ulong entry)
{
    ppcemb_tlb_t *tlb;
    target_ulong ret;
    int size;

    entry &= 0x3F;
    tlb = &env->tlb[entry].tlbe;
    ret = tlb->EPN;
    if (tlb->prot & PAGE_VALID)
        ret |= 0x400;
    size = booke_page_size_to_tlb(tlb->size);
    if (size < 0 || size > 0x7)
        size = 1;
    ret |= size << 7;
    env->spr[SPR_40x_PID] = tlb->PID;
    return ret;
}

target_ulong helper_4xx_tlbre_hi (target_ulong entry)
{
    ppcemb_tlb_t *tlb;
    target_ulong ret;

    entry &= 0x3F;
    tlb = &env->tlb[entry].tlbe;
    ret = tlb->RPN;
    if (tlb->prot & PAGE_EXEC)
        ret |= 0x200;
    if (tlb->prot & PAGE_WRITE)
        ret |= 0x100;
    return ret;
}

void helper_4xx_tlbwe_hi (target_ulong entry, target_ulong val)
{
    ppcemb_tlb_t *tlb;
    target_ulong page, end;

    LOG_SWTLB("%s entry %d val " TARGET_FMT_lx "\n", __func__, (int)entry,
              val);
    entry &= 0x3F;
    tlb = &env->tlb[entry].tlbe;
    /* Invalidate previous TLB (if it's valid) */
    if (tlb->prot & PAGE_VALID) {
        end = tlb->EPN + tlb->size;
        LOG_SWTLB("%s: invalidate old TLB %d start " TARGET_FMT_lx " end "
                  TARGET_FMT_lx "\n", __func__, (int)entry, tlb->EPN, end);
        for (page = tlb->EPN; page < end; page += TARGET_PAGE_SIZE)
            tlb_flush_page(env, page);
    }
    tlb->size = booke_tlb_to_page_size((val >> 7) & 0x7);
    /* We cannot handle TLB size < TARGET_PAGE_SIZE.
     * If this ever occurs, one should use the ppcemb target instead
     * of the ppc or ppc64 one.
     */
    if ((val & 0x40) && tlb->size < TARGET_PAGE_SIZE) {
        cpu_abort(env, "TLB size " TARGET_FMT_lu " < %u "
                  "are not supported (%d)\n",
                  tlb->size, TARGET_PAGE_SIZE, (int)((val >> 7) & 0x7));
    }
    tlb->EPN = val & ~(tlb->size - 1);
    if (val & 0x40) {
        tlb->prot |= PAGE_VALID;
        if (val & 0x20) {
            /* XXX: TO BE FIXED */
            cpu_abort(env,
                      "Little-endian TLB entries are not supported by now\n");
        }
    } else {
        tlb->prot &= ~PAGE_VALID;
    }
    tlb->PID = env->spr[SPR_40x_PID]; /* PID */
    LOG_SWTLB("%s: set up TLB %d RPN " TARGET_FMT_plx " EPN " TARGET_FMT_lx
              " size " TARGET_FMT_lx " prot %c%c%c%c PID %d\n", __func__,
              (int)entry, tlb->RPN, tlb->EPN, tlb->size,
              tlb->prot & PAGE_READ ? 'r' : '-',
              tlb->prot & PAGE_WRITE ? 'w' : '-',
              tlb->prot & PAGE_EXEC ? 'x' : '-',
              tlb->prot & PAGE_VALID ? 'v' : '-', (int)tlb->PID);
    /* Invalidate new TLB (if valid) */
    if (tlb->prot & PAGE_VALID) {
        end = tlb->EPN + tlb->size;
        LOG_SWTLB("%s: invalidate TLB %d start " TARGET_FMT_lx " end "
                  TARGET_FMT_lx "\n", __func__, (int)entry, tlb->EPN, end);
        for (page = tlb->EPN; page < end; page += TARGET_PAGE_SIZE)
            tlb_flush_page(env, page);
    }
}

void helper_4xx_tlbwe_lo (target_ulong entry, target_ulong val)
{
    ppcemb_tlb_t *tlb;

    LOG_SWTLB("%s entry %i val " TARGET_FMT_lx "\n", __func__, (int)entry,
              val);
    entry &= 0x3F;
    tlb = &env->tlb[entry].tlbe;
    tlb->attr = val & 0xFF;
    tlb->RPN = val & 0xFFFFFC00;
    tlb->prot = PAGE_READ;
    if (val & 0x200)
        tlb->prot |= PAGE_EXEC;
    if (val & 0x100)
        tlb->prot |= PAGE_WRITE;
    LOG_SWTLB("%s: set up TLB %d RPN " TARGET_FMT_plx " EPN " TARGET_FMT_lx
              " size " TARGET_FMT_lx " prot %c%c%c%c PID %d\n", __func__,
              (int)entry, tlb->RPN, tlb->EPN, tlb->size,
              tlb->prot & PAGE_READ ? 'r' : '-',
              tlb->prot & PAGE_WRITE ? 'w' : '-',
              tlb->prot & PAGE_EXEC ? 'x' : '-',
              tlb->prot & PAGE_VALID ? 'v' : '-', (int)tlb->PID);
}

target_ulong helper_4xx_tlbsx (target_ulong address)
{
    return ppcemb_tlb_search(env, address, env->spr[SPR_40x_PID]);
}

/* PowerPC 440 TLB management */
void helper_440_tlbwe (uint32_t word, target_ulong entry, target_ulong value)
{
    ppcemb_tlb_t *tlb;
    target_ulong EPN, RPN, size;
    int do_flush_tlbs;

    LOG_SWTLB("%s word %d entry %d value " TARGET_FMT_lx "\n",
              __func__, word, (int)entry, value);
    do_flush_tlbs = 0;
    entry &= 0x3F;
    tlb = &env->tlb[entry].tlbe;
    switch (word) {
    default:
        /* Just here to please gcc */
    case 0:
        EPN = value & 0xFFFFFC00;
        if ((tlb->prot & PAGE_VALID) && EPN != tlb->EPN)
            do_flush_tlbs = 1;
        tlb->EPN = EPN;
        size = booke_tlb_to_page_size((value >> 4) & 0xF);
        if ((tlb->prot & PAGE_VALID) && tlb->size < size)
            do_flush_tlbs = 1;
        tlb->size = size;
        tlb->attr &= ~0x1;
        tlb->attr |= (value >> 8) & 1;
        if (value & 0x200) {
            tlb->prot |= PAGE_VALID;
        } else {
            if (tlb->prot & PAGE_VALID) {
                tlb->prot &= ~PAGE_VALID;
                do_flush_tlbs = 1;
            }
        }
        tlb->PID = env->spr[SPR_440_MMUCR] & 0x000000FF;
        if (do_flush_tlbs)
            tlb_flush(env, 1);
        break;
    case 1:
        RPN = value & 0xFFFFFC0F;
        if ((tlb->prot & PAGE_VALID) && tlb->RPN != RPN)
            tlb_flush(env, 1);
        tlb->RPN = RPN;
        break;
    case 2:
        tlb->attr = (tlb->attr & 0x1) | (value & 0x0000FF00);
        tlb->prot = tlb->prot & PAGE_VALID;
        if (value & 0x1)
            tlb->prot |= PAGE_READ << 4;
        if (value & 0x2)
            tlb->prot |= PAGE_WRITE << 4;
        if (value & 0x4)
            tlb->prot |= PAGE_EXEC << 4;
        if (value & 0x8)
            tlb->prot |= PAGE_READ;
        if (value & 0x10)
            tlb->prot |= PAGE_WRITE;
        if (value & 0x20)
            tlb->prot |= PAGE_EXEC;
        break;
    }
}

target_ulong helper_440_tlbre (uint32_t word, target_ulong entry)
{
    ppcemb_tlb_t *tlb;
    target_ulong ret;
    int size;

    entry &= 0x3F;
    tlb = &env->tlb[entry].tlbe;
    switch (word) {
    default:
        /* Just here to please gcc */
    case 0:
        ret = tlb->EPN;
        size = booke_page_size_to_tlb(tlb->size);
        if (size < 0 || size > 0xF)
            size = 1;
        ret |= size << 4;
        if (tlb->attr & 0x1)
            ret |= 0x100;
        if (tlb->prot & PAGE_VALID)
            ret |= 0x200;
        env->spr[SPR_440_MMUCR] &= ~0x000000FF;
        env->spr[SPR_440_MMUCR] |= tlb->PID;
        break;
    case 1:
        ret = tlb->RPN;
        break;
    case 2:
        ret = tlb->attr & ~0x1;
        if (tlb->prot & (PAGE_READ << 4))
            ret |= 0x1;
        if (tlb->prot & (PAGE_WRITE << 4))
            ret |= 0x2;
        if (tlb->prot & (PAGE_EXEC << 4))
            ret |= 0x4;
        if (tlb->prot & PAGE_READ)
            ret |= 0x8;
        if (tlb->prot & PAGE_WRITE)
            ret |= 0x10;
        if (tlb->prot & PAGE_EXEC)
            ret |= 0x20;
        break;
    }
    return ret;
}

target_ulong helper_440_tlbsx (target_ulong address)
{
    return ppcemb_tlb_search(env, address, env->spr[SPR_440_MMUCR] & 0xFF);
}

#endif /* !CONFIG_USER_ONLY */