/*
 *  PowerPC emulation helpers for qemu.
 *
 *  Copyright (c) 2003-2007 Jocelyn Mayer
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "exec.h"
#include "host-utils.h"
#include "helper.h"

#include "helper_regs.h"

//#define DEBUG_EXCEPTIONS
//#define DEBUG_SOFTWARE_TLB

#ifdef DEBUG_SOFTWARE_TLB
#  define LOG_SWTLB(...) qemu_log(__VA_ARGS__)
#else
#  define LOG_SWTLB(...) do { } while (0)
#endif
/*****************************************************************************/
/* Exceptions processing helpers */

void helper_raise_exception_err (uint32_t exception, uint32_t error_code)
{
#if 0
    printf("Raise exception %3x code : %d\n", exception, error_code);
#endif
    env->exception_index = exception;
    env->error_code = error_code;
    cpu_loop_exit();
}

void helper_raise_exception (uint32_t exception)
{
    helper_raise_exception_err(exception, 0);
}
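/* Note: raising an exception from helper code records it in
 * env->exception_index / env->error_code and then unwinds back to the main
 * translation loop via cpu_loop_exit(), so these helpers never return to the
 * generated code that called them. */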
/*****************************************************************************/
/* SPR accesses */

void helper_load_dump_spr (uint32_t sprn)
{
    qemu_log("Read SPR %d %03x => " TARGET_FMT_lx "\n", sprn, sprn,
             env->spr[sprn]);
}

void helper_store_dump_spr (uint32_t sprn)
{
    qemu_log("Write SPR %d %03x <= " TARGET_FMT_lx "\n", sprn, sprn,
             env->spr[sprn]);
}

target_ulong helper_load_tbl (void)
{
    return (target_ulong)cpu_ppc_load_tbl(env);
}

target_ulong helper_load_tbu (void)
{
    return cpu_ppc_load_tbu(env);
}

target_ulong helper_load_atbl (void)
{
    return (target_ulong)cpu_ppc_load_atbl(env);
}

target_ulong helper_load_atbu (void)
{
    return cpu_ppc_load_atbu(env);
}
#if defined(TARGET_PPC64) && !defined(CONFIG_USER_ONLY)
target_ulong helper_load_purr (void)
{
    return (target_ulong)cpu_ppc_load_purr(env);
}
#endif

target_ulong helper_load_601_rtcl (void)
{
    return cpu_ppc601_load_rtcl(env);
}

target_ulong helper_load_601_rtcu (void)
{
    return cpu_ppc601_load_rtcu(env);
}
#if !defined(CONFIG_USER_ONLY)
#if defined (TARGET_PPC64)
void helper_store_asr (target_ulong val)
{
    ppc_store_asr(env, val);
}
#endif

void helper_store_sdr1 (target_ulong val)
{
    ppc_store_sdr1(env, val);
}

void helper_store_tbl (target_ulong val)
{
    cpu_ppc_store_tbl(env, val);
}

void helper_store_tbu (target_ulong val)
{
    cpu_ppc_store_tbu(env, val);
}

void helper_store_atbl (target_ulong val)
{
    cpu_ppc_store_atbl(env, val);
}

void helper_store_atbu (target_ulong val)
{
    cpu_ppc_store_atbu(env, val);
}

void helper_store_601_rtcl (target_ulong val)
{
    cpu_ppc601_store_rtcl(env, val);
}

void helper_store_601_rtcu (target_ulong val)
{
    cpu_ppc601_store_rtcu(env, val);
}
target_ulong helper_load_decr (void)
{
    return cpu_ppc_load_decr(env);
}

void helper_store_decr (target_ulong val)
{
    cpu_ppc_store_decr(env, val);
}

void helper_store_hid0_601 (target_ulong val)
{
    target_ulong hid0;

    hid0 = env->spr[SPR_HID0];
    if ((val ^ hid0) & 0x00000008) {
        /* Change current endianness */
        env->hflags &= ~(1 << MSR_LE);
        env->hflags_nmsr &= ~(1 << MSR_LE);
        env->hflags_nmsr |= (1 << MSR_LE) & (((val >> 3) & 1) << MSR_LE);
        env->hflags |= env->hflags_nmsr;
        qemu_log("%s: set endianness to %c => " TARGET_FMT_lx "\n", __func__,
                 val & 0x8 ? 'l' : 'b', env->hflags);
    }
    env->spr[SPR_HID0] = (uint32_t)val;
}
void helper_store_403_pbr (uint32_t num, target_ulong value)
{
    if (likely(env->pb[num] != value)) {
        env->pb[num] = value;
        /* Should be optimized */
        tlb_flush(env, 1);
    }
}

target_ulong helper_load_40x_pit (void)
{
    return load_40x_pit(env);
}

void helper_store_40x_pit (target_ulong val)
{
    store_40x_pit(env, val);
}

void helper_store_40x_dbcr0 (target_ulong val)
{
    store_40x_dbcr0(env, val);
}

void helper_store_40x_sler (target_ulong val)
{
    store_40x_sler(env, val);
}

void helper_store_booke_tcr (target_ulong val)
{
    store_booke_tcr(env, val);
}

void helper_store_booke_tsr (target_ulong val)
{
    store_booke_tsr(env, val);
}
void helper_store_ibatu (uint32_t nr, target_ulong val)
{
    ppc_store_ibatu(env, nr, val);
}

void helper_store_ibatl (uint32_t nr, target_ulong val)
{
    ppc_store_ibatl(env, nr, val);
}

void helper_store_dbatu (uint32_t nr, target_ulong val)
{
    ppc_store_dbatu(env, nr, val);
}

void helper_store_dbatl (uint32_t nr, target_ulong val)
{
    ppc_store_dbatl(env, nr, val);
}

void helper_store_601_batl (uint32_t nr, target_ulong val)
{
    ppc_store_ibatl_601(env, nr, val);
}

void helper_store_601_batu (uint32_t nr, target_ulong val)
{
    ppc_store_ibatu_601(env, nr, val);
}
#endif
/*****************************************************************************/
/* Memory load and stores */

static inline target_ulong addr_add(target_ulong addr, target_long arg)
{
#if defined(TARGET_PPC64)
    if (!msr_sf) {
        return (uint32_t)(addr + arg);
    } else
#endif
    {
        return addr + arg;
    }
}

void helper_lmw (target_ulong addr, uint32_t reg)
{
    for (; reg < 32; reg++) {
        if (msr_le) {
            env->gpr[reg] = bswap32(ldl(addr));
        } else {
            env->gpr[reg] = ldl(addr);
        }
        addr = addr_add(addr, 4);
    }
}

void helper_stmw (target_ulong addr, uint32_t reg)
{
    for (; reg < 32; reg++) {
        if (msr_le) {
            stl(addr, bswap32((uint32_t)env->gpr[reg]));
        } else {
            stl(addr, (uint32_t)env->gpr[reg]);
        }
        addr = addr_add(addr, 4);
    }
}
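/* addr_add() truncates the effective address to 32 bits when a 64-bit CPU
 * runs with MSR[SF] clear, so lmw/stmw and the string helpers below wrap
 * around the 4 GiB boundary exactly as a 32-bit implementation would. */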
void helper_lsw(target_ulong addr, uint32_t nb, uint32_t reg)
{
    int sh;

    for (; nb > 3; nb -= 4) {
        env->gpr[reg] = ldl(addr);
        reg = (reg + 1) % 32;
        addr = addr_add(addr, 4);
    }
    if (unlikely(nb > 0)) {
        env->gpr[reg] = 0;
        for (sh = 24; nb > 0; nb--, sh -= 8) {
            env->gpr[reg] |= ldub(addr) << sh;
            addr = addr_add(addr, 1);
        }
    }
}
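/* The tail of lsw packs the remaining 1-3 bytes into the most significant
 * byte lanes of the last register (sh = 24, 16, 8); the unused low lanes are
 * left zeroed by the 'env->gpr[reg] = 0' above. */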
/* PPC32 specification says we must generate an exception if
 * rA is in the range of registers to be loaded.
 * On the other hand, IBM says this is valid, but rA won't be loaded.
 * For now, I'll follow the spec...
 */
void helper_lswx(target_ulong addr, uint32_t reg, uint32_t ra, uint32_t rb)
{
    if (likely(xer_bc != 0)) {
        if (unlikely((ra != 0 && reg < ra && (reg + xer_bc) > ra) ||
                     (reg < rb && (reg + xer_bc) > rb))) {
            helper_raise_exception_err(POWERPC_EXCP_PROGRAM,
                                       POWERPC_EXCP_INVAL |
                                       POWERPC_EXCP_INVAL_LSWX);
        } else {
            helper_lsw(addr, xer_bc, reg);
        }
    }
}
void helper_stsw(target_ulong addr, uint32_t nb, uint32_t reg)
{
    int sh;

    for (; nb > 3; nb -= 4) {
        stl(addr, env->gpr[reg]);
        reg = (reg + 1) % 32;
        addr = addr_add(addr, 4);
    }
    if (unlikely(nb > 0)) {
        for (sh = 24; nb > 0; nb--, sh -= 8) {
            stb(addr, (env->gpr[reg] >> sh) & 0xFF);
            addr = addr_add(addr, 1);
        }
    }
}
static void do_dcbz(target_ulong addr, int dcache_line_size)
{
    int i;

    addr &= ~(dcache_line_size - 1);
    for (i = 0; i < dcache_line_size; i += 4) {
        stl(addr + i, 0);
    }
    if (env->reserve_addr == addr) {
        env->reserve_addr = (target_ulong)-1ULL;
    }
}

void helper_dcbz(target_ulong addr)
{
    do_dcbz(addr, env->dcache_line_size);
}

void helper_dcbz_970(target_ulong addr)
{
    if (((env->spr[SPR_970_HID5] >> 7) & 0x3) == 1) {
        do_dcbz(addr, 32);
    } else {
        do_dcbz(addr, env->dcache_line_size);
    }
}
void helper_icbi(target_ulong addr)
{
    addr &= ~(env->dcache_line_size - 1);
    /* Invalidate one cache line :
     * PowerPC specification says this is to be treated like a load
     * (not a fetch) by the MMU. To be sure it will be so,
     * do the load "by hand".
     */
    ldl(addr);
    tb_invalidate_page_range(addr, addr + env->icache_line_size);
}
target_ulong helper_lscbx (target_ulong addr, uint32_t reg, uint32_t ra,
                           uint32_t rb)
{
    int i, c, d;

    d = 24;
    for (i = 0; i < xer_bc; i++) {
        c = ldub(addr);
        addr = addr_add(addr, 1);
        /* ra (if not 0) and rb are never modified */
        if (likely(reg != rb && (ra == 0 || reg != ra))) {
            env->gpr[reg] = (env->gpr[reg] & ~(0xFF << d)) | (c << d);
        }
        if (unlikely(c == xer_cmp)) {
            break;
        }
        if (likely(d != 0)) {
            d -= 8;
        } else {
            d = 24;
            reg++;
            reg = reg & 0x1F;
        }
    }
    return i;
}
/*****************************************************************************/
/* Fixed point operations helpers */
#if defined(TARGET_PPC64)

/* multiply high word */
uint64_t helper_mulhd (uint64_t arg1, uint64_t arg2)
{
    uint64_t tl, th;

    muls64(&tl, &th, arg1, arg2);
    return th;
}

/* multiply high word unsigned */
uint64_t helper_mulhdu (uint64_t arg1, uint64_t arg2)
{
    uint64_t tl, th;

    mulu64(&tl, &th, arg1, arg2);
    return th;
}

uint64_t helper_mulldo (uint64_t arg1, uint64_t arg2)
{
    int64_t th;
    uint64_t tl;

    muls64(&tl, (uint64_t *)&th, arg1, arg2);
    /* If th != 0 && th != -1, then we had an overflow */
    if (likely((uint64_t)(th + 1) <= 1)) {
        env->xer &= ~(1 << XER_OV);
    } else {
        env->xer |= (1 << XER_OV) | (1 << XER_SO);
    }
    return (int64_t)tl;
}
#endif
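/* mulldo overflow detection: the signed 128-bit product fits in 64 bits only
 * when the high half 'th' is 0 or -1 (a sign extension of the low half);
 * '(uint64_t)(th + 1) <= 1' is a branch-free test for exactly those two
 * values. */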
target_ulong helper_cntlzw (target_ulong t)
{
    return clz32(t);
}

#if defined(TARGET_PPC64)
target_ulong helper_cntlzd (target_ulong t)
{
    return clz64(t);
}
#endif
/* shift right arithmetic helper */
target_ulong helper_sraw (target_ulong value, target_ulong shift)
{
    int32_t ret;

    if (likely(!(shift & 0x20))) {
        if (likely((uint32_t)shift != 0)) {
            shift &= 0x1f;
            ret = (int32_t)value >> shift;
            if (likely(ret >= 0 || (value & ((1 << shift) - 1)) == 0)) {
                env->xer &= ~(1 << XER_CA);
            } else {
                env->xer |= (1 << XER_CA);
            }
        } else {
            ret = (int32_t)value;
            env->xer &= ~(1 << XER_CA);
        }
    } else {
        ret = (int32_t)value >> 31;
        if (ret) {
            env->xer |= (1 << XER_CA);
        } else {
            env->xer &= ~(1 << XER_CA);
        }
    }
    return (target_long)ret;
}
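/* For sraw/srad, XER[CA] is set only when the source operand is negative and
 * at least one 1-bit was shifted out, i.e. when the arithmetic shift rounded
 * the result towards minus infinity. */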
#if defined(TARGET_PPC64)
target_ulong helper_srad (target_ulong value, target_ulong shift)
{
    int64_t ret;

    if (likely(!(shift & 0x40))) {
        if (likely((uint64_t)shift != 0)) {
            shift &= 0x3f;
            ret = (int64_t)value >> shift;
            if (likely(ret >= 0 ||
                       (value & (((uint64_t)1 << shift) - 1)) == 0)) {
                env->xer &= ~(1 << XER_CA);
            } else {
                env->xer |= (1 << XER_CA);
            }
        } else {
            ret = (int64_t)value;
            env->xer &= ~(1 << XER_CA);
        }
    } else {
        ret = (int64_t)value >> 63;
        if (ret) {
            env->xer |= (1 << XER_CA);
        } else {
            env->xer &= ~(1 << XER_CA);
        }
    }
    return ret;
}
#endif
#if defined(TARGET_PPC64)
target_ulong helper_popcntb (target_ulong val)
{
    val = (val & 0x5555555555555555ULL) + ((val >>  1) &
                                           0x5555555555555555ULL);
    val = (val & 0x3333333333333333ULL) + ((val >>  2) &
                                           0x3333333333333333ULL);
    val = (val & 0x0f0f0f0f0f0f0f0fULL) + ((val >>  4) &
                                           0x0f0f0f0f0f0f0f0fULL);
    return val;
}

target_ulong helper_popcntw (target_ulong val)
{
    val = (val & 0x5555555555555555ULL) + ((val >>  1) &
                                           0x5555555555555555ULL);
    val = (val & 0x3333333333333333ULL) + ((val >>  2) &
                                           0x3333333333333333ULL);
    val = (val & 0x0f0f0f0f0f0f0f0fULL) + ((val >>  4) &
                                           0x0f0f0f0f0f0f0f0fULL);
    val = (val & 0x00ff00ff00ff00ffULL) + ((val >>  8) &
                                           0x00ff00ff00ff00ffULL);
    val = (val & 0x0000ffff0000ffffULL) + ((val >> 16) &
                                           0x0000ffff0000ffffULL);
    return val;
}

target_ulong helper_popcntd (target_ulong val)
{
    val = (val & 0x5555555555555555ULL) + ((val >>  1) &
                                           0x5555555555555555ULL);
    val = (val & 0x3333333333333333ULL) + ((val >>  2) &
                                           0x3333333333333333ULL);
    val = (val & 0x0f0f0f0f0f0f0f0fULL) + ((val >>  4) &
                                           0x0f0f0f0f0f0f0f0fULL);
    val = (val & 0x00ff00ff00ff00ffULL) + ((val >>  8) &
                                           0x00ff00ff00ff00ffULL);
    val = (val & 0x0000ffff0000ffffULL) + ((val >> 16) &
                                           0x0000ffff0000ffffULL);
    val = (val & 0x00000000ffffffffULL) + ((val >> 32) &
                                           0x00000000ffffffffULL);
    return val;
}
#else
target_ulong helper_popcntb (target_ulong val)
{
    val = (val & 0x55555555) + ((val >>  1) & 0x55555555);
    val = (val & 0x33333333) + ((val >>  2) & 0x33333333);
    val = (val & 0x0f0f0f0f) + ((val >>  4) & 0x0f0f0f0f);
    return val;
}

target_ulong helper_popcntw (target_ulong val)
{
    val = (val & 0x55555555) + ((val >>  1) & 0x55555555);
    val = (val & 0x33333333) + ((val >>  2) & 0x33333333);
    val = (val & 0x0f0f0f0f) + ((val >>  4) & 0x0f0f0f0f);
    val = (val & 0x00ff00ff) + ((val >>  8) & 0x00ff00ff);
    val = (val & 0x0000ffff) + ((val >> 16) & 0x0000ffff);
    return val;
}
#endif
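/* These are classic SWAR (SIMD-within-a-register) bit counts: each step adds
 * neighbouring fields of twice the width, so after the 0x0f0f... step every
 * byte holds its own population count (popcntb) and the later steps widen the
 * sums to halfwords, words and doublewords.  E.g. popcntb(0x0300) yields
 * 0x0200: two bits set in the second byte, none in the first. */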
/*****************************************************************************/
/* Floating point operations helpers */
uint64_t helper_float32_to_float64(uint32_t arg)
{
    CPU_FloatU f;
    CPU_DoubleU d;

    f.l = arg;
    d.d = float32_to_float64(f.f, &env->fp_status);
    return d.ll;
}

uint32_t helper_float64_to_float32(uint64_t arg)
{
    CPU_FloatU f;
    CPU_DoubleU d;

    d.ll = arg;
    f.f = float64_to_float32(d.d, &env->fp_status);
    return f.l;
}

static inline int isden(float64 d)
{
    CPU_DoubleU u;

    u.d = d;
    return ((u.ll >> 52) & 0x7FF) == 0;
}
uint32_t helper_compute_fprf (uint64_t arg, uint32_t set_fprf)
{
    CPU_DoubleU farg;
    int isneg;
    int ret;

    farg.ll = arg;
    isneg = float64_is_neg(farg.d);
    if (unlikely(float64_is_any_nan(farg.d))) {
        if (float64_is_signaling_nan(farg.d)) {
            /* Signaling NaN: flags are undefined */
            ret = 0x00;
        } else {
            /* Quiet NaN */
            ret = 0x11;
        }
    } else if (unlikely(float64_is_infinity(farg.d))) {
        /* +/- infinity */
        if (isneg) {
            ret = 0x09;
        } else {
            ret = 0x05;
        }
    } else {
        if (float64_is_zero(farg.d)) {
            /* +/- zero */
            if (isneg) {
                ret = 0x12;
            } else {
                ret = 0x02;
            }
        } else {
            if (isden(farg.d)) {
                /* Denormalized numbers */
                ret = 0x10;
            } else {
                /* Normalized numbers */
                ret = 0x00;
            }
            if (isneg) {
                ret |= 0x08;
            } else {
                ret |= 0x04;
            }
        }
    }
    if (set_fprf) {
        /* We update FPSCR_FPRF */
        env->fpscr &= ~(0x1F << FPSCR_FPRF);
        env->fpscr |= ret << FPSCR_FPRF;
    }
    /* We just need fpcc to update Rc1 */
    return ret & 0xF;
}
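/* FPRF is the 5-bit result-flags field of the FPSCR: a class bit (C) followed
 * by the four FPCC condition bits FL/FG/FE/FU.  A caller that only needs to
 * update CR1 for the Rc=1 forms uses the low four bits, hence 'ret & 0xF'
 * above. */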
646 /* Floating-point invalid operations exception */
647 static inline uint64_t fload_invalid_op_excp(int op
)
654 case POWERPC_EXCP_FP_VXSNAN
:
655 env
->fpscr
|= 1 << FPSCR_VXSNAN
;
657 case POWERPC_EXCP_FP_VXSOFT
:
658 env
->fpscr
|= 1 << FPSCR_VXSOFT
;
660 case POWERPC_EXCP_FP_VXISI
:
661 /* Magnitude subtraction of infinities */
662 env
->fpscr
|= 1 << FPSCR_VXISI
;
664 case POWERPC_EXCP_FP_VXIDI
:
665 /* Division of infinity by infinity */
666 env
->fpscr
|= 1 << FPSCR_VXIDI
;
668 case POWERPC_EXCP_FP_VXZDZ
:
669 /* Division of zero by zero */
670 env
->fpscr
|= 1 << FPSCR_VXZDZ
;
672 case POWERPC_EXCP_FP_VXIMZ
:
673 /* Multiplication of zero by infinity */
674 env
->fpscr
|= 1 << FPSCR_VXIMZ
;
676 case POWERPC_EXCP_FP_VXVC
:
677 /* Ordered comparison of NaN */
678 env
->fpscr
|= 1 << FPSCR_VXVC
;
679 env
->fpscr
&= ~(0xF << FPSCR_FPCC
);
680 env
->fpscr
|= 0x11 << FPSCR_FPCC
;
681 /* We must update the target FPR before raising the exception */
683 env
->exception_index
= POWERPC_EXCP_PROGRAM
;
684 env
->error_code
= POWERPC_EXCP_FP
| POWERPC_EXCP_FP_VXVC
;
685 /* Update the floating-point enabled exception summary */
686 env
->fpscr
|= 1 << FPSCR_FEX
;
        /* Exception is deferred */
691 case POWERPC_EXCP_FP_VXSQRT
:
692 /* Square root of a negative number */
693 env
->fpscr
|= 1 << FPSCR_VXSQRT
;
695 env
->fpscr
&= ~((1 << FPSCR_FR
) | (1 << FPSCR_FI
));
697 /* Set the result to quiet NaN */
698 ret
= 0x7FF8000000000000ULL
;
699 env
->fpscr
&= ~(0xF << FPSCR_FPCC
);
700 env
->fpscr
|= 0x11 << FPSCR_FPCC
;
703 case POWERPC_EXCP_FP_VXCVI
:
704 /* Invalid conversion */
705 env
->fpscr
|= 1 << FPSCR_VXCVI
;
706 env
->fpscr
&= ~((1 << FPSCR_FR
) | (1 << FPSCR_FI
));
708 /* Set the result to quiet NaN */
709 ret
= 0x7FF8000000000000ULL
;
710 env
->fpscr
&= ~(0xF << FPSCR_FPCC
);
711 env
->fpscr
|= 0x11 << FPSCR_FPCC
;
715 /* Update the floating-point invalid operation summary */
716 env
->fpscr
|= 1 << FPSCR_VX
;
717 /* Update the floating-point exception summary */
718 env
->fpscr
|= 1 << FPSCR_FX
;
720 /* Update the floating-point enabled exception summary */
721 env
->fpscr
|= 1 << FPSCR_FEX
;
722 if (msr_fe0
!= 0 || msr_fe1
!= 0)
723 helper_raise_exception_err(POWERPC_EXCP_PROGRAM
, POWERPC_EXCP_FP
| op
);
728 static inline void float_zero_divide_excp(void)
730 env
->fpscr
|= 1 << FPSCR_ZX
;
731 env
->fpscr
&= ~((1 << FPSCR_FR
) | (1 << FPSCR_FI
));
732 /* Update the floating-point exception summary */
733 env
->fpscr
|= 1 << FPSCR_FX
;
735 /* Update the floating-point enabled exception summary */
736 env
->fpscr
|= 1 << FPSCR_FEX
;
737 if (msr_fe0
!= 0 || msr_fe1
!= 0) {
738 helper_raise_exception_err(POWERPC_EXCP_PROGRAM
,
739 POWERPC_EXCP_FP
| POWERPC_EXCP_FP_ZX
);
744 static inline void float_overflow_excp(void)
746 env
->fpscr
|= 1 << FPSCR_OX
;
747 /* Update the floating-point exception summary */
748 env
->fpscr
|= 1 << FPSCR_FX
;
750 /* XXX: should adjust the result */
751 /* Update the floating-point enabled exception summary */
752 env
->fpscr
|= 1 << FPSCR_FEX
;
753 /* We must update the target FPR before raising the exception */
754 env
->exception_index
= POWERPC_EXCP_PROGRAM
;
755 env
->error_code
= POWERPC_EXCP_FP
| POWERPC_EXCP_FP_OX
;
757 env
->fpscr
|= 1 << FPSCR_XX
;
758 env
->fpscr
|= 1 << FPSCR_FI
;
762 static inline void float_underflow_excp(void)
764 env
->fpscr
|= 1 << FPSCR_UX
;
765 /* Update the floating-point exception summary */
766 env
->fpscr
|= 1 << FPSCR_FX
;
768 /* XXX: should adjust the result */
769 /* Update the floating-point enabled exception summary */
770 env
->fpscr
|= 1 << FPSCR_FEX
;
771 /* We must update the target FPR before raising the exception */
772 env
->exception_index
= POWERPC_EXCP_PROGRAM
;
773 env
->error_code
= POWERPC_EXCP_FP
| POWERPC_EXCP_FP_UX
;
777 static inline void float_inexact_excp(void)
779 env
->fpscr
|= 1 << FPSCR_XX
;
780 /* Update the floating-point exception summary */
781 env
->fpscr
|= 1 << FPSCR_FX
;
783 /* Update the floating-point enabled exception summary */
784 env
->fpscr
|= 1 << FPSCR_FEX
;
785 /* We must update the target FPR before raising the exception */
786 env
->exception_index
= POWERPC_EXCP_PROGRAM
;
787 env
->error_code
= POWERPC_EXCP_FP
| POWERPC_EXCP_FP_XX
;
791 static inline void fpscr_set_rounding_mode(void)
795 /* Set rounding mode */
798 /* Best approximation (round to nearest) */
799 rnd_type
= float_round_nearest_even
;
802 /* Smaller magnitude (round toward zero) */
803 rnd_type
= float_round_to_zero
;
806 /* Round toward +infinite */
807 rnd_type
= float_round_up
;
811 /* Round toward -infinite */
812 rnd_type
= float_round_down
;
815 set_float_rounding_mode(rnd_type
, &env
->fp_status
);
818 void helper_fpscr_clrbit (uint32_t bit
)
822 prev
= (env
->fpscr
>> bit
) & 1;
823 env
->fpscr
&= ~(1 << bit
);
828 fpscr_set_rounding_mode();
836 void helper_fpscr_setbit (uint32_t bit
)
840 prev
= (env
->fpscr
>> bit
) & 1;
841 env
->fpscr
|= 1 << bit
;
845 env
->fpscr
|= 1 << FPSCR_FX
;
849 env
->fpscr
|= 1 << FPSCR_FX
;
854 env
->fpscr
|= 1 << FPSCR_FX
;
859 env
->fpscr
|= 1 << FPSCR_FX
;
864 env
->fpscr
|= 1 << FPSCR_FX
;
877 env
->fpscr
|= 1 << FPSCR_VX
;
878 env
->fpscr
|= 1 << FPSCR_FX
;
885 env
->error_code
= POWERPC_EXCP_FP
;
887 env
->error_code
|= POWERPC_EXCP_FP_VXSNAN
;
889 env
->error_code
|= POWERPC_EXCP_FP_VXISI
;
891 env
->error_code
|= POWERPC_EXCP_FP_VXIDI
;
893 env
->error_code
|= POWERPC_EXCP_FP_VXZDZ
;
895 env
->error_code
|= POWERPC_EXCP_FP_VXIMZ
;
897 env
->error_code
|= POWERPC_EXCP_FP_VXVC
;
899 env
->error_code
|= POWERPC_EXCP_FP_VXSOFT
;
901 env
->error_code
|= POWERPC_EXCP_FP_VXSQRT
;
903 env
->error_code
|= POWERPC_EXCP_FP_VXCVI
;
910 env
->error_code
= POWERPC_EXCP_FP
| POWERPC_EXCP_FP_OX
;
917 env
->error_code
= POWERPC_EXCP_FP
| POWERPC_EXCP_FP_UX
;
924 env
->error_code
= POWERPC_EXCP_FP
| POWERPC_EXCP_FP_ZX
;
931 env
->error_code
= POWERPC_EXCP_FP
| POWERPC_EXCP_FP_XX
;
937 fpscr_set_rounding_mode();
942 /* Update the floating-point enabled exception summary */
943 env
->fpscr
|= 1 << FPSCR_FEX
;
944 /* We have to update Rc1 before raising the exception */
945 env
->exception_index
= POWERPC_EXCP_PROGRAM
;
951 void helper_store_fpscr (uint64_t arg
, uint32_t mask
)
954 * We use only the 32 LSB of the incoming fpr
962 new |= prev
& 0x60000000;
963 for (i
= 0; i
< 8; i
++) {
964 if (mask
& (1 << i
)) {
965 env
->fpscr
&= ~(0xF << (4 * i
));
966 env
->fpscr
|= new & (0xF << (4 * i
));
969 /* Update VX and FEX */
971 env
->fpscr
|= 1 << FPSCR_VX
;
973 env
->fpscr
&= ~(1 << FPSCR_VX
);
974 if ((fpscr_ex
& fpscr_eex
) != 0) {
975 env
->fpscr
|= 1 << FPSCR_FEX
;
976 env
->exception_index
= POWERPC_EXCP_PROGRAM
;
977 /* XXX: we should compute it properly */
978 env
->error_code
= POWERPC_EXCP_FP
;
981 env
->fpscr
&= ~(1 << FPSCR_FEX
);
982 fpscr_set_rounding_mode();
985 void helper_float_check_status (void)
987 #ifdef CONFIG_SOFTFLOAT
988 if (env
->exception_index
== POWERPC_EXCP_PROGRAM
&&
989 (env
->error_code
& POWERPC_EXCP_FP
)) {
        /* Deferred floating-point exception after target FPR update */
991 if (msr_fe0
!= 0 || msr_fe1
!= 0)
992 helper_raise_exception_err(env
->exception_index
, env
->error_code
);
994 int status
= get_float_exception_flags(&env
->fp_status
);
995 if (status
& float_flag_divbyzero
) {
996 float_zero_divide_excp();
997 } else if (status
& float_flag_overflow
) {
998 float_overflow_excp();
999 } else if (status
& float_flag_underflow
) {
1000 float_underflow_excp();
1001 } else if (status
& float_flag_inexact
) {
1002 float_inexact_excp();
1006 if (env
->exception_index
== POWERPC_EXCP_PROGRAM
&&
1007 (env
->error_code
& POWERPC_EXCP_FP
)) {
        /* Deferred floating-point exception after target FPR update */
1009 if (msr_fe0
!= 0 || msr_fe1
!= 0)
1010 helper_raise_exception_err(env
->exception_index
, env
->error_code
);
1015 #ifdef CONFIG_SOFTFLOAT
1016 void helper_reset_fpstatus (void)
1018 set_float_exception_flags(0, &env
->fp_status
);
1023 uint64_t helper_fadd (uint64_t arg1
, uint64_t arg2
)
1025 CPU_DoubleU farg1
, farg2
;
1030 if (unlikely(float64_is_infinity(farg1
.d
) && float64_is_infinity(farg2
.d
) &&
1031 float64_is_neg(farg1
.d
) != float64_is_neg(farg2
.d
))) {
1032 /* Magnitude subtraction of infinities */
1033 farg1
.ll
= fload_invalid_op_excp(POWERPC_EXCP_FP_VXISI
);
1035 if (unlikely(float64_is_signaling_nan(farg1
.d
) ||
1036 float64_is_signaling_nan(farg2
.d
))) {
1038 fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN
);
1040 farg1
.d
= float64_add(farg1
.d
, farg2
.d
, &env
->fp_status
);
1047 uint64_t helper_fsub (uint64_t arg1
, uint64_t arg2
)
1049 CPU_DoubleU farg1
, farg2
;
1054 if (unlikely(float64_is_infinity(farg1
.d
) && float64_is_infinity(farg2
.d
) &&
1055 float64_is_neg(farg1
.d
) == float64_is_neg(farg2
.d
))) {
1056 /* Magnitude subtraction of infinities */
1057 farg1
.ll
= fload_invalid_op_excp(POWERPC_EXCP_FP_VXISI
);
1059 if (unlikely(float64_is_signaling_nan(farg1
.d
) ||
1060 float64_is_signaling_nan(farg2
.d
))) {
1061 /* sNaN subtraction */
1062 fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN
);
1064 farg1
.d
= float64_sub(farg1
.d
, farg2
.d
, &env
->fp_status
);
1071 uint64_t helper_fmul (uint64_t arg1
, uint64_t arg2
)
1073 CPU_DoubleU farg1
, farg2
;
1078 if (unlikely((float64_is_infinity(farg1
.d
) && float64_is_zero(farg2
.d
)) ||
1079 (float64_is_zero(farg1
.d
) && float64_is_infinity(farg2
.d
)))) {
1080 /* Multiplication of zero by infinity */
1081 farg1
.ll
= fload_invalid_op_excp(POWERPC_EXCP_FP_VXIMZ
);
1083 if (unlikely(float64_is_signaling_nan(farg1
.d
) ||
1084 float64_is_signaling_nan(farg2
.d
))) {
1085 /* sNaN multiplication */
1086 fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN
);
1088 farg1
.d
= float64_mul(farg1
.d
, farg2
.d
, &env
->fp_status
);
1095 uint64_t helper_fdiv (uint64_t arg1
, uint64_t arg2
)
1097 CPU_DoubleU farg1
, farg2
;
1102 if (unlikely(float64_is_infinity(farg1
.d
) && float64_is_infinity(farg2
.d
))) {
1103 /* Division of infinity by infinity */
1104 farg1
.ll
= fload_invalid_op_excp(POWERPC_EXCP_FP_VXIDI
);
1105 } else if (unlikely(float64_is_zero(farg1
.d
) && float64_is_zero(farg2
.d
))) {
1106 /* Division of zero by zero */
1107 farg1
.ll
= fload_invalid_op_excp(POWERPC_EXCP_FP_VXZDZ
);
1109 if (unlikely(float64_is_signaling_nan(farg1
.d
) ||
1110 float64_is_signaling_nan(farg2
.d
))) {
1112 fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN
);
1114 farg1
.d
= float64_div(farg1
.d
, farg2
.d
, &env
->fp_status
);
1121 uint64_t helper_fabs (uint64_t arg
)
1126 farg
.d
= float64_abs(farg
.d
);
1131 uint64_t helper_fnabs (uint64_t arg
)
1136 farg
.d
= float64_abs(farg
.d
);
1137 farg
.d
= float64_chs(farg
.d
);
1142 uint64_t helper_fneg (uint64_t arg
)
1147 farg
.d
= float64_chs(farg
.d
);
1151 /* fctiw - fctiw. */
1152 uint64_t helper_fctiw (uint64_t arg
)
1157 if (unlikely(float64_is_signaling_nan(farg
.d
))) {
1158 /* sNaN conversion */
1159 farg
.ll
= fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN
| POWERPC_EXCP_FP_VXCVI
);
1160 } else if (unlikely(float64_is_quiet_nan(farg
.d
) || float64_is_infinity(farg
.d
))) {
1161 /* qNan / infinity conversion */
1162 farg
.ll
= fload_invalid_op_excp(POWERPC_EXCP_FP_VXCVI
);
1164 farg
.ll
= float64_to_int32(farg
.d
, &env
->fp_status
);
1165 /* XXX: higher bits are not supposed to be significant.
1166 * to make tests easier, return the same as a real PowerPC 750
1168 farg
.ll
|= 0xFFF80000ULL
<< 32;
1173 /* fctiwz - fctiwz. */
1174 uint64_t helper_fctiwz (uint64_t arg
)
1179 if (unlikely(float64_is_signaling_nan(farg
.d
))) {
1180 /* sNaN conversion */
1181 farg
.ll
= fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN
| POWERPC_EXCP_FP_VXCVI
);
1182 } else if (unlikely(float64_is_quiet_nan(farg
.d
) || float64_is_infinity(farg
.d
))) {
1183 /* qNan / infinity conversion */
1184 farg
.ll
= fload_invalid_op_excp(POWERPC_EXCP_FP_VXCVI
);
1186 farg
.ll
= float64_to_int32_round_to_zero(farg
.d
, &env
->fp_status
);
1187 /* XXX: higher bits are not supposed to be significant.
1188 * to make tests easier, return the same as a real PowerPC 750
1190 farg
.ll
|= 0xFFF80000ULL
<< 32;
1195 #if defined(TARGET_PPC64)
1196 /* fcfid - fcfid. */
1197 uint64_t helper_fcfid (uint64_t arg
)
1200 farg
.d
= int64_to_float64(arg
, &env
->fp_status
);
1204 /* fctid - fctid. */
1205 uint64_t helper_fctid (uint64_t arg
)
1210 if (unlikely(float64_is_signaling_nan(farg
.d
))) {
1211 /* sNaN conversion */
1212 farg
.ll
= fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN
| POWERPC_EXCP_FP_VXCVI
);
1213 } else if (unlikely(float64_is_quiet_nan(farg
.d
) || float64_is_infinity(farg
.d
))) {
1214 /* qNan / infinity conversion */
1215 farg
.ll
= fload_invalid_op_excp(POWERPC_EXCP_FP_VXCVI
);
1217 farg
.ll
= float64_to_int64(farg
.d
, &env
->fp_status
);
1222 /* fctidz - fctidz. */
1223 uint64_t helper_fctidz (uint64_t arg
)
1228 if (unlikely(float64_is_signaling_nan(farg
.d
))) {
1229 /* sNaN conversion */
1230 farg
.ll
= fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN
| POWERPC_EXCP_FP_VXCVI
);
1231 } else if (unlikely(float64_is_quiet_nan(farg
.d
) || float64_is_infinity(farg
.d
))) {
1232 /* qNan / infinity conversion */
1233 farg
.ll
= fload_invalid_op_excp(POWERPC_EXCP_FP_VXCVI
);
1235 farg
.ll
= float64_to_int64_round_to_zero(farg
.d
, &env
->fp_status
);
1242 static inline uint64_t do_fri(uint64_t arg
, int rounding_mode
)
1247 if (unlikely(float64_is_signaling_nan(farg
.d
))) {
1249 farg
.ll
= fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN
| POWERPC_EXCP_FP_VXCVI
);
1250 } else if (unlikely(float64_is_quiet_nan(farg
.d
) || float64_is_infinity(farg
.d
))) {
1251 /* qNan / infinity round */
1252 farg
.ll
= fload_invalid_op_excp(POWERPC_EXCP_FP_VXCVI
);
1254 set_float_rounding_mode(rounding_mode
, &env
->fp_status
);
1255 farg
.ll
= float64_round_to_int(farg
.d
, &env
->fp_status
);
1256 /* Restore rounding mode from FPSCR */
1257 fpscr_set_rounding_mode();
1262 uint64_t helper_frin (uint64_t arg
)
1264 return do_fri(arg
, float_round_nearest_even
);
1267 uint64_t helper_friz (uint64_t arg
)
1269 return do_fri(arg
, float_round_to_zero
);
1272 uint64_t helper_frip (uint64_t arg
)
1274 return do_fri(arg
, float_round_up
);
1277 uint64_t helper_frim (uint64_t arg
)
1279 return do_fri(arg
, float_round_down
);
1282 /* fmadd - fmadd. */
1283 uint64_t helper_fmadd (uint64_t arg1
, uint64_t arg2
, uint64_t arg3
)
1285 CPU_DoubleU farg1
, farg2
, farg3
;
1291 if (unlikely((float64_is_infinity(farg1
.d
) && float64_is_zero(farg2
.d
)) ||
1292 (float64_is_zero(farg1
.d
) && float64_is_infinity(farg2
.d
)))) {
1293 /* Multiplication of zero by infinity */
1294 farg1
.ll
= fload_invalid_op_excp(POWERPC_EXCP_FP_VXIMZ
);
1296 if (unlikely(float64_is_signaling_nan(farg1
.d
) ||
1297 float64_is_signaling_nan(farg2
.d
) ||
1298 float64_is_signaling_nan(farg3
.d
))) {
1299 /* sNaN operation */
1300 fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN
);
1303 /* This is the way the PowerPC specification defines it */
1304 float128 ft0_128
, ft1_128
;
1306 ft0_128
= float64_to_float128(farg1
.d
, &env
->fp_status
);
1307 ft1_128
= float64_to_float128(farg2
.d
, &env
->fp_status
);
1308 ft0_128
= float128_mul(ft0_128
, ft1_128
, &env
->fp_status
);
1309 if (unlikely(float128_is_infinity(ft0_128
) && float64_is_infinity(farg3
.d
) &&
1310 float128_is_neg(ft0_128
) != float64_is_neg(farg3
.d
))) {
1311 /* Magnitude subtraction of infinities */
1312 farg1
.ll
= fload_invalid_op_excp(POWERPC_EXCP_FP_VXISI
);
1314 ft1_128
= float64_to_float128(farg3
.d
, &env
->fp_status
);
1315 ft0_128
= float128_add(ft0_128
, ft1_128
, &env
->fp_status
);
1316 farg1
.d
= float128_to_float64(ft0_128
, &env
->fp_status
);
1319 /* This is OK on x86 hosts */
1320 farg1
.d
= (farg1
.d
* farg2
.d
) + farg3
.d
;
1327 /* fmsub - fmsub. */
1328 uint64_t helper_fmsub (uint64_t arg1
, uint64_t arg2
, uint64_t arg3
)
1330 CPU_DoubleU farg1
, farg2
, farg3
;
1336 if (unlikely((float64_is_infinity(farg1
.d
) && float64_is_zero(farg2
.d
)) ||
1337 (float64_is_zero(farg1
.d
) && float64_is_infinity(farg2
.d
)))) {
1338 /* Multiplication of zero by infinity */
1339 farg1
.ll
= fload_invalid_op_excp(POWERPC_EXCP_FP_VXIMZ
);
1341 if (unlikely(float64_is_signaling_nan(farg1
.d
) ||
1342 float64_is_signaling_nan(farg2
.d
) ||
1343 float64_is_signaling_nan(farg3
.d
))) {
1344 /* sNaN operation */
1345 fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN
);
1348 /* This is the way the PowerPC specification defines it */
1349 float128 ft0_128
, ft1_128
;
1351 ft0_128
= float64_to_float128(farg1
.d
, &env
->fp_status
);
1352 ft1_128
= float64_to_float128(farg2
.d
, &env
->fp_status
);
1353 ft0_128
= float128_mul(ft0_128
, ft1_128
, &env
->fp_status
);
1354 if (unlikely(float128_is_infinity(ft0_128
) && float64_is_infinity(farg3
.d
) &&
1355 float128_is_neg(ft0_128
) == float64_is_neg(farg3
.d
))) {
1356 /* Magnitude subtraction of infinities */
1357 farg1
.ll
= fload_invalid_op_excp(POWERPC_EXCP_FP_VXISI
);
1359 ft1_128
= float64_to_float128(farg3
.d
, &env
->fp_status
);
1360 ft0_128
= float128_sub(ft0_128
, ft1_128
, &env
->fp_status
);
1361 farg1
.d
= float128_to_float64(ft0_128
, &env
->fp_status
);
1364 /* This is OK on x86 hosts */
1365 farg1
.d
= (farg1
.d
* farg2
.d
) - farg3
.d
;
1371 /* fnmadd - fnmadd. */
1372 uint64_t helper_fnmadd (uint64_t arg1
, uint64_t arg2
, uint64_t arg3
)
1374 CPU_DoubleU farg1
, farg2
, farg3
;
1380 if (unlikely((float64_is_infinity(farg1
.d
) && float64_is_zero(farg2
.d
)) ||
1381 (float64_is_zero(farg1
.d
) && float64_is_infinity(farg2
.d
)))) {
1382 /* Multiplication of zero by infinity */
1383 farg1
.ll
= fload_invalid_op_excp(POWERPC_EXCP_FP_VXIMZ
);
1385 if (unlikely(float64_is_signaling_nan(farg1
.d
) ||
1386 float64_is_signaling_nan(farg2
.d
) ||
1387 float64_is_signaling_nan(farg3
.d
))) {
1388 /* sNaN operation */
1389 fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN
);
1392 /* This is the way the PowerPC specification defines it */
1393 float128 ft0_128
, ft1_128
;
1395 ft0_128
= float64_to_float128(farg1
.d
, &env
->fp_status
);
1396 ft1_128
= float64_to_float128(farg2
.d
, &env
->fp_status
);
1397 ft0_128
= float128_mul(ft0_128
, ft1_128
, &env
->fp_status
);
1398 if (unlikely(float128_is_infinity(ft0_128
) && float64_is_infinity(farg3
.d
) &&
1399 float128_is_neg(ft0_128
) != float64_is_neg(farg3
.d
))) {
1400 /* Magnitude subtraction of infinities */
1401 farg1
.ll
= fload_invalid_op_excp(POWERPC_EXCP_FP_VXISI
);
1403 ft1_128
= float64_to_float128(farg3
.d
, &env
->fp_status
);
1404 ft0_128
= float128_add(ft0_128
, ft1_128
, &env
->fp_status
);
1405 farg1
.d
= float128_to_float64(ft0_128
, &env
->fp_status
);
1408 /* This is OK on x86 hosts */
1409 farg1
.d
= (farg1
.d
* farg2
.d
) + farg3
.d
;
1411 if (likely(!float64_is_any_nan(farg1
.d
))) {
1412 farg1
.d
= float64_chs(farg1
.d
);
1418 /* fnmsub - fnmsub. */
1419 uint64_t helper_fnmsub (uint64_t arg1
, uint64_t arg2
, uint64_t arg3
)
1421 CPU_DoubleU farg1
, farg2
, farg3
;
1427 if (unlikely((float64_is_infinity(farg1
.d
) && float64_is_zero(farg2
.d
)) ||
1428 (float64_is_zero(farg1
.d
) && float64_is_infinity(farg2
.d
)))) {
1429 /* Multiplication of zero by infinity */
1430 farg1
.ll
= fload_invalid_op_excp(POWERPC_EXCP_FP_VXIMZ
);
1432 if (unlikely(float64_is_signaling_nan(farg1
.d
) ||
1433 float64_is_signaling_nan(farg2
.d
) ||
1434 float64_is_signaling_nan(farg3
.d
))) {
1435 /* sNaN operation */
1436 fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN
);
1439 /* This is the way the PowerPC specification defines it */
1440 float128 ft0_128
, ft1_128
;
1442 ft0_128
= float64_to_float128(farg1
.d
, &env
->fp_status
);
1443 ft1_128
= float64_to_float128(farg2
.d
, &env
->fp_status
);
1444 ft0_128
= float128_mul(ft0_128
, ft1_128
, &env
->fp_status
);
1445 if (unlikely(float128_is_infinity(ft0_128
) && float64_is_infinity(farg3
.d
) &&
1446 float128_is_neg(ft0_128
) == float64_is_neg(farg3
.d
))) {
1447 /* Magnitude subtraction of infinities */
1448 farg1
.ll
= fload_invalid_op_excp(POWERPC_EXCP_FP_VXISI
);
1450 ft1_128
= float64_to_float128(farg3
.d
, &env
->fp_status
);
1451 ft0_128
= float128_sub(ft0_128
, ft1_128
, &env
->fp_status
);
1452 farg1
.d
= float128_to_float64(ft0_128
, &env
->fp_status
);
1455 /* This is OK on x86 hosts */
1456 farg1
.d
= (farg1
.d
* farg2
.d
) - farg3
.d
;
1458 if (likely(!float64_is_any_nan(farg1
.d
))) {
1459 farg1
.d
= float64_chs(farg1
.d
);
1466 uint64_t helper_frsp (uint64_t arg
)
1472 if (unlikely(float64_is_signaling_nan(farg
.d
))) {
1473 /* sNaN square root */
1474 fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN
);
1476 f32
= float64_to_float32(farg
.d
, &env
->fp_status
);
1477 farg
.d
= float32_to_float64(f32
, &env
->fp_status
);
1482 /* fsqrt - fsqrt. */
1483 uint64_t helper_fsqrt (uint64_t arg
)
1488 if (unlikely(float64_is_neg(farg
.d
) && !float64_is_zero(farg
.d
))) {
1489 /* Square root of a negative nonzero number */
1490 farg
.ll
= fload_invalid_op_excp(POWERPC_EXCP_FP_VXSQRT
);
1492 if (unlikely(float64_is_signaling_nan(farg
.d
))) {
1493 /* sNaN square root */
1494 fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN
);
1496 farg
.d
= float64_sqrt(farg
.d
, &env
->fp_status
);
1502 uint64_t helper_fre (uint64_t arg
)
1507 if (unlikely(float64_is_signaling_nan(farg
.d
))) {
1508 /* sNaN reciprocal */
1509 fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN
);
1511 farg
.d
= float64_div(float64_one
, farg
.d
, &env
->fp_status
);
1516 uint64_t helper_fres (uint64_t arg
)
1522 if (unlikely(float64_is_signaling_nan(farg
.d
))) {
1523 /* sNaN reciprocal */
1524 fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN
);
1526 farg
.d
= float64_div(float64_one
, farg
.d
, &env
->fp_status
);
1527 f32
= float64_to_float32(farg
.d
, &env
->fp_status
);
1528 farg
.d
= float32_to_float64(f32
, &env
->fp_status
);
1533 /* frsqrte - frsqrte. */
1534 uint64_t helper_frsqrte (uint64_t arg
)
1540 if (unlikely(float64_is_neg(farg
.d
) && !float64_is_zero(farg
.d
))) {
1541 /* Reciprocal square root of a negative nonzero number */
1542 farg
.ll
= fload_invalid_op_excp(POWERPC_EXCP_FP_VXSQRT
);
1544 if (unlikely(float64_is_signaling_nan(farg
.d
))) {
1545 /* sNaN reciprocal square root */
1546 fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN
);
1548 farg
.d
= float64_sqrt(farg
.d
, &env
->fp_status
);
1549 farg
.d
= float64_div(float64_one
, farg
.d
, &env
->fp_status
);
1550 f32
= float64_to_float32(farg
.d
, &env
->fp_status
);
1551 farg
.d
= float32_to_float64(f32
, &env
->fp_status
);
1557 uint64_t helper_fsel (uint64_t arg1
, uint64_t arg2
, uint64_t arg3
)
1563 if ((!float64_is_neg(farg1
.d
) || float64_is_zero(farg1
.d
)) && !float64_is_any_nan(farg1
.d
)) {
1570 void helper_fcmpu (uint64_t arg1
, uint64_t arg2
, uint32_t crfD
)
1572 CPU_DoubleU farg1
, farg2
;
1577 if (unlikely(float64_is_any_nan(farg1
.d
) ||
1578 float64_is_any_nan(farg2
.d
))) {
1580 } else if (float64_lt(farg1
.d
, farg2
.d
, &env
->fp_status
)) {
1582 } else if (!float64_le(farg1
.d
, farg2
.d
, &env
->fp_status
)) {
1588 env
->fpscr
&= ~(0x0F << FPSCR_FPRF
);
1589 env
->fpscr
|= ret
<< FPSCR_FPRF
;
1590 env
->crf
[crfD
] = ret
;
1591 if (unlikely(ret
== 0x01UL
1592 && (float64_is_signaling_nan(farg1
.d
) ||
1593 float64_is_signaling_nan(farg2
.d
)))) {
1594 /* sNaN comparison */
1595 fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN
);
1599 void helper_fcmpo (uint64_t arg1
, uint64_t arg2
, uint32_t crfD
)
1601 CPU_DoubleU farg1
, farg2
;
1606 if (unlikely(float64_is_any_nan(farg1
.d
) ||
1607 float64_is_any_nan(farg2
.d
))) {
1609 } else if (float64_lt(farg1
.d
, farg2
.d
, &env
->fp_status
)) {
1611 } else if (!float64_le(farg1
.d
, farg2
.d
, &env
->fp_status
)) {
1617 env
->fpscr
&= ~(0x0F << FPSCR_FPRF
);
1618 env
->fpscr
|= ret
<< FPSCR_FPRF
;
1619 env
->crf
[crfD
] = ret
;
1620 if (unlikely (ret
== 0x01UL
)) {
1621 if (float64_is_signaling_nan(farg1
.d
) ||
1622 float64_is_signaling_nan(farg2
.d
)) {
1623 /* sNaN comparison */
1624 fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN
|
1625 POWERPC_EXCP_FP_VXVC
);
1627 /* qNaN comparison */
1628 fload_invalid_op_excp(POWERPC_EXCP_FP_VXVC
);
#if !defined (CONFIG_USER_ONLY)
void helper_store_msr (target_ulong val)
{
    val = hreg_store_msr(env, val, 0);
    if (val != 0) {
        env->interrupt_request |= CPU_INTERRUPT_EXITTB;
        helper_raise_exception(val);
    }
}
static inline void do_rfi(target_ulong nip, target_ulong msr,
                          target_ulong msrm, int keep_msrh)
{
#if defined(TARGET_PPC64)
    if (msr & (1ULL << MSR_SF)) {
        nip = (uint64_t)nip;
        msr &= (uint64_t)msrm;
    } else {
        nip = (uint32_t)nip;
        msr = (uint32_t)(msr & msrm);
        if (keep_msrh) {
            msr |= env->msr & ~((uint64_t)0xFFFFFFFF);
        }
    }
#else
    nip = (uint32_t)nip;
    msr &= (uint32_t)msrm;
#endif
    /* XXX: beware: this is false if VLE is supported */
    env->nip = nip & ~((target_ulong)0x00000003);
    hreg_store_msr(env, msr, 1);
#if defined (DEBUG_OP)
    cpu_dump_rfi(env->nip, env->msr);
#endif
    /* No need to raise an exception here,
     * as rfi is always the last insn of a TB
     */
    env->interrupt_request |= CPU_INTERRUPT_EXITTB;
}
void helper_rfi (void)
{
    do_rfi(env->spr[SPR_SRR0], env->spr[SPR_SRR1],
           ~((target_ulong)0x783F0000), 1);
}

#if defined(TARGET_PPC64)
void helper_rfid (void)
{
    do_rfi(env->spr[SPR_SRR0], env->spr[SPR_SRR1],
           ~((target_ulong)0x783F0000), 0);
}

void helper_hrfid (void)
{
    do_rfi(env->spr[SPR_HSRR0], env->spr[SPR_HSRR1],
           ~((target_ulong)0x783F0000), 0);
}
#endif
#endif
void helper_tw (target_ulong arg1, target_ulong arg2, uint32_t flags)
{
    if (!likely(!(((int32_t)arg1 < (int32_t)arg2 && (flags & 0x10)) ||
                  ((int32_t)arg1 > (int32_t)arg2 && (flags & 0x08)) ||
                  ((int32_t)arg1 == (int32_t)arg2 && (flags & 0x04)) ||
                  ((uint32_t)arg1 < (uint32_t)arg2 && (flags & 0x02)) ||
                  ((uint32_t)arg1 > (uint32_t)arg2 && (flags & 0x01))))) {
        helper_raise_exception_err(POWERPC_EXCP_PROGRAM, POWERPC_EXCP_TRAP);
    }
}

#if defined(TARGET_PPC64)
void helper_td (target_ulong arg1, target_ulong arg2, uint32_t flags)
{
    if (!likely(!(((int64_t)arg1 < (int64_t)arg2 && (flags & 0x10)) ||
                  ((int64_t)arg1 > (int64_t)arg2 && (flags & 0x08)) ||
                  ((int64_t)arg1 == (int64_t)arg2 && (flags & 0x04)) ||
                  ((uint64_t)arg1 < (uint64_t)arg2 && (flags & 0x02)) ||
                  ((uint64_t)arg1 > (uint64_t)arg2 && (flags & 0x01))))) {
        helper_raise_exception_err(POWERPC_EXCP_PROGRAM, POWERPC_EXCP_TRAP);
    }
}
#endif
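/* 'flags' is the TO field of the trap instruction: 0x10 traps on "less than
 * (signed)", 0x08 on "greater than (signed)", 0x04 on "equal", 0x02 on "less
 * than (unsigned)" and 0x01 on "greater than (unsigned)"; any matching
 * condition raises a TRAP program exception. */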
/*****************************************************************************/
/* PowerPC 601 specific instructions (POWER bridge) */

target_ulong helper_clcs (uint32_t arg)
{
    switch (arg) {
    case 0x0CUL:
        /* Instruction cache line size */
        return env->icache_line_size;
        break;
    case 0x0DUL:
        /* Data cache line size */
        return env->dcache_line_size;
        break;
    case 0x0EUL:
        /* Minimum cache line size */
        return (env->icache_line_size < env->dcache_line_size) ?
               env->icache_line_size : env->dcache_line_size;
        break;
    case 0x0FUL:
        /* Maximum cache line size */
        return (env->icache_line_size > env->dcache_line_size) ?
               env->icache_line_size : env->dcache_line_size;
        break;
    default:
        /* Undefined */
        return 0;
        break;
    }
}
target_ulong helper_div (target_ulong arg1, target_ulong arg2)
{
    uint64_t tmp = (uint64_t)arg1 << 32 | env->spr[SPR_MQ];

    if (((int32_t)tmp == INT32_MIN && (int32_t)arg2 == (int32_t)-1) ||
        (int32_t)arg2 == 0) {
        env->spr[SPR_MQ] = 0;
        return INT32_MIN;
    } else {
        env->spr[SPR_MQ] = tmp % arg2;
        return tmp / (int32_t)arg2;
    }
}
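/* POWER-era 'div': the 64-bit dividend is formed from rA (high word) and the
 * MQ register (low word); the quotient is returned and the remainder is left
 * in MQ.  Divide overflow and division by zero clear MQ. */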
target_ulong helper_divo (target_ulong arg1, target_ulong arg2)
{
    uint64_t tmp = (uint64_t)arg1 << 32 | env->spr[SPR_MQ];

    if (((int32_t)tmp == INT32_MIN && (int32_t)arg2 == (int32_t)-1) ||
        (int32_t)arg2 == 0) {
        env->xer |= (1 << XER_OV) | (1 << XER_SO);
        env->spr[SPR_MQ] = 0;
        return INT32_MIN;
    } else {
        env->spr[SPR_MQ] = tmp % arg2;
        tmp /= (int32_t)arg2;
        if ((int32_t)tmp != tmp) {
            env->xer |= (1 << XER_OV) | (1 << XER_SO);
        } else {
            env->xer &= ~(1 << XER_OV);
        }
        return tmp;
    }
}
target_ulong helper_divs (target_ulong arg1, target_ulong arg2)
{
    if (((int32_t)arg1 == INT32_MIN && (int32_t)arg2 == (int32_t)-1) ||
        (int32_t)arg2 == 0) {
        env->spr[SPR_MQ] = 0;
        return INT32_MIN;
    } else {
        env->spr[SPR_MQ] = (int32_t)arg1 % (int32_t)arg2;
        return (int32_t)arg1 / (int32_t)arg2;
    }
}

target_ulong helper_divso (target_ulong arg1, target_ulong arg2)
{
    if (((int32_t)arg1 == INT32_MIN && (int32_t)arg2 == (int32_t)-1) ||
        (int32_t)arg2 == 0) {
        env->xer |= (1 << XER_OV) | (1 << XER_SO);
        env->spr[SPR_MQ] = 0;
        return INT32_MIN;
    } else {
        env->xer &= ~(1 << XER_OV);
        env->spr[SPR_MQ] = (int32_t)arg1 % (int32_t)arg2;
        return (int32_t)arg1 / (int32_t)arg2;
    }
}
#if !defined (CONFIG_USER_ONLY)
target_ulong helper_rac (target_ulong addr)
{
    mmu_ctx_t ctx;
    int nb_BATs;
    target_ulong ret = 0;

    /* We don't have to generate many instances of this instruction,
     * as rac is supervisor only.
     */
    /* XXX: FIX THIS: Pretend we have no BAT */
    nb_BATs = env->nb_BATs;
    env->nb_BATs = 0;
    if (get_physical_address(env, &ctx, addr, 0, ACCESS_INT) == 0) {
        ret = ctx.raddr;
    }
    env->nb_BATs = nb_BATs;
    return ret;
}

void helper_rfsvc (void)
{
    do_rfi(env->lr, env->ctr, 0x0000FFFF, 0);
}
#endif
/*****************************************************************************/
/* 602 specific instructions */
/* mfrom is the most crazy instruction ever seen, imho ! */
/* Real implementation uses a ROM table. Do the same */
/* Extremely decomposed:
 *                      -arg / 256
 * return 256 * log10(10           + 1.0) + 0.5
 */
#if !defined (CONFIG_USER_ONLY)
target_ulong helper_602_mfrom (target_ulong arg)
{
    if (likely(arg < 602)) {
#include "mfrom_table.c"
        return mfrom_ROM_table[arg];
    } else {
        return 0;
    }
}
#endif
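/* mfrom_table.c holds the 602 pre-computed values of the expression above
 * (presumably generated offline, cf. mfrom_table_gen.c), so the helper
 * reduces to a bounds check plus a table lookup. */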
/*****************************************************************************/
/* Embedded PowerPC specific helpers */

/* XXX: to be improved to check access rights when in user-mode */
target_ulong helper_load_dcr (target_ulong dcrn)
{
    uint32_t val = 0;

    if (unlikely(env->dcr_env == NULL)) {
        qemu_log("No DCR environment\n");
        helper_raise_exception_err(POWERPC_EXCP_PROGRAM,
                                   POWERPC_EXCP_INVAL | POWERPC_EXCP_INVAL_INVAL);
    } else if (unlikely(ppc_dcr_read(env->dcr_env, (uint32_t)dcrn, &val) != 0)) {
        qemu_log("DCR read error %d %03x\n", (uint32_t)dcrn, (uint32_t)dcrn);
        helper_raise_exception_err(POWERPC_EXCP_PROGRAM,
                                   POWERPC_EXCP_INVAL | POWERPC_EXCP_PRIV_REG);
    }
    return val;
}

void helper_store_dcr (target_ulong dcrn, target_ulong val)
{
    if (unlikely(env->dcr_env == NULL)) {
        qemu_log("No DCR environment\n");
        helper_raise_exception_err(POWERPC_EXCP_PROGRAM,
                                   POWERPC_EXCP_INVAL | POWERPC_EXCP_INVAL_INVAL);
    } else if (unlikely(ppc_dcr_write(env->dcr_env, (uint32_t)dcrn,
                                      (uint32_t)val) != 0)) {
        qemu_log("DCR write error %d %03x\n", (uint32_t)dcrn, (uint32_t)dcrn);
        helper_raise_exception_err(POWERPC_EXCP_PROGRAM,
                                   POWERPC_EXCP_INVAL | POWERPC_EXCP_PRIV_REG);
    }
}
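/* Device Control Registers (mtdcr/mfdcr) are not part of the CPU core:
 * accesses are routed through the board's DCR environment, so a missing
 * dcr_env or a failing ppc_dcr_read/ppc_dcr_write is reported back to the
 * guest as an invalid/privileged instruction program exception. */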
#if !defined(CONFIG_USER_ONLY)
void helper_40x_rfci (void)
{
    do_rfi(env->spr[SPR_40x_SRR2], env->spr[SPR_40x_SRR3],
           ~((target_ulong)0xFFFF0000), 0);
}

void helper_rfci (void)
{
    do_rfi(env->spr[SPR_BOOKE_CSRR0], env->spr[SPR_BOOKE_CSRR1],
           ~((target_ulong)0x3FFF0000), 0);
}

void helper_rfdi (void)
{
    do_rfi(env->spr[SPR_BOOKE_DSRR0], env->spr[SPR_BOOKE_DSRR1],
           ~((target_ulong)0x3FFF0000), 0);
}

void helper_rfmci (void)
{
    do_rfi(env->spr[SPR_BOOKE_MCSRR0], env->spr[SPR_BOOKE_MCSRR1],
           ~((target_ulong)0x3FFF0000), 0);
}
#endif
target_ulong helper_dlmzb (target_ulong high, target_ulong low,
                           uint32_t update_Rc)
{
    target_ulong mask;
    int i;

    i = 1;
    for (mask = 0xFF000000; mask != 0; mask = mask >> 8) {
        if ((high & mask) == 0) {
            if (update_Rc) {
                env->crf[0] = 0x4;
            }
            goto done;
        }
        i++;
    }
    for (mask = 0xFF000000; mask != 0; mask = mask >> 8) {
        if ((low & mask) == 0) {
            if (update_Rc) {
                env->crf[0] = 0x8;
            }
            goto done;
        }
        i++;
    }
    if (update_Rc) {
        env->crf[0] = 0x2;
    }
 done:
    env->xer = (env->xer & ~0x7F) | i;
    if (update_Rc) {
        env->crf[0] |= xer_so;
    }
    return i;
}
/*****************************************************************************/
/* Altivec extension helpers */
#if defined(HOST_WORDS_BIGENDIAN)
#define HI_IDX 0
#define LO_IDX 1
#else
#define HI_IDX 1
#define LO_IDX 0
#endif

#if defined(HOST_WORDS_BIGENDIAN)
#define VECTOR_FOR_INORDER_I(index, element)            \
    for (index = 0; index < ARRAY_SIZE(r->element); index++)
#else
#define VECTOR_FOR_INORDER_I(index, element)            \
    for (index = ARRAY_SIZE(r->element)-1; index >= 0; index--)
#endif

/* If X is a NaN, store the corresponding QNaN into RESULT.  Otherwise,
 * execute the following block.  */
#define DO_HANDLE_NAN(result, x)                        \
    if (float32_is_any_nan(x)) {                        \
        CPU_FloatU __f;                                 \
        __f.f = x;                                      \
        __f.l = __f.l | (1 << 22);  /* Set QNaN bit. */ \
        result = __f.f;                                 \
    } else

#define HANDLE_NAN1(result, x)                  \
    DO_HANDLE_NAN(result, x)
#define HANDLE_NAN2(result, x, y)               \
    DO_HANDLE_NAN(result, x) DO_HANDLE_NAN(result, y)
#define HANDLE_NAN3(result, x, y, z)            \
    DO_HANDLE_NAN(result, x) DO_HANDLE_NAN(result, y) DO_HANDLE_NAN(result, z)

/* Saturating arithmetic helpers.  */
#define SATCVT(from, to, from_type, to_type, min, max)          \
    static inline to_type cvt##from##to(from_type x, int *sat)  \
    {                                                           \
        to_type r;                                              \
        if (x < (from_type)min) {                               \
            r = min;                                            \
            *sat = 1;                                           \
        } else if (x > (from_type)max) {                        \
            r = max;                                            \
            *sat = 1;                                           \
        } else {                                                \
            r = x;                                              \
        }                                                       \
        return r;                                               \
    }
#define SATCVTU(from, to, from_type, to_type, min, max)         \
    static inline to_type cvt##from##to(from_type x, int *sat)  \
    {                                                           \
        to_type r;                                              \
        if (x > (from_type)max) {                               \
            r = max;                                            \
            *sat = 1;                                           \
        } else {                                                \
            r = x;                                              \
        }                                                       \
        return r;                                               \
    }
SATCVT(sh, sb, int16_t, int8_t, INT8_MIN, INT8_MAX)
SATCVT(sw, sh, int32_t, int16_t, INT16_MIN, INT16_MAX)
SATCVT(sd, sw, int64_t, int32_t, INT32_MIN, INT32_MAX)

SATCVTU(uh, ub, uint16_t, uint8_t, 0, UINT8_MAX)
SATCVTU(uw, uh, uint32_t, uint16_t, 0, UINT16_MAX)
SATCVTU(ud, uw, uint64_t, uint32_t, 0, UINT32_MAX)
SATCVT(sh, ub, int16_t, uint8_t, 0, UINT8_MAX)
SATCVT(sw, uh, int32_t, uint16_t, 0, UINT16_MAX)
SATCVT(sd, uw, int64_t, uint32_t, 0, UINT32_MAX)
#undef SATCVT
#undef SATCVTU
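/* Example: cvtsdsw() clamps an int64_t intermediate into the int32_t range,
 * returning INT32_MIN/INT32_MAX on overflow and reporting it through *sat so
 * the caller can set VSCR[SAT]; the unsigned SATCVTU variants only clamp the
 * upper bound since their intermediate type is already non-negative. */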
2024 #define LVE(name, access, swap, element) \
2025 void helper_##name (ppc_avr_t *r, target_ulong addr) \
2027 size_t n_elems = ARRAY_SIZE(r->element); \
2028 int adjust = HI_IDX*(n_elems-1); \
2029 int sh = sizeof(r->element[0]) >> 1; \
2030 int index = (addr & 0xf) >> sh; \
2032 r->element[LO_IDX ? index : (adjust - index)] = swap(access(addr)); \
2034 r->element[LO_IDX ? index : (adjust - index)] = access(addr); \
2038 LVE(lvebx
, ldub
, I
, u8
)
2039 LVE(lvehx
, lduw
, bswap16
, u16
)
2040 LVE(lvewx
, ldl
, bswap32
, u32
)
2044 void helper_lvsl (ppc_avr_t
*r
, target_ulong sh
)
2046 int i
, j
= (sh
& 0xf);
2048 VECTOR_FOR_INORDER_I (i
, u8
) {
2053 void helper_lvsr (ppc_avr_t
*r
, target_ulong sh
)
2055 int i
, j
= 0x10 - (sh
& 0xf);
2057 VECTOR_FOR_INORDER_I (i
, u8
) {
2062 #define STVE(name, access, swap, element) \
2063 void helper_##name (ppc_avr_t *r, target_ulong addr) \
2065 size_t n_elems = ARRAY_SIZE(r->element); \
2066 int adjust = HI_IDX*(n_elems-1); \
2067 int sh = sizeof(r->element[0]) >> 1; \
2068 int index = (addr & 0xf) >> sh; \
2070 access(addr, swap(r->element[LO_IDX ? index : (adjust - index)])); \
2072 access(addr, r->element[LO_IDX ? index : (adjust - index)]); \
2076 STVE(stvebx
, stb
, I
, u8
)
2077 STVE(stvehx
, stw
, bswap16
, u16
)
2078 STVE(stvewx
, stl
, bswap32
, u32
)
void helper_mtvscr (ppc_avr_t *r)
{
#if defined(HOST_WORDS_BIGENDIAN)
    env->vscr = r->u32[3];
#else
    env->vscr = r->u32[0];
#endif
    set_flush_to_zero(vscr_nj, &env->vec_status);
}

void helper_vaddcuw (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
{
    int i;

    for (i = 0; i < ARRAY_SIZE(r->u32); i++) {
        r->u32[i] = ~a->u32[i] < b->u32[i];
    }
}
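/* Carry-out trick in vaddcuw: for 32-bit lanes, a + b overflows exactly when
 * b > ~a (i.e. b > 0xffffffff - a), so '~a->u32[i] < b->u32[i]' evaluates
 * directly to the carry bit (0 or 1) without needing a wider type. */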
2100 #define VARITH_DO(name, op, element) \
2101 void helper_v##name (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b) \
2104 for (i = 0; i < ARRAY_SIZE(r->element); i++) { \
2105 r->element[i] = a->element[i] op b->element[i]; \
2108 #define VARITH(suffix, element) \
2109 VARITH_DO(add##suffix, +, element) \
2110 VARITH_DO(sub##suffix, -, element)
2117 #define VARITHFP(suffix, func) \
2118 void helper_v##suffix (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b) \
2121 for (i = 0; i < ARRAY_SIZE(r->f); i++) { \
2122 HANDLE_NAN2(r->f[i], a->f[i], b->f[i]) { \
2123 r->f[i] = func(a->f[i], b->f[i], &env->vec_status); \
2127 VARITHFP(addfp
, float32_add
)
2128 VARITHFP(subfp
, float32_sub
)
2131 #define VARITHSAT_CASE(type, op, cvt, element) \
2133 type result = (type)a->element[i] op (type)b->element[i]; \
2134 r->element[i] = cvt(result, &sat); \
2137 #define VARITHSAT_DO(name, op, optype, cvt, element) \
2138 void helper_v##name (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b) \
2142 for (i = 0; i < ARRAY_SIZE(r->element); i++) { \
2143 switch (sizeof(r->element[0])) { \
2144 case 1: VARITHSAT_CASE(optype, op, cvt, element); break; \
2145 case 2: VARITHSAT_CASE(optype, op, cvt, element); break; \
2146 case 4: VARITHSAT_CASE(optype, op, cvt, element); break; \
2150 env->vscr |= (1 << VSCR_SAT); \
2153 #define VARITHSAT_SIGNED(suffix, element, optype, cvt) \
2154 VARITHSAT_DO(adds##suffix##s, +, optype, cvt, element) \
2155 VARITHSAT_DO(subs##suffix##s, -, optype, cvt, element)
2156 #define VARITHSAT_UNSIGNED(suffix, element, optype, cvt) \
2157 VARITHSAT_DO(addu##suffix##s, +, optype, cvt, element) \
2158 VARITHSAT_DO(subu##suffix##s, -, optype, cvt, element)
2159 VARITHSAT_SIGNED(b
, s8
, int16_t, cvtshsb
)
2160 VARITHSAT_SIGNED(h
, s16
, int32_t, cvtswsh
)
2161 VARITHSAT_SIGNED(w
, s32
, int64_t, cvtsdsw
)
2162 VARITHSAT_UNSIGNED(b
, u8
, uint16_t, cvtshub
)
2163 VARITHSAT_UNSIGNED(h
, u16
, uint32_t, cvtswuh
)
2164 VARITHSAT_UNSIGNED(w
, u32
, uint64_t, cvtsduw
)
2165 #undef VARITHSAT_CASE
2167 #undef VARITHSAT_SIGNED
2168 #undef VARITHSAT_UNSIGNED
2170 #define VAVG_DO(name, element, etype) \
2171 void helper_v##name (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b) \
2174 for (i = 0; i < ARRAY_SIZE(r->element); i++) { \
2175 etype x = (etype)a->element[i] + (etype)b->element[i] + 1; \
2176 r->element[i] = x >> 1; \
2180 #define VAVG(type, signed_element, signed_type, unsigned_element, unsigned_type) \
2181 VAVG_DO(avgs##type, signed_element, signed_type) \
2182 VAVG_DO(avgu##type, unsigned_element, unsigned_type)
2183 VAVG(b
, s8
, int16_t, u8
, uint16_t)
2184 VAVG(h
, s16
, int32_t, u16
, uint32_t)
2185 VAVG(w
, s32
, int64_t, u32
, uint64_t)
2189 #define VCF(suffix, cvt, element) \
2190 void helper_vcf##suffix (ppc_avr_t *r, ppc_avr_t *b, uint32_t uim) \
2193 for (i = 0; i < ARRAY_SIZE(r->f); i++) { \
2194 float32 t = cvt(b->element[i], &env->vec_status); \
2195 r->f[i] = float32_scalbn (t, -uim, &env->vec_status); \
2198 VCF(ux
, uint32_to_float32
, u32
)
2199 VCF(sx
, int32_to_float32
, s32
)
2202 #define VCMP_DO(suffix, compare, element, record) \
2203 void helper_vcmp##suffix (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b) \
2205 uint32_t ones = (uint32_t)-1; \
2206 uint32_t all = ones; \
2207 uint32_t none = 0; \
2209 for (i = 0; i < ARRAY_SIZE(r->element); i++) { \
2210 uint32_t result = (a->element[i] compare b->element[i] ? ones : 0x0); \
2211 switch (sizeof (a->element[0])) { \
2212 case 4: r->u32[i] = result; break; \
2213 case 2: r->u16[i] = result; break; \
2214 case 1: r->u8[i] = result; break; \
2220 env->crf[6] = ((all != 0) << 3) | ((none == 0) << 1); \
2223 #define VCMP(suffix, compare, element) \
2224 VCMP_DO(suffix, compare, element, 0) \
2225 VCMP_DO(suffix##_dot, compare, element, 1)
2238 #define VCMPFP_DO(suffix, compare, order, record) \
2239 void helper_vcmp##suffix (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b) \
2241 uint32_t ones = (uint32_t)-1; \
2242 uint32_t all = ones; \
2243 uint32_t none = 0; \
2245 for (i = 0; i < ARRAY_SIZE(r->f); i++) { \
2247 int rel = float32_compare_quiet(a->f[i], b->f[i], &env->vec_status); \
2248 if (rel == float_relation_unordered) { \
2250 } else if (rel compare order) { \
2255 r->u32[i] = result; \
2260 env->crf[6] = ((all != 0) << 3) | ((none == 0) << 1); \
2263 #define VCMPFP(suffix, compare, order) \
2264 VCMPFP_DO(suffix, compare, order, 0) \
2265 VCMPFP_DO(suffix##_dot, compare, order, 1)
2266 VCMPFP(eqfp
, ==, float_relation_equal
)
2267 VCMPFP(gefp
, !=, float_relation_less
)
2268 VCMPFP(gtfp
, ==, float_relation_greater
)
2272 static inline void vcmpbfp_internal(ppc_avr_t
*r
, ppc_avr_t
*a
, ppc_avr_t
*b
,
2277 for (i
= 0; i
< ARRAY_SIZE(r
->f
); i
++) {
2278 int le_rel
= float32_compare_quiet(a
->f
[i
], b
->f
[i
], &env
->vec_status
);
2279 if (le_rel
== float_relation_unordered
) {
2280 r
->u32
[i
] = 0xc0000000;
2281 /* ALL_IN does not need to be updated here. */
2283 float32 bneg
= float32_chs(b
->f
[i
]);
2284 int ge_rel
= float32_compare_quiet(a
->f
[i
], bneg
, &env
->vec_status
);
2285 int le
= le_rel
!= float_relation_greater
;
2286 int ge
= ge_rel
!= float_relation_less
;
2287 r
->u32
[i
] = ((!le
) << 31) | ((!ge
) << 30);
2288 all_in
|= (!le
| !ge
);
2292 env
->crf
[6] = (all_in
== 0) << 1;
2296 void helper_vcmpbfp (ppc_avr_t
*r
, ppc_avr_t
*a
, ppc_avr_t
*b
)
2298 vcmpbfp_internal(r
, a
, b
, 0);
2301 void helper_vcmpbfp_dot (ppc_avr_t
*r
, ppc_avr_t
*a
, ppc_avr_t
*b
)
2303 vcmpbfp_internal(r
, a
, b
, 1);
2306 #define VCT(suffix, satcvt, element) \
2307 void helper_vct##suffix (ppc_avr_t *r, ppc_avr_t *b, uint32_t uim) \
2311 float_status s = env->vec_status; \
2312 set_float_rounding_mode(float_round_to_zero, &s); \
2313 for (i = 0; i < ARRAY_SIZE(r->f); i++) { \
2314 if (float32_is_any_nan(b->f[i])) { \
2315 r->element[i] = 0; \
2317 float64 t = float32_to_float64(b->f[i], &s); \
2319 t = float64_scalbn(t, uim, &s); \
2320 j = float64_to_int64(t, &s); \
2321 r->element[i] = satcvt(j, &sat); \
2325 env->vscr |= (1 << VSCR_SAT); \
2328 VCT(uxs
, cvtsduw
, u32
)
2329 VCT(sxs
, cvtsdsw
, s32
)
2332 void helper_vmaddfp (ppc_avr_t
*r
, ppc_avr_t
*a
, ppc_avr_t
*b
, ppc_avr_t
*c
)
2335 for (i
= 0; i
< ARRAY_SIZE(r
->f
); i
++) {
2336 HANDLE_NAN3(r
->f
[i
], a
->f
[i
], b
->f
[i
], c
->f
[i
]) {
2337 /* Need to do the computation in higher precision and round
2338 * once at the end. */
2339 float64 af
, bf
, cf
, t
;
2340 af
= float32_to_float64(a
->f
[i
], &env
->vec_status
);
2341 bf
= float32_to_float64(b
->f
[i
], &env
->vec_status
);
2342 cf
= float32_to_float64(c
->f
[i
], &env
->vec_status
);
2343 t
= float64_mul(af
, cf
, &env
->vec_status
);
2344 t
= float64_add(t
, bf
, &env
->vec_status
);
2345 r
->f
[i
] = float64_to_float32(t
, &env
->vec_status
);
2350 void helper_vmhaddshs (ppc_avr_t
*r
, ppc_avr_t
*a
, ppc_avr_t
*b
, ppc_avr_t
*c
)
2355 for (i
= 0; i
< ARRAY_SIZE(r
->s16
); i
++) {
2356 int32_t prod
= a
->s16
[i
] * b
->s16
[i
];
2357 int32_t t
= (int32_t)c
->s16
[i
] + (prod
>> 15);
2358 r
->s16
[i
] = cvtswsh (t
, &sat
);
2362 env
->vscr
|= (1 << VSCR_SAT
);
2366 void helper_vmhraddshs (ppc_avr_t
*r
, ppc_avr_t
*a
, ppc_avr_t
*b
, ppc_avr_t
*c
)
2371 for (i
= 0; i
< ARRAY_SIZE(r
->s16
); i
++) {
2372 int32_t prod
= a
->s16
[i
] * b
->s16
[i
] + 0x00004000;
2373 int32_t t
= (int32_t)c
->s16
[i
] + (prod
>> 15);
2374 r
->s16
[i
] = cvtswsh (t
, &sat
);
2378 env
->vscr
|= (1 << VSCR_SAT
);
2382 #define VMINMAX_DO(name, compare, element) \
2383 void helper_v##name (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b) \
2386 for (i = 0; i < ARRAY_SIZE(r->element); i++) { \
2387 if (a->element[i] compare b->element[i]) { \
2388 r->element[i] = b->element[i]; \
2390 r->element[i] = a->element[i]; \
2394 #define VMINMAX(suffix, element) \
2395 VMINMAX_DO(min##suffix, >, element) \
2396 VMINMAX_DO(max##suffix, <, element)
2406 #define VMINMAXFP(suffix, rT, rF) \
2407 void helper_v##suffix (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b) \
2410 for (i = 0; i < ARRAY_SIZE(r->f); i++) { \
2411 HANDLE_NAN2(r->f[i], a->f[i], b->f[i]) { \
2412 if (float32_lt_quiet(a->f[i], b->f[i], &env->vec_status)) { \
2413 r->f[i] = rT->f[i]; \
2415 r->f[i] = rF->f[i]; \
2420 VMINMAXFP(minfp
, a
, b
)
2421 VMINMAXFP(maxfp
, b
, a
)
2424 void helper_vmladduhm (ppc_avr_t
*r
, ppc_avr_t
*a
, ppc_avr_t
*b
, ppc_avr_t
*c
)
2427 for (i
= 0; i
< ARRAY_SIZE(r
->s16
); i
++) {
2428 int32_t prod
= a
->s16
[i
] * b
->s16
[i
];
2429 r
->s16
[i
] = (int16_t) (prod
+ c
->s16
[i
]);
#define VMRG_DO(name, element, highp)                                   \
    void helper_v##name (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)      \
    {                                                                   \
        ppc_avr_t result;                                               \
        int i;                                                          \
        size_t n_elems = ARRAY_SIZE(r->element);                        \
        for (i = 0; i < n_elems/2; i++) {                               \
            if (highp) {                                                \
                result.element[i*2+HI_IDX] = a->element[i];             \
                result.element[i*2+LO_IDX] = b->element[i];             \
            } else {                                                    \
                result.element[n_elems - i*2 - (1+HI_IDX)] = b->element[n_elems - i - 1]; \
                result.element[n_elems - i*2 - (1+LO_IDX)] = a->element[n_elems - i - 1]; \
            }                                                           \
        }                                                               \
        *r = result;                                                    \
    }
#if defined(HOST_WORDS_BIGENDIAN)
#define MRGHI 0
#define MRGLO 1
#else
#define MRGHI 1
#define MRGLO 0
#endif
#define VMRG(suffix, element)                   \
  VMRG_DO(mrgl##suffix, element, MRGHI)         \
  VMRG_DO(mrgh##suffix, element, MRGLO)
void helper_vmsummbm (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, ppc_avr_t *c)
{
    int32_t prod[16];
    int i;

    for (i = 0; i < ARRAY_SIZE(r->s8); i++) {
        prod[i] = (int32_t)a->s8[i] * b->u8[i];
    }

    VECTOR_FOR_INORDER_I(i, s32) {
        r->s32[i] = c->s32[i] + prod[4*i] + prod[4*i+1] + prod[4*i+2] + prod[4*i+3];
    }
}
void helper_vmsumshm (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, ppc_avr_t *c)
{
    int32_t prod[8];
    int i;

    for (i = 0; i < ARRAY_SIZE(r->s16); i++) {
        prod[i] = a->s16[i] * b->s16[i];
    }

    VECTOR_FOR_INORDER_I(i, s32) {
        r->s32[i] = c->s32[i] + prod[2*i] + prod[2*i+1];
    }
}
void helper_vmsumshs (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, ppc_avr_t *c)
{
    int32_t prod[8];
    int i;
    int sat = 0;

    for (i = 0; i < ARRAY_SIZE(r->s16); i++) {
        prod[i] = (int32_t)a->s16[i] * b->s16[i];
    }

    VECTOR_FOR_INORDER_I (i, s32) {
        int64_t t = (int64_t)c->s32[i] + prod[2*i] + prod[2*i+1];
        r->u32[i] = cvtsdsw(t, &sat);
    }

    if (sat) {
        env->vscr |= (1 << VSCR_SAT);
    }
}
void helper_vmsumubm (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, ppc_avr_t *c)
{
    uint16_t prod[16];
    int i;

    for (i = 0; i < ARRAY_SIZE(r->u8); i++) {
        prod[i] = a->u8[i] * b->u8[i];
    }

    VECTOR_FOR_INORDER_I(i, u32) {
        r->u32[i] = c->u32[i] + prod[4*i] + prod[4*i+1] + prod[4*i+2] + prod[4*i+3];
    }
}
void helper_vmsumuhm (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, ppc_avr_t *c)
{
    uint32_t prod[8];
    int i;

    for (i = 0; i < ARRAY_SIZE(r->u16); i++) {
        prod[i] = a->u16[i] * b->u16[i];
    }

    VECTOR_FOR_INORDER_I(i, u32) {
        r->u32[i] = c->u32[i] + prod[2*i] + prod[2*i+1];
    }
}
void helper_vmsumuhs (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, ppc_avr_t *c)
{
    uint32_t prod[8];
    int i;
    int sat = 0;

    for (i = 0; i < ARRAY_SIZE(r->u16); i++) {
        prod[i] = a->u16[i] * b->u16[i];
    }

    VECTOR_FOR_INORDER_I (i, s32) {
        uint64_t t = (uint64_t)c->u32[i] + prod[2*i] + prod[2*i+1];
        r->u32[i] = cvtuduw(t, &sat);
    }

    if (sat) {
        env->vscr |= (1 << VSCR_SAT);
    }
}
#define VMUL_DO(name, mul_element, prod_element, evenp)                 \
    void helper_v##name (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)      \
    {                                                                   \
        int i;                                                          \
        VECTOR_FOR_INORDER_I(i, prod_element) {                         \
            if (evenp) {                                                \
                r->prod_element[i] = a->mul_element[i*2+HI_IDX] * b->mul_element[i*2+HI_IDX]; \
            } else {                                                    \
                r->prod_element[i] = a->mul_element[i*2+LO_IDX] * b->mul_element[i*2+LO_IDX]; \
            }                                                           \
        }                                                               \
    }
#define VMUL(suffix, mul_element, prod_element) \
  VMUL_DO(mule##suffix, mul_element, prod_element, 1) \
  VMUL_DO(mulo##suffix, mul_element, prod_element, 0)
void helper_vnmsubfp (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, ppc_avr_t *c)
{
    int i;
    for (i = 0; i < ARRAY_SIZE(r->f); i++) {
        HANDLE_NAN3(r->f[i], a->f[i], b->f[i], c->f[i]) {
            /* Need to do the computation in higher precision and round
             * once at the end. */
            float64 af, bf, cf, t;
            af = float32_to_float64(a->f[i], &env->vec_status);
            bf = float32_to_float64(b->f[i], &env->vec_status);
            cf = float32_to_float64(c->f[i], &env->vec_status);
            t = float64_mul(af, cf, &env->vec_status);
            t = float64_sub(t, bf, &env->vec_status);
            t = float64_chs(t);
            r->f[i] = float64_to_float32(t, &env->vec_status);
        }
    }
}
void helper_vperm (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, ppc_avr_t *c)
{
    ppc_avr_t result;
    int i;

    VECTOR_FOR_INORDER_I (i, u8) {
        int s = c->u8[i] & 0x1f;
#if defined(HOST_WORDS_BIGENDIAN)
        int index = s & 0xf;
#else
        int index = 15 - (s & 0xf);
#endif
        if (s & 0x10) {
            result.u8[i] = b->u8[index];
        } else {
            result.u8[i] = a->u8[index];
        }
    }
    *r = result;
}
#if defined(HOST_WORDS_BIGENDIAN)
#define PKBIG 1
#else
#define PKBIG 0
#endif
void helper_vpkpx (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
{
    int i, j;
    ppc_avr_t result;
#if defined(HOST_WORDS_BIGENDIAN)
    const ppc_avr_t *x[2] = { a, b };
#else
    const ppc_avr_t *x[2] = { b, a };
#endif

    VECTOR_FOR_INORDER_I (i, u64) {
        VECTOR_FOR_INORDER_I (j, u32){
            uint32_t e = x[i]->u32[j];
            result.u16[4*i+j] = (((e >> 9) & 0xfc00) |
                                 ((e >> 6) & 0x3e0) |
                                 ((e >> 3) & 0x1f));
        }
    }
    *r = result;
}
#define VPK(suffix, from, to, cvt, dosat)                               \
    void helper_vpk##suffix (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)  \
    {                                                                   \
        int i;                                                          \
        int sat = 0;                                                    \
        ppc_avr_t result;                                               \
        ppc_avr_t *a0 = PKBIG ? a : b;                                  \
        ppc_avr_t *a1 = PKBIG ? b : a;                                  \
        VECTOR_FOR_INORDER_I (i, from) {                                \
            result.to[i] = cvt(a0->from[i], &sat);                      \
            result.to[i+ARRAY_SIZE(r->from)] = cvt(a1->from[i], &sat);  \
        }                                                               \
        *r = result;                                                    \
        if (dosat && sat) {                                             \
            env->vscr |= (1 << VSCR_SAT);                               \
        }                                                               \
    }
#define I(x, y) (x)
VPK(shss, s16, s8, cvtshsb, 1)
VPK(shus, s16, u8, cvtshub, 1)
VPK(swss, s32, s16, cvtswsh, 1)
VPK(swus, s32, u16, cvtswuh, 1)
VPK(uhus, u16, u8, cvtuhub, 1)
VPK(uwus, u32, u16, cvtuwuh, 1)
VPK(uhum, u16, u8, I, 0)
VPK(uwum, u32, u16, I, 0)
#undef I
void helper_vrefp (ppc_avr_t *r, ppc_avr_t *b)
{
    int i;
    for (i = 0; i < ARRAY_SIZE(r->f); i++) {
        HANDLE_NAN1(r->f[i], b->f[i]) {
            r->f[i] = float32_div(float32_one, b->f[i], &env->vec_status);
        }
    }
}
#define VRFI(suffix, rounding)                                          \
    void helper_vrfi##suffix (ppc_avr_t *r, ppc_avr_t *b)               \
    {                                                                   \
        int i;                                                          \
        float_status s = env->vec_status;                               \
        set_float_rounding_mode(rounding, &s);                          \
        for (i = 0; i < ARRAY_SIZE(r->f); i++) {                        \
            HANDLE_NAN1(r->f[i], b->f[i]) {                             \
                r->f[i] = float32_round_to_int (b->f[i], &s);           \
            }                                                           \
        }                                                               \
    }
VRFI(n, float_round_nearest_even)
VRFI(m, float_round_down)
VRFI(p, float_round_up)
VRFI(z, float_round_to_zero)
#define VROTATE(suffix, element)                                        \
    void helper_vrl##suffix (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)  \
    {                                                                   \
        int i;                                                          \
        for (i = 0; i < ARRAY_SIZE(r->element); i++) {                  \
            unsigned int mask = ((1 << (3 + (sizeof (a->element[0]) >> 1))) - 1); \
            unsigned int shift = b->element[i] & mask;                  \
            r->element[i] = (a->element[i] << shift) | (a->element[i] >> (sizeof(a->element[0]) * 8 - shift)); \
        }                                                               \
    }
void helper_vrsqrtefp (ppc_avr_t *r, ppc_avr_t *b)
{
    int i;
    for (i = 0; i < ARRAY_SIZE(r->f); i++) {
        HANDLE_NAN1(r->f[i], b->f[i]) {
            float32 t = float32_sqrt(b->f[i], &env->vec_status);
            r->f[i] = float32_div(float32_one, t, &env->vec_status);
        }
    }
}
void helper_vsel (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, ppc_avr_t *c)
{
    r->u64[0] = (a->u64[0] & ~c->u64[0]) | (b->u64[0] & c->u64[0]);
    r->u64[1] = (a->u64[1] & ~c->u64[1]) | (b->u64[1] & c->u64[1]);
}
void helper_vexptefp (ppc_avr_t *r, ppc_avr_t *b)
{
    int i;
    for (i = 0; i < ARRAY_SIZE(r->f); i++) {
        HANDLE_NAN1(r->f[i], b->f[i]) {
            r->f[i] = float32_exp2(b->f[i], &env->vec_status);
        }
    }
}
void helper_vlogefp (ppc_avr_t *r, ppc_avr_t *b)
{
    int i;
    for (i = 0; i < ARRAY_SIZE(r->f); i++) {
        HANDLE_NAN1(r->f[i], b->f[i]) {
            r->f[i] = float32_log2(b->f[i], &env->vec_status);
        }
    }
}
#if defined(HOST_WORDS_BIGENDIAN)
/* The specification says that the results are undefined if all of the
 * shift counts are not identical.  We check to make sure that they are
 * to conform to what real hardware appears to do. */
#define VSHIFT(suffix, leftp)                                           \
    void helper_vs##suffix (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)   \
    {                                                                   \
        int shift = b->u8[LO_IDX*15] & 0x7;                             \
        int doit = 1;                                                   \
        int i;                                                          \
        for (i = 0; i < ARRAY_SIZE(r->u8); i++) {                       \
            doit = doit && ((b->u8[i] & 0x7) == shift);                 \
        }                                                               \
        if (doit) {                                                     \
            if (shift == 0) {                                           \
                *r = *a;                                                \
            } else if (leftp) {                                         \
                uint64_t carry = a->u64[LO_IDX] >> (64 - shift);        \
                r->u64[HI_IDX] = (a->u64[HI_IDX] << shift) | carry;     \
                r->u64[LO_IDX] = a->u64[LO_IDX] << shift;               \
            } else {                                                    \
                uint64_t carry = a->u64[HI_IDX] << (64 - shift);        \
                r->u64[LO_IDX] = (a->u64[LO_IDX] >> shift) | carry;     \
                r->u64[HI_IDX] = a->u64[HI_IDX] >> shift;               \
            }                                                           \
        }                                                               \
    }
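/* Example: if every byte of b holds the value 3, the whole 128-bit contents
 * of a are shifted by 3 bits; if the per-byte counts disagree, doit ends up
 * 0 and r is left unchanged, matching the "undefined result" escape hatch
 * described above. */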
#define VSL(suffix, element)                                            \
    void helper_vsl##suffix (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)  \
    {                                                                   \
        int i;                                                          \
        for (i = 0; i < ARRAY_SIZE(r->element); i++) {                  \
            unsigned int mask = ((1 << (3 + (sizeof (a->element[0]) >> 1))) - 1); \
            unsigned int shift = b->element[i] & mask;                  \
            r->element[i] = a->element[i] << shift;                     \
        }                                                               \
    }
void helper_vsldoi (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, uint32_t shift)
{
    int sh = shift & 0xf;
    int i;
    ppc_avr_t result;

#if defined(HOST_WORDS_BIGENDIAN)
    for (i = 0; i < ARRAY_SIZE(r->u8); i++) {
        int index = sh + i;
        if (index > 0xf) {
            result.u8[i] = b->u8[index-0x10];
        } else {
            result.u8[i] = a->u8[index];
        }
    }
#else
    for (i = 0; i < ARRAY_SIZE(r->u8); i++) {
        int index = (16 - sh) + i;
        if (index > 0xf) {
            result.u8[i] = a->u8[index-0x10];
        } else {
            result.u8[i] = b->u8[index];
        }
    }
#endif
    *r = result;
}
void helper_vslo (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
{
    int sh = (b->u8[LO_IDX*0xf] >> 3) & 0xf;

#if defined (HOST_WORDS_BIGENDIAN)
    memmove (&r->u8[0], &a->u8[sh], 16-sh);
    memset (&r->u8[16-sh], 0, sh);
#else
    memmove (&r->u8[sh], &a->u8[0], 16-sh);
    memset (&r->u8[0], 0, sh);
#endif
}
/* Experimental testing shows that hardware masks the immediate. */
#define _SPLAT_MASKED(element) (splat & (ARRAY_SIZE(r->element) - 1))
#if defined(HOST_WORDS_BIGENDIAN)
#define SPLAT_ELEMENT(element) _SPLAT_MASKED(element)
#else
#define SPLAT_ELEMENT(element) (ARRAY_SIZE(r->element)-1 - _SPLAT_MASKED(element))
#endif
#define VSPLT(suffix, element)                                          \
    void helper_vsplt##suffix (ppc_avr_t *r, ppc_avr_t *b, uint32_t splat) \
    {                                                                   \
        uint32_t s = b->element[SPLAT_ELEMENT(element)];                \
        int i;                                                          \
        for (i = 0; i < ARRAY_SIZE(r->element); i++) {                  \
            r->element[i] = s;                                          \
        }                                                               \
    }
VSPLT(b, u8)
VSPLT(h, u16)
VSPLT(w, u32)
#undef VSPLT
#undef SPLAT_ELEMENT
#undef _SPLAT_MASKED

#define VSPLTI(suffix, element, splat_type)                     \
    void helper_vspltis##suffix (ppc_avr_t *r, uint32_t splat)  \
    {                                                           \
        splat_type x = (int8_t)(splat << 3) >> 3;               \
        int i;                                                  \
        for (i = 0; i < ARRAY_SIZE(r->element); i++) {          \
            r->element[i] = x;                                  \
        }                                                       \
    }
VSPLTI(b, s8, int8_t)
VSPLTI(h, s16, int16_t)
VSPLTI(w, s32, int32_t)
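/* Example: vspltb with splat = 0x13 masks the index down to 0x13 & 15 = 3,
 * so byte element 3 (in the host's element order) is copied to every byte
 * of r.  vspltisb sign-extends the 5-bit immediate: splat = 0x1f becomes
 * (int8_t)(0x1f << 3) >> 3 = -1, i.e. all bytes 0xff. */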
#define VSR(suffix, element)                                            \
    void helper_vsr##suffix (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)  \
    {                                                                   \
        int i;                                                          \
        for (i = 0; i < ARRAY_SIZE(r->element); i++) {                  \
            unsigned int mask = ((1 << (3 + (sizeof (a->element[0]) >> 1))) - 1); \
            unsigned int shift = b->element[i] & mask;                  \
            r->element[i] = a->element[i] >> shift;                     \
        }                                                               \
    }
void helper_vsro (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
{
    int sh = (b->u8[LO_IDX*0xf] >> 3) & 0xf;

#if defined (HOST_WORDS_BIGENDIAN)
    memmove (&r->u8[sh], &a->u8[0], 16-sh);
    memset (&r->u8[0], 0, sh);
#else
    memmove (&r->u8[0], &a->u8[sh], 16-sh);
    memset (&r->u8[16-sh], 0, sh);
#endif
}
void helper_vsubcuw (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
{
    int i;
    for (i = 0; i < ARRAY_SIZE(r->u32); i++) {
        r->u32[i] = a->u32[i] >= b->u32[i];
    }
}
void helper_vsumsws (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
{
    int64_t t;
    int i, upper;
    ppc_avr_t result;
    int sat = 0;

#if defined(HOST_WORDS_BIGENDIAN)
    upper = ARRAY_SIZE(r->s32)-1;
#else
    upper = 0;
#endif
    t = (int64_t)b->s32[upper];
    for (i = 0; i < ARRAY_SIZE(r->s32); i++) {
        t += a->s32[i];
        result.s32[i] = 0;
    }
    result.s32[upper] = cvtsdsw(t, &sat);
    *r = result;

    if (sat) {
        env->vscr |= (1 << VSCR_SAT);
    }
}
void helper_vsum2sws (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
{
    int i, j, upper;
    ppc_avr_t result;
    int sat = 0;

#if defined(HOST_WORDS_BIGENDIAN)
    upper = 1;
#else
    upper = 0;
#endif
    for (i = 0; i < ARRAY_SIZE(r->u64); i++) {
        int64_t t = (int64_t)b->s32[upper+i*2];
        result.u64[i] = 0;
        for (j = 0; j < ARRAY_SIZE(r->u64); j++) {
            t += a->s32[2*i+j];
        }
        result.s32[upper+i*2] = cvtsdsw(t, &sat);
    }

    *r = result;
    if (sat) {
        env->vscr |= (1 << VSCR_SAT);
    }
}
void helper_vsum4sbs (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
{
    int i, j;
    int sat = 0;

    for (i = 0; i < ARRAY_SIZE(r->s32); i++) {
        int64_t t = (int64_t)b->s32[i];
        for (j = 0; j < ARRAY_SIZE(r->s32); j++) {
            t += a->s8[4*i+j];
        }
        r->s32[i] = cvtsdsw(t, &sat);
    }

    if (sat) {
        env->vscr |= (1 << VSCR_SAT);
    }
}
void helper_vsum4shs (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
{
    int sat = 0;
    int i;

    for (i = 0; i < ARRAY_SIZE(r->s32); i++) {
        int64_t t = (int64_t)b->s32[i];
        t += a->s16[2*i] + a->s16[2*i+1];
        r->s32[i] = cvtsdsw(t, &sat);
    }

    if (sat) {
        env->vscr |= (1 << VSCR_SAT);
    }
}
void helper_vsum4ubs (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
{
    int i, j;
    int sat = 0;

    for (i = 0; i < ARRAY_SIZE(r->u32); i++) {
        uint64_t t = (uint64_t)b->u32[i];
        for (j = 0; j < ARRAY_SIZE(r->u32); j++) {
            t += a->u8[4*i+j];
        }
        r->u32[i] = cvtuduw(t, &sat);
    }

    if (sat) {
        env->vscr |= (1 << VSCR_SAT);
    }
}
#if defined(HOST_WORDS_BIGENDIAN)
#define UPKHI 1
#define UPKLO 0
#else
#define UPKHI 0
#define UPKLO 1
#endif
#define VUPKPX(suffix, hi)                                              \
    void helper_vupk##suffix (ppc_avr_t *r, ppc_avr_t *b)               \
    {                                                                   \
        int i;                                                          \
        ppc_avr_t result;                                               \
        for (i = 0; i < ARRAY_SIZE(r->u32); i++) {                      \
            uint16_t e = b->u16[hi ? i : i+4];                          \
            uint8_t a = (e >> 15) ? 0xff : 0;                           \
            uint8_t r = (e >> 10) & 0x1f;                               \
            uint8_t g = (e >> 5) & 0x1f;                                \
            uint8_t b = e & 0x1f;                                       \
            result.u32[i] = (a << 24) | (r << 16) | (g << 8) | b;       \
        }                                                               \
        *r = result;                                                    \
    }
#define VUPK(suffix, unpacked, packee, hi)                              \
    void helper_vupk##suffix (ppc_avr_t *r, ppc_avr_t *b)               \
    {                                                                   \
        int i;                                                          \
        ppc_avr_t result;                                               \
        if (hi) {                                                       \
            for (i = 0; i < ARRAY_SIZE(r->unpacked); i++) {             \
                result.unpacked[i] = b->packee[i];                      \
            }                                                           \
        } else {                                                        \
            for (i = ARRAY_SIZE(r->unpacked); i < ARRAY_SIZE(r->packee); i++) { \
                result.unpacked[i-ARRAY_SIZE(r->unpacked)] = b->packee[i]; \
            }                                                           \
        }                                                               \
        *r = result;                                                    \
    }
VUPK(hsb, s16, s8, UPKHI)
VUPK(hsh, s32, s16, UPKHI)
VUPK(lsb, s16, s8, UPKLO)
VUPK(lsh, s32, s16, UPKLO)
#undef DO_HANDLE_NAN
#undef VECTOR_FOR_INORDER_I
/*****************************************************************************/
/* SPE extension helpers */
/* Use a table to make this quicker */
static uint8_t hbrev[16] = {
    0x0, 0x8, 0x4, 0xC, 0x2, 0xA, 0x6, 0xE,
    0x1, 0x9, 0x5, 0xD, 0x3, 0xB, 0x7, 0xF,
};

static inline uint8_t byte_reverse(uint8_t val)
{
    return hbrev[val >> 4] | (hbrev[val & 0xF] << 4);
}

static inline uint32_t word_reverse(uint32_t val)
{
    return byte_reverse(val >> 24) | (byte_reverse(val >> 16) << 8) |
        (byte_reverse(val >> 8) << 16) | (byte_reverse(val) << 24);
}
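/* Example: byte_reverse(0x2C) looks up hbrev[0x2] = 0x4 and hbrev[0xC] = 0x3
 * and returns 0x4 | (0x3 << 4) = 0x34, the bit-reversed byte; word_reverse
 * applies the same nibble trick to each byte and swaps the byte order. */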
#define MASKBITS 16 // Random value - to be fixed (implementation dependent)
target_ulong helper_brinc (target_ulong arg1, target_ulong arg2)
{
    uint32_t a, b, d, mask;

    mask = UINT32_MAX >> (32 - MASKBITS);
    a = arg1 & mask;
    b = arg2 & mask;
    d = word_reverse(1 + word_reverse(a | ~b));
    return (arg1 & ~mask) | (d & b);
}
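/* Example: with arg2 masking to 0xF (a 16-entry buffer) and arg1 = 0,
 * a | ~b = 0xFFFFFFF0, its bit reverse is 0x0FFFFFFF, adding 1 gives
 * 0x10000000, and reversing again yields 8, so the bit-reversed successor
 * of index 0 is 8. */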
uint32_t helper_cntlsw32 (uint32_t val)
{
    if (val & 0x80000000)
        return clz32(~val);
    else
        return clz32(val);
}

uint32_t helper_cntlzw32 (uint32_t val)
{
    return clz32(val);
}
/* Single-precision floating-point conversions */
static inline uint32_t efscfsi(uint32_t val)
{
    CPU_FloatU u;

    u.f = int32_to_float32(val, &env->vec_status);

    return u.l;
}

static inline uint32_t efscfui(uint32_t val)
{
    CPU_FloatU u;

    u.f = uint32_to_float32(val, &env->vec_status);

    return u.l;
}

static inline int32_t efsctsi(uint32_t val)
{
    CPU_FloatU u;

    u.l = val;
    /* NaNs are not treated the way IEEE 754 specifies */
    if (unlikely(float32_is_quiet_nan(u.f)))
        return 0;

    return float32_to_int32(u.f, &env->vec_status);
}

static inline uint32_t efsctui(uint32_t val)
{
    CPU_FloatU u;

    u.l = val;
    /* NaNs are not treated the way IEEE 754 specifies */
    if (unlikely(float32_is_quiet_nan(u.f)))
        return 0;

    return float32_to_uint32(u.f, &env->vec_status);
}

static inline uint32_t efsctsiz(uint32_t val)
{
    CPU_FloatU u;

    u.l = val;
    /* NaNs are not treated the way IEEE 754 specifies */
    if (unlikely(float32_is_quiet_nan(u.f)))
        return 0;

    return float32_to_int32_round_to_zero(u.f, &env->vec_status);
}

static inline uint32_t efsctuiz(uint32_t val)
{
    CPU_FloatU u;

    u.l = val;
    /* NaNs are not treated the way IEEE 754 specifies */
    if (unlikely(float32_is_quiet_nan(u.f)))
        return 0;

    return float32_to_uint32_round_to_zero(u.f, &env->vec_status);
}
static inline uint32_t efscfsf(uint32_t val)
{
    CPU_FloatU u;
    float32 tmp;

    u.f = int32_to_float32(val, &env->vec_status);
    tmp = int64_to_float32(1ULL << 32, &env->vec_status);
    u.f = float32_div(u.f, tmp, &env->vec_status);

    return u.l;
}

static inline uint32_t efscfuf(uint32_t val)
{
    CPU_FloatU u;
    float32 tmp;

    u.f = uint32_to_float32(val, &env->vec_status);
    tmp = uint64_to_float32(1ULL << 32, &env->vec_status);
    u.f = float32_div(u.f, tmp, &env->vec_status);

    return u.l;
}
static inline uint32_t efsctsf(uint32_t val)
{
    CPU_FloatU u;
    float32 tmp;

    u.l = val;
    /* NaNs are not treated the way IEEE 754 specifies */
    if (unlikely(float32_is_quiet_nan(u.f)))
        return 0;
    tmp = uint64_to_float32(1ULL << 32, &env->vec_status);
    u.f = float32_mul(u.f, tmp, &env->vec_status);

    return float32_to_int32(u.f, &env->vec_status);
}

static inline uint32_t efsctuf(uint32_t val)
{
    CPU_FloatU u;
    float32 tmp;

    u.l = val;
    /* NaNs are not treated the way IEEE 754 specifies */
    if (unlikely(float32_is_quiet_nan(u.f)))
        return 0;
    tmp = uint64_to_float32(1ULL << 32, &env->vec_status);
    u.f = float32_mul(u.f, tmp, &env->vec_status);

    return float32_to_uint32(u.f, &env->vec_status);
}
#define HELPER_SPE_SINGLE_CONV(name)                                    \
uint32_t helper_e##name (uint32_t val)                                  \
{                                                                       \
    return e##name(val);                                                \
}
HELPER_SPE_SINGLE_CONV(fscfsi);
HELPER_SPE_SINGLE_CONV(fscfui);
HELPER_SPE_SINGLE_CONV(fscfuf);
HELPER_SPE_SINGLE_CONV(fscfsf);
HELPER_SPE_SINGLE_CONV(fsctsi);
HELPER_SPE_SINGLE_CONV(fsctui);
HELPER_SPE_SINGLE_CONV(fsctsiz);
HELPER_SPE_SINGLE_CONV(fsctuiz);
HELPER_SPE_SINGLE_CONV(fsctsf);
HELPER_SPE_SINGLE_CONV(fsctuf);
#define HELPER_SPE_VECTOR_CONV(name)                                    \
uint64_t helper_ev##name (uint64_t val)                                 \
{                                                                       \
    return ((uint64_t)e##name(val >> 32) << 32) |                       \
            (uint64_t)e##name(val);                                     \
}
HELPER_SPE_VECTOR_CONV(fscfsi);
HELPER_SPE_VECTOR_CONV(fscfui);
HELPER_SPE_VECTOR_CONV(fscfuf);
HELPER_SPE_VECTOR_CONV(fscfsf);
HELPER_SPE_VECTOR_CONV(fsctsi);
HELPER_SPE_VECTOR_CONV(fsctui);
HELPER_SPE_VECTOR_CONV(fsctsiz);
HELPER_SPE_VECTOR_CONV(fsctuiz);
HELPER_SPE_VECTOR_CONV(fsctsf);
HELPER_SPE_VECTOR_CONV(fsctuf);
/* Single-precision floating-point arithmetic */
static inline uint32_t efsadd(uint32_t op1, uint32_t op2)
{
    CPU_FloatU u1, u2;
    u1.l = op1;
    u2.l = op2;
    u1.f = float32_add(u1.f, u2.f, &env->vec_status);
    return u1.l;
}

static inline uint32_t efssub(uint32_t op1, uint32_t op2)
{
    CPU_FloatU u1, u2;
    u1.l = op1;
    u2.l = op2;
    u1.f = float32_sub(u1.f, u2.f, &env->vec_status);
    return u1.l;
}

static inline uint32_t efsmul(uint32_t op1, uint32_t op2)
{
    CPU_FloatU u1, u2;
    u1.l = op1;
    u2.l = op2;
    u1.f = float32_mul(u1.f, u2.f, &env->vec_status);
    return u1.l;
}

static inline uint32_t efsdiv(uint32_t op1, uint32_t op2)
{
    CPU_FloatU u1, u2;
    u1.l = op1;
    u2.l = op2;
    u1.f = float32_div(u1.f, u2.f, &env->vec_status);
    return u1.l;
}
#define HELPER_SPE_SINGLE_ARITH(name)                                   \
uint32_t helper_e##name (uint32_t op1, uint32_t op2)                    \
{                                                                       \
    return e##name(op1, op2);                                           \
}
HELPER_SPE_SINGLE_ARITH(fsadd);
HELPER_SPE_SINGLE_ARITH(fssub);
HELPER_SPE_SINGLE_ARITH(fsmul);
HELPER_SPE_SINGLE_ARITH(fsdiv);
#define HELPER_SPE_VECTOR_ARITH(name)                                   \
uint64_t helper_ev##name (uint64_t op1, uint64_t op2)                   \
{                                                                       \
    return ((uint64_t)e##name(op1 >> 32, op2 >> 32) << 32) |            \
            (uint64_t)e##name(op1, op2);                                \
}
HELPER_SPE_VECTOR_ARITH(fsadd);
HELPER_SPE_VECTOR_ARITH(fssub);
HELPER_SPE_VECTOR_ARITH(fsmul);
HELPER_SPE_VECTOR_ARITH(fsdiv);
/* Single-precision floating-point comparisons */
static inline uint32_t efststlt(uint32_t op1, uint32_t op2)
{
    CPU_FloatU u1, u2;
    u1.l = op1;
    u2.l = op2;
    return float32_lt(u1.f, u2.f, &env->vec_status) ? 4 : 0;
}

static inline uint32_t efststgt(uint32_t op1, uint32_t op2)
{
    CPU_FloatU u1, u2;
    u1.l = op1;
    u2.l = op2;
    return float32_le(u1.f, u2.f, &env->vec_status) ? 0 : 4;
}

static inline uint32_t efststeq(uint32_t op1, uint32_t op2)
{
    CPU_FloatU u1, u2;
    u1.l = op1;
    u2.l = op2;
    return float32_eq(u1.f, u2.f, &env->vec_status) ? 4 : 0;
}
static inline uint32_t efscmplt(uint32_t op1, uint32_t op2)
{
    /* XXX: TODO: test special values (NaN, infinities, ...) */
    return efststlt(op1, op2);
}

static inline uint32_t efscmpgt(uint32_t op1, uint32_t op2)
{
    /* XXX: TODO: test special values (NaN, infinities, ...) */
    return efststgt(op1, op2);
}

static inline uint32_t efscmpeq(uint32_t op1, uint32_t op2)
{
    /* XXX: TODO: test special values (NaN, infinities, ...) */
    return efststeq(op1, op2);
}
#define HELPER_SINGLE_SPE_CMP(name)                                     \
uint32_t helper_e##name (uint32_t op1, uint32_t op2)                    \
{                                                                       \
    return e##name(op1, op2) << 2;                                      \
}
HELPER_SINGLE_SPE_CMP(fststlt);
HELPER_SINGLE_SPE_CMP(fststgt);
HELPER_SINGLE_SPE_CMP(fststeq);
HELPER_SINGLE_SPE_CMP(fscmplt);
HELPER_SINGLE_SPE_CMP(fscmpgt);
HELPER_SINGLE_SPE_CMP(fscmpeq);
static inline uint32_t evcmp_merge(int t0, int t1)
{
    return (t0 << 3) | (t1 << 2) | ((t0 | t1) << 1) | (t0 & t1);
}
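/* Example: evcmp_merge(1, 0) = (1 << 3) | (0 << 2) | (1 << 1) | 0 = 0xa,
 * i.e. "high element true, low element false, at least one true, not both". */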
#define HELPER_VECTOR_SPE_CMP(name)                                     \
uint32_t helper_ev##name (uint64_t op1, uint64_t op2)                   \
{                                                                       \
    return evcmp_merge(e##name(op1 >> 32, op2 >> 32), e##name(op1, op2)); \
}
HELPER_VECTOR_SPE_CMP(fststlt);
HELPER_VECTOR_SPE_CMP(fststgt);
HELPER_VECTOR_SPE_CMP(fststeq);
HELPER_VECTOR_SPE_CMP(fscmplt);
HELPER_VECTOR_SPE_CMP(fscmpgt);
HELPER_VECTOR_SPE_CMP(fscmpeq);
/* Double-precision floating-point conversion */
uint64_t helper_efdcfsi (uint32_t val)
{
    CPU_DoubleU u;

    u.d = int32_to_float64(val, &env->vec_status);

    return u.ll;
}

uint64_t helper_efdcfsid (uint64_t val)
{
    CPU_DoubleU u;

    u.d = int64_to_float64(val, &env->vec_status);

    return u.ll;
}

uint64_t helper_efdcfui (uint32_t val)
{
    CPU_DoubleU u;

    u.d = uint32_to_float64(val, &env->vec_status);

    return u.ll;
}

uint64_t helper_efdcfuid (uint64_t val)
{
    CPU_DoubleU u;

    u.d = uint64_to_float64(val, &env->vec_status);

    return u.ll;
}
uint32_t helper_efdctsi (uint64_t val)
{
    CPU_DoubleU u;

    u.ll = val;
    /* NaNs are not treated the way IEEE 754 specifies */
    if (unlikely(float64_is_any_nan(u.d))) {
        return 0;
    }

    return float64_to_int32(u.d, &env->vec_status);
}

uint32_t helper_efdctui (uint64_t val)
{
    CPU_DoubleU u;

    u.ll = val;
    /* NaNs are not treated the way IEEE 754 specifies */
    if (unlikely(float64_is_any_nan(u.d))) {
        return 0;
    }

    return float64_to_uint32(u.d, &env->vec_status);
}

uint32_t helper_efdctsiz (uint64_t val)
{
    CPU_DoubleU u;

    u.ll = val;
    /* NaNs are not treated the way IEEE 754 specifies */
    if (unlikely(float64_is_any_nan(u.d))) {
        return 0;
    }

    return float64_to_int32_round_to_zero(u.d, &env->vec_status);
}

uint64_t helper_efdctsidz (uint64_t val)
{
    CPU_DoubleU u;

    u.ll = val;
    /* NaNs are not treated the way IEEE 754 specifies */
    if (unlikely(float64_is_any_nan(u.d))) {
        return 0;
    }

    return float64_to_int64_round_to_zero(u.d, &env->vec_status);
}

uint32_t helper_efdctuiz (uint64_t val)
{
    CPU_DoubleU u;

    u.ll = val;
    /* NaNs are not treated the way IEEE 754 specifies */
    if (unlikely(float64_is_any_nan(u.d))) {
        return 0;
    }

    return float64_to_uint32_round_to_zero(u.d, &env->vec_status);
}

uint64_t helper_efdctuidz (uint64_t val)
{
    CPU_DoubleU u;

    u.ll = val;
    /* NaNs are not treated the way IEEE 754 specifies */
    if (unlikely(float64_is_any_nan(u.d))) {
        return 0;
    }

    return float64_to_uint64_round_to_zero(u.d, &env->vec_status);
}
uint64_t helper_efdcfsf (uint32_t val)
{
    CPU_DoubleU u;
    float64 tmp;

    u.d = int32_to_float64(val, &env->vec_status);
    tmp = int64_to_float64(1ULL << 32, &env->vec_status);
    u.d = float64_div(u.d, tmp, &env->vec_status);

    return u.ll;
}

uint64_t helper_efdcfuf (uint32_t val)
{
    CPU_DoubleU u;
    float64 tmp;

    u.d = uint32_to_float64(val, &env->vec_status);
    tmp = int64_to_float64(1ULL << 32, &env->vec_status);
    u.d = float64_div(u.d, tmp, &env->vec_status);

    return u.ll;
}
uint32_t helper_efdctsf (uint64_t val)
{
    CPU_DoubleU u;
    float64 tmp;

    u.ll = val;
    /* NaNs are not treated the way IEEE 754 specifies */
    if (unlikely(float64_is_any_nan(u.d))) {
        return 0;
    }
    tmp = uint64_to_float64(1ULL << 32, &env->vec_status);
    u.d = float64_mul(u.d, tmp, &env->vec_status);

    return float64_to_int32(u.d, &env->vec_status);
}

uint32_t helper_efdctuf (uint64_t val)
{
    CPU_DoubleU u;
    float64 tmp;

    u.ll = val;
    /* NaNs are not treated the way IEEE 754 specifies */
    if (unlikely(float64_is_any_nan(u.d))) {
        return 0;
    }
    tmp = uint64_to_float64(1ULL << 32, &env->vec_status);
    u.d = float64_mul(u.d, tmp, &env->vec_status);

    return float64_to_uint32(u.d, &env->vec_status);
}
uint32_t helper_efscfd (uint64_t val)
{
    CPU_DoubleU u1;
    CPU_FloatU u2;

    u1.ll = val;
    u2.f = float64_to_float32(u1.d, &env->vec_status);

    return u2.l;
}

uint64_t helper_efdcfs (uint32_t val)
{
    CPU_DoubleU u2;
    CPU_FloatU u1;

    u1.l = val;
    u2.d = float32_to_float64(u1.f, &env->vec_status);

    return u2.ll;
}
/* Double-precision floating-point arithmetic */
uint64_t helper_efdadd (uint64_t op1, uint64_t op2)
{
    CPU_DoubleU u1, u2;
    u1.ll = op1;
    u2.ll = op2;
    u1.d = float64_add(u1.d, u2.d, &env->vec_status);
    return u1.ll;
}

uint64_t helper_efdsub (uint64_t op1, uint64_t op2)
{
    CPU_DoubleU u1, u2;
    u1.ll = op1;
    u2.ll = op2;
    u1.d = float64_sub(u1.d, u2.d, &env->vec_status);
    return u1.ll;
}

uint64_t helper_efdmul (uint64_t op1, uint64_t op2)
{
    CPU_DoubleU u1, u2;
    u1.ll = op1;
    u2.ll = op2;
    u1.d = float64_mul(u1.d, u2.d, &env->vec_status);
    return u1.ll;
}

uint64_t helper_efddiv (uint64_t op1, uint64_t op2)
{
    CPU_DoubleU u1, u2;
    u1.ll = op1;
    u2.ll = op2;
    u1.d = float64_div(u1.d, u2.d, &env->vec_status);
    return u1.ll;
}
/* Double-precision floating-point comparison helpers */
uint32_t helper_efdtstlt (uint64_t op1, uint64_t op2)
{
    CPU_DoubleU u1, u2;
    u1.ll = op1;
    u2.ll = op2;
    return float64_lt(u1.d, u2.d, &env->vec_status) ? 4 : 0;
}

uint32_t helper_efdtstgt (uint64_t op1, uint64_t op2)
{
    CPU_DoubleU u1, u2;
    u1.ll = op1;
    u2.ll = op2;
    return float64_le(u1.d, u2.d, &env->vec_status) ? 0 : 4;
}

uint32_t helper_efdtsteq (uint64_t op1, uint64_t op2)
{
    CPU_DoubleU u1, u2;
    u1.ll = op1;
    u2.ll = op2;
    return float64_eq(u1.d, u2.d, &env->vec_status) ? 4 : 0;
}
uint32_t helper_efdcmplt (uint64_t op1, uint64_t op2)
{
    /* XXX: TODO: test special values (NaN, infinities, ...) */
    return helper_efdtstlt(op1, op2);
}

uint32_t helper_efdcmpgt (uint64_t op1, uint64_t op2)
{
    /* XXX: TODO: test special values (NaN, infinities, ...) */
    return helper_efdtstgt(op1, op2);
}

uint32_t helper_efdcmpeq (uint64_t op1, uint64_t op2)
{
    /* XXX: TODO: test special values (NaN, infinities, ...) */
    return helper_efdtsteq(op1, op2);
}
/*****************************************************************************/
/* Softmmu support */
#if !defined (CONFIG_USER_ONLY)

#define MMUSUFFIX _mmu

#define SHIFT 0
#include "softmmu_template.h"

#define SHIFT 1
#include "softmmu_template.h"

#define SHIFT 2
#include "softmmu_template.h"

#define SHIFT 3
#include "softmmu_template.h"
/* try to fill the TLB and return an exception if error. If retaddr is
   NULL, it means that the function was called in C code (i.e. not
   from generated code or from helper.c) */
/* XXX: fix it to restore all registers */
void tlb_fill (target_ulong addr, int is_write, int mmu_idx, void *retaddr)
{
    TranslationBlock *tb;
    CPUState *saved_env;
    unsigned long pc;
    int ret;

    /* XXX: hack to restore env in all cases, even if not called from
       generated code */
    saved_env = env;
    env = cpu_single_env;
    ret = cpu_ppc_handle_mmu_fault(env, addr, is_write, mmu_idx, 1);
    if (unlikely(ret != 0)) {
        if (likely(retaddr)) {
            /* now we have a real cpu fault */
            pc = (unsigned long)retaddr;
            tb = tb_find_pc(pc);
            if (likely(tb)) {
                /* the PC is inside the translated code. It means that we have
                   a virtual CPU fault */
                cpu_restore_state(tb, env, pc, NULL);
            }
        }
        helper_raise_exception_err(env->exception_index, env->error_code);
    }
    env = saved_env;
}
/* Segment registers load and store */
target_ulong helper_load_sr (target_ulong sr_num)
{
#if defined(TARGET_PPC64)
    if (env->mmu_model & POWERPC_MMU_64)
        return ppc_load_sr(env, sr_num);
#endif
    return env->sr[sr_num];
}

void helper_store_sr (target_ulong sr_num, target_ulong val)
{
    ppc_store_sr(env, sr_num, val);
}
/* SLB management */
#if defined(TARGET_PPC64)
void helper_store_slb (target_ulong rb, target_ulong rs)
{
    if (ppc_store_slb(env, rb, rs) < 0) {
        helper_raise_exception_err(POWERPC_EXCP_PROGRAM, POWERPC_EXCP_INVAL);
    }
}

target_ulong helper_load_slb_esid (target_ulong rb)
{
    target_ulong rt;

    if (ppc_load_slb_esid(env, rb, &rt) < 0) {
        helper_raise_exception_err(POWERPC_EXCP_PROGRAM, POWERPC_EXCP_INVAL);
    }
    return rt;
}

target_ulong helper_load_slb_vsid (target_ulong rb)
{
    target_ulong rt;

    if (ppc_load_slb_vsid(env, rb, &rt) < 0) {
        helper_raise_exception_err(POWERPC_EXCP_PROGRAM, POWERPC_EXCP_INVAL);
    }
    return rt;
}

void helper_slbia (void)
{
    ppc_slb_invalidate_all(env);
}

void helper_slbie (target_ulong addr)
{
    ppc_slb_invalidate_one(env, addr);
}
#endif /* defined(TARGET_PPC64) */
/* TLB management */
void helper_tlbia (void)
{
    ppc_tlb_invalidate_all(env);
}

void helper_tlbie (target_ulong addr)
{
    ppc_tlb_invalidate_one(env, addr);
}
/* Software driven TLBs management */
/* PowerPC 602/603 software TLB load instructions helpers */
static void do_6xx_tlb (target_ulong new_EPN, int is_code)
{
    target_ulong RPN, CMP, EPN;
    int way;

    RPN = env->spr[SPR_RPA];
    if (is_code) {
        CMP = env->spr[SPR_ICMP];
        EPN = env->spr[SPR_IMISS];
    } else {
        CMP = env->spr[SPR_DCMP];
        EPN = env->spr[SPR_DMISS];
    }
    way = (env->spr[SPR_SRR1] >> 17) & 1;
    (void)EPN; /* avoid a compiler warning */
    LOG_SWTLB("%s: EPN " TARGET_FMT_lx " " TARGET_FMT_lx " PTE0 " TARGET_FMT_lx
              " PTE1 " TARGET_FMT_lx " way %d\n", __func__, new_EPN, EPN, CMP,
              RPN, way);
    /* Store this TLB */
    ppc6xx_tlb_store(env, (uint32_t)(new_EPN & TARGET_PAGE_MASK),
                     way, is_code, CMP, RPN);
}

void helper_6xx_tlbd (target_ulong EPN)
{
    do_6xx_tlb(EPN, 0);
}

void helper_6xx_tlbi (target_ulong EPN)
{
    do_6xx_tlb(EPN, 1);
}
/* PowerPC 74xx software TLB load instructions helpers */
static void do_74xx_tlb (target_ulong new_EPN, int is_code)
{
    target_ulong RPN, CMP, EPN;
    int way;

    RPN = env->spr[SPR_PTELO];
    CMP = env->spr[SPR_PTEHI];
    EPN = env->spr[SPR_TLBMISS] & ~0x3;
    way = env->spr[SPR_TLBMISS] & 0x3;
    (void)EPN; /* avoid a compiler warning */
    LOG_SWTLB("%s: EPN " TARGET_FMT_lx " " TARGET_FMT_lx " PTE0 " TARGET_FMT_lx
              " PTE1 " TARGET_FMT_lx " way %d\n", __func__, new_EPN, EPN, CMP,
              RPN, way);
    /* Store this TLB */
    ppc6xx_tlb_store(env, (uint32_t)(new_EPN & TARGET_PAGE_MASK),
                     way, is_code, CMP, RPN);
}

void helper_74xx_tlbd (target_ulong EPN)
{
    do_74xx_tlb(EPN, 0);
}

void helper_74xx_tlbi (target_ulong EPN)
{
    do_74xx_tlb(EPN, 1);
}
static inline target_ulong booke_tlb_to_page_size(int size)
{
    return 1024 << (2 * size);
}

static inline int booke_page_size_to_tlb(target_ulong page_size)
{
    int size;

    switch (page_size) {
    case 0x00000400UL:
        size = 0x0;
        break;
    case 0x00001000UL:
        size = 0x1;
        break;
    case 0x00004000UL:
        size = 0x2;
        break;
    case 0x00010000UL:
        size = 0x3;
        break;
    case 0x00040000UL:
        size = 0x4;
        break;
    case 0x00100000UL:
        size = 0x5;
        break;
    case 0x00400000UL:
        size = 0x6;
        break;
    case 0x01000000UL:
        size = 0x7;
        break;
    case 0x04000000UL:
        size = 0x8;
        break;
    case 0x10000000UL:
        size = 0x9;
        break;
    case 0x40000000UL:
        size = 0xA;
        break;
#if defined (TARGET_PPC64)
    case 0x000100000000ULL:
        size = 0xB;
        break;
    case 0x000400000000ULL:
        size = 0xC;
        break;
    case 0x001000000000ULL:
        size = 0xD;
        break;
    case 0x004000000000ULL:
        size = 0xE;
        break;
    case 0x010000000000ULL:
        size = 0xF;
        break;
#endif
    default:
        size = -1;
        break;
    }

    return size;
}
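/* Example: size code 3 corresponds to 1024 << (2 * 3) = 64KB. Callers such
 * as helper_4xx_tlbre_hi() below clamp out-of-range results back to
 * PPC4XX_TLBHI_SIZE_DEFAULT. */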
/* Helpers for 4xx TLB management */
#define PPC4XX_TLB_ENTRY_MASK       0x0000003f  /* Mask for 64 TLB entries */

#define PPC4XX_TLBHI_V              0x00000040
#define PPC4XX_TLBHI_E              0x00000020
#define PPC4XX_TLBHI_SIZE_MIN       0
#define PPC4XX_TLBHI_SIZE_MAX       7
#define PPC4XX_TLBHI_SIZE_DEFAULT   1
#define PPC4XX_TLBHI_SIZE_SHIFT     7
#define PPC4XX_TLBHI_SIZE_MASK      0x00000007

#define PPC4XX_TLBLO_EX             0x00000200
#define PPC4XX_TLBLO_WR             0x00000100
#define PPC4XX_TLBLO_ATTR_MASK      0x000000FF
#define PPC4XX_TLBLO_RPN_MASK       0xFFFFFC00
target_ulong helper_4xx_tlbre_hi (target_ulong entry)
{
    ppcemb_tlb_t *tlb;
    target_ulong ret;
    int size;

    entry &= PPC4XX_TLB_ENTRY_MASK;
    tlb = &env->tlb[entry].tlbe;
    ret = tlb->EPN;
    if (tlb->prot & PAGE_VALID) {
        ret |= PPC4XX_TLBHI_V;
    }
    size = booke_page_size_to_tlb(tlb->size);
    if (size < PPC4XX_TLBHI_SIZE_MIN || size > PPC4XX_TLBHI_SIZE_MAX) {
        size = PPC4XX_TLBHI_SIZE_DEFAULT;
    }
    ret |= size << PPC4XX_TLBHI_SIZE_SHIFT;
    env->spr[SPR_40x_PID] = tlb->PID;
    return ret;
}
target_ulong helper_4xx_tlbre_lo (target_ulong entry)
{
    ppcemb_tlb_t *tlb;
    target_ulong ret;

    entry &= PPC4XX_TLB_ENTRY_MASK;
    tlb = &env->tlb[entry].tlbe;
    ret = tlb->RPN;
    if (tlb->prot & PAGE_EXEC) {
        ret |= PPC4XX_TLBLO_EX;
    }
    if (tlb->prot & PAGE_WRITE) {
        ret |= PPC4XX_TLBLO_WR;
    }
    return ret;
}
void helper_4xx_tlbwe_hi (target_ulong entry, target_ulong val)
{
    ppcemb_tlb_t *tlb;
    target_ulong page, end;

    LOG_SWTLB("%s entry %d val " TARGET_FMT_lx "\n", __func__, (int)entry,
              val);
    entry &= PPC4XX_TLB_ENTRY_MASK;
    tlb = &env->tlb[entry].tlbe;
    /* Invalidate previous TLB (if it's valid) */
    if (tlb->prot & PAGE_VALID) {
        end = tlb->EPN + tlb->size;
        LOG_SWTLB("%s: invalidate old TLB %d start " TARGET_FMT_lx " end "
                  TARGET_FMT_lx "\n", __func__, (int)entry, tlb->EPN, end);
        for (page = tlb->EPN; page < end; page += TARGET_PAGE_SIZE) {
            tlb_flush_page(env, page);
        }
    }
    tlb->size = booke_tlb_to_page_size((val >> PPC4XX_TLBHI_SIZE_SHIFT)
                                       & PPC4XX_TLBHI_SIZE_MASK);
    /* We cannot handle TLB size < TARGET_PAGE_SIZE.
     * If this ever occurs, one should use the ppcemb target instead
     * of the ppc or ppc64 one
     */
    if ((val & PPC4XX_TLBHI_V) && tlb->size < TARGET_PAGE_SIZE) {
        cpu_abort(env, "TLB size " TARGET_FMT_lu " < %u "
                  "are not supported (%d)\n",
                  tlb->size, TARGET_PAGE_SIZE, (int)((val >> 7) & 0x7));
    }
    tlb->EPN = val & ~(tlb->size - 1);
    if (val & PPC4XX_TLBHI_V) {
        tlb->prot |= PAGE_VALID;
        if (val & PPC4XX_TLBHI_E) {
            /* XXX: TO BE FIXED */
            cpu_abort(env,
                      "Little-endian TLB entries are not supported by now\n");
        }
    } else {
        tlb->prot &= ~PAGE_VALID;
    }
    tlb->PID = env->spr[SPR_40x_PID]; /* PID */
    LOG_SWTLB("%s: set up TLB %d RPN " TARGET_FMT_plx " EPN " TARGET_FMT_lx
              " size " TARGET_FMT_lx " prot %c%c%c%c PID %d\n", __func__,
              (int)entry, tlb->RPN, tlb->EPN, tlb->size,
              tlb->prot & PAGE_READ ? 'r' : '-',
              tlb->prot & PAGE_WRITE ? 'w' : '-',
              tlb->prot & PAGE_EXEC ? 'x' : '-',
              tlb->prot & PAGE_VALID ? 'v' : '-', (int)tlb->PID);
    /* Invalidate new TLB (if valid) */
    if (tlb->prot & PAGE_VALID) {
        end = tlb->EPN + tlb->size;
        LOG_SWTLB("%s: invalidate TLB %d start " TARGET_FMT_lx " end "
                  TARGET_FMT_lx "\n", __func__, (int)entry, tlb->EPN, end);
        for (page = tlb->EPN; page < end; page += TARGET_PAGE_SIZE) {
            tlb_flush_page(env, page);
        }
    }
}
void helper_4xx_tlbwe_lo (target_ulong entry, target_ulong val)
{
    ppcemb_tlb_t *tlb;

    LOG_SWTLB("%s entry %i val " TARGET_FMT_lx "\n", __func__, (int)entry,
              val);
    entry &= PPC4XX_TLB_ENTRY_MASK;
    tlb = &env->tlb[entry].tlbe;
    tlb->attr = val & PPC4XX_TLBLO_ATTR_MASK;
    tlb->RPN = val & PPC4XX_TLBLO_RPN_MASK;
    tlb->prot = PAGE_READ;
    if (val & PPC4XX_TLBLO_EX) {
        tlb->prot |= PAGE_EXEC;
    }
    if (val & PPC4XX_TLBLO_WR) {
        tlb->prot |= PAGE_WRITE;
    }
    LOG_SWTLB("%s: set up TLB %d RPN " TARGET_FMT_plx " EPN " TARGET_FMT_lx
              " size " TARGET_FMT_lx " prot %c%c%c%c PID %d\n", __func__,
              (int)entry, tlb->RPN, tlb->EPN, tlb->size,
              tlb->prot & PAGE_READ ? 'r' : '-',
              tlb->prot & PAGE_WRITE ? 'w' : '-',
              tlb->prot & PAGE_EXEC ? 'x' : '-',
              tlb->prot & PAGE_VALID ? 'v' : '-', (int)tlb->PID);
}
target_ulong helper_4xx_tlbsx (target_ulong address)
{
    return ppcemb_tlb_search(env, address, env->spr[SPR_40x_PID]);
}
/* PowerPC 440 TLB management */
void helper_440_tlbwe (uint32_t word, target_ulong entry, target_ulong value)
{
    ppcemb_tlb_t *tlb;
    target_ulong EPN, RPN, size;
    int do_flush_tlbs;

    LOG_SWTLB("%s word %d entry %d value " TARGET_FMT_lx "\n",
              __func__, word, (int)entry, value);
    do_flush_tlbs = 0;
    entry &= 0x3F;
    tlb = &env->tlb[entry].tlbe;
    switch (word) {
    default:
        /* Just here to please gcc */
    case 0:
        EPN = value & 0xFFFFFC00;
        if ((tlb->prot & PAGE_VALID) && EPN != tlb->EPN)
            do_flush_tlbs = 1;
        tlb->EPN = EPN;
        size = booke_tlb_to_page_size((value >> 4) & 0xF);
        if ((tlb->prot & PAGE_VALID) && tlb->size < size)
            do_flush_tlbs = 1;
        tlb->size = size;
        tlb->attr &= ~0x1;
        tlb->attr |= (value >> 8) & 1;
        if (value & 0x200) {
            tlb->prot |= PAGE_VALID;
        } else {
            if (tlb->prot & PAGE_VALID) {
                tlb->prot &= ~PAGE_VALID;
                do_flush_tlbs = 1;
            }
        }
        tlb->PID = env->spr[SPR_440_MMUCR] & 0x000000FF;
        if (do_flush_tlbs)
            tlb_flush(env, 1);
        break;
    case 1:
        RPN = value & 0xFFFFFC0F;
        if ((tlb->prot & PAGE_VALID) && tlb->RPN != RPN)
            tlb_flush(env, 1);
        tlb->RPN = RPN;
        break;
    case 2:
        tlb->attr = (tlb->attr & 0x1) | (value & 0x0000FF00);
        tlb->prot = tlb->prot & PAGE_VALID;
        if (value & 0x1)
            tlb->prot |= PAGE_READ << 4;
        if (value & 0x2)
            tlb->prot |= PAGE_WRITE << 4;
        if (value & 0x4)
            tlb->prot |= PAGE_EXEC << 4;
        if (value & 0x8)
            tlb->prot |= PAGE_READ;
        if (value & 0x10)
            tlb->prot |= PAGE_WRITE;
        if (value & 0x20)
            tlb->prot |= PAGE_EXEC;
        break;
    }
}
target_ulong helper_440_tlbre (uint32_t word, target_ulong entry)
{
    ppcemb_tlb_t *tlb;
    target_ulong ret;
    int size;

    entry &= 0x3F;
    tlb = &env->tlb[entry].tlbe;
    switch (word) {
    default:
        /* Just here to please gcc */
    case 0:
        ret = tlb->EPN;
        size = booke_page_size_to_tlb(tlb->size);
        if (size < 0 || size > 0xF)
            size = 1;
        ret |= size << 4;
        if (tlb->attr & 0x1)
            ret |= 0x100;
        if (tlb->prot & PAGE_VALID)
            ret |= 0x200;
        env->spr[SPR_440_MMUCR] &= ~0x000000FF;
        env->spr[SPR_440_MMUCR] |= tlb->PID;
        break;
    case 1:
        ret = tlb->RPN;
        break;
    case 2:
        ret = tlb->attr & ~0x1;
        if (tlb->prot & (PAGE_READ << 4))
            ret |= 0x1;
        if (tlb->prot & (PAGE_WRITE << 4))
            ret |= 0x2;
        if (tlb->prot & (PAGE_EXEC << 4))
            ret |= 0x4;
        if (tlb->prot & PAGE_READ)
            ret |= 0x8;
        if (tlb->prot & PAGE_WRITE)
            ret |= 0x10;
        if (tlb->prot & PAGE_EXEC)
            ret |= 0x20;
        break;
    }
    return ret;
}
target_ulong helper_440_tlbsx (target_ulong address)
{
    return ppcemb_tlb_search(env, address, env->spr[SPR_440_MMUCR] & 0xFF);
}

#endif /* !CONFIG_USER_ONLY */