2 * MIPS emulation helpers for qemu.
4 * Copyright (c) 2004-2005 Jocelyn Mayer
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
19 #include "qemu/osdep.h"
20 #include "qemu/main-loop.h"
23 #include "qemu/host-utils.h"
24 #include "exec/helper-proto.h"
25 #include "exec/exec-all.h"
26 #include "exec/cpu_ldst.h"
27 #include "exec/memop.h"
28 #include "sysemu/kvm.h"
29 #include "fpu/softfloat.h"
31 /*****************************************************************************/
32 /* Exceptions processing helpers */
34 void helper_raise_exception_err(CPUMIPSState
*env
, uint32_t exception
,
37 do_raise_exception_err(env
, exception
, error_code
, 0);
40 void helper_raise_exception(CPUMIPSState
*env
, uint32_t exception
)
42 do_raise_exception(env
, exception
, GETPC());
45 void helper_raise_exception_debug(CPUMIPSState
*env
)
47 do_raise_exception(env
, EXCP_DEBUG
, 0);
50 static void raise_exception(CPUMIPSState
*env
, uint32_t exception
)
52 do_raise_exception(env
, exception
, 0);
55 #if defined(CONFIG_USER_ONLY)
56 #define HELPER_LD(name, insn, type) \
57 static inline type do_##name(CPUMIPSState *env, target_ulong addr, \
58 int mem_idx, uintptr_t retaddr) \
60 return (type) cpu_##insn##_data_ra(env, addr, retaddr); \
63 #define HELPER_LD(name, insn, type) \
64 static inline type do_##name(CPUMIPSState *env, target_ulong addr, \
65 int mem_idx, uintptr_t retaddr) \
68 case 0: return (type) cpu_##insn##_kernel_ra(env, addr, retaddr); \
69 case 1: return (type) cpu_##insn##_super_ra(env, addr, retaddr); \
71 case 2: return (type) cpu_##insn##_user_ra(env, addr, retaddr); \
72 case 3: return (type) cpu_##insn##_error_ra(env, addr, retaddr); \
76 HELPER_LD(lw
, ldl
, int32_t)
77 #if defined(TARGET_MIPS64)
78 HELPER_LD(ld
, ldq
, int64_t)
82 #if defined(CONFIG_USER_ONLY)
83 #define HELPER_ST(name, insn, type) \
84 static inline void do_##name(CPUMIPSState *env, target_ulong addr, \
85 type val, int mem_idx, uintptr_t retaddr) \
87 cpu_##insn##_data_ra(env, addr, val, retaddr); \
90 #define HELPER_ST(name, insn, type) \
91 static inline void do_##name(CPUMIPSState *env, target_ulong addr, \
92 type val, int mem_idx, uintptr_t retaddr) \
96 cpu_##insn##_kernel_ra(env, addr, val, retaddr); \
99 cpu_##insn##_super_ra(env, addr, val, retaddr); \
103 cpu_##insn##_user_ra(env, addr, val, retaddr); \
106 cpu_##insn##_error_ra(env, addr, val, retaddr); \
111 HELPER_ST(sb
, stb
, uint8_t)
112 HELPER_ST(sw
, stl
, uint32_t)
113 #if defined(TARGET_MIPS64)
114 HELPER_ST(sd
, stq
, uint64_t)
118 /* 64 bits arithmetic for 32 bits hosts */
119 static inline uint64_t get_HILO(CPUMIPSState
*env
)
121 return ((uint64_t)(env
->active_tc
.HI
[0]) << 32) |
122 (uint32_t)env
->active_tc
.LO
[0];
125 static inline target_ulong
set_HIT0_LO(CPUMIPSState
*env
, uint64_t HILO
)
127 env
->active_tc
.LO
[0] = (int32_t)(HILO
& 0xFFFFFFFF);
128 return env
->active_tc
.HI
[0] = (int32_t)(HILO
>> 32);
131 static inline target_ulong
set_HI_LOT0(CPUMIPSState
*env
, uint64_t HILO
)
133 target_ulong tmp
= env
->active_tc
.LO
[0] = (int32_t)(HILO
& 0xFFFFFFFF);
134 env
->active_tc
.HI
[0] = (int32_t)(HILO
>> 32);
138 /* Multiplication variants of the vr54xx. */
139 target_ulong
helper_muls(CPUMIPSState
*env
, target_ulong arg1
,
142 return set_HI_LOT0(env
, 0 - ((int64_t)(int32_t)arg1
*
143 (int64_t)(int32_t)arg2
));
146 target_ulong
helper_mulsu(CPUMIPSState
*env
, target_ulong arg1
,
149 return set_HI_LOT0(env
, 0 - (uint64_t)(uint32_t)arg1
*
150 (uint64_t)(uint32_t)arg2
);
153 target_ulong
helper_macc(CPUMIPSState
*env
, target_ulong arg1
,
156 return set_HI_LOT0(env
, (int64_t)get_HILO(env
) + (int64_t)(int32_t)arg1
*
157 (int64_t)(int32_t)arg2
);
160 target_ulong
helper_macchi(CPUMIPSState
*env
, target_ulong arg1
,
163 return set_HIT0_LO(env
, (int64_t)get_HILO(env
) + (int64_t)(int32_t)arg1
*
164 (int64_t)(int32_t)arg2
);
167 target_ulong
helper_maccu(CPUMIPSState
*env
, target_ulong arg1
,
170 return set_HI_LOT0(env
, (uint64_t)get_HILO(env
) +
171 (uint64_t)(uint32_t)arg1
* (uint64_t)(uint32_t)arg2
);
174 target_ulong
helper_macchiu(CPUMIPSState
*env
, target_ulong arg1
,
177 return set_HIT0_LO(env
, (uint64_t)get_HILO(env
) +
178 (uint64_t)(uint32_t)arg1
* (uint64_t)(uint32_t)arg2
);
181 target_ulong
helper_msac(CPUMIPSState
*env
, target_ulong arg1
,
184 return set_HI_LOT0(env
, (int64_t)get_HILO(env
) - (int64_t)(int32_t)arg1
*
185 (int64_t)(int32_t)arg2
);
188 target_ulong
helper_msachi(CPUMIPSState
*env
, target_ulong arg1
,
191 return set_HIT0_LO(env
, (int64_t)get_HILO(env
) - (int64_t)(int32_t)arg1
*
192 (int64_t)(int32_t)arg2
);
195 target_ulong
helper_msacu(CPUMIPSState
*env
, target_ulong arg1
,
198 return set_HI_LOT0(env
, (uint64_t)get_HILO(env
) -
199 (uint64_t)(uint32_t)arg1
* (uint64_t)(uint32_t)arg2
);
202 target_ulong
helper_msachiu(CPUMIPSState
*env
, target_ulong arg1
,
205 return set_HIT0_LO(env
, (uint64_t)get_HILO(env
) -
206 (uint64_t)(uint32_t)arg1
* (uint64_t)(uint32_t)arg2
);
209 target_ulong
helper_mulhi(CPUMIPSState
*env
, target_ulong arg1
,
212 return set_HIT0_LO(env
, (int64_t)(int32_t)arg1
* (int64_t)(int32_t)arg2
);
215 target_ulong
helper_mulhiu(CPUMIPSState
*env
, target_ulong arg1
,
218 return set_HIT0_LO(env
, (uint64_t)(uint32_t)arg1
*
219 (uint64_t)(uint32_t)arg2
);
222 target_ulong
helper_mulshi(CPUMIPSState
*env
, target_ulong arg1
,
225 return set_HIT0_LO(env
, 0 - (int64_t)(int32_t)arg1
*
226 (int64_t)(int32_t)arg2
);
229 target_ulong
helper_mulshiu(CPUMIPSState
*env
, target_ulong arg1
,
232 return set_HIT0_LO(env
, 0 - (uint64_t)(uint32_t)arg1
*
233 (uint64_t)(uint32_t)arg2
);
236 static inline target_ulong
bitswap(target_ulong v
)
238 v
= ((v
>> 1) & (target_ulong
)0x5555555555555555ULL
) |
239 ((v
& (target_ulong
)0x5555555555555555ULL
) << 1);
240 v
= ((v
>> 2) & (target_ulong
)0x3333333333333333ULL
) |
241 ((v
& (target_ulong
)0x3333333333333333ULL
) << 2);
242 v
= ((v
>> 4) & (target_ulong
)0x0F0F0F0F0F0F0F0FULL
) |
243 ((v
& (target_ulong
)0x0F0F0F0F0F0F0F0FULL
) << 4);
248 target_ulong
helper_dbitswap(target_ulong rt
)
254 target_ulong
helper_bitswap(target_ulong rt
)
256 return (int32_t)bitswap(rt
);
259 target_ulong
helper_rotx(target_ulong rs
, uint32_t shift
, uint32_t shiftx
,
263 uint64_t tmp0
= ((uint64_t)rs
) << 32 | ((uint64_t)rs
& 0xffffffff);
264 uint64_t tmp1
= tmp0
;
265 for (i
= 0; i
<= 46; i
++) {
273 if (stripe
!= 0 && !(i
& 0x4)) {
277 if (tmp0
& (1LL << (i
+ 16))) {
285 uint64_t tmp2
= tmp1
;
286 for (i
= 0; i
<= 38; i
++) {
295 if (tmp1
& (1LL << (i
+ 8))) {
303 uint64_t tmp3
= tmp2
;
304 for (i
= 0; i
<= 34; i
++) {
312 if (tmp2
& (1LL << (i
+ 4))) {
320 uint64_t tmp4
= tmp3
;
321 for (i
= 0; i
<= 32; i
++) {
329 if (tmp3
& (1LL << (i
+ 2))) {
337 uint64_t tmp5
= tmp4
;
338 for (i
= 0; i
<= 31; i
++) {
342 if (tmp4
& (1LL << (i
+ 1))) {
350 return (int64_t)(int32_t)(uint32_t)tmp5
;
353 #ifndef CONFIG_USER_ONLY
355 static inline hwaddr
do_translate_address(CPUMIPSState
*env
,
356 target_ulong address
,
357 int rw
, uintptr_t retaddr
)
360 CPUState
*cs
= env_cpu(env
);
362 paddr
= cpu_mips_translate_address(env
, address
, rw
);
365 cpu_loop_exit_restore(cs
, retaddr
);
371 #define HELPER_LD_ATOMIC(name, insn, almask) \
372 target_ulong helper_##name(CPUMIPSState *env, target_ulong arg, int mem_idx) \
374 if (arg & almask) { \
375 if (!(env->hflags & MIPS_HFLAG_DM)) { \
376 env->CP0_BadVAddr = arg; \
378 do_raise_exception(env, EXCP_AdEL, GETPC()); \
380 env->CP0_LLAddr = do_translate_address(env, arg, 0, GETPC()); \
382 env->llval = do_##insn(env, arg, mem_idx, GETPC()); \
385 HELPER_LD_ATOMIC(ll
, lw
, 0x3)
387 HELPER_LD_ATOMIC(lld
, ld
, 0x7)
389 #undef HELPER_LD_ATOMIC
392 #ifdef TARGET_WORDS_BIGENDIAN
393 #define GET_LMASK(v) ((v) & 3)
394 #define GET_OFFSET(addr, offset) (addr + (offset))
396 #define GET_LMASK(v) (((v) & 3) ^ 3)
397 #define GET_OFFSET(addr, offset) (addr - (offset))
400 void helper_swl(CPUMIPSState
*env
, target_ulong arg1
, target_ulong arg2
,
403 do_sb(env
, arg2
, (uint8_t)(arg1
>> 24), mem_idx
, GETPC());
405 if (GET_LMASK(arg2
) <= 2) {
406 do_sb(env
, GET_OFFSET(arg2
, 1), (uint8_t)(arg1
>> 16), mem_idx
,
410 if (GET_LMASK(arg2
) <= 1) {
411 do_sb(env
, GET_OFFSET(arg2
, 2), (uint8_t)(arg1
>> 8), mem_idx
,
415 if (GET_LMASK(arg2
) == 0) {
416 do_sb(env
, GET_OFFSET(arg2
, 3), (uint8_t)arg1
, mem_idx
,
421 void helper_swr(CPUMIPSState
*env
, target_ulong arg1
, target_ulong arg2
,
424 do_sb(env
, arg2
, (uint8_t)arg1
, mem_idx
, GETPC());
426 if (GET_LMASK(arg2
) >= 1) {
427 do_sb(env
, GET_OFFSET(arg2
, -1), (uint8_t)(arg1
>> 8), mem_idx
,
431 if (GET_LMASK(arg2
) >= 2) {
432 do_sb(env
, GET_OFFSET(arg2
, -2), (uint8_t)(arg1
>> 16), mem_idx
,
436 if (GET_LMASK(arg2
) == 3) {
437 do_sb(env
, GET_OFFSET(arg2
, -3), (uint8_t)(arg1
>> 24), mem_idx
,
442 #if defined(TARGET_MIPS64)
444 * "half" load and stores. We must do the memory access inline,
445 * or fault handling won't work.
447 #ifdef TARGET_WORDS_BIGENDIAN
448 #define GET_LMASK64(v) ((v) & 7)
450 #define GET_LMASK64(v) (((v) & 7) ^ 7)
453 void helper_sdl(CPUMIPSState
*env
, target_ulong arg1
, target_ulong arg2
,
456 do_sb(env
, arg2
, (uint8_t)(arg1
>> 56), mem_idx
, GETPC());
458 if (GET_LMASK64(arg2
) <= 6) {
459 do_sb(env
, GET_OFFSET(arg2
, 1), (uint8_t)(arg1
>> 48), mem_idx
,
463 if (GET_LMASK64(arg2
) <= 5) {
464 do_sb(env
, GET_OFFSET(arg2
, 2), (uint8_t)(arg1
>> 40), mem_idx
,
468 if (GET_LMASK64(arg2
) <= 4) {
469 do_sb(env
, GET_OFFSET(arg2
, 3), (uint8_t)(arg1
>> 32), mem_idx
,
473 if (GET_LMASK64(arg2
) <= 3) {
474 do_sb(env
, GET_OFFSET(arg2
, 4), (uint8_t)(arg1
>> 24), mem_idx
,
478 if (GET_LMASK64(arg2
) <= 2) {
479 do_sb(env
, GET_OFFSET(arg2
, 5), (uint8_t)(arg1
>> 16), mem_idx
,
483 if (GET_LMASK64(arg2
) <= 1) {
484 do_sb(env
, GET_OFFSET(arg2
, 6), (uint8_t)(arg1
>> 8), mem_idx
,
488 if (GET_LMASK64(arg2
) <= 0) {
489 do_sb(env
, GET_OFFSET(arg2
, 7), (uint8_t)arg1
, mem_idx
,
494 void helper_sdr(CPUMIPSState
*env
, target_ulong arg1
, target_ulong arg2
,
497 do_sb(env
, arg2
, (uint8_t)arg1
, mem_idx
, GETPC());
499 if (GET_LMASK64(arg2
) >= 1) {
500 do_sb(env
, GET_OFFSET(arg2
, -1), (uint8_t)(arg1
>> 8), mem_idx
,
504 if (GET_LMASK64(arg2
) >= 2) {
505 do_sb(env
, GET_OFFSET(arg2
, -2), (uint8_t)(arg1
>> 16), mem_idx
,
509 if (GET_LMASK64(arg2
) >= 3) {
510 do_sb(env
, GET_OFFSET(arg2
, -3), (uint8_t)(arg1
>> 24), mem_idx
,
514 if (GET_LMASK64(arg2
) >= 4) {
515 do_sb(env
, GET_OFFSET(arg2
, -4), (uint8_t)(arg1
>> 32), mem_idx
,
519 if (GET_LMASK64(arg2
) >= 5) {
520 do_sb(env
, GET_OFFSET(arg2
, -5), (uint8_t)(arg1
>> 40), mem_idx
,
524 if (GET_LMASK64(arg2
) >= 6) {
525 do_sb(env
, GET_OFFSET(arg2
, -6), (uint8_t)(arg1
>> 48), mem_idx
,
529 if (GET_LMASK64(arg2
) == 7) {
530 do_sb(env
, GET_OFFSET(arg2
, -7), (uint8_t)(arg1
>> 56), mem_idx
,
534 #endif /* TARGET_MIPS64 */
536 static const int multiple_regs
[] = { 16, 17, 18, 19, 20, 21, 22, 23, 30 };
538 void helper_lwm(CPUMIPSState
*env
, target_ulong addr
, target_ulong reglist
,
541 target_ulong base_reglist
= reglist
& 0xf;
542 target_ulong do_r31
= reglist
& 0x10;
544 if (base_reglist
> 0 && base_reglist
<= ARRAY_SIZE(multiple_regs
)) {
547 for (i
= 0; i
< base_reglist
; i
++) {
548 env
->active_tc
.gpr
[multiple_regs
[i
]] =
549 (target_long
)do_lw(env
, addr
, mem_idx
, GETPC());
555 env
->active_tc
.gpr
[31] = (target_long
)do_lw(env
, addr
, mem_idx
,
560 void helper_swm(CPUMIPSState
*env
, target_ulong addr
, target_ulong reglist
,
563 target_ulong base_reglist
= reglist
& 0xf;
564 target_ulong do_r31
= reglist
& 0x10;
566 if (base_reglist
> 0 && base_reglist
<= ARRAY_SIZE(multiple_regs
)) {
569 for (i
= 0; i
< base_reglist
; i
++) {
570 do_sw(env
, addr
, env
->active_tc
.gpr
[multiple_regs
[i
]], mem_idx
,
577 do_sw(env
, addr
, env
->active_tc
.gpr
[31], mem_idx
, GETPC());
581 #if defined(TARGET_MIPS64)
582 void helper_ldm(CPUMIPSState
*env
, target_ulong addr
, target_ulong reglist
,
585 target_ulong base_reglist
= reglist
& 0xf;
586 target_ulong do_r31
= reglist
& 0x10;
588 if (base_reglist
> 0 && base_reglist
<= ARRAY_SIZE(multiple_regs
)) {
591 for (i
= 0; i
< base_reglist
; i
++) {
592 env
->active_tc
.gpr
[multiple_regs
[i
]] = do_ld(env
, addr
, mem_idx
,
599 env
->active_tc
.gpr
[31] = do_ld(env
, addr
, mem_idx
, GETPC());
603 void helper_sdm(CPUMIPSState
*env
, target_ulong addr
, target_ulong reglist
,
606 target_ulong base_reglist
= reglist
& 0xf;
607 target_ulong do_r31
= reglist
& 0x10;
609 if (base_reglist
> 0 && base_reglist
<= ARRAY_SIZE(multiple_regs
)) {
612 for (i
= 0; i
< base_reglist
; i
++) {
613 do_sd(env
, addr
, env
->active_tc
.gpr
[multiple_regs
[i
]], mem_idx
,
620 do_sd(env
, addr
, env
->active_tc
.gpr
[31], mem_idx
, GETPC());
625 #ifndef CONFIG_USER_ONLY
627 static bool mips_vpe_is_wfi(MIPSCPU
*c
)
629 CPUState
*cpu
= CPU(c
);
630 CPUMIPSState
*env
= &c
->env
;
633 * If the VPE is halted but otherwise active, it means it's waiting for
636 return cpu
->halted
&& mips_vpe_active(env
);
639 static bool mips_vp_is_wfi(MIPSCPU
*c
)
641 CPUState
*cpu
= CPU(c
);
642 CPUMIPSState
*env
= &c
->env
;
644 return cpu
->halted
&& mips_vp_active(env
);
647 static inline void mips_vpe_wake(MIPSCPU
*c
)
650 * Don't set ->halted = 0 directly, let it be done via cpu_has_work
651 * because there might be other conditions that state that c should
654 qemu_mutex_lock_iothread();
655 cpu_interrupt(CPU(c
), CPU_INTERRUPT_WAKE
);
656 qemu_mutex_unlock_iothread();
659 static inline void mips_vpe_sleep(MIPSCPU
*cpu
)
661 CPUState
*cs
= CPU(cpu
);
664 * The VPE was shut off, really go to bed.
665 * Reset any old _WAKE requests.
668 cpu_reset_interrupt(cs
, CPU_INTERRUPT_WAKE
);
671 static inline void mips_tc_wake(MIPSCPU
*cpu
, int tc
)
673 CPUMIPSState
*c
= &cpu
->env
;
675 /* FIXME: TC reschedule. */
676 if (mips_vpe_active(c
) && !mips_vpe_is_wfi(cpu
)) {
681 static inline void mips_tc_sleep(MIPSCPU
*cpu
, int tc
)
683 CPUMIPSState
*c
= &cpu
->env
;
685 /* FIXME: TC reschedule. */
686 if (!mips_vpe_active(c
)) {
693 * @env: CPU from which mapping is performed.
694 * @tc: Should point to an int with the value of the global TC index.
696 * This function will transform @tc into a local index within the
697 * returned #CPUMIPSState.
701 * FIXME: This code assumes that all VPEs have the same number of TCs,
702 * which depends on runtime setup. Can probably be fixed by
703 * walking the list of CPUMIPSStates.
705 static CPUMIPSState
*mips_cpu_map_tc(CPUMIPSState
*env
, int *tc
)
713 if (!(env
->CP0_VPEConf0
& (1 << CP0VPEC0_MVP
))) {
714 /* Not allowed to address other CPUs. */
715 *tc
= env
->current_tc
;
720 vpe_idx
= tc_idx
/ cs
->nr_threads
;
721 *tc
= tc_idx
% cs
->nr_threads
;
722 other_cs
= qemu_get_cpu(vpe_idx
);
723 if (other_cs
== NULL
) {
726 cpu
= MIPS_CPU(other_cs
);
731 * The per VPE CP0_Status register shares some fields with the per TC
732 * CP0_TCStatus registers. These fields are wired to the same registers,
733 * so changes to either of them should be reflected on both registers.
735 * Also, EntryHi shares the bottom 8 bit ASID with TCStauts.
737 * These helper call synchronizes the regs for a given cpu.
741 * Called for updates to CP0_Status. Defined in "cpu.h" for gdbstub.c.
742 * static inline void sync_c0_status(CPUMIPSState *env, CPUMIPSState *cpu,
746 /* Called for updates to CP0_TCStatus. */
747 static void sync_c0_tcstatus(CPUMIPSState
*cpu
, int tc
,
751 uint32_t tcu
, tmx
, tasid
, tksu
;
752 uint32_t mask
= ((1U << CP0St_CU3
)
759 tcu
= (v
>> CP0TCSt_TCU0
) & 0xf;
760 tmx
= (v
>> CP0TCSt_TMX
) & 0x1;
761 tasid
= v
& cpu
->CP0_EntryHi_ASID_mask
;
762 tksu
= (v
>> CP0TCSt_TKSU
) & 0x3;
764 status
= tcu
<< CP0St_CU0
;
765 status
|= tmx
<< CP0St_MX
;
766 status
|= tksu
<< CP0St_KSU
;
768 cpu
->CP0_Status
&= ~mask
;
769 cpu
->CP0_Status
|= status
;
771 /* Sync the TASID with EntryHi. */
772 cpu
->CP0_EntryHi
&= ~cpu
->CP0_EntryHi_ASID_mask
;
773 cpu
->CP0_EntryHi
|= tasid
;
778 /* Called for updates to CP0_EntryHi. */
779 static void sync_c0_entryhi(CPUMIPSState
*cpu
, int tc
)
782 uint32_t asid
, v
= cpu
->CP0_EntryHi
;
784 asid
= v
& cpu
->CP0_EntryHi_ASID_mask
;
786 if (tc
== cpu
->current_tc
) {
787 tcst
= &cpu
->active_tc
.CP0_TCStatus
;
789 tcst
= &cpu
->tcs
[tc
].CP0_TCStatus
;
792 *tcst
&= ~cpu
->CP0_EntryHi_ASID_mask
;
797 target_ulong
helper_mfc0_mvpcontrol(CPUMIPSState
*env
)
799 return env
->mvp
->CP0_MVPControl
;
802 target_ulong
helper_mfc0_mvpconf0(CPUMIPSState
*env
)
804 return env
->mvp
->CP0_MVPConf0
;
807 target_ulong
helper_mfc0_mvpconf1(CPUMIPSState
*env
)
809 return env
->mvp
->CP0_MVPConf1
;
812 target_ulong
helper_mfc0_random(CPUMIPSState
*env
)
814 return (int32_t)cpu_mips_get_random(env
);
817 target_ulong
helper_mfc0_tcstatus(CPUMIPSState
*env
)
819 return env
->active_tc
.CP0_TCStatus
;
822 target_ulong
helper_mftc0_tcstatus(CPUMIPSState
*env
)
824 int other_tc
= env
->CP0_VPEControl
& (0xff << CP0VPECo_TargTC
);
825 CPUMIPSState
*other
= mips_cpu_map_tc(env
, &other_tc
);
827 if (other_tc
== other
->current_tc
) {
828 return other
->active_tc
.CP0_TCStatus
;
830 return other
->tcs
[other_tc
].CP0_TCStatus
;
834 target_ulong
helper_mfc0_tcbind(CPUMIPSState
*env
)
836 return env
->active_tc
.CP0_TCBind
;
839 target_ulong
helper_mftc0_tcbind(CPUMIPSState
*env
)
841 int other_tc
= env
->CP0_VPEControl
& (0xff << CP0VPECo_TargTC
);
842 CPUMIPSState
*other
= mips_cpu_map_tc(env
, &other_tc
);
844 if (other_tc
== other
->current_tc
) {
845 return other
->active_tc
.CP0_TCBind
;
847 return other
->tcs
[other_tc
].CP0_TCBind
;
851 target_ulong
helper_mfc0_tcrestart(CPUMIPSState
*env
)
853 return env
->active_tc
.PC
;
856 target_ulong
helper_mftc0_tcrestart(CPUMIPSState
*env
)
858 int other_tc
= env
->CP0_VPEControl
& (0xff << CP0VPECo_TargTC
);
859 CPUMIPSState
*other
= mips_cpu_map_tc(env
, &other_tc
);
861 if (other_tc
== other
->current_tc
) {
862 return other
->active_tc
.PC
;
864 return other
->tcs
[other_tc
].PC
;
868 target_ulong
helper_mfc0_tchalt(CPUMIPSState
*env
)
870 return env
->active_tc
.CP0_TCHalt
;
873 target_ulong
helper_mftc0_tchalt(CPUMIPSState
*env
)
875 int other_tc
= env
->CP0_VPEControl
& (0xff << CP0VPECo_TargTC
);
876 CPUMIPSState
*other
= mips_cpu_map_tc(env
, &other_tc
);
878 if (other_tc
== other
->current_tc
) {
879 return other
->active_tc
.CP0_TCHalt
;
881 return other
->tcs
[other_tc
].CP0_TCHalt
;
885 target_ulong
helper_mfc0_tccontext(CPUMIPSState
*env
)
887 return env
->active_tc
.CP0_TCContext
;
890 target_ulong
helper_mftc0_tccontext(CPUMIPSState
*env
)
892 int other_tc
= env
->CP0_VPEControl
& (0xff << CP0VPECo_TargTC
);
893 CPUMIPSState
*other
= mips_cpu_map_tc(env
, &other_tc
);
895 if (other_tc
== other
->current_tc
) {
896 return other
->active_tc
.CP0_TCContext
;
898 return other
->tcs
[other_tc
].CP0_TCContext
;
902 target_ulong
helper_mfc0_tcschedule(CPUMIPSState
*env
)
904 return env
->active_tc
.CP0_TCSchedule
;
907 target_ulong
helper_mftc0_tcschedule(CPUMIPSState
*env
)
909 int other_tc
= env
->CP0_VPEControl
& (0xff << CP0VPECo_TargTC
);
910 CPUMIPSState
*other
= mips_cpu_map_tc(env
, &other_tc
);
912 if (other_tc
== other
->current_tc
) {
913 return other
->active_tc
.CP0_TCSchedule
;
915 return other
->tcs
[other_tc
].CP0_TCSchedule
;
919 target_ulong
helper_mfc0_tcschefback(CPUMIPSState
*env
)
921 return env
->active_tc
.CP0_TCScheFBack
;
924 target_ulong
helper_mftc0_tcschefback(CPUMIPSState
*env
)
926 int other_tc
= env
->CP0_VPEControl
& (0xff << CP0VPECo_TargTC
);
927 CPUMIPSState
*other
= mips_cpu_map_tc(env
, &other_tc
);
929 if (other_tc
== other
->current_tc
) {
930 return other
->active_tc
.CP0_TCScheFBack
;
932 return other
->tcs
[other_tc
].CP0_TCScheFBack
;
936 target_ulong
helper_mfc0_count(CPUMIPSState
*env
)
938 return (int32_t)cpu_mips_get_count(env
);
941 target_ulong
helper_mfc0_saar(CPUMIPSState
*env
)
943 if ((env
->CP0_SAARI
& 0x3f) < 2) {
944 return (int32_t) env
->CP0_SAAR
[env
->CP0_SAARI
& 0x3f];
949 target_ulong
helper_mfhc0_saar(CPUMIPSState
*env
)
951 if ((env
->CP0_SAARI
& 0x3f) < 2) {
952 return env
->CP0_SAAR
[env
->CP0_SAARI
& 0x3f] >> 32;
957 target_ulong
helper_mftc0_entryhi(CPUMIPSState
*env
)
959 int other_tc
= env
->CP0_VPEControl
& (0xff << CP0VPECo_TargTC
);
960 CPUMIPSState
*other
= mips_cpu_map_tc(env
, &other_tc
);
962 return other
->CP0_EntryHi
;
965 target_ulong
helper_mftc0_cause(CPUMIPSState
*env
)
967 int other_tc
= env
->CP0_VPEControl
& (0xff << CP0VPECo_TargTC
);
969 CPUMIPSState
*other
= mips_cpu_map_tc(env
, &other_tc
);
971 if (other_tc
== other
->current_tc
) {
972 tccause
= other
->CP0_Cause
;
974 tccause
= other
->CP0_Cause
;
980 target_ulong
helper_mftc0_status(CPUMIPSState
*env
)
982 int other_tc
= env
->CP0_VPEControl
& (0xff << CP0VPECo_TargTC
);
983 CPUMIPSState
*other
= mips_cpu_map_tc(env
, &other_tc
);
985 return other
->CP0_Status
;
988 target_ulong
helper_mfc0_lladdr(CPUMIPSState
*env
)
990 return (int32_t)(env
->CP0_LLAddr
>> env
->CP0_LLAddr_shift
);
993 target_ulong
helper_mfc0_maar(CPUMIPSState
*env
)
995 return (int32_t) env
->CP0_MAAR
[env
->CP0_MAARI
];
998 target_ulong
helper_mfhc0_maar(CPUMIPSState
*env
)
1000 return env
->CP0_MAAR
[env
->CP0_MAARI
] >> 32;
1003 target_ulong
helper_mfc0_watchlo(CPUMIPSState
*env
, uint32_t sel
)
1005 return (int32_t)env
->CP0_WatchLo
[sel
];
1008 target_ulong
helper_mfc0_watchhi(CPUMIPSState
*env
, uint32_t sel
)
1010 return env
->CP0_WatchHi
[sel
];
1013 target_ulong
helper_mfc0_debug(CPUMIPSState
*env
)
1015 target_ulong t0
= env
->CP0_Debug
;
1016 if (env
->hflags
& MIPS_HFLAG_DM
) {
1017 t0
|= 1 << CP0DB_DM
;
1023 target_ulong
helper_mftc0_debug(CPUMIPSState
*env
)
1025 int other_tc
= env
->CP0_VPEControl
& (0xff << CP0VPECo_TargTC
);
1027 CPUMIPSState
*other
= mips_cpu_map_tc(env
, &other_tc
);
1029 if (other_tc
== other
->current_tc
) {
1030 tcstatus
= other
->active_tc
.CP0_Debug_tcstatus
;
1032 tcstatus
= other
->tcs
[other_tc
].CP0_Debug_tcstatus
;
1035 /* XXX: Might be wrong, check with EJTAG spec. */
1036 return (other
->CP0_Debug
& ~((1 << CP0DB_SSt
) | (1 << CP0DB_Halt
))) |
1037 (tcstatus
& ((1 << CP0DB_SSt
) | (1 << CP0DB_Halt
)));
1040 #if defined(TARGET_MIPS64)
1041 target_ulong
helper_dmfc0_tcrestart(CPUMIPSState
*env
)
1043 return env
->active_tc
.PC
;
1046 target_ulong
helper_dmfc0_tchalt(CPUMIPSState
*env
)
1048 return env
->active_tc
.CP0_TCHalt
;
1051 target_ulong
helper_dmfc0_tccontext(CPUMIPSState
*env
)
1053 return env
->active_tc
.CP0_TCContext
;
1056 target_ulong
helper_dmfc0_tcschedule(CPUMIPSState
*env
)
1058 return env
->active_tc
.CP0_TCSchedule
;
1061 target_ulong
helper_dmfc0_tcschefback(CPUMIPSState
*env
)
1063 return env
->active_tc
.CP0_TCScheFBack
;
1066 target_ulong
helper_dmfc0_lladdr(CPUMIPSState
*env
)
1068 return env
->CP0_LLAddr
>> env
->CP0_LLAddr_shift
;
1071 target_ulong
helper_dmfc0_maar(CPUMIPSState
*env
)
1073 return env
->CP0_MAAR
[env
->CP0_MAARI
];
1076 target_ulong
helper_dmfc0_watchlo(CPUMIPSState
*env
, uint32_t sel
)
1078 return env
->CP0_WatchLo
[sel
];
1081 target_ulong
helper_dmfc0_saar(CPUMIPSState
*env
)
1083 if ((env
->CP0_SAARI
& 0x3f) < 2) {
1084 return env
->CP0_SAAR
[env
->CP0_SAARI
& 0x3f];
1088 #endif /* TARGET_MIPS64 */
1090 void helper_mtc0_index(CPUMIPSState
*env
, target_ulong arg1
)
1092 uint32_t index_p
= env
->CP0_Index
& 0x80000000;
1093 uint32_t tlb_index
= arg1
& 0x7fffffff;
1094 if (tlb_index
< env
->tlb
->nb_tlb
) {
1095 if (env
->insn_flags
& ISA_MIPS32R6
) {
1096 index_p
|= arg1
& 0x80000000;
1098 env
->CP0_Index
= index_p
| tlb_index
;
1102 void helper_mtc0_mvpcontrol(CPUMIPSState
*env
, target_ulong arg1
)
1107 if (env
->CP0_VPEConf0
& (1 << CP0VPEC0_MVP
)) {
1108 mask
|= (1 << CP0MVPCo_CPA
) | (1 << CP0MVPCo_VPC
) |
1109 (1 << CP0MVPCo_EVP
);
1111 if (env
->mvp
->CP0_MVPControl
& (1 << CP0MVPCo_VPC
)) {
1112 mask
|= (1 << CP0MVPCo_STLB
);
1114 newval
= (env
->mvp
->CP0_MVPControl
& ~mask
) | (arg1
& mask
);
1116 /* TODO: Enable/disable shared TLB, enable/disable VPEs. */
1118 env
->mvp
->CP0_MVPControl
= newval
;
1121 void helper_mtc0_vpecontrol(CPUMIPSState
*env
, target_ulong arg1
)
1126 mask
= (1 << CP0VPECo_YSI
) | (1 << CP0VPECo_GSI
) |
1127 (1 << CP0VPECo_TE
) | (0xff << CP0VPECo_TargTC
);
1128 newval
= (env
->CP0_VPEControl
& ~mask
) | (arg1
& mask
);
1131 * Yield scheduler intercept not implemented.
1132 * Gating storage scheduler intercept not implemented.
1135 /* TODO: Enable/disable TCs. */
1137 env
->CP0_VPEControl
= newval
;
1140 void helper_mttc0_vpecontrol(CPUMIPSState
*env
, target_ulong arg1
)
1142 int other_tc
= env
->CP0_VPEControl
& (0xff << CP0VPECo_TargTC
);
1143 CPUMIPSState
*other
= mips_cpu_map_tc(env
, &other_tc
);
1147 mask
= (1 << CP0VPECo_YSI
) | (1 << CP0VPECo_GSI
) |
1148 (1 << CP0VPECo_TE
) | (0xff << CP0VPECo_TargTC
);
1149 newval
= (other
->CP0_VPEControl
& ~mask
) | (arg1
& mask
);
1151 /* TODO: Enable/disable TCs. */
1153 other
->CP0_VPEControl
= newval
;
1156 target_ulong
helper_mftc0_vpecontrol(CPUMIPSState
*env
)
1158 int other_tc
= env
->CP0_VPEControl
& (0xff << CP0VPECo_TargTC
);
1159 CPUMIPSState
*other
= mips_cpu_map_tc(env
, &other_tc
);
1160 /* FIXME: Mask away return zero on read bits. */
1161 return other
->CP0_VPEControl
;
1164 target_ulong
helper_mftc0_vpeconf0(CPUMIPSState
*env
)
1166 int other_tc
= env
->CP0_VPEControl
& (0xff << CP0VPECo_TargTC
);
1167 CPUMIPSState
*other
= mips_cpu_map_tc(env
, &other_tc
);
1169 return other
->CP0_VPEConf0
;
1172 void helper_mtc0_vpeconf0(CPUMIPSState
*env
, target_ulong arg1
)
1177 if (env
->CP0_VPEConf0
& (1 << CP0VPEC0_MVP
)) {
1178 if (env
->CP0_VPEConf0
& (1 << CP0VPEC0_VPA
)) {
1179 mask
|= (0xff << CP0VPEC0_XTC
);
1181 mask
|= (1 << CP0VPEC0_MVP
) | (1 << CP0VPEC0_VPA
);
1183 newval
= (env
->CP0_VPEConf0
& ~mask
) | (arg1
& mask
);
1185 /* TODO: TC exclusive handling due to ERL/EXL. */
1187 env
->CP0_VPEConf0
= newval
;
1190 void helper_mttc0_vpeconf0(CPUMIPSState
*env
, target_ulong arg1
)
1192 int other_tc
= env
->CP0_VPEControl
& (0xff << CP0VPECo_TargTC
);
1193 CPUMIPSState
*other
= mips_cpu_map_tc(env
, &other_tc
);
1197 mask
|= (1 << CP0VPEC0_MVP
) | (1 << CP0VPEC0_VPA
);
1198 newval
= (other
->CP0_VPEConf0
& ~mask
) | (arg1
& mask
);
1200 /* TODO: TC exclusive handling due to ERL/EXL. */
1201 other
->CP0_VPEConf0
= newval
;
1204 void helper_mtc0_vpeconf1(CPUMIPSState
*env
, target_ulong arg1
)
1209 if (env
->mvp
->CP0_MVPControl
& (1 << CP0MVPCo_VPC
))
1210 mask
|= (0xff << CP0VPEC1_NCX
) | (0xff << CP0VPEC1_NCP2
) |
1211 (0xff << CP0VPEC1_NCP1
);
1212 newval
= (env
->CP0_VPEConf1
& ~mask
) | (arg1
& mask
);
1214 /* UDI not implemented. */
1215 /* CP2 not implemented. */
1217 /* TODO: Handle FPU (CP1) binding. */
1219 env
->CP0_VPEConf1
= newval
;
1222 void helper_mtc0_yqmask(CPUMIPSState
*env
, target_ulong arg1
)
1224 /* Yield qualifier inputs not implemented. */
1225 env
->CP0_YQMask
= 0x00000000;
1228 void helper_mtc0_vpeopt(CPUMIPSState
*env
, target_ulong arg1
)
1230 env
->CP0_VPEOpt
= arg1
& 0x0000ffff;
1233 #define MTC0_ENTRYLO_MASK(env) ((env->PAMask >> 6) & 0x3FFFFFFF)
1235 void helper_mtc0_entrylo0(CPUMIPSState
*env
, target_ulong arg1
)
1237 /* 1k pages not implemented */
1238 target_ulong rxi
= arg1
& (env
->CP0_PageGrain
& (3u << CP0PG_XIE
));
1239 env
->CP0_EntryLo0
= (arg1
& MTC0_ENTRYLO_MASK(env
))
1240 | (rxi
<< (CP0EnLo_XI
- 30));
1243 #if defined(TARGET_MIPS64)
1244 #define DMTC0_ENTRYLO_MASK(env) (env->PAMask >> 6)
1246 void helper_dmtc0_entrylo0(CPUMIPSState
*env
, uint64_t arg1
)
1248 uint64_t rxi
= arg1
& ((env
->CP0_PageGrain
& (3ull << CP0PG_XIE
)) << 32);
1249 env
->CP0_EntryLo0
= (arg1
& DMTC0_ENTRYLO_MASK(env
)) | rxi
;
1253 void helper_mtc0_tcstatus(CPUMIPSState
*env
, target_ulong arg1
)
1255 uint32_t mask
= env
->CP0_TCStatus_rw_bitmask
;
1258 newval
= (env
->active_tc
.CP0_TCStatus
& ~mask
) | (arg1
& mask
);
1260 env
->active_tc
.CP0_TCStatus
= newval
;
1261 sync_c0_tcstatus(env
, env
->current_tc
, newval
);
1264 void helper_mttc0_tcstatus(CPUMIPSState
*env
, target_ulong arg1
)
1266 int other_tc
= env
->CP0_VPEControl
& (0xff << CP0VPECo_TargTC
);
1267 CPUMIPSState
*other
= mips_cpu_map_tc(env
, &other_tc
);
1269 if (other_tc
== other
->current_tc
) {
1270 other
->active_tc
.CP0_TCStatus
= arg1
;
1272 other
->tcs
[other_tc
].CP0_TCStatus
= arg1
;
1274 sync_c0_tcstatus(other
, other_tc
, arg1
);
1277 void helper_mtc0_tcbind(CPUMIPSState
*env
, target_ulong arg1
)
1279 uint32_t mask
= (1 << CP0TCBd_TBE
);
1282 if (env
->mvp
->CP0_MVPControl
& (1 << CP0MVPCo_VPC
)) {
1283 mask
|= (1 << CP0TCBd_CurVPE
);
1285 newval
= (env
->active_tc
.CP0_TCBind
& ~mask
) | (arg1
& mask
);
1286 env
->active_tc
.CP0_TCBind
= newval
;
1289 void helper_mttc0_tcbind(CPUMIPSState
*env
, target_ulong arg1
)
1291 int other_tc
= env
->CP0_VPEControl
& (0xff << CP0VPECo_TargTC
);
1292 uint32_t mask
= (1 << CP0TCBd_TBE
);
1294 CPUMIPSState
*other
= mips_cpu_map_tc(env
, &other_tc
);
1296 if (other
->mvp
->CP0_MVPControl
& (1 << CP0MVPCo_VPC
)) {
1297 mask
|= (1 << CP0TCBd_CurVPE
);
1299 if (other_tc
== other
->current_tc
) {
1300 newval
= (other
->active_tc
.CP0_TCBind
& ~mask
) | (arg1
& mask
);
1301 other
->active_tc
.CP0_TCBind
= newval
;
1303 newval
= (other
->tcs
[other_tc
].CP0_TCBind
& ~mask
) | (arg1
& mask
);
1304 other
->tcs
[other_tc
].CP0_TCBind
= newval
;
1308 void helper_mtc0_tcrestart(CPUMIPSState
*env
, target_ulong arg1
)
1310 env
->active_tc
.PC
= arg1
;
1311 env
->active_tc
.CP0_TCStatus
&= ~(1 << CP0TCSt_TDS
);
1312 env
->CP0_LLAddr
= 0;
1314 /* MIPS16 not implemented. */
1317 void helper_mttc0_tcrestart(CPUMIPSState
*env
, target_ulong arg1
)
1319 int other_tc
= env
->CP0_VPEControl
& (0xff << CP0VPECo_TargTC
);
1320 CPUMIPSState
*other
= mips_cpu_map_tc(env
, &other_tc
);
1322 if (other_tc
== other
->current_tc
) {
1323 other
->active_tc
.PC
= arg1
;
1324 other
->active_tc
.CP0_TCStatus
&= ~(1 << CP0TCSt_TDS
);
1325 other
->CP0_LLAddr
= 0;
1327 /* MIPS16 not implemented. */
1329 other
->tcs
[other_tc
].PC
= arg1
;
1330 other
->tcs
[other_tc
].CP0_TCStatus
&= ~(1 << CP0TCSt_TDS
);
1331 other
->CP0_LLAddr
= 0;
1333 /* MIPS16 not implemented. */
1337 void helper_mtc0_tchalt(CPUMIPSState
*env
, target_ulong arg1
)
1339 MIPSCPU
*cpu
= env_archcpu(env
);
1341 env
->active_tc
.CP0_TCHalt
= arg1
& 0x1;
1343 /* TODO: Halt TC / Restart (if allocated+active) TC. */
1344 if (env
->active_tc
.CP0_TCHalt
& 1) {
1345 mips_tc_sleep(cpu
, env
->current_tc
);
1347 mips_tc_wake(cpu
, env
->current_tc
);
1351 void helper_mttc0_tchalt(CPUMIPSState
*env
, target_ulong arg1
)
1353 int other_tc
= env
->CP0_VPEControl
& (0xff << CP0VPECo_TargTC
);
1354 CPUMIPSState
*other
= mips_cpu_map_tc(env
, &other_tc
);
1355 MIPSCPU
*other_cpu
= env_archcpu(other
);
1357 /* TODO: Halt TC / Restart (if allocated+active) TC. */
1359 if (other_tc
== other
->current_tc
) {
1360 other
->active_tc
.CP0_TCHalt
= arg1
;
1362 other
->tcs
[other_tc
].CP0_TCHalt
= arg1
;
1366 mips_tc_sleep(other_cpu
, other_tc
);
1368 mips_tc_wake(other_cpu
, other_tc
);
1372 void helper_mtc0_tccontext(CPUMIPSState
*env
, target_ulong arg1
)
1374 env
->active_tc
.CP0_TCContext
= arg1
;
/* Write TCContext of the TC selected by VPEControl.TargTC. */
void helper_mttc0_tccontext(CPUMIPSState *env, target_ulong arg1)
{
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
    CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);

    if (other_tc == other->current_tc) {
        other->active_tc.CP0_TCContext = arg1;
    } else {
        other->tcs[other_tc].CP0_TCContext = arg1;
    }
}
/* Write TCSchedule (scheduler hint register) for the current TC. */
void helper_mtc0_tcschedule(CPUMIPSState *env, target_ulong arg1)
{
    env->active_tc.CP0_TCSchedule = arg1;
}
/* Write TCSchedule of the TC selected by VPEControl.TargTC. */
void helper_mttc0_tcschedule(CPUMIPSState *env, target_ulong arg1)
{
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
    CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);

    if (other_tc == other->current_tc) {
        other->active_tc.CP0_TCSchedule = arg1;
    } else {
        other->tcs[other_tc].CP0_TCSchedule = arg1;
    }
}
/* Write TCScheFBack (scheduler feedback register) for the current TC. */
void helper_mtc0_tcschefback(CPUMIPSState *env, target_ulong arg1)
{
    env->active_tc.CP0_TCScheFBack = arg1;
}
/* Write TCScheFBack of the TC selected by VPEControl.TargTC. */
void helper_mttc0_tcschefback(CPUMIPSState *env, target_ulong arg1)
{
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
    CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);

    if (other_tc == other->current_tc) {
        other->active_tc.CP0_TCScheFBack = arg1;
    } else {
        other->tcs[other_tc].CP0_TCScheFBack = arg1;
    }
}
/*
 * Write EntryLo1: keep the writable bits per MTC0_ENTRYLO_MASK and
 * relocate the RI/XI bits (gated by PageGrain.XIE/RIE) into position.
 */
void helper_mtc0_entrylo1(CPUMIPSState *env, target_ulong arg1)
{
    /* 1k pages not implemented */
    target_ulong rxi = arg1 & (env->CP0_PageGrain & (3u << CP0PG_XIE));
    env->CP0_EntryLo1 = (arg1 & MTC0_ENTRYLO_MASK(env))
                        | (rxi << (CP0EnLo_XI - 30));
}
#if defined(TARGET_MIPS64)
/* 64-bit write to EntryLo1: RI/XI live in their architectural positions. */
void helper_dmtc0_entrylo1(CPUMIPSState *env, uint64_t arg1)
{
    uint64_t rxi = arg1 & ((env->CP0_PageGrain & (3ull << CP0PG_XIE)) << 32);
    env->CP0_EntryLo1 = (arg1 & DMTC0_ENTRYLO_MASK(env)) | rxi;
}
#endif /* NOTE(review): #endif elided by extraction; restored. */
/* Write Context: low 23 bits (BadVPN2 area) are read-only to software. */
void helper_mtc0_context(CPUMIPSState *env, target_ulong arg1)
{
    env->CP0_Context = (env->CP0_Context & 0x007FFFFF) | (arg1 & ~0x007FFFFF);
}
/*
 * Validate and store a PageMask value. Pre-R6 any value is accepted;
 * R6 only accepts the architecturally legal power-of-4 masks (or ~0).
 * NOTE(review): the 'pagemask' out-parameter is unused in the visible
 * code — the function writes env->CP0_PageMask directly; confirm upstream.
 */
void update_pagemask(CPUMIPSState *env, target_ulong arg1, int32_t *pagemask)
{
    uint64_t mask = arg1 >> (TARGET_PAGE_BITS + 1);
    if (!(env->insn_flags & ISA_MIPS32R6) || (arg1 == ~0) ||
        (mask == 0x0000 || mask == 0x0003 || mask == 0x000F ||
         mask == 0x003F || mask == 0x00FF || mask == 0x03FF ||
         mask == 0x0FFF || mask == 0x3FFF || mask == 0xFFFF)) {
        env->CP0_PageMask = arg1 & (0x1FFFFFFF & (TARGET_PAGE_MASK << 1));
    }
}
/* MTC0 PageMask: delegate validation/store to update_pagemask(). */
void helper_mtc0_pagemask(CPUMIPSState *env, target_ulong arg1)
{
    update_pagemask(env, arg1, &env->CP0_PageMask);
}
/*
 * Write PageGrain through its read/write bitmask, then recompute
 * hflags (XIE/RIE affect translation) and the physical address mask.
 */
void helper_mtc0_pagegrain(CPUMIPSState *env, target_ulong arg1)
{
    /* SmartMIPS not implemented */
    /* 1k pages not implemented */
    env->CP0_PageGrain = (arg1 & env->CP0_PageGrain_rw_bitmask) |
                         (env->CP0_PageGrain & ~env->CP0_PageGrain_rw_bitmask);
    compute_hflags(env);
    restore_pamask(env);
}
/* Write SegCtl0 (segment control) masked to its defined bits. */
void helper_mtc0_segctl0(CPUMIPSState *env, target_ulong arg1)
{
    CPUState *cs = env_cpu(env);

    env->CP0_SegCtl0 = arg1 & CP0SC0_MASK;
    /* NOTE(review): a tlb_flush(cs) call appears to have been elided by the
     * extraction here (cs is otherwise unused) — confirm against upstream. */
    tlb_flush(cs);
}
/* Write SegCtl1 (segment control) masked to its defined bits. */
void helper_mtc0_segctl1(CPUMIPSState *env, target_ulong arg1)
{
    CPUState *cs = env_cpu(env);

    env->CP0_SegCtl1 = arg1 & CP0SC1_MASK;
    /* NOTE(review): tlb_flush(cs) elided by extraction — confirm upstream. */
    tlb_flush(cs);
}
/* Write SegCtl2 (segment control) masked to its defined bits. */
void helper_mtc0_segctl2(CPUMIPSState *env, target_ulong arg1)
{
    CPUState *cs = env_cpu(env);

    env->CP0_SegCtl2 = arg1 & CP0SC2_MASK;
    /* NOTE(review): tlb_flush(cs) elided by extraction — confirm upstream. */
    tlb_flush(cs);
}
/*
 * Write PWField (hardware page-walker field widths). On R6, index fields
 * below 12 are treated as read-only (masked out), and a reserved PTEI/PTEW
 * value causes the old value to be retained.
 */
void helper_mtc0_pwfield(CPUMIPSState *env, target_ulong arg1)
{
#if defined(TARGET_MIPS64)
    uint64_t mask = 0x3F3FFFFFFFULL;
    uint32_t old_ptei = (env->CP0_PWField >> CP0PF_PTEI) & 0x3FULL;
    uint32_t new_ptei = (arg1 >> CP0PF_PTEI) & 0x3FULL;

    if ((env->insn_flags & ISA_MIPS32R6)) {
        /* Values below 12 make the corresponding field read-only. */
        if (((arg1 >> CP0PF_BDI) & 0x3FULL) < 12) {
            mask &= ~(0x3FULL << CP0PF_BDI);
        }
        if (((arg1 >> CP0PF_GDI) & 0x3FULL) < 12) {
            mask &= ~(0x3FULL << CP0PF_GDI);
        }
        if (((arg1 >> CP0PF_UDI) & 0x3FULL) < 12) {
            mask &= ~(0x3FULL << CP0PF_UDI);
        }
        if (((arg1 >> CP0PF_MDI) & 0x3FULL) < 12) {
            mask &= ~(0x3FULL << CP0PF_MDI);
        }
        if (((arg1 >> CP0PF_PTI) & 0x3FULL) < 12) {
            mask &= ~(0x3FULL << CP0PF_PTI);
        }
    }
    env->CP0_PWField = arg1 & mask;

    if ((new_ptei >= 32) ||
        ((env->insn_flags & ISA_MIPS32R6) &&
         (new_ptei == 0 || new_ptei == 1))) {
        /* Reserved PTEI value: keep the previous PTEI. */
        env->CP0_PWField = (env->CP0_PWField & ~0x3FULL) |
                           (old_ptei << CP0PF_PTEI);
    }
#else
    /* NOTE(review): '#else' elided by extraction; restored. */
    uint32_t mask = 0x3FFFFFFF;
    uint32_t old_ptew = (env->CP0_PWField >> CP0PF_PTEW) & 0x3F;
    uint32_t new_ptew = (arg1 >> CP0PF_PTEW) & 0x3F;

    if ((env->insn_flags & ISA_MIPS32R6)) {
        if (((arg1 >> CP0PF_GDW) & 0x3F) < 12) {
            mask &= ~(0x3F << CP0PF_GDW);
        }
        if (((arg1 >> CP0PF_UDW) & 0x3F) < 12) {
            mask &= ~(0x3F << CP0PF_UDW);
        }
        if (((arg1 >> CP0PF_MDW) & 0x3F) < 12) {
            mask &= ~(0x3F << CP0PF_MDW);
        }
        if (((arg1 >> CP0PF_PTW) & 0x3F) < 12) {
            mask &= ~(0x3F << CP0PF_PTW);
        }
    }
    env->CP0_PWField = arg1 & mask;

    if ((new_ptew >= 32) ||
        ((env->insn_flags & ISA_MIPS32R6) &&
         (new_ptew == 0 || new_ptew == 1))) {
        /* Reserved PTEW value: keep the previous PTEW. */
        env->CP0_PWField = (env->CP0_PWField & ~0x3F) |
                           (old_ptew << CP0PF_PTEW);
    }
#endif
}
/* Write PWSize (page-walker size register) masked to defined bits. */
void helper_mtc0_pwsize(CPUMIPSState *env, target_ulong arg1)
{
#if defined(TARGET_MIPS64)
    env->CP0_PWSize = arg1 & 0x3F7FFFFFFFULL;
#else
    /* NOTE(review): '#else' elided by extraction; restored. */
    env->CP0_PWSize = arg1 & 0x3FFFFFFF;
#endif
}
/*
 * Write Wired. R6 ignores out-of-range values; pre-R6 wraps the value
 * modulo the number of TLB entries.
 */
void helper_mtc0_wired(CPUMIPSState *env, target_ulong arg1)
{
    if (env->insn_flags & ISA_MIPS32R6) {
        if (arg1 < env->tlb->nb_tlb) {
            env->CP0_Wired = arg1;
        }
    } else {
        /* NOTE(review): '} else {' elided by extraction; confirm upstream. */
        env->CP0_Wired = arg1 % env->tlb->nb_tlb;
    }
}
/* Write PWCtl. Hardware page-table walking itself is not emulated. */
void helper_mtc0_pwctl(CPUMIPSState *env, target_ulong arg1)
{
#if defined(TARGET_MIPS64)
    /* PWEn = 0. Hardware page table walking is not implemented. */
    env->CP0_PWCtl = (env->CP0_PWCtl & 0x000000C0) | (arg1 & 0x5C00003F);
#else
    /* NOTE(review): '#else' elided by extraction; restored. */
    env->CP0_PWCtl = (arg1 & 0x800000FF);
#endif
}
/* SRSConf0: OR in writable bits (sticky — software cannot clear them). */
void helper_mtc0_srsconf0(CPUMIPSState *env, target_ulong arg1)
{
    env->CP0_SRSConf0 |= arg1 & env->CP0_SRSConf0_rw_bitmask;
}
/* SRSConf1: OR in writable bits (sticky — software cannot clear them). */
void helper_mtc0_srsconf1(CPUMIPSState *env, target_ulong arg1)
{
    env->CP0_SRSConf1 |= arg1 & env->CP0_SRSConf1_rw_bitmask;
}
/* SRSConf2: OR in writable bits (sticky — software cannot clear them). */
void helper_mtc0_srsconf2(CPUMIPSState *env, target_ulong arg1)
{
    env->CP0_SRSConf2 |= arg1 & env->CP0_SRSConf2_rw_bitmask;
}
/* SRSConf3: OR in writable bits (sticky — software cannot clear them). */
void helper_mtc0_srsconf3(CPUMIPSState *env, target_ulong arg1)
{
    env->CP0_SRSConf3 |= arg1 & env->CP0_SRSConf3_rw_bitmask;
}
/* SRSConf4: OR in writable bits (sticky — software cannot clear them). */
void helper_mtc0_srsconf4(CPUMIPSState *env, target_ulong arg1)
{
    env->CP0_SRSConf4 |= arg1 & env->CP0_SRSConf4_rw_bitmask;
}
/*
 * Write HWREna: build the mask of RDHWR registers software may enable,
 * and mirror the ULR enable (bit 29) into hflags for fast access.
 */
void helper_mtc0_hwrena(CPUMIPSState *env, target_ulong arg1)
{
    uint32_t mask = 0x0000000F;

    if ((env->CP0_Config1 & (1 << CP0C1_PC)) &&
        (env->insn_flags & ISA_MIPS32R6)) {
        /* NOTE(review): body elided by extraction; upstream sets
         * mask |= (1 << 4) here — confirm. */
        mask |= (1 << 4);
    }
    if (env->insn_flags & ISA_MIPS32R6) {
        /* NOTE(review): elided; upstream sets mask |= (1 << 5) — confirm. */
        mask |= (1 << 5);
    }
    if (env->CP0_Config3 & (1 << CP0C3_ULRI)) {
        /* NOTE(review): elided; upstream sets mask |= (1 << 29) — confirm. */
        mask |= (1 << 29);

        if (arg1 & (1 << 29)) {
            env->hflags |= MIPS_HFLAG_HWRENA_ULR;
        } else {
            env->hflags &= ~MIPS_HFLAG_HWRENA_ULR;
        }
    }

    env->CP0_HWREna = arg1 & mask;
}
/* Write Count: delegate to the timer code (keeps Count/Compare in sync). */
void helper_mtc0_count(CPUMIPSState *env, target_ulong arg1)
{
    cpu_mips_store_count(env, arg1);
}
/* Write SAARI (SAAR index), 6-bit field. */
void helper_mtc0_saari(CPUMIPSState *env, target_ulong arg1)
{
    uint32_t target = arg1 & 0x3f;
    /* NOTE(review): a range guard (upstream: if (target <= 1)) was elided
     * by the extraction — confirm before relying on this. */
    if (target <= 1) {
        env->CP0_SAARI = target;
    }
}
/* Write the SAAR register selected by SAARI; SAAR0 reconfigures the ITU. */
void helper_mtc0_saar(CPUMIPSState *env, target_ulong arg1)
{
    uint32_t target = env->CP0_SAARI & 0x3f;
    /* NOTE(review): the surrounding guard and switch scaffolding were elided
     * by the extraction (upstream: if (target < 2) { ... switch (target)
     * { case 0: if (env->itu) ... } }) — confirm against upstream. */
    if (target < 2) {
        env->CP0_SAAR[target] = arg1 & 0x00000ffffffff03fULL;
        if (target == 0 && env->itu) {
            itc_reconfigure(env->itu);
        }
    }
}
/* Write the high 32 bits of the SAAR register selected by SAARI. */
void helper_mthc0_saar(CPUMIPSState *env, target_ulong arg1)
{
    uint32_t target = env->CP0_SAARI & 0x3f;
    /* NOTE(review): guard and switch scaffolding elided by the extraction
     * (see helper_mtc0_saar) — confirm against upstream. */
    if (target < 2) {
        env->CP0_SAAR[target] =
            (((uint64_t) arg1 << 32) & 0x00000fff00000000ULL) |
            (env->CP0_SAAR[target] & 0x00000000ffffffffULL);
        if (target == 0 && env->itu) {
            itc_reconfigure(env->itu);
        }
    }
}
/*
 * Write EntryHi: mask to the writable VPN2/ASID (and EHINV when Config4.IE
 * allows invalidation), guard reserved R-field values on MIPS64 R6,
 * propagate to the MT shadow copy, and flush QEMU's TLB on ASID change.
 */
void helper_mtc0_entryhi(CPUMIPSState *env, target_ulong arg1)
{
    target_ulong old, val, mask;
    mask = (TARGET_PAGE_MASK << 1) | env->CP0_EntryHi_ASID_mask;
    if (((env->CP0_Config4 >> CP0C4_IE) & 0x3) >= 2) {
        mask |= 1 << CP0EnHi_EHINV;
    }

    /* 1k pages not implemented */
#if defined(TARGET_MIPS64)
    if (env->insn_flags & ISA_MIPS32R6) {
        int entryhi_r = extract64(arg1, 62, 2);
        int config0_at = extract32(env->CP0_Config0, 13, 2);
        bool no_supervisor = (env->CP0_Status_rw_bitmask & 0x8) == 0;
        if ((entryhi_r == 2) ||
            (entryhi_r == 1 && (no_supervisor || config0_at == 1))) {
            /* skip EntryHi.R field if new value is reserved */
            mask &= ~(0x3ull << 62);
        }
    }
    mask &= env->SEGMask;
#endif
    old = env->CP0_EntryHi;
    val = (arg1 & mask) | (old & ~mask);
    env->CP0_EntryHi = val;
    if (env->CP0_Config3 & (1 << CP0C3_MT)) {
        sync_c0_entryhi(env, env->current_tc);
    }
    /* If the ASID changes, flush qemu's TLB. */
    if ((old & env->CP0_EntryHi_ASID_mask) !=
        (val & env->CP0_EntryHi_ASID_mask)) {
        tlb_flush(env_cpu(env));
    }
}
/* Write EntryHi of the TC selected by VPEControl.TargTC (unmasked). */
void helper_mttc0_entryhi(CPUMIPSState *env, target_ulong arg1)
{
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
    CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);

    other->CP0_EntryHi = arg1;
    sync_c0_entryhi(other, other_tc);
}
/* Write Compare: delegate to the timer code (also clears timer IRQ). */
void helper_mtc0_compare(CPUMIPSState *env, target_ulong arg1)
{
    cpu_mips_store_compare(env, arg1);
}
1729 void helper_mtc0_status(CPUMIPSState
*env
, target_ulong arg1
)
1733 old
= env
->CP0_Status
;
1734 cpu_mips_store_status(env
, arg1
);
1735 val
= env
->CP0_Status
;
1737 if (qemu_loglevel_mask(CPU_LOG_EXEC
)) {
1738 qemu_log("Status %08x (%08x) => %08x (%08x) Cause %08x",
1739 old
, old
& env
->CP0_Cause
& CP0Ca_IP_mask
,
1740 val
, val
& env
->CP0_Cause
& CP0Ca_IP_mask
,
1742 switch (cpu_mmu_index(env
, false)) {
1744 qemu_log(", ERL\n");
1756 cpu_abort(env_cpu(env
), "Invalid MMU mode!\n");
/*
 * Write Status of the TC selected by VPEControl.TargTC, through the
 * rw bitmask minus the mode/interrupt-control bits (~0xf1000018).
 */
void helper_mttc0_status(CPUMIPSState *env, target_ulong arg1)
{
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
    uint32_t mask = env->CP0_Status_rw_bitmask & ~0xf1000018;
    CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);

    other->CP0_Status = (other->CP0_Status & ~mask) | (arg1 & mask);
    sync_c0_status(env, other, other_tc);
}
/* Write IntCtl: only the VS field (bits 9:5) is software-writable. */
void helper_mtc0_intctl(CPUMIPSState *env, target_ulong arg1)
{
    env->CP0_IntCtl = (env->CP0_IntCtl & ~0x000003e0) | (arg1 & 0x000003e0);
}
/* Write SRSCtl: only the ESS and PSS fields are software-writable. */
void helper_mtc0_srsctl(CPUMIPSState *env, target_ulong arg1)
{
    uint32_t mask = (0xf << CP0SRSCtl_ESS) | (0xf << CP0SRSCtl_PSS);
    env->CP0_SRSCtl = (env->CP0_SRSCtl & ~mask) | (arg1 & mask);
}
/* Write Cause: delegate (handles IP, DC, IV side effects). */
void helper_mtc0_cause(CPUMIPSState *env, target_ulong arg1)
{
    cpu_mips_store_cause(env, arg1);
}
/* Write Cause of the TC selected by VPEControl.TargTC. */
void helper_mttc0_cause(CPUMIPSState *env, target_ulong arg1)
{
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
    CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);

    cpu_mips_store_cause(other, arg1);
}
/* Read EPC of the TC selected by VPEControl.TargTC. */
target_ulong helper_mftc0_epc(CPUMIPSState *env)
{
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
    CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);

    return other->CP0_EPC;
}
/* Read EBase of the TC selected by VPEControl.TargTC. */
target_ulong helper_mftc0_ebase(CPUMIPSState *env)
{
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
    CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);

    return other->CP0_EBase;
}
/*
 * Write EBase: base field plus the WG (write-gate) bits; when WG is set
 * the upper address bits become writable too.
 */
void helper_mtc0_ebase(CPUMIPSState *env, target_ulong arg1)
{
    target_ulong mask = 0x3FFFF000 | env->CP0_EBaseWG_rw_bitmask;
    if (arg1 & env->CP0_EBaseWG_rw_bitmask) {
        mask |= ~0x3FFFFFFF;
    }
    env->CP0_EBase = (env->CP0_EBase & ~mask) | (arg1 & mask);
}
/* Write EBase of the TC selected by VPEControl.TargTC (same WG rules). */
void helper_mttc0_ebase(CPUMIPSState *env, target_ulong arg1)
{
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
    CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);
    target_ulong mask = 0x3FFFF000 | env->CP0_EBaseWG_rw_bitmask;
    if (arg1 & env->CP0_EBaseWG_rw_bitmask) {
        mask |= ~0x3FFFFFFF;
    }
    other->CP0_EBase = (other->CP0_EBase & ~mask) | (arg1 & mask);
}
/* Read ConfigN (selected by idx) of the TC chosen by VPEControl.TargTC. */
target_ulong helper_mftc0_configx(CPUMIPSState *env, target_ulong idx)
{
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
    CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);

    /* NOTE(review): the 'switch (idx) {' line and the default/return-0 tail
     * were elided by the extraction; restored — confirm upstream. */
    switch (idx) {
    case 0: return other->CP0_Config0;
    case 1: return other->CP0_Config1;
    case 2: return other->CP0_Config2;
    case 3: return other->CP0_Config3;
    /* 4 and 5 are reserved. */
    case 6: return other->CP0_Config6;
    case 7: return other->CP0_Config7;
    default:
        break;
    }
    return 0;
}
/* Write Config0: only the K0 cacheability field (low 3 bits) is writable. */
void helper_mtc0_config0(CPUMIPSState *env, target_ulong arg1)
{
    env->CP0_Config0 = (env->CP0_Config0 & 0x81FFFFF8) | (arg1 & 0x00000007);
}
/*
 * Write Config2. Note that arg1 is deliberately ignored in the visible
 * code: with no secondary/tertiary caches emulated, no bit is writable.
 */
void helper_mtc0_config2(CPUMIPSState *env, target_ulong arg1)
{
    /* tertiary/secondary caches not implemented */
    env->CP0_Config2 = (env->CP0_Config2 & 0x8FFF0FFF);
}
/* Write Config3: only ISAOnExc is writable, and only with microMIPS. */
void helper_mtc0_config3(CPUMIPSState *env, target_ulong arg1)
{
    if (env->insn_flags & ASE_MICROMIPS) {
        env->CP0_Config3 = (env->CP0_Config3 & ~(1 << CP0C3_ISA_ON_EXC)) |
                           (arg1 & (1 << CP0C3_ISA_ON_EXC));
    }
}
/* Write Config4 through its per-CPU read/write bitmask. */
void helper_mtc0_config4(CPUMIPSState *env, target_ulong arg1)
{
    env->CP0_Config4 = (env->CP0_Config4 & (~env->CP0_Config4_rw_bitmask)) |
                       (arg1 & env->CP0_Config4_rw_bitmask);
}
/* Write Config5 through its bitmask; recompute hflags (affects modes). */
void helper_mtc0_config5(CPUMIPSState *env, target_ulong arg1)
{
    env->CP0_Config5 = (env->CP0_Config5 & (~env->CP0_Config5_rw_bitmask)) |
                       (arg1 & env->CP0_Config5_rw_bitmask);
    compute_hflags(env);
}
/* Write LLAddr: shift the value into place and merge via the rw bitmask. */
void helper_mtc0_lladdr(CPUMIPSState *env, target_ulong arg1)
{
    target_long mask = env->CP0_LLAddr_rw_bitmask;
    arg1 = arg1 << env->CP0_LLAddr_shift;
    env->CP0_LLAddr = (env->CP0_LLAddr & ~mask) | (arg1 & mask);
}
/* Writable MAAR bits: VH (bit 63), the PA field scaled by PAMask, S/VL. */
#define MTC0_MAAR_MASK(env) \
        ((0x1ULL << 63) | ((env->PAMask >> 4) & ~0xFFFull) | 0x3)

/* Write the MAAR entry selected by MAARI, masked to writable bits. */
void helper_mtc0_maar(CPUMIPSState *env, target_ulong arg1)
{
    env->CP0_MAAR[env->CP0_MAARI] = arg1 & MTC0_MAAR_MASK(env);
}
/* Write the high 32 bits of the MAAR entry selected by MAARI. */
void helper_mthc0_maar(CPUMIPSState *env, target_ulong arg1)
{
    env->CP0_MAAR[env->CP0_MAARI] =
        (((uint64_t) arg1 << 32) & MTC0_MAAR_MASK(env)) |
        (env->CP0_MAAR[env->CP0_MAARI] & 0x00000000ffffffffULL);
}
/* Write MAARI: all-ones selects the highest index; out-of-range ignored. */
void helper_mtc0_maari(CPUMIPSState *env, target_ulong arg1)
{
    int index = arg1 & 0x3f;
    if (index == 0x3f) {
        /*
         * Software may write all ones to INDEX to determine the
         * maximum value supported.
         */
        env->CP0_MAARI = MIPS_MAAR_MAX - 1;
    } else if (index < MIPS_MAAR_MAX) {
        env->CP0_MAARI = index;
    }
    /*
     * Other than the all ones, if the value written is not supported,
     * then INDEX is unchanged from its previous value.
     */
}
/*
 * Write WatchLo[sel]. The low 3 bits (I/R/W enables) are cleared here;
 * watch exceptions are not fully implemented.
 */
void helper_mtc0_watchlo(CPUMIPSState *env, target_ulong arg1, uint32_t sel)
{
    /*
     * Watch exceptions for instructions, data loads, data stores
     * not implemented.
     */
    env->CP0_WatchLo[sel] = (arg1 & ~0x7);
}
/*
 * Write WatchHi[sel]: mask to defined bits, then treat the low 3 bits
 * as write-one-to-clear status flags.
 */
void helper_mtc0_watchhi(CPUMIPSState *env, target_ulong arg1, uint32_t sel)
{
    int mask = 0x40000FF8 | (env->CP0_EntryHi_ASID_mask << CP0WH_ASID);
    env->CP0_WatchHi[sel] = arg1 & mask;
    env->CP0_WatchHi[sel] &= ~(env->CP0_WatchHi[sel] & arg1 & 0x7);
}
/* Write XContext: only bits above the BadVPN2 field are writable. */
void helper_mtc0_xcontext(CPUMIPSState *env, target_ulong arg1)
{
    target_ulong mask = (1ULL << (env->SEGBITS - 7)) - 1;
    env->CP0_XContext = (env->CP0_XContext & mask) | (arg1 & ~mask);
}
/* Write Framemask (stored verbatim; not otherwise modelled). */
void helper_mtc0_framemask(CPUMIPSState *env, target_ulong arg1)
{
    env->CP0_Framemask = arg1; /* XXX */
}
/* Write Debug through its write mask and mirror DM into hflags. */
void helper_mtc0_debug(CPUMIPSState *env, target_ulong arg1)
{
    env->CP0_Debug = (env->CP0_Debug & 0x8C03FC1F) | (arg1 & 0x13300120);
    if (arg1 & (1 << CP0DB_DM)) {
        env->hflags |= MIPS_HFLAG_DM;
    } else {
        /* NOTE(review): '} else {' elided by extraction; confirm upstream. */
        env->hflags &= ~MIPS_HFLAG_DM;
    }
}
/*
 * Write Debug of the TC selected by VPEControl.TargTC: SSt/Halt go to
 * the per-TC Debug_tcstatus, everything else to the shared Debug.
 */
void helper_mttc0_debug(CPUMIPSState *env, target_ulong arg1)
{
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
    uint32_t val = arg1 & ((1 << CP0DB_SSt) | (1 << CP0DB_Halt));
    CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);

    /* XXX: Might be wrong, check with EJTAG spec. */
    if (other_tc == other->current_tc) {
        other->active_tc.CP0_Debug_tcstatus = val;
    } else {
        other->tcs[other_tc].CP0_Debug_tcstatus = val;
    }
    other->CP0_Debug = (other->CP0_Debug &
                        ((1 << CP0DB_SSt) | (1 << CP0DB_Halt))) |
                       (arg1 & ~((1 << CP0DB_SSt) | (1 << CP0DB_Halt)));
}
/* Write Performance0: control bits only (counters not implemented). */
void helper_mtc0_performance0(CPUMIPSState *env, target_ulong arg1)
{
    env->CP0_Performance0 = arg1 & 0x000007ff;
}
/*
 * Write ErrCtl (WST/SPR/ITC bits). When only ITC is set, CACHE
 * instructions address the ITC tag, tracked via an hflag.
 */
void helper_mtc0_errctl(CPUMIPSState *env, target_ulong arg1)
{
    int32_t wst = arg1 & (1 << CP0EC_WST);
    int32_t spr = arg1 & (1 << CP0EC_SPR);
    /* ITC is only writable when the CPU actually has an ITC tag. */
    int32_t itc = env->itc_tag ? (arg1 & (1 << CP0EC_ITC)) : 0;

    env->CP0_ErrCtl = wst | spr | itc;

    if (itc && !wst && !spr) {
        env->hflags |= MIPS_HFLAG_ITC_CACHE;
    } else {
        env->hflags &= ~MIPS_HFLAG_ITC_CACHE;
    }
}
/* Write TagLo: fully writable in ITC-tag mode, otherwise masked. */
void helper_mtc0_taglo(CPUMIPSState *env, target_ulong arg1)
{
    if (env->hflags & MIPS_HFLAG_ITC_CACHE) {
        /*
         * If CACHE instruction is configured for ITC tags then make all
         * CP0.TagLo bits writable. The actual write to ITC Configuration
         * Tag will take care of the read-only bits.
         */
        env->CP0_TagLo = arg1;
    } else {
        env->CP0_TagLo = arg1 & 0xFFFFFCF6;
    }
}
/* Write DataLo (stored verbatim; cache data arrays not modelled). */
void helper_mtc0_datalo(CPUMIPSState *env, target_ulong arg1)
{
    env->CP0_DataLo = arg1; /* XXX */
}
/* Write TagHi (stored verbatim; cache tag arrays not modelled). */
void helper_mtc0_taghi(CPUMIPSState *env, target_ulong arg1)
{
    env->CP0_TagHi = arg1; /* XXX */
}
/* Write DataHi (stored verbatim; cache data arrays not modelled). */
void helper_mtc0_datahi(CPUMIPSState *env, target_ulong arg1)
{
    env->CP0_DataHi = arg1; /* XXX */
}
2026 /* MIPS MT functions */
/* MT: read GPR[sel] of the TC selected by VPEControl.TargTC. */
target_ulong helper_mftgpr(CPUMIPSState *env, uint32_t sel)
{
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
    CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);

    if (other_tc == other->current_tc) {
        return other->active_tc.gpr[sel];
    } else {
        return other->tcs[other_tc].gpr[sel];
    }
}
/* MT: read LO[sel] of the TC selected by VPEControl.TargTC. */
target_ulong helper_mftlo(CPUMIPSState *env, uint32_t sel)
{
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
    CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);

    if (other_tc == other->current_tc) {
        return other->active_tc.LO[sel];
    } else {
        return other->tcs[other_tc].LO[sel];
    }
}
/* MT: read HI[sel] of the TC selected by VPEControl.TargTC. */
target_ulong helper_mfthi(CPUMIPSState *env, uint32_t sel)
{
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
    CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);

    if (other_tc == other->current_tc) {
        return other->active_tc.HI[sel];
    } else {
        return other->tcs[other_tc].HI[sel];
    }
}
/* MT: read ACX[sel] of the TC selected by VPEControl.TargTC. */
target_ulong helper_mftacx(CPUMIPSState *env, uint32_t sel)
{
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
    CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);

    if (other_tc == other->current_tc) {
        return other->active_tc.ACX[sel];
    } else {
        return other->tcs[other_tc].ACX[sel];
    }
}
/* MT: read DSPControl of the TC selected by VPEControl.TargTC. */
target_ulong helper_mftdsp(CPUMIPSState *env)
{
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
    CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);

    if (other_tc == other->current_tc) {
        return other->active_tc.DSPControl;
    } else {
        return other->tcs[other_tc].DSPControl;
    }
}
/* MT: write GPR[sel] of the TC selected by VPEControl.TargTC. */
void helper_mttgpr(CPUMIPSState *env, target_ulong arg1, uint32_t sel)
{
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
    CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);

    if (other_tc == other->current_tc) {
        other->active_tc.gpr[sel] = arg1;
    } else {
        other->tcs[other_tc].gpr[sel] = arg1;
    }
}
/* MT: write LO[sel] of the TC selected by VPEControl.TargTC. */
void helper_mttlo(CPUMIPSState *env, target_ulong arg1, uint32_t sel)
{
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
    CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);

    if (other_tc == other->current_tc) {
        other->active_tc.LO[sel] = arg1;
    } else {
        other->tcs[other_tc].LO[sel] = arg1;
    }
}
/* MT: write HI[sel] of the TC selected by VPEControl.TargTC. */
void helper_mtthi(CPUMIPSState *env, target_ulong arg1, uint32_t sel)
{
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
    CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);

    if (other_tc == other->current_tc) {
        other->active_tc.HI[sel] = arg1;
    } else {
        other->tcs[other_tc].HI[sel] = arg1;
    }
}
/* MT: write ACX[sel] of the TC selected by VPEControl.TargTC. */
void helper_mttacx(CPUMIPSState *env, target_ulong arg1, uint32_t sel)
{
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
    CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);

    if (other_tc == other->current_tc) {
        other->active_tc.ACX[sel] = arg1;
    } else {
        other->tcs[other_tc].ACX[sel] = arg1;
    }
}
/* MT: write DSPControl of the TC selected by VPEControl.TargTC. */
void helper_mttdsp(CPUMIPSState *env, target_ulong arg1)
{
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
    CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);

    if (other_tc == other->current_tc) {
        other->active_tc.DSPControl = arg1;
    } else {
        other->tcs[other_tc].DSPControl = arg1;
    }
}
2147 /* MIPS MT functions */
2148 target_ulong
helper_dmt(void)
2154 target_ulong
helper_emt(void)
/* DVPE: disable all VPEs except the caller's; returns old MVPControl. */
target_ulong helper_dvpe(CPUMIPSState *env)
{
    CPUState *other_cs = first_cpu;
    target_ulong prev = env->mvp->CP0_MVPControl;

    CPU_FOREACH(other_cs) {
        MIPSCPU *other_cpu = MIPS_CPU(other_cs);
        /* Turn off all VPEs except the one executing the dvpe. */
        if (&other_cpu->env != env) {
            other_cpu->env.mvp->CP0_MVPControl &= ~(1 << CP0MVPCo_EVP);
            mips_vpe_sleep(other_cpu);
        }
    }
    /* NOTE(review): 'return prev;' elided by extraction; restored —
     * prev is otherwise unused. Confirm upstream. */
    return prev;
}
/* EVPE: re-enable and wake all other non-WFI VPEs; returns old MVPControl. */
target_ulong helper_evpe(CPUMIPSState *env)
{
    CPUState *other_cs = first_cpu;
    target_ulong prev = env->mvp->CP0_MVPControl;

    CPU_FOREACH(other_cs) {
        MIPSCPU *other_cpu = MIPS_CPU(other_cs);

        if (&other_cpu->env != env
            /* If the VPE is WFI, don't disturb its sleep. */
            && !mips_vpe_is_wfi(other_cpu)) {
            /* Enable the VPE. */
            other_cpu->env.mvp->CP0_MVPControl |= (1 << CP0MVPCo_EVP);
            mips_vpe_wake(other_cpu); /* And wake it up. */
        }
    }
    /* NOTE(review): 'return prev;' elided by extraction; restored. */
    return prev;
}
2194 #endif /* !CONFIG_USER_ONLY */
2196 void helper_fork(target_ulong arg1
, target_ulong arg2
)
2199 * arg1 = rt, arg2 = rs
2200 * TODO: store to TC register
/*
 * YIELD: raise a Thread exception with the appropriate VPEControl.EXCPT
 * code depending on the (signed) qualifier. No real scheduling is done.
 * Returns YQMask.
 */
target_ulong helper_yield(CPUMIPSState *env, target_ulong arg)
{
    target_long arg1 = arg;

    /* NOTE(review): the outer 'if (arg1 < 0)' / 'if (arg1 != -2)' guards
     * were elided by the extraction; restored per upstream — confirm. */
    if (arg1 < 0) {
        /* No scheduling policy implemented. */
        if (arg1 != -2) {
            if (env->CP0_VPEControl & (1 << CP0VPECo_YSI) &&
                env->active_tc.CP0_TCStatus & (1 << CP0TCSt_DT)) {
                env->CP0_VPEControl &= ~(0x7 << CP0VPECo_EXCPT);
                env->CP0_VPEControl |= 4 << CP0VPECo_EXCPT;
                do_raise_exception(env, EXCP_THREAD, GETPC());
            }
        }
    } else if (arg1 == 0) {
        if (0) {
            /* TODO: TC underflow */
            env->CP0_VPEControl &= ~(0x7 << CP0VPECo_EXCPT);
            do_raise_exception(env, EXCP_THREAD, GETPC());
        }
        /* TODO: Deallocate TC */
    } else if (arg1 > 0) {
        /* Yield qualifier inputs not implemented. */
        env->CP0_VPEControl &= ~(0x7 << CP0VPECo_EXCPT);
        env->CP0_VPEControl |= 2 << CP0VPECo_EXCPT;
        do_raise_exception(env, EXCP_THREAD, GETPC());
    }
    return env->CP0_YQMask;
}
2235 /* R6 Multi-threading */
2236 #ifndef CONFIG_USER_ONLY
/* R6 DVP: put all other VPs to sleep and set VPControl.DIS; returns old. */
target_ulong helper_dvp(CPUMIPSState *env)
{
    CPUState *other_cs = first_cpu;
    target_ulong prev = env->CP0_VPControl;

    if (!((env->CP0_VPControl >> CP0VPCtl_DIS) & 1)) {
        CPU_FOREACH(other_cs) {
            MIPSCPU *other_cpu = MIPS_CPU(other_cs);
            /* Turn off all VPs except the one executing the dvp. */
            if (&other_cpu->env != env) {
                mips_vpe_sleep(other_cpu);
            }
        }
        env->CP0_VPControl |= (1 << CP0VPCtl_DIS);
    }
    /* NOTE(review): 'return prev;' elided by extraction; restored. */
    return prev;
}
/* R6 EVP: wake all other non-WFI VPs and clear VPControl.DIS; returns old. */
target_ulong helper_evp(CPUMIPSState *env)
{
    CPUState *other_cs = first_cpu;
    target_ulong prev = env->CP0_VPControl;

    if ((env->CP0_VPControl >> CP0VPCtl_DIS) & 1) {
        CPU_FOREACH(other_cs) {
            MIPSCPU *other_cpu = MIPS_CPU(other_cs);
            if ((&other_cpu->env != env) && !mips_vp_is_wfi(other_cpu)) {
                /*
                 * If the VP is WFI, don't disturb its sleep.
                 * Otherwise, wake it up.
                 */
                mips_vpe_wake(other_cpu);
            }
        }
        env->CP0_VPControl &= ~(1 << CP0VPCtl_DIS);
    }
    /* NOTE(review): 'return prev;' elided by extraction; restored. */
    return prev;
}
2275 #endif /* !CONFIG_USER_ONLY */
2277 #ifndef CONFIG_USER_ONLY
2278 /* TLB management */
/* Drop shadow (stale) TLB entries from env->tlb[first] onwards. */
static void r4k_mips_tlb_flush_extra(CPUMIPSState *env, int first)
{
    /* Discard entries from env->tlb[first] onwards. */
    while (env->tlb->tlb_in_use > first) {
        r4k_invalidate_tlb(env, --env->tlb->tlb_in_use, 0);
    }
}
/* Extract the PFN (plus PFNX on 32-bit targets) from an EntryLo value. */
static inline uint64_t get_tlb_pfn_from_entrylo(uint64_t entrylo)
{
#if defined(TARGET_MIPS64)
    return extract64(entrylo, 6, 54);
#else
    /* NOTE(review): '#else' elided by extraction; restored. */
    return extract64(entrylo, 6, 24) |        /* PFN */
           (extract64(entrylo, 32, 32) << 24); /* PFNX */
#endif
}
/*
 * Fill guest TLB entry idx from the CP0 EntryHi/EntryLo0/EntryLo1/PageMask
 * registers (the tail of TLBWI/TLBWR).
 */
static void r4k_fill_tlb(CPUMIPSState *env, int idx)
{
    /* NOTE(review): the 'r4k_tlb_t *tlb;' declaration and the EHINV
     * early-return were elided by the extraction; restored — confirm. */
    r4k_tlb_t *tlb;
    uint64_t mask = env->CP0_PageMask >> (TARGET_PAGE_BITS + 1);

    /* XXX: detect conflicting TLBs and raise a MCHECK exception when needed */
    tlb = &env->tlb->mmu.r4k.tlb[idx];
    if (env->CP0_EntryHi & (1 << CP0EnHi_EHINV)) {
        tlb->EHINV = 1;
        return;
    }
    tlb->EHINV = 0;
    tlb->VPN = env->CP0_EntryHi & (TARGET_PAGE_MASK << 1);
#if defined(TARGET_MIPS64)
    tlb->VPN &= env->SEGMask;
#endif
    tlb->ASID = env->CP0_EntryHi & env->CP0_EntryHi_ASID_mask;
    tlb->PageMask = env->CP0_PageMask;
    /* Entry is global only if both halves have G set. */
    tlb->G = env->CP0_EntryLo0 & env->CP0_EntryLo1 & 1;
    tlb->V0 = (env->CP0_EntryLo0 & 2) != 0;
    tlb->D0 = (env->CP0_EntryLo0 & 4) != 0;
    tlb->C0 = (env->CP0_EntryLo0 >> 3) & 0x7;
    tlb->XI0 = (env->CP0_EntryLo0 >> CP0EnLo_XI) & 1;
    tlb->RI0 = (env->CP0_EntryLo0 >> CP0EnLo_RI) & 1;
    tlb->PFN[0] = (get_tlb_pfn_from_entrylo(env->CP0_EntryLo0) & ~mask) << 12;
    tlb->V1 = (env->CP0_EntryLo1 & 2) != 0;
    tlb->D1 = (env->CP0_EntryLo1 & 4) != 0;
    tlb->C1 = (env->CP0_EntryLo1 >> 3) & 0x7;
    tlb->XI1 = (env->CP0_EntryLo1 >> CP0EnLo_XI) & 1;
    tlb->RI1 = (env->CP0_EntryLo1 >> CP0EnLo_RI) & 1;
    tlb->PFN[1] = (get_tlb_pfn_from_entrylo(env->CP0_EntryLo1) & ~mask) << 12;
}
/* TLBINV: invalidate all non-global entries matching the current ASID. */
void r4k_helper_tlbinv(CPUMIPSState *env)
{
    /* NOTE(review): local declarations elided by extraction; restored. */
    int idx;
    r4k_tlb_t *tlb;
    uint16_t ASID = env->CP0_EntryHi & env->CP0_EntryHi_ASID_mask;

    for (idx = 0; idx < env->tlb->nb_tlb; idx++) {
        tlb = &env->tlb->mmu.r4k.tlb[idx];
        if (!tlb->G && tlb->ASID == ASID) {
            tlb->EHINV = 1;
        }
    }
    cpu_mips_tlb_flush(env);
}
/* TLBINVF: invalidate every guest TLB entry, then flush QEMU's TLB. */
void r4k_helper_tlbinvf(CPUMIPSState *env)
{
    int idx;

    for (idx = 0; idx < env->tlb->nb_tlb; idx++) {
        env->tlb->mmu.r4k.tlb[idx].EHINV = 1;
    }
    cpu_mips_tlb_flush(env);
}
/*
 * TLBWI: write the indexed TLB entry from CP0 state. Shadow entries are
 * flushed unless the write merely upgrades permissions on the same entry.
 */
void r4k_helper_tlbwi(CPUMIPSState *env)
{
    /* NOTE(review): local declarations elided by extraction; restored. */
    r4k_tlb_t *tlb;
    int idx;
    target_ulong VPN;
    uint16_t ASID;
    bool EHINV, G, V0, D0, V1, D1, XI0, XI1, RI0, RI1;

    /* Index.P (bit 31) is ignored; wrap out-of-range indices. */
    idx = (env->CP0_Index & ~0x80000000) % env->tlb->nb_tlb;
    tlb = &env->tlb->mmu.r4k.tlb[idx];
    VPN = env->CP0_EntryHi & (TARGET_PAGE_MASK << 1);
#if defined(TARGET_MIPS64)
    VPN &= env->SEGMask;
#endif
    ASID = env->CP0_EntryHi & env->CP0_EntryHi_ASID_mask;
    EHINV = (env->CP0_EntryHi & (1 << CP0EnHi_EHINV)) != 0;
    G = env->CP0_EntryLo0 & env->CP0_EntryLo1 & 1;
    V0 = (env->CP0_EntryLo0 & 2) != 0;
    D0 = (env->CP0_EntryLo0 & 4) != 0;
    XI0 = (env->CP0_EntryLo0 >> CP0EnLo_XI) & 1;
    RI0 = (env->CP0_EntryLo0 >> CP0EnLo_RI) & 1;
    V1 = (env->CP0_EntryLo1 & 2) != 0;
    D1 = (env->CP0_EntryLo1 & 4) != 0;
    XI1 = (env->CP0_EntryLo1 >> CP0EnLo_XI) & 1;
    RI1 = (env->CP0_EntryLo1 >> CP0EnLo_RI) & 1;

    /*
     * Discard cached TLB entries, unless tlbwi is just upgrading access
     * permissions on the current entry.
     */
    if (tlb->VPN != VPN || tlb->ASID != ASID || tlb->G != G ||
        (!tlb->EHINV && EHINV) ||
        (tlb->V0 && !V0) || (tlb->D0 && !D0) ||
        (!tlb->XI0 && XI0) || (!tlb->RI0 && RI0) ||
        (tlb->V1 && !V1) || (tlb->D1 && !D1) ||
        (!tlb->XI1 && XI1) || (!tlb->RI1 && RI1)) {
        r4k_mips_tlb_flush_extra(env, env->tlb->nb_tlb);
    }

    r4k_invalidate_tlb(env, idx, 0);
    r4k_fill_tlb(env, idx);
}
/* TLBWR: write CP0 state into a (pseudo-)randomly chosen TLB entry. */
void r4k_helper_tlbwr(CPUMIPSState *env)
{
    int r = cpu_mips_get_random(env);

    r4k_invalidate_tlb(env, r, 1);
    r4k_fill_tlb(env, r);
}
/*
 * TLBP: probe for an entry matching EntryHi. On a hit, Index gets the
 * entry number; on a miss, Index.P is set and matching shadow entries
 * are discarded.
 */
void r4k_helper_tlbp(CPUMIPSState *env)
{
    /* NOTE(review): local declarations elided by extraction; restored. */
    r4k_tlb_t *tlb;
    target_ulong mask;
    target_ulong tag;
    target_ulong VPN;
    uint16_t ASID;
    int i;

    ASID = env->CP0_EntryHi & env->CP0_EntryHi_ASID_mask;
    for (i = 0; i < env->tlb->nb_tlb; i++) {
        tlb = &env->tlb->mmu.r4k.tlb[i];
        /* 1k pages are not supported. */
        mask = tlb->PageMask | ~(TARGET_PAGE_MASK << 1);
        tag = env->CP0_EntryHi & ~mask;
        VPN = tlb->VPN & ~mask;
#if defined(TARGET_MIPS64)
        tag &= env->SEGMask;
#endif
        /* Check ASID, virtual page number & size */
        if ((tlb->G == 1 || tlb->ASID == ASID) && VPN == tag && !tlb->EHINV) {
            /* NOTE(review): hit handling elided by extraction; upstream
             * stores the index and breaks — confirm. */
            env->CP0_Index = i;
            break;
        }
    }
    if (i == env->tlb->nb_tlb) {
        /* No match. Discard any shadow entries, if any of them match. */
        for (i = env->tlb->nb_tlb; i < env->tlb->tlb_in_use; i++) {
            tlb = &env->tlb->mmu.r4k.tlb[i];
            /* 1k pages are not supported. */
            mask = tlb->PageMask | ~(TARGET_PAGE_MASK << 1);
            tag = env->CP0_EntryHi & ~mask;
            VPN = tlb->VPN & ~mask;
#if defined(TARGET_MIPS64)
            tag &= env->SEGMask;
#endif
            /* Check ASID, virtual page number & size */
            if ((tlb->G == 1 || tlb->ASID == ASID) && VPN == tag) {
                r4k_mips_tlb_flush_extra(env, i);
                break;
            }
        }

        env->CP0_Index |= 0x80000000;
    }
}
/* Pack a TLB PFN back into EntryLo format (PFN + PFNX on 32-bit). */
static inline uint64_t get_entrylo_pfn_from_tlb(uint64_t tlb_pfn)
{
#if defined(TARGET_MIPS64)
    return tlb_pfn << 6;
#else
    /* NOTE(review): '#else' elided by extraction; restored. */
    return (extract64(tlb_pfn, 0, 24) << 6) |  /* PFN */
           (extract64(tlb_pfn, 24, 32) << 32); /* PFNX */
#endif
}
/*
 * TLBR: read the indexed TLB entry back into EntryHi/PageMask/EntryLo0/1.
 * An EHINV entry reads back as EntryHi.EHINV with zeroed companions.
 */
void r4k_helper_tlbr(CPUMIPSState *env)
{
    /* NOTE(review): local declarations and the EHINV if/else scaffolding
     * were elided by the extraction; restored — confirm upstream. */
    r4k_tlb_t *tlb;
    uint16_t ASID;
    int idx;

    ASID = env->CP0_EntryHi & env->CP0_EntryHi_ASID_mask;
    idx = (env->CP0_Index & ~0x80000000) % env->tlb->nb_tlb;
    tlb = &env->tlb->mmu.r4k.tlb[idx];

    /* If this will change the current ASID, flush qemu's TLB. */
    if (ASID != tlb->ASID) {
        cpu_mips_tlb_flush(env);
    }

    r4k_mips_tlb_flush_extra(env, env->tlb->nb_tlb);

    if (tlb->EHINV) {
        env->CP0_EntryHi = 1 << CP0EnHi_EHINV;
        env->CP0_PageMask = 0;
        env->CP0_EntryLo0 = 0;
        env->CP0_EntryLo1 = 0;
    } else {
        env->CP0_EntryHi = tlb->VPN | tlb->ASID;
        env->CP0_PageMask = tlb->PageMask;
        env->CP0_EntryLo0 = tlb->G | (tlb->V0 << 1) | (tlb->D0 << 2) |
                            ((uint64_t)tlb->RI0 << CP0EnLo_RI) |
                            ((uint64_t)tlb->XI0 << CP0EnLo_XI) |
                            (tlb->C0 << 3) |
                            get_entrylo_pfn_from_tlb(tlb->PFN[0] >> 12);
        env->CP0_EntryLo1 = tlb->G | (tlb->V1 << 1) | (tlb->D1 << 2) |
                            ((uint64_t)tlb->RI1 << CP0EnLo_RI) |
                            ((uint64_t)tlb->XI1 << CP0EnLo_XI) |
                            (tlb->C1 << 3) |
                            get_entrylo_pfn_from_tlb(tlb->PFN[1] >> 12);
    }
}
/* Dispatch TLBWI to the MMU-model-specific implementation. */
void helper_tlbwi(CPUMIPSState *env)
{
    env->tlb->helper_tlbwi(env);
}
/* Dispatch TLBWR to the MMU-model-specific implementation. */
void helper_tlbwr(CPUMIPSState *env)
{
    env->tlb->helper_tlbwr(env);
}
/* Dispatch TLBP to the MMU-model-specific implementation. */
void helper_tlbp(CPUMIPSState *env)
{
    env->tlb->helper_tlbp(env);
}
/* Dispatch TLBR to the MMU-model-specific implementation. */
void helper_tlbr(CPUMIPSState *env)
{
    env->tlb->helper_tlbr(env);
}
/* Dispatch TLBINV to the MMU-model-specific implementation. */
void helper_tlbinv(CPUMIPSState *env)
{
    env->tlb->helper_tlbinv(env);
}
/* Dispatch TLBINVF to the MMU-model-specific implementation. */
void helper_tlbinvf(CPUMIPSState *env)
{
    env->tlb->helper_tlbinvf(env);
}
/* DI: clear Status.IE; returns the previous Status value. */
target_ulong helper_di(CPUMIPSState *env)
{
    target_ulong t0 = env->CP0_Status;

    env->CP0_Status = t0 & ~(1 << CP0St_IE);
    /* NOTE(review): 'return t0;' elided by extraction; restored —
     * the function must return a value. Confirm upstream. */
    return t0;
}
/* EI: set Status.IE; returns the previous Status value. */
target_ulong helper_ei(CPUMIPSState *env)
{
    target_ulong t0 = env->CP0_Status;

    env->CP0_Status = t0 | (1 << CP0St_IE);
    /* NOTE(review): 'return t0;' elided by extraction; restored. */
    return t0;
}
/* Log PC/EPC (and ErrorEPC/DEPC when relevant) before an ERET/DERET. */
static void debug_pre_eret(CPUMIPSState *env)
{
    if (qemu_loglevel_mask(CPU_LOG_EXEC)) {
        qemu_log("ERET: PC " TARGET_FMT_lx " EPC " TARGET_FMT_lx,
                 env->active_tc.PC, env->CP0_EPC);
        if (env->CP0_Status & (1 << CP0St_ERL)) {
            qemu_log(" ErrorEPC " TARGET_FMT_lx, env->CP0_ErrorEPC);
        }
        if (env->hflags & MIPS_HFLAG_DM) {
            qemu_log(" DEPC " TARGET_FMT_lx, env->CP0_DEPC);
        }
        /* NOTE(review): a trailing qemu_log("\n") appears to have been
         * elided by the extraction — confirm upstream. */
    }
}
2562 static void debug_post_eret(CPUMIPSState
*env
)
2564 if (qemu_loglevel_mask(CPU_LOG_EXEC
)) {
2565 qemu_log(" => PC " TARGET_FMT_lx
" EPC " TARGET_FMT_lx
,
2566 env
->active_tc
.PC
, env
->CP0_EPC
);
2567 if (env
->CP0_Status
& (1 << CP0St_ERL
)) {
2568 qemu_log(" ErrorEPC " TARGET_FMT_lx
, env
->CP0_ErrorEPC
);
2570 if (env
->hflags
& MIPS_HFLAG_DM
) {
2571 qemu_log(" DEPC " TARGET_FMT_lx
, env
->CP0_DEPC
);
2573 switch (cpu_mmu_index(env
, false)) {
2575 qemu_log(", ERL\n");
2587 cpu_abort(env_cpu(env
), "Invalid MMU mode!\n");
/*
 * Jump to error_pc: bit 0 selects the compressed (MIPS16/microMIPS) ISA
 * mode and is stripped from the PC itself.
 */
static void set_pc(CPUMIPSState *env, target_ulong error_pc)
{
    env->active_tc.PC = error_pc & ~(target_ulong)1;
    /* NOTE(review): 'if (error_pc & 1) {' elided by extraction;
     * restored per upstream — confirm. */
    if (error_pc & 1) {
        env->hflags |= MIPS_HFLAG_M16;
    } else {
        env->hflags &= ~(MIPS_HFLAG_M16);
    }
}
2603 static inline void exception_return(CPUMIPSState
*env
)
2605 debug_pre_eret(env
);
2606 if (env
->CP0_Status
& (1 << CP0St_ERL
)) {
2607 set_pc(env
, env
->CP0_ErrorEPC
);
2608 env
->CP0_Status
&= ~(1 << CP0St_ERL
);
2610 set_pc(env
, env
->CP0_EPC
);
2611 env
->CP0_Status
&= ~(1 << CP0St_EXL
);
2613 compute_hflags(env
);
2614 debug_post_eret(env
);
2617 void helper_eret(CPUMIPSState
*env
)
2619 exception_return(env
);
2620 env
->CP0_LLAddr
= 1;
2624 void helper_eretnc(CPUMIPSState
*env
)
2626 exception_return(env
);
2629 void helper_deret(CPUMIPSState
*env
)
2631 debug_pre_eret(env
);
2633 env
->hflags
&= ~MIPS_HFLAG_DM
;
2634 compute_hflags(env
);
2636 set_pc(env
, env
->CP0_DEPC
);
2638 debug_post_eret(env
);
2640 #endif /* !CONFIG_USER_ONLY */
2642 static inline void check_hwrena(CPUMIPSState
*env
, int reg
, uintptr_t pc
)
2644 if ((env
->hflags
& MIPS_HFLAG_CP0
) || (env
->CP0_HWREna
& (1 << reg
))) {
2647 do_raise_exception(env
, EXCP_RI
, pc
);
2650 target_ulong
helper_rdhwr_cpunum(CPUMIPSState
*env
)
2652 check_hwrena(env
, 0, GETPC());
2653 return env
->CP0_EBase
& 0x3ff;
2656 target_ulong
helper_rdhwr_synci_step(CPUMIPSState
*env
)
2658 check_hwrena(env
, 1, GETPC());
2659 return env
->SYNCI_Step
;
2662 target_ulong
helper_rdhwr_cc(CPUMIPSState
*env
)
2664 check_hwrena(env
, 2, GETPC());
2665 #ifdef CONFIG_USER_ONLY
2666 return env
->CP0_Count
;
2668 return (int32_t)cpu_mips_get_count(env
);
2672 target_ulong
helper_rdhwr_ccres(CPUMIPSState
*env
)
2674 check_hwrena(env
, 3, GETPC());
2678 target_ulong
helper_rdhwr_performance(CPUMIPSState
*env
)
2680 check_hwrena(env
, 4, GETPC());
2681 return env
->CP0_Performance0
;
2684 target_ulong
helper_rdhwr_xnp(CPUMIPSState
*env
)
2686 check_hwrena(env
, 5, GETPC());
2687 return (env
->CP0_Config5
>> CP0C5_XNP
) & 1;
2690 void helper_pmon(CPUMIPSState
*env
, int function
)
2694 case 2: /* TODO: char inbyte(int waitflag); */
2695 if (env
->active_tc
.gpr
[4] == 0) {
2696 env
->active_tc
.gpr
[2] = -1;
2699 case 11: /* TODO: char inbyte (void); */
2700 env
->active_tc
.gpr
[2] = -1;
2704 printf("%c", (char)(env
->active_tc
.gpr
[4] & 0xFF));
2710 unsigned char *fmt
= (void *)(uintptr_t)env
->active_tc
.gpr
[4];
2717 void helper_wait(CPUMIPSState
*env
)
2719 CPUState
*cs
= env_cpu(env
);
2722 cpu_reset_interrupt(cs
, CPU_INTERRUPT_WAKE
);
2724 * Last instruction in the block, PC was updated before
2725 * - no need to recover PC and icount.
2727 raise_exception(env
, EXCP_HLT
);
2730 #if !defined(CONFIG_USER_ONLY)
2732 void mips_cpu_do_unaligned_access(CPUState
*cs
, vaddr addr
,
2733 MMUAccessType access_type
,
2734 int mmu_idx
, uintptr_t retaddr
)
2736 MIPSCPU
*cpu
= MIPS_CPU(cs
);
2737 CPUMIPSState
*env
= &cpu
->env
;
2741 if (!(env
->hflags
& MIPS_HFLAG_DM
)) {
2742 env
->CP0_BadVAddr
= addr
;
2745 if (access_type
== MMU_DATA_STORE
) {
2749 if (access_type
== MMU_INST_FETCH
) {
2750 error_code
|= EXCP_INST_NOTAVAIL
;
2754 do_raise_exception_err(env
, excp
, error_code
, retaddr
);
2757 void mips_cpu_do_transaction_failed(CPUState
*cs
, hwaddr physaddr
,
2758 vaddr addr
, unsigned size
,
2759 MMUAccessType access_type
,
2760 int mmu_idx
, MemTxAttrs attrs
,
2761 MemTxResult response
, uintptr_t retaddr
)
2763 MIPSCPU
*cpu
= MIPS_CPU(cs
);
2764 CPUMIPSState
*env
= &cpu
->env
;
2766 if (access_type
== MMU_INST_FETCH
) {
2767 do_raise_exception(env
, EXCP_IBE
, retaddr
);
2769 do_raise_exception(env
, EXCP_DBE
, retaddr
);
2772 #endif /* !CONFIG_USER_ONLY */
2774 /* Complex FPU operations which may need stack space. */
2776 #define FLOAT_TWO32 make_float32(1 << 30)
2777 #define FLOAT_TWO64 make_float64(1ULL << 62)
2779 #define FP_TO_INT32_OVERFLOW 0x7fffffff
2780 #define FP_TO_INT64_OVERFLOW 0x7fffffffffffffffULL
2782 /* convert MIPS rounding mode in FCR31 to IEEE library */
2783 unsigned int ieee_rm
[] = {
2784 float_round_nearest_even
,
2785 float_round_to_zero
,
2790 target_ulong
helper_cfc1(CPUMIPSState
*env
, uint32_t reg
)
2792 target_ulong arg1
= 0;
2796 arg1
= (int32_t)env
->active_fpu
.fcr0
;
2799 /* UFR Support - Read Status FR */
2800 if (env
->active_fpu
.fcr0
& (1 << FCR0_UFRP
)) {
2801 if (env
->CP0_Config5
& (1 << CP0C5_UFR
)) {
2803 ((env
->CP0_Status
& (1 << CP0St_FR
)) >> CP0St_FR
);
2805 do_raise_exception(env
, EXCP_RI
, GETPC());
2810 /* FRE Support - read Config5.FRE bit */
2811 if (env
->active_fpu
.fcr0
& (1 << FCR0_FREP
)) {
2812 if (env
->CP0_Config5
& (1 << CP0C5_UFE
)) {
2813 arg1
= (env
->CP0_Config5
>> CP0C5_FRE
) & 1;
2815 helper_raise_exception(env
, EXCP_RI
);
2820 arg1
= ((env
->active_fpu
.fcr31
>> 24) & 0xfe) |
2821 ((env
->active_fpu
.fcr31
>> 23) & 0x1);
2824 arg1
= env
->active_fpu
.fcr31
& 0x0003f07c;
2827 arg1
= (env
->active_fpu
.fcr31
& 0x00000f83) |
2828 ((env
->active_fpu
.fcr31
>> 22) & 0x4);
2831 arg1
= (int32_t)env
->active_fpu
.fcr31
;
2838 void helper_ctc1(CPUMIPSState
*env
, target_ulong arg1
, uint32_t fs
, uint32_t rt
)
2842 /* UFR Alias - Reset Status FR */
2843 if (!((env
->active_fpu
.fcr0
& (1 << FCR0_UFRP
)) && (rt
== 0))) {
2846 if (env
->CP0_Config5
& (1 << CP0C5_UFR
)) {
2847 env
->CP0_Status
&= ~(1 << CP0St_FR
);
2848 compute_hflags(env
);
2850 do_raise_exception(env
, EXCP_RI
, GETPC());
2854 /* UNFR Alias - Set Status FR */
2855 if (!((env
->active_fpu
.fcr0
& (1 << FCR0_UFRP
)) && (rt
== 0))) {
2858 if (env
->CP0_Config5
& (1 << CP0C5_UFR
)) {
2859 env
->CP0_Status
|= (1 << CP0St_FR
);
2860 compute_hflags(env
);
2862 do_raise_exception(env
, EXCP_RI
, GETPC());
2866 /* FRE Support - clear Config5.FRE bit */
2867 if (!((env
->active_fpu
.fcr0
& (1 << FCR0_FREP
)) && (rt
== 0))) {
2870 if (env
->CP0_Config5
& (1 << CP0C5_UFE
)) {
2871 env
->CP0_Config5
&= ~(1 << CP0C5_FRE
);
2872 compute_hflags(env
);
2874 helper_raise_exception(env
, EXCP_RI
);
2878 /* FRE Support - set Config5.FRE bit */
2879 if (!((env
->active_fpu
.fcr0
& (1 << FCR0_FREP
)) && (rt
== 0))) {
2882 if (env
->CP0_Config5
& (1 << CP0C5_UFE
)) {
2883 env
->CP0_Config5
|= (1 << CP0C5_FRE
);
2884 compute_hflags(env
);
2886 helper_raise_exception(env
, EXCP_RI
);
2890 if ((env
->insn_flags
& ISA_MIPS32R6
) || (arg1
& 0xffffff00)) {
2893 env
->active_fpu
.fcr31
= (env
->active_fpu
.fcr31
& 0x017fffff) |
2894 ((arg1
& 0xfe) << 24) |
2895 ((arg1
& 0x1) << 23);
2898 if (arg1
& 0x007c0000) {
2901 env
->active_fpu
.fcr31
= (env
->active_fpu
.fcr31
& 0xfffc0f83) |
2902 (arg1
& 0x0003f07c);
2905 if (arg1
& 0x007c0000) {
2908 env
->active_fpu
.fcr31
= (env
->active_fpu
.fcr31
& 0xfefff07c) |
2909 (arg1
& 0x00000f83) |
2910 ((arg1
& 0x4) << 22);
2913 env
->active_fpu
.fcr31
= (arg1
& env
->active_fpu
.fcr31_rw_bitmask
) |
2914 (env
->active_fpu
.fcr31
& ~(env
->active_fpu
.fcr31_rw_bitmask
));
2917 if (env
->insn_flags
& ISA_MIPS32R6
) {
2918 do_raise_exception(env
, EXCP_RI
, GETPC());
2922 restore_fp_status(env
);
2923 set_float_exception_flags(0, &env
->active_fpu
.fp_status
);
2924 if ((GET_FP_ENABLE(env
->active_fpu
.fcr31
) | 0x20) &
2925 GET_FP_CAUSE(env
->active_fpu
.fcr31
)) {
2926 do_raise_exception(env
, EXCP_FPE
, GETPC());
2930 int ieee_ex_to_mips(int xcpt
)
2934 if (xcpt
& float_flag_invalid
) {
2937 if (xcpt
& float_flag_overflow
) {
2940 if (xcpt
& float_flag_underflow
) {
2941 ret
|= FP_UNDERFLOW
;
2943 if (xcpt
& float_flag_divbyzero
) {
2946 if (xcpt
& float_flag_inexact
) {
2953 static inline void update_fcr31(CPUMIPSState
*env
, uintptr_t pc
)
2955 int tmp
= ieee_ex_to_mips(get_float_exception_flags(
2956 &env
->active_fpu
.fp_status
));
2958 SET_FP_CAUSE(env
->active_fpu
.fcr31
, tmp
);
2961 set_float_exception_flags(0, &env
->active_fpu
.fp_status
);
2963 if (GET_FP_ENABLE(env
->active_fpu
.fcr31
) & tmp
) {
2964 do_raise_exception(env
, EXCP_FPE
, pc
);
2966 UPDATE_FP_FLAGS(env
->active_fpu
.fcr31
, tmp
);
2973 * Single precition routines have a "s" suffix, double precision a
2974 * "d" suffix, 32bit integer "w", 64bit integer "l", paired single "ps",
2975 * paired single lower "pl", paired single upper "pu".
2978 /* unary operations, modifying fp status */
2979 uint64_t helper_float_sqrt_d(CPUMIPSState
*env
, uint64_t fdt0
)
2981 fdt0
= float64_sqrt(fdt0
, &env
->active_fpu
.fp_status
);
2982 update_fcr31(env
, GETPC());
2986 uint32_t helper_float_sqrt_s(CPUMIPSState
*env
, uint32_t fst0
)
2988 fst0
= float32_sqrt(fst0
, &env
->active_fpu
.fp_status
);
2989 update_fcr31(env
, GETPC());
2993 uint64_t helper_float_cvtd_s(CPUMIPSState
*env
, uint32_t fst0
)
2997 fdt2
= float32_to_float64(fst0
, &env
->active_fpu
.fp_status
);
2998 update_fcr31(env
, GETPC());
3002 uint64_t helper_float_cvtd_w(CPUMIPSState
*env
, uint32_t wt0
)
3006 fdt2
= int32_to_float64(wt0
, &env
->active_fpu
.fp_status
);
3007 update_fcr31(env
, GETPC());
3011 uint64_t helper_float_cvtd_l(CPUMIPSState
*env
, uint64_t dt0
)
3015 fdt2
= int64_to_float64(dt0
, &env
->active_fpu
.fp_status
);
3016 update_fcr31(env
, GETPC());
3020 uint64_t helper_float_cvt_l_d(CPUMIPSState
*env
, uint64_t fdt0
)
3024 dt2
= float64_to_int64(fdt0
, &env
->active_fpu
.fp_status
);
3025 if (get_float_exception_flags(&env
->active_fpu
.fp_status
)
3026 & (float_flag_invalid
| float_flag_overflow
)) {
3027 dt2
= FP_TO_INT64_OVERFLOW
;
3029 update_fcr31(env
, GETPC());
3033 uint64_t helper_float_cvt_l_s(CPUMIPSState
*env
, uint32_t fst0
)
3037 dt2
= float32_to_int64(fst0
, &env
->active_fpu
.fp_status
);
3038 if (get_float_exception_flags(&env
->active_fpu
.fp_status
)
3039 & (float_flag_invalid
| float_flag_overflow
)) {
3040 dt2
= FP_TO_INT64_OVERFLOW
;
3042 update_fcr31(env
, GETPC());
3046 uint64_t helper_float_cvtps_pw(CPUMIPSState
*env
, uint64_t dt0
)
3051 fst2
= int32_to_float32(dt0
& 0XFFFFFFFF, &env
->active_fpu
.fp_status
);
3052 fsth2
= int32_to_float32(dt0
>> 32, &env
->active_fpu
.fp_status
);
3053 update_fcr31(env
, GETPC());
3054 return ((uint64_t)fsth2
<< 32) | fst2
;
3057 uint64_t helper_float_cvtpw_ps(CPUMIPSState
*env
, uint64_t fdt0
)
3063 wt2
= float32_to_int32(fdt0
& 0XFFFFFFFF, &env
->active_fpu
.fp_status
);
3064 excp
= get_float_exception_flags(&env
->active_fpu
.fp_status
);
3065 if (excp
& (float_flag_overflow
| float_flag_invalid
)) {
3066 wt2
= FP_TO_INT32_OVERFLOW
;
3069 set_float_exception_flags(0, &env
->active_fpu
.fp_status
);
3070 wth2
= float32_to_int32(fdt0
>> 32, &env
->active_fpu
.fp_status
);
3071 excph
= get_float_exception_flags(&env
->active_fpu
.fp_status
);
3072 if (excph
& (float_flag_overflow
| float_flag_invalid
)) {
3073 wth2
= FP_TO_INT32_OVERFLOW
;
3076 set_float_exception_flags(excp
| excph
, &env
->active_fpu
.fp_status
);
3077 update_fcr31(env
, GETPC());
3079 return ((uint64_t)wth2
<< 32) | wt2
;
3082 uint32_t helper_float_cvts_d(CPUMIPSState
*env
, uint64_t fdt0
)
3086 fst2
= float64_to_float32(fdt0
, &env
->active_fpu
.fp_status
);
3087 update_fcr31(env
, GETPC());
3091 uint32_t helper_float_cvts_w(CPUMIPSState
*env
, uint32_t wt0
)
3095 fst2
= int32_to_float32(wt0
, &env
->active_fpu
.fp_status
);
3096 update_fcr31(env
, GETPC());
3100 uint32_t helper_float_cvts_l(CPUMIPSState
*env
, uint64_t dt0
)
3104 fst2
= int64_to_float32(dt0
, &env
->active_fpu
.fp_status
);
3105 update_fcr31(env
, GETPC());
3109 uint32_t helper_float_cvts_pl(CPUMIPSState
*env
, uint32_t wt0
)
3114 update_fcr31(env
, GETPC());
3118 uint32_t helper_float_cvts_pu(CPUMIPSState
*env
, uint32_t wth0
)
3123 update_fcr31(env
, GETPC());
3127 uint32_t helper_float_cvt_w_s(CPUMIPSState
*env
, uint32_t fst0
)
3131 wt2
= float32_to_int32(fst0
, &env
->active_fpu
.fp_status
);
3132 if (get_float_exception_flags(&env
->active_fpu
.fp_status
)
3133 & (float_flag_invalid
| float_flag_overflow
)) {
3134 wt2
= FP_TO_INT32_OVERFLOW
;
3136 update_fcr31(env
, GETPC());
3140 uint32_t helper_float_cvt_w_d(CPUMIPSState
*env
, uint64_t fdt0
)
3144 wt2
= float64_to_int32(fdt0
, &env
->active_fpu
.fp_status
);
3145 if (get_float_exception_flags(&env
->active_fpu
.fp_status
)
3146 & (float_flag_invalid
| float_flag_overflow
)) {
3147 wt2
= FP_TO_INT32_OVERFLOW
;
3149 update_fcr31(env
, GETPC());
3153 uint64_t helper_float_round_l_d(CPUMIPSState
*env
, uint64_t fdt0
)
3157 set_float_rounding_mode(float_round_nearest_even
,
3158 &env
->active_fpu
.fp_status
);
3159 dt2
= float64_to_int64(fdt0
, &env
->active_fpu
.fp_status
);
3160 restore_rounding_mode(env
);
3161 if (get_float_exception_flags(&env
->active_fpu
.fp_status
)
3162 & (float_flag_invalid
| float_flag_overflow
)) {
3163 dt2
= FP_TO_INT64_OVERFLOW
;
3165 update_fcr31(env
, GETPC());
3169 uint64_t helper_float_round_l_s(CPUMIPSState
*env
, uint32_t fst0
)
3173 set_float_rounding_mode(float_round_nearest_even
,
3174 &env
->active_fpu
.fp_status
);
3175 dt2
= float32_to_int64(fst0
, &env
->active_fpu
.fp_status
);
3176 restore_rounding_mode(env
);
3177 if (get_float_exception_flags(&env
->active_fpu
.fp_status
)
3178 & (float_flag_invalid
| float_flag_overflow
)) {
3179 dt2
= FP_TO_INT64_OVERFLOW
;
3181 update_fcr31(env
, GETPC());
3185 uint32_t helper_float_round_w_d(CPUMIPSState
*env
, uint64_t fdt0
)
3189 set_float_rounding_mode(float_round_nearest_even
,
3190 &env
->active_fpu
.fp_status
);
3191 wt2
= float64_to_int32(fdt0
, &env
->active_fpu
.fp_status
);
3192 restore_rounding_mode(env
);
3193 if (get_float_exception_flags(&env
->active_fpu
.fp_status
)
3194 & (float_flag_invalid
| float_flag_overflow
)) {
3195 wt2
= FP_TO_INT32_OVERFLOW
;
3197 update_fcr31(env
, GETPC());
3201 uint32_t helper_float_round_w_s(CPUMIPSState
*env
, uint32_t fst0
)
3205 set_float_rounding_mode(float_round_nearest_even
,
3206 &env
->active_fpu
.fp_status
);
3207 wt2
= float32_to_int32(fst0
, &env
->active_fpu
.fp_status
);
3208 restore_rounding_mode(env
);
3209 if (get_float_exception_flags(&env
->active_fpu
.fp_status
)
3210 & (float_flag_invalid
| float_flag_overflow
)) {
3211 wt2
= FP_TO_INT32_OVERFLOW
;
3213 update_fcr31(env
, GETPC());
3217 uint64_t helper_float_trunc_l_d(CPUMIPSState
*env
, uint64_t fdt0
)
3221 dt2
= float64_to_int64_round_to_zero(fdt0
,
3222 &env
->active_fpu
.fp_status
);
3223 if (get_float_exception_flags(&env
->active_fpu
.fp_status
)
3224 & (float_flag_invalid
| float_flag_overflow
)) {
3225 dt2
= FP_TO_INT64_OVERFLOW
;
3227 update_fcr31(env
, GETPC());
3231 uint64_t helper_float_trunc_l_s(CPUMIPSState
*env
, uint32_t fst0
)
3235 dt2
= float32_to_int64_round_to_zero(fst0
, &env
->active_fpu
.fp_status
);
3236 if (get_float_exception_flags(&env
->active_fpu
.fp_status
)
3237 & (float_flag_invalid
| float_flag_overflow
)) {
3238 dt2
= FP_TO_INT64_OVERFLOW
;
3240 update_fcr31(env
, GETPC());
3244 uint32_t helper_float_trunc_w_d(CPUMIPSState
*env
, uint64_t fdt0
)
3248 wt2
= float64_to_int32_round_to_zero(fdt0
, &env
->active_fpu
.fp_status
);
3249 if (get_float_exception_flags(&env
->active_fpu
.fp_status
)
3250 & (float_flag_invalid
| float_flag_overflow
)) {
3251 wt2
= FP_TO_INT32_OVERFLOW
;
3253 update_fcr31(env
, GETPC());
3257 uint32_t helper_float_trunc_w_s(CPUMIPSState
*env
, uint32_t fst0
)
3261 wt2
= float32_to_int32_round_to_zero(fst0
, &env
->active_fpu
.fp_status
);
3262 if (get_float_exception_flags(&env
->active_fpu
.fp_status
)
3263 & (float_flag_invalid
| float_flag_overflow
)) {
3264 wt2
= FP_TO_INT32_OVERFLOW
;
3266 update_fcr31(env
, GETPC());
3270 uint64_t helper_float_ceil_l_d(CPUMIPSState
*env
, uint64_t fdt0
)
3274 set_float_rounding_mode(float_round_up
, &env
->active_fpu
.fp_status
);
3275 dt2
= float64_to_int64(fdt0
, &env
->active_fpu
.fp_status
);
3276 restore_rounding_mode(env
);
3277 if (get_float_exception_flags(&env
->active_fpu
.fp_status
)
3278 & (float_flag_invalid
| float_flag_overflow
)) {
3279 dt2
= FP_TO_INT64_OVERFLOW
;
3281 update_fcr31(env
, GETPC());
3285 uint64_t helper_float_ceil_l_s(CPUMIPSState
*env
, uint32_t fst0
)
3289 set_float_rounding_mode(float_round_up
, &env
->active_fpu
.fp_status
);
3290 dt2
= float32_to_int64(fst0
, &env
->active_fpu
.fp_status
);
3291 restore_rounding_mode(env
);
3292 if (get_float_exception_flags(&env
->active_fpu
.fp_status
)
3293 & (float_flag_invalid
| float_flag_overflow
)) {
3294 dt2
= FP_TO_INT64_OVERFLOW
;
3296 update_fcr31(env
, GETPC());
3300 uint32_t helper_float_ceil_w_d(CPUMIPSState
*env
, uint64_t fdt0
)
3304 set_float_rounding_mode(float_round_up
, &env
->active_fpu
.fp_status
);
3305 wt2
= float64_to_int32(fdt0
, &env
->active_fpu
.fp_status
);
3306 restore_rounding_mode(env
);
3307 if (get_float_exception_flags(&env
->active_fpu
.fp_status
)
3308 & (float_flag_invalid
| float_flag_overflow
)) {
3309 wt2
= FP_TO_INT32_OVERFLOW
;
3311 update_fcr31(env
, GETPC());
3315 uint32_t helper_float_ceil_w_s(CPUMIPSState
*env
, uint32_t fst0
)
3319 set_float_rounding_mode(float_round_up
, &env
->active_fpu
.fp_status
);
3320 wt2
= float32_to_int32(fst0
, &env
->active_fpu
.fp_status
);
3321 restore_rounding_mode(env
);
3322 if (get_float_exception_flags(&env
->active_fpu
.fp_status
)
3323 & (float_flag_invalid
| float_flag_overflow
)) {
3324 wt2
= FP_TO_INT32_OVERFLOW
;
3326 update_fcr31(env
, GETPC());
3330 uint64_t helper_float_floor_l_d(CPUMIPSState
*env
, uint64_t fdt0
)
3334 set_float_rounding_mode(float_round_down
, &env
->active_fpu
.fp_status
);
3335 dt2
= float64_to_int64(fdt0
, &env
->active_fpu
.fp_status
);
3336 restore_rounding_mode(env
);
3337 if (get_float_exception_flags(&env
->active_fpu
.fp_status
)
3338 & (float_flag_invalid
| float_flag_overflow
)) {
3339 dt2
= FP_TO_INT64_OVERFLOW
;
3341 update_fcr31(env
, GETPC());
3345 uint64_t helper_float_floor_l_s(CPUMIPSState
*env
, uint32_t fst0
)
3349 set_float_rounding_mode(float_round_down
, &env
->active_fpu
.fp_status
);
3350 dt2
= float32_to_int64(fst0
, &env
->active_fpu
.fp_status
);
3351 restore_rounding_mode(env
);
3352 if (get_float_exception_flags(&env
->active_fpu
.fp_status
)
3353 & (float_flag_invalid
| float_flag_overflow
)) {
3354 dt2
= FP_TO_INT64_OVERFLOW
;
3356 update_fcr31(env
, GETPC());
3360 uint32_t helper_float_floor_w_d(CPUMIPSState
*env
, uint64_t fdt0
)
3364 set_float_rounding_mode(float_round_down
, &env
->active_fpu
.fp_status
);
3365 wt2
= float64_to_int32(fdt0
, &env
->active_fpu
.fp_status
);
3366 restore_rounding_mode(env
);
3367 if (get_float_exception_flags(&env
->active_fpu
.fp_status
)
3368 & (float_flag_invalid
| float_flag_overflow
)) {
3369 wt2
= FP_TO_INT32_OVERFLOW
;
3371 update_fcr31(env
, GETPC());
3375 uint32_t helper_float_floor_w_s(CPUMIPSState
*env
, uint32_t fst0
)
3379 set_float_rounding_mode(float_round_down
, &env
->active_fpu
.fp_status
);
3380 wt2
= float32_to_int32(fst0
, &env
->active_fpu
.fp_status
);
3381 restore_rounding_mode(env
);
3382 if (get_float_exception_flags(&env
->active_fpu
.fp_status
)
3383 & (float_flag_invalid
| float_flag_overflow
)) {
3384 wt2
= FP_TO_INT32_OVERFLOW
;
3386 update_fcr31(env
, GETPC());
3390 uint64_t helper_float_cvt_2008_l_d(CPUMIPSState
*env
, uint64_t fdt0
)
3394 dt2
= float64_to_int64(fdt0
, &env
->active_fpu
.fp_status
);
3395 if (get_float_exception_flags(&env
->active_fpu
.fp_status
)
3396 & float_flag_invalid
) {
3397 if (float64_is_any_nan(fdt0
)) {
3401 update_fcr31(env
, GETPC());
3405 uint64_t helper_float_cvt_2008_l_s(CPUMIPSState
*env
, uint32_t fst0
)
3409 dt2
= float32_to_int64(fst0
, &env
->active_fpu
.fp_status
);
3410 if (get_float_exception_flags(&env
->active_fpu
.fp_status
)
3411 & float_flag_invalid
) {
3412 if (float32_is_any_nan(fst0
)) {
3416 update_fcr31(env
, GETPC());
3420 uint32_t helper_float_cvt_2008_w_d(CPUMIPSState
*env
, uint64_t fdt0
)
3424 wt2
= float64_to_int32(fdt0
, &env
->active_fpu
.fp_status
);
3425 if (get_float_exception_flags(&env
->active_fpu
.fp_status
)
3426 & float_flag_invalid
) {
3427 if (float64_is_any_nan(fdt0
)) {
3431 update_fcr31(env
, GETPC());
3435 uint32_t helper_float_cvt_2008_w_s(CPUMIPSState
*env
, uint32_t fst0
)
3439 wt2
= float32_to_int32(fst0
, &env
->active_fpu
.fp_status
);
3440 if (get_float_exception_flags(&env
->active_fpu
.fp_status
)
3441 & float_flag_invalid
) {
3442 if (float32_is_any_nan(fst0
)) {
3446 update_fcr31(env
, GETPC());
3450 uint64_t helper_float_round_2008_l_d(CPUMIPSState
*env
, uint64_t fdt0
)
3454 set_float_rounding_mode(float_round_nearest_even
,
3455 &env
->active_fpu
.fp_status
);
3456 dt2
= float64_to_int64(fdt0
, &env
->active_fpu
.fp_status
);
3457 restore_rounding_mode(env
);
3458 if (get_float_exception_flags(&env
->active_fpu
.fp_status
)
3459 & float_flag_invalid
) {
3460 if (float64_is_any_nan(fdt0
)) {
3464 update_fcr31(env
, GETPC());
3468 uint64_t helper_float_round_2008_l_s(CPUMIPSState
*env
, uint32_t fst0
)
3472 set_float_rounding_mode(float_round_nearest_even
,
3473 &env
->active_fpu
.fp_status
);
3474 dt2
= float32_to_int64(fst0
, &env
->active_fpu
.fp_status
);
3475 restore_rounding_mode(env
);
3476 if (get_float_exception_flags(&env
->active_fpu
.fp_status
)
3477 & float_flag_invalid
) {
3478 if (float32_is_any_nan(fst0
)) {
3482 update_fcr31(env
, GETPC());
3486 uint32_t helper_float_round_2008_w_d(CPUMIPSState
*env
, uint64_t fdt0
)
3490 set_float_rounding_mode(float_round_nearest_even
,
3491 &env
->active_fpu
.fp_status
);
3492 wt2
= float64_to_int32(fdt0
, &env
->active_fpu
.fp_status
);
3493 restore_rounding_mode(env
);
3494 if (get_float_exception_flags(&env
->active_fpu
.fp_status
)
3495 & float_flag_invalid
) {
3496 if (float64_is_any_nan(fdt0
)) {
3500 update_fcr31(env
, GETPC());
3504 uint32_t helper_float_round_2008_w_s(CPUMIPSState
*env
, uint32_t fst0
)
3508 set_float_rounding_mode(float_round_nearest_even
,
3509 &env
->active_fpu
.fp_status
);
3510 wt2
= float32_to_int32(fst0
, &env
->active_fpu
.fp_status
);
3511 restore_rounding_mode(env
);
3512 if (get_float_exception_flags(&env
->active_fpu
.fp_status
)
3513 & float_flag_invalid
) {
3514 if (float32_is_any_nan(fst0
)) {
3518 update_fcr31(env
, GETPC());
3522 uint64_t helper_float_trunc_2008_l_d(CPUMIPSState
*env
, uint64_t fdt0
)
3526 dt2
= float64_to_int64_round_to_zero(fdt0
, &env
->active_fpu
.fp_status
);
3527 if (get_float_exception_flags(&env
->active_fpu
.fp_status
)
3528 & float_flag_invalid
) {
3529 if (float64_is_any_nan(fdt0
)) {
3533 update_fcr31(env
, GETPC());
3537 uint64_t helper_float_trunc_2008_l_s(CPUMIPSState
*env
, uint32_t fst0
)
3541 dt2
= float32_to_int64_round_to_zero(fst0
, &env
->active_fpu
.fp_status
);
3542 if (get_float_exception_flags(&env
->active_fpu
.fp_status
)
3543 & float_flag_invalid
) {
3544 if (float32_is_any_nan(fst0
)) {
3548 update_fcr31(env
, GETPC());
3552 uint32_t helper_float_trunc_2008_w_d(CPUMIPSState
*env
, uint64_t fdt0
)
3556 wt2
= float64_to_int32_round_to_zero(fdt0
, &env
->active_fpu
.fp_status
);
3557 if (get_float_exception_flags(&env
->active_fpu
.fp_status
)
3558 & float_flag_invalid
) {
3559 if (float64_is_any_nan(fdt0
)) {
3563 update_fcr31(env
, GETPC());
3567 uint32_t helper_float_trunc_2008_w_s(CPUMIPSState
*env
, uint32_t fst0
)
3571 wt2
= float32_to_int32_round_to_zero(fst0
, &env
->active_fpu
.fp_status
);
3572 if (get_float_exception_flags(&env
->active_fpu
.fp_status
)
3573 & float_flag_invalid
) {
3574 if (float32_is_any_nan(fst0
)) {
3578 update_fcr31(env
, GETPC());
3582 uint64_t helper_float_ceil_2008_l_d(CPUMIPSState
*env
, uint64_t fdt0
)
3586 set_float_rounding_mode(float_round_up
, &env
->active_fpu
.fp_status
);
3587 dt2
= float64_to_int64(fdt0
, &env
->active_fpu
.fp_status
);
3588 restore_rounding_mode(env
);
3589 if (get_float_exception_flags(&env
->active_fpu
.fp_status
)
3590 & float_flag_invalid
) {
3591 if (float64_is_any_nan(fdt0
)) {
3595 update_fcr31(env
, GETPC());
3599 uint64_t helper_float_ceil_2008_l_s(CPUMIPSState
*env
, uint32_t fst0
)
3603 set_float_rounding_mode(float_round_up
, &env
->active_fpu
.fp_status
);
3604 dt2
= float32_to_int64(fst0
, &env
->active_fpu
.fp_status
);
3605 restore_rounding_mode(env
);
3606 if (get_float_exception_flags(&env
->active_fpu
.fp_status
)
3607 & float_flag_invalid
) {
3608 if (float32_is_any_nan(fst0
)) {
3612 update_fcr31(env
, GETPC());
3616 uint32_t helper_float_ceil_2008_w_d(CPUMIPSState
*env
, uint64_t fdt0
)
3620 set_float_rounding_mode(float_round_up
, &env
->active_fpu
.fp_status
);
3621 wt2
= float64_to_int32(fdt0
, &env
->active_fpu
.fp_status
);
3622 restore_rounding_mode(env
);
3623 if (get_float_exception_flags(&env
->active_fpu
.fp_status
)
3624 & float_flag_invalid
) {
3625 if (float64_is_any_nan(fdt0
)) {
3629 update_fcr31(env
, GETPC());
3633 uint32_t helper_float_ceil_2008_w_s(CPUMIPSState
*env
, uint32_t fst0
)
3637 set_float_rounding_mode(float_round_up
, &env
->active_fpu
.fp_status
);
3638 wt2
= float32_to_int32(fst0
, &env
->active_fpu
.fp_status
);
3639 restore_rounding_mode(env
);
3640 if (get_float_exception_flags(&env
->active_fpu
.fp_status
)
3641 & float_flag_invalid
) {
3642 if (float32_is_any_nan(fst0
)) {
3646 update_fcr31(env
, GETPC());
3650 uint64_t helper_float_floor_2008_l_d(CPUMIPSState
*env
, uint64_t fdt0
)
3654 set_float_rounding_mode(float_round_down
, &env
->active_fpu
.fp_status
);
3655 dt2
= float64_to_int64(fdt0
, &env
->active_fpu
.fp_status
);
3656 restore_rounding_mode(env
);
3657 if (get_float_exception_flags(&env
->active_fpu
.fp_status
)
3658 & float_flag_invalid
) {
3659 if (float64_is_any_nan(fdt0
)) {
3663 update_fcr31(env
, GETPC());
3667 uint64_t helper_float_floor_2008_l_s(CPUMIPSState
*env
, uint32_t fst0
)
3671 set_float_rounding_mode(float_round_down
, &env
->active_fpu
.fp_status
);
3672 dt2
= float32_to_int64(fst0
, &env
->active_fpu
.fp_status
);
3673 restore_rounding_mode(env
);
3674 if (get_float_exception_flags(&env
->active_fpu
.fp_status
)
3675 & float_flag_invalid
) {
3676 if (float32_is_any_nan(fst0
)) {
3680 update_fcr31(env
, GETPC());
3684 uint32_t helper_float_floor_2008_w_d(CPUMIPSState
*env
, uint64_t fdt0
)
3688 set_float_rounding_mode(float_round_down
, &env
->active_fpu
.fp_status
);
3689 wt2
= float64_to_int32(fdt0
, &env
->active_fpu
.fp_status
);
3690 restore_rounding_mode(env
);
3691 if (get_float_exception_flags(&env
->active_fpu
.fp_status
)
3692 & float_flag_invalid
) {
3693 if (float64_is_any_nan(fdt0
)) {
3697 update_fcr31(env
, GETPC());
3701 uint32_t helper_float_floor_2008_w_s(CPUMIPSState
*env
, uint32_t fst0
)
3705 set_float_rounding_mode(float_round_down
, &env
->active_fpu
.fp_status
);
3706 wt2
= float32_to_int32(fst0
, &env
->active_fpu
.fp_status
);
3707 restore_rounding_mode(env
);
3708 if (get_float_exception_flags(&env
->active_fpu
.fp_status
)
3709 & float_flag_invalid
) {
3710 if (float32_is_any_nan(fst0
)) {
3714 update_fcr31(env
, GETPC());
3718 /* unary operations, not modifying fp status */
/*
 * Status-preserving unary ops (abs/chs are pure sign-bit manipulations, so
 * no FCSR update is needed). The _ps variant applies the op to both halves.
 * NOTE(review): the FLOAT_UNOP(abs)/FLOAT_UNOP(chs) instantiations and the
 * trailing #undef were lost in extraction and are reconstructed here.
 */
#define FLOAT_UNOP(name)                                    \
uint64_t helper_float_ ## name ## _d(uint64_t fdt0)         \
{                                                           \
    return float64_ ## name(fdt0);                          \
}                                                           \
                                                            \
uint32_t helper_float_ ## name ## _s(uint32_t fst0)         \
{                                                           \
    return float32_ ## name(fst0);                          \
}                                                           \
                                                            \
uint64_t helper_float_ ## name ## _ps(uint64_t fdt0)        \
{                                                           \
    uint32_t wt0;                                           \
    uint32_t wth0;                                          \
                                                            \
    wt0 = float32_ ## name(fdt0 & 0XFFFFFFFF);              \
    wth0 = float32_ ## name(fdt0 >> 32);                    \
    return ((uint64_t)wth0 << 32) | wt0;                    \
}

FLOAT_UNOP(abs)
FLOAT_UNOP(chs)
#undef FLOAT_UNOP
3741 /* MIPS specific unary operations */
3742 uint64_t helper_float_recip_d(CPUMIPSState
*env
, uint64_t fdt0
)
3746 fdt2
= float64_div(float64_one
, fdt0
, &env
->active_fpu
.fp_status
);
3747 update_fcr31(env
, GETPC());
3751 uint32_t helper_float_recip_s(CPUMIPSState
*env
, uint32_t fst0
)
3755 fst2
= float32_div(float32_one
, fst0
, &env
->active_fpu
.fp_status
);
3756 update_fcr31(env
, GETPC());
3760 uint64_t helper_float_rsqrt_d(CPUMIPSState
*env
, uint64_t fdt0
)
3764 fdt2
= float64_sqrt(fdt0
, &env
->active_fpu
.fp_status
);
3765 fdt2
= float64_div(float64_one
, fdt2
, &env
->active_fpu
.fp_status
);
3766 update_fcr31(env
, GETPC());
3770 uint32_t helper_float_rsqrt_s(CPUMIPSState
*env
, uint32_t fst0
)
3774 fst2
= float32_sqrt(fst0
, &env
->active_fpu
.fp_status
);
3775 fst2
= float32_div(float32_one
, fst2
, &env
->active_fpu
.fp_status
);
3776 update_fcr31(env
, GETPC());
3780 uint64_t helper_float_recip1_d(CPUMIPSState
*env
, uint64_t fdt0
)
3784 fdt2
= float64_div(float64_one
, fdt0
, &env
->active_fpu
.fp_status
);
3785 update_fcr31(env
, GETPC());
3789 uint32_t helper_float_recip1_s(CPUMIPSState
*env
, uint32_t fst0
)
3793 fst2
= float32_div(float32_one
, fst0
, &env
->active_fpu
.fp_status
);
3794 update_fcr31(env
, GETPC());
3798 uint64_t helper_float_recip1_ps(CPUMIPSState
*env
, uint64_t fdt0
)
3803 fst2
= float32_div(float32_one
, fdt0
& 0XFFFFFFFF,
3804 &env
->active_fpu
.fp_status
);
3805 fsth2
= float32_div(float32_one
, fdt0
>> 32, &env
->active_fpu
.fp_status
);
3806 update_fcr31(env
, GETPC());
3807 return ((uint64_t)fsth2
<< 32) | fst2
;
3810 uint64_t helper_float_rsqrt1_d(CPUMIPSState
*env
, uint64_t fdt0
)
3814 fdt2
= float64_sqrt(fdt0
, &env
->active_fpu
.fp_status
);
3815 fdt2
= float64_div(float64_one
, fdt2
, &env
->active_fpu
.fp_status
);
3816 update_fcr31(env
, GETPC());
3820 uint32_t helper_float_rsqrt1_s(CPUMIPSState
*env
, uint32_t fst0
)
3824 fst2
= float32_sqrt(fst0
, &env
->active_fpu
.fp_status
);
3825 fst2
= float32_div(float32_one
, fst2
, &env
->active_fpu
.fp_status
);
3826 update_fcr31(env
, GETPC());
3830 uint64_t helper_float_rsqrt1_ps(CPUMIPSState
*env
, uint64_t fdt0
)
3835 fst2
= float32_sqrt(fdt0
& 0XFFFFFFFF, &env
->active_fpu
.fp_status
);
3836 fsth2
= float32_sqrt(fdt0
>> 32, &env
->active_fpu
.fp_status
);
3837 fst2
= float32_div(float32_one
, fst2
, &env
->active_fpu
.fp_status
);
3838 fsth2
= float32_div(float32_one
, fsth2
, &env
->active_fpu
.fp_status
);
3839 update_fcr31(env
, GETPC());
3840 return ((uint64_t)fsth2
<< 32) | fst2
;
3843 #define FLOAT_RINT(name, bits) \
3844 uint ## bits ## _t helper_float_ ## name(CPUMIPSState *env, \
3845 uint ## bits ## _t fs) \
3847 uint ## bits ## _t fdret; \
3849 fdret = float ## bits ## _round_to_int(fs, &env->active_fpu.fp_status); \
3850 update_fcr31(env, GETPC()); \
3854 FLOAT_RINT(rint_s
, 32)
3855 FLOAT_RINT(rint_d
, 64)
3858 #define FLOAT_CLASS_SIGNALING_NAN 0x001
3859 #define FLOAT_CLASS_QUIET_NAN 0x002
3860 #define FLOAT_CLASS_NEGATIVE_INFINITY 0x004
3861 #define FLOAT_CLASS_NEGATIVE_NORMAL 0x008
3862 #define FLOAT_CLASS_NEGATIVE_SUBNORMAL 0x010
3863 #define FLOAT_CLASS_NEGATIVE_ZERO 0x020
3864 #define FLOAT_CLASS_POSITIVE_INFINITY 0x040
3865 #define FLOAT_CLASS_POSITIVE_NORMAL 0x080
3866 #define FLOAT_CLASS_POSITIVE_SUBNORMAL 0x100
3867 #define FLOAT_CLASS_POSITIVE_ZERO 0x200
3869 #define FLOAT_CLASS(name, bits) \
3870 uint ## bits ## _t float_ ## name(uint ## bits ## _t arg, \
3871 float_status *status) \
3873 if (float ## bits ## _is_signaling_nan(arg, status)) { \
3874 return FLOAT_CLASS_SIGNALING_NAN; \
3875 } else if (float ## bits ## _is_quiet_nan(arg, status)) { \
3876 return FLOAT_CLASS_QUIET_NAN; \
3877 } else if (float ## bits ## _is_neg(arg)) { \
3878 if (float ## bits ## _is_infinity(arg)) { \
3879 return FLOAT_CLASS_NEGATIVE_INFINITY; \
3880 } else if (float ## bits ## _is_zero(arg)) { \
3881 return FLOAT_CLASS_NEGATIVE_ZERO; \
3882 } else if (float ## bits ## _is_zero_or_denormal(arg)) { \
3883 return FLOAT_CLASS_NEGATIVE_SUBNORMAL; \
3885 return FLOAT_CLASS_NEGATIVE_NORMAL; \
3888 if (float ## bits ## _is_infinity(arg)) { \
3889 return FLOAT_CLASS_POSITIVE_INFINITY; \
3890 } else if (float ## bits ## _is_zero(arg)) { \
3891 return FLOAT_CLASS_POSITIVE_ZERO; \
3892 } else if (float ## bits ## _is_zero_or_denormal(arg)) { \
3893 return FLOAT_CLASS_POSITIVE_SUBNORMAL; \
3895 return FLOAT_CLASS_POSITIVE_NORMAL; \
3900 uint ## bits ## _t helper_float_ ## name(CPUMIPSState *env, \
3901 uint ## bits ## _t arg) \
3903 return float_ ## name(arg, &env->active_fpu.fp_status); \
3906 FLOAT_CLASS(class_s
, 32)
3907 FLOAT_CLASS(class_d
, 64)
3910 /* binary operations */
3911 #define FLOAT_BINOP(name) \
3912 uint64_t helper_float_ ## name ## _d(CPUMIPSState *env, \
3913 uint64_t fdt0, uint64_t fdt1) \
3917 dt2 = float64_ ## name(fdt0, fdt1, &env->active_fpu.fp_status);\
3918 update_fcr31(env, GETPC()); \
3922 uint32_t helper_float_ ## name ## _s(CPUMIPSState *env, \
3923 uint32_t fst0, uint32_t fst1) \
3927 wt2 = float32_ ## name(fst0, fst1, &env->active_fpu.fp_status);\
3928 update_fcr31(env, GETPC()); \
3932 uint64_t helper_float_ ## name ## _ps(CPUMIPSState *env, \
3936 uint32_t fst0 = fdt0 & 0XFFFFFFFF; \
3937 uint32_t fsth0 = fdt0 >> 32; \
3938 uint32_t fst1 = fdt1 & 0XFFFFFFFF; \
3939 uint32_t fsth1 = fdt1 >> 32; \
3943 wt2 = float32_ ## name(fst0, fst1, &env->active_fpu.fp_status); \
3944 wth2 = float32_ ## name(fsth0, fsth1, &env->active_fpu.fp_status); \
3945 update_fcr31(env, GETPC()); \
3946 return ((uint64_t)wth2 << 32) | wt2; \
3955 /* MIPS specific binary operations */
3956 uint64_t helper_float_recip2_d(CPUMIPSState
*env
, uint64_t fdt0
, uint64_t fdt2
)
3958 fdt2
= float64_mul(fdt0
, fdt2
, &env
->active_fpu
.fp_status
);
3959 fdt2
= float64_chs(float64_sub(fdt2
, float64_one
,
3960 &env
->active_fpu
.fp_status
));
3961 update_fcr31(env
, GETPC());
3965 uint32_t helper_float_recip2_s(CPUMIPSState
*env
, uint32_t fst0
, uint32_t fst2
)
3967 fst2
= float32_mul(fst0
, fst2
, &env
->active_fpu
.fp_status
);
3968 fst2
= float32_chs(float32_sub(fst2
, float32_one
,
3969 &env
->active_fpu
.fp_status
));
3970 update_fcr31(env
, GETPC());
3974 uint64_t helper_float_recip2_ps(CPUMIPSState
*env
, uint64_t fdt0
, uint64_t fdt2
)
3976 uint32_t fst0
= fdt0
& 0XFFFFFFFF;
3977 uint32_t fsth0
= fdt0
>> 32;
3978 uint32_t fst2
= fdt2
& 0XFFFFFFFF;
3979 uint32_t fsth2
= fdt2
>> 32;
3981 fst2
= float32_mul(fst0
, fst2
, &env
->active_fpu
.fp_status
);
3982 fsth2
= float32_mul(fsth0
, fsth2
, &env
->active_fpu
.fp_status
);
3983 fst2
= float32_chs(float32_sub(fst2
, float32_one
,
3984 &env
->active_fpu
.fp_status
));
3985 fsth2
= float32_chs(float32_sub(fsth2
, float32_one
,
3986 &env
->active_fpu
.fp_status
));
3987 update_fcr31(env
, GETPC());
3988 return ((uint64_t)fsth2
<< 32) | fst2
;
3991 uint64_t helper_float_rsqrt2_d(CPUMIPSState
*env
, uint64_t fdt0
, uint64_t fdt2
)
3993 fdt2
= float64_mul(fdt0
, fdt2
, &env
->active_fpu
.fp_status
);
3994 fdt2
= float64_sub(fdt2
, float64_one
, &env
->active_fpu
.fp_status
);
3995 fdt2
= float64_chs(float64_div(fdt2
, FLOAT_TWO64
,
3996 &env
->active_fpu
.fp_status
));
3997 update_fcr31(env
, GETPC());
4001 uint32_t helper_float_rsqrt2_s(CPUMIPSState
*env
, uint32_t fst0
, uint32_t fst2
)
4003 fst2
= float32_mul(fst0
, fst2
, &env
->active_fpu
.fp_status
);
4004 fst2
= float32_sub(fst2
, float32_one
, &env
->active_fpu
.fp_status
);
4005 fst2
= float32_chs(float32_div(fst2
, FLOAT_TWO32
,
4006 &env
->active_fpu
.fp_status
));
4007 update_fcr31(env
, GETPC());
4011 uint64_t helper_float_rsqrt2_ps(CPUMIPSState
*env
, uint64_t fdt0
, uint64_t fdt2
)
4013 uint32_t fst0
= fdt0
& 0XFFFFFFFF;
4014 uint32_t fsth0
= fdt0
>> 32;
4015 uint32_t fst2
= fdt2
& 0XFFFFFFFF;
4016 uint32_t fsth2
= fdt2
>> 32;
4018 fst2
= float32_mul(fst0
, fst2
, &env
->active_fpu
.fp_status
);
4019 fsth2
= float32_mul(fsth0
, fsth2
, &env
->active_fpu
.fp_status
);
4020 fst2
= float32_sub(fst2
, float32_one
, &env
->active_fpu
.fp_status
);
4021 fsth2
= float32_sub(fsth2
, float32_one
, &env
->active_fpu
.fp_status
);
4022 fst2
= float32_chs(float32_div(fst2
, FLOAT_TWO32
,
4023 &env
->active_fpu
.fp_status
));
4024 fsth2
= float32_chs(float32_div(fsth2
, FLOAT_TWO32
,
4025 &env
->active_fpu
.fp_status
));
4026 update_fcr31(env
, GETPC());
4027 return ((uint64_t)fsth2
<< 32) | fst2
;
4030 uint64_t helper_float_addr_ps(CPUMIPSState
*env
, uint64_t fdt0
, uint64_t fdt1
)
4032 uint32_t fst0
= fdt0
& 0XFFFFFFFF;
4033 uint32_t fsth0
= fdt0
>> 32;
4034 uint32_t fst1
= fdt1
& 0XFFFFFFFF;
4035 uint32_t fsth1
= fdt1
>> 32;
4039 fst2
= float32_add(fst0
, fsth0
, &env
->active_fpu
.fp_status
);
4040 fsth2
= float32_add(fst1
, fsth1
, &env
->active_fpu
.fp_status
);
4041 update_fcr31(env
, GETPC());
4042 return ((uint64_t)fsth2
<< 32) | fst2
;
4045 uint64_t helper_float_mulr_ps(CPUMIPSState
*env
, uint64_t fdt0
, uint64_t fdt1
)
4047 uint32_t fst0
= fdt0
& 0XFFFFFFFF;
4048 uint32_t fsth0
= fdt0
>> 32;
4049 uint32_t fst1
= fdt1
& 0XFFFFFFFF;
4050 uint32_t fsth1
= fdt1
>> 32;
4054 fst2
= float32_mul(fst0
, fsth0
, &env
->active_fpu
.fp_status
);
4055 fsth2
= float32_mul(fst1
, fsth1
, &env
->active_fpu
.fp_status
);
4056 update_fcr31(env
, GETPC());
4057 return ((uint64_t)fsth2
<< 32) | fst2
;
4060 #define FLOAT_MINMAX(name, bits, minmaxfunc) \
4061 uint ## bits ## _t helper_float_ ## name(CPUMIPSState *env, \
4062 uint ## bits ## _t fs, \
4063 uint ## bits ## _t ft) \
4065 uint ## bits ## _t fdret; \
4067 fdret = float ## bits ## _ ## minmaxfunc(fs, ft, \
4068 &env->active_fpu.fp_status); \
4069 update_fcr31(env, GETPC()); \
4073 FLOAT_MINMAX(max_s
, 32, maxnum
)
4074 FLOAT_MINMAX(max_d
, 64, maxnum
)
4075 FLOAT_MINMAX(maxa_s
, 32, maxnummag
)
4076 FLOAT_MINMAX(maxa_d
, 64, maxnummag
)
4078 FLOAT_MINMAX(min_s
, 32, minnum
)
4079 FLOAT_MINMAX(min_d
, 64, minnum
)
4080 FLOAT_MINMAX(mina_s
, 32, minnummag
)
4081 FLOAT_MINMAX(mina_d
, 64, minnummag
)
4084 /* ternary operations */
4085 #define UNFUSED_FMA(prefix, a, b, c, flags) \
4087 a = prefix##_mul(a, b, &env->active_fpu.fp_status); \
4088 if ((flags) & float_muladd_negate_c) { \
4089 a = prefix##_sub(a, c, &env->active_fpu.fp_status); \
4091 a = prefix##_add(a, c, &env->active_fpu.fp_status); \
4093 if ((flags) & float_muladd_negate_result) { \
4094 a = prefix##_chs(a); \
4098 /* FMA based operations */
4099 #define FLOAT_FMA(name, type) \
4100 uint64_t helper_float_ ## name ## _d(CPUMIPSState *env, \
4101 uint64_t fdt0, uint64_t fdt1, \
4104 UNFUSED_FMA(float64, fdt0, fdt1, fdt2, type); \
4105 update_fcr31(env, GETPC()); \
4109 uint32_t helper_float_ ## name ## _s(CPUMIPSState *env, \
4110 uint32_t fst0, uint32_t fst1, \
4113 UNFUSED_FMA(float32, fst0, fst1, fst2, type); \
4114 update_fcr31(env, GETPC()); \
4118 uint64_t helper_float_ ## name ## _ps(CPUMIPSState *env, \
4119 uint64_t fdt0, uint64_t fdt1, \
4122 uint32_t fst0 = fdt0 & 0XFFFFFFFF; \
4123 uint32_t fsth0 = fdt0 >> 32; \
4124 uint32_t fst1 = fdt1 & 0XFFFFFFFF; \
4125 uint32_t fsth1 = fdt1 >> 32; \
4126 uint32_t fst2 = fdt2 & 0XFFFFFFFF; \
4127 uint32_t fsth2 = fdt2 >> 32; \
4129 UNFUSED_FMA(float32, fst0, fst1, fst2, type); \
4130 UNFUSED_FMA(float32, fsth0, fsth1, fsth2, type); \
4131 update_fcr31(env, GETPC()); \
4132 return ((uint64_t)fsth0 << 32) | fst0; \
4135 FLOAT_FMA(msub
, float_muladd_negate_c
)
4136 FLOAT_FMA(nmadd
, float_muladd_negate_result
)
4137 FLOAT_FMA(nmsub
, float_muladd_negate_result
| float_muladd_negate_c
)
4140 #define FLOAT_FMADDSUB(name, bits, muladd_arg) \
4141 uint ## bits ## _t helper_float_ ## name(CPUMIPSState *env, \
4142 uint ## bits ## _t fs, \
4143 uint ## bits ## _t ft, \
4144 uint ## bits ## _t fd) \
4146 uint ## bits ## _t fdret; \
4148 fdret = float ## bits ## _muladd(fs, ft, fd, muladd_arg, \
4149 &env->active_fpu.fp_status); \
4150 update_fcr31(env, GETPC()); \
4154 FLOAT_FMADDSUB(maddf_s
, 32, 0)
4155 FLOAT_FMADDSUB(maddf_d
, 64, 0)
4156 FLOAT_FMADDSUB(msubf_s
, 32, float_muladd_negate_product
)
4157 FLOAT_FMADDSUB(msubf_d
, 64, float_muladd_negate_product
)
4158 #undef FLOAT_FMADDSUB
4160 /* compare operations */
4161 #define FOP_COND_D(op, cond) \
4162 void helper_cmp_d_ ## op(CPUMIPSState *env, uint64_t fdt0, \
4163 uint64_t fdt1, int cc) \
4167 update_fcr31(env, GETPC()); \
4169 SET_FP_COND(cc, env->active_fpu); \
4171 CLEAR_FP_COND(cc, env->active_fpu); \
4173 void helper_cmpabs_d_ ## op(CPUMIPSState *env, uint64_t fdt0, \
4174 uint64_t fdt1, int cc) \
4177 fdt0 = float64_abs(fdt0); \
4178 fdt1 = float64_abs(fdt1); \
4180 update_fcr31(env, GETPC()); \
4182 SET_FP_COND(cc, env->active_fpu); \
4184 CLEAR_FP_COND(cc, env->active_fpu); \
4188 * NOTE: the comma operator will make "cond" to eval to false,
4189 * but float64_unordered_quiet() is still called.
4191 FOP_COND_D(f
, (float64_unordered_quiet(fdt1
, fdt0
,
4192 &env
->active_fpu
.fp_status
), 0))
4193 FOP_COND_D(un
, float64_unordered_quiet(fdt1
, fdt0
,
4194 &env
->active_fpu
.fp_status
))
4195 FOP_COND_D(eq
, float64_eq_quiet(fdt0
, fdt1
,
4196 &env
->active_fpu
.fp_status
))
4197 FOP_COND_D(ueq
, float64_unordered_quiet(fdt1
, fdt0
,
4198 &env
->active_fpu
.fp_status
)
4199 || float64_eq_quiet(fdt0
, fdt1
,
4200 &env
->active_fpu
.fp_status
))
4201 FOP_COND_D(olt
, float64_lt_quiet(fdt0
, fdt1
,
4202 &env
->active_fpu
.fp_status
))
4203 FOP_COND_D(ult
, float64_unordered_quiet(fdt1
, fdt0
,
4204 &env
->active_fpu
.fp_status
)
4205 || float64_lt_quiet(fdt0
, fdt1
,
4206 &env
->active_fpu
.fp_status
))
4207 FOP_COND_D(ole
, float64_le_quiet(fdt0
, fdt1
,
4208 &env
->active_fpu
.fp_status
))
4209 FOP_COND_D(ule
, float64_unordered_quiet(fdt1
, fdt0
,
4210 &env
->active_fpu
.fp_status
)
4211 || float64_le_quiet(fdt0
, fdt1
,
4212 &env
->active_fpu
.fp_status
))
4214 * NOTE: the comma operator will make "cond" to eval to false,
4215 * but float64_unordered() is still called.
4217 FOP_COND_D(sf
, (float64_unordered(fdt1
, fdt0
,
4218 &env
->active_fpu
.fp_status
), 0))
4219 FOP_COND_D(ngle
, float64_unordered(fdt1
, fdt0
,
4220 &env
->active_fpu
.fp_status
))
4221 FOP_COND_D(seq
, float64_eq(fdt0
, fdt1
,
4222 &env
->active_fpu
.fp_status
))
4223 FOP_COND_D(ngl
, float64_unordered(fdt1
, fdt0
,
4224 &env
->active_fpu
.fp_status
)
4225 || float64_eq(fdt0
, fdt1
,
4226 &env
->active_fpu
.fp_status
))
4227 FOP_COND_D(lt
, float64_lt(fdt0
, fdt1
,
4228 &env
->active_fpu
.fp_status
))
4229 FOP_COND_D(nge
, float64_unordered(fdt1
, fdt0
,
4230 &env
->active_fpu
.fp_status
)
4231 || float64_lt(fdt0
, fdt1
,
4232 &env
->active_fpu
.fp_status
))
4233 FOP_COND_D(le
, float64_le(fdt0
, fdt1
,
4234 &env
->active_fpu
.fp_status
))
4235 FOP_COND_D(ngt
, float64_unordered(fdt1
, fdt0
,
4236 &env
->active_fpu
.fp_status
)
4237 || float64_le(fdt0
, fdt1
,
4238 &env
->active_fpu
.fp_status
))
4240 #define FOP_COND_S(op, cond) \
4241 void helper_cmp_s_ ## op(CPUMIPSState *env, uint32_t fst0, \
4242 uint32_t fst1, int cc) \
4246 update_fcr31(env, GETPC()); \
4248 SET_FP_COND(cc, env->active_fpu); \
4250 CLEAR_FP_COND(cc, env->active_fpu); \
4252 void helper_cmpabs_s_ ## op(CPUMIPSState *env, uint32_t fst0, \
4253 uint32_t fst1, int cc) \
4256 fst0 = float32_abs(fst0); \
4257 fst1 = float32_abs(fst1); \
4259 update_fcr31(env, GETPC()); \
4261 SET_FP_COND(cc, env->active_fpu); \
4263 CLEAR_FP_COND(cc, env->active_fpu); \
4267 * NOTE: the comma operator will make "cond" to eval to false,
4268 * but float32_unordered_quiet() is still called.
4270 FOP_COND_S(f
, (float32_unordered_quiet(fst1
, fst0
,
4271 &env
->active_fpu
.fp_status
), 0))
4272 FOP_COND_S(un
, float32_unordered_quiet(fst1
, fst0
,
4273 &env
->active_fpu
.fp_status
))
4274 FOP_COND_S(eq
, float32_eq_quiet(fst0
, fst1
,
4275 &env
->active_fpu
.fp_status
))
4276 FOP_COND_S(ueq
, float32_unordered_quiet(fst1
, fst0
,
4277 &env
->active_fpu
.fp_status
)
4278 || float32_eq_quiet(fst0
, fst1
,
4279 &env
->active_fpu
.fp_status
))
4280 FOP_COND_S(olt
, float32_lt_quiet(fst0
, fst1
,
4281 &env
->active_fpu
.fp_status
))
4282 FOP_COND_S(ult
, float32_unordered_quiet(fst1
, fst0
,
4283 &env
->active_fpu
.fp_status
)
4284 || float32_lt_quiet(fst0
, fst1
,
4285 &env
->active_fpu
.fp_status
))
4286 FOP_COND_S(ole
, float32_le_quiet(fst0
, fst1
,
4287 &env
->active_fpu
.fp_status
))
4288 FOP_COND_S(ule
, float32_unordered_quiet(fst1
, fst0
,
4289 &env
->active_fpu
.fp_status
)
4290 || float32_le_quiet(fst0
, fst1
,
4291 &env
->active_fpu
.fp_status
))
4293 * NOTE: the comma operator will make "cond" to eval to false,
4294 * but float32_unordered() is still called.
4296 FOP_COND_S(sf
, (float32_unordered(fst1
, fst0
,
4297 &env
->active_fpu
.fp_status
), 0))
4298 FOP_COND_S(ngle
, float32_unordered(fst1
, fst0
,
4299 &env
->active_fpu
.fp_status
))
4300 FOP_COND_S(seq
, float32_eq(fst0
, fst1
,
4301 &env
->active_fpu
.fp_status
))
4302 FOP_COND_S(ngl
, float32_unordered(fst1
, fst0
,
4303 &env
->active_fpu
.fp_status
)
4304 || float32_eq(fst0
, fst1
,
4305 &env
->active_fpu
.fp_status
))
4306 FOP_COND_S(lt
, float32_lt(fst0
, fst1
,
4307 &env
->active_fpu
.fp_status
))
4308 FOP_COND_S(nge
, float32_unordered(fst1
, fst0
,
4309 &env
->active_fpu
.fp_status
)
4310 || float32_lt(fst0
, fst1
,
4311 &env
->active_fpu
.fp_status
))
4312 FOP_COND_S(le
, float32_le(fst0
, fst1
,
4313 &env
->active_fpu
.fp_status
))
4314 FOP_COND_S(ngt
, float32_unordered(fst1
, fst0
,
4315 &env
->active_fpu
.fp_status
)
4316 || float32_le(fst0
, fst1
,
4317 &env
->active_fpu
.fp_status
))
4319 #define FOP_COND_PS(op, condl, condh) \
4320 void helper_cmp_ps_ ## op(CPUMIPSState *env, uint64_t fdt0, \
4321 uint64_t fdt1, int cc) \
4323 uint32_t fst0, fsth0, fst1, fsth1; \
4325 fst0 = fdt0 & 0XFFFFFFFF; \
4326 fsth0 = fdt0 >> 32; \
4327 fst1 = fdt1 & 0XFFFFFFFF; \
4328 fsth1 = fdt1 >> 32; \
4331 update_fcr31(env, GETPC()); \
4333 SET_FP_COND(cc, env->active_fpu); \
4335 CLEAR_FP_COND(cc, env->active_fpu); \
4337 SET_FP_COND(cc + 1, env->active_fpu); \
4339 CLEAR_FP_COND(cc + 1, env->active_fpu); \
4341 void helper_cmpabs_ps_ ## op(CPUMIPSState *env, uint64_t fdt0, \
4342 uint64_t fdt1, int cc) \
4344 uint32_t fst0, fsth0, fst1, fsth1; \
4346 fst0 = float32_abs(fdt0 & 0XFFFFFFFF); \
4347 fsth0 = float32_abs(fdt0 >> 32); \
4348 fst1 = float32_abs(fdt1 & 0XFFFFFFFF); \
4349 fsth1 = float32_abs(fdt1 >> 32); \
4352 update_fcr31(env, GETPC()); \
4354 SET_FP_COND(cc, env->active_fpu); \
4356 CLEAR_FP_COND(cc, env->active_fpu); \
4358 SET_FP_COND(cc + 1, env->active_fpu); \
4360 CLEAR_FP_COND(cc + 1, env->active_fpu); \
4364 * NOTE: the comma operator will make "cond" to eval to false,
4365 * but float32_unordered_quiet() is still called.
4367 FOP_COND_PS(f
, (float32_unordered_quiet(fst1
, fst0
,
4368 &env
->active_fpu
.fp_status
), 0),
4369 (float32_unordered_quiet(fsth1
, fsth0
,
4370 &env
->active_fpu
.fp_status
), 0))
4371 FOP_COND_PS(un
, float32_unordered_quiet(fst1
, fst0
,
4372 &env
->active_fpu
.fp_status
),
4373 float32_unordered_quiet(fsth1
, fsth0
,
4374 &env
->active_fpu
.fp_status
))
4375 FOP_COND_PS(eq
, float32_eq_quiet(fst0
, fst1
,
4376 &env
->active_fpu
.fp_status
),
4377 float32_eq_quiet(fsth0
, fsth1
,
4378 &env
->active_fpu
.fp_status
))
4379 FOP_COND_PS(ueq
, float32_unordered_quiet(fst1
, fst0
,
4380 &env
->active_fpu
.fp_status
)
4381 || float32_eq_quiet(fst0
, fst1
,
4382 &env
->active_fpu
.fp_status
),
4383 float32_unordered_quiet(fsth1
, fsth0
,
4384 &env
->active_fpu
.fp_status
)
4385 || float32_eq_quiet(fsth0
, fsth1
,
4386 &env
->active_fpu
.fp_status
))
4387 FOP_COND_PS(olt
, float32_lt_quiet(fst0
, fst1
,
4388 &env
->active_fpu
.fp_status
),
4389 float32_lt_quiet(fsth0
, fsth1
,
4390 &env
->active_fpu
.fp_status
))
4391 FOP_COND_PS(ult
, float32_unordered_quiet(fst1
, fst0
,
4392 &env
->active_fpu
.fp_status
)
4393 || float32_lt_quiet(fst0
, fst1
,
4394 &env
->active_fpu
.fp_status
),
4395 float32_unordered_quiet(fsth1
, fsth0
,
4396 &env
->active_fpu
.fp_status
)
4397 || float32_lt_quiet(fsth0
, fsth1
,
4398 &env
->active_fpu
.fp_status
))
4399 FOP_COND_PS(ole
, float32_le_quiet(fst0
, fst1
,
4400 &env
->active_fpu
.fp_status
),
4401 float32_le_quiet(fsth0
, fsth1
,
4402 &env
->active_fpu
.fp_status
))
4403 FOP_COND_PS(ule
, float32_unordered_quiet(fst1
, fst0
,
4404 &env
->active_fpu
.fp_status
)
4405 || float32_le_quiet(fst0
, fst1
,
4406 &env
->active_fpu
.fp_status
),
4407 float32_unordered_quiet(fsth1
, fsth0
,
4408 &env
->active_fpu
.fp_status
)
4409 || float32_le_quiet(fsth0
, fsth1
,
4410 &env
->active_fpu
.fp_status
))
4412 * NOTE: the comma operator will make "cond" to eval to false,
4413 * but float32_unordered() is still called.
4415 FOP_COND_PS(sf
, (float32_unordered(fst1
, fst0
,
4416 &env
->active_fpu
.fp_status
), 0),
4417 (float32_unordered(fsth1
, fsth0
,
4418 &env
->active_fpu
.fp_status
), 0))
4419 FOP_COND_PS(ngle
, float32_unordered(fst1
, fst0
,
4420 &env
->active_fpu
.fp_status
),
4421 float32_unordered(fsth1
, fsth0
,
4422 &env
->active_fpu
.fp_status
))
4423 FOP_COND_PS(seq
, float32_eq(fst0
, fst1
,
4424 &env
->active_fpu
.fp_status
),
4425 float32_eq(fsth0
, fsth1
,
4426 &env
->active_fpu
.fp_status
))
4427 FOP_COND_PS(ngl
, float32_unordered(fst1
, fst0
,
4428 &env
->active_fpu
.fp_status
)
4429 || float32_eq(fst0
, fst1
,
4430 &env
->active_fpu
.fp_status
),
4431 float32_unordered(fsth1
, fsth0
,
4432 &env
->active_fpu
.fp_status
)
4433 || float32_eq(fsth0
, fsth1
,
4434 &env
->active_fpu
.fp_status
))
4435 FOP_COND_PS(lt
, float32_lt(fst0
, fst1
,
4436 &env
->active_fpu
.fp_status
),
4437 float32_lt(fsth0
, fsth1
,
4438 &env
->active_fpu
.fp_status
))
4439 FOP_COND_PS(nge
, float32_unordered(fst1
, fst0
,
4440 &env
->active_fpu
.fp_status
)
4441 || float32_lt(fst0
, fst1
,
4442 &env
->active_fpu
.fp_status
),
4443 float32_unordered(fsth1
, fsth0
,
4444 &env
->active_fpu
.fp_status
)
4445 || float32_lt(fsth0
, fsth1
,
4446 &env
->active_fpu
.fp_status
))
4447 FOP_COND_PS(le
, float32_le(fst0
, fst1
,
4448 &env
->active_fpu
.fp_status
),
4449 float32_le(fsth0
, fsth1
,
4450 &env
->active_fpu
.fp_status
))
4451 FOP_COND_PS(ngt
, float32_unordered(fst1
, fst0
,
4452 &env
->active_fpu
.fp_status
)
4453 || float32_le(fst0
, fst1
,
4454 &env
->active_fpu
.fp_status
),
4455 float32_unordered(fsth1
, fsth0
,
4456 &env
->active_fpu
.fp_status
)
4457 || float32_le(fsth0
, fsth1
,
4458 &env
->active_fpu
.fp_status
))
4460 /* R6 compare operations */
4461 #define FOP_CONDN_D(op, cond) \
4462 uint64_t helper_r6_cmp_d_ ## op(CPUMIPSState *env, uint64_t fdt0, \
4467 update_fcr31(env, GETPC()); \
4476 * NOTE: the comma operator will make "cond" to eval to false,
4477 * but float64_unordered_quiet() is still called.
4479 FOP_CONDN_D(af
, (float64_unordered_quiet(fdt1
, fdt0
,
4480 &env
->active_fpu
.fp_status
), 0))
4481 FOP_CONDN_D(un
, (float64_unordered_quiet(fdt1
, fdt0
,
4482 &env
->active_fpu
.fp_status
)))
4483 FOP_CONDN_D(eq
, (float64_eq_quiet(fdt0
, fdt1
,
4484 &env
->active_fpu
.fp_status
)))
4485 FOP_CONDN_D(ueq
, (float64_unordered_quiet(fdt1
, fdt0
,
4486 &env
->active_fpu
.fp_status
)
4487 || float64_eq_quiet(fdt0
, fdt1
,
4488 &env
->active_fpu
.fp_status
)))
4489 FOP_CONDN_D(lt
, (float64_lt_quiet(fdt0
, fdt1
,
4490 &env
->active_fpu
.fp_status
)))
4491 FOP_CONDN_D(ult
, (float64_unordered_quiet(fdt1
, fdt0
,
4492 &env
->active_fpu
.fp_status
)
4493 || float64_lt_quiet(fdt0
, fdt1
,
4494 &env
->active_fpu
.fp_status
)))
4495 FOP_CONDN_D(le
, (float64_le_quiet(fdt0
, fdt1
,
4496 &env
->active_fpu
.fp_status
)))
4497 FOP_CONDN_D(ule
, (float64_unordered_quiet(fdt1
, fdt0
,
4498 &env
->active_fpu
.fp_status
)
4499 || float64_le_quiet(fdt0
, fdt1
,
4500 &env
->active_fpu
.fp_status
)))
4502 * NOTE: the comma operator will make "cond" to eval to false,
4503 * but float64_unordered() is still called.\
4505 FOP_CONDN_D(saf
, (float64_unordered(fdt1
, fdt0
,
4506 &env
->active_fpu
.fp_status
), 0))
4507 FOP_CONDN_D(sun
, (float64_unordered(fdt1
, fdt0
,
4508 &env
->active_fpu
.fp_status
)))
4509 FOP_CONDN_D(seq
, (float64_eq(fdt0
, fdt1
,
4510 &env
->active_fpu
.fp_status
)))
4511 FOP_CONDN_D(sueq
, (float64_unordered(fdt1
, fdt0
,
4512 &env
->active_fpu
.fp_status
)
4513 || float64_eq(fdt0
, fdt1
,
4514 &env
->active_fpu
.fp_status
)))
4515 FOP_CONDN_D(slt
, (float64_lt(fdt0
, fdt1
,
4516 &env
->active_fpu
.fp_status
)))
4517 FOP_CONDN_D(sult
, (float64_unordered(fdt1
, fdt0
,
4518 &env
->active_fpu
.fp_status
)
4519 || float64_lt(fdt0
, fdt1
,
4520 &env
->active_fpu
.fp_status
)))
4521 FOP_CONDN_D(sle
, (float64_le(fdt0
, fdt1
,
4522 &env
->active_fpu
.fp_status
)))
4523 FOP_CONDN_D(sule
, (float64_unordered(fdt1
, fdt0
,
4524 &env
->active_fpu
.fp_status
)
4525 || float64_le(fdt0
, fdt1
,
4526 &env
->active_fpu
.fp_status
)))
4527 FOP_CONDN_D(or, (float64_le_quiet(fdt1
, fdt0
,
4528 &env
->active_fpu
.fp_status
)
4529 || float64_le_quiet(fdt0
, fdt1
,
4530 &env
->active_fpu
.fp_status
)))
4531 FOP_CONDN_D(une
, (float64_unordered_quiet(fdt1
, fdt0
,
4532 &env
->active_fpu
.fp_status
)
4533 || float64_lt_quiet(fdt1
, fdt0
,
4534 &env
->active_fpu
.fp_status
)
4535 || float64_lt_quiet(fdt0
, fdt1
,
4536 &env
->active_fpu
.fp_status
)))
4537 FOP_CONDN_D(ne
, (float64_lt_quiet(fdt1
, fdt0
,
4538 &env
->active_fpu
.fp_status
)
4539 || float64_lt_quiet(fdt0
, fdt1
,
4540 &env
->active_fpu
.fp_status
)))
4541 FOP_CONDN_D(sor
, (float64_le(fdt1
, fdt0
,
4542 &env
->active_fpu
.fp_status
)
4543 || float64_le(fdt0
, fdt1
,
4544 &env
->active_fpu
.fp_status
)))
4545 FOP_CONDN_D(sune
, (float64_unordered(fdt1
, fdt0
,
4546 &env
->active_fpu
.fp_status
)
4547 || float64_lt(fdt1
, fdt0
,
4548 &env
->active_fpu
.fp_status
)
4549 || float64_lt(fdt0
, fdt1
,
4550 &env
->active_fpu
.fp_status
)))
4551 FOP_CONDN_D(sne
, (float64_lt(fdt1
, fdt0
,
4552 &env
->active_fpu
.fp_status
)
4553 || float64_lt(fdt0
, fdt1
,
4554 &env
->active_fpu
.fp_status
)))
4556 #define FOP_CONDN_S(op, cond) \
4557 uint32_t helper_r6_cmp_s_ ## op(CPUMIPSState *env, uint32_t fst0, \
4562 update_fcr31(env, GETPC()); \
4571 * NOTE: the comma operator will make "cond" to eval to false,
4572 * but float32_unordered_quiet() is still called.
4574 FOP_CONDN_S(af
, (float32_unordered_quiet(fst1
, fst0
,
4575 &env
->active_fpu
.fp_status
), 0))
4576 FOP_CONDN_S(un
, (float32_unordered_quiet(fst1
, fst0
,
4577 &env
->active_fpu
.fp_status
)))
4578 FOP_CONDN_S(eq
, (float32_eq_quiet(fst0
, fst1
,
4579 &env
->active_fpu
.fp_status
)))
4580 FOP_CONDN_S(ueq
, (float32_unordered_quiet(fst1
, fst0
,
4581 &env
->active_fpu
.fp_status
)
4582 || float32_eq_quiet(fst0
, fst1
,
4583 &env
->active_fpu
.fp_status
)))
4584 FOP_CONDN_S(lt
, (float32_lt_quiet(fst0
, fst1
,
4585 &env
->active_fpu
.fp_status
)))
4586 FOP_CONDN_S(ult
, (float32_unordered_quiet(fst1
, fst0
,
4587 &env
->active_fpu
.fp_status
)
4588 || float32_lt_quiet(fst0
, fst1
,
4589 &env
->active_fpu
.fp_status
)))
4590 FOP_CONDN_S(le
, (float32_le_quiet(fst0
, fst1
,
4591 &env
->active_fpu
.fp_status
)))
4592 FOP_CONDN_S(ule
, (float32_unordered_quiet(fst1
, fst0
,
4593 &env
->active_fpu
.fp_status
)
4594 || float32_le_quiet(fst0
, fst1
,
4595 &env
->active_fpu
.fp_status
)))
4597 * NOTE: the comma operator will make "cond" to eval to false,
4598 * but float32_unordered() is still called.
4600 FOP_CONDN_S(saf
, (float32_unordered(fst1
, fst0
,
4601 &env
->active_fpu
.fp_status
), 0))
4602 FOP_CONDN_S(sun
, (float32_unordered(fst1
, fst0
,
4603 &env
->active_fpu
.fp_status
)))
4604 FOP_CONDN_S(seq
, (float32_eq(fst0
, fst1
,
4605 &env
->active_fpu
.fp_status
)))
4606 FOP_CONDN_S(sueq
, (float32_unordered(fst1
, fst0
,
4607 &env
->active_fpu
.fp_status
)
4608 || float32_eq(fst0
, fst1
,
4609 &env
->active_fpu
.fp_status
)))
4610 FOP_CONDN_S(slt
, (float32_lt(fst0
, fst1
,
4611 &env
->active_fpu
.fp_status
)))
4612 FOP_CONDN_S(sult
, (float32_unordered(fst1
, fst0
,
4613 &env
->active_fpu
.fp_status
)
4614 || float32_lt(fst0
, fst1
,
4615 &env
->active_fpu
.fp_status
)))
4616 FOP_CONDN_S(sle
, (float32_le(fst0
, fst1
,
4617 &env
->active_fpu
.fp_status
)))
4618 FOP_CONDN_S(sule
, (float32_unordered(fst1
, fst0
,
4619 &env
->active_fpu
.fp_status
)
4620 || float32_le(fst0
, fst1
,
4621 &env
->active_fpu
.fp_status
)))
4622 FOP_CONDN_S(or, (float32_le_quiet(fst1
, fst0
,
4623 &env
->active_fpu
.fp_status
)
4624 || float32_le_quiet(fst0
, fst1
,
4625 &env
->active_fpu
.fp_status
)))
4626 FOP_CONDN_S(une
, (float32_unordered_quiet(fst1
, fst0
,
4627 &env
->active_fpu
.fp_status
)
4628 || float32_lt_quiet(fst1
, fst0
,
4629 &env
->active_fpu
.fp_status
)
4630 || float32_lt_quiet(fst0
, fst1
,
4631 &env
->active_fpu
.fp_status
)))
4632 FOP_CONDN_S(ne
, (float32_lt_quiet(fst1
, fst0
,
4633 &env
->active_fpu
.fp_status
)
4634 || float32_lt_quiet(fst0
, fst1
,
4635 &env
->active_fpu
.fp_status
)))
4636 FOP_CONDN_S(sor
, (float32_le(fst1
, fst0
,
4637 &env
->active_fpu
.fp_status
)
4638 || float32_le(fst0
, fst1
,
4639 &env
->active_fpu
.fp_status
)))
4640 FOP_CONDN_S(sune
, (float32_unordered(fst1
, fst0
,
4641 &env
->active_fpu
.fp_status
)
4642 || float32_lt(fst1
, fst0
,
4643 &env
->active_fpu
.fp_status
)
4644 || float32_lt(fst0
, fst1
,
4645 &env
->active_fpu
.fp_status
)))
4646 FOP_CONDN_S(sne
, (float32_lt(fst1
, fst0
,
4647 &env
->active_fpu
.fp_status
)
4648 || float32_lt(fst0
, fst1
,
4649 &env
->active_fpu
.fp_status
)))
/* Data format min and max values */
#define DF_BITS(df) (1 << ((df) + 3))

/* Element-by-element access macros */
#define DF_ELEMENTS(df) (MSA_WRLEN / DF_BITS(df))

/*
 * In system mode, declare a TCGMemOpIdx "oi" combining the element
 * memop (target-endian, unaligned allowed) with the current data MMU
 * index, for use with the helper_ret_*_mmu() slow-path accessors.
 * In user mode no memop index is needed, so the macro expands to
 * nothing.
 */
#if !defined(CONFIG_USER_ONLY)
#define MEMOP_IDX(DF)                                           \
    TCGMemOpIdx oi = make_memop_idx(MO_TE | DF | MO_UNALN,      \
                                    cpu_mmu_index(env, false));
#else
#define MEMOP_IDX(DF)
#endif
4666 void helper_msa_ld_b(CPUMIPSState
*env
, uint32_t wd
,
4669 wr_t
*pwd
= &(env
->active_fpu
.fpr
[wd
].wr
);
4671 #if !defined(CONFIG_USER_ONLY)
4672 #if !defined(HOST_WORDS_BIGENDIAN)
4673 pwd
->b
[0] = helper_ret_ldub_mmu(env
, addr
+ (0 << DF_BYTE
), oi
, GETPC());
4674 pwd
->b
[1] = helper_ret_ldub_mmu(env
, addr
+ (1 << DF_BYTE
), oi
, GETPC());
4675 pwd
->b
[2] = helper_ret_ldub_mmu(env
, addr
+ (2 << DF_BYTE
), oi
, GETPC());
4676 pwd
->b
[3] = helper_ret_ldub_mmu(env
, addr
+ (3 << DF_BYTE
), oi
, GETPC());
4677 pwd
->b
[4] = helper_ret_ldub_mmu(env
, addr
+ (4 << DF_BYTE
), oi
, GETPC());
4678 pwd
->b
[5] = helper_ret_ldub_mmu(env
, addr
+ (5 << DF_BYTE
), oi
, GETPC());
4679 pwd
->b
[6] = helper_ret_ldub_mmu(env
, addr
+ (6 << DF_BYTE
), oi
, GETPC());
4680 pwd
->b
[7] = helper_ret_ldub_mmu(env
, addr
+ (7 << DF_BYTE
), oi
, GETPC());
4681 pwd
->b
[8] = helper_ret_ldub_mmu(env
, addr
+ (8 << DF_BYTE
), oi
, GETPC());
4682 pwd
->b
[9] = helper_ret_ldub_mmu(env
, addr
+ (9 << DF_BYTE
), oi
, GETPC());
4683 pwd
->b
[10] = helper_ret_ldub_mmu(env
, addr
+ (10 << DF_BYTE
), oi
, GETPC());
4684 pwd
->b
[11] = helper_ret_ldub_mmu(env
, addr
+ (11 << DF_BYTE
), oi
, GETPC());
4685 pwd
->b
[12] = helper_ret_ldub_mmu(env
, addr
+ (12 << DF_BYTE
), oi
, GETPC());
4686 pwd
->b
[13] = helper_ret_ldub_mmu(env
, addr
+ (13 << DF_BYTE
), oi
, GETPC());
4687 pwd
->b
[14] = helper_ret_ldub_mmu(env
, addr
+ (14 << DF_BYTE
), oi
, GETPC());
4688 pwd
->b
[15] = helper_ret_ldub_mmu(env
, addr
+ (15 << DF_BYTE
), oi
, GETPC());
4690 pwd
->b
[0] = helper_ret_ldub_mmu(env
, addr
+ (7 << DF_BYTE
), oi
, GETPC());
4691 pwd
->b
[1] = helper_ret_ldub_mmu(env
, addr
+ (6 << DF_BYTE
), oi
, GETPC());
4692 pwd
->b
[2] = helper_ret_ldub_mmu(env
, addr
+ (5 << DF_BYTE
), oi
, GETPC());
4693 pwd
->b
[3] = helper_ret_ldub_mmu(env
, addr
+ (4 << DF_BYTE
), oi
, GETPC());
4694 pwd
->b
[4] = helper_ret_ldub_mmu(env
, addr
+ (3 << DF_BYTE
), oi
, GETPC());
4695 pwd
->b
[5] = helper_ret_ldub_mmu(env
, addr
+ (2 << DF_BYTE
), oi
, GETPC());
4696 pwd
->b
[6] = helper_ret_ldub_mmu(env
, addr
+ (1 << DF_BYTE
), oi
, GETPC());
4697 pwd
->b
[7] = helper_ret_ldub_mmu(env
, addr
+ (0 << DF_BYTE
), oi
, GETPC());
4698 pwd
->b
[8] = helper_ret_ldub_mmu(env
, addr
+ (15 << DF_BYTE
), oi
, GETPC());
4699 pwd
->b
[9] = helper_ret_ldub_mmu(env
, addr
+ (14 << DF_BYTE
), oi
, GETPC());
4700 pwd
->b
[10] = helper_ret_ldub_mmu(env
, addr
+ (13 << DF_BYTE
), oi
, GETPC());
4701 pwd
->b
[11] = helper_ret_ldub_mmu(env
, addr
+ (12 << DF_BYTE
), oi
, GETPC());
4702 pwd
->b
[12] = helper_ret_ldub_mmu(env
, addr
+ (11 << DF_BYTE
), oi
, GETPC());
4703 pwd
->b
[13] = helper_ret_ldub_mmu(env
, addr
+ (10 << DF_BYTE
), oi
, GETPC());
4704 pwd
->b
[14] = helper_ret_ldub_mmu(env
, addr
+ (9 << DF_BYTE
), oi
, GETPC());
4705 pwd
->b
[15] = helper_ret_ldub_mmu(env
, addr
+ (8 << DF_BYTE
), oi
, GETPC());
4708 #if !defined(HOST_WORDS_BIGENDIAN)
4709 pwd
->b
[0] = cpu_ldub_data(env
, addr
+ (0 << DF_BYTE
));
4710 pwd
->b
[1] = cpu_ldub_data(env
, addr
+ (1 << DF_BYTE
));
4711 pwd
->b
[2] = cpu_ldub_data(env
, addr
+ (2 << DF_BYTE
));
4712 pwd
->b
[3] = cpu_ldub_data(env
, addr
+ (3 << DF_BYTE
));
4713 pwd
->b
[4] = cpu_ldub_data(env
, addr
+ (4 << DF_BYTE
));
4714 pwd
->b
[5] = cpu_ldub_data(env
, addr
+ (5 << DF_BYTE
));
4715 pwd
->b
[6] = cpu_ldub_data(env
, addr
+ (6 << DF_BYTE
));
4716 pwd
->b
[7] = cpu_ldub_data(env
, addr
+ (7 << DF_BYTE
));
4717 pwd
->b
[8] = cpu_ldub_data(env
, addr
+ (8 << DF_BYTE
));
4718 pwd
->b
[9] = cpu_ldub_data(env
, addr
+ (9 << DF_BYTE
));
4719 pwd
->b
[10] = cpu_ldub_data(env
, addr
+ (10 << DF_BYTE
));
4720 pwd
->b
[11] = cpu_ldub_data(env
, addr
+ (11 << DF_BYTE
));
4721 pwd
->b
[12] = cpu_ldub_data(env
, addr
+ (12 << DF_BYTE
));
4722 pwd
->b
[13] = cpu_ldub_data(env
, addr
+ (13 << DF_BYTE
));
4723 pwd
->b
[14] = cpu_ldub_data(env
, addr
+ (14 << DF_BYTE
));
4724 pwd
->b
[15] = cpu_ldub_data(env
, addr
+ (15 << DF_BYTE
));
4726 pwd
->b
[0] = cpu_ldub_data(env
, addr
+ (7 << DF_BYTE
));
4727 pwd
->b
[1] = cpu_ldub_data(env
, addr
+ (6 << DF_BYTE
));
4728 pwd
->b
[2] = cpu_ldub_data(env
, addr
+ (5 << DF_BYTE
));
4729 pwd
->b
[3] = cpu_ldub_data(env
, addr
+ (4 << DF_BYTE
));
4730 pwd
->b
[4] = cpu_ldub_data(env
, addr
+ (3 << DF_BYTE
));
4731 pwd
->b
[5] = cpu_ldub_data(env
, addr
+ (2 << DF_BYTE
));
4732 pwd
->b
[6] = cpu_ldub_data(env
, addr
+ (1 << DF_BYTE
));
4733 pwd
->b
[7] = cpu_ldub_data(env
, addr
+ (0 << DF_BYTE
));
4734 pwd
->b
[8] = cpu_ldub_data(env
, addr
+ (15 << DF_BYTE
));
4735 pwd
->b
[9] = cpu_ldub_data(env
, addr
+ (14 << DF_BYTE
));
4736 pwd
->b
[10] = cpu_ldub_data(env
, addr
+ (13 << DF_BYTE
));
4737 pwd
->b
[11] = cpu_ldub_data(env
, addr
+ (12 << DF_BYTE
));
4738 pwd
->b
[12] = cpu_ldub_data(env
, addr
+ (11 << DF_BYTE
));
4739 pwd
->b
[13] = cpu_ldub_data(env
, addr
+ (10 << DF_BYTE
));
4740 pwd
->b
[14] = cpu_ldub_data(env
, addr
+ (9 << DF_BYTE
));
4741 pwd
->b
[15] = cpu_ldub_data(env
, addr
+ (8 << DF_BYTE
));
4746 void helper_msa_ld_h(CPUMIPSState
*env
, uint32_t wd
,
4749 wr_t
*pwd
= &(env
->active_fpu
.fpr
[wd
].wr
);
4751 #if !defined(CONFIG_USER_ONLY)
4752 #if !defined(HOST_WORDS_BIGENDIAN)
4753 pwd
->h
[0] = helper_ret_lduw_mmu(env
, addr
+ (0 << DF_HALF
), oi
, GETPC());
4754 pwd
->h
[1] = helper_ret_lduw_mmu(env
, addr
+ (1 << DF_HALF
), oi
, GETPC());
4755 pwd
->h
[2] = helper_ret_lduw_mmu(env
, addr
+ (2 << DF_HALF
), oi
, GETPC());
4756 pwd
->h
[3] = helper_ret_lduw_mmu(env
, addr
+ (3 << DF_HALF
), oi
, GETPC());
4757 pwd
->h
[4] = helper_ret_lduw_mmu(env
, addr
+ (4 << DF_HALF
), oi
, GETPC());
4758 pwd
->h
[5] = helper_ret_lduw_mmu(env
, addr
+ (5 << DF_HALF
), oi
, GETPC());
4759 pwd
->h
[6] = helper_ret_lduw_mmu(env
, addr
+ (6 << DF_HALF
), oi
, GETPC());
4760 pwd
->h
[7] = helper_ret_lduw_mmu(env
, addr
+ (7 << DF_HALF
), oi
, GETPC());
4762 pwd
->h
[0] = helper_ret_lduw_mmu(env
, addr
+ (3 << DF_HALF
), oi
, GETPC());
4763 pwd
->h
[1] = helper_ret_lduw_mmu(env
, addr
+ (2 << DF_HALF
), oi
, GETPC());
4764 pwd
->h
[2] = helper_ret_lduw_mmu(env
, addr
+ (1 << DF_HALF
), oi
, GETPC());
4765 pwd
->h
[3] = helper_ret_lduw_mmu(env
, addr
+ (0 << DF_HALF
), oi
, GETPC());
4766 pwd
->h
[4] = helper_ret_lduw_mmu(env
, addr
+ (7 << DF_HALF
), oi
, GETPC());
4767 pwd
->h
[5] = helper_ret_lduw_mmu(env
, addr
+ (6 << DF_HALF
), oi
, GETPC());
4768 pwd
->h
[6] = helper_ret_lduw_mmu(env
, addr
+ (5 << DF_HALF
), oi
, GETPC());
4769 pwd
->h
[7] = helper_ret_lduw_mmu(env
, addr
+ (4 << DF_HALF
), oi
, GETPC());
4772 #if !defined(HOST_WORDS_BIGENDIAN)
4773 pwd
->h
[0] = cpu_lduw_data(env
, addr
+ (0 << DF_HALF
));
4774 pwd
->h
[1] = cpu_lduw_data(env
, addr
+ (1 << DF_HALF
));
4775 pwd
->h
[2] = cpu_lduw_data(env
, addr
+ (2 << DF_HALF
));
4776 pwd
->h
[3] = cpu_lduw_data(env
, addr
+ (3 << DF_HALF
));
4777 pwd
->h
[4] = cpu_lduw_data(env
, addr
+ (4 << DF_HALF
));
4778 pwd
->h
[5] = cpu_lduw_data(env
, addr
+ (5 << DF_HALF
));
4779 pwd
->h
[6] = cpu_lduw_data(env
, addr
+ (6 << DF_HALF
));
4780 pwd
->h
[7] = cpu_lduw_data(env
, addr
+ (7 << DF_HALF
));
4782 pwd
->h
[0] = cpu_lduw_data(env
, addr
+ (3 << DF_HALF
));
4783 pwd
->h
[1] = cpu_lduw_data(env
, addr
+ (2 << DF_HALF
));
4784 pwd
->h
[2] = cpu_lduw_data(env
, addr
+ (1 << DF_HALF
));
4785 pwd
->h
[3] = cpu_lduw_data(env
, addr
+ (0 << DF_HALF
));
4786 pwd
->h
[4] = cpu_lduw_data(env
, addr
+ (7 << DF_HALF
));
4787 pwd
->h
[5] = cpu_lduw_data(env
, addr
+ (6 << DF_HALF
));
4788 pwd
->h
[6] = cpu_lduw_data(env
, addr
+ (5 << DF_HALF
));
4789 pwd
->h
[7] = cpu_lduw_data(env
, addr
+ (4 << DF_HALF
));
4794 void helper_msa_ld_w(CPUMIPSState
*env
, uint32_t wd
,
4797 wr_t
*pwd
= &(env
->active_fpu
.fpr
[wd
].wr
);
4799 #if !defined(CONFIG_USER_ONLY)
4800 #if !defined(HOST_WORDS_BIGENDIAN)
4801 pwd
->w
[0] = helper_ret_ldul_mmu(env
, addr
+ (0 << DF_WORD
), oi
, GETPC());
4802 pwd
->w
[1] = helper_ret_ldul_mmu(env
, addr
+ (1 << DF_WORD
), oi
, GETPC());
4803 pwd
->w
[2] = helper_ret_ldul_mmu(env
, addr
+ (2 << DF_WORD
), oi
, GETPC());
4804 pwd
->w
[3] = helper_ret_ldul_mmu(env
, addr
+ (3 << DF_WORD
), oi
, GETPC());
4806 pwd
->w
[0] = helper_ret_ldul_mmu(env
, addr
+ (1 << DF_WORD
), oi
, GETPC());
4807 pwd
->w
[1] = helper_ret_ldul_mmu(env
, addr
+ (0 << DF_WORD
), oi
, GETPC());
4808 pwd
->w
[2] = helper_ret_ldul_mmu(env
, addr
+ (3 << DF_WORD
), oi
, GETPC());
4809 pwd
->w
[3] = helper_ret_ldul_mmu(env
, addr
+ (2 << DF_WORD
), oi
, GETPC());
4812 #if !defined(HOST_WORDS_BIGENDIAN)
4813 pwd
->w
[0] = cpu_ldl_data(env
, addr
+ (0 << DF_WORD
));
4814 pwd
->w
[1] = cpu_ldl_data(env
, addr
+ (1 << DF_WORD
));
4815 pwd
->w
[2] = cpu_ldl_data(env
, addr
+ (2 << DF_WORD
));
4816 pwd
->w
[3] = cpu_ldl_data(env
, addr
+ (3 << DF_WORD
));
4818 pwd
->w
[0] = cpu_ldl_data(env
, addr
+ (1 << DF_WORD
));
4819 pwd
->w
[1] = cpu_ldl_data(env
, addr
+ (0 << DF_WORD
));
4820 pwd
->w
[2] = cpu_ldl_data(env
, addr
+ (3 << DF_WORD
));
4821 pwd
->w
[3] = cpu_ldl_data(env
, addr
+ (2 << DF_WORD
));
4826 void helper_msa_ld_d(CPUMIPSState
*env
, uint32_t wd
,
4829 wr_t
*pwd
= &(env
->active_fpu
.fpr
[wd
].wr
);
4830 MEMOP_IDX(DF_DOUBLE
)
4831 #if !defined(CONFIG_USER_ONLY)
4832 pwd
->d
[0] = helper_ret_ldq_mmu(env
, addr
+ (0 << DF_DOUBLE
), oi
, GETPC());
4833 pwd
->d
[1] = helper_ret_ldq_mmu(env
, addr
+ (1 << DF_DOUBLE
), oi
, GETPC());
4835 pwd
->d
[0] = cpu_ldq_data(env
, addr
+ (0 << DF_DOUBLE
));
4836 pwd
->d
[1] = cpu_ldq_data(env
, addr
+ (1 << DF_DOUBLE
));
/*
 * True if an MSA vector access starting at address (x) crosses a target
 * page boundary (MSA_WRLEN is the vector width in bits, hence the /8).
 */
#define MSA_PAGESPAN(x) \
        ((((x) & ~TARGET_PAGE_MASK) + MSA_WRLEN / 8 - 1) >= TARGET_PAGE_SIZE)
4843 static inline void ensure_writable_pages(CPUMIPSState
*env
,
4848 /* FIXME: Probe the actual accesses (pass and use a size) */
4849 if (unlikely(MSA_PAGESPAN(addr
))) {
4851 probe_write(env
, addr
, 0, mmu_idx
, retaddr
);
4853 addr
= (addr
& TARGET_PAGE_MASK
) + TARGET_PAGE_SIZE
;
4854 probe_write(env
, addr
, 0, mmu_idx
, retaddr
);
4858 void helper_msa_st_b(CPUMIPSState
*env
, uint32_t wd
,
4861 wr_t
*pwd
= &(env
->active_fpu
.fpr
[wd
].wr
);
4862 int mmu_idx
= cpu_mmu_index(env
, false);
4865 ensure_writable_pages(env
, addr
, mmu_idx
, GETPC());
4866 #if !defined(CONFIG_USER_ONLY)
4867 #if !defined(HOST_WORDS_BIGENDIAN)
4868 helper_ret_stb_mmu(env
, addr
+ (0 << DF_BYTE
), pwd
->b
[0], oi
, GETPC());
4869 helper_ret_stb_mmu(env
, addr
+ (1 << DF_BYTE
), pwd
->b
[1], oi
, GETPC());
4870 helper_ret_stb_mmu(env
, addr
+ (2 << DF_BYTE
), pwd
->b
[2], oi
, GETPC());
4871 helper_ret_stb_mmu(env
, addr
+ (3 << DF_BYTE
), pwd
->b
[3], oi
, GETPC());
4872 helper_ret_stb_mmu(env
, addr
+ (4 << DF_BYTE
), pwd
->b
[4], oi
, GETPC());
4873 helper_ret_stb_mmu(env
, addr
+ (5 << DF_BYTE
), pwd
->b
[5], oi
, GETPC());
4874 helper_ret_stb_mmu(env
, addr
+ (6 << DF_BYTE
), pwd
->b
[6], oi
, GETPC());
4875 helper_ret_stb_mmu(env
, addr
+ (7 << DF_BYTE
), pwd
->b
[7], oi
, GETPC());
4876 helper_ret_stb_mmu(env
, addr
+ (8 << DF_BYTE
), pwd
->b
[8], oi
, GETPC());
4877 helper_ret_stb_mmu(env
, addr
+ (9 << DF_BYTE
), pwd
->b
[9], oi
, GETPC());
4878 helper_ret_stb_mmu(env
, addr
+ (10 << DF_BYTE
), pwd
->b
[10], oi
, GETPC());
4879 helper_ret_stb_mmu(env
, addr
+ (11 << DF_BYTE
), pwd
->b
[11], oi
, GETPC());
4880 helper_ret_stb_mmu(env
, addr
+ (12 << DF_BYTE
), pwd
->b
[12], oi
, GETPC());
4881 helper_ret_stb_mmu(env
, addr
+ (13 << DF_BYTE
), pwd
->b
[13], oi
, GETPC());
4882 helper_ret_stb_mmu(env
, addr
+ (14 << DF_BYTE
), pwd
->b
[14], oi
, GETPC());
4883 helper_ret_stb_mmu(env
, addr
+ (15 << DF_BYTE
), pwd
->b
[15], oi
, GETPC());
4885 helper_ret_stb_mmu(env
, addr
+ (7 << DF_BYTE
), pwd
->b
[0], oi
, GETPC());
4886 helper_ret_stb_mmu(env
, addr
+ (6 << DF_BYTE
), pwd
->b
[1], oi
, GETPC());
4887 helper_ret_stb_mmu(env
, addr
+ (5 << DF_BYTE
), pwd
->b
[2], oi
, GETPC());
4888 helper_ret_stb_mmu(env
, addr
+ (4 << DF_BYTE
), pwd
->b
[3], oi
, GETPC());
4889 helper_ret_stb_mmu(env
, addr
+ (3 << DF_BYTE
), pwd
->b
[4], oi
, GETPC());
4890 helper_ret_stb_mmu(env
, addr
+ (2 << DF_BYTE
), pwd
->b
[5], oi
, GETPC());
4891 helper_ret_stb_mmu(env
, addr
+ (1 << DF_BYTE
), pwd
->b
[6], oi
, GETPC());
4892 helper_ret_stb_mmu(env
, addr
+ (0 << DF_BYTE
), pwd
->b
[7], oi
, GETPC());
4893 helper_ret_stb_mmu(env
, addr
+ (15 << DF_BYTE
), pwd
->b
[8], oi
, GETPC());
4894 helper_ret_stb_mmu(env
, addr
+ (14 << DF_BYTE
), pwd
->b
[9], oi
, GETPC());
4895 helper_ret_stb_mmu(env
, addr
+ (13 << DF_BYTE
), pwd
->b
[10], oi
, GETPC());
4896 helper_ret_stb_mmu(env
, addr
+ (12 << DF_BYTE
), pwd
->b
[11], oi
, GETPC());
4897 helper_ret_stb_mmu(env
, addr
+ (11 << DF_BYTE
), pwd
->b
[12], oi
, GETPC());
4898 helper_ret_stb_mmu(env
, addr
+ (10 << DF_BYTE
), pwd
->b
[13], oi
, GETPC());
4899 helper_ret_stb_mmu(env
, addr
+ (9 << DF_BYTE
), pwd
->b
[14], oi
, GETPC());
4900 helper_ret_stb_mmu(env
, addr
+ (8 << DF_BYTE
), pwd
->b
[15], oi
, GETPC());
4903 #if !defined(HOST_WORDS_BIGENDIAN)
4904 cpu_stb_data(env
, addr
+ (0 << DF_BYTE
), pwd
->b
[0]);
4905 cpu_stb_data(env
, addr
+ (1 << DF_BYTE
), pwd
->b
[1]);
4906 cpu_stb_data(env
, addr
+ (2 << DF_BYTE
), pwd
->b
[2]);
4907 cpu_stb_data(env
, addr
+ (3 << DF_BYTE
), pwd
->b
[3]);
4908 cpu_stb_data(env
, addr
+ (4 << DF_BYTE
), pwd
->b
[4]);
4909 cpu_stb_data(env
, addr
+ (5 << DF_BYTE
), pwd
->b
[5]);
4910 cpu_stb_data(env
, addr
+ (6 << DF_BYTE
), pwd
->b
[6]);
4911 cpu_stb_data(env
, addr
+ (7 << DF_BYTE
), pwd
->b
[7]);
4912 cpu_stb_data(env
, addr
+ (8 << DF_BYTE
), pwd
->b
[8]);
4913 cpu_stb_data(env
, addr
+ (9 << DF_BYTE
), pwd
->b
[9]);
4914 cpu_stb_data(env
, addr
+ (10 << DF_BYTE
), pwd
->b
[10]);
4915 cpu_stb_data(env
, addr
+ (11 << DF_BYTE
), pwd
->b
[11]);
4916 cpu_stb_data(env
, addr
+ (12 << DF_BYTE
), pwd
->b
[12]);
4917 cpu_stb_data(env
, addr
+ (13 << DF_BYTE
), pwd
->b
[13]);
4918 cpu_stb_data(env
, addr
+ (14 << DF_BYTE
), pwd
->b
[14]);
4919 cpu_stb_data(env
, addr
+ (15 << DF_BYTE
), pwd
->b
[15]);
4921 cpu_stb_data(env
, addr
+ (7 << DF_BYTE
), pwd
->b
[0]);
4922 cpu_stb_data(env
, addr
+ (6 << DF_BYTE
), pwd
->b
[1]);
4923 cpu_stb_data(env
, addr
+ (5 << DF_BYTE
), pwd
->b
[2]);
4924 cpu_stb_data(env
, addr
+ (4 << DF_BYTE
), pwd
->b
[3]);
4925 cpu_stb_data(env
, addr
+ (3 << DF_BYTE
), pwd
->b
[4]);
4926 cpu_stb_data(env
, addr
+ (2 << DF_BYTE
), pwd
->b
[5]);
4927 cpu_stb_data(env
, addr
+ (1 << DF_BYTE
), pwd
->b
[6]);
4928 cpu_stb_data(env
, addr
+ (0 << DF_BYTE
), pwd
->b
[7]);
4929 cpu_stb_data(env
, addr
+ (15 << DF_BYTE
), pwd
->b
[8]);
4930 cpu_stb_data(env
, addr
+ (14 << DF_BYTE
), pwd
->b
[9]);
4931 cpu_stb_data(env
, addr
+ (13 << DF_BYTE
), pwd
->b
[10]);
4932 cpu_stb_data(env
, addr
+ (12 << DF_BYTE
), pwd
->b
[11]);
4933 cpu_stb_data(env
, addr
+ (11 << DF_BYTE
), pwd
->b
[12]);
4934 cpu_stb_data(env
, addr
+ (10 << DF_BYTE
), pwd
->b
[13]);
4935 cpu_stb_data(env
, addr
+ (9 << DF_BYTE
), pwd
->b
[14]);
4936 cpu_stb_data(env
, addr
+ (8 << DF_BYTE
), pwd
->b
[15]);
4941 void helper_msa_st_h(CPUMIPSState
*env
, uint32_t wd
,
4944 wr_t
*pwd
= &(env
->active_fpu
.fpr
[wd
].wr
);
4945 int mmu_idx
= cpu_mmu_index(env
, false);
4948 ensure_writable_pages(env
, addr
, mmu_idx
, GETPC());
4949 #if !defined(CONFIG_USER_ONLY)
4950 #if !defined(HOST_WORDS_BIGENDIAN)
4951 helper_ret_stw_mmu(env
, addr
+ (0 << DF_HALF
), pwd
->h
[0], oi
, GETPC());
4952 helper_ret_stw_mmu(env
, addr
+ (1 << DF_HALF
), pwd
->h
[1], oi
, GETPC());
4953 helper_ret_stw_mmu(env
, addr
+ (2 << DF_HALF
), pwd
->h
[2], oi
, GETPC());
4954 helper_ret_stw_mmu(env
, addr
+ (3 << DF_HALF
), pwd
->h
[3], oi
, GETPC());
4955 helper_ret_stw_mmu(env
, addr
+ (4 << DF_HALF
), pwd
->h
[4], oi
, GETPC());
4956 helper_ret_stw_mmu(env
, addr
+ (5 << DF_HALF
), pwd
->h
[5], oi
, GETPC());
4957 helper_ret_stw_mmu(env
, addr
+ (6 << DF_HALF
), pwd
->h
[6], oi
, GETPC());
4958 helper_ret_stw_mmu(env
, addr
+ (7 << DF_HALF
), pwd
->h
[7], oi
, GETPC());
4960 helper_ret_stw_mmu(env
, addr
+ (3 << DF_HALF
), pwd
->h
[0], oi
, GETPC());
4961 helper_ret_stw_mmu(env
, addr
+ (2 << DF_HALF
), pwd
->h
[1], oi
, GETPC());
4962 helper_ret_stw_mmu(env
, addr
+ (1 << DF_HALF
), pwd
->h
[2], oi
, GETPC());
4963 helper_ret_stw_mmu(env
, addr
+ (0 << DF_HALF
), pwd
->h
[3], oi
, GETPC());
4964 helper_ret_stw_mmu(env
, addr
+ (7 << DF_HALF
), pwd
->h
[4], oi
, GETPC());
4965 helper_ret_stw_mmu(env
, addr
+ (6 << DF_HALF
), pwd
->h
[5], oi
, GETPC());
4966 helper_ret_stw_mmu(env
, addr
+ (5 << DF_HALF
), pwd
->h
[6], oi
, GETPC());
4967 helper_ret_stw_mmu(env
, addr
+ (4 << DF_HALF
), pwd
->h
[7], oi
, GETPC());
4970 #if !defined(HOST_WORDS_BIGENDIAN)
4971 cpu_stw_data(env
, addr
+ (0 << DF_HALF
), pwd
->h
[0]);
4972 cpu_stw_data(env
, addr
+ (1 << DF_HALF
), pwd
->h
[1]);
4973 cpu_stw_data(env
, addr
+ (2 << DF_HALF
), pwd
->h
[2]);
4974 cpu_stw_data(env
, addr
+ (3 << DF_HALF
), pwd
->h
[3]);
4975 cpu_stw_data(env
, addr
+ (4 << DF_HALF
), pwd
->h
[4]);
4976 cpu_stw_data(env
, addr
+ (5 << DF_HALF
), pwd
->h
[5]);
4977 cpu_stw_data(env
, addr
+ (6 << DF_HALF
), pwd
->h
[6]);
4978 cpu_stw_data(env
, addr
+ (7 << DF_HALF
), pwd
->h
[7]);
4980 cpu_stw_data(env
, addr
+ (3 << DF_HALF
), pwd
->h
[0]);
4981 cpu_stw_data(env
, addr
+ (2 << DF_HALF
), pwd
->h
[1]);
4982 cpu_stw_data(env
, addr
+ (1 << DF_HALF
), pwd
->h
[2]);
4983 cpu_stw_data(env
, addr
+ (0 << DF_HALF
), pwd
->h
[3]);
4984 cpu_stw_data(env
, addr
+ (7 << DF_HALF
), pwd
->h
[4]);
4985 cpu_stw_data(env
, addr
+ (6 << DF_HALF
), pwd
->h
[5]);
4986 cpu_stw_data(env
, addr
+ (5 << DF_HALF
), pwd
->h
[6]);
4987 cpu_stw_data(env
, addr
+ (4 << DF_HALF
), pwd
->h
[7]);
4992 void helper_msa_st_w(CPUMIPSState
*env
, uint32_t wd
,
4995 wr_t
*pwd
= &(env
->active_fpu
.fpr
[wd
].wr
);
4996 int mmu_idx
= cpu_mmu_index(env
, false);
4999 ensure_writable_pages(env
, addr
, mmu_idx
, GETPC());
5000 #if !defined(CONFIG_USER_ONLY)
5001 #if !defined(HOST_WORDS_BIGENDIAN)
5002 helper_ret_stl_mmu(env
, addr
+ (0 << DF_WORD
), pwd
->w
[0], oi
, GETPC());
5003 helper_ret_stl_mmu(env
, addr
+ (1 << DF_WORD
), pwd
->w
[1], oi
, GETPC());
5004 helper_ret_stl_mmu(env
, addr
+ (2 << DF_WORD
), pwd
->w
[2], oi
, GETPC());
5005 helper_ret_stl_mmu(env
, addr
+ (3 << DF_WORD
), pwd
->w
[3], oi
, GETPC());
5007 helper_ret_stl_mmu(env
, addr
+ (1 << DF_WORD
), pwd
->w
[0], oi
, GETPC());
5008 helper_ret_stl_mmu(env
, addr
+ (0 << DF_WORD
), pwd
->w
[1], oi
, GETPC());
5009 helper_ret_stl_mmu(env
, addr
+ (3 << DF_WORD
), pwd
->w
[2], oi
, GETPC());
5010 helper_ret_stl_mmu(env
, addr
+ (2 << DF_WORD
), pwd
->w
[3], oi
, GETPC());
5013 #if !defined(HOST_WORDS_BIGENDIAN)
5014 cpu_stl_data(env
, addr
+ (0 << DF_WORD
), pwd
->w
[0]);
5015 cpu_stl_data(env
, addr
+ (1 << DF_WORD
), pwd
->w
[1]);
5016 cpu_stl_data(env
, addr
+ (2 << DF_WORD
), pwd
->w
[2]);
5017 cpu_stl_data(env
, addr
+ (3 << DF_WORD
), pwd
->w
[3]);
5019 cpu_stl_data(env
, addr
+ (1 << DF_WORD
), pwd
->w
[0]);
5020 cpu_stl_data(env
, addr
+ (0 << DF_WORD
), pwd
->w
[1]);
5021 cpu_stl_data(env
, addr
+ (3 << DF_WORD
), pwd
->w
[2]);
5022 cpu_stl_data(env
, addr
+ (2 << DF_WORD
), pwd
->w
[3]);
5027 void helper_msa_st_d(CPUMIPSState
*env
, uint32_t wd
,
5030 wr_t
*pwd
= &(env
->active_fpu
.fpr
[wd
].wr
);
5031 int mmu_idx
= cpu_mmu_index(env
, false);
5033 MEMOP_IDX(DF_DOUBLE
)
5034 ensure_writable_pages(env
, addr
, mmu_idx
, GETPC());
5035 #if !defined(CONFIG_USER_ONLY)
5036 helper_ret_stq_mmu(env
, addr
+ (0 << DF_DOUBLE
), pwd
->d
[0], oi
, GETPC());
5037 helper_ret_stq_mmu(env
, addr
+ (1 << DF_DOUBLE
), pwd
->d
[1], oi
, GETPC());
5039 cpu_stq_data(env
, addr
+ (0 << DF_DOUBLE
), pwd
->d
[0]);
5040 cpu_stq_data(env
, addr
+ (1 << DF_DOUBLE
), pwd
->d
[1]);
5044 void helper_cache(CPUMIPSState
*env
, target_ulong addr
, uint32_t op
)
5046 #ifndef CONFIG_USER_ONLY
5047 target_ulong index
= addr
& 0x1fffffff;
5049 /* Index Store Tag */
5050 memory_region_dispatch_write(env
->itc_tag
, index
, env
->CP0_TagLo
,
5051 MO_64
, MEMTXATTRS_UNSPECIFIED
);
5052 } else if (op
== 5) {
5053 /* Index Load Tag */
5054 memory_region_dispatch_read(env
->itc_tag
, index
, &env
->CP0_TagLo
,
5055 MO_64
, MEMTXATTRS_UNSPECIFIED
);