/*
 *  MIPS emulation helpers for qemu.
 *
 *  Copyright (c) 2004-2005 Jocelyn Mayer
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"
#include "qemu/main-loop.h"
#include "cpu.h"
#include "qemu/host-utils.h"
#include "exec/helper-proto.h"
#include "exec/exec-all.h"
#include "exec/cpu_ldst.h"
#include "sysemu/kvm.h"
/*****************************************************************************/
/* Exceptions processing helpers */

void helper_raise_exception_err(CPUMIPSState *env, uint32_t exception,
                                int error_code)
{
    do_raise_exception_err(env, exception, error_code, 0);
}

void helper_raise_exception(CPUMIPSState *env, uint32_t exception)
{
    do_raise_exception(env, exception, GETPC());
}

void helper_raise_exception_debug(CPUMIPSState *env)
{
    do_raise_exception(env, EXCP_DEBUG, 0);
}

static void raise_exception(CPUMIPSState *env, uint32_t exception)
{
    do_raise_exception(env, exception, 0);
}
#if defined(CONFIG_USER_ONLY)
#define HELPER_LD(name, insn, type)                                     \
static inline type do_##name(CPUMIPSState *env, target_ulong addr,     \
                             int mem_idx, uintptr_t retaddr)           \
{                                                                       \
    return (type) cpu_##insn##_data_ra(env, addr, retaddr);            \
}
#else
#define HELPER_LD(name, insn, type)                                     \
static inline type do_##name(CPUMIPSState *env, target_ulong addr,     \
                             int mem_idx, uintptr_t retaddr)           \
{                                                                       \
    switch (mem_idx) {                                                  \
    case 0: return (type) cpu_##insn##_kernel_ra(env, addr, retaddr);  \
    case 1: return (type) cpu_##insn##_super_ra(env, addr, retaddr);   \
    default:                                                            \
    case 2: return (type) cpu_##insn##_user_ra(env, addr, retaddr);    \
    case 3: return (type) cpu_##insn##_error_ra(env, addr, retaddr);   \
    }                                                                   \
}
#endif
HELPER_LD(lw, ldl, int32_t)
#if defined(TARGET_MIPS64)
HELPER_LD(ld, ldq, int64_t)
#endif
#undef HELPER_LD

#if defined(CONFIG_USER_ONLY)
#define HELPER_ST(name, insn, type)                                     \
static inline void do_##name(CPUMIPSState *env, target_ulong addr,     \
                             type val, int mem_idx, uintptr_t retaddr) \
{                                                                       \
    cpu_##insn##_data_ra(env, addr, val, retaddr);                      \
}
#else
#define HELPER_ST(name, insn, type)                                     \
static inline void do_##name(CPUMIPSState *env, target_ulong addr,     \
                             type val, int mem_idx, uintptr_t retaddr) \
{                                                                       \
    switch (mem_idx) {                                                  \
    case 0: cpu_##insn##_kernel_ra(env, addr, val, retaddr); break;    \
    case 1: cpu_##insn##_super_ra(env, addr, val, retaddr); break;     \
    default:                                                            \
    case 2: cpu_##insn##_user_ra(env, addr, val, retaddr); break;      \
    case 3:                                                             \
        cpu_##insn##_error_ra(env, addr, val, retaddr);                 \
        break;                                                          \
    }                                                                   \
}
#endif
HELPER_ST(sb, stb, uint8_t)
HELPER_ST(sw, stl, uint32_t)
#if defined(TARGET_MIPS64)
HELPER_ST(sd, stq, uint64_t)
#endif
#undef HELPER_ST
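/*
 * For reference: each HELPER_LD/HELPER_ST invocation above expands to a small
 * dispatcher.  HELPER_LD(lw, ldl, int32_t), for instance, defines do_lw(),
 * which selects cpu_ldl_kernel_ra()/cpu_ldl_super_ra()/cpu_ldl_user_ra()/
 * cpu_ldl_error_ra() based on mem_idx in softmmu builds, and simply calls
 * cpu_ldl_data_ra() in user-only builds.
 */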
/* 64 bits arithmetic for 32 bits hosts */
static inline uint64_t get_HILO(CPUMIPSState *env)
{
    return ((uint64_t)(env->active_tc.HI[0]) << 32) |
           (uint32_t)env->active_tc.LO[0];
}

static inline target_ulong set_HIT0_LO(CPUMIPSState *env, uint64_t HILO)
{
    env->active_tc.LO[0] = (int32_t)(HILO & 0xFFFFFFFF);
    return env->active_tc.HI[0] = (int32_t)(HILO >> 32);
}

static inline target_ulong set_HI_LOT0(CPUMIPSState *env, uint64_t HILO)
{
    target_ulong tmp = env->active_tc.LO[0] = (int32_t)(HILO & 0xFFFFFFFF);
    env->active_tc.HI[0] = (int32_t)(HILO >> 32);
    return tmp;
}
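/*
 * For example, set_HI_LOT0(env, 0x1122334455667788ULL) stores 0x55667788 into
 * LO[0] and 0x11223344 into HI[0] (each value sign-extended from 32 bits on
 * 64-bit targets) and returns the LO half, while set_HIT0_LO() performs the
 * same split but returns the HI half.
 */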
/* Multiplication variants of the vr54xx. */
target_ulong helper_muls(CPUMIPSState *env, target_ulong arg1,
                         target_ulong arg2)
{
    return set_HI_LOT0(env, 0 - ((int64_t)(int32_t)arg1 *
                                 (int64_t)(int32_t)arg2));
}

target_ulong helper_mulsu(CPUMIPSState *env, target_ulong arg1,
                          target_ulong arg2)
{
    return set_HI_LOT0(env, 0 - (uint64_t)(uint32_t)arg1 *
                       (uint64_t)(uint32_t)arg2);
}

target_ulong helper_macc(CPUMIPSState *env, target_ulong arg1,
                         target_ulong arg2)
{
    return set_HI_LOT0(env, (int64_t)get_HILO(env) + (int64_t)(int32_t)arg1 *
                       (int64_t)(int32_t)arg2);
}

target_ulong helper_macchi(CPUMIPSState *env, target_ulong arg1,
                           target_ulong arg2)
{
    return set_HIT0_LO(env, (int64_t)get_HILO(env) + (int64_t)(int32_t)arg1 *
                       (int64_t)(int32_t)arg2);
}

target_ulong helper_maccu(CPUMIPSState *env, target_ulong arg1,
                          target_ulong arg2)
{
    return set_HI_LOT0(env, (uint64_t)get_HILO(env) +
                       (uint64_t)(uint32_t)arg1 * (uint64_t)(uint32_t)arg2);
}

target_ulong helper_macchiu(CPUMIPSState *env, target_ulong arg1,
                            target_ulong arg2)
{
    return set_HIT0_LO(env, (uint64_t)get_HILO(env) +
                       (uint64_t)(uint32_t)arg1 * (uint64_t)(uint32_t)arg2);
}

target_ulong helper_msac(CPUMIPSState *env, target_ulong arg1,
                         target_ulong arg2)
{
    return set_HI_LOT0(env, (int64_t)get_HILO(env) - (int64_t)(int32_t)arg1 *
                       (int64_t)(int32_t)arg2);
}

target_ulong helper_msachi(CPUMIPSState *env, target_ulong arg1,
                           target_ulong arg2)
{
    return set_HIT0_LO(env, (int64_t)get_HILO(env) - (int64_t)(int32_t)arg1 *
                       (int64_t)(int32_t)arg2);
}

target_ulong helper_msacu(CPUMIPSState *env, target_ulong arg1,
                          target_ulong arg2)
{
    return set_HI_LOT0(env, (uint64_t)get_HILO(env) -
                       (uint64_t)(uint32_t)arg1 * (uint64_t)(uint32_t)arg2);
}

target_ulong helper_msachiu(CPUMIPSState *env, target_ulong arg1,
                            target_ulong arg2)
{
    return set_HIT0_LO(env, (uint64_t)get_HILO(env) -
                       (uint64_t)(uint32_t)arg1 * (uint64_t)(uint32_t)arg2);
}

target_ulong helper_mulhi(CPUMIPSState *env, target_ulong arg1,
                          target_ulong arg2)
{
    return set_HIT0_LO(env, (int64_t)(int32_t)arg1 * (int64_t)(int32_t)arg2);
}

target_ulong helper_mulhiu(CPUMIPSState *env, target_ulong arg1,
                           target_ulong arg2)
{
    return set_HIT0_LO(env, (uint64_t)(uint32_t)arg1 *
                       (uint64_t)(uint32_t)arg2);
}

target_ulong helper_mulshi(CPUMIPSState *env, target_ulong arg1,
                           target_ulong arg2)
{
    return set_HIT0_LO(env, 0 - (int64_t)(int32_t)arg1 *
                       (int64_t)(int32_t)arg2);
}

target_ulong helper_mulshiu(CPUMIPSState *env, target_ulong arg1,
                            target_ulong arg2)
{
    return set_HIT0_LO(env, 0 - (uint64_t)(uint32_t)arg1 *
                       (uint64_t)(uint32_t)arg2);
}
static inline target_ulong bitswap(target_ulong v)
{
    v = ((v >> 1) & (target_ulong)0x5555555555555555ULL) |
        ((v & (target_ulong)0x5555555555555555ULL) << 1);
    v = ((v >> 2) & (target_ulong)0x3333333333333333ULL) |
        ((v & (target_ulong)0x3333333333333333ULL) << 2);
    v = ((v >> 4) & (target_ulong)0x0F0F0F0F0F0F0F0FULL) |
        ((v & (target_ulong)0x0F0F0F0F0F0F0F0FULL) << 4);
    return v;
}

#ifdef TARGET_MIPS64
target_ulong helper_dbitswap(target_ulong rt)
{
    return bitswap(rt);
}
#endif

target_ulong helper_bitswap(target_ulong rt)
{
    return (int32_t)bitswap(rt);
}
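/*
 * The three swap steps in bitswap() above reverse the bit order within each
 * byte independently, e.g. bitswap(0x12) == 0x48 (00010010 -> 01001000).
 */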
#ifndef CONFIG_USER_ONLY

static inline hwaddr do_translate_address(CPUMIPSState *env,
                                          target_ulong address,
                                          int rw, uintptr_t retaddr)
{
    hwaddr lladdr;
    CPUState *cs = CPU(mips_env_get_cpu(env));

    lladdr = cpu_mips_translate_address(env, address, rw);

    if (lladdr == -1LL) {
        cpu_loop_exit_restore(cs, retaddr);
    } else {
        return lladdr;
    }
}

#define HELPER_LD_ATOMIC(name, insn, almask)                                  \
target_ulong helper_##name(CPUMIPSState *env, target_ulong arg, int mem_idx) \
{                                                                             \
    if (arg & almask) {                                                       \
        env->CP0_BadVAddr = arg;                                              \
        do_raise_exception(env, EXCP_AdEL, GETPC());                          \
    }                                                                         \
    env->lladdr = do_translate_address(env, arg, 0, GETPC());                 \
    env->llval = do_##insn(env, arg, mem_idx, GETPC());                       \
    return env->llval;                                                        \
}
HELPER_LD_ATOMIC(ll, lw, 0x3)
#ifdef TARGET_MIPS64
HELPER_LD_ATOMIC(lld, ld, 0x7)
#endif
#undef HELPER_LD_ATOMIC

#define HELPER_ST_ATOMIC(name, ld_insn, st_insn, almask)                      \
target_ulong helper_##name(CPUMIPSState *env, target_ulong arg1,             \
                           target_ulong arg2, int mem_idx)                   \
{                                                                             \
    target_long tmp;                                                          \
                                                                              \
    if (arg2 & almask) {                                                      \
        env->CP0_BadVAddr = arg2;                                             \
        do_raise_exception(env, EXCP_AdES, GETPC());                          \
    }                                                                         \
    if (do_translate_address(env, arg2, 1, GETPC()) == env->lladdr) {         \
        tmp = do_##ld_insn(env, arg2, mem_idx, GETPC());                      \
        if (tmp == env->llval) {                                              \
            do_##st_insn(env, arg2, arg1, mem_idx, GETPC());                  \
            return 1;                                                         \
        }                                                                     \
    }                                                                         \
    return 0;                                                                 \
}
HELPER_ST_ATOMIC(sc, lw, sw, 0x3)
#ifdef TARGET_MIPS64
HELPER_ST_ATOMIC(scd, ld, sd, 0x7)
#endif
#undef HELPER_ST_ATOMIC
#endif
#ifdef TARGET_WORDS_BIGENDIAN
#define GET_LMASK(v) ((v) & 3)
#define GET_OFFSET(addr, offset) (addr + (offset))
#else
#define GET_LMASK(v) (((v) & 3) ^ 3)
#define GET_OFFSET(addr, offset) (addr - (offset))
#endif

void helper_swl(CPUMIPSState *env, target_ulong arg1, target_ulong arg2,
                int mem_idx)
{
    do_sb(env, arg2, (uint8_t)(arg1 >> 24), mem_idx, GETPC());

    if (GET_LMASK(arg2) <= 2) {
        do_sb(env, GET_OFFSET(arg2, 1), (uint8_t)(arg1 >> 16), mem_idx,
              GETPC());
    }

    if (GET_LMASK(arg2) <= 1) {
        do_sb(env, GET_OFFSET(arg2, 2), (uint8_t)(arg1 >> 8), mem_idx,
              GETPC());
    }

    if (GET_LMASK(arg2) == 0) {
        do_sb(env, GET_OFFSET(arg2, 3), (uint8_t)arg1, mem_idx,
              GETPC());
    }
}

void helper_swr(CPUMIPSState *env, target_ulong arg1, target_ulong arg2,
                int mem_idx)
{
    do_sb(env, arg2, (uint8_t)arg1, mem_idx, GETPC());

    if (GET_LMASK(arg2) >= 1) {
        do_sb(env, GET_OFFSET(arg2, -1), (uint8_t)(arg1 >> 8), mem_idx,
              GETPC());
    }

    if (GET_LMASK(arg2) >= 2) {
        do_sb(env, GET_OFFSET(arg2, -2), (uint8_t)(arg1 >> 16), mem_idx,
              GETPC());
    }

    if (GET_LMASK(arg2) == 3) {
        do_sb(env, GET_OFFSET(arg2, -3), (uint8_t)(arg1 >> 24), mem_idx,
              GETPC());
    }
}
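/*
 * Example of the byte-wise pattern above: on a big-endian target with
 * (arg2 & 3) == 1, helper_swl() stores the three most-significant bytes of
 * arg1 at arg2, arg2 + 1 and arg2 + 2, filling the word up to the next
 * aligned boundary; the usual SWL/SWR pairing lets helper_swr() supply the
 * bytes on the other side of the boundary.
 */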
#if defined(TARGET_MIPS64)
/* "half" load and stores.  We must do the memory access inline,
   or fault handling won't work.  */

#ifdef TARGET_WORDS_BIGENDIAN
#define GET_LMASK64(v) ((v) & 7)
#else
#define GET_LMASK64(v) (((v) & 7) ^ 7)
#endif

void helper_sdl(CPUMIPSState *env, target_ulong arg1, target_ulong arg2,
                int mem_idx)
{
    do_sb(env, arg2, (uint8_t)(arg1 >> 56), mem_idx, GETPC());

    if (GET_LMASK64(arg2) <= 6) {
        do_sb(env, GET_OFFSET(arg2, 1), (uint8_t)(arg1 >> 48), mem_idx,
              GETPC());
    }

    if (GET_LMASK64(arg2) <= 5) {
        do_sb(env, GET_OFFSET(arg2, 2), (uint8_t)(arg1 >> 40), mem_idx,
              GETPC());
    }

    if (GET_LMASK64(arg2) <= 4) {
        do_sb(env, GET_OFFSET(arg2, 3), (uint8_t)(arg1 >> 32), mem_idx,
              GETPC());
    }

    if (GET_LMASK64(arg2) <= 3) {
        do_sb(env, GET_OFFSET(arg2, 4), (uint8_t)(arg1 >> 24), mem_idx,
              GETPC());
    }

    if (GET_LMASK64(arg2) <= 2) {
        do_sb(env, GET_OFFSET(arg2, 5), (uint8_t)(arg1 >> 16), mem_idx,
              GETPC());
    }

    if (GET_LMASK64(arg2) <= 1) {
        do_sb(env, GET_OFFSET(arg2, 6), (uint8_t)(arg1 >> 8), mem_idx,
              GETPC());
    }

    if (GET_LMASK64(arg2) <= 0) {
        do_sb(env, GET_OFFSET(arg2, 7), (uint8_t)arg1, mem_idx,
              GETPC());
    }
}

void helper_sdr(CPUMIPSState *env, target_ulong arg1, target_ulong arg2,
                int mem_idx)
{
    do_sb(env, arg2, (uint8_t)arg1, mem_idx, GETPC());

    if (GET_LMASK64(arg2) >= 1) {
        do_sb(env, GET_OFFSET(arg2, -1), (uint8_t)(arg1 >> 8), mem_idx,
              GETPC());
    }

    if (GET_LMASK64(arg2) >= 2) {
        do_sb(env, GET_OFFSET(arg2, -2), (uint8_t)(arg1 >> 16), mem_idx,
              GETPC());
    }

    if (GET_LMASK64(arg2) >= 3) {
        do_sb(env, GET_OFFSET(arg2, -3), (uint8_t)(arg1 >> 24), mem_idx,
              GETPC());
    }

    if (GET_LMASK64(arg2) >= 4) {
        do_sb(env, GET_OFFSET(arg2, -4), (uint8_t)(arg1 >> 32), mem_idx,
              GETPC());
    }

    if (GET_LMASK64(arg2) >= 5) {
        do_sb(env, GET_OFFSET(arg2, -5), (uint8_t)(arg1 >> 40), mem_idx,
              GETPC());
    }

    if (GET_LMASK64(arg2) >= 6) {
        do_sb(env, GET_OFFSET(arg2, -6), (uint8_t)(arg1 >> 48), mem_idx,
              GETPC());
    }

    if (GET_LMASK64(arg2) == 7) {
        do_sb(env, GET_OFFSET(arg2, -7), (uint8_t)(arg1 >> 56), mem_idx,
              GETPC());
    }
}
#endif /* TARGET_MIPS64 */
static const int multiple_regs[] = { 16, 17, 18, 19, 20, 21, 22, 23, 30 };

void helper_lwm(CPUMIPSState *env, target_ulong addr, target_ulong reglist,
                uint32_t mem_idx)
{
    target_ulong base_reglist = reglist & 0xf;
    target_ulong do_r31 = reglist & 0x10;

    if (base_reglist > 0 && base_reglist <= ARRAY_SIZE (multiple_regs)) {
        target_ulong i;

        for (i = 0; i < base_reglist; i++) {
            env->active_tc.gpr[multiple_regs[i]] =
                (target_long)do_lw(env, addr, mem_idx, GETPC());
            addr += 4;
        }
    }

    if (do_r31) {
        env->active_tc.gpr[31] = (target_long)do_lw(env, addr, mem_idx,
                                                    GETPC());
    }
}

void helper_swm(CPUMIPSState *env, target_ulong addr, target_ulong reglist,
                uint32_t mem_idx)
{
    target_ulong base_reglist = reglist & 0xf;
    target_ulong do_r31 = reglist & 0x10;

    if (base_reglist > 0 && base_reglist <= ARRAY_SIZE (multiple_regs)) {
        target_ulong i;

        for (i = 0; i < base_reglist; i++) {
            do_sw(env, addr, env->active_tc.gpr[multiple_regs[i]], mem_idx,
                  GETPC());
            addr += 4;
        }
    }

    if (do_r31) {
        do_sw(env, addr, env->active_tc.gpr[31], mem_idx, GETPC());
    }
}
#if defined(TARGET_MIPS64)
void helper_ldm(CPUMIPSState *env, target_ulong addr, target_ulong reglist,
                uint32_t mem_idx)
{
    target_ulong base_reglist = reglist & 0xf;
    target_ulong do_r31 = reglist & 0x10;

    if (base_reglist > 0 && base_reglist <= ARRAY_SIZE (multiple_regs)) {
        target_ulong i;

        for (i = 0; i < base_reglist; i++) {
            env->active_tc.gpr[multiple_regs[i]] = do_ld(env, addr, mem_idx,
                                                         GETPC());
            addr += 8;
        }
    }

    if (do_r31) {
        env->active_tc.gpr[31] = do_ld(env, addr, mem_idx, GETPC());
    }
}

void helper_sdm(CPUMIPSState *env, target_ulong addr, target_ulong reglist,
                uint32_t mem_idx)
{
    target_ulong base_reglist = reglist & 0xf;
    target_ulong do_r31 = reglist & 0x10;

    if (base_reglist > 0 && base_reglist <= ARRAY_SIZE (multiple_regs)) {
        target_ulong i;

        for (i = 0; i < base_reglist; i++) {
            do_sd(env, addr, env->active_tc.gpr[multiple_regs[i]], mem_idx,
                  GETPC());
            addr += 8;
        }
    }

    if (do_r31) {
        do_sd(env, addr, env->active_tc.gpr[31], mem_idx, GETPC());
    }
}
#endif
#ifndef CONFIG_USER_ONLY
/* SMP helpers.  */
static bool mips_vpe_is_wfi(MIPSCPU *c)
{
    CPUState *cpu = CPU(c);
    CPUMIPSState *env = &c->env;

    /* If the VPE is halted but otherwise active, it means it's waiting for
       an interrupt.  */
    return cpu->halted && mips_vpe_active(env);
}

static bool mips_vp_is_wfi(MIPSCPU *c)
{
    CPUState *cpu = CPU(c);
    CPUMIPSState *env = &c->env;

    return cpu->halted && mips_vp_active(env);
}

static inline void mips_vpe_wake(MIPSCPU *c)
{
    /* Don't set ->halted = 0 directly, let it be done via cpu_has_work
       because there might be other conditions that state that c should
       be sleeping.  */
    cpu_interrupt(CPU(c), CPU_INTERRUPT_WAKE);
}

static inline void mips_vpe_sleep(MIPSCPU *cpu)
{
    CPUState *cs = CPU(cpu);

    /* The VPE was shut off, really go to bed.
       Reset any old _WAKE requests.  */
    cs->halted = 1;
    cpu_reset_interrupt(cs, CPU_INTERRUPT_WAKE);
}

static inline void mips_tc_wake(MIPSCPU *cpu, int tc)
{
    CPUMIPSState *c = &cpu->env;

    /* FIXME: TC reschedule.  */
    if (mips_vpe_active(c) && !mips_vpe_is_wfi(cpu)) {
        mips_vpe_wake(cpu);
    }
}

static inline void mips_tc_sleep(MIPSCPU *cpu, int tc)
{
    CPUMIPSState *c = &cpu->env;

    /* FIXME: TC reschedule.  */
    if (!mips_vpe_active(c)) {
        mips_vpe_sleep(cpu);
    }
}
/**
 * mips_cpu_map_tc:
 * @env: CPU from which mapping is performed.
 * @tc: Should point to an int with the value of the global TC index.
 *
 * This function will transform @tc into a local index within the
 * returned #CPUMIPSState.
 */
/* FIXME: This code assumes that all VPEs have the same number of TCs,
          which depends on runtime setup. Can probably be fixed by
          walking the list of CPUMIPSStates.  */
static CPUMIPSState *mips_cpu_map_tc(CPUMIPSState *env, int *tc)
{
    MIPSCPU *cpu;
    CPUState *cs;
    CPUState *other_cs;
    int vpe_idx;
    int tc_idx = *tc;

    if (!(env->CP0_VPEConf0 & (1 << CP0VPEC0_MVP))) {
        /* Not allowed to address other CPUs.  */
        *tc = env->current_tc;
        return env;
    }

    cs = CPU(mips_env_get_cpu(env));
    vpe_idx = tc_idx / cs->nr_threads;
    *tc = tc_idx % cs->nr_threads;
    other_cs = qemu_get_cpu(vpe_idx);
    if (other_cs == NULL) {
        return env;
    }
    cpu = MIPS_CPU(other_cs);
    return &cpu->env;
}
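/*
 * Example of the mapping above: with cs->nr_threads == 2, a global TC index
 * of 5 selects VPE (CPU) 2 and is rewritten to local TC index 1 before the
 * caller indexes other->tcs[*tc].
 */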
/* The per VPE CP0_Status register shares some fields with the per TC
   CP0_TCStatus registers. These fields are wired to the same registers,
   so changes to either of them should be reflected on both registers.

   Also, EntryHi shares the bottom 8 bit ASID with TCStatus.

   These helper calls synchronize the regs for a given cpu.  */

/* Called for updates to CP0_Status.  Defined in "cpu.h" for gdbstub.c.  */
/* static inline void sync_c0_status(CPUMIPSState *env, CPUMIPSState *cpu,
                                     int tc);  */

/* Called for updates to CP0_TCStatus.  */
static void sync_c0_tcstatus(CPUMIPSState *cpu, int tc,
                             target_ulong v)
{
    uint32_t status;
    uint32_t tcu, tmx, tasid, tksu;
    uint32_t mask = ((1U << CP0St_CU3)
                       | (1 << CP0St_CU2)
                       | (1 << CP0St_CU1)
                       | (1 << CP0St_CU0)
                       | (1 << CP0St_MX)
                       | (3 << CP0St_KSU));

    tcu = (v >> CP0TCSt_TCU0) & 0xf;
    tmx = (v >> CP0TCSt_TMX) & 0x1;
    tasid = v & cpu->CP0_EntryHi_ASID_mask;
    tksu = (v >> CP0TCSt_TKSU) & 0x3;

    status = tcu << CP0St_CU0;
    status |= tmx << CP0St_MX;
    status |= tksu << CP0St_KSU;

    cpu->CP0_Status &= ~mask;
    cpu->CP0_Status |= status;

    /* Sync the TASID with EntryHi.  */
    cpu->CP0_EntryHi &= ~cpu->CP0_EntryHi_ASID_mask;
    cpu->CP0_EntryHi |= tasid;

    compute_hflags(cpu);
}
/* Called for updates to CP0_EntryHi.  */
static void sync_c0_entryhi(CPUMIPSState *cpu, int tc)
{
    int32_t *tcst;
    uint32_t asid, v = cpu->CP0_EntryHi;

    asid = v & cpu->CP0_EntryHi_ASID_mask;

    if (tc == cpu->current_tc) {
        tcst = &cpu->active_tc.CP0_TCStatus;
    } else {
        tcst = &cpu->tcs[tc].CP0_TCStatus;
    }

    *tcst &= ~cpu->CP0_EntryHi_ASID_mask;
    *tcst |= asid;
}
target_ulong helper_mfc0_mvpcontrol(CPUMIPSState *env)
{
    return env->mvp->CP0_MVPControl;
}

target_ulong helper_mfc0_mvpconf0(CPUMIPSState *env)
{
    return env->mvp->CP0_MVPConf0;
}

target_ulong helper_mfc0_mvpconf1(CPUMIPSState *env)
{
    return env->mvp->CP0_MVPConf1;
}

target_ulong helper_mfc0_random(CPUMIPSState *env)
{
    return (int32_t)cpu_mips_get_random(env);
}

target_ulong helper_mfc0_tcstatus(CPUMIPSState *env)
{
    return env->active_tc.CP0_TCStatus;
}

target_ulong helper_mftc0_tcstatus(CPUMIPSState *env)
{
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
    CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);

    if (other_tc == other->current_tc)
        return other->active_tc.CP0_TCStatus;
    else
        return other->tcs[other_tc].CP0_TCStatus;
}

target_ulong helper_mfc0_tcbind(CPUMIPSState *env)
{
    return env->active_tc.CP0_TCBind;
}

target_ulong helper_mftc0_tcbind(CPUMIPSState *env)
{
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
    CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);

    if (other_tc == other->current_tc)
        return other->active_tc.CP0_TCBind;
    else
        return other->tcs[other_tc].CP0_TCBind;
}

target_ulong helper_mfc0_tcrestart(CPUMIPSState *env)
{
    return env->active_tc.PC;
}

target_ulong helper_mftc0_tcrestart(CPUMIPSState *env)
{
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
    CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);

    if (other_tc == other->current_tc)
        return other->active_tc.PC;
    else
        return other->tcs[other_tc].PC;
}

target_ulong helper_mfc0_tchalt(CPUMIPSState *env)
{
    return env->active_tc.CP0_TCHalt;
}

target_ulong helper_mftc0_tchalt(CPUMIPSState *env)
{
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
    CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);

    if (other_tc == other->current_tc)
        return other->active_tc.CP0_TCHalt;
    else
        return other->tcs[other_tc].CP0_TCHalt;
}

target_ulong helper_mfc0_tccontext(CPUMIPSState *env)
{
    return env->active_tc.CP0_TCContext;
}

target_ulong helper_mftc0_tccontext(CPUMIPSState *env)
{
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
    CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);

    if (other_tc == other->current_tc)
        return other->active_tc.CP0_TCContext;
    else
        return other->tcs[other_tc].CP0_TCContext;
}

target_ulong helper_mfc0_tcschedule(CPUMIPSState *env)
{
    return env->active_tc.CP0_TCSchedule;
}

target_ulong helper_mftc0_tcschedule(CPUMIPSState *env)
{
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
    CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);

    if (other_tc == other->current_tc)
        return other->active_tc.CP0_TCSchedule;
    else
        return other->tcs[other_tc].CP0_TCSchedule;
}

target_ulong helper_mfc0_tcschefback(CPUMIPSState *env)
{
    return env->active_tc.CP0_TCScheFBack;
}

target_ulong helper_mftc0_tcschefback(CPUMIPSState *env)
{
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
    CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);

    if (other_tc == other->current_tc)
        return other->active_tc.CP0_TCScheFBack;
    else
        return other->tcs[other_tc].CP0_TCScheFBack;
}

target_ulong helper_mfc0_count(CPUMIPSState *env)
{
    int32_t count;
    qemu_mutex_lock_iothread();
    count = (int32_t) cpu_mips_get_count(env);
    qemu_mutex_unlock_iothread();
    return count;
}

target_ulong helper_mftc0_entryhi(CPUMIPSState *env)
{
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
    CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);

    return other->CP0_EntryHi;
}

target_ulong helper_mftc0_cause(CPUMIPSState *env)
{
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
    int32_t tccause;
    CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);

    if (other_tc == other->current_tc) {
        tccause = other->CP0_Cause;
    } else {
        tccause = other->CP0_Cause;
    }

    return tccause;
}

target_ulong helper_mftc0_status(CPUMIPSState *env)
{
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
    CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);

    return other->CP0_Status;
}

target_ulong helper_mfc0_lladdr(CPUMIPSState *env)
{
    return (int32_t)(env->lladdr >> env->CP0_LLAddr_shift);
}

target_ulong helper_mfc0_maar(CPUMIPSState *env)
{
    return (int32_t) env->CP0_MAAR[env->CP0_MAARI];
}

target_ulong helper_mfhc0_maar(CPUMIPSState *env)
{
    return env->CP0_MAAR[env->CP0_MAARI] >> 32;
}

target_ulong helper_mfc0_watchlo(CPUMIPSState *env, uint32_t sel)
{
    return (int32_t)env->CP0_WatchLo[sel];
}

target_ulong helper_mfc0_watchhi(CPUMIPSState *env, uint32_t sel)
{
    return env->CP0_WatchHi[sel];
}

target_ulong helper_mfc0_debug(CPUMIPSState *env)
{
    target_ulong t0 = env->CP0_Debug;
    if (env->hflags & MIPS_HFLAG_DM)
        t0 |= 1 << CP0DB_DM;

    return t0;
}

target_ulong helper_mftc0_debug(CPUMIPSState *env)
{
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
    int32_t tcstatus;
    CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);

    if (other_tc == other->current_tc)
        tcstatus = other->active_tc.CP0_Debug_tcstatus;
    else
        tcstatus = other->tcs[other_tc].CP0_Debug_tcstatus;

    /* XXX: Might be wrong, check with EJTAG spec. */
    return (other->CP0_Debug & ~((1 << CP0DB_SSt) | (1 << CP0DB_Halt))) |
            (tcstatus & ((1 << CP0DB_SSt) | (1 << CP0DB_Halt)));
}

#if defined(TARGET_MIPS64)
target_ulong helper_dmfc0_tcrestart(CPUMIPSState *env)
{
    return env->active_tc.PC;
}

target_ulong helper_dmfc0_tchalt(CPUMIPSState *env)
{
    return env->active_tc.CP0_TCHalt;
}

target_ulong helper_dmfc0_tccontext(CPUMIPSState *env)
{
    return env->active_tc.CP0_TCContext;
}

target_ulong helper_dmfc0_tcschedule(CPUMIPSState *env)
{
    return env->active_tc.CP0_TCSchedule;
}

target_ulong helper_dmfc0_tcschefback(CPUMIPSState *env)
{
    return env->active_tc.CP0_TCScheFBack;
}

target_ulong helper_dmfc0_lladdr(CPUMIPSState *env)
{
    return env->lladdr >> env->CP0_LLAddr_shift;
}

target_ulong helper_dmfc0_maar(CPUMIPSState *env)
{
    return env->CP0_MAAR[env->CP0_MAARI];
}

target_ulong helper_dmfc0_watchlo(CPUMIPSState *env, uint32_t sel)
{
    return env->CP0_WatchLo[sel];
}
#endif /* TARGET_MIPS64 */
void helper_mtc0_index(CPUMIPSState *env, target_ulong arg1)
{
    uint32_t index_p = env->CP0_Index & 0x80000000;
    uint32_t tlb_index = arg1 & 0x7fffffff;
    if (tlb_index < env->tlb->nb_tlb) {
        if (env->insn_flags & ISA_MIPS32R6) {
            index_p |= arg1 & 0x80000000;
        }
        env->CP0_Index = index_p | tlb_index;
    }
}

void helper_mtc0_mvpcontrol(CPUMIPSState *env, target_ulong arg1)
{
    uint32_t mask = 0;
    uint32_t newval;

    if (env->CP0_VPEConf0 & (1 << CP0VPEC0_MVP))
        mask |= (1 << CP0MVPCo_CPA) | (1 << CP0MVPCo_VPC) |
                (1 << CP0MVPCo_EVP);
    if (env->mvp->CP0_MVPControl & (1 << CP0MVPCo_VPC))
        mask |= (1 << CP0MVPCo_STLB);
    newval = (env->mvp->CP0_MVPControl & ~mask) | (arg1 & mask);

    // TODO: Enable/disable shared TLB, enable/disable VPEs.

    env->mvp->CP0_MVPControl = newval;
}

void helper_mtc0_vpecontrol(CPUMIPSState *env, target_ulong arg1)
{
    uint32_t mask;
    uint32_t newval;

    mask = (1 << CP0VPECo_YSI) | (1 << CP0VPECo_GSI) |
           (1 << CP0VPECo_TE) | (0xff << CP0VPECo_TargTC);
    newval = (env->CP0_VPEControl & ~mask) | (arg1 & mask);

    /* Yield scheduler intercept not implemented. */
    /* Gating storage scheduler intercept not implemented. */

    // TODO: Enable/disable TCs.

    env->CP0_VPEControl = newval;
}

void helper_mttc0_vpecontrol(CPUMIPSState *env, target_ulong arg1)
{
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
    CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);
    uint32_t mask;
    uint32_t newval;

    mask = (1 << CP0VPECo_YSI) | (1 << CP0VPECo_GSI) |
           (1 << CP0VPECo_TE) | (0xff << CP0VPECo_TargTC);
    newval = (other->CP0_VPEControl & ~mask) | (arg1 & mask);

    /* TODO: Enable/disable TCs.  */

    other->CP0_VPEControl = newval;
}

target_ulong helper_mftc0_vpecontrol(CPUMIPSState *env)
{
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
    CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);
    /* FIXME: Mask away return zero on read bits.  */
    return other->CP0_VPEControl;
}

target_ulong helper_mftc0_vpeconf0(CPUMIPSState *env)
{
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
    CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);

    return other->CP0_VPEConf0;
}

void helper_mtc0_vpeconf0(CPUMIPSState *env, target_ulong arg1)
{
    uint32_t mask = 0;
    uint32_t newval;

    if (env->CP0_VPEConf0 & (1 << CP0VPEC0_MVP)) {
        if (env->CP0_VPEConf0 & (1 << CP0VPEC0_VPA))
            mask |= (0xff << CP0VPEC0_XTC);
        mask |= (1 << CP0VPEC0_MVP) | (1 << CP0VPEC0_VPA);
    }
    newval = (env->CP0_VPEConf0 & ~mask) | (arg1 & mask);

    // TODO: TC exclusive handling due to ERL/EXL.

    env->CP0_VPEConf0 = newval;
}

void helper_mttc0_vpeconf0(CPUMIPSState *env, target_ulong arg1)
{
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
    CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);
    uint32_t mask = 0;
    uint32_t newval;

    mask |= (1 << CP0VPEC0_MVP) | (1 << CP0VPEC0_VPA);
    newval = (other->CP0_VPEConf0 & ~mask) | (arg1 & mask);

    /* TODO: TC exclusive handling due to ERL/EXL.  */
    other->CP0_VPEConf0 = newval;
}

void helper_mtc0_vpeconf1(CPUMIPSState *env, target_ulong arg1)
{
    uint32_t mask = 0;
    uint32_t newval;

    if (env->mvp->CP0_MVPControl & (1 << CP0MVPCo_VPC))
        mask |= (0xff << CP0VPEC1_NCX) | (0xff << CP0VPEC1_NCP2) |
                (0xff << CP0VPEC1_NCP1);
    newval = (env->CP0_VPEConf1 & ~mask) | (arg1 & mask);

    /* UDI not implemented. */
    /* CP2 not implemented. */

    // TODO: Handle FPU (CP1) binding.

    env->CP0_VPEConf1 = newval;
}

void helper_mtc0_yqmask(CPUMIPSState *env, target_ulong arg1)
{
    /* Yield qualifier inputs not implemented. */
    env->CP0_YQMask = 0x00000000;
}

void helper_mtc0_vpeopt(CPUMIPSState *env, target_ulong arg1)
{
    env->CP0_VPEOpt = arg1 & 0x0000ffff;
}
#define MTC0_ENTRYLO_MASK(env) ((env->PAMask >> 6) & 0x3FFFFFFF)

void helper_mtc0_entrylo0(CPUMIPSState *env, target_ulong arg1)
{
    /* 1k pages not implemented */
    target_ulong rxi = arg1 & (env->CP0_PageGrain & (3u << CP0PG_XIE));
    env->CP0_EntryLo0 = (arg1 & MTC0_ENTRYLO_MASK(env))
                        | (rxi << (CP0EnLo_XI - 30));
}

#if defined(TARGET_MIPS64)
#define DMTC0_ENTRYLO_MASK(env) (env->PAMask >> 6)

void helper_dmtc0_entrylo0(CPUMIPSState *env, uint64_t arg1)
{
    uint64_t rxi = arg1 & ((env->CP0_PageGrain & (3ull << CP0PG_XIE)) << 32);
    env->CP0_EntryLo0 = (arg1 & DMTC0_ENTRYLO_MASK(env)) | rxi;
}
#endif
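/*
 * Note on the RI/XI handling above: in the 32-bit EntryLo view the RI/XI bits
 * sit in bits 31:30 (gated by the two adjacent PageGrain XIE/RIE bits masked
 * via 3u << CP0PG_XIE), while internally CP0_EntryLo keeps them at the 64-bit
 * positions CP0EnLo_RI/CP0EnLo_XI; hence the shift by (CP0EnLo_XI - 30) in
 * the 32-bit helper and the pre-shifted mask in the 64-bit one.
 */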
void helper_mtc0_tcstatus(CPUMIPSState *env, target_ulong arg1)
{
    uint32_t mask = env->CP0_TCStatus_rw_bitmask;
    uint32_t newval;

    newval = (env->active_tc.CP0_TCStatus & ~mask) | (arg1 & mask);

    env->active_tc.CP0_TCStatus = newval;
    sync_c0_tcstatus(env, env->current_tc, newval);
}

void helper_mttc0_tcstatus(CPUMIPSState *env, target_ulong arg1)
{
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
    CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);

    if (other_tc == other->current_tc)
        other->active_tc.CP0_TCStatus = arg1;
    else
        other->tcs[other_tc].CP0_TCStatus = arg1;
    sync_c0_tcstatus(other, other_tc, arg1);
}

void helper_mtc0_tcbind(CPUMIPSState *env, target_ulong arg1)
{
    uint32_t mask = (1 << CP0TCBd_TBE);
    uint32_t newval;

    if (env->mvp->CP0_MVPControl & (1 << CP0MVPCo_VPC))
        mask |= (1 << CP0TCBd_CurVPE);
    newval = (env->active_tc.CP0_TCBind & ~mask) | (arg1 & mask);
    env->active_tc.CP0_TCBind = newval;
}

void helper_mttc0_tcbind(CPUMIPSState *env, target_ulong arg1)
{
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
    uint32_t mask = (1 << CP0TCBd_TBE);
    uint32_t newval;
    CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);

    if (other->mvp->CP0_MVPControl & (1 << CP0MVPCo_VPC))
        mask |= (1 << CP0TCBd_CurVPE);
    if (other_tc == other->current_tc) {
        newval = (other->active_tc.CP0_TCBind & ~mask) | (arg1 & mask);
        other->active_tc.CP0_TCBind = newval;
    } else {
        newval = (other->tcs[other_tc].CP0_TCBind & ~mask) | (arg1 & mask);
        other->tcs[other_tc].CP0_TCBind = newval;
    }
}

void helper_mtc0_tcrestart(CPUMIPSState *env, target_ulong arg1)
{
    env->active_tc.PC = arg1;
    env->active_tc.CP0_TCStatus &= ~(1 << CP0TCSt_TDS);
    env->lladdr = 0ULL;
    /* MIPS16 not implemented. */
}

void helper_mttc0_tcrestart(CPUMIPSState *env, target_ulong arg1)
{
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
    CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);

    if (other_tc == other->current_tc) {
        other->active_tc.PC = arg1;
        other->active_tc.CP0_TCStatus &= ~(1 << CP0TCSt_TDS);
        other->lladdr = 0ULL;
        /* MIPS16 not implemented. */
    } else {
        other->tcs[other_tc].PC = arg1;
        other->tcs[other_tc].CP0_TCStatus &= ~(1 << CP0TCSt_TDS);
        other->lladdr = 0ULL;
        /* MIPS16 not implemented. */
    }
}

void helper_mtc0_tchalt(CPUMIPSState *env, target_ulong arg1)
{
    MIPSCPU *cpu = mips_env_get_cpu(env);

    env->active_tc.CP0_TCHalt = arg1 & 0x1;

    // TODO: Halt TC / Restart (if allocated+active) TC.
    if (env->active_tc.CP0_TCHalt & 1) {
        mips_tc_sleep(cpu, env->current_tc);
    } else {
        mips_tc_wake(cpu, env->current_tc);
    }
}

void helper_mttc0_tchalt(CPUMIPSState *env, target_ulong arg1)
{
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
    CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);
    MIPSCPU *other_cpu = mips_env_get_cpu(other);

    // TODO: Halt TC / Restart (if allocated+active) TC.

    if (other_tc == other->current_tc)
        other->active_tc.CP0_TCHalt = arg1;
    else
        other->tcs[other_tc].CP0_TCHalt = arg1;

    if (arg1 & 1) {
        mips_tc_sleep(other_cpu, other_tc);
    } else {
        mips_tc_wake(other_cpu, other_tc);
    }
}

void helper_mtc0_tccontext(CPUMIPSState *env, target_ulong arg1)
{
    env->active_tc.CP0_TCContext = arg1;
}

void helper_mttc0_tccontext(CPUMIPSState *env, target_ulong arg1)
{
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
    CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);

    if (other_tc == other->current_tc)
        other->active_tc.CP0_TCContext = arg1;
    else
        other->tcs[other_tc].CP0_TCContext = arg1;
}

void helper_mtc0_tcschedule(CPUMIPSState *env, target_ulong arg1)
{
    env->active_tc.CP0_TCSchedule = arg1;
}

void helper_mttc0_tcschedule(CPUMIPSState *env, target_ulong arg1)
{
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
    CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);

    if (other_tc == other->current_tc)
        other->active_tc.CP0_TCSchedule = arg1;
    else
        other->tcs[other_tc].CP0_TCSchedule = arg1;
}

void helper_mtc0_tcschefback(CPUMIPSState *env, target_ulong arg1)
{
    env->active_tc.CP0_TCScheFBack = arg1;
}

void helper_mttc0_tcschefback(CPUMIPSState *env, target_ulong arg1)
{
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
    CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);

    if (other_tc == other->current_tc)
        other->active_tc.CP0_TCScheFBack = arg1;
    else
        other->tcs[other_tc].CP0_TCScheFBack = arg1;
}

void helper_mtc0_entrylo1(CPUMIPSState *env, target_ulong arg1)
{
    /* 1k pages not implemented */
    target_ulong rxi = arg1 & (env->CP0_PageGrain & (3u << CP0PG_XIE));
    env->CP0_EntryLo1 = (arg1 & MTC0_ENTRYLO_MASK(env))
                        | (rxi << (CP0EnLo_XI - 30));
}

#if defined(TARGET_MIPS64)
void helper_dmtc0_entrylo1(CPUMIPSState *env, uint64_t arg1)
{
    uint64_t rxi = arg1 & ((env->CP0_PageGrain & (3ull << CP0PG_XIE)) << 32);
    env->CP0_EntryLo1 = (arg1 & DMTC0_ENTRYLO_MASK(env)) | rxi;
}
#endif
void helper_mtc0_context(CPUMIPSState *env, target_ulong arg1)
{
    env->CP0_Context = (env->CP0_Context & 0x007FFFFF) | (arg1 & ~0x007FFFFF);
}

void helper_mtc0_pagemask(CPUMIPSState *env, target_ulong arg1)
{
    uint64_t mask = arg1 >> (TARGET_PAGE_BITS + 1);
    if (!(env->insn_flags & ISA_MIPS32R6) || (arg1 == ~0) ||
        (mask == 0x0000 || mask == 0x0003 || mask == 0x000F ||
         mask == 0x003F || mask == 0x00FF || mask == 0x03FF ||
         mask == 0x0FFF || mask == 0x3FFF || mask == 0xFFFF)) {
        env->CP0_PageMask = arg1 & (0x1FFFFFFF & (TARGET_PAGE_MASK << 1));
    }
}

void helper_mtc0_pagegrain(CPUMIPSState *env, target_ulong arg1)
{
    /* SmartMIPS not implemented */
    /* 1k pages not implemented */
    env->CP0_PageGrain = (arg1 & env->CP0_PageGrain_rw_bitmask) |
                         (env->CP0_PageGrain & ~env->CP0_PageGrain_rw_bitmask);
    compute_hflags(env);
    restore_pamask(env);
}

void helper_mtc0_segctl0(CPUMIPSState *env, target_ulong arg1)
{
    CPUState *cs = CPU(mips_env_get_cpu(env));

    env->CP0_SegCtl0 = arg1 & CP0SC0_MASK;
    tlb_flush(cs);
}

void helper_mtc0_segctl1(CPUMIPSState *env, target_ulong arg1)
{
    CPUState *cs = CPU(mips_env_get_cpu(env));

    env->CP0_SegCtl1 = arg1 & CP0SC1_MASK;
    tlb_flush(cs);
}

void helper_mtc0_segctl2(CPUMIPSState *env, target_ulong arg1)
{
    CPUState *cs = CPU(mips_env_get_cpu(env));

    env->CP0_SegCtl2 = arg1 & CP0SC2_MASK;
    tlb_flush(cs);
}

void helper_mtc0_wired(CPUMIPSState *env, target_ulong arg1)
{
    if (env->insn_flags & ISA_MIPS32R6) {
        if (arg1 < env->tlb->nb_tlb) {
            env->CP0_Wired = arg1;
        }
    } else {
        env->CP0_Wired = arg1 % env->tlb->nb_tlb;
    }
}

void helper_mtc0_srsconf0(CPUMIPSState *env, target_ulong arg1)
{
    env->CP0_SRSConf0 |= arg1 & env->CP0_SRSConf0_rw_bitmask;
}

void helper_mtc0_srsconf1(CPUMIPSState *env, target_ulong arg1)
{
    env->CP0_SRSConf1 |= arg1 & env->CP0_SRSConf1_rw_bitmask;
}

void helper_mtc0_srsconf2(CPUMIPSState *env, target_ulong arg1)
{
    env->CP0_SRSConf2 |= arg1 & env->CP0_SRSConf2_rw_bitmask;
}

void helper_mtc0_srsconf3(CPUMIPSState *env, target_ulong arg1)
{
    env->CP0_SRSConf3 |= arg1 & env->CP0_SRSConf3_rw_bitmask;
}

void helper_mtc0_srsconf4(CPUMIPSState *env, target_ulong arg1)
{
    env->CP0_SRSConf4 |= arg1 & env->CP0_SRSConf4_rw_bitmask;
}
void helper_mtc0_hwrena(CPUMIPSState *env, target_ulong arg1)
{
    uint32_t mask = 0x0000000F;

    if ((env->CP0_Config1 & (1 << CP0C1_PC)) &&
        (env->insn_flags & ISA_MIPS32R6)) {
        mask |= (1 << 4);
    }
    if (env->insn_flags & ISA_MIPS32R6) {
        mask |= (1 << 5);
    }
    if (env->CP0_Config3 & (1 << CP0C3_ULRI)) {
        mask |= (1 << 29);

        if (arg1 & (1 << 29)) {
            env->hflags |= MIPS_HFLAG_HWRENA_ULR;
        } else {
            env->hflags &= ~MIPS_HFLAG_HWRENA_ULR;
        }
    }

    env->CP0_HWREna = arg1 & mask;
}

void helper_mtc0_count(CPUMIPSState *env, target_ulong arg1)
{
    qemu_mutex_lock_iothread();
    cpu_mips_store_count(env, arg1);
    qemu_mutex_unlock_iothread();
}

void helper_mtc0_entryhi(CPUMIPSState *env, target_ulong arg1)
{
    target_ulong old, val, mask;
    mask = (TARGET_PAGE_MASK << 1) | env->CP0_EntryHi_ASID_mask;
    if (((env->CP0_Config4 >> CP0C4_IE) & 0x3) >= 2) {
        mask |= 1 << CP0EnHi_EHINV;
    }

    /* 1k pages not implemented */
#if defined(TARGET_MIPS64)
    if (env->insn_flags & ISA_MIPS32R6) {
        int entryhi_r = extract64(arg1, 62, 2);
        int config0_at = extract32(env->CP0_Config0, 13, 2);
        bool no_supervisor = (env->CP0_Status_rw_bitmask & 0x8) == 0;
        if ((entryhi_r == 2) ||
            (entryhi_r == 1 && (no_supervisor || config0_at == 1))) {
            /* skip EntryHi.R field if new value is reserved */
            mask &= ~(0x3ull << 62);
        }
    }
    mask &= env->SEGMask;
#endif
    old = env->CP0_EntryHi;
    val = (arg1 & mask) | (old & ~mask);
    env->CP0_EntryHi = val;
    if (env->CP0_Config3 & (1 << CP0C3_MT)) {
        sync_c0_entryhi(env, env->current_tc);
    }
    /* If the ASID changes, flush qemu's TLB.  */
    if ((old & env->CP0_EntryHi_ASID_mask) !=
        (val & env->CP0_EntryHi_ASID_mask)) {
        tlb_flush(CPU(mips_env_get_cpu(env)));
    }
}

void helper_mttc0_entryhi(CPUMIPSState *env, target_ulong arg1)
{
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
    CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);

    other->CP0_EntryHi = arg1;
    sync_c0_entryhi(other, other_tc);
}

void helper_mtc0_compare(CPUMIPSState *env, target_ulong arg1)
{
    qemu_mutex_lock_iothread();
    cpu_mips_store_compare(env, arg1);
    qemu_mutex_unlock_iothread();
}
void helper_mtc0_status(CPUMIPSState *env, target_ulong arg1)
{
    MIPSCPU *cpu = mips_env_get_cpu(env);
    uint32_t val, old;

    old = env->CP0_Status;
    cpu_mips_store_status(env, arg1);
    val = env->CP0_Status;

    if (qemu_loglevel_mask(CPU_LOG_EXEC)) {
        qemu_log("Status %08x (%08x) => %08x (%08x) Cause %08x",
                old, old & env->CP0_Cause & CP0Ca_IP_mask,
                val, val & env->CP0_Cause & CP0Ca_IP_mask,
                env->CP0_Cause);
        switch (cpu_mmu_index(env, false)) {
        case 3:
            qemu_log(", ERL\n");
            break;
        case MIPS_HFLAG_UM: qemu_log(", UM\n"); break;
        case MIPS_HFLAG_SM: qemu_log(", SM\n"); break;
        case MIPS_HFLAG_KM: qemu_log("\n"); break;
        default:
            cpu_abort(CPU(cpu), "Invalid MMU mode!\n");
            break;
        }
    }
}

void helper_mttc0_status(CPUMIPSState *env, target_ulong arg1)
{
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
    uint32_t mask = env->CP0_Status_rw_bitmask & ~0xf1000018;
    CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);

    other->CP0_Status = (other->CP0_Status & ~mask) | (arg1 & mask);
    sync_c0_status(env, other, other_tc);
}

void helper_mtc0_intctl(CPUMIPSState *env, target_ulong arg1)
{
    env->CP0_IntCtl = (env->CP0_IntCtl & ~0x000003e0) | (arg1 & 0x000003e0);
}

void helper_mtc0_srsctl(CPUMIPSState *env, target_ulong arg1)
{
    uint32_t mask = (0xf << CP0SRSCtl_ESS) | (0xf << CP0SRSCtl_PSS);
    env->CP0_SRSCtl = (env->CP0_SRSCtl & ~mask) | (arg1 & mask);
}

void helper_mtc0_cause(CPUMIPSState *env, target_ulong arg1)
{
    qemu_mutex_lock_iothread();
    cpu_mips_store_cause(env, arg1);
    qemu_mutex_unlock_iothread();
}

void helper_mttc0_cause(CPUMIPSState *env, target_ulong arg1)
{
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
    CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);

    cpu_mips_store_cause(other, arg1);
}
target_ulong helper_mftc0_epc(CPUMIPSState *env)
{
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
    CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);

    return other->CP0_EPC;
}

target_ulong helper_mftc0_ebase(CPUMIPSState *env)
{
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
    CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);

    return other->CP0_EBase;
}

void helper_mtc0_ebase(CPUMIPSState *env, target_ulong arg1)
{
    target_ulong mask = 0x3FFFF000 | env->CP0_EBaseWG_rw_bitmask;
    if (arg1 & env->CP0_EBaseWG_rw_bitmask) {
        mask |= ~0x3FFFFFFF;
    }
    env->CP0_EBase = (env->CP0_EBase & ~mask) | (arg1 & mask);
}

void helper_mttc0_ebase(CPUMIPSState *env, target_ulong arg1)
{
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
    CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);
    target_ulong mask = 0x3FFFF000 | env->CP0_EBaseWG_rw_bitmask;
    if (arg1 & env->CP0_EBaseWG_rw_bitmask) {
        mask |= ~0x3FFFFFFF;
    }
    other->CP0_EBase = (other->CP0_EBase & ~mask) | (arg1 & mask);
}

target_ulong helper_mftc0_configx(CPUMIPSState *env, target_ulong idx)
{
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
    CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);

    switch (idx) {
    case 0: return other->CP0_Config0;
    case 1: return other->CP0_Config1;
    case 2: return other->CP0_Config2;
    case 3: return other->CP0_Config3;
    /* 4 and 5 are reserved.  */
    case 6: return other->CP0_Config6;
    case 7: return other->CP0_Config7;
    default:
        break;
    }
    return 0;
}

void helper_mtc0_config0(CPUMIPSState *env, target_ulong arg1)
{
    env->CP0_Config0 = (env->CP0_Config0 & 0x81FFFFF8) | (arg1 & 0x00000007);
}

void helper_mtc0_config2(CPUMIPSState *env, target_ulong arg1)
{
    /* tertiary/secondary caches not implemented */
    env->CP0_Config2 = (env->CP0_Config2 & 0x8FFF0FFF);
}

void helper_mtc0_config3(CPUMIPSState *env, target_ulong arg1)
{
    if (env->insn_flags & ASE_MICROMIPS) {
        env->CP0_Config3 = (env->CP0_Config3 & ~(1 << CP0C3_ISA_ON_EXC)) |
                           (arg1 & (1 << CP0C3_ISA_ON_EXC));
    }
}

void helper_mtc0_config4(CPUMIPSState *env, target_ulong arg1)
{
    env->CP0_Config4 = (env->CP0_Config4 & (~env->CP0_Config4_rw_bitmask)) |
                       (arg1 & env->CP0_Config4_rw_bitmask);
}

void helper_mtc0_config5(CPUMIPSState *env, target_ulong arg1)
{
    env->CP0_Config5 = (env->CP0_Config5 & (~env->CP0_Config5_rw_bitmask)) |
                       (arg1 & env->CP0_Config5_rw_bitmask);
    compute_hflags(env);
}

void helper_mtc0_lladdr(CPUMIPSState *env, target_ulong arg1)
{
    target_long mask = env->CP0_LLAddr_rw_bitmask;
    arg1 = arg1 << env->CP0_LLAddr_shift;
    env->lladdr = (env->lladdr & ~mask) | (arg1 & mask);
}

#define MTC0_MAAR_MASK(env) \
        ((0x1ULL << 63) | ((env->PAMask >> 4) & ~0xFFFull) | 0x3)

void helper_mtc0_maar(CPUMIPSState *env, target_ulong arg1)
{
    env->CP0_MAAR[env->CP0_MAARI] = arg1 & MTC0_MAAR_MASK(env);
}

void helper_mthc0_maar(CPUMIPSState *env, target_ulong arg1)
{
    env->CP0_MAAR[env->CP0_MAARI] =
        (((uint64_t) arg1 << 32) & MTC0_MAAR_MASK(env)) |
        (env->CP0_MAAR[env->CP0_MAARI] & 0x00000000ffffffffULL);
}

void helper_mtc0_maari(CPUMIPSState *env, target_ulong arg1)
{
    int index = arg1 & 0x3f;
    if (index == 0x3f) {
        /* Software may write all ones to INDEX to determine the
           maximum value supported. */
        env->CP0_MAARI = MIPS_MAAR_MAX - 1;
    } else if (index < MIPS_MAAR_MAX) {
        env->CP0_MAARI = index;
    }
    /* Other than the all ones, if the
       value written is not supported, then INDEX is unchanged
       from its previous value. */
}
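/*
 * Example of the MAARI behaviour above: writing 0x3f (all ones in the 6-bit
 * index field) sets CP0_MAARI to MIPS_MAAR_MAX - 1, letting software discover
 * how many MAAR register pairs are implemented; writing any other unsupported
 * index leaves CP0_MAARI unchanged.
 */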
void helper_mtc0_watchlo(CPUMIPSState *env, target_ulong arg1, uint32_t sel)
{
    /* Watch exceptions for instructions, data loads, data stores
       not implemented. */
    env->CP0_WatchLo[sel] = (arg1 & ~0x7);
}

void helper_mtc0_watchhi(CPUMIPSState *env, target_ulong arg1, uint32_t sel)
{
    int mask = 0x40000FF8 | (env->CP0_EntryHi_ASID_mask << CP0WH_ASID);
    env->CP0_WatchHi[sel] = arg1 & mask;
    env->CP0_WatchHi[sel] &= ~(env->CP0_WatchHi[sel] & arg1 & 0x7);
}

void helper_mtc0_xcontext(CPUMIPSState *env, target_ulong arg1)
{
    target_ulong mask = (1ULL << (env->SEGBITS - 7)) - 1;
    env->CP0_XContext = (env->CP0_XContext & mask) | (arg1 & ~mask);
}

void helper_mtc0_framemask(CPUMIPSState *env, target_ulong arg1)
{
    env->CP0_Framemask = arg1; /* XXX */
}

void helper_mtc0_debug(CPUMIPSState *env, target_ulong arg1)
{
    env->CP0_Debug = (env->CP0_Debug & 0x8C03FC1F) | (arg1 & 0x13300120);
    if (arg1 & (1 << CP0DB_DM))
        env->hflags |= MIPS_HFLAG_DM;
    else
        env->hflags &= ~MIPS_HFLAG_DM;
}

void helper_mttc0_debug(CPUMIPSState *env, target_ulong arg1)
{
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
    uint32_t val = arg1 & ((1 << CP0DB_SSt) | (1 << CP0DB_Halt));
    CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);

    /* XXX: Might be wrong, check with EJTAG spec. */
    if (other_tc == other->current_tc)
        other->active_tc.CP0_Debug_tcstatus = val;
    else
        other->tcs[other_tc].CP0_Debug_tcstatus = val;
    other->CP0_Debug = (other->CP0_Debug &
                     ((1 << CP0DB_SSt) | (1 << CP0DB_Halt))) |
                     (arg1 & ~((1 << CP0DB_SSt) | (1 << CP0DB_Halt)));
}

void helper_mtc0_performance0(CPUMIPSState *env, target_ulong arg1)
{
    env->CP0_Performance0 = arg1 & 0x000007ff;
}

void helper_mtc0_errctl(CPUMIPSState *env, target_ulong arg1)
{
    int32_t wst = arg1 & (1 << CP0EC_WST);
    int32_t spr = arg1 & (1 << CP0EC_SPR);
    int32_t itc = env->itc_tag ? (arg1 & (1 << CP0EC_ITC)) : 0;

    env->CP0_ErrCtl = wst | spr | itc;

    if (itc && !wst && !spr) {
        env->hflags |= MIPS_HFLAG_ITC_CACHE;
    } else {
        env->hflags &= ~MIPS_HFLAG_ITC_CACHE;
    }
}

void helper_mtc0_taglo(CPUMIPSState *env, target_ulong arg1)
{
    if (env->hflags & MIPS_HFLAG_ITC_CACHE) {
        /* If CACHE instruction is configured for ITC tags then make all
           CP0.TagLo bits writable. The actual write to ITC Configuration
           Tag will take care of the read-only bits. */
        env->CP0_TagLo = arg1;
    } else {
        env->CP0_TagLo = arg1 & 0xFFFFFCF6;
    }
}

void helper_mtc0_datalo(CPUMIPSState *env, target_ulong arg1)
{
    env->CP0_DataLo = arg1; /* XXX */
}

void helper_mtc0_taghi(CPUMIPSState *env, target_ulong arg1)
{
    env->CP0_TagHi = arg1; /* XXX */
}

void helper_mtc0_datahi(CPUMIPSState *env, target_ulong arg1)
{
    env->CP0_DataHi = arg1; /* XXX */
}
/* MIPS MT functions */
target_ulong helper_mftgpr(CPUMIPSState *env, uint32_t sel)
{
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
    CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);

    if (other_tc == other->current_tc)
        return other->active_tc.gpr[sel];
    else
        return other->tcs[other_tc].gpr[sel];
}

target_ulong helper_mftlo(CPUMIPSState *env, uint32_t sel)
{
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
    CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);

    if (other_tc == other->current_tc)
        return other->active_tc.LO[sel];
    else
        return other->tcs[other_tc].LO[sel];
}

target_ulong helper_mfthi(CPUMIPSState *env, uint32_t sel)
{
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
    CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);

    if (other_tc == other->current_tc)
        return other->active_tc.HI[sel];
    else
        return other->tcs[other_tc].HI[sel];
}

target_ulong helper_mftacx(CPUMIPSState *env, uint32_t sel)
{
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
    CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);

    if (other_tc == other->current_tc)
        return other->active_tc.ACX[sel];
    else
        return other->tcs[other_tc].ACX[sel];
}

target_ulong helper_mftdsp(CPUMIPSState *env)
{
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
    CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);

    if (other_tc == other->current_tc)
        return other->active_tc.DSPControl;
    else
        return other->tcs[other_tc].DSPControl;
}

void helper_mttgpr(CPUMIPSState *env, target_ulong arg1, uint32_t sel)
{
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
    CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);

    if (other_tc == other->current_tc)
        other->active_tc.gpr[sel] = arg1;
    else
        other->tcs[other_tc].gpr[sel] = arg1;
}

void helper_mttlo(CPUMIPSState *env, target_ulong arg1, uint32_t sel)
{
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
    CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);

    if (other_tc == other->current_tc)
        other->active_tc.LO[sel] = arg1;
    else
        other->tcs[other_tc].LO[sel] = arg1;
}

void helper_mtthi(CPUMIPSState *env, target_ulong arg1, uint32_t sel)
{
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
    CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);

    if (other_tc == other->current_tc)
        other->active_tc.HI[sel] = arg1;
    else
        other->tcs[other_tc].HI[sel] = arg1;
}

void helper_mttacx(CPUMIPSState *env, target_ulong arg1, uint32_t sel)
{
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
    CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);

    if (other_tc == other->current_tc)
        other->active_tc.ACX[sel] = arg1;
    else
        other->tcs[other_tc].ACX[sel] = arg1;
}

void helper_mttdsp(CPUMIPSState *env, target_ulong arg1)
{
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
    CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);

    if (other_tc == other->current_tc)
        other->active_tc.DSPControl = arg1;
    else
        other->tcs[other_tc].DSPControl = arg1;
}
/* MIPS MT functions */
target_ulong helper_dmt(void)
{
    // TODO
    return 0;
}

target_ulong helper_emt(void)
{
    // TODO
    return 0;
}

target_ulong helper_dvpe(CPUMIPSState *env)
{
    CPUState *other_cs = first_cpu;
    target_ulong prev = env->mvp->CP0_MVPControl;

    CPU_FOREACH(other_cs) {
        MIPSCPU *other_cpu = MIPS_CPU(other_cs);
        /* Turn off all VPEs except the one executing the dvpe.  */
        if (&other_cpu->env != env) {
            other_cpu->env.mvp->CP0_MVPControl &= ~(1 << CP0MVPCo_EVP);
            mips_vpe_sleep(other_cpu);
        }
    }
    return prev;
}

target_ulong helper_evpe(CPUMIPSState *env)
{
    CPUState *other_cs = first_cpu;
    target_ulong prev = env->mvp->CP0_MVPControl;

    CPU_FOREACH(other_cs) {
        MIPSCPU *other_cpu = MIPS_CPU(other_cs);

        if (&other_cpu->env != env
            /* If the VPE is WFI, don't disturb its sleep.  */
            && !mips_vpe_is_wfi(other_cpu)) {
            /* Enable the VPE.  */
            other_cpu->env.mvp->CP0_MVPControl |= (1 << CP0MVPCo_EVP);
            mips_vpe_wake(other_cpu); /* And wake it up.  */
        }
    }
    return prev;
}
#endif /* !CONFIG_USER_ONLY */
void helper_fork(target_ulong arg1, target_ulong arg2)
{
    // arg1 = rt, arg2 = rs
    // TODO: store to TC register
}

target_ulong helper_yield(CPUMIPSState *env, target_ulong arg)
{
    target_long arg1 = arg;

    if (arg1 < 0) {
        /* No scheduling policy implemented. */
        if (arg1 != -2) {
            if (env->CP0_VPEControl & (1 << CP0VPECo_YSI) &&
                env->active_tc.CP0_TCStatus & (1 << CP0TCSt_DT)) {
                env->CP0_VPEControl &= ~(0x7 << CP0VPECo_EXCPT);
                env->CP0_VPEControl |= 4 << CP0VPECo_EXCPT;
                do_raise_exception(env, EXCP_THREAD, GETPC());
            }
        }
    } else if (arg1 == 0) {
        if (0 /* TODO: TC underflow */) {
            env->CP0_VPEControl &= ~(0x7 << CP0VPECo_EXCPT);
            do_raise_exception(env, EXCP_THREAD, GETPC());
        } else {
            // TODO: Deallocate TC
        }
    } else if (arg1 > 0) {
        /* Yield qualifier inputs not implemented. */
        env->CP0_VPEControl &= ~(0x7 << CP0VPECo_EXCPT);
        env->CP0_VPEControl |= 2 << CP0VPECo_EXCPT;
        do_raise_exception(env, EXCP_THREAD, GETPC());
    }
    return env->CP0_YQMask;
}
/* R6 Multi-threading */
#ifndef CONFIG_USER_ONLY
target_ulong helper_dvp(CPUMIPSState *env)
{
    CPUState *other_cs = first_cpu;
    target_ulong prev = env->CP0_VPControl;

    if (!((env->CP0_VPControl >> CP0VPCtl_DIS) & 1)) {
        CPU_FOREACH(other_cs) {
            MIPSCPU *other_cpu = MIPS_CPU(other_cs);
            /* Turn off all VPs except the one executing the dvp.  */
            if (&other_cpu->env != env) {
                mips_vpe_sleep(other_cpu);
            }
        }
        env->CP0_VPControl |= (1 << CP0VPCtl_DIS);
    }
    return prev;
}

target_ulong helper_evp(CPUMIPSState *env)
{
    CPUState *other_cs = first_cpu;
    target_ulong prev = env->CP0_VPControl;

    if ((env->CP0_VPControl >> CP0VPCtl_DIS) & 1) {
        CPU_FOREACH(other_cs) {
            MIPSCPU *other_cpu = MIPS_CPU(other_cs);
            if ((&other_cpu->env != env) && !mips_vp_is_wfi(other_cpu)) {
                /* If the VP is WFI, don't disturb its sleep.
                 * Otherwise, wake it up. */
                mips_vpe_wake(other_cpu);
            }
        }
        env->CP0_VPControl &= ~(1 << CP0VPCtl_DIS);
    }
    return prev;
}
#endif /* !CONFIG_USER_ONLY */
#ifndef CONFIG_USER_ONLY
/* TLB management */
static void r4k_mips_tlb_flush_extra(CPUMIPSState *env, int first)
{
    /* Discard entries from env->tlb[first] onwards.  */
    while (env->tlb->tlb_in_use > first) {
        r4k_invalidate_tlb(env, --env->tlb->tlb_in_use, 0);
    }
}

static inline uint64_t get_tlb_pfn_from_entrylo(uint64_t entrylo)
{
#if defined(TARGET_MIPS64)
    return extract64(entrylo, 6, 54);
#else
    return extract64(entrylo, 6, 24) | /* PFN */
           (extract64(entrylo, 32, 32) << 24); /* PFNX */
#endif
}
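/*
 * On 32-bit targets the value built above combines the 24-bit PFN (EntryLo
 * bits 29:6) with the PFNX field (EntryLo bits 63:32, used by the XPA
 * extension) to form the full physical frame number; on MIPS64 the PFN is
 * taken directly from EntryLo bits 59:6.
 */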
static void r4k_fill_tlb(CPUMIPSState *env, int idx)
{
    r4k_tlb_t *tlb;
    uint64_t mask = env->CP0_PageMask >> (TARGET_PAGE_BITS + 1);

    /* XXX: detect conflicting TLBs and raise a MCHECK exception when needed */
    tlb = &env->tlb->mmu.r4k.tlb[idx];
    if (env->CP0_EntryHi & (1 << CP0EnHi_EHINV)) {
        tlb->EHINV = 1;
        return;
    }
    tlb->EHINV = 0;
    tlb->VPN = env->CP0_EntryHi & (TARGET_PAGE_MASK << 1);
#if defined(TARGET_MIPS64)
    tlb->VPN &= env->SEGMask;
#endif
    tlb->ASID = env->CP0_EntryHi & env->CP0_EntryHi_ASID_mask;
    tlb->PageMask = env->CP0_PageMask;
    tlb->G = env->CP0_EntryLo0 & env->CP0_EntryLo1 & 1;
    tlb->V0 = (env->CP0_EntryLo0 & 2) != 0;
    tlb->D0 = (env->CP0_EntryLo0 & 4) != 0;
    tlb->C0 = (env->CP0_EntryLo0 >> 3) & 0x7;
    tlb->XI0 = (env->CP0_EntryLo0 >> CP0EnLo_XI) & 1;
    tlb->RI0 = (env->CP0_EntryLo0 >> CP0EnLo_RI) & 1;
    tlb->PFN[0] = (get_tlb_pfn_from_entrylo(env->CP0_EntryLo0) & ~mask) << 12;
    tlb->V1 = (env->CP0_EntryLo1 & 2) != 0;
    tlb->D1 = (env->CP0_EntryLo1 & 4) != 0;
    tlb->C1 = (env->CP0_EntryLo1 >> 3) & 0x7;
    tlb->XI1 = (env->CP0_EntryLo1 >> CP0EnLo_XI) & 1;
    tlb->RI1 = (env->CP0_EntryLo1 >> CP0EnLo_RI) & 1;
    tlb->PFN[1] = (get_tlb_pfn_from_entrylo(env->CP0_EntryLo1) & ~mask) << 12;
}
void r4k_helper_tlbinv(CPUMIPSState *env)
{
    int idx;
    r4k_tlb_t *tlb;
    uint16_t ASID = env->CP0_EntryHi & env->CP0_EntryHi_ASID_mask;

    for (idx = 0; idx < env->tlb->nb_tlb; idx++) {
        tlb = &env->tlb->mmu.r4k.tlb[idx];
        if (!tlb->G && tlb->ASID == ASID) {
            tlb->EHINV = 1;
        }
    }
    cpu_mips_tlb_flush(env);
}

void r4k_helper_tlbinvf(CPUMIPSState *env)
{
    int idx;

    for (idx = 0; idx < env->tlb->nb_tlb; idx++) {
        env->tlb->mmu.r4k.tlb[idx].EHINV = 1;
    }
    cpu_mips_tlb_flush(env);
}

void r4k_helper_tlbwi(CPUMIPSState *env)
{
    r4k_tlb_t *tlb;
    int idx;
    target_ulong VPN;
    uint16_t ASID;
    bool EHINV, G, V0, D0, V1, D1, XI0, XI1, RI0, RI1;

    idx = (env->CP0_Index & ~0x80000000) % env->tlb->nb_tlb;
    tlb = &env->tlb->mmu.r4k.tlb[idx];
    VPN = env->CP0_EntryHi & (TARGET_PAGE_MASK << 1);
#if defined(TARGET_MIPS64)
    VPN &= env->SEGMask;
#endif
    ASID = env->CP0_EntryHi & env->CP0_EntryHi_ASID_mask;
    EHINV = (env->CP0_EntryHi & (1 << CP0EnHi_EHINV)) != 0;
    G = env->CP0_EntryLo0 & env->CP0_EntryLo1 & 1;
    V0 = (env->CP0_EntryLo0 & 2) != 0;
    D0 = (env->CP0_EntryLo0 & 4) != 0;
    XI0 = (env->CP0_EntryLo0 >> CP0EnLo_XI) & 1;
    RI0 = (env->CP0_EntryLo0 >> CP0EnLo_RI) & 1;
    V1 = (env->CP0_EntryLo1 & 2) != 0;
    D1 = (env->CP0_EntryLo1 & 4) != 0;
    XI1 = (env->CP0_EntryLo1 >> CP0EnLo_XI) & 1;
    RI1 = (env->CP0_EntryLo1 >> CP0EnLo_RI) & 1;

    /* Discard cached TLB entries, unless tlbwi is just upgrading access
       permissions on the current entry. */
    if (tlb->VPN != VPN || tlb->ASID != ASID || tlb->G != G ||
        (!tlb->EHINV && EHINV) ||
        (tlb->V0 && !V0) || (tlb->D0 && !D0) ||
        (!tlb->XI0 && XI0) || (!tlb->RI0 && RI0) ||
        (tlb->V1 && !V1) || (tlb->D1 && !D1) ||
        (!tlb->XI1 && XI1) || (!tlb->RI1 && RI1)) {
        r4k_mips_tlb_flush_extra(env, env->tlb->nb_tlb);
    }

    r4k_invalidate_tlb(env, idx, 0);
    r4k_fill_tlb(env, idx);
}
2108 void r4k_helper_tlbwr(CPUMIPSState
*env
)
2110 int r
= cpu_mips_get_random(env
);
2112 r4k_invalidate_tlb(env
, r
, 1);
2113 r4k_fill_tlb(env
, r
);

void r4k_helper_tlbp(CPUMIPSState *env)
{
    r4k_tlb_t *tlb;
    target_ulong mask;
    target_ulong tag;
    target_ulong VPN;
    uint16_t ASID;
    int i;

    ASID = env->CP0_EntryHi & env->CP0_EntryHi_ASID_mask;
    for (i = 0; i < env->tlb->nb_tlb; i++) {
        tlb = &env->tlb->mmu.r4k.tlb[i];
        /* 1k pages are not supported. */
        mask = tlb->PageMask | ~(TARGET_PAGE_MASK << 1);
        tag = env->CP0_EntryHi & ~mask;
        VPN = tlb->VPN & ~mask;
#if defined(TARGET_MIPS64)
        tag &= env->SEGMask;
#endif
        /* Check ASID, virtual page number & size */
        if ((tlb->G == 1 || tlb->ASID == ASID) && VPN == tag && !tlb->EHINV) {
            /* TLB match */
            env->CP0_Index = i;
            break;
        }
    }
    if (i == env->tlb->nb_tlb) {
        /* No match.  Discard any shadow entries, if any of them match.  */
        for (i = env->tlb->nb_tlb; i < env->tlb->tlb_in_use; i++) {
            tlb = &env->tlb->mmu.r4k.tlb[i];
            /* 1k pages are not supported. */
            mask = tlb->PageMask | ~(TARGET_PAGE_MASK << 1);
            tag = env->CP0_EntryHi & ~mask;
            VPN = tlb->VPN & ~mask;
#if defined(TARGET_MIPS64)
            tag &= env->SEGMask;
#endif
            /* Check ASID, virtual page number & size */
            if ((tlb->G == 1 || tlb->ASID == ASID) && VPN == tag) {
                r4k_mips_tlb_flush_extra(env, i);
                break;
            }
        }

        env->CP0_Index |= 0x80000000;
    }
}
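
/*
 * Note (annotation): setting bit 31 of CP0_Index above corresponds to the
 * architectural Index.P (probe failure) bit; guest software tests this bit
 * after a TLBP to detect that no matching entry was found.
 */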

static inline uint64_t get_entrylo_pfn_from_tlb(uint64_t tlb_pfn)
{
#if defined(TARGET_MIPS64)
    return tlb_pfn << 6;
#else
    return (extract64(tlb_pfn, 0, 24) << 6) |  /* PFN */
           (extract64(tlb_pfn, 24, 32) << 32); /* PFNX */
#endif
}
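
/*
 * Note (annotation): on 64-bit targets the frame number simply moves back
 * up to bit 6 of EntryLo.  On 32-bit targets with extended physical
 * addressing the frame number is split between the EntryLo PFN field and
 * the PFNX bits held in the upper half of the 64-bit CP0 value, which is
 * what the extract64()/shift pair above reconstructs.
 */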

void r4k_helper_tlbr(CPUMIPSState *env)
{
    r4k_tlb_t *tlb;
    uint16_t ASID;
    int idx;

    ASID = env->CP0_EntryHi & env->CP0_EntryHi_ASID_mask;
    idx = (env->CP0_Index & ~0x80000000) % env->tlb->nb_tlb;
    tlb = &env->tlb->mmu.r4k.tlb[idx];

    /* If this will change the current ASID, flush qemu's TLB.  */
    if (ASID != tlb->ASID) {
        cpu_mips_tlb_flush(env);
    }

    r4k_mips_tlb_flush_extra(env, env->tlb->nb_tlb);

    if (tlb->EHINV) {
        env->CP0_EntryHi = 1 << CP0EnHi_EHINV;
        env->CP0_PageMask = 0;
        env->CP0_EntryLo0 = 0;
        env->CP0_EntryLo1 = 0;
    } else {
        env->CP0_EntryHi = tlb->VPN | tlb->ASID;
        env->CP0_PageMask = tlb->PageMask;
        env->CP0_EntryLo0 = tlb->G | (tlb->V0 << 1) | (tlb->D0 << 2) |
                        ((uint64_t)tlb->RI0 << CP0EnLo_RI) |
                        ((uint64_t)tlb->XI0 << CP0EnLo_XI) | (tlb->C0 << 3) |
                        get_entrylo_pfn_from_tlb(tlb->PFN[0] >> 12);
        env->CP0_EntryLo1 = tlb->G | (tlb->V1 << 1) | (tlb->D1 << 2) |
                        ((uint64_t)tlb->RI1 << CP0EnLo_RI) |
                        ((uint64_t)tlb->XI1 << CP0EnLo_XI) | (tlb->C1 << 3) |
                        get_entrylo_pfn_from_tlb(tlb->PFN[1] >> 12);
    }
}

void helper_tlbwi(CPUMIPSState *env)
{
    env->tlb->helper_tlbwi(env);
}

void helper_tlbwr(CPUMIPSState *env)
{
    env->tlb->helper_tlbwr(env);
}

void helper_tlbp(CPUMIPSState *env)
{
    env->tlb->helper_tlbp(env);
}

void helper_tlbr(CPUMIPSState *env)
{
    env->tlb->helper_tlbr(env);
}

void helper_tlbinv(CPUMIPSState *env)
{
    env->tlb->helper_tlbinv(env);
}

void helper_tlbinvf(CPUMIPSState *env)
{
    env->tlb->helper_tlbinvf(env);
}

target_ulong helper_di(CPUMIPSState *env)
{
    target_ulong t0 = env->CP0_Status;

    env->CP0_Status = t0 & ~(1 << CP0St_IE);
    return t0;
}

target_ulong helper_ei(CPUMIPSState *env)
{
    target_ulong t0 = env->CP0_Status;

    env->CP0_Status = t0 | (1 << CP0St_IE);
    return t0;
}

static void debug_pre_eret(CPUMIPSState *env)
{
    if (qemu_loglevel_mask(CPU_LOG_EXEC)) {
        qemu_log("ERET: PC " TARGET_FMT_lx " EPC " TARGET_FMT_lx,
                 env->active_tc.PC, env->CP0_EPC);
        if (env->CP0_Status & (1 << CP0St_ERL))
            qemu_log(" ErrorEPC " TARGET_FMT_lx, env->CP0_ErrorEPC);
        if (env->hflags & MIPS_HFLAG_DM)
            qemu_log(" DEPC " TARGET_FMT_lx, env->CP0_DEPC);
        qemu_log("\n");
    }
}

static void debug_post_eret(CPUMIPSState *env)
{
    MIPSCPU *cpu = mips_env_get_cpu(env);

    if (qemu_loglevel_mask(CPU_LOG_EXEC)) {
        qemu_log("  =>  PC " TARGET_FMT_lx " EPC " TARGET_FMT_lx,
                 env->active_tc.PC, env->CP0_EPC);
        if (env->CP0_Status & (1 << CP0St_ERL))
            qemu_log(" ErrorEPC " TARGET_FMT_lx, env->CP0_ErrorEPC);
        if (env->hflags & MIPS_HFLAG_DM)
            qemu_log(" DEPC " TARGET_FMT_lx, env->CP0_DEPC);
        switch (cpu_mmu_index(env, false)) {
        case 3:
            qemu_log(", ERL\n");
            break;
        case MIPS_HFLAG_UM: qemu_log(", UM\n"); break;
        case MIPS_HFLAG_SM: qemu_log(", SM\n"); break;
        case MIPS_HFLAG_KM: qemu_log("\n"); break;
        default:
            cpu_abort(CPU(cpu), "Invalid MMU mode!\n");
            break;
        }
    }
}

static void set_pc(CPUMIPSState *env, target_ulong error_pc)
{
    env->active_tc.PC = error_pc & ~(target_ulong)1;
    if (error_pc & 1) {
        env->hflags |= MIPS_HFLAG_M16;
    } else {
        env->hflags &= ~(MIPS_HFLAG_M16);
    }
}
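
/*
 * Note (annotation): bit 0 of an exception return address selects the ISA
 * mode on MIPS16/microMIPS-capable cores, so it is stripped from the PC
 * here and folded into the MIPS_HFLAG_M16 translation flag instead.
 */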

static inline void exception_return(CPUMIPSState *env)
{
    debug_pre_eret(env);
    if (env->CP0_Status & (1 << CP0St_ERL)) {
        set_pc(env, env->CP0_ErrorEPC);
        env->CP0_Status &= ~(1 << CP0St_ERL);
    } else {
        set_pc(env, env->CP0_EPC);
        env->CP0_Status &= ~(1 << CP0St_EXL);
    }
    compute_hflags(env);
    debug_post_eret(env);
}

void helper_eret(CPUMIPSState *env)
{
    exception_return(env);
    env->lladdr = 1;
}

void helper_eretnc(CPUMIPSState *env)
{
    exception_return(env);
}

void helper_deret(CPUMIPSState *env)
{
    debug_pre_eret(env);
    set_pc(env, env->CP0_DEPC);

    env->hflags &= ~MIPS_HFLAG_DM;
    compute_hflags(env);
    debug_post_eret(env);
}
#endif /* !CONFIG_USER_ONLY */

static inline void check_hwrena(CPUMIPSState *env, int reg, uintptr_t pc)
{
    if ((env->hflags & MIPS_HFLAG_CP0) || (env->CP0_HWREna & (1 << reg))) {
        return;
    }
    do_raise_exception(env, EXCP_RI, pc);
}

target_ulong helper_rdhwr_cpunum(CPUMIPSState *env)
{
    check_hwrena(env, 0, GETPC());
    return env->CP0_EBase & 0x3ff;
}

target_ulong helper_rdhwr_synci_step(CPUMIPSState *env)
{
    check_hwrena(env, 1, GETPC());
    return env->SYNCI_Step;
}

target_ulong helper_rdhwr_cc(CPUMIPSState *env)
{
    int32_t count;

    check_hwrena(env, 2, GETPC());
#ifdef CONFIG_USER_ONLY
    count = env->CP0_Count;
#else
    qemu_mutex_lock_iothread();
    count = (int32_t)cpu_mips_get_count(env);
    qemu_mutex_unlock_iothread();
#endif
    return count;
}

target_ulong helper_rdhwr_ccres(CPUMIPSState *env)
{
    check_hwrena(env, 3, GETPC());
    return env->CCRes;
}

target_ulong helper_rdhwr_performance(CPUMIPSState *env)
{
    check_hwrena(env, 4, GETPC());
    return env->CP0_Performance0;
}

target_ulong helper_rdhwr_xnp(CPUMIPSState *env)
{
    check_hwrena(env, 5, GETPC());
    return (env->CP0_Config5 >> CP0C5_XNP) & 1;
}

void helper_pmon(CPUMIPSState *env, int function)
{
    function /= 2;
    switch (function) {
    case 2: /* TODO: char inbyte(int waitflag); */
        if (env->active_tc.gpr[4] == 0)
            env->active_tc.gpr[2] = -1;
        /* Fall through */
    case 11: /* TODO: char inbyte (void); */
        env->active_tc.gpr[2] = -1;
        break;
    case 3:
    case 12:
        printf("%c", (char)(env->active_tc.gpr[4] & 0xFF));
        break;
    case 17:
        break;
    case 158:
        {
            unsigned char *fmt = (void *)(uintptr_t)env->active_tc.gpr[4];
            printf("%s", fmt);
        }
        break;
    }
}

void helper_wait(CPUMIPSState *env)
{
    CPUState *cs = CPU(mips_env_get_cpu(env));

    cs->halted = 1;
    cpu_reset_interrupt(cs, CPU_INTERRUPT_WAKE);
    /* Last instruction in the block, PC was updated before
       - no need to recover PC and icount. */
    raise_exception(env, EXCP_HLT);
}

#if !defined(CONFIG_USER_ONLY)

void mips_cpu_do_unaligned_access(CPUState *cs, vaddr addr,
                                  MMUAccessType access_type,
                                  int mmu_idx, uintptr_t retaddr)
{
    MIPSCPU *cpu = MIPS_CPU(cs);
    CPUMIPSState *env = &cpu->env;
    int error_code = 0;
    int excp;

    env->CP0_BadVAddr = addr;

    if (access_type == MMU_DATA_STORE) {
        excp = EXCP_AdES;
    } else {
        excp = EXCP_AdEL;
        if (access_type == MMU_INST_FETCH) {
            error_code |= EXCP_INST_NOTAVAIL;
        }
    }

    do_raise_exception_err(env, excp, error_code, retaddr);
}

void tlb_fill(CPUState *cs, target_ulong addr, MMUAccessType access_type,
              int mmu_idx, uintptr_t retaddr)
{
    int ret;

    ret = mips_cpu_handle_mmu_fault(cs, addr, access_type, mmu_idx);
    if (ret) {
        MIPSCPU *cpu = MIPS_CPU(cs);
        CPUMIPSState *env = &cpu->env;

        do_raise_exception_err(env, cs->exception_index,
                               env->error_code, retaddr);
    }
}

void mips_cpu_unassigned_access(CPUState *cs, hwaddr addr,
                                bool is_write, bool is_exec, int unused,
                                unsigned size)
{
    MIPSCPU *cpu = MIPS_CPU(cs);
    CPUMIPSState *env = &cpu->env;

    /*
     * Raising an exception with KVM enabled will crash because it won't be
     * from the main execution loop, so the longjmp won't have a matching
     * setjmp.  Until we can trigger a bus error exception through KVM,
     * let's just ignore the access.
     */
    if (kvm_enabled()) {
        return;
    }

    if (is_exec) {
        raise_exception(env, EXCP_IBE);
    } else {
        raise_exception(env, EXCP_DBE);
    }
}
#endif /* !CONFIG_USER_ONLY */

/* Complex FPU operations which may need stack space. */

#define FLOAT_TWO32 make_float32(1 << 30)
#define FLOAT_TWO64 make_float64(1ULL << 62)

#define FP_TO_INT32_OVERFLOW 0x7fffffff
#define FP_TO_INT64_OVERFLOW 0x7fffffffffffffffULL

/* convert MIPS rounding mode in FCR31 to IEEE library */
unsigned int ieee_rm[] = {
    float_round_nearest_even,
    float_round_to_zero,
    float_round_up,
    float_round_down
};
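
/*
 * Note (annotation): the MIPS RM field is the low two bits of FCR31, so the
 * rounding-mode restore helper defined elsewhere in the MIPS code indexes
 * this table roughly as the following illustrative (not verbatim) snippet:
 *
 *     set_float_rounding_mode(ieee_rm[env->active_fpu.fcr31 & 3],
 *                             &env->active_fpu.fp_status);
 */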

target_ulong helper_cfc1(CPUMIPSState *env, uint32_t reg)
{
    target_ulong arg1 = 0;

    switch (reg) {
    case 0:
        arg1 = (int32_t)env->active_fpu.fcr0;
        break;
    case 1:
        /* UFR Support - Read Status FR */
        if (env->active_fpu.fcr0 & (1 << FCR0_UFRP)) {
            if (env->CP0_Config5 & (1 << CP0C5_UFR)) {
                arg1 = (int32_t)
                       ((env->CP0_Status & (1 << CP0St_FR)) >> CP0St_FR);
            } else {
                do_raise_exception(env, EXCP_RI, GETPC());
            }
        }
        break;
    case 5:
        /* FRE Support - read Config5.FRE bit */
        if (env->active_fpu.fcr0 & (1 << FCR0_FREP)) {
            if (env->CP0_Config5 & (1 << CP0C5_UFE)) {
                arg1 = (env->CP0_Config5 >> CP0C5_FRE) & 1;
            } else {
                helper_raise_exception(env, EXCP_RI);
            }
        }
        break;
    case 25:
        arg1 = ((env->active_fpu.fcr31 >> 24) & 0xfe) |
               ((env->active_fpu.fcr31 >> 23) & 0x1);
        break;
    case 26:
        arg1 = env->active_fpu.fcr31 & 0x0003f07c;
        break;
    case 28:
        arg1 = (env->active_fpu.fcr31 & 0x00000f83) |
               ((env->active_fpu.fcr31 >> 22) & 0x4);
        break;
    default:
        arg1 = (int32_t)env->active_fpu.fcr31;
        break;
    }

    return arg1;
}

void helper_ctc1(CPUMIPSState *env, target_ulong arg1, uint32_t fs, uint32_t rt)
{
    switch (fs) {
    case 1:
        /* UFR Alias - Reset Status FR */
        if (!((env->active_fpu.fcr0 & (1 << FCR0_UFRP)) && (rt == 0))) {
            return;
        }
        if (env->CP0_Config5 & (1 << CP0C5_UFR)) {
            env->CP0_Status &= ~(1 << CP0St_FR);
            compute_hflags(env);
        } else {
            do_raise_exception(env, EXCP_RI, GETPC());
        }
        break;
    case 4:
        /* UNFR Alias - Set Status FR */
        if (!((env->active_fpu.fcr0 & (1 << FCR0_UFRP)) && (rt == 0))) {
            return;
        }
        if (env->CP0_Config5 & (1 << CP0C5_UFR)) {
            env->CP0_Status |= (1 << CP0St_FR);
            compute_hflags(env);
        } else {
            do_raise_exception(env, EXCP_RI, GETPC());
        }
        break;
    case 5:
        /* FRE Support - clear Config5.FRE bit */
        if (!((env->active_fpu.fcr0 & (1 << FCR0_FREP)) && (rt == 0))) {
            return;
        }
        if (env->CP0_Config5 & (1 << CP0C5_UFE)) {
            env->CP0_Config5 &= ~(1 << CP0C5_FRE);
            compute_hflags(env);
        } else {
            helper_raise_exception(env, EXCP_RI);
        }
        break;
    case 6:
        /* FRE Support - set Config5.FRE bit */
        if (!((env->active_fpu.fcr0 & (1 << FCR0_FREP)) && (rt == 0))) {
            return;
        }
        if (env->CP0_Config5 & (1 << CP0C5_UFE)) {
            env->CP0_Config5 |= (1 << CP0C5_FRE);
            compute_hflags(env);
        } else {
            helper_raise_exception(env, EXCP_RI);
        }
        break;
    case 25:
        if ((env->insn_flags & ISA_MIPS32R6) || (arg1 & 0xffffff00)) {
            return;
        }
        env->active_fpu.fcr31 = (env->active_fpu.fcr31 & 0x017fffff) |
                                ((arg1 & 0xfe) << 24) |
                                ((arg1 & 0x1) << 23);
        break;
    case 26:
        if (arg1 & 0x007c0000)
            return;
        env->active_fpu.fcr31 = (env->active_fpu.fcr31 & 0xfffc0f83) |
                                (arg1 & 0x0003f07c);
        break;
    case 28:
        if (arg1 & 0x007c0000)
            return;
        env->active_fpu.fcr31 = (env->active_fpu.fcr31 & 0xfefff07c) |
                                (arg1 & 0x00000f83) |
                                ((arg1 & 0x4) << 22);
        break;
    case 31:
        env->active_fpu.fcr31 = (arg1 & env->active_fpu.fcr31_rw_bitmask) |
               (env->active_fpu.fcr31 & ~(env->active_fpu.fcr31_rw_bitmask));
        break;
    default:
        return;
    }
    restore_fp_status(env);
    set_float_exception_flags(0, &env->active_fpu.fp_status);
    if ((GET_FP_ENABLE(env->active_fpu.fcr31) | 0x20) &
        GET_FP_CAUSE(env->active_fpu.fcr31))
        do_raise_exception(env, EXCP_FPE, GETPC());
}
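
/*
 * Note (annotation) on the final check above: GET_FP_CAUSE() extracts the
 * FCR31 Cause field and GET_FP_ENABLE() the matching Enable bits; ORing
 * 0x20 into the enables makes the Unimplemented Operation cause bit always
 * trap.  Writing a cause bit whose exception is enabled therefore raises
 * the FP exception immediately, as a real CTC1 to FCR31 would.
 */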

int ieee_ex_to_mips(int xcpt)
{
    int ret = 0;
    if (xcpt) {
        if (xcpt & float_flag_invalid) {
            ret |= FP_INVALID;
        }
        if (xcpt & float_flag_overflow) {
            ret |= FP_OVERFLOW;
        }
        if (xcpt & float_flag_underflow) {
            ret |= FP_UNDERFLOW;
        }
        if (xcpt & float_flag_divbyzero) {
            ret |= FP_DIV0;
        }
        if (xcpt & float_flag_inexact) {
            ret |= FP_INEXACT;
        }
    }
    return ret;
}

static inline void update_fcr31(CPUMIPSState *env, uintptr_t pc)
{
    int tmp = ieee_ex_to_mips(get_float_exception_flags(
                                  &env->active_fpu.fp_status));

    SET_FP_CAUSE(env->active_fpu.fcr31, tmp);

    if (tmp) {
        set_float_exception_flags(0, &env->active_fpu.fp_status);

        if (GET_FP_ENABLE(env->active_fpu.fcr31) & tmp) {
            do_raise_exception(env, EXCP_FPE, pc);
        } else {
            UPDATE_FP_FLAGS(env->active_fpu.fcr31, tmp);
        }
    }
}

/* Float support.
   Single precision routines have a "s" suffix, double precision a
   "d" suffix, 32-bit integer "w", 64-bit integer "l", paired single "ps",
   paired single lower "pl", paired single upper "pu".  */

/* unary operations, modifying fp status */
uint64_t helper_float_sqrt_d(CPUMIPSState *env, uint64_t fdt0)
{
    fdt0 = float64_sqrt(fdt0, &env->active_fpu.fp_status);
    update_fcr31(env, GETPC());
    return fdt0;
}

uint32_t helper_float_sqrt_s(CPUMIPSState *env, uint32_t fst0)
{
    fst0 = float32_sqrt(fst0, &env->active_fpu.fp_status);
    update_fcr31(env, GETPC());
    return fst0;
}

uint64_t helper_float_cvtd_s(CPUMIPSState *env, uint32_t fst0)
{
    uint64_t fdt2;

    fdt2 = float32_to_float64(fst0, &env->active_fpu.fp_status);
    fdt2 = float64_maybe_silence_nan(fdt2, &env->active_fpu.fp_status);
    update_fcr31(env, GETPC());
    return fdt2;
}

uint64_t helper_float_cvtd_w(CPUMIPSState *env, uint32_t wt0)
{
    uint64_t fdt2;

    fdt2 = int32_to_float64(wt0, &env->active_fpu.fp_status);
    update_fcr31(env, GETPC());
    return fdt2;
}

uint64_t helper_float_cvtd_l(CPUMIPSState *env, uint64_t dt0)
{
    uint64_t fdt2;

    fdt2 = int64_to_float64(dt0, &env->active_fpu.fp_status);
    update_fcr31(env, GETPC());
    return fdt2;
}

uint64_t helper_float_cvt_l_d(CPUMIPSState *env, uint64_t fdt0)
{
    uint64_t dt2;

    dt2 = float64_to_int64(fdt0, &env->active_fpu.fp_status);
    if (get_float_exception_flags(&env->active_fpu.fp_status)
        & (float_flag_invalid | float_flag_overflow)) {
        dt2 = FP_TO_INT64_OVERFLOW;
    }
    update_fcr31(env, GETPC());
    return dt2;
}

uint64_t helper_float_cvt_l_s(CPUMIPSState *env, uint32_t fst0)
{
    uint64_t dt2;

    dt2 = float32_to_int64(fst0, &env->active_fpu.fp_status);
    if (get_float_exception_flags(&env->active_fpu.fp_status)
        & (float_flag_invalid | float_flag_overflow)) {
        dt2 = FP_TO_INT64_OVERFLOW;
    }
    update_fcr31(env, GETPC());
    return dt2;
}

uint64_t helper_float_cvtps_pw(CPUMIPSState *env, uint64_t dt0)
{
    uint32_t fst2;
    uint32_t fsth2;

    fst2 = int32_to_float32(dt0 & 0XFFFFFFFF, &env->active_fpu.fp_status);
    fsth2 = int32_to_float32(dt0 >> 32, &env->active_fpu.fp_status);
    update_fcr31(env, GETPC());
    return ((uint64_t)fsth2 << 32) | fst2;
}

uint64_t helper_float_cvtpw_ps(CPUMIPSState *env, uint64_t fdt0)
{
    uint32_t wt2;
    uint32_t wth2;
    int excp, excph;

    wt2 = float32_to_int32(fdt0 & 0XFFFFFFFF, &env->active_fpu.fp_status);
    excp = get_float_exception_flags(&env->active_fpu.fp_status);
    if (excp & (float_flag_overflow | float_flag_invalid)) {
        wt2 = FP_TO_INT32_OVERFLOW;
    }

    set_float_exception_flags(0, &env->active_fpu.fp_status);
    wth2 = float32_to_int32(fdt0 >> 32, &env->active_fpu.fp_status);
    excph = get_float_exception_flags(&env->active_fpu.fp_status);
    if (excph & (float_flag_overflow | float_flag_invalid)) {
        wth2 = FP_TO_INT32_OVERFLOW;
    }

    set_float_exception_flags(excp | excph, &env->active_fpu.fp_status);
    update_fcr31(env, GETPC());

    return ((uint64_t)wth2 << 32) | wt2;
}

uint32_t helper_float_cvts_d(CPUMIPSState *env, uint64_t fdt0)
{
    uint32_t fst2;

    fst2 = float64_to_float32(fdt0, &env->active_fpu.fp_status);
    fst2 = float32_maybe_silence_nan(fst2, &env->active_fpu.fp_status);
    update_fcr31(env, GETPC());
    return fst2;
}

uint32_t helper_float_cvts_w(CPUMIPSState *env, uint32_t wt0)
{
    uint32_t fst2;

    fst2 = int32_to_float32(wt0, &env->active_fpu.fp_status);
    update_fcr31(env, GETPC());
    return fst2;
}

uint32_t helper_float_cvts_l(CPUMIPSState *env, uint64_t dt0)
{
    uint32_t fst2;

    fst2 = int64_to_float32(dt0, &env->active_fpu.fp_status);
    update_fcr31(env, GETPC());
    return fst2;
}

uint32_t helper_float_cvts_pl(CPUMIPSState *env, uint32_t wt0)
{
    uint32_t wt2;

    wt2 = wt0;
    update_fcr31(env, GETPC());
    return wt2;
}

uint32_t helper_float_cvts_pu(CPUMIPSState *env, uint32_t wth0)
{
    uint32_t wt2;

    wt2 = wth0;
    update_fcr31(env, GETPC());
    return wt2;
}

uint32_t helper_float_cvt_w_s(CPUMIPSState *env, uint32_t fst0)
{
    uint32_t wt2;

    wt2 = float32_to_int32(fst0, &env->active_fpu.fp_status);
    if (get_float_exception_flags(&env->active_fpu.fp_status)
        & (float_flag_invalid | float_flag_overflow)) {
        wt2 = FP_TO_INT32_OVERFLOW;
    }
    update_fcr31(env, GETPC());
    return wt2;
}

uint32_t helper_float_cvt_w_d(CPUMIPSState *env, uint64_t fdt0)
{
    uint32_t wt2;

    wt2 = float64_to_int32(fdt0, &env->active_fpu.fp_status);
    if (get_float_exception_flags(&env->active_fpu.fp_status)
        & (float_flag_invalid | float_flag_overflow)) {
        wt2 = FP_TO_INT32_OVERFLOW;
    }
    update_fcr31(env, GETPC());
    return wt2;
}

uint64_t helper_float_round_l_d(CPUMIPSState *env, uint64_t fdt0)
{
    uint64_t dt2;

    set_float_rounding_mode(float_round_nearest_even,
                            &env->active_fpu.fp_status);
    dt2 = float64_to_int64(fdt0, &env->active_fpu.fp_status);
    restore_rounding_mode(env);
    if (get_float_exception_flags(&env->active_fpu.fp_status)
        & (float_flag_invalid | float_flag_overflow)) {
        dt2 = FP_TO_INT64_OVERFLOW;
    }
    update_fcr31(env, GETPC());
    return dt2;
}

uint64_t helper_float_round_l_s(CPUMIPSState *env, uint32_t fst0)
{
    uint64_t dt2;

    set_float_rounding_mode(float_round_nearest_even,
                            &env->active_fpu.fp_status);
    dt2 = float32_to_int64(fst0, &env->active_fpu.fp_status);
    restore_rounding_mode(env);
    if (get_float_exception_flags(&env->active_fpu.fp_status)
        & (float_flag_invalid | float_flag_overflow)) {
        dt2 = FP_TO_INT64_OVERFLOW;
    }
    update_fcr31(env, GETPC());
    return dt2;
}

uint32_t helper_float_round_w_d(CPUMIPSState *env, uint64_t fdt0)
{
    uint32_t wt2;

    set_float_rounding_mode(float_round_nearest_even,
                            &env->active_fpu.fp_status);
    wt2 = float64_to_int32(fdt0, &env->active_fpu.fp_status);
    restore_rounding_mode(env);
    if (get_float_exception_flags(&env->active_fpu.fp_status)
        & (float_flag_invalid | float_flag_overflow)) {
        wt2 = FP_TO_INT32_OVERFLOW;
    }
    update_fcr31(env, GETPC());
    return wt2;
}

uint32_t helper_float_round_w_s(CPUMIPSState *env, uint32_t fst0)
{
    uint32_t wt2;

    set_float_rounding_mode(float_round_nearest_even,
                            &env->active_fpu.fp_status);
    wt2 = float32_to_int32(fst0, &env->active_fpu.fp_status);
    restore_rounding_mode(env);
    if (get_float_exception_flags(&env->active_fpu.fp_status)
        & (float_flag_invalid | float_flag_overflow)) {
        wt2 = FP_TO_INT32_OVERFLOW;
    }
    update_fcr31(env, GETPC());
    return wt2;
}

uint64_t helper_float_trunc_l_d(CPUMIPSState *env, uint64_t fdt0)
{
    uint64_t dt2;

    dt2 = float64_to_int64_round_to_zero(fdt0, &env->active_fpu.fp_status);
    if (get_float_exception_flags(&env->active_fpu.fp_status)
        & (float_flag_invalid | float_flag_overflow)) {
        dt2 = FP_TO_INT64_OVERFLOW;
    }
    update_fcr31(env, GETPC());
    return dt2;
}

uint64_t helper_float_trunc_l_s(CPUMIPSState *env, uint32_t fst0)
{
    uint64_t dt2;

    dt2 = float32_to_int64_round_to_zero(fst0, &env->active_fpu.fp_status);
    if (get_float_exception_flags(&env->active_fpu.fp_status)
        & (float_flag_invalid | float_flag_overflow)) {
        dt2 = FP_TO_INT64_OVERFLOW;
    }
    update_fcr31(env, GETPC());
    return dt2;
}

uint32_t helper_float_trunc_w_d(CPUMIPSState *env, uint64_t fdt0)
{
    uint32_t wt2;

    wt2 = float64_to_int32_round_to_zero(fdt0, &env->active_fpu.fp_status);
    if (get_float_exception_flags(&env->active_fpu.fp_status)
        & (float_flag_invalid | float_flag_overflow)) {
        wt2 = FP_TO_INT32_OVERFLOW;
    }
    update_fcr31(env, GETPC());
    return wt2;
}

uint32_t helper_float_trunc_w_s(CPUMIPSState *env, uint32_t fst0)
{
    uint32_t wt2;

    wt2 = float32_to_int32_round_to_zero(fst0, &env->active_fpu.fp_status);
    if (get_float_exception_flags(&env->active_fpu.fp_status)
        & (float_flag_invalid | float_flag_overflow)) {
        wt2 = FP_TO_INT32_OVERFLOW;
    }
    update_fcr31(env, GETPC());
    return wt2;
}

uint64_t helper_float_ceil_l_d(CPUMIPSState *env, uint64_t fdt0)
{
    uint64_t dt2;

    set_float_rounding_mode(float_round_up, &env->active_fpu.fp_status);
    dt2 = float64_to_int64(fdt0, &env->active_fpu.fp_status);
    restore_rounding_mode(env);
    if (get_float_exception_flags(&env->active_fpu.fp_status)
        & (float_flag_invalid | float_flag_overflow)) {
        dt2 = FP_TO_INT64_OVERFLOW;
    }
    update_fcr31(env, GETPC());
    return dt2;
}

uint64_t helper_float_ceil_l_s(CPUMIPSState *env, uint32_t fst0)
{
    uint64_t dt2;

    set_float_rounding_mode(float_round_up, &env->active_fpu.fp_status);
    dt2 = float32_to_int64(fst0, &env->active_fpu.fp_status);
    restore_rounding_mode(env);
    if (get_float_exception_flags(&env->active_fpu.fp_status)
        & (float_flag_invalid | float_flag_overflow)) {
        dt2 = FP_TO_INT64_OVERFLOW;
    }
    update_fcr31(env, GETPC());
    return dt2;
}

uint32_t helper_float_ceil_w_d(CPUMIPSState *env, uint64_t fdt0)
{
    uint32_t wt2;

    set_float_rounding_mode(float_round_up, &env->active_fpu.fp_status);
    wt2 = float64_to_int32(fdt0, &env->active_fpu.fp_status);
    restore_rounding_mode(env);
    if (get_float_exception_flags(&env->active_fpu.fp_status)
        & (float_flag_invalid | float_flag_overflow)) {
        wt2 = FP_TO_INT32_OVERFLOW;
    }
    update_fcr31(env, GETPC());
    return wt2;
}

uint32_t helper_float_ceil_w_s(CPUMIPSState *env, uint32_t fst0)
{
    uint32_t wt2;

    set_float_rounding_mode(float_round_up, &env->active_fpu.fp_status);
    wt2 = float32_to_int32(fst0, &env->active_fpu.fp_status);
    restore_rounding_mode(env);
    if (get_float_exception_flags(&env->active_fpu.fp_status)
        & (float_flag_invalid | float_flag_overflow)) {
        wt2 = FP_TO_INT32_OVERFLOW;
    }
    update_fcr31(env, GETPC());
    return wt2;
}

uint64_t helper_float_floor_l_d(CPUMIPSState *env, uint64_t fdt0)
{
    uint64_t dt2;

    set_float_rounding_mode(float_round_down, &env->active_fpu.fp_status);
    dt2 = float64_to_int64(fdt0, &env->active_fpu.fp_status);
    restore_rounding_mode(env);
    if (get_float_exception_flags(&env->active_fpu.fp_status)
        & (float_flag_invalid | float_flag_overflow)) {
        dt2 = FP_TO_INT64_OVERFLOW;
    }
    update_fcr31(env, GETPC());
    return dt2;
}

uint64_t helper_float_floor_l_s(CPUMIPSState *env, uint32_t fst0)
{
    uint64_t dt2;

    set_float_rounding_mode(float_round_down, &env->active_fpu.fp_status);
    dt2 = float32_to_int64(fst0, &env->active_fpu.fp_status);
    restore_rounding_mode(env);
    if (get_float_exception_flags(&env->active_fpu.fp_status)
        & (float_flag_invalid | float_flag_overflow)) {
        dt2 = FP_TO_INT64_OVERFLOW;
    }
    update_fcr31(env, GETPC());
    return dt2;
}

uint32_t helper_float_floor_w_d(CPUMIPSState *env, uint64_t fdt0)
{
    uint32_t wt2;

    set_float_rounding_mode(float_round_down, &env->active_fpu.fp_status);
    wt2 = float64_to_int32(fdt0, &env->active_fpu.fp_status);
    restore_rounding_mode(env);
    if (get_float_exception_flags(&env->active_fpu.fp_status)
        & (float_flag_invalid | float_flag_overflow)) {
        wt2 = FP_TO_INT32_OVERFLOW;
    }
    update_fcr31(env, GETPC());
    return wt2;
}

uint32_t helper_float_floor_w_s(CPUMIPSState *env, uint32_t fst0)
{
    uint32_t wt2;

    set_float_rounding_mode(float_round_down, &env->active_fpu.fp_status);
    wt2 = float32_to_int32(fst0, &env->active_fpu.fp_status);
    restore_rounding_mode(env);
    if (get_float_exception_flags(&env->active_fpu.fp_status)
        & (float_flag_invalid | float_flag_overflow)) {
        wt2 = FP_TO_INT32_OVERFLOW;
    }
    update_fcr31(env, GETPC());
    return wt2;
}

uint64_t helper_float_cvt_2008_l_d(CPUMIPSState *env, uint64_t fdt0)
{
    uint64_t dt2;

    dt2 = float64_to_int64(fdt0, &env->active_fpu.fp_status);
    if (get_float_exception_flags(&env->active_fpu.fp_status)
            & float_flag_invalid) {
        if (float64_is_any_nan(fdt0)) {
            dt2 = 0;
        }
    }
    update_fcr31(env, GETPC());
    return dt2;
}

uint64_t helper_float_cvt_2008_l_s(CPUMIPSState *env, uint32_t fst0)
{
    uint64_t dt2;

    dt2 = float32_to_int64(fst0, &env->active_fpu.fp_status);
    if (get_float_exception_flags(&env->active_fpu.fp_status)
            & float_flag_invalid) {
        if (float32_is_any_nan(fst0)) {
            dt2 = 0;
        }
    }
    update_fcr31(env, GETPC());
    return dt2;
}

uint32_t helper_float_cvt_2008_w_d(CPUMIPSState *env, uint64_t fdt0)
{
    uint32_t wt2;

    wt2 = float64_to_int32(fdt0, &env->active_fpu.fp_status);
    if (get_float_exception_flags(&env->active_fpu.fp_status)
            & float_flag_invalid) {
        if (float64_is_any_nan(fdt0)) {
            wt2 = 0;
        }
    }
    update_fcr31(env, GETPC());
    return wt2;
}

uint32_t helper_float_cvt_2008_w_s(CPUMIPSState *env, uint32_t fst0)
{
    uint32_t wt2;

    wt2 = float32_to_int32(fst0, &env->active_fpu.fp_status);
    if (get_float_exception_flags(&env->active_fpu.fp_status)
            & float_flag_invalid) {
        if (float32_is_any_nan(fst0)) {
            wt2 = 0;
        }
    }
    update_fcr31(env, GETPC());
    return wt2;
}

uint64_t helper_float_round_2008_l_d(CPUMIPSState *env, uint64_t fdt0)
{
    uint64_t dt2;

    set_float_rounding_mode(float_round_nearest_even,
                            &env->active_fpu.fp_status);
    dt2 = float64_to_int64(fdt0, &env->active_fpu.fp_status);
    restore_rounding_mode(env);
    if (get_float_exception_flags(&env->active_fpu.fp_status)
            & float_flag_invalid) {
        if (float64_is_any_nan(fdt0)) {
            dt2 = 0;
        }
    }
    update_fcr31(env, GETPC());
    return dt2;
}

uint64_t helper_float_round_2008_l_s(CPUMIPSState *env, uint32_t fst0)
{
    uint64_t dt2;

    set_float_rounding_mode(float_round_nearest_even,
                            &env->active_fpu.fp_status);
    dt2 = float32_to_int64(fst0, &env->active_fpu.fp_status);
    restore_rounding_mode(env);
    if (get_float_exception_flags(&env->active_fpu.fp_status)
            & float_flag_invalid) {
        if (float32_is_any_nan(fst0)) {
            dt2 = 0;
        }
    }
    update_fcr31(env, GETPC());
    return dt2;
}

uint32_t helper_float_round_2008_w_d(CPUMIPSState *env, uint64_t fdt0)
{
    uint32_t wt2;

    set_float_rounding_mode(float_round_nearest_even,
                            &env->active_fpu.fp_status);
    wt2 = float64_to_int32(fdt0, &env->active_fpu.fp_status);
    restore_rounding_mode(env);
    if (get_float_exception_flags(&env->active_fpu.fp_status)
            & float_flag_invalid) {
        if (float64_is_any_nan(fdt0)) {
            wt2 = 0;
        }
    }
    update_fcr31(env, GETPC());
    return wt2;
}

uint32_t helper_float_round_2008_w_s(CPUMIPSState *env, uint32_t fst0)
{
    uint32_t wt2;

    set_float_rounding_mode(float_round_nearest_even,
                            &env->active_fpu.fp_status);
    wt2 = float32_to_int32(fst0, &env->active_fpu.fp_status);
    restore_rounding_mode(env);
    if (get_float_exception_flags(&env->active_fpu.fp_status)
            & float_flag_invalid) {
        if (float32_is_any_nan(fst0)) {
            wt2 = 0;
        }
    }
    update_fcr31(env, GETPC());
    return wt2;
}

uint64_t helper_float_trunc_2008_l_d(CPUMIPSState *env, uint64_t fdt0)
{
    uint64_t dt2;

    dt2 = float64_to_int64_round_to_zero(fdt0, &env->active_fpu.fp_status);
    if (get_float_exception_flags(&env->active_fpu.fp_status)
            & float_flag_invalid) {
        if (float64_is_any_nan(fdt0)) {
            dt2 = 0;
        }
    }
    update_fcr31(env, GETPC());
    return dt2;
}

uint64_t helper_float_trunc_2008_l_s(CPUMIPSState *env, uint32_t fst0)
{
    uint64_t dt2;

    dt2 = float32_to_int64_round_to_zero(fst0, &env->active_fpu.fp_status);
    if (get_float_exception_flags(&env->active_fpu.fp_status)
            & float_flag_invalid) {
        if (float32_is_any_nan(fst0)) {
            dt2 = 0;
        }
    }
    update_fcr31(env, GETPC());
    return dt2;
}

uint32_t helper_float_trunc_2008_w_d(CPUMIPSState *env, uint64_t fdt0)
{
    uint32_t wt2;

    wt2 = float64_to_int32_round_to_zero(fdt0, &env->active_fpu.fp_status);
    if (get_float_exception_flags(&env->active_fpu.fp_status)
            & float_flag_invalid) {
        if (float64_is_any_nan(fdt0)) {
            wt2 = 0;
        }
    }
    update_fcr31(env, GETPC());
    return wt2;
}

uint32_t helper_float_trunc_2008_w_s(CPUMIPSState *env, uint32_t fst0)
{
    uint32_t wt2;

    wt2 = float32_to_int32_round_to_zero(fst0, &env->active_fpu.fp_status);
    if (get_float_exception_flags(&env->active_fpu.fp_status)
            & float_flag_invalid) {
        if (float32_is_any_nan(fst0)) {
            wt2 = 0;
        }
    }
    update_fcr31(env, GETPC());
    return wt2;
}

uint64_t helper_float_ceil_2008_l_d(CPUMIPSState *env, uint64_t fdt0)
{
    uint64_t dt2;

    set_float_rounding_mode(float_round_up, &env->active_fpu.fp_status);
    dt2 = float64_to_int64(fdt0, &env->active_fpu.fp_status);
    restore_rounding_mode(env);
    if (get_float_exception_flags(&env->active_fpu.fp_status)
            & float_flag_invalid) {
        if (float64_is_any_nan(fdt0)) {
            dt2 = 0;
        }
    }
    update_fcr31(env, GETPC());
    return dt2;
}

uint64_t helper_float_ceil_2008_l_s(CPUMIPSState *env, uint32_t fst0)
{
    uint64_t dt2;

    set_float_rounding_mode(float_round_up, &env->active_fpu.fp_status);
    dt2 = float32_to_int64(fst0, &env->active_fpu.fp_status);
    restore_rounding_mode(env);
    if (get_float_exception_flags(&env->active_fpu.fp_status)
            & float_flag_invalid) {
        if (float32_is_any_nan(fst0)) {
            dt2 = 0;
        }
    }
    update_fcr31(env, GETPC());
    return dt2;
}

uint32_t helper_float_ceil_2008_w_d(CPUMIPSState *env, uint64_t fdt0)
{
    uint32_t wt2;

    set_float_rounding_mode(float_round_up, &env->active_fpu.fp_status);
    wt2 = float64_to_int32(fdt0, &env->active_fpu.fp_status);
    restore_rounding_mode(env);
    if (get_float_exception_flags(&env->active_fpu.fp_status)
            & float_flag_invalid) {
        if (float64_is_any_nan(fdt0)) {
            wt2 = 0;
        }
    }
    update_fcr31(env, GETPC());
    return wt2;
}

uint32_t helper_float_ceil_2008_w_s(CPUMIPSState *env, uint32_t fst0)
{
    uint32_t wt2;

    set_float_rounding_mode(float_round_up, &env->active_fpu.fp_status);
    wt2 = float32_to_int32(fst0, &env->active_fpu.fp_status);
    restore_rounding_mode(env);
    if (get_float_exception_flags(&env->active_fpu.fp_status)
            & float_flag_invalid) {
        if (float32_is_any_nan(fst0)) {
            wt2 = 0;
        }
    }
    update_fcr31(env, GETPC());
    return wt2;
}

uint64_t helper_float_floor_2008_l_d(CPUMIPSState *env, uint64_t fdt0)
{
    uint64_t dt2;

    set_float_rounding_mode(float_round_down, &env->active_fpu.fp_status);
    dt2 = float64_to_int64(fdt0, &env->active_fpu.fp_status);
    restore_rounding_mode(env);
    if (get_float_exception_flags(&env->active_fpu.fp_status)
            & float_flag_invalid) {
        if (float64_is_any_nan(fdt0)) {
            dt2 = 0;
        }
    }
    update_fcr31(env, GETPC());
    return dt2;
}

uint64_t helper_float_floor_2008_l_s(CPUMIPSState *env, uint32_t fst0)
{
    uint64_t dt2;

    set_float_rounding_mode(float_round_down, &env->active_fpu.fp_status);
    dt2 = float32_to_int64(fst0, &env->active_fpu.fp_status);
    restore_rounding_mode(env);
    if (get_float_exception_flags(&env->active_fpu.fp_status)
            & float_flag_invalid) {
        if (float32_is_any_nan(fst0)) {
            dt2 = 0;
        }
    }
    update_fcr31(env, GETPC());
    return dt2;
}

uint32_t helper_float_floor_2008_w_d(CPUMIPSState *env, uint64_t fdt0)
{
    uint32_t wt2;

    set_float_rounding_mode(float_round_down, &env->active_fpu.fp_status);
    wt2 = float64_to_int32(fdt0, &env->active_fpu.fp_status);
    restore_rounding_mode(env);
    if (get_float_exception_flags(&env->active_fpu.fp_status)
            & float_flag_invalid) {
        if (float64_is_any_nan(fdt0)) {
            wt2 = 0;
        }
    }
    update_fcr31(env, GETPC());
    return wt2;
}

uint32_t helper_float_floor_2008_w_s(CPUMIPSState *env, uint32_t fst0)
{
    uint32_t wt2;

    set_float_rounding_mode(float_round_down, &env->active_fpu.fp_status);
    wt2 = float32_to_int32(fst0, &env->active_fpu.fp_status);
    restore_rounding_mode(env);
    if (get_float_exception_flags(&env->active_fpu.fp_status)
            & float_flag_invalid) {
        if (float32_is_any_nan(fst0)) {
            wt2 = 0;
        }
    }
    update_fcr31(env, GETPC());
    return wt2;
}

/* unary operations, not modifying fp status */
#define FLOAT_UNOP(name)                                   \
uint64_t helper_float_ ## name ## _d(uint64_t fdt0)        \
{                                                          \
    return float64_ ## name(fdt0);                         \
}                                                          \
uint32_t helper_float_ ## name ## _s(uint32_t fst0)        \
{                                                          \
    return float32_ ## name(fst0);                         \
}                                                          \
uint64_t helper_float_ ## name ## _ps(uint64_t fdt0)       \
{                                                          \
    uint32_t wt0;                                          \
    uint32_t wth0;                                         \
                                                           \
    wt0 = float32_ ## name(fdt0 & 0XFFFFFFFF);             \
    wth0 = float32_ ## name(fdt0 >> 32);                   \
    return ((uint64_t)wth0 << 32) | wt0;                   \
}
FLOAT_UNOP(abs)
FLOAT_UNOP(chs)
#undef FLOAT_UNOP
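
/*
 * Note (annotation): each FLOAT_UNOP(x) expansion defines helper_float_x_d,
 * helper_float_x_s and helper_float_x_ps.  abs and chs only manipulate the
 * sign bit, so they bypass fp_status entirely and cannot set cause bits.
 */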

/* MIPS specific unary operations */
uint64_t helper_float_recip_d(CPUMIPSState *env, uint64_t fdt0)
{
    uint64_t fdt2;

    fdt2 = float64_div(float64_one, fdt0, &env->active_fpu.fp_status);
    update_fcr31(env, GETPC());
    return fdt2;
}

uint32_t helper_float_recip_s(CPUMIPSState *env, uint32_t fst0)
{
    uint32_t fst2;

    fst2 = float32_div(float32_one, fst0, &env->active_fpu.fp_status);
    update_fcr31(env, GETPC());
    return fst2;
}

uint64_t helper_float_rsqrt_d(CPUMIPSState *env, uint64_t fdt0)
{
    uint64_t fdt2;

    fdt2 = float64_sqrt(fdt0, &env->active_fpu.fp_status);
    fdt2 = float64_div(float64_one, fdt2, &env->active_fpu.fp_status);
    update_fcr31(env, GETPC());
    return fdt2;
}

uint32_t helper_float_rsqrt_s(CPUMIPSState *env, uint32_t fst0)
{
    uint32_t fst2;

    fst2 = float32_sqrt(fst0, &env->active_fpu.fp_status);
    fst2 = float32_div(float32_one, fst2, &env->active_fpu.fp_status);
    update_fcr31(env, GETPC());
    return fst2;
}

uint64_t helper_float_recip1_d(CPUMIPSState *env, uint64_t fdt0)
{
    uint64_t fdt2;

    fdt2 = float64_div(float64_one, fdt0, &env->active_fpu.fp_status);
    update_fcr31(env, GETPC());
    return fdt2;
}

uint32_t helper_float_recip1_s(CPUMIPSState *env, uint32_t fst0)
{
    uint32_t fst2;

    fst2 = float32_div(float32_one, fst0, &env->active_fpu.fp_status);
    update_fcr31(env, GETPC());
    return fst2;
}

uint64_t helper_float_recip1_ps(CPUMIPSState *env, uint64_t fdt0)
{
    uint32_t fst2;
    uint32_t fsth2;

    fst2 = float32_div(float32_one, fdt0 & 0XFFFFFFFF,
                       &env->active_fpu.fp_status);
    fsth2 = float32_div(float32_one, fdt0 >> 32, &env->active_fpu.fp_status);
    update_fcr31(env, GETPC());
    return ((uint64_t)fsth2 << 32) | fst2;
}

uint64_t helper_float_rsqrt1_d(CPUMIPSState *env, uint64_t fdt0)
{
    uint64_t fdt2;

    fdt2 = float64_sqrt(fdt0, &env->active_fpu.fp_status);
    fdt2 = float64_div(float64_one, fdt2, &env->active_fpu.fp_status);
    update_fcr31(env, GETPC());
    return fdt2;
}

uint32_t helper_float_rsqrt1_s(CPUMIPSState *env, uint32_t fst0)
{
    uint32_t fst2;

    fst2 = float32_sqrt(fst0, &env->active_fpu.fp_status);
    fst2 = float32_div(float32_one, fst2, &env->active_fpu.fp_status);
    update_fcr31(env, GETPC());
    return fst2;
}

uint64_t helper_float_rsqrt1_ps(CPUMIPSState *env, uint64_t fdt0)
{
    uint32_t fst2;
    uint32_t fsth2;

    fst2 = float32_sqrt(fdt0 & 0XFFFFFFFF, &env->active_fpu.fp_status);
    fsth2 = float32_sqrt(fdt0 >> 32, &env->active_fpu.fp_status);
    fst2 = float32_div(float32_one, fst2, &env->active_fpu.fp_status);
    fsth2 = float32_div(float32_one, fsth2, &env->active_fpu.fp_status);
    update_fcr31(env, GETPC());
    return ((uint64_t)fsth2 << 32) | fst2;
}

#define FLOAT_RINT(name, bits)                                              \
uint ## bits ## _t helper_float_ ## name (CPUMIPSState *env,                \
                                          uint ## bits ## _t fs)            \
{                                                                           \
    uint ## bits ## _t fdret;                                               \
                                                                            \
    fdret = float ## bits ## _round_to_int(fs, &env->active_fpu.fp_status); \
    update_fcr31(env, GETPC());                                             \
    return fdret;                                                           \
}

FLOAT_RINT(rint_s, 32)
FLOAT_RINT(rint_d, 64)
#undef FLOAT_RINT

#define FLOAT_CLASS_SIGNALING_NAN      0x001
#define FLOAT_CLASS_QUIET_NAN          0x002
#define FLOAT_CLASS_NEGATIVE_INFINITY  0x004
#define FLOAT_CLASS_NEGATIVE_NORMAL    0x008
#define FLOAT_CLASS_NEGATIVE_SUBNORMAL 0x010
#define FLOAT_CLASS_NEGATIVE_ZERO      0x020
#define FLOAT_CLASS_POSITIVE_INFINITY  0x040
#define FLOAT_CLASS_POSITIVE_NORMAL    0x080
#define FLOAT_CLASS_POSITIVE_SUBNORMAL 0x100
#define FLOAT_CLASS_POSITIVE_ZERO      0x200

#define FLOAT_CLASS(name, bits)                                      \
uint ## bits ## _t float_ ## name (uint ## bits ## _t arg,           \
                                   float_status *status)             \
{                                                                    \
    if (float ## bits ## _is_signaling_nan(arg, status)) {           \
        return FLOAT_CLASS_SIGNALING_NAN;                            \
    } else if (float ## bits ## _is_quiet_nan(arg, status)) {        \
        return FLOAT_CLASS_QUIET_NAN;                                \
    } else if (float ## bits ## _is_neg(arg)) {                      \
        if (float ## bits ## _is_infinity(arg)) {                    \
            return FLOAT_CLASS_NEGATIVE_INFINITY;                    \
        } else if (float ## bits ## _is_zero(arg)) {                 \
            return FLOAT_CLASS_NEGATIVE_ZERO;                        \
        } else if (float ## bits ## _is_zero_or_denormal(arg)) {     \
            return FLOAT_CLASS_NEGATIVE_SUBNORMAL;                   \
        } else {                                                     \
            return FLOAT_CLASS_NEGATIVE_NORMAL;                      \
        }                                                            \
    } else {                                                         \
        if (float ## bits ## _is_infinity(arg)) {                    \
            return FLOAT_CLASS_POSITIVE_INFINITY;                    \
        } else if (float ## bits ## _is_zero(arg)) {                 \
            return FLOAT_CLASS_POSITIVE_ZERO;                        \
        } else if (float ## bits ## _is_zero_or_denormal(arg)) {     \
            return FLOAT_CLASS_POSITIVE_SUBNORMAL;                   \
        } else {                                                     \
            return FLOAT_CLASS_POSITIVE_NORMAL;                      \
        }                                                            \
    }                                                                \
}                                                                    \
                                                                     \
uint ## bits ## _t helper_float_ ## name (CPUMIPSState *env,         \
                                          uint ## bits ## _t arg)    \
{                                                                    \
    return float_ ## name(arg, &env->active_fpu.fp_status);          \
}

FLOAT_CLASS(class_s, 32)
FLOAT_CLASS(class_d, 64)
#undef FLOAT_CLASS

/* binary operations */
#define FLOAT_BINOP(name)                                                 \
uint64_t helper_float_ ## name ## _d(CPUMIPSState *env,                   \
                                     uint64_t fdt0, uint64_t fdt1)        \
{                                                                         \
    uint64_t dt2;                                                         \
                                                                          \
    dt2 = float64_ ## name (fdt0, fdt1, &env->active_fpu.fp_status);      \
    update_fcr31(env, GETPC());                                           \
    return dt2;                                                           \
}                                                                         \
                                                                          \
uint32_t helper_float_ ## name ## _s(CPUMIPSState *env,                   \
                                     uint32_t fst0, uint32_t fst1)        \
{                                                                         \
    uint32_t wt2;                                                         \
                                                                          \
    wt2 = float32_ ## name (fst0, fst1, &env->active_fpu.fp_status);      \
    update_fcr31(env, GETPC());                                           \
    return wt2;                                                           \
}                                                                         \
                                                                          \
uint64_t helper_float_ ## name ## _ps(CPUMIPSState *env,                  \
                                      uint64_t fdt0, uint64_t fdt1)       \
{                                                                         \
    uint32_t fst0 = fdt0 & 0XFFFFFFFF;                                    \
    uint32_t fsth0 = fdt0 >> 32;                                          \
    uint32_t fst1 = fdt1 & 0XFFFFFFFF;                                    \
    uint32_t fsth1 = fdt1 >> 32;                                          \
    uint32_t wt2;                                                         \
    uint32_t wth2;                                                        \
                                                                          \
    wt2 = float32_ ## name (fst0, fst1, &env->active_fpu.fp_status);      \
    wth2 = float32_ ## name (fsth0, fsth1, &env->active_fpu.fp_status);   \
    update_fcr31(env, GETPC());                                           \
    return ((uint64_t)wth2 << 32) | wt2;                                  \
}

FLOAT_BINOP(add)
FLOAT_BINOP(sub)
FLOAT_BINOP(mul)
FLOAT_BINOP(div)
#undef FLOAT_BINOP

/* MIPS specific binary operations */
uint64_t helper_float_recip2_d(CPUMIPSState *env, uint64_t fdt0, uint64_t fdt2)
{
    fdt2 = float64_mul(fdt0, fdt2, &env->active_fpu.fp_status);
    fdt2 = float64_chs(float64_sub(fdt2, float64_one,
                                   &env->active_fpu.fp_status));
    update_fcr31(env, GETPC());
    return fdt2;
}

uint32_t helper_float_recip2_s(CPUMIPSState *env, uint32_t fst0, uint32_t fst2)
{
    fst2 = float32_mul(fst0, fst2, &env->active_fpu.fp_status);
    fst2 = float32_chs(float32_sub(fst2, float32_one,
                                   &env->active_fpu.fp_status));
    update_fcr31(env, GETPC());
    return fst2;
}

uint64_t helper_float_recip2_ps(CPUMIPSState *env, uint64_t fdt0, uint64_t fdt2)
{
    uint32_t fst0 = fdt0 & 0XFFFFFFFF;
    uint32_t fsth0 = fdt0 >> 32;
    uint32_t fst2 = fdt2 & 0XFFFFFFFF;
    uint32_t fsth2 = fdt2 >> 32;

    fst2 = float32_mul(fst0, fst2, &env->active_fpu.fp_status);
    fsth2 = float32_mul(fsth0, fsth2, &env->active_fpu.fp_status);
    fst2 = float32_chs(float32_sub(fst2, float32_one,
                                   &env->active_fpu.fp_status));
    fsth2 = float32_chs(float32_sub(fsth2, float32_one,
                                    &env->active_fpu.fp_status));
    update_fcr31(env, GETPC());
    return ((uint64_t)fsth2 << 32) | fst2;
}

uint64_t helper_float_rsqrt2_d(CPUMIPSState *env, uint64_t fdt0, uint64_t fdt2)
{
    fdt2 = float64_mul(fdt0, fdt2, &env->active_fpu.fp_status);
    fdt2 = float64_sub(fdt2, float64_one, &env->active_fpu.fp_status);
    fdt2 = float64_chs(float64_div(fdt2, FLOAT_TWO64,
                                   &env->active_fpu.fp_status));
    update_fcr31(env, GETPC());
    return fdt2;
}

uint32_t helper_float_rsqrt2_s(CPUMIPSState *env, uint32_t fst0, uint32_t fst2)
{
    fst2 = float32_mul(fst0, fst2, &env->active_fpu.fp_status);
    fst2 = float32_sub(fst2, float32_one, &env->active_fpu.fp_status);
    fst2 = float32_chs(float32_div(fst2, FLOAT_TWO32,
                                   &env->active_fpu.fp_status));
    update_fcr31(env, GETPC());
    return fst2;
}

uint64_t helper_float_rsqrt2_ps(CPUMIPSState *env, uint64_t fdt0, uint64_t fdt2)
{
    uint32_t fst0 = fdt0 & 0XFFFFFFFF;
    uint32_t fsth0 = fdt0 >> 32;
    uint32_t fst2 = fdt2 & 0XFFFFFFFF;
    uint32_t fsth2 = fdt2 >> 32;

    fst2 = float32_mul(fst0, fst2, &env->active_fpu.fp_status);
    fsth2 = float32_mul(fsth0, fsth2, &env->active_fpu.fp_status);
    fst2 = float32_sub(fst2, float32_one, &env->active_fpu.fp_status);
    fsth2 = float32_sub(fsth2, float32_one, &env->active_fpu.fp_status);
    fst2 = float32_chs(float32_div(fst2, FLOAT_TWO32,
                                   &env->active_fpu.fp_status));
    fsth2 = float32_chs(float32_div(fsth2, FLOAT_TWO32,
                                    &env->active_fpu.fp_status));
    update_fcr31(env, GETPC());
    return ((uint64_t)fsth2 << 32) | fst2;
}

uint64_t helper_float_addr_ps(CPUMIPSState *env, uint64_t fdt0, uint64_t fdt1)
{
    uint32_t fst0 = fdt0 & 0XFFFFFFFF;
    uint32_t fsth0 = fdt0 >> 32;
    uint32_t fst1 = fdt1 & 0XFFFFFFFF;
    uint32_t fsth1 = fdt1 >> 32;
    uint32_t fst2;
    uint32_t fsth2;

    fst2 = float32_add(fst0, fsth0, &env->active_fpu.fp_status);
    fsth2 = float32_add(fst1, fsth1, &env->active_fpu.fp_status);
    update_fcr31(env, GETPC());
    return ((uint64_t)fsth2 << 32) | fst2;
}

uint64_t helper_float_mulr_ps(CPUMIPSState *env, uint64_t fdt0, uint64_t fdt1)
{
    uint32_t fst0 = fdt0 & 0XFFFFFFFF;
    uint32_t fsth0 = fdt0 >> 32;
    uint32_t fst1 = fdt1 & 0XFFFFFFFF;
    uint32_t fsth1 = fdt1 >> 32;
    uint32_t fst2;
    uint32_t fsth2;

    fst2 = float32_mul(fst0, fsth0, &env->active_fpu.fp_status);
    fsth2 = float32_mul(fst1, fsth1, &env->active_fpu.fp_status);
    update_fcr31(env, GETPC());
    return ((uint64_t)fsth2 << 32) | fst2;
}

#define FLOAT_MINMAX(name, bits, minmaxfunc)                            \
uint ## bits ## _t helper_float_ ## name (CPUMIPSState *env,            \
                                          uint ## bits ## _t fs,        \
                                          uint ## bits ## _t ft)        \
{                                                                       \
    uint ## bits ## _t fdret;                                           \
                                                                        \
    fdret = float ## bits ## _ ## minmaxfunc(fs, ft,                    \
                                           &env->active_fpu.fp_status); \
    update_fcr31(env, GETPC());                                         \
    return fdret;                                                       \
}

FLOAT_MINMAX(max_s, 32, maxnum)
FLOAT_MINMAX(max_d, 64, maxnum)
FLOAT_MINMAX(maxa_s, 32, maxnummag)
FLOAT_MINMAX(maxa_d, 64, maxnummag)

FLOAT_MINMAX(min_s, 32, minnum)
FLOAT_MINMAX(min_d, 64, minnum)
FLOAT_MINMAX(mina_s, 32, minnummag)
FLOAT_MINMAX(mina_d, 64, minnummag)
#undef FLOAT_MINMAX
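
/*
 * Note (annotation): minnum/maxnum follow IEEE 754-2008 semantics (a quiet
 * NaN input is ignored when the other operand is a number), and the
 * *nummag variants compare magnitudes first, which is what MAXA.fmt and
 * MINA.fmt require.
 */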

/* ternary operations */
#define UNFUSED_FMA(prefix, a, b, c, flags)                          \
{                                                                    \
    a = prefix##_mul(a, b, &env->active_fpu.fp_status);              \
    if ((flags) & float_muladd_negate_c) {                           \
        a = prefix##_sub(a, c, &env->active_fpu.fp_status);          \
    } else {                                                         \
        a = prefix##_add(a, c, &env->active_fpu.fp_status);          \
    }                                                                \
    if ((flags) & float_muladd_negate_result) {                      \
        a = prefix##_chs(a);                                         \
    }                                                                \
}

/* FMA based operations */
#define FLOAT_FMA(name, type)                                        \
uint64_t helper_float_ ## name ## _d(CPUMIPSState *env,              \
                                     uint64_t fdt0, uint64_t fdt1,   \
                                     uint64_t fdt2)                  \
{                                                                    \
    UNFUSED_FMA(float64, fdt0, fdt1, fdt2, type);                    \
    update_fcr31(env, GETPC());                                      \
    return fdt0;                                                     \
}                                                                    \
                                                                     \
uint32_t helper_float_ ## name ## _s(CPUMIPSState *env,              \
                                     uint32_t fst0, uint32_t fst1,   \
                                     uint32_t fst2)                  \
{                                                                    \
    UNFUSED_FMA(float32, fst0, fst1, fst2, type);                    \
    update_fcr31(env, GETPC());                                      \
    return fst0;                                                     \
}                                                                    \
                                                                     \
uint64_t helper_float_ ## name ## _ps(CPUMIPSState *env,             \
                                      uint64_t fdt0, uint64_t fdt1,  \
                                      uint64_t fdt2)                 \
{                                                                    \
    uint32_t fst0 = fdt0 & 0XFFFFFFFF;                               \
    uint32_t fsth0 = fdt0 >> 32;                                     \
    uint32_t fst1 = fdt1 & 0XFFFFFFFF;                               \
    uint32_t fsth1 = fdt1 >> 32;                                     \
    uint32_t fst2 = fdt2 & 0XFFFFFFFF;                               \
    uint32_t fsth2 = fdt2 >> 32;                                     \
                                                                     \
    UNFUSED_FMA(float32, fst0, fst1, fst2, type);                    \
    UNFUSED_FMA(float32, fsth0, fsth1, fsth2, type);                 \
    update_fcr31(env, GETPC());                                      \
    return ((uint64_t)fsth0 << 32) | fst0;                           \
}
FLOAT_FMA(madd, 0)
FLOAT_FMA(msub, float_muladd_negate_c)
FLOAT_FMA(nmadd, float_muladd_negate_result)
FLOAT_FMA(nmsub, float_muladd_negate_result | float_muladd_negate_c)
#undef FLOAT_FMA

#define FLOAT_FMADDSUB(name, bits, muladd_arg)                          \
uint ## bits ## _t helper_float_ ## name (CPUMIPSState *env,            \
                                          uint ## bits ## _t fs,        \
                                          uint ## bits ## _t ft,        \
                                          uint ## bits ## _t fd)        \
{                                                                       \
    uint ## bits ## _t fdret;                                           \
                                                                        \
    fdret = float ## bits ## _muladd(fs, ft, fd, muladd_arg,            \
                                     &env->active_fpu.fp_status);       \
    update_fcr31(env, GETPC());                                         \
    return fdret;                                                       \
}

FLOAT_FMADDSUB(maddf_s, 32, 0)
FLOAT_FMADDSUB(maddf_d, 64, 0)
FLOAT_FMADDSUB(msubf_s, 32, float_muladd_negate_product)
FLOAT_FMADDSUB(msubf_d, 64, float_muladd_negate_product)
#undef FLOAT_FMADDSUB
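
/*
 * Note (annotation): the legacy MADD/MSUB/NMADD/NMSUB helpers above are
 * built from UNFUSED_FMA, i.e. a separate multiply and add with two
 * roundings, whereas the R6 MADDF/MSUBF helpers use float*_muladd() and
 * round only once (fused).  The two families can therefore differ by one
 * ULP and are intentionally kept separate.
 */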

/* compare operations */
#define FOP_COND_D(op, cond)                                   \
void helper_cmp_d_ ## op(CPUMIPSState *env, uint64_t fdt0,     \
                         uint64_t fdt1, int cc)                \
{                                                              \
    int c;                                                     \
    c = cond;                                                  \
    update_fcr31(env, GETPC());                                \
    if (c)                                                     \
        SET_FP_COND(cc, env->active_fpu);                      \
    else                                                       \
        CLEAR_FP_COND(cc, env->active_fpu);                    \
}                                                              \
void helper_cmpabs_d_ ## op(CPUMIPSState *env, uint64_t fdt0,  \
                            uint64_t fdt1, int cc)             \
{                                                              \
    int c;                                                     \
    fdt0 = float64_abs(fdt0);                                  \
    fdt1 = float64_abs(fdt1);                                  \
    c = cond;                                                  \
    update_fcr31(env, GETPC());                                \
    if (c)                                                     \
        SET_FP_COND(cc, env->active_fpu);                      \
    else                                                       \
        CLEAR_FP_COND(cc, env->active_fpu);                    \
}

/* NOTE: the comma operator makes "cond" evaluate to false,
 * but float64_unordered_quiet() is still called. */
FOP_COND_D(f,   (float64_unordered_quiet(fdt1, fdt0, &env->active_fpu.fp_status), 0))
FOP_COND_D(un,  float64_unordered_quiet(fdt1, fdt0, &env->active_fpu.fp_status))
FOP_COND_D(eq,  float64_eq_quiet(fdt0, fdt1, &env->active_fpu.fp_status))
FOP_COND_D(ueq, float64_unordered_quiet(fdt1, fdt0, &env->active_fpu.fp_status) || float64_eq_quiet(fdt0, fdt1, &env->active_fpu.fp_status))
FOP_COND_D(olt, float64_lt_quiet(fdt0, fdt1, &env->active_fpu.fp_status))
FOP_COND_D(ult, float64_unordered_quiet(fdt1, fdt0, &env->active_fpu.fp_status) || float64_lt_quiet(fdt0, fdt1, &env->active_fpu.fp_status))
FOP_COND_D(ole, float64_le_quiet(fdt0, fdt1, &env->active_fpu.fp_status))
FOP_COND_D(ule, float64_unordered_quiet(fdt1, fdt0, &env->active_fpu.fp_status) || float64_le_quiet(fdt0, fdt1, &env->active_fpu.fp_status))
/* NOTE: the comma operator makes "cond" evaluate to false,
 * but float64_unordered() is still called. */
FOP_COND_D(sf,  (float64_unordered(fdt1, fdt0, &env->active_fpu.fp_status), 0))
FOP_COND_D(ngle, float64_unordered(fdt1, fdt0, &env->active_fpu.fp_status))
FOP_COND_D(seq, float64_eq(fdt0, fdt1, &env->active_fpu.fp_status))
FOP_COND_D(ngl, float64_unordered(fdt1, fdt0, &env->active_fpu.fp_status) || float64_eq(fdt0, fdt1, &env->active_fpu.fp_status))
FOP_COND_D(lt,  float64_lt(fdt0, fdt1, &env->active_fpu.fp_status))
FOP_COND_D(nge, float64_unordered(fdt1, fdt0, &env->active_fpu.fp_status) || float64_lt(fdt0, fdt1, &env->active_fpu.fp_status))
FOP_COND_D(le,  float64_le(fdt0, fdt1, &env->active_fpu.fp_status))
FOP_COND_D(ngt, float64_unordered(fdt1, fdt0, &env->active_fpu.fp_status) || float64_le(fdt0, fdt1, &env->active_fpu.fp_status))

#define FOP_COND_S(op, cond)                                   \
void helper_cmp_s_ ## op(CPUMIPSState *env, uint32_t fst0,     \
                         uint32_t fst1, int cc)                \
{                                                              \
    int c;                                                     \
    c = cond;                                                  \
    update_fcr31(env, GETPC());                                \
    if (c)                                                     \
        SET_FP_COND(cc, env->active_fpu);                      \
    else                                                       \
        CLEAR_FP_COND(cc, env->active_fpu);                    \
}                                                              \
void helper_cmpabs_s_ ## op(CPUMIPSState *env, uint32_t fst0,  \
                            uint32_t fst1, int cc)             \
{                                                              \
    int c;                                                     \
    fst0 = float32_abs(fst0);                                  \
    fst1 = float32_abs(fst1);                                  \
    c = cond;                                                  \
    update_fcr31(env, GETPC());                                \
    if (c)                                                     \
        SET_FP_COND(cc, env->active_fpu);                      \
    else                                                       \
        CLEAR_FP_COND(cc, env->active_fpu);                    \
}

/* NOTE: the comma operator makes "cond" evaluate to false,
 * but float32_unordered_quiet() is still called. */
FOP_COND_S(f,   (float32_unordered_quiet(fst1, fst0, &env->active_fpu.fp_status), 0))
FOP_COND_S(un,  float32_unordered_quiet(fst1, fst0, &env->active_fpu.fp_status))
FOP_COND_S(eq,  float32_eq_quiet(fst0, fst1, &env->active_fpu.fp_status))
FOP_COND_S(ueq, float32_unordered_quiet(fst1, fst0, &env->active_fpu.fp_status) || float32_eq_quiet(fst0, fst1, &env->active_fpu.fp_status))
FOP_COND_S(olt, float32_lt_quiet(fst0, fst1, &env->active_fpu.fp_status))
FOP_COND_S(ult, float32_unordered_quiet(fst1, fst0, &env->active_fpu.fp_status) || float32_lt_quiet(fst0, fst1, &env->active_fpu.fp_status))
FOP_COND_S(ole, float32_le_quiet(fst0, fst1, &env->active_fpu.fp_status))
FOP_COND_S(ule, float32_unordered_quiet(fst1, fst0, &env->active_fpu.fp_status) || float32_le_quiet(fst0, fst1, &env->active_fpu.fp_status))
/* NOTE: the comma operator makes "cond" evaluate to false,
 * but float32_unordered() is still called. */
FOP_COND_S(sf,  (float32_unordered(fst1, fst0, &env->active_fpu.fp_status), 0))
FOP_COND_S(ngle, float32_unordered(fst1, fst0, &env->active_fpu.fp_status))
FOP_COND_S(seq, float32_eq(fst0, fst1, &env->active_fpu.fp_status))
FOP_COND_S(ngl, float32_unordered(fst1, fst0, &env->active_fpu.fp_status) || float32_eq(fst0, fst1, &env->active_fpu.fp_status))
FOP_COND_S(lt,  float32_lt(fst0, fst1, &env->active_fpu.fp_status))
FOP_COND_S(nge, float32_unordered(fst1, fst0, &env->active_fpu.fp_status) || float32_lt(fst0, fst1, &env->active_fpu.fp_status))
FOP_COND_S(le,  float32_le(fst0, fst1, &env->active_fpu.fp_status))
FOP_COND_S(ngt, float32_unordered(fst1, fst0, &env->active_fpu.fp_status) || float32_le(fst0, fst1, &env->active_fpu.fp_status))

#define FOP_COND_PS(op, condl, condh)                           \
void helper_cmp_ps_ ## op(CPUMIPSState *env, uint64_t fdt0,     \
                          uint64_t fdt1, int cc)                \
{                                                               \
    uint32_t fst0, fsth0, fst1, fsth1;                          \
    int ch, cl;                                                 \
    fst0 = fdt0 & 0XFFFFFFFF;                                   \
    fsth0 = fdt0 >> 32;                                         \
    fst1 = fdt1 & 0XFFFFFFFF;                                   \
    fsth1 = fdt1 >> 32;                                         \
    cl = condl;                                                 \
    ch = condh;                                                 \
    update_fcr31(env, GETPC());                                 \
    if (cl)                                                     \
        SET_FP_COND(cc, env->active_fpu);                       \
    else                                                        \
        CLEAR_FP_COND(cc, env->active_fpu);                     \
    if (ch)                                                     \
        SET_FP_COND(cc + 1, env->active_fpu);                   \
    else                                                        \
        CLEAR_FP_COND(cc + 1, env->active_fpu);                 \
}                                                               \
void helper_cmpabs_ps_ ## op(CPUMIPSState *env, uint64_t fdt0,  \
                             uint64_t fdt1, int cc)             \
{                                                               \
    uint32_t fst0, fsth0, fst1, fsth1;                          \
    int ch, cl;                                                 \
    fst0 = float32_abs(fdt0 & 0XFFFFFFFF);                      \
    fsth0 = float32_abs(fdt0 >> 32);                            \
    fst1 = float32_abs(fdt1 & 0XFFFFFFFF);                      \
    fsth1 = float32_abs(fdt1 >> 32);                            \
    cl = condl;                                                 \
    ch = condh;                                                 \
    update_fcr31(env, GETPC());                                 \
    if (cl)                                                     \
        SET_FP_COND(cc, env->active_fpu);                       \
    else                                                        \
        CLEAR_FP_COND(cc, env->active_fpu);                     \
    if (ch)                                                     \
        SET_FP_COND(cc + 1, env->active_fpu);                   \
    else                                                        \
        CLEAR_FP_COND(cc + 1, env->active_fpu);                 \
}

/* NOTE: the comma operator makes "cond" evaluate to false,
 * but float32_unordered_quiet() is still called. */
FOP_COND_PS(f,   (float32_unordered_quiet(fst1, fst0, &env->active_fpu.fp_status), 0),
                 (float32_unordered_quiet(fsth1, fsth0, &env->active_fpu.fp_status), 0))
FOP_COND_PS(un,  float32_unordered_quiet(fst1, fst0, &env->active_fpu.fp_status),
                 float32_unordered_quiet(fsth1, fsth0, &env->active_fpu.fp_status))
FOP_COND_PS(eq,  float32_eq_quiet(fst0, fst1, &env->active_fpu.fp_status),
                 float32_eq_quiet(fsth0, fsth1, &env->active_fpu.fp_status))
FOP_COND_PS(ueq, float32_unordered_quiet(fst1, fst0, &env->active_fpu.fp_status) || float32_eq_quiet(fst0, fst1, &env->active_fpu.fp_status),
                 float32_unordered_quiet(fsth1, fsth0, &env->active_fpu.fp_status) || float32_eq_quiet(fsth0, fsth1, &env->active_fpu.fp_status))
FOP_COND_PS(olt, float32_lt_quiet(fst0, fst1, &env->active_fpu.fp_status),
                 float32_lt_quiet(fsth0, fsth1, &env->active_fpu.fp_status))
FOP_COND_PS(ult, float32_unordered_quiet(fst1, fst0, &env->active_fpu.fp_status) || float32_lt_quiet(fst0, fst1, &env->active_fpu.fp_status),
                 float32_unordered_quiet(fsth1, fsth0, &env->active_fpu.fp_status) || float32_lt_quiet(fsth0, fsth1, &env->active_fpu.fp_status))
FOP_COND_PS(ole, float32_le_quiet(fst0, fst1, &env->active_fpu.fp_status),
                 float32_le_quiet(fsth0, fsth1, &env->active_fpu.fp_status))
FOP_COND_PS(ule, float32_unordered_quiet(fst1, fst0, &env->active_fpu.fp_status) || float32_le_quiet(fst0, fst1, &env->active_fpu.fp_status),
                 float32_unordered_quiet(fsth1, fsth0, &env->active_fpu.fp_status) || float32_le_quiet(fsth0, fsth1, &env->active_fpu.fp_status))
/* NOTE: the comma operator makes "cond" evaluate to false,
 * but float32_unordered() is still called. */
FOP_COND_PS(sf,  (float32_unordered(fst1, fst0, &env->active_fpu.fp_status), 0),
                 (float32_unordered(fsth1, fsth0, &env->active_fpu.fp_status), 0))
FOP_COND_PS(ngle, float32_unordered(fst1, fst0, &env->active_fpu.fp_status),
                 float32_unordered(fsth1, fsth0, &env->active_fpu.fp_status))
FOP_COND_PS(seq, float32_eq(fst0, fst1, &env->active_fpu.fp_status),
                 float32_eq(fsth0, fsth1, &env->active_fpu.fp_status))
FOP_COND_PS(ngl, float32_unordered(fst1, fst0, &env->active_fpu.fp_status) || float32_eq(fst0, fst1, &env->active_fpu.fp_status),
                 float32_unordered(fsth1, fsth0, &env->active_fpu.fp_status) || float32_eq(fsth0, fsth1, &env->active_fpu.fp_status))
FOP_COND_PS(lt,  float32_lt(fst0, fst1, &env->active_fpu.fp_status),
                 float32_lt(fsth0, fsth1, &env->active_fpu.fp_status))
FOP_COND_PS(nge, float32_unordered(fst1, fst0, &env->active_fpu.fp_status) || float32_lt(fst0, fst1, &env->active_fpu.fp_status),
                 float32_unordered(fsth1, fsth0, &env->active_fpu.fp_status) || float32_lt(fsth0, fsth1, &env->active_fpu.fp_status))
FOP_COND_PS(le,  float32_le(fst0, fst1, &env->active_fpu.fp_status),
                 float32_le(fsth0, fsth1, &env->active_fpu.fp_status))
FOP_COND_PS(ngt, float32_unordered(fst1, fst0, &env->active_fpu.fp_status) || float32_le(fst0, fst1, &env->active_fpu.fp_status),
                 float32_unordered(fsth1, fsth0, &env->active_fpu.fp_status) || float32_le(fsth0, fsth1, &env->active_fpu.fp_status))

/* R6 compare operations */
#define FOP_CONDN_D(op, cond)                                       \
uint64_t helper_r6_cmp_d_ ## op(CPUMIPSState * env, uint64_t fdt0,  \
                                uint64_t fdt1)                      \
{                                                                   \
    uint64_t c;                                                     \
    c = cond;                                                       \
    update_fcr31(env, GETPC());                                     \
    if (c) {                                                        \
        return -1;                                                  \
    } else {                                                        \
        return 0;                                                   \
    }                                                               \
}

/* NOTE: the comma operator makes "cond" evaluate to false,
 * but float64_unordered_quiet() is still called. */
FOP_CONDN_D(af,  (float64_unordered_quiet(fdt1, fdt0, &env->active_fpu.fp_status), 0))
FOP_CONDN_D(un,  (float64_unordered_quiet(fdt1, fdt0, &env->active_fpu.fp_status)))
FOP_CONDN_D(eq,  (float64_eq_quiet(fdt0, fdt1, &env->active_fpu.fp_status)))
FOP_CONDN_D(ueq, (float64_unordered_quiet(fdt1, fdt0, &env->active_fpu.fp_status)
                  || float64_eq_quiet(fdt0, fdt1, &env->active_fpu.fp_status)))
FOP_CONDN_D(lt,  (float64_lt_quiet(fdt0, fdt1, &env->active_fpu.fp_status)))
FOP_CONDN_D(ult, (float64_unordered_quiet(fdt1, fdt0, &env->active_fpu.fp_status)
                  || float64_lt_quiet(fdt0, fdt1, &env->active_fpu.fp_status)))
FOP_CONDN_D(le,  (float64_le_quiet(fdt0, fdt1, &env->active_fpu.fp_status)))
FOP_CONDN_D(ule, (float64_unordered_quiet(fdt1, fdt0, &env->active_fpu.fp_status)
                  || float64_le_quiet(fdt0, fdt1, &env->active_fpu.fp_status)))
/* NOTE: the comma operator makes "cond" evaluate to false,
 * but float64_unordered() is still called. */
FOP_CONDN_D(saf,  (float64_unordered(fdt1, fdt0, &env->active_fpu.fp_status), 0))
FOP_CONDN_D(sun,  (float64_unordered(fdt1, fdt0, &env->active_fpu.fp_status)))
FOP_CONDN_D(seq,  (float64_eq(fdt0, fdt1, &env->active_fpu.fp_status)))
FOP_CONDN_D(sueq, (float64_unordered(fdt1, fdt0, &env->active_fpu.fp_status)
                   || float64_eq(fdt0, fdt1, &env->active_fpu.fp_status)))
FOP_CONDN_D(slt,  (float64_lt(fdt0, fdt1, &env->active_fpu.fp_status)))
FOP_CONDN_D(sult, (float64_unordered(fdt1, fdt0, &env->active_fpu.fp_status)
                   || float64_lt(fdt0, fdt1, &env->active_fpu.fp_status)))
FOP_CONDN_D(sle,  (float64_le(fdt0, fdt1, &env->active_fpu.fp_status)))
FOP_CONDN_D(sule, (float64_unordered(fdt1, fdt0, &env->active_fpu.fp_status)
                   || float64_le(fdt0, fdt1, &env->active_fpu.fp_status)))
FOP_CONDN_D(or,   (float64_le_quiet(fdt1, fdt0, &env->active_fpu.fp_status)
                   || float64_le_quiet(fdt0, fdt1, &env->active_fpu.fp_status)))
FOP_CONDN_D(une,  (float64_unordered_quiet(fdt1, fdt0, &env->active_fpu.fp_status)
                   || float64_lt_quiet(fdt1, fdt0, &env->active_fpu.fp_status)
                   || float64_lt_quiet(fdt0, fdt1, &env->active_fpu.fp_status)))
FOP_CONDN_D(ne,   (float64_lt_quiet(fdt1, fdt0, &env->active_fpu.fp_status)
                   || float64_lt_quiet(fdt0, fdt1, &env->active_fpu.fp_status)))
FOP_CONDN_D(sor,  (float64_le(fdt1, fdt0, &env->active_fpu.fp_status)
                   || float64_le(fdt0, fdt1, &env->active_fpu.fp_status)))
FOP_CONDN_D(sune, (float64_unordered(fdt1, fdt0, &env->active_fpu.fp_status)
                   || float64_lt(fdt1, fdt0, &env->active_fpu.fp_status)
                   || float64_lt(fdt0, fdt1, &env->active_fpu.fp_status)))
FOP_CONDN_D(sne,  (float64_lt(fdt1, fdt0, &env->active_fpu.fp_status)
                   || float64_lt(fdt0, fdt1, &env->active_fpu.fp_status)))
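
/*
 * Note (annotation): unlike the pre-R6 c.cond.fmt helpers, which set or
 * clear a condition code in FCR31, the R6 CMP.cond.fmt helpers return an
 * all-ones (-1) or all-zero mask that the caller writes to the destination
 * FPR, which is why the FOP_CONDN_* bodies return a value instead of
 * touching SET_FP_COND/CLEAR_FP_COND.
 */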
4085 #define FOP_CONDN_S(op, cond) \
4086 uint32_t helper_r6_cmp_s_ ## op(CPUMIPSState * env, uint32_t fst0, \
4091 update_fcr31(env, GETPC()); \
4099 /* NOTE: the comma operator will make "cond" to eval to false,
4100 * but float32_unordered_quiet() is still called. */
FOP_CONDN_S(af,  (float32_unordered_quiet(fst1, fst0,
                                          &env->active_fpu.fp_status), 0))
FOP_CONDN_S(un,  (float32_unordered_quiet(fst1, fst0,
                                          &env->active_fpu.fp_status)))
FOP_CONDN_S(eq,  (float32_eq_quiet(fst0, fst1,
                                   &env->active_fpu.fp_status)))
FOP_CONDN_S(ueq, (float32_unordered_quiet(fst1, fst0,
                                          &env->active_fpu.fp_status)
                  || float32_eq_quiet(fst0, fst1,
                                      &env->active_fpu.fp_status)))
FOP_CONDN_S(lt,  (float32_lt_quiet(fst0, fst1,
                                   &env->active_fpu.fp_status)))
FOP_CONDN_S(ult, (float32_unordered_quiet(fst1, fst0,
                                          &env->active_fpu.fp_status)
                  || float32_lt_quiet(fst0, fst1,
                                      &env->active_fpu.fp_status)))
FOP_CONDN_S(le,  (float32_le_quiet(fst0, fst1,
                                   &env->active_fpu.fp_status)))
FOP_CONDN_S(ule, (float32_unordered_quiet(fst1, fst0,
                                          &env->active_fpu.fp_status)
                  || float32_le_quiet(fst0, fst1,
                                      &env->active_fpu.fp_status)))
/* NOTE: the comma operator makes "cond" evaluate to false,
 * but float32_unordered() is still called. */
FOP_CONDN_S(saf,  (float32_unordered(fst1, fst0,
                                     &env->active_fpu.fp_status), 0))
FOP_CONDN_S(sun,  (float32_unordered(fst1, fst0,
                                     &env->active_fpu.fp_status)))
FOP_CONDN_S(seq,  (float32_eq(fst0, fst1,
                              &env->active_fpu.fp_status)))
FOP_CONDN_S(sueq, (float32_unordered(fst1, fst0,
                                     &env->active_fpu.fp_status)
                   || float32_eq(fst0, fst1,
                                 &env->active_fpu.fp_status)))
FOP_CONDN_S(slt,  (float32_lt(fst0, fst1,
                              &env->active_fpu.fp_status)))
FOP_CONDN_S(sult, (float32_unordered(fst1, fst0,
                                     &env->active_fpu.fp_status)
                   || float32_lt(fst0, fst1,
                                 &env->active_fpu.fp_status)))
FOP_CONDN_S(sle,  (float32_le(fst0, fst1,
                              &env->active_fpu.fp_status)))
FOP_CONDN_S(sule, (float32_unordered(fst1, fst0,
                                     &env->active_fpu.fp_status)
                   || float32_le(fst0, fst1,
                                 &env->active_fpu.fp_status)))
FOP_CONDN_S(or,   (float32_le_quiet(fst1, fst0,
                                    &env->active_fpu.fp_status)
                   || float32_le_quiet(fst0, fst1,
                                       &env->active_fpu.fp_status)))
FOP_CONDN_S(une,  (float32_unordered_quiet(fst1, fst0,
                                           &env->active_fpu.fp_status)
                   || float32_lt_quiet(fst1, fst0,
                                       &env->active_fpu.fp_status)
                   || float32_lt_quiet(fst0, fst1,
                                       &env->active_fpu.fp_status)))
FOP_CONDN_S(ne,   (float32_lt_quiet(fst1, fst0,
                                    &env->active_fpu.fp_status)
                   || float32_lt_quiet(fst0, fst1,
                                       &env->active_fpu.fp_status)))
FOP_CONDN_S(sor,  (float32_le(fst1, fst0,
                              &env->active_fpu.fp_status)
                   || float32_le(fst0, fst1,
                                 &env->active_fpu.fp_status)))
FOP_CONDN_S(sune, (float32_unordered(fst1, fst0,
                                     &env->active_fpu.fp_status)
                   || float32_lt(fst1, fst0,
                                 &env->active_fpu.fp_status)
                   || float32_lt(fst0, fst1,
                                 &env->active_fpu.fp_status)))
FOP_CONDN_S(sne,  (float32_lt(fst1, fst0,
                              &env->active_fpu.fp_status)
                   || float32_lt(fst0, fst1,
                                 &env->active_fpu.fp_status)))
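
/* The signaling variants (saf, sun, seq, ...) use the non-quiet
 * softfloat comparisons, which raise the invalid-operation flag for
 * any NaN operand; the quiet variants raise it only for signaling
 * NaNs.  In both cases update_fcr31() then folds the accumulated
 * softfloat flags into the FCSR cause/flag bits and raises an FP
 * exception if the corresponding enable bit is set. */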
/* Data format min and max values */
#define DF_BITS(df) (1 << ((df) + 3))

/* Element-by-element access macros */
#define DF_ELEMENTS(df) (MSA_WRLEN / DF_BITS(df))
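
/* For reference (a sketch, assuming the usual MSA definitions of
 * DF_BYTE..DF_DOUBLE as 0..3 and MSA_WRLEN as 128):
 *
 *     df          DF_BITS(df)   DF_ELEMENTS(df)
 *     DF_BYTE          8              16
 *     DF_HALF         16               8
 *     DF_WORD         32               4
 *     DF_DOUBLE       64               2
 */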
#if !defined(CONFIG_USER_ONLY)
#define MEMOP_IDX(DF) \
        TCGMemOpIdx oi = make_memop_idx(MO_TE | DF | MO_UNALN, \
                                        cpu_mmu_index(env, false));
#else
#define MEMOP_IDX(DF)
#endif
#define MSA_LD_DF(DF, TYPE, LD_INSN, ...)                               \
void helper_msa_ld_ ## TYPE(CPUMIPSState *env, uint32_t wd,             \
                            target_ulong addr)                          \
{                                                                       \
    wr_t *pwd = &(env->active_fpu.fpr[wd].wr);                          \
    wr_t wx;                                                            \
    int i;                                                              \
    MEMOP_IDX(DF)                                                       \
    for (i = 0; i < DF_ELEMENTS(DF); i++) {                             \
        wx.TYPE[i] = LD_INSN(env, addr + (i << DF), ##__VA_ARGS__);     \
    }                                                                   \
    memcpy(pwd, &wx, sizeof(wr_t));                                     \
}
#if !defined(CONFIG_USER_ONLY)
MSA_LD_DF(DF_BYTE,   b, helper_ret_ldub_mmu, oi, GETPC())
MSA_LD_DF(DF_HALF,   h, helper_ret_lduw_mmu, oi, GETPC())
MSA_LD_DF(DF_WORD,   w, helper_ret_ldul_mmu, oi, GETPC())
MSA_LD_DF(DF_DOUBLE, d, helper_ret_ldq_mmu, oi, GETPC())
#else
MSA_LD_DF(DF_BYTE,   b, cpu_ldub_data)
MSA_LD_DF(DF_HALF,   h, cpu_lduw_data)
MSA_LD_DF(DF_WORD,   w, cpu_ldl_data)
MSA_LD_DF(DF_DOUBLE, d, cpu_ldq_data)
#endif
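
/* For illustration, the user-only instantiation MSA_LD_DF(DF_BYTE, b,
 * cpu_ldub_data) above expands roughly to (a sketch, assuming
 * DF_ELEMENTS(DF_BYTE) == 16):
 *
 *     void helper_msa_ld_b(CPUMIPSState *env, uint32_t wd,
 *                          target_ulong addr)
 *     {
 *         wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
 *         wr_t wx;
 *         int i;
 *         for (i = 0; i < 16; i++) {
 *             wx.b[i] = cpu_ldub_data(env, addr + i);
 *         }
 *         memcpy(pwd, &wx, sizeof(wr_t));
 *     }
 *
 * The whole vector is assembled in a local copy first, so a fault on a
 * later element leaves the destination register unmodified. */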
#define MSA_PAGESPAN(x) \
        ((((x) & ~TARGET_PAGE_MASK) + MSA_WRLEN/8 - 1) >= TARGET_PAGE_SIZE)
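
/* MSA_PAGESPAN(x) is true when a full MSA vector starting at x crosses
 * a page boundary.  For example, assuming 4 KiB target pages and a
 * 128-bit (16-byte) vector: an address with page offset 0xff8 gives
 * 0xff8 + 16 - 1 = 0x1007 >= 0x1000, so the access spans two pages,
 * while offset 0xff0 gives 0xfff < 0x1000 and stays on one page. */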
static inline void ensure_writable_pages(CPUMIPSState *env,
                                         target_ulong addr,
                                         int mmu_idx,
                                         uintptr_t retaddr)
{
#if !defined(CONFIG_USER_ONLY)
    target_ulong page_addr;
    if (unlikely(MSA_PAGESPAN(addr))) {
        /* first page */
        probe_write(env, addr, mmu_idx, retaddr);
        /* second page */
        page_addr = (addr & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;
        probe_write(env, page_addr, mmu_idx, retaddr);
    }
#endif
}
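
/* Probing both pages up front means that if either page is unwritable
 * the fault is taken before any element has been stored, so a vector
 * store is never left half-completed across the page boundary. */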
#define MSA_ST_DF(DF, TYPE, ST_INSN, ...)                               \
void helper_msa_st_ ## TYPE(CPUMIPSState *env, uint32_t wd,             \
                            target_ulong addr)                          \
{                                                                       \
    wr_t *pwd = &(env->active_fpu.fpr[wd].wr);                          \
    int mmu_idx = cpu_mmu_index(env, false);                            \
    int i;                                                              \
    MEMOP_IDX(DF)                                                       \
    ensure_writable_pages(env, addr, mmu_idx, GETPC());                 \
    for (i = 0; i < DF_ELEMENTS(DF); i++) {                             \
        ST_INSN(env, addr + (i << DF), pwd->TYPE[i], ##__VA_ARGS__);    \
    }                                                                   \
}
#if !defined(CONFIG_USER_ONLY)
MSA_ST_DF(DF_BYTE,   b, helper_ret_stb_mmu, oi, GETPC())
MSA_ST_DF(DF_HALF,   h, helper_ret_stw_mmu, oi, GETPC())
MSA_ST_DF(DF_WORD,   w, helper_ret_stl_mmu, oi, GETPC())
MSA_ST_DF(DF_DOUBLE, d, helper_ret_stq_mmu, oi, GETPC())
#else
MSA_ST_DF(DF_BYTE,   b, cpu_stb_data)
MSA_ST_DF(DF_HALF,   h, cpu_stw_data)
MSA_ST_DF(DF_WORD,   w, cpu_stl_data)
MSA_ST_DF(DF_DOUBLE, d, cpu_stq_data)
#endif
void helper_cache(CPUMIPSState *env, target_ulong addr, uint32_t op)
{
#ifndef CONFIG_USER_ONLY
    target_ulong index = addr & 0x1fffffff;
    if (op == 9) {
        /* Index Store Tag */
        memory_region_dispatch_write(env->itc_tag, index, env->CP0_TagLo,
                                     8, MEMTXATTRS_UNSPECIFIED);
    } else if (op == 5) {
        /* Index Load Tag */
        memory_region_dispatch_read(env->itc_tag, index, &env->CP0_TagLo,
                                    8, MEMTXATTRS_UNSPECIFIED);