memory: remove assertion on memory_region_destroy
[qemu/ar7.git] / target-mips / op_helper.c
blob ea7d95f36c017ade7ecb0d268a564e0d33097b41
1 /*
2 * MIPS emulation helpers for qemu.
4 * Copyright (c) 2004-2005 Jocelyn Mayer
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
19 #include <stdlib.h>
20 #include "cpu.h"
21 #include "qemu/host-utils.h"
22 #include "exec/helper-proto.h"
23 #include "exec/cpu_ldst.h"
24 #include "sysemu/kvm.h"
26 #ifndef CONFIG_USER_ONLY
27 static inline void cpu_mips_tlb_flush (CPUMIPSState *env, int flush_global);
28 #endif
30 /*****************************************************************************/
31 /* Exceptions processing helpers */
33 static inline void QEMU_NORETURN do_raise_exception_err(CPUMIPSState *env,
34 uint32_t exception,
35 int error_code,
36 uintptr_t pc)
38 CPUState *cs = CPU(mips_env_get_cpu(env));
40 if (exception < EXCP_SC) {
41 qemu_log("%s: %d %d\n", __func__, exception, error_code);
43 cs->exception_index = exception;
44 env->error_code = error_code;
46 if (pc) {
47 /* now we have a real cpu fault */
48 cpu_restore_state(cs, pc);
51 cpu_loop_exit(cs);
54 static inline void QEMU_NORETURN do_raise_exception(CPUMIPSState *env,
55 uint32_t exception,
56 uintptr_t pc)
58 do_raise_exception_err(env, exception, 0, pc);
61 void helper_raise_exception_err(CPUMIPSState *env, uint32_t exception,
62 int error_code)
64 do_raise_exception_err(env, exception, error_code, 0);
67 void helper_raise_exception(CPUMIPSState *env, uint32_t exception)
69 do_raise_exception(env, exception, 0);
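/* A short note on the exception helpers above: do_raise_exception_err()
   records the exception in cs->exception_index and env->error_code, and,
   when a non-zero host return address is passed as `pc`, calls
   cpu_restore_state() to resynchronise the guest state from the translated
   code before cpu_loop_exit() unwinds back to the main loop.  The
   helper_raise_exception*() wrappers pass pc == 0, i.e. they rely on the
   guest state already being up to date at the call site. */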
72 #if defined(CONFIG_USER_ONLY)
73 #define HELPER_LD(name, insn, type) \
74 static inline type do_##name(CPUMIPSState *env, target_ulong addr, \
75 int mem_idx) \
76 { \
77 return (type) cpu_##insn##_data(env, addr); \
79 #else
80 #define HELPER_LD(name, insn, type) \
81 static inline type do_##name(CPUMIPSState *env, target_ulong addr, \
82 int mem_idx) \
83 { \
84 switch (mem_idx) \
85 { \
86 case 0: return (type) cpu_##insn##_kernel(env, addr); break; \
87 case 1: return (type) cpu_##insn##_super(env, addr); break; \
88 default: \
89 case 2: return (type) cpu_##insn##_user(env, addr); break; \
90 } \
92 #endif
93 HELPER_LD(lbu, ldub, uint8_t)
94 HELPER_LD(lhu, lduw, uint16_t)
95 HELPER_LD(lw, ldl, int32_t)
96 HELPER_LD(ld, ldq, int64_t)
97 #undef HELPER_LD
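/* For reference, a sketch of what the softmmu HELPER_LD macro above expands
   to for the "lw" instance (illustration only; the cpu_ldl_* accessors come
   from "exec/cpu_ldst.h"):

       static inline int32_t do_lw(CPUMIPSState *env, target_ulong addr,
                                   int mem_idx)
       {
           switch (mem_idx) {
           case 0: return (int32_t)cpu_ldl_kernel(env, addr);
           case 1: return (int32_t)cpu_ldl_super(env, addr);
           default:
           case 2: return (int32_t)cpu_ldl_user(env, addr);
           }
       }

   mem_idx selects the MMU index (kernel/supervisor/user) the access is
   performed with; HELPER_ST below is the store-side counterpart. */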
99 #if defined(CONFIG_USER_ONLY)
100 #define HELPER_ST(name, insn, type) \
101 static inline void do_##name(CPUMIPSState *env, target_ulong addr, \
102 type val, int mem_idx) \
104 cpu_##insn##_data(env, addr, val); \
106 #else
107 #define HELPER_ST(name, insn, type) \
108 static inline void do_##name(CPUMIPSState *env, target_ulong addr, \
109 type val, int mem_idx) \
111 switch (mem_idx) \
113 case 0: cpu_##insn##_kernel(env, addr, val); break; \
114 case 1: cpu_##insn##_super(env, addr, val); break; \
115 default: \
116 case 2: cpu_##insn##_user(env, addr, val); break; \
119 #endif
120 HELPER_ST(sb, stb, uint8_t)
121 HELPER_ST(sh, stw, uint16_t)
122 HELPER_ST(sw, stl, uint32_t)
123 HELPER_ST(sd, stq, uint64_t)
124 #undef HELPER_ST
126 target_ulong helper_clo (target_ulong arg1)
128 return clo32(arg1);
131 target_ulong helper_clz (target_ulong arg1)
133 return clz32(arg1);
136 #if defined(TARGET_MIPS64)
137 target_ulong helper_dclo (target_ulong arg1)
139 return clo64(arg1);
142 target_ulong helper_dclz (target_ulong arg1)
144 return clz64(arg1);
146 #endif /* TARGET_MIPS64 */
148 /* 64-bit arithmetic for 32-bit hosts */
149 static inline uint64_t get_HILO(CPUMIPSState *env)
151 return ((uint64_t)(env->active_tc.HI[0]) << 32) | (uint32_t)env->active_tc.LO[0];
154 static inline target_ulong set_HIT0_LO(CPUMIPSState *env, uint64_t HILO)
156 target_ulong tmp;
157 env->active_tc.LO[0] = (int32_t)(HILO & 0xFFFFFFFF);
158 tmp = env->active_tc.HI[0] = (int32_t)(HILO >> 32);
159 return tmp;
162 static inline target_ulong set_HI_LOT0(CPUMIPSState *env, uint64_t HILO)
164 target_ulong tmp = env->active_tc.LO[0] = (int32_t)(HILO & 0xFFFFFFFF);
165 env->active_tc.HI[0] = (int32_t)(HILO >> 32);
166 return tmp;
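/* The two setters above differ only in which half they hand back: both
   split a 64-bit product/accumulator into LO[0] (low word) and HI[0]
   (high word), each sign-extended through (int32_t); set_HIT0_LO()
   returns the HI word, set_HI_LOT0() returns the LO word.  Worked
   example: HILO = 0x1234567880000001ULL gives HI[0] = 0x12345678 and
   LO[0] = (int32_t)0x80000001, i.e. 0xFFFFFFFF80000001 on a 64-bit
   target. */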
169 /* Multiplication variants of the vr54xx. */
170 target_ulong helper_muls(CPUMIPSState *env, target_ulong arg1,
171 target_ulong arg2)
173 return set_HI_LOT0(env, 0 - ((int64_t)(int32_t)arg1 *
174 (int64_t)(int32_t)arg2));
177 target_ulong helper_mulsu(CPUMIPSState *env, target_ulong arg1,
178 target_ulong arg2)
180 return set_HI_LOT0(env, 0 - (uint64_t)(uint32_t)arg1 *
181 (uint64_t)(uint32_t)arg2);
184 target_ulong helper_macc(CPUMIPSState *env, target_ulong arg1,
185 target_ulong arg2)
187 return set_HI_LOT0(env, (int64_t)get_HILO(env) + (int64_t)(int32_t)arg1 *
188 (int64_t)(int32_t)arg2);
191 target_ulong helper_macchi(CPUMIPSState *env, target_ulong arg1,
192 target_ulong arg2)
194 return set_HIT0_LO(env, (int64_t)get_HILO(env) + (int64_t)(int32_t)arg1 *
195 (int64_t)(int32_t)arg2);
198 target_ulong helper_maccu(CPUMIPSState *env, target_ulong arg1,
199 target_ulong arg2)
201 return set_HI_LOT0(env, (uint64_t)get_HILO(env) +
202 (uint64_t)(uint32_t)arg1 * (uint64_t)(uint32_t)arg2);
205 target_ulong helper_macchiu(CPUMIPSState *env, target_ulong arg1,
206 target_ulong arg2)
208 return set_HIT0_LO(env, (uint64_t)get_HILO(env) +
209 (uint64_t)(uint32_t)arg1 * (uint64_t)(uint32_t)arg2);
212 target_ulong helper_msac(CPUMIPSState *env, target_ulong arg1,
213 target_ulong arg2)
215 return set_HI_LOT0(env, (int64_t)get_HILO(env) - (int64_t)(int32_t)arg1 *
216 (int64_t)(int32_t)arg2);
219 target_ulong helper_msachi(CPUMIPSState *env, target_ulong arg1,
220 target_ulong arg2)
222 return set_HIT0_LO(env, (int64_t)get_HILO(env) - (int64_t)(int32_t)arg1 *
223 (int64_t)(int32_t)arg2);
226 target_ulong helper_msacu(CPUMIPSState *env, target_ulong arg1,
227 target_ulong arg2)
229 return set_HI_LOT0(env, (uint64_t)get_HILO(env) -
230 (uint64_t)(uint32_t)arg1 * (uint64_t)(uint32_t)arg2);
233 target_ulong helper_msachiu(CPUMIPSState *env, target_ulong arg1,
234 target_ulong arg2)
236 return set_HIT0_LO(env, (uint64_t)get_HILO(env) -
237 (uint64_t)(uint32_t)arg1 * (uint64_t)(uint32_t)arg2);
240 target_ulong helper_mulhi(CPUMIPSState *env, target_ulong arg1,
241 target_ulong arg2)
243 return set_HIT0_LO(env, (int64_t)(int32_t)arg1 * (int64_t)(int32_t)arg2);
246 target_ulong helper_mulhiu(CPUMIPSState *env, target_ulong arg1,
247 target_ulong arg2)
249 return set_HIT0_LO(env, (uint64_t)(uint32_t)arg1 *
250 (uint64_t)(uint32_t)arg2);
253 target_ulong helper_mulshi(CPUMIPSState *env, target_ulong arg1,
254 target_ulong arg2)
256 return set_HIT0_LO(env, 0 - (int64_t)(int32_t)arg1 *
257 (int64_t)(int32_t)arg2);
260 target_ulong helper_mulshiu(CPUMIPSState *env, target_ulong arg1,
261 target_ulong arg2)
263 return set_HIT0_LO(env, 0 - (uint64_t)(uint32_t)arg1 *
264 (uint64_t)(uint32_t)arg2);
267 static inline target_ulong bitswap(target_ulong v)
269 v = ((v >> 1) & (target_ulong)0x5555555555555555ULL) |
270 ((v & (target_ulong)0x5555555555555555ULL) << 1);
271 v = ((v >> 2) & (target_ulong)0x3333333333333333ULL) |
272 ((v & (target_ulong)0x3333333333333333ULL) << 2);
273 v = ((v >> 4) & (target_ulong)0x0F0F0F0F0F0F0F0FULL) |
274 ((v & (target_ulong)0x0F0F0F0F0F0F0F0FULL) << 4);
275 return v;
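/* bitswap() reverses the bit order within each byte (only shifts of 1, 2
   and 4 are used, so the bytes themselves stay in place), which is the
   behaviour the BITSWAP/DBITSWAP helpers below expose.  Worked example:
   bitswap(0x12345678) == 0x482C6A1E.  helper_bitswap() narrows the result
   through (int32_t), so the 32-bit value is sign-extended on MIPS64. */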
278 #ifdef TARGET_MIPS64
279 target_ulong helper_dbitswap(target_ulong rt)
281 return bitswap(rt);
283 #endif
285 target_ulong helper_bitswap(target_ulong rt)
287 return (int32_t)bitswap(rt);
290 #ifndef CONFIG_USER_ONLY
292 static inline hwaddr do_translate_address(CPUMIPSState *env,
293 target_ulong address,
294 int rw)
296 hwaddr lladdr;
298 lladdr = cpu_mips_translate_address(env, address, rw);
300 if (lladdr == -1LL) {
301 cpu_loop_exit(CPU(mips_env_get_cpu(env)));
302 } else {
303 return lladdr;
307 #define HELPER_LD_ATOMIC(name, insn) \
308 target_ulong helper_##name(CPUMIPSState *env, target_ulong arg, int mem_idx) \
310 env->lladdr = do_translate_address(env, arg, 0); \
311 env->llval = do_##insn(env, arg, mem_idx); \
312 return env->llval; \
314 HELPER_LD_ATOMIC(ll, lw)
315 #ifdef TARGET_MIPS64
316 HELPER_LD_ATOMIC(lld, ld)
317 #endif
318 #undef HELPER_LD_ATOMIC
320 #define HELPER_ST_ATOMIC(name, ld_insn, st_insn, almask) \
321 target_ulong helper_##name(CPUMIPSState *env, target_ulong arg1, \
322 target_ulong arg2, int mem_idx) \
324 target_long tmp; \
326 if (arg2 & almask) { \
327 env->CP0_BadVAddr = arg2; \
328 helper_raise_exception(env, EXCP_AdES); \
330 if (do_translate_address(env, arg2, 1) == env->lladdr) { \
331 tmp = do_##ld_insn(env, arg2, mem_idx); \
332 if (tmp == env->llval) { \
333 do_##st_insn(env, arg2, arg1, mem_idx); \
334 return 1; \
337 return 0; \
339 HELPER_ST_ATOMIC(sc, lw, sw, 0x3)
340 #ifdef TARGET_MIPS64
341 HELPER_ST_ATOMIC(scd, ld, sd, 0x7)
342 #endif
343 #undef HELPER_ST_ATOMIC
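/* LL/SC emulation sketch: helper_ll (HELPER_LD_ATOMIC) translates the
   virtual address, then records the physical address in env->lladdr and the
   loaded value in env->llval.  helper_sc (HELPER_ST_ATOMIC) sets BadVAddr
   and raises AdES on a misaligned address, and otherwise succeeds (returns
   1) only if the store address translates to the recorded lladdr and the
   memory still contains llval; on failure it returns 0 and stores nothing.
   Note the check is value-based: an intervening store that writes back the
   same value is not detected. */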
344 #endif
346 #ifdef TARGET_WORDS_BIGENDIAN
347 #define GET_LMASK(v) ((v) & 3)
348 #define GET_OFFSET(addr, offset) (addr + (offset))
349 #else
350 #define GET_LMASK(v) (((v) & 3) ^ 3)
351 #define GET_OFFSET(addr, offset) (addr - (offset))
352 #endif
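/* GET_LMASK()/GET_OFFSET() fold the target endianness into the unaligned
   store helpers below.  Worked example for helper_swl on a big-endian
   target with an address whose low two bits are 01 (GET_LMASK == 1): the
   three most-significant bytes of arg1 are stored at addr, addr + 1 and
   addr + 2, i.e. everything up to the next word boundary.  On a
   little-endian target the low bits are XORed with 3 and the offsets are
   subtracted, giving the mirrored behaviour. */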
354 void helper_swl(CPUMIPSState *env, target_ulong arg1, target_ulong arg2,
355 int mem_idx)
357 do_sb(env, arg2, (uint8_t)(arg1 >> 24), mem_idx);
359 if (GET_LMASK(arg2) <= 2)
360 do_sb(env, GET_OFFSET(arg2, 1), (uint8_t)(arg1 >> 16), mem_idx);
362 if (GET_LMASK(arg2) <= 1)
363 do_sb(env, GET_OFFSET(arg2, 2), (uint8_t)(arg1 >> 8), mem_idx);
365 if (GET_LMASK(arg2) == 0)
366 do_sb(env, GET_OFFSET(arg2, 3), (uint8_t)arg1, mem_idx);
369 void helper_swr(CPUMIPSState *env, target_ulong arg1, target_ulong arg2,
370 int mem_idx)
372 do_sb(env, arg2, (uint8_t)arg1, mem_idx);
374 if (GET_LMASK(arg2) >= 1)
375 do_sb(env, GET_OFFSET(arg2, -1), (uint8_t)(arg1 >> 8), mem_idx);
377 if (GET_LMASK(arg2) >= 2)
378 do_sb(env, GET_OFFSET(arg2, -2), (uint8_t)(arg1 >> 16), mem_idx);
380 if (GET_LMASK(arg2) == 3)
381 do_sb(env, GET_OFFSET(arg2, -3), (uint8_t)(arg1 >> 24), mem_idx);
384 #if defined(TARGET_MIPS64)
385 /* "half" load and stores. We must do the memory access inline,
386 or fault handling won't work. */
388 #ifdef TARGET_WORDS_BIGENDIAN
389 #define GET_LMASK64(v) ((v) & 7)
390 #else
391 #define GET_LMASK64(v) (((v) & 7) ^ 7)
392 #endif
394 void helper_sdl(CPUMIPSState *env, target_ulong arg1, target_ulong arg2,
395 int mem_idx)
397 do_sb(env, arg2, (uint8_t)(arg1 >> 56), mem_idx);
399 if (GET_LMASK64(arg2) <= 6)
400 do_sb(env, GET_OFFSET(arg2, 1), (uint8_t)(arg1 >> 48), mem_idx);
402 if (GET_LMASK64(arg2) <= 5)
403 do_sb(env, GET_OFFSET(arg2, 2), (uint8_t)(arg1 >> 40), mem_idx);
405 if (GET_LMASK64(arg2) <= 4)
406 do_sb(env, GET_OFFSET(arg2, 3), (uint8_t)(arg1 >> 32), mem_idx);
408 if (GET_LMASK64(arg2) <= 3)
409 do_sb(env, GET_OFFSET(arg2, 4), (uint8_t)(arg1 >> 24), mem_idx);
411 if (GET_LMASK64(arg2) <= 2)
412 do_sb(env, GET_OFFSET(arg2, 5), (uint8_t)(arg1 >> 16), mem_idx);
414 if (GET_LMASK64(arg2) <= 1)
415 do_sb(env, GET_OFFSET(arg2, 6), (uint8_t)(arg1 >> 8), mem_idx);
417 if (GET_LMASK64(arg2) <= 0)
418 do_sb(env, GET_OFFSET(arg2, 7), (uint8_t)arg1, mem_idx);
421 void helper_sdr(CPUMIPSState *env, target_ulong arg1, target_ulong arg2,
422 int mem_idx)
424 do_sb(env, arg2, (uint8_t)arg1, mem_idx);
426 if (GET_LMASK64(arg2) >= 1)
427 do_sb(env, GET_OFFSET(arg2, -1), (uint8_t)(arg1 >> 8), mem_idx);
429 if (GET_LMASK64(arg2) >= 2)
430 do_sb(env, GET_OFFSET(arg2, -2), (uint8_t)(arg1 >> 16), mem_idx);
432 if (GET_LMASK64(arg2) >= 3)
433 do_sb(env, GET_OFFSET(arg2, -3), (uint8_t)(arg1 >> 24), mem_idx);
435 if (GET_LMASK64(arg2) >= 4)
436 do_sb(env, GET_OFFSET(arg2, -4), (uint8_t)(arg1 >> 32), mem_idx);
438 if (GET_LMASK64(arg2) >= 5)
439 do_sb(env, GET_OFFSET(arg2, -5), (uint8_t)(arg1 >> 40), mem_idx);
441 if (GET_LMASK64(arg2) >= 6)
442 do_sb(env, GET_OFFSET(arg2, -6), (uint8_t)(arg1 >> 48), mem_idx);
444 if (GET_LMASK64(arg2) == 7)
445 do_sb(env, GET_OFFSET(arg2, -7), (uint8_t)(arg1 >> 56), mem_idx);
447 #endif /* TARGET_MIPS64 */
449 static const int multiple_regs[] = { 16, 17, 18, 19, 20, 21, 22, 23, 30 };
451 void helper_lwm(CPUMIPSState *env, target_ulong addr, target_ulong reglist,
452 uint32_t mem_idx)
454 target_ulong base_reglist = reglist & 0xf;
455 target_ulong do_r31 = reglist & 0x10;
457 if (base_reglist > 0 && base_reglist <= ARRAY_SIZE (multiple_regs)) {
458 target_ulong i;
460 for (i = 0; i < base_reglist; i++) {
461 env->active_tc.gpr[multiple_regs[i]] =
462 (target_long)do_lw(env, addr, mem_idx);
463 addr += 4;
467 if (do_r31) {
468 env->active_tc.gpr[31] = (target_long)do_lw(env, addr, mem_idx);
472 void helper_swm(CPUMIPSState *env, target_ulong addr, target_ulong reglist,
473 uint32_t mem_idx)
475 target_ulong base_reglist = reglist & 0xf;
476 target_ulong do_r31 = reglist & 0x10;
478 if (base_reglist > 0 && base_reglist <= ARRAY_SIZE (multiple_regs)) {
479 target_ulong i;
481 for (i = 0; i < base_reglist; i++) {
482 do_sw(env, addr, env->active_tc.gpr[multiple_regs[i]], mem_idx);
483 addr += 4;
487 if (do_r31) {
488 do_sw(env, addr, env->active_tc.gpr[31], mem_idx);
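/* Reglist encoding used by the lwm/swm helpers above: the low four bits
   give how many registers from multiple_regs[] (s0-s7, then fp) to
   transfer at consecutive words starting at addr, and bit 4 additionally
   transfers ra from/to the following word.  Example: reglist == 0x13
   moves s0, s1, s2 at addr, addr + 4, addr + 8 and ra at addr + 12.
   The 64-bit ldm/sdm variants below do the same with 8-byte steps. */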
492 #if defined(TARGET_MIPS64)
493 void helper_ldm(CPUMIPSState *env, target_ulong addr, target_ulong reglist,
494 uint32_t mem_idx)
496 target_ulong base_reglist = reglist & 0xf;
497 target_ulong do_r31 = reglist & 0x10;
499 if (base_reglist > 0 && base_reglist <= ARRAY_SIZE (multiple_regs)) {
500 target_ulong i;
502 for (i = 0; i < base_reglist; i++) {
503 env->active_tc.gpr[multiple_regs[i]] = do_ld(env, addr, mem_idx);
504 addr += 8;
508 if (do_r31) {
509 env->active_tc.gpr[31] = do_ld(env, addr, mem_idx);
513 void helper_sdm(CPUMIPSState *env, target_ulong addr, target_ulong reglist,
514 uint32_t mem_idx)
516 target_ulong base_reglist = reglist & 0xf;
517 target_ulong do_r31 = reglist & 0x10;
519 if (base_reglist > 0 && base_reglist <= ARRAY_SIZE (multiple_regs)) {
520 target_ulong i;
522 for (i = 0; i < base_reglist; i++) {
523 do_sd(env, addr, env->active_tc.gpr[multiple_regs[i]], mem_idx);
524 addr += 8;
528 if (do_r31) {
529 do_sd(env, addr, env->active_tc.gpr[31], mem_idx);
532 #endif
534 #ifndef CONFIG_USER_ONLY
535 /* SMP helpers. */
536 static bool mips_vpe_is_wfi(MIPSCPU *c)
538 CPUState *cpu = CPU(c);
539 CPUMIPSState *env = &c->env;
541 /* If the VPE is halted but otherwise active, it means it's waiting for
542 an interrupt. */
543 return cpu->halted && mips_vpe_active(env);
546 static inline void mips_vpe_wake(MIPSCPU *c)
548     /* Don't set ->halted = 0 directly; let it be done via cpu_has_work
549        because there might be other conditions that require c to stay
550        asleep. */
551 cpu_interrupt(CPU(c), CPU_INTERRUPT_WAKE);
554 static inline void mips_vpe_sleep(MIPSCPU *cpu)
556 CPUState *cs = CPU(cpu);
558 /* The VPE was shut off, really go to bed.
559 Reset any old _WAKE requests. */
560 cs->halted = 1;
561 cpu_reset_interrupt(cs, CPU_INTERRUPT_WAKE);
564 static inline void mips_tc_wake(MIPSCPU *cpu, int tc)
566 CPUMIPSState *c = &cpu->env;
568 /* FIXME: TC reschedule. */
569 if (mips_vpe_active(c) && !mips_vpe_is_wfi(cpu)) {
570 mips_vpe_wake(cpu);
574 static inline void mips_tc_sleep(MIPSCPU *cpu, int tc)
576 CPUMIPSState *c = &cpu->env;
578 /* FIXME: TC reschedule. */
579 if (!mips_vpe_active(c)) {
580 mips_vpe_sleep(cpu);
585 * mips_cpu_map_tc:
586 * @env: CPU from which mapping is performed.
587 * @tc: Should point to an int with the value of the global TC index.
589 * This function will transform @tc into a local index within the
590 * returned #CPUMIPSState.
592 /* FIXME: This code assumes that all VPEs have the same number of TCs,
593 which depends on runtime setup. Can probably be fixed by
594 walking the list of CPUMIPSStates. */
595 static CPUMIPSState *mips_cpu_map_tc(CPUMIPSState *env, int *tc)
597 MIPSCPU *cpu;
598 CPUState *cs;
599 CPUState *other_cs;
600 int vpe_idx;
601 int tc_idx = *tc;
603 if (!(env->CP0_VPEConf0 & (1 << CP0VPEC0_MVP))) {
604 /* Not allowed to address other CPUs. */
605 *tc = env->current_tc;
606 return env;
609 cs = CPU(mips_env_get_cpu(env));
610 vpe_idx = tc_idx / cs->nr_threads;
611 *tc = tc_idx % cs->nr_threads;
612 other_cs = qemu_get_cpu(vpe_idx);
613 if (other_cs == NULL) {
614 return env;
616 cpu = MIPS_CPU(other_cs);
617 return &cpu->env;
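/* Mapping example for mips_cpu_map_tc(): with cs->nr_threads == 2, a
   global TC index of 5 resolves to vpe_idx == 2 and a local *tc of 1, and
   qemu_get_cpu(2) supplies the CPUMIPSState the caller should operate on.
   Without the MVP bit in CP0_VPEConf0, the function falls back to the
   calling CPU's own state and current TC. */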
620 /* The per VPE CP0_Status register shares some fields with the per TC
621 CP0_TCStatus registers. These fields are wired to the same registers,
622 so changes to either of them should be reflected on both registers.
624    Also, EntryHi shares its bottom 8-bit ASID field with TCStatus.
626    These helpers synchronize the registers for a given CPU. */
628 /* Called for updates to CP0_Status. Defined in "cpu.h" for gdbstub.c. */
629 /* static inline void sync_c0_status(CPUMIPSState *env, CPUMIPSState *cpu,
630 int tc); */
632 /* Called for updates to CP0_TCStatus. */
633 static void sync_c0_tcstatus(CPUMIPSState *cpu, int tc,
634 target_ulong v)
636 uint32_t status;
637 uint32_t tcu, tmx, tasid, tksu;
638 uint32_t mask = ((1U << CP0St_CU3)
639 | (1 << CP0St_CU2)
640 | (1 << CP0St_CU1)
641 | (1 << CP0St_CU0)
642 | (1 << CP0St_MX)
643 | (3 << CP0St_KSU));
645 tcu = (v >> CP0TCSt_TCU0) & 0xf;
646 tmx = (v >> CP0TCSt_TMX) & 0x1;
647 tasid = v & 0xff;
648 tksu = (v >> CP0TCSt_TKSU) & 0x3;
650 status = tcu << CP0St_CU0;
651 status |= tmx << CP0St_MX;
652 status |= tksu << CP0St_KSU;
654 cpu->CP0_Status &= ~mask;
655 cpu->CP0_Status |= status;
657 /* Sync the TASID with EntryHi. */
658 cpu->CP0_EntryHi &= ~0xff;
659     cpu->CP0_EntryHi |= tasid;
661 compute_hflags(cpu);
664 /* Called for updates to CP0_EntryHi. */
665 static void sync_c0_entryhi(CPUMIPSState *cpu, int tc)
667 int32_t *tcst;
668 uint32_t asid, v = cpu->CP0_EntryHi;
670 asid = v & 0xff;
672 if (tc == cpu->current_tc) {
673 tcst = &cpu->active_tc.CP0_TCStatus;
674 } else {
675 tcst = &cpu->tcs[tc].CP0_TCStatus;
678 *tcst &= ~0xff;
679 *tcst |= asid;
682 /* CP0 helpers */
683 target_ulong helper_mfc0_mvpcontrol(CPUMIPSState *env)
685 return env->mvp->CP0_MVPControl;
688 target_ulong helper_mfc0_mvpconf0(CPUMIPSState *env)
690 return env->mvp->CP0_MVPConf0;
693 target_ulong helper_mfc0_mvpconf1(CPUMIPSState *env)
695 return env->mvp->CP0_MVPConf1;
698 target_ulong helper_mfc0_random(CPUMIPSState *env)
700 return (int32_t)cpu_mips_get_random(env);
703 target_ulong helper_mfc0_tcstatus(CPUMIPSState *env)
705 return env->active_tc.CP0_TCStatus;
708 target_ulong helper_mftc0_tcstatus(CPUMIPSState *env)
710 int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
711 CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);
713 if (other_tc == other->current_tc)
714 return other->active_tc.CP0_TCStatus;
715 else
716 return other->tcs[other_tc].CP0_TCStatus;
719 target_ulong helper_mfc0_tcbind(CPUMIPSState *env)
721 return env->active_tc.CP0_TCBind;
724 target_ulong helper_mftc0_tcbind(CPUMIPSState *env)
726 int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
727 CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);
729 if (other_tc == other->current_tc)
730 return other->active_tc.CP0_TCBind;
731 else
732 return other->tcs[other_tc].CP0_TCBind;
735 target_ulong helper_mfc0_tcrestart(CPUMIPSState *env)
737 return env->active_tc.PC;
740 target_ulong helper_mftc0_tcrestart(CPUMIPSState *env)
742 int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
743 CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);
745 if (other_tc == other->current_tc)
746 return other->active_tc.PC;
747 else
748 return other->tcs[other_tc].PC;
751 target_ulong helper_mfc0_tchalt(CPUMIPSState *env)
753 return env->active_tc.CP0_TCHalt;
756 target_ulong helper_mftc0_tchalt(CPUMIPSState *env)
758 int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
759 CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);
761 if (other_tc == other->current_tc)
762 return other->active_tc.CP0_TCHalt;
763 else
764 return other->tcs[other_tc].CP0_TCHalt;
767 target_ulong helper_mfc0_tccontext(CPUMIPSState *env)
769 return env->active_tc.CP0_TCContext;
772 target_ulong helper_mftc0_tccontext(CPUMIPSState *env)
774 int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
775 CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);
777 if (other_tc == other->current_tc)
778 return other->active_tc.CP0_TCContext;
779 else
780 return other->tcs[other_tc].CP0_TCContext;
783 target_ulong helper_mfc0_tcschedule(CPUMIPSState *env)
785 return env->active_tc.CP0_TCSchedule;
788 target_ulong helper_mftc0_tcschedule(CPUMIPSState *env)
790 int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
791 CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);
793 if (other_tc == other->current_tc)
794 return other->active_tc.CP0_TCSchedule;
795 else
796 return other->tcs[other_tc].CP0_TCSchedule;
799 target_ulong helper_mfc0_tcschefback(CPUMIPSState *env)
801 return env->active_tc.CP0_TCScheFBack;
804 target_ulong helper_mftc0_tcschefback(CPUMIPSState *env)
806 int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
807 CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);
809 if (other_tc == other->current_tc)
810 return other->active_tc.CP0_TCScheFBack;
811 else
812 return other->tcs[other_tc].CP0_TCScheFBack;
815 target_ulong helper_mfc0_count(CPUMIPSState *env)
817 return (int32_t)cpu_mips_get_count(env);
820 target_ulong helper_mftc0_entryhi(CPUMIPSState *env)
822 int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
823 CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);
825 return other->CP0_EntryHi;
828 target_ulong helper_mftc0_cause(CPUMIPSState *env)
830 int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
831 int32_t tccause;
832 CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);
834 if (other_tc == other->current_tc) {
835 tccause = other->CP0_Cause;
836 } else {
837 tccause = other->CP0_Cause;
840 return tccause;
843 target_ulong helper_mftc0_status(CPUMIPSState *env)
845 int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
846 CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);
848 return other->CP0_Status;
851 target_ulong helper_mfc0_lladdr(CPUMIPSState *env)
853 return (int32_t)(env->lladdr >> env->CP0_LLAddr_shift);
856 target_ulong helper_mfc0_watchlo(CPUMIPSState *env, uint32_t sel)
858 return (int32_t)env->CP0_WatchLo[sel];
861 target_ulong helper_mfc0_watchhi(CPUMIPSState *env, uint32_t sel)
863 return env->CP0_WatchHi[sel];
866 target_ulong helper_mfc0_debug(CPUMIPSState *env)
868 target_ulong t0 = env->CP0_Debug;
869 if (env->hflags & MIPS_HFLAG_DM)
870 t0 |= 1 << CP0DB_DM;
872 return t0;
875 target_ulong helper_mftc0_debug(CPUMIPSState *env)
877 int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
878 int32_t tcstatus;
879 CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);
881 if (other_tc == other->current_tc)
882 tcstatus = other->active_tc.CP0_Debug_tcstatus;
883 else
884 tcstatus = other->tcs[other_tc].CP0_Debug_tcstatus;
886 /* XXX: Might be wrong, check with EJTAG spec. */
887 return (other->CP0_Debug & ~((1 << CP0DB_SSt) | (1 << CP0DB_Halt))) |
888 (tcstatus & ((1 << CP0DB_SSt) | (1 << CP0DB_Halt)));
891 #if defined(TARGET_MIPS64)
892 target_ulong helper_dmfc0_tcrestart(CPUMIPSState *env)
894 return env->active_tc.PC;
897 target_ulong helper_dmfc0_tchalt(CPUMIPSState *env)
899 return env->active_tc.CP0_TCHalt;
902 target_ulong helper_dmfc0_tccontext(CPUMIPSState *env)
904 return env->active_tc.CP0_TCContext;
907 target_ulong helper_dmfc0_tcschedule(CPUMIPSState *env)
909 return env->active_tc.CP0_TCSchedule;
912 target_ulong helper_dmfc0_tcschefback(CPUMIPSState *env)
914 return env->active_tc.CP0_TCScheFBack;
917 target_ulong helper_dmfc0_lladdr(CPUMIPSState *env)
919 return env->lladdr >> env->CP0_LLAddr_shift;
922 target_ulong helper_dmfc0_watchlo(CPUMIPSState *env, uint32_t sel)
924 return env->CP0_WatchLo[sel];
926 #endif /* TARGET_MIPS64 */
928 void helper_mtc0_index(CPUMIPSState *env, target_ulong arg1)
930 uint32_t index_p = env->CP0_Index & 0x80000000;
931 uint32_t tlb_index = arg1 & 0x7fffffff;
932 if (tlb_index < env->tlb->nb_tlb) {
933 if (env->insn_flags & ISA_MIPS32R6) {
934 index_p |= arg1 & 0x80000000;
936 env->CP0_Index = index_p | tlb_index;
940 void helper_mtc0_mvpcontrol(CPUMIPSState *env, target_ulong arg1)
942 uint32_t mask = 0;
943 uint32_t newval;
945 if (env->CP0_VPEConf0 & (1 << CP0VPEC0_MVP))
946 mask |= (1 << CP0MVPCo_CPA) | (1 << CP0MVPCo_VPC) |
947 (1 << CP0MVPCo_EVP);
948 if (env->mvp->CP0_MVPControl & (1 << CP0MVPCo_VPC))
949 mask |= (1 << CP0MVPCo_STLB);
950 newval = (env->mvp->CP0_MVPControl & ~mask) | (arg1 & mask);
952 // TODO: Enable/disable shared TLB, enable/disable VPEs.
954 env->mvp->CP0_MVPControl = newval;
957 void helper_mtc0_vpecontrol(CPUMIPSState *env, target_ulong arg1)
959 uint32_t mask;
960 uint32_t newval;
962 mask = (1 << CP0VPECo_YSI) | (1 << CP0VPECo_GSI) |
963 (1 << CP0VPECo_TE) | (0xff << CP0VPECo_TargTC);
964 newval = (env->CP0_VPEControl & ~mask) | (arg1 & mask);
966 /* Yield scheduler intercept not implemented. */
967 /* Gating storage scheduler intercept not implemented. */
969 // TODO: Enable/disable TCs.
971 env->CP0_VPEControl = newval;
974 void helper_mttc0_vpecontrol(CPUMIPSState *env, target_ulong arg1)
976 int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
977 CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);
978 uint32_t mask;
979 uint32_t newval;
981 mask = (1 << CP0VPECo_YSI) | (1 << CP0VPECo_GSI) |
982 (1 << CP0VPECo_TE) | (0xff << CP0VPECo_TargTC);
983 newval = (other->CP0_VPEControl & ~mask) | (arg1 & mask);
985 /* TODO: Enable/disable TCs. */
987 other->CP0_VPEControl = newval;
990 target_ulong helper_mftc0_vpecontrol(CPUMIPSState *env)
992 int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
993 CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);
994     /* FIXME: Mask away bits that return zero on read. */
995 return other->CP0_VPEControl;
998 target_ulong helper_mftc0_vpeconf0(CPUMIPSState *env)
1000 int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
1001 CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);
1003 return other->CP0_VPEConf0;
1006 void helper_mtc0_vpeconf0(CPUMIPSState *env, target_ulong arg1)
1008 uint32_t mask = 0;
1009 uint32_t newval;
1011 if (env->CP0_VPEConf0 & (1 << CP0VPEC0_MVP)) {
1012 if (env->CP0_VPEConf0 & (1 << CP0VPEC0_VPA))
1013 mask |= (0xff << CP0VPEC0_XTC);
1014 mask |= (1 << CP0VPEC0_MVP) | (1 << CP0VPEC0_VPA);
1016 newval = (env->CP0_VPEConf0 & ~mask) | (arg1 & mask);
1018 // TODO: TC exclusive handling due to ERL/EXL.
1020 env->CP0_VPEConf0 = newval;
1023 void helper_mttc0_vpeconf0(CPUMIPSState *env, target_ulong arg1)
1025 int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
1026 CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);
1027 uint32_t mask = 0;
1028 uint32_t newval;
1030 mask |= (1 << CP0VPEC0_MVP) | (1 << CP0VPEC0_VPA);
1031 newval = (other->CP0_VPEConf0 & ~mask) | (arg1 & mask);
1033 /* TODO: TC exclusive handling due to ERL/EXL. */
1034 other->CP0_VPEConf0 = newval;
1037 void helper_mtc0_vpeconf1(CPUMIPSState *env, target_ulong arg1)
1039 uint32_t mask = 0;
1040 uint32_t newval;
1042 if (env->mvp->CP0_MVPControl & (1 << CP0MVPCo_VPC))
1043 mask |= (0xff << CP0VPEC1_NCX) | (0xff << CP0VPEC1_NCP2) |
1044 (0xff << CP0VPEC1_NCP1);
1045 newval = (env->CP0_VPEConf1 & ~mask) | (arg1 & mask);
1047 /* UDI not implemented. */
1048 /* CP2 not implemented. */
1050 // TODO: Handle FPU (CP1) binding.
1052 env->CP0_VPEConf1 = newval;
1055 void helper_mtc0_yqmask(CPUMIPSState *env, target_ulong arg1)
1057 /* Yield qualifier inputs not implemented. */
1058 env->CP0_YQMask = 0x00000000;
1061 void helper_mtc0_vpeopt(CPUMIPSState *env, target_ulong arg1)
1063 env->CP0_VPEOpt = arg1 & 0x0000ffff;
1066 void helper_mtc0_entrylo0(CPUMIPSState *env, target_ulong arg1)
1068 /* Large physaddr (PABITS) not implemented */
1069 /* 1k pages not implemented */
1070 target_ulong rxi = arg1 & (env->CP0_PageGrain & (3u << CP0PG_XIE));
1071 env->CP0_EntryLo0 = (arg1 & 0x3FFFFFFF) | (rxi << (CP0EnLo_XI - 30));
1074 #if defined(TARGET_MIPS64)
1075 void helper_dmtc0_entrylo0(CPUMIPSState *env, uint64_t arg1)
1077 uint64_t rxi = arg1 & ((env->CP0_PageGrain & (3ull << CP0PG_XIE)) << 32);
1078 env->CP0_EntryLo0 = (arg1 & 0x3FFFFFFF) | rxi;
1080 #endif
1082 void helper_mtc0_tcstatus(CPUMIPSState *env, target_ulong arg1)
1084 uint32_t mask = env->CP0_TCStatus_rw_bitmask;
1085 uint32_t newval;
1087 newval = (env->active_tc.CP0_TCStatus & ~mask) | (arg1 & mask);
1089 env->active_tc.CP0_TCStatus = newval;
1090 sync_c0_tcstatus(env, env->current_tc, newval);
1093 void helper_mttc0_tcstatus(CPUMIPSState *env, target_ulong arg1)
1095 int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
1096 CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);
1098 if (other_tc == other->current_tc)
1099 other->active_tc.CP0_TCStatus = arg1;
1100 else
1101 other->tcs[other_tc].CP0_TCStatus = arg1;
1102 sync_c0_tcstatus(other, other_tc, arg1);
1105 void helper_mtc0_tcbind(CPUMIPSState *env, target_ulong arg1)
1107 uint32_t mask = (1 << CP0TCBd_TBE);
1108 uint32_t newval;
1110 if (env->mvp->CP0_MVPControl & (1 << CP0MVPCo_VPC))
1111 mask |= (1 << CP0TCBd_CurVPE);
1112 newval = (env->active_tc.CP0_TCBind & ~mask) | (arg1 & mask);
1113 env->active_tc.CP0_TCBind = newval;
1116 void helper_mttc0_tcbind(CPUMIPSState *env, target_ulong arg1)
1118 int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
1119 uint32_t mask = (1 << CP0TCBd_TBE);
1120 uint32_t newval;
1121 CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);
1123 if (other->mvp->CP0_MVPControl & (1 << CP0MVPCo_VPC))
1124 mask |= (1 << CP0TCBd_CurVPE);
1125 if (other_tc == other->current_tc) {
1126 newval = (other->active_tc.CP0_TCBind & ~mask) | (arg1 & mask);
1127 other->active_tc.CP0_TCBind = newval;
1128 } else {
1129 newval = (other->tcs[other_tc].CP0_TCBind & ~mask) | (arg1 & mask);
1130 other->tcs[other_tc].CP0_TCBind = newval;
1134 void helper_mtc0_tcrestart(CPUMIPSState *env, target_ulong arg1)
1136 env->active_tc.PC = arg1;
1137 env->active_tc.CP0_TCStatus &= ~(1 << CP0TCSt_TDS);
1138 env->lladdr = 0ULL;
1139 /* MIPS16 not implemented. */
1142 void helper_mttc0_tcrestart(CPUMIPSState *env, target_ulong arg1)
1144 int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
1145 CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);
1147 if (other_tc == other->current_tc) {
1148 other->active_tc.PC = arg1;
1149 other->active_tc.CP0_TCStatus &= ~(1 << CP0TCSt_TDS);
1150 other->lladdr = 0ULL;
1151 /* MIPS16 not implemented. */
1152 } else {
1153 other->tcs[other_tc].PC = arg1;
1154 other->tcs[other_tc].CP0_TCStatus &= ~(1 << CP0TCSt_TDS);
1155 other->lladdr = 0ULL;
1156 /* MIPS16 not implemented. */
1160 void helper_mtc0_tchalt(CPUMIPSState *env, target_ulong arg1)
1162 MIPSCPU *cpu = mips_env_get_cpu(env);
1164 env->active_tc.CP0_TCHalt = arg1 & 0x1;
1166 // TODO: Halt TC / Restart (if allocated+active) TC.
1167 if (env->active_tc.CP0_TCHalt & 1) {
1168 mips_tc_sleep(cpu, env->current_tc);
1169 } else {
1170 mips_tc_wake(cpu, env->current_tc);
1174 void helper_mttc0_tchalt(CPUMIPSState *env, target_ulong arg1)
1176 int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
1177 CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);
1178 MIPSCPU *other_cpu = mips_env_get_cpu(other);
1180 // TODO: Halt TC / Restart (if allocated+active) TC.
1182 if (other_tc == other->current_tc)
1183 other->active_tc.CP0_TCHalt = arg1;
1184 else
1185 other->tcs[other_tc].CP0_TCHalt = arg1;
1187 if (arg1 & 1) {
1188 mips_tc_sleep(other_cpu, other_tc);
1189 } else {
1190 mips_tc_wake(other_cpu, other_tc);
1194 void helper_mtc0_tccontext(CPUMIPSState *env, target_ulong arg1)
1196 env->active_tc.CP0_TCContext = arg1;
1199 void helper_mttc0_tccontext(CPUMIPSState *env, target_ulong arg1)
1201 int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
1202 CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);
1204 if (other_tc == other->current_tc)
1205 other->active_tc.CP0_TCContext = arg1;
1206 else
1207 other->tcs[other_tc].CP0_TCContext = arg1;
1210 void helper_mtc0_tcschedule(CPUMIPSState *env, target_ulong arg1)
1212 env->active_tc.CP0_TCSchedule = arg1;
1215 void helper_mttc0_tcschedule(CPUMIPSState *env, target_ulong arg1)
1217 int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
1218 CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);
1220 if (other_tc == other->current_tc)
1221 other->active_tc.CP0_TCSchedule = arg1;
1222 else
1223 other->tcs[other_tc].CP0_TCSchedule = arg1;
1226 void helper_mtc0_tcschefback(CPUMIPSState *env, target_ulong arg1)
1228 env->active_tc.CP0_TCScheFBack = arg1;
1231 void helper_mttc0_tcschefback(CPUMIPSState *env, target_ulong arg1)
1233 int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
1234 CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);
1236 if (other_tc == other->current_tc)
1237 other->active_tc.CP0_TCScheFBack = arg1;
1238 else
1239 other->tcs[other_tc].CP0_TCScheFBack = arg1;
1242 void helper_mtc0_entrylo1(CPUMIPSState *env, target_ulong arg1)
1244 /* Large physaddr (PABITS) not implemented */
1245 /* 1k pages not implemented */
1246 target_ulong rxi = arg1 & (env->CP0_PageGrain & (3u << CP0PG_XIE));
1247 env->CP0_EntryLo1 = (arg1 & 0x3FFFFFFF) | (rxi << (CP0EnLo_XI - 30));
1250 #if defined(TARGET_MIPS64)
1251 void helper_dmtc0_entrylo1(CPUMIPSState *env, uint64_t arg1)
1253 uint64_t rxi = arg1 & ((env->CP0_PageGrain & (3ull << CP0PG_XIE)) << 32);
1254 env->CP0_EntryLo1 = (arg1 & 0x3FFFFFFF) | rxi;
1256 #endif
1258 void helper_mtc0_context(CPUMIPSState *env, target_ulong arg1)
1260 env->CP0_Context = (env->CP0_Context & 0x007FFFFF) | (arg1 & ~0x007FFFFF);
1263 void helper_mtc0_pagemask(CPUMIPSState *env, target_ulong arg1)
1265 uint64_t mask = arg1 >> (TARGET_PAGE_BITS + 1);
1266 if (!(env->insn_flags & ISA_MIPS32R6) || (arg1 == ~0) ||
1267 (mask == 0x0000 || mask == 0x0003 || mask == 0x000F ||
1268 mask == 0x003F || mask == 0x00FF || mask == 0x03FF ||
1269 mask == 0x0FFF || mask == 0x3FFF || mask == 0xFFFF)) {
1270 env->CP0_PageMask = arg1 & (0x1FFFFFFF & (TARGET_PAGE_MASK << 1));
1274 void helper_mtc0_pagegrain(CPUMIPSState *env, target_ulong arg1)
1276 /* SmartMIPS not implemented */
1277 /* Large physaddr (PABITS) not implemented */
1278 /* 1k pages not implemented */
1279 env->CP0_PageGrain = (arg1 & env->CP0_PageGrain_rw_bitmask) |
1280 (env->CP0_PageGrain & ~env->CP0_PageGrain_rw_bitmask);
1283 void helper_mtc0_wired(CPUMIPSState *env, target_ulong arg1)
1285 if (env->insn_flags & ISA_MIPS32R6) {
1286 if (arg1 < env->tlb->nb_tlb) {
1287 env->CP0_Wired = arg1;
1289 } else {
1290 env->CP0_Wired = arg1 % env->tlb->nb_tlb;
1294 void helper_mtc0_srsconf0(CPUMIPSState *env, target_ulong arg1)
1296 env->CP0_SRSConf0 |= arg1 & env->CP0_SRSConf0_rw_bitmask;
1299 void helper_mtc0_srsconf1(CPUMIPSState *env, target_ulong arg1)
1301 env->CP0_SRSConf1 |= arg1 & env->CP0_SRSConf1_rw_bitmask;
1304 void helper_mtc0_srsconf2(CPUMIPSState *env, target_ulong arg1)
1306 env->CP0_SRSConf2 |= arg1 & env->CP0_SRSConf2_rw_bitmask;
1309 void helper_mtc0_srsconf3(CPUMIPSState *env, target_ulong arg1)
1311 env->CP0_SRSConf3 |= arg1 & env->CP0_SRSConf3_rw_bitmask;
1314 void helper_mtc0_srsconf4(CPUMIPSState *env, target_ulong arg1)
1316 env->CP0_SRSConf4 |= arg1 & env->CP0_SRSConf4_rw_bitmask;
1319 void helper_mtc0_hwrena(CPUMIPSState *env, target_ulong arg1)
1321 uint32_t mask = 0x0000000F;
1323 if (env->CP0_Config3 & (1 << CP0C3_ULRI)) {
1324 mask |= (1 << 29);
1326 if (arg1 & (1 << 29)) {
1327 env->hflags |= MIPS_HFLAG_HWRENA_ULR;
1328 } else {
1329 env->hflags &= ~MIPS_HFLAG_HWRENA_ULR;
1333 env->CP0_HWREna = arg1 & mask;
1336 void helper_mtc0_count(CPUMIPSState *env, target_ulong arg1)
1338 cpu_mips_store_count(env, arg1);
1341 void helper_mtc0_entryhi(CPUMIPSState *env, target_ulong arg1)
1343 target_ulong old, val, mask;
1344 mask = (TARGET_PAGE_MASK << 1) | 0xFF;
1345 if (((env->CP0_Config4 >> CP0C4_IE) & 0x3) >= 2) {
1346 mask |= 1 << CP0EnHi_EHINV;
1349 /* 1k pages not implemented */
1350 #if defined(TARGET_MIPS64)
1351 if (env->insn_flags & ISA_MIPS32R6) {
1352 int entryhi_r = extract64(arg1, 62, 2);
1353 int config0_at = extract32(env->CP0_Config0, 13, 2);
1354 bool no_supervisor = (env->CP0_Status_rw_bitmask & 0x8) == 0;
1355 if ((entryhi_r == 2) ||
1356 (entryhi_r == 1 && (no_supervisor || config0_at == 1))) {
1357 /* skip EntryHi.R field if new value is reserved */
1358 mask &= ~(0x3ull << 62);
1361 mask &= env->SEGMask;
1362 #endif
1363 old = env->CP0_EntryHi;
1364 val = (arg1 & mask) | (old & ~mask);
1365 env->CP0_EntryHi = val;
1366 if (env->CP0_Config3 & (1 << CP0C3_MT)) {
1367 sync_c0_entryhi(env, env->current_tc);
1369 /* If the ASID changes, flush qemu's TLB. */
1370 if ((old & 0xFF) != (val & 0xFF))
1371 cpu_mips_tlb_flush(env, 1);
1374 void helper_mttc0_entryhi(CPUMIPSState *env, target_ulong arg1)
1376 int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
1377 CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);
1379 other->CP0_EntryHi = arg1;
1380 sync_c0_entryhi(other, other_tc);
1383 void helper_mtc0_compare(CPUMIPSState *env, target_ulong arg1)
1385 cpu_mips_store_compare(env, arg1);
1388 void helper_mtc0_status(CPUMIPSState *env, target_ulong arg1)
1390 MIPSCPU *cpu = mips_env_get_cpu(env);
1391 uint32_t val, old;
1393 old = env->CP0_Status;
1394 cpu_mips_store_status(env, arg1);
1395 val = env->CP0_Status;
1397 if (qemu_loglevel_mask(CPU_LOG_EXEC)) {
1398 qemu_log("Status %08x (%08x) => %08x (%08x) Cause %08x",
1399 old, old & env->CP0_Cause & CP0Ca_IP_mask,
1400 val, val & env->CP0_Cause & CP0Ca_IP_mask,
1401 env->CP0_Cause);
1402 switch (env->hflags & MIPS_HFLAG_KSU) {
1403 case MIPS_HFLAG_UM: qemu_log(", UM\n"); break;
1404 case MIPS_HFLAG_SM: qemu_log(", SM\n"); break;
1405 case MIPS_HFLAG_KM: qemu_log("\n"); break;
1406 default:
1407 cpu_abort(CPU(cpu), "Invalid MMU mode!\n");
1408 break;
1413 void helper_mttc0_status(CPUMIPSState *env, target_ulong arg1)
1415 int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
1416 uint32_t mask = env->CP0_Status_rw_bitmask & ~0xf1000018;
1417 CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);
1419 other->CP0_Status = (other->CP0_Status & ~mask) | (arg1 & mask);
1420 sync_c0_status(env, other, other_tc);
1423 void helper_mtc0_intctl(CPUMIPSState *env, target_ulong arg1)
1425 /* vectored interrupts not implemented, no performance counters. */
1426 env->CP0_IntCtl = (env->CP0_IntCtl & ~0x000003e0) | (arg1 & 0x000003e0);
1429 void helper_mtc0_srsctl(CPUMIPSState *env, target_ulong arg1)
1431 uint32_t mask = (0xf << CP0SRSCtl_ESS) | (0xf << CP0SRSCtl_PSS);
1432 env->CP0_SRSCtl = (env->CP0_SRSCtl & ~mask) | (arg1 & mask);
1435 void helper_mtc0_cause(CPUMIPSState *env, target_ulong arg1)
1437 cpu_mips_store_cause(env, arg1);
1440 void helper_mttc0_cause(CPUMIPSState *env, target_ulong arg1)
1442 int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
1443 CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);
1445 cpu_mips_store_cause(other, arg1);
1448 target_ulong helper_mftc0_epc(CPUMIPSState *env)
1450 int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
1451 CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);
1453 return other->CP0_EPC;
1456 target_ulong helper_mftc0_ebase(CPUMIPSState *env)
1458 int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
1459 CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);
1461 return other->CP0_EBase;
1464 void helper_mtc0_ebase(CPUMIPSState *env, target_ulong arg1)
1466 /* vectored interrupts not implemented */
1467 env->CP0_EBase = (env->CP0_EBase & ~0x3FFFF000) | (arg1 & 0x3FFFF000);
1470 void helper_mttc0_ebase(CPUMIPSState *env, target_ulong arg1)
1472 int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
1473 CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);
1474 other->CP0_EBase = (other->CP0_EBase & ~0x3FFFF000) | (arg1 & 0x3FFFF000);
1477 target_ulong helper_mftc0_configx(CPUMIPSState *env, target_ulong idx)
1479 int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
1480 CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);
1482 switch (idx) {
1483 case 0: return other->CP0_Config0;
1484 case 1: return other->CP0_Config1;
1485 case 2: return other->CP0_Config2;
1486 case 3: return other->CP0_Config3;
1487 /* 4 and 5 are reserved. */
1488 case 6: return other->CP0_Config6;
1489 case 7: return other->CP0_Config7;
1490 default:
1491 break;
1493 return 0;
1496 void helper_mtc0_config0(CPUMIPSState *env, target_ulong arg1)
1498 env->CP0_Config0 = (env->CP0_Config0 & 0x81FFFFF8) | (arg1 & 0x00000007);
1501 void helper_mtc0_config2(CPUMIPSState *env, target_ulong arg1)
1503 /* tertiary/secondary caches not implemented */
1504 env->CP0_Config2 = (env->CP0_Config2 & 0x8FFF0FFF);
1507 void helper_mtc0_config3(CPUMIPSState *env, target_ulong arg1)
1509 if (env->insn_flags & ASE_MICROMIPS) {
1510 env->CP0_Config3 = (env->CP0_Config3 & ~(1 << CP0C3_ISA_ON_EXC)) |
1511 (arg1 & (1 << CP0C3_ISA_ON_EXC));
1515 void helper_mtc0_config4(CPUMIPSState *env, target_ulong arg1)
1517 env->CP0_Config4 = (env->CP0_Config4 & (~env->CP0_Config4_rw_bitmask)) |
1518 (arg1 & env->CP0_Config4_rw_bitmask);
1521 void helper_mtc0_config5(CPUMIPSState *env, target_ulong arg1)
1523 env->CP0_Config5 = (env->CP0_Config5 & (~env->CP0_Config5_rw_bitmask)) |
1524 (arg1 & env->CP0_Config5_rw_bitmask);
1525 compute_hflags(env);
1528 void helper_mtc0_lladdr(CPUMIPSState *env, target_ulong arg1)
1530 target_long mask = env->CP0_LLAddr_rw_bitmask;
1531 arg1 = arg1 << env->CP0_LLAddr_shift;
1532 env->lladdr = (env->lladdr & ~mask) | (arg1 & mask);
1535 void helper_mtc0_watchlo(CPUMIPSState *env, target_ulong arg1, uint32_t sel)
1537 /* Watch exceptions for instructions, data loads, data stores
1538 not implemented. */
1539 env->CP0_WatchLo[sel] = (arg1 & ~0x7);
1542 void helper_mtc0_watchhi(CPUMIPSState *env, target_ulong arg1, uint32_t sel)
1544 env->CP0_WatchHi[sel] = (arg1 & 0x40FF0FF8);
1545 env->CP0_WatchHi[sel] &= ~(env->CP0_WatchHi[sel] & arg1 & 0x7);
1548 void helper_mtc0_xcontext(CPUMIPSState *env, target_ulong arg1)
1550 target_ulong mask = (1ULL << (env->SEGBITS - 7)) - 1;
1551 env->CP0_XContext = (env->CP0_XContext & mask) | (arg1 & ~mask);
1554 void helper_mtc0_framemask(CPUMIPSState *env, target_ulong arg1)
1556 env->CP0_Framemask = arg1; /* XXX */
1559 void helper_mtc0_debug(CPUMIPSState *env, target_ulong arg1)
1561 env->CP0_Debug = (env->CP0_Debug & 0x8C03FC1F) | (arg1 & 0x13300120);
1562 if (arg1 & (1 << CP0DB_DM))
1563 env->hflags |= MIPS_HFLAG_DM;
1564 else
1565 env->hflags &= ~MIPS_HFLAG_DM;
1568 void helper_mttc0_debug(CPUMIPSState *env, target_ulong arg1)
1570 int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
1571 uint32_t val = arg1 & ((1 << CP0DB_SSt) | (1 << CP0DB_Halt));
1572 CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);
1574 /* XXX: Might be wrong, check with EJTAG spec. */
1575 if (other_tc == other->current_tc)
1576 other->active_tc.CP0_Debug_tcstatus = val;
1577 else
1578 other->tcs[other_tc].CP0_Debug_tcstatus = val;
1579 other->CP0_Debug = (other->CP0_Debug &
1580 ((1 << CP0DB_SSt) | (1 << CP0DB_Halt))) |
1581 (arg1 & ~((1 << CP0DB_SSt) | (1 << CP0DB_Halt)));
1584 void helper_mtc0_performance0(CPUMIPSState *env, target_ulong arg1)
1586 env->CP0_Performance0 = arg1 & 0x000007ff;
1589 void helper_mtc0_taglo(CPUMIPSState *env, target_ulong arg1)
1591 env->CP0_TagLo = arg1 & 0xFFFFFCF6;
1594 void helper_mtc0_datalo(CPUMIPSState *env, target_ulong arg1)
1596 env->CP0_DataLo = arg1; /* XXX */
1599 void helper_mtc0_taghi(CPUMIPSState *env, target_ulong arg1)
1601 env->CP0_TagHi = arg1; /* XXX */
1604 void helper_mtc0_datahi(CPUMIPSState *env, target_ulong arg1)
1606 env->CP0_DataHi = arg1; /* XXX */
1609 /* MIPS MT functions */
1610 target_ulong helper_mftgpr(CPUMIPSState *env, uint32_t sel)
1612 int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
1613 CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);
1615 if (other_tc == other->current_tc)
1616 return other->active_tc.gpr[sel];
1617 else
1618 return other->tcs[other_tc].gpr[sel];
1621 target_ulong helper_mftlo(CPUMIPSState *env, uint32_t sel)
1623 int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
1624 CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);
1626 if (other_tc == other->current_tc)
1627 return other->active_tc.LO[sel];
1628 else
1629 return other->tcs[other_tc].LO[sel];
1632 target_ulong helper_mfthi(CPUMIPSState *env, uint32_t sel)
1634 int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
1635 CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);
1637 if (other_tc == other->current_tc)
1638 return other->active_tc.HI[sel];
1639 else
1640 return other->tcs[other_tc].HI[sel];
1643 target_ulong helper_mftacx(CPUMIPSState *env, uint32_t sel)
1645 int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
1646 CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);
1648 if (other_tc == other->current_tc)
1649 return other->active_tc.ACX[sel];
1650 else
1651 return other->tcs[other_tc].ACX[sel];
1654 target_ulong helper_mftdsp(CPUMIPSState *env)
1656 int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
1657 CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);
1659 if (other_tc == other->current_tc)
1660 return other->active_tc.DSPControl;
1661 else
1662 return other->tcs[other_tc].DSPControl;
1665 void helper_mttgpr(CPUMIPSState *env, target_ulong arg1, uint32_t sel)
1667 int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
1668 CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);
1670 if (other_tc == other->current_tc)
1671 other->active_tc.gpr[sel] = arg1;
1672 else
1673 other->tcs[other_tc].gpr[sel] = arg1;
1676 void helper_mttlo(CPUMIPSState *env, target_ulong arg1, uint32_t sel)
1678 int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
1679 CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);
1681 if (other_tc == other->current_tc)
1682 other->active_tc.LO[sel] = arg1;
1683 else
1684 other->tcs[other_tc].LO[sel] = arg1;
1687 void helper_mtthi(CPUMIPSState *env, target_ulong arg1, uint32_t sel)
1689 int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
1690 CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);
1692 if (other_tc == other->current_tc)
1693 other->active_tc.HI[sel] = arg1;
1694 else
1695 other->tcs[other_tc].HI[sel] = arg1;
1698 void helper_mttacx(CPUMIPSState *env, target_ulong arg1, uint32_t sel)
1700 int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
1701 CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);
1703 if (other_tc == other->current_tc)
1704 other->active_tc.ACX[sel] = arg1;
1705 else
1706 other->tcs[other_tc].ACX[sel] = arg1;
1709 void helper_mttdsp(CPUMIPSState *env, target_ulong arg1)
1711 int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
1712 CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);
1714 if (other_tc == other->current_tc)
1715 other->active_tc.DSPControl = arg1;
1716 else
1717 other->tcs[other_tc].DSPControl = arg1;
1720 /* MIPS MT functions */
1721 target_ulong helper_dmt(void)
1723 // TODO
1724 return 0;
1727 target_ulong helper_emt(void)
1729 // TODO
1730 return 0;
1733 target_ulong helper_dvpe(CPUMIPSState *env)
1735 CPUState *other_cs = first_cpu;
1736 target_ulong prev = env->mvp->CP0_MVPControl;
1738 CPU_FOREACH(other_cs) {
1739 MIPSCPU *other_cpu = MIPS_CPU(other_cs);
1740 /* Turn off all VPEs except the one executing the dvpe. */
1741 if (&other_cpu->env != env) {
1742 other_cpu->env.mvp->CP0_MVPControl &= ~(1 << CP0MVPCo_EVP);
1743 mips_vpe_sleep(other_cpu);
1746 return prev;
1749 target_ulong helper_evpe(CPUMIPSState *env)
1751 CPUState *other_cs = first_cpu;
1752 target_ulong prev = env->mvp->CP0_MVPControl;
1754 CPU_FOREACH(other_cs) {
1755 MIPSCPU *other_cpu = MIPS_CPU(other_cs);
1757 if (&other_cpu->env != env
1758 /* If the VPE is WFI, don't disturb its sleep. */
1759 && !mips_vpe_is_wfi(other_cpu)) {
1760 /* Enable the VPE. */
1761 other_cpu->env.mvp->CP0_MVPControl |= (1 << CP0MVPCo_EVP);
1762 mips_vpe_wake(other_cpu); /* And wake it up. */
1765 return prev;
1767 #endif /* !CONFIG_USER_ONLY */
1769 void helper_fork(target_ulong arg1, target_ulong arg2)
1771 // arg1 = rt, arg2 = rs
1772 // TODO: store to TC register
1775 target_ulong helper_yield(CPUMIPSState *env, target_ulong arg)
1777 target_long arg1 = arg;
1779 if (arg1 < 0) {
1780 /* No scheduling policy implemented. */
1781 if (arg1 != -2) {
1782 if (env->CP0_VPEControl & (1 << CP0VPECo_YSI) &&
1783 env->active_tc.CP0_TCStatus & (1 << CP0TCSt_DT)) {
1784 env->CP0_VPEControl &= ~(0x7 << CP0VPECo_EXCPT);
1785 env->CP0_VPEControl |= 4 << CP0VPECo_EXCPT;
1786 helper_raise_exception(env, EXCP_THREAD);
1789 } else if (arg1 == 0) {
1790 if (0 /* TODO: TC underflow */) {
1791 env->CP0_VPEControl &= ~(0x7 << CP0VPECo_EXCPT);
1792 helper_raise_exception(env, EXCP_THREAD);
1793 } else {
1794 // TODO: Deallocate TC
1796 } else if (arg1 > 0) {
1797 /* Yield qualifier inputs not implemented. */
1798 env->CP0_VPEControl &= ~(0x7 << CP0VPECo_EXCPT);
1799 env->CP0_VPEControl |= 2 << CP0VPECo_EXCPT;
1800 helper_raise_exception(env, EXCP_THREAD);
1802 return env->CP0_YQMask;
1805 #ifndef CONFIG_USER_ONLY
1806 /* TLB management */
1807 static void cpu_mips_tlb_flush (CPUMIPSState *env, int flush_global)
1809 MIPSCPU *cpu = mips_env_get_cpu(env);
1811 /* Flush qemu's TLB and discard all shadowed entries. */
1812 tlb_flush(CPU(cpu), flush_global);
1813 env->tlb->tlb_in_use = env->tlb->nb_tlb;
1816 static void r4k_mips_tlb_flush_extra (CPUMIPSState *env, int first)
1818 /* Discard entries from env->tlb[first] onwards. */
1819 while (env->tlb->tlb_in_use > first) {
1820 r4k_invalidate_tlb(env, --env->tlb->tlb_in_use, 0);
1824 static void r4k_fill_tlb(CPUMIPSState *env, int idx)
1826 r4k_tlb_t *tlb;
1828 /* XXX: detect conflicting TLBs and raise a MCHECK exception when needed */
1829 tlb = &env->tlb->mmu.r4k.tlb[idx];
1830 if (env->CP0_EntryHi & (1 << CP0EnHi_EHINV)) {
1831 tlb->EHINV = 1;
1832 return;
1834 tlb->EHINV = 0;
1835 tlb->VPN = env->CP0_EntryHi & (TARGET_PAGE_MASK << 1);
1836 #if defined(TARGET_MIPS64)
1837 tlb->VPN &= env->SEGMask;
1838 #endif
1839 tlb->ASID = env->CP0_EntryHi & 0xFF;
1840 tlb->PageMask = env->CP0_PageMask;
1841 tlb->G = env->CP0_EntryLo0 & env->CP0_EntryLo1 & 1;
1842 tlb->V0 = (env->CP0_EntryLo0 & 2) != 0;
1843 tlb->D0 = (env->CP0_EntryLo0 & 4) != 0;
1844 tlb->C0 = (env->CP0_EntryLo0 >> 3) & 0x7;
1845 tlb->XI0 = (env->CP0_EntryLo0 >> CP0EnLo_XI) & 1;
1846 tlb->RI0 = (env->CP0_EntryLo0 >> CP0EnLo_RI) & 1;
1847 tlb->PFN[0] = (env->CP0_EntryLo0 >> 6) << 12;
1848 tlb->V1 = (env->CP0_EntryLo1 & 2) != 0;
1849 tlb->D1 = (env->CP0_EntryLo1 & 4) != 0;
1850 tlb->C1 = (env->CP0_EntryLo1 >> 3) & 0x7;
1851 tlb->XI1 = (env->CP0_EntryLo1 >> CP0EnLo_XI) & 1;
1852 tlb->RI1 = (env->CP0_EntryLo1 >> CP0EnLo_RI) & 1;
1853 tlb->PFN[1] = (env->CP0_EntryLo1 >> 6) << 12;
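/* EntryLo layout as consumed by r4k_fill_tlb() above: bit 0 is G (the TLB
   entry is global only when it is set in both EntryLo0 and EntryLo1),
   bit 1 is V, bit 2 is D, bits 3..5 are the cache coherency attribute, and
   the PFN starts at bit 6, hence the (lo >> 6) << 12 conversion to a byte
   address of a 4 KiB frame.  The RI/XI bits are read from their
   CP0EnLo_RI/CP0EnLo_XI positions. */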
1856 void r4k_helper_tlbinv(CPUMIPSState *env)
1858 int idx;
1859 r4k_tlb_t *tlb;
1860 uint8_t ASID = env->CP0_EntryHi & 0xFF;
1862 for (idx = 0; idx < env->tlb->nb_tlb; idx++) {
1863 tlb = &env->tlb->mmu.r4k.tlb[idx];
1864 if (!tlb->G && tlb->ASID == ASID) {
1865 tlb->EHINV = 1;
1868 cpu_mips_tlb_flush(env, 1);
1871 void r4k_helper_tlbinvf(CPUMIPSState *env)
1873 int idx;
1875 for (idx = 0; idx < env->tlb->nb_tlb; idx++) {
1876 env->tlb->mmu.r4k.tlb[idx].EHINV = 1;
1878 cpu_mips_tlb_flush(env, 1);
1881 void r4k_helper_tlbwi(CPUMIPSState *env)
1883 r4k_tlb_t *tlb;
1884 int idx;
1885 target_ulong VPN;
1886 uint8_t ASID;
1887 bool G, V0, D0, V1, D1;
1889 idx = (env->CP0_Index & ~0x80000000) % env->tlb->nb_tlb;
1890 tlb = &env->tlb->mmu.r4k.tlb[idx];
1891 VPN = env->CP0_EntryHi & (TARGET_PAGE_MASK << 1);
1892 #if defined(TARGET_MIPS64)
1893 VPN &= env->SEGMask;
1894 #endif
1895 ASID = env->CP0_EntryHi & 0xff;
1896 G = env->CP0_EntryLo0 & env->CP0_EntryLo1 & 1;
1897 V0 = (env->CP0_EntryLo0 & 2) != 0;
1898 D0 = (env->CP0_EntryLo0 & 4) != 0;
1899 V1 = (env->CP0_EntryLo1 & 2) != 0;
1900 D1 = (env->CP0_EntryLo1 & 4) != 0;
1902 /* Discard cached TLB entries, unless tlbwi is just upgrading access
1903 permissions on the current entry. */
1904 if (tlb->VPN != VPN || tlb->ASID != ASID || tlb->G != G ||
1905 (tlb->V0 && !V0) || (tlb->D0 && !D0) ||
1906 (tlb->V1 && !V1) || (tlb->D1 && !D1)) {
1907 r4k_mips_tlb_flush_extra(env, env->tlb->nb_tlb);
1910 r4k_invalidate_tlb(env, idx, 0);
1911 r4k_fill_tlb(env, idx);
1914 void r4k_helper_tlbwr(CPUMIPSState *env)
1916 int r = cpu_mips_get_random(env);
1918 r4k_invalidate_tlb(env, r, 1);
1919 r4k_fill_tlb(env, r);
1922 void r4k_helper_tlbp(CPUMIPSState *env)
1924 r4k_tlb_t *tlb;
1925 target_ulong mask;
1926 target_ulong tag;
1927 target_ulong VPN;
1928 uint8_t ASID;
1929 int i;
1931 ASID = env->CP0_EntryHi & 0xFF;
1932 for (i = 0; i < env->tlb->nb_tlb; i++) {
1933 tlb = &env->tlb->mmu.r4k.tlb[i];
1934 /* 1k pages are not supported. */
1935 mask = tlb->PageMask | ~(TARGET_PAGE_MASK << 1);
1936 tag = env->CP0_EntryHi & ~mask;
1937 VPN = tlb->VPN & ~mask;
1938 #if defined(TARGET_MIPS64)
1939 tag &= env->SEGMask;
1940 #endif
1941 /* Check ASID, virtual page number & size */
1942 if ((tlb->G == 1 || tlb->ASID == ASID) && VPN == tag && !tlb->EHINV) {
1943 /* TLB match */
1944 env->CP0_Index = i;
1945 break;
1948 if (i == env->tlb->nb_tlb) {
1949 /* No match. Discard any shadow entries, if any of them match. */
1950 for (i = env->tlb->nb_tlb; i < env->tlb->tlb_in_use; i++) {
1951 tlb = &env->tlb->mmu.r4k.tlb[i];
1952 /* 1k pages are not supported. */
1953 mask = tlb->PageMask | ~(TARGET_PAGE_MASK << 1);
1954 tag = env->CP0_EntryHi & ~mask;
1955 VPN = tlb->VPN & ~mask;
1956 #if defined(TARGET_MIPS64)
1957 tag &= env->SEGMask;
1958 #endif
1959 /* Check ASID, virtual page number & size */
1960 if ((tlb->G == 1 || tlb->ASID == ASID) && VPN == tag) {
1961 r4k_mips_tlb_flush_extra (env, i);
1962 break;
1966 env->CP0_Index |= 0x80000000;
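/* In other words: a successful probe leaves the matching index in
   CP0_Index; when nothing in the programmable TLB matches, any shadow
   entries from the first matching one onwards are discarded and bit 31 of
   CP0_Index (the P bit) is set to signal the miss. */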
1970 void r4k_helper_tlbr(CPUMIPSState *env)
1972 r4k_tlb_t *tlb;
1973 uint8_t ASID;
1974 int idx;
1976 ASID = env->CP0_EntryHi & 0xFF;
1977 idx = (env->CP0_Index & ~0x80000000) % env->tlb->nb_tlb;
1978 tlb = &env->tlb->mmu.r4k.tlb[idx];
1980 /* If this will change the current ASID, flush qemu's TLB. */
1981 if (ASID != tlb->ASID)
1982 cpu_mips_tlb_flush (env, 1);
1984 r4k_mips_tlb_flush_extra(env, env->tlb->nb_tlb);
1986 if (tlb->EHINV) {
1987 env->CP0_EntryHi = 1 << CP0EnHi_EHINV;
1988 env->CP0_PageMask = 0;
1989 env->CP0_EntryLo0 = 0;
1990 env->CP0_EntryLo1 = 0;
1991 } else {
1992 env->CP0_EntryHi = tlb->VPN | tlb->ASID;
1993 env->CP0_PageMask = tlb->PageMask;
1994 env->CP0_EntryLo0 = tlb->G | (tlb->V0 << 1) | (tlb->D0 << 2) |
1995 ((target_ulong)tlb->RI0 << CP0EnLo_RI) |
1996 ((target_ulong)tlb->XI0 << CP0EnLo_XI) |
1997 (tlb->C0 << 3) | (tlb->PFN[0] >> 6);
1998 env->CP0_EntryLo1 = tlb->G | (tlb->V1 << 1) | (tlb->D1 << 2) |
1999 ((target_ulong)tlb->RI1 << CP0EnLo_RI) |
2000 ((target_ulong)tlb->XI1 << CP0EnLo_XI) |
2001 (tlb->C1 << 3) | (tlb->PFN[1] >> 6);
2005 void helper_tlbwi(CPUMIPSState *env)
2007 env->tlb->helper_tlbwi(env);
2010 void helper_tlbwr(CPUMIPSState *env)
2012 env->tlb->helper_tlbwr(env);
2015 void helper_tlbp(CPUMIPSState *env)
2017 env->tlb->helper_tlbp(env);
2020 void helper_tlbr(CPUMIPSState *env)
2022 env->tlb->helper_tlbr(env);
2025 void helper_tlbinv(CPUMIPSState *env)
2027 env->tlb->helper_tlbinv(env);
2030 void helper_tlbinvf(CPUMIPSState *env)
2032 env->tlb->helper_tlbinvf(env);
2035 /* Specials */
2036 target_ulong helper_di(CPUMIPSState *env)
2038 target_ulong t0 = env->CP0_Status;
2040 env->CP0_Status = t0 & ~(1 << CP0St_IE);
2041 return t0;
2044 target_ulong helper_ei(CPUMIPSState *env)
2046 target_ulong t0 = env->CP0_Status;
2048 env->CP0_Status = t0 | (1 << CP0St_IE);
2049 return t0;
2052 static void debug_pre_eret(CPUMIPSState *env)
2054 if (qemu_loglevel_mask(CPU_LOG_EXEC)) {
2055 qemu_log("ERET: PC " TARGET_FMT_lx " EPC " TARGET_FMT_lx,
2056 env->active_tc.PC, env->CP0_EPC);
2057 if (env->CP0_Status & (1 << CP0St_ERL))
2058 qemu_log(" ErrorEPC " TARGET_FMT_lx, env->CP0_ErrorEPC);
2059 if (env->hflags & MIPS_HFLAG_DM)
2060 qemu_log(" DEPC " TARGET_FMT_lx, env->CP0_DEPC);
2061 qemu_log("\n");
2065 static void debug_post_eret(CPUMIPSState *env)
2067 MIPSCPU *cpu = mips_env_get_cpu(env);
2069 if (qemu_loglevel_mask(CPU_LOG_EXEC)) {
2070 qemu_log(" => PC " TARGET_FMT_lx " EPC " TARGET_FMT_lx,
2071 env->active_tc.PC, env->CP0_EPC);
2072 if (env->CP0_Status & (1 << CP0St_ERL))
2073 qemu_log(" ErrorEPC " TARGET_FMT_lx, env->CP0_ErrorEPC);
2074 if (env->hflags & MIPS_HFLAG_DM)
2075 qemu_log(" DEPC " TARGET_FMT_lx, env->CP0_DEPC);
2076 switch (env->hflags & MIPS_HFLAG_KSU) {
2077 case MIPS_HFLAG_UM: qemu_log(", UM\n"); break;
2078 case MIPS_HFLAG_SM: qemu_log(", SM\n"); break;
2079 case MIPS_HFLAG_KM: qemu_log("\n"); break;
2080 default:
2081 cpu_abort(CPU(cpu), "Invalid MMU mode!\n");
2082 break;
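/* Bit 0 of the target PC selects the compressed ISA mode (MIPS16/microMIPS):
   it is stripped from the architectural PC and mirrored into the
   MIPS_HFLAG_M16 translation flag. */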
2087 static void set_pc(CPUMIPSState *env, target_ulong error_pc)
2089 env->active_tc.PC = error_pc & ~(target_ulong)1;
2090 if (error_pc & 1) {
2091 env->hflags |= MIPS_HFLAG_M16;
2092 } else {
2093 env->hflags &= ~(MIPS_HFLAG_M16);
2097 void helper_eret(CPUMIPSState *env)
2099 debug_pre_eret(env);
2100 if (env->CP0_Status & (1 << CP0St_ERL)) {
2101 set_pc(env, env->CP0_ErrorEPC);
2102 env->CP0_Status &= ~(1 << CP0St_ERL);
2103 } else {
2104 set_pc(env, env->CP0_EPC);
2105 env->CP0_Status &= ~(1 << CP0St_EXL);
2107 compute_hflags(env);
2108 debug_post_eret(env);
2109 env->lladdr = 1;
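/* Writing 1 to lladdr (a value no translated LL address can take) breaks any
   outstanding LL/SC link, so an SC across the exception return fails. */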
2112 void helper_deret(CPUMIPSState *env)
2114 debug_pre_eret(env);
2115 set_pc(env, env->CP0_DEPC);
2117 env->hflags &= ~MIPS_HFLAG_DM;
2118 compute_hflags(env);
2119 debug_post_eret(env);
2120 env->lladdr = 1;
2122 #endif /* !CONFIG_USER_ONLY */
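/* RDHWR helpers: each hardware register is readable when CP0 is accessible
   (MIPS_HFLAG_CP0) or when the corresponding CP0.HWREna bit is set
   (bit 0 CPUNum, bit 1 SYNCI_Step, bit 2 CC, bit 3 CCRes); otherwise a
   Reserved Instruction exception is raised. */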
2124 target_ulong helper_rdhwr_cpunum(CPUMIPSState *env)
2126 if ((env->hflags & MIPS_HFLAG_CP0) ||
2127 (env->CP0_HWREna & (1 << 0)))
2128 return env->CP0_EBase & 0x3ff;
2129 else
2130 helper_raise_exception(env, EXCP_RI);
2132 return 0;
2135 target_ulong helper_rdhwr_synci_step(CPUMIPSState *env)
2137 if ((env->hflags & MIPS_HFLAG_CP0) ||
2138 (env->CP0_HWREna & (1 << 1)))
2139 return env->SYNCI_Step;
2140 else
2141 helper_raise_exception(env, EXCP_RI);
2143 return 0;
2146 target_ulong helper_rdhwr_cc(CPUMIPSState *env)
2148 if ((env->hflags & MIPS_HFLAG_CP0) ||
2149 (env->CP0_HWREna & (1 << 2)))
2150 return env->CP0_Count;
2151 else
2152 helper_raise_exception(env, EXCP_RI);
2154 return 0;
2157 target_ulong helper_rdhwr_ccres(CPUMIPSState *env)
2159 if ((env->hflags & MIPS_HFLAG_CP0) ||
2160 (env->CP0_HWREna & (1 << 3)))
2161 return env->CCRes;
2162 else
2163 helper_raise_exception(env, EXCP_RI);
2165 return 0;
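/* Thin stub for a handful of PMON monitor calls: the function code is divided
   by two before dispatch, character/string arguments are taken from $a0
   (gpr[4]) and results are returned in $v0 (gpr[2]). */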
2168 void helper_pmon(CPUMIPSState *env, int function)
2170 function /= 2;
2171 switch (function) {
2172 case 2: /* TODO: char inbyte(int waitflag); */
2173 if (env->active_tc.gpr[4] == 0)
2174 env->active_tc.gpr[2] = -1;
2175 /* Fall through */
2176 case 11: /* TODO: char inbyte (void); */
2177 env->active_tc.gpr[2] = -1;
2178 break;
2179 case 3:
2180 case 12:
2181 printf("%c", (char)(env->active_tc.gpr[4] & 0xFF));
2182 break;
2183 case 17:
2184 break;
2185 case 158:
2187 unsigned char *fmt = (void *)(uintptr_t)env->active_tc.gpr[4];
2188 printf("%s", fmt);
2190 break;
2194 void helper_wait(CPUMIPSState *env)
2196 CPUState *cs = CPU(mips_env_get_cpu(env));
2198 cs->halted = 1;
2199 cpu_reset_interrupt(cs, CPU_INTERRUPT_WAKE);
2200 helper_raise_exception(env, EXCP_HLT);
2203 #if !defined(CONFIG_USER_ONLY)
2205 void mips_cpu_do_unaligned_access(CPUState *cs, vaddr addr,
2206 int access_type, int is_user,
2207 uintptr_t retaddr)
2209 MIPSCPU *cpu = MIPS_CPU(cs);
2210 CPUMIPSState *env = &cpu->env;
2211 int error_code = 0;
2212 int excp;
2214 env->CP0_BadVAddr = addr;
2216 if (access_type == MMU_DATA_STORE) {
2217 excp = EXCP_AdES;
2218 } else {
2219 excp = EXCP_AdEL;
2220 if (access_type == MMU_INST_FETCH) {
2221 error_code |= EXCP_INST_NOTAVAIL;
2225 do_raise_exception_err(env, excp, error_code, retaddr);
2228 void tlb_fill(CPUState *cs, target_ulong addr, int is_write, int mmu_idx,
2229 uintptr_t retaddr)
2231 int ret;
2233 ret = mips_cpu_handle_mmu_fault(cs, addr, is_write, mmu_idx);
2234 if (ret) {
2235 MIPSCPU *cpu = MIPS_CPU(cs);
2236 CPUMIPSState *env = &cpu->env;
2238 do_raise_exception_err(env, cs->exception_index,
2239 env->error_code, retaddr);
2243 void mips_cpu_unassigned_access(CPUState *cs, hwaddr addr,
2244 bool is_write, bool is_exec, int unused,
2245 unsigned size)
2247 MIPSCPU *cpu = MIPS_CPU(cs);
2248 CPUMIPSState *env = &cpu->env;
2251 * Raising an exception with KVM enabled will crash because it won't be from
2252 * the main execution loop, so the longjmp won't have a matching setjmp.
2253 * Until we can trigger a bus error exception through KVM, let's just ignore
2254 * the access.
2256 if (kvm_enabled()) {
2257 return;
2260 if (is_exec) {
2261 helper_raise_exception(env, EXCP_IBE);
2262 } else {
2263 helper_raise_exception(env, EXCP_DBE);
2266 #endif /* !CONFIG_USER_ONLY */
2268 /* Complex FPU operations which may need stack space. */
2270 #define FLOAT_TWO32 make_float32(1 << 30)
2271 #define FLOAT_TWO64 make_float64(1ULL << 62)
2272 #define FP_TO_INT32_OVERFLOW 0x7fffffff
2273 #define FP_TO_INT64_OVERFLOW 0x7fffffffffffffffULL
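/* Saturated results (INT32_MAX / INT64_MAX) returned by the conversion helpers
   below whenever softfloat reports an invalid operation or an overflow. */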
2275 /* convert MIPS rounding mode in FCR31 to IEEE library */
2276 unsigned int ieee_rm[] = {
2277 float_round_nearest_even,
2278 float_round_to_zero,
2279 float_round_up,
2280 float_round_down
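/* Illustrative sketch only (not part of the original file): the RM field in
 * the two low-order bits of FCR31 indexes this table, so re-synchronising the
 * softfloat rounding mode after a write to FCR31 amounts to something like
 * the hypothetical helper below (assuming the set_float_rounding_mode()
 * interface already used elsewhere in this file).
 */
#if 0 /* example only */
static inline void example_restore_rounding_mode(CPUMIPSState *env)
{
    /* FCR31[1:0] selects the MIPS rounding mode; map it to softfloat. */
    set_float_rounding_mode(ieee_rm[env->active_fpu.fcr31 & 3],
                            &env->active_fpu.fp_status);
}
#endif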
2283 target_ulong helper_cfc1(CPUMIPSState *env, uint32_t reg)
2285 target_ulong arg1 = 0;
2287 switch (reg) {
2288 case 0:
2289 arg1 = (int32_t)env->active_fpu.fcr0;
2290 break;
2291 case 1:
2292 /* UFR Support - Read Status FR */
2293 if (env->active_fpu.fcr0 & (1 << FCR0_UFRP)) {
2294 if (env->CP0_Config5 & (1 << CP0C5_UFR)) {
2295 arg1 = (int32_t)
2296 ((env->CP0_Status & (1 << CP0St_FR)) >> CP0St_FR);
2297 } else {
2298 helper_raise_exception(env, EXCP_RI);
2301 break;
2302 case 25:
2303 arg1 = ((env->active_fpu.fcr31 >> 24) & 0xfe) | ((env->active_fpu.fcr31 >> 23) & 0x1);
2304 break;
2305 case 26:
2306 arg1 = env->active_fpu.fcr31 & 0x0003f07c;
2307 break;
2308 case 28:
2309 arg1 = (env->active_fpu.fcr31 & 0x00000f83) | ((env->active_fpu.fcr31 >> 22) & 0x4);
2310 break;
2311 default:
2312 arg1 = (int32_t)env->active_fpu.fcr31;
2313 break;
2316 return arg1;
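/* CTC1 to registers 1 (UFR) and 4 (UNFR) are aliases that let user mode clear
   or set Status.FR, but only when the facility is present (FCR0.UFRP),
   enabled (Config5.UFR) and rt is $zero; otherwise the write is ignored or a
   Reserved Instruction exception is raised. */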
2319 void helper_ctc1(CPUMIPSState *env, target_ulong arg1, uint32_t fs, uint32_t rt)
2321 switch (fs) {
2322 case 1:
2323 /* UFR Alias - Reset Status FR */
2324 if (!((env->active_fpu.fcr0 & (1 << FCR0_UFRP)) && (rt == 0))) {
2325 return;
2327 if (env->CP0_Config5 & (1 << CP0C5_UFR)) {
2328 env->CP0_Status &= ~(1 << CP0St_FR);
2329 compute_hflags(env);
2330 } else {
2331 helper_raise_exception(env, EXCP_RI);
2333 break;
2334 case 4:
2335 /* UNFR Alias - Set Status FR */
2336 if (!((env->active_fpu.fcr0 & (1 << FCR0_UFRP)) && (rt == 0))) {
2337 return;
2339 if (env->CP0_Config5 & (1 << CP0C5_UFR)) {
2340 env->CP0_Status |= (1 << CP0St_FR);
2341 compute_hflags(env);
2342 } else {
2343 helper_raise_exception(env, EXCP_RI);
2345 break;
2346 case 25:
2347 if ((env->insn_flags & ISA_MIPS32R6) || (arg1 & 0xffffff00)) {
2348 return;
2350 env->active_fpu.fcr31 = (env->active_fpu.fcr31 & 0x017fffff) | ((arg1 & 0xfe) << 24) |
2351 ((arg1 & 0x1) << 23);
2352 break;
2353 case 26:
2354 if (arg1 & 0x007c0000)
2355 return;
2356 env->active_fpu.fcr31 = (env->active_fpu.fcr31 & 0xfffc0f83) | (arg1 & 0x0003f07c);
2357 break;
2358 case 28:
2359 if (arg1 & 0x007c0000)
2360 return;
2361 env->active_fpu.fcr31 = (env->active_fpu.fcr31 & 0xfefff07c) | (arg1 & 0x00000f83) |
2362 ((arg1 & 0x4) << 22);
2363 break;
2364 case 31:
2365 if (env->insn_flags & ISA_MIPS32R6) {
2366 uint32_t mask = 0xfefc0000;
2367 env->active_fpu.fcr31 = (arg1 & ~mask) |
2368 (env->active_fpu.fcr31 & mask);
2369 } else if (!(arg1 & 0x007c0000)) {
2370 env->active_fpu.fcr31 = arg1;
2372 break;
2373 default:
2374 return;
2376 /* set rounding mode */
2377 restore_rounding_mode(env);
2378 /* set flush-to-zero mode */
2379 restore_flush_mode(env);
2380 set_float_exception_flags(0, &env->active_fpu.fp_status);
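/* The OR with 0x20 accounts for the Unimplemented Operation cause bit
   (FCSR.E), which has no matching enable bit and therefore always traps
   when set. */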
2381 if ((GET_FP_ENABLE(env->active_fpu.fcr31) | 0x20) & GET_FP_CAUSE(env->active_fpu.fcr31))
2382 do_raise_exception(env, EXCP_FPE, GETPC());
2385 int ieee_ex_to_mips(int xcpt)
2387 int ret = 0;
2388 if (xcpt) {
2389 if (xcpt & float_flag_invalid) {
2390 ret |= FP_INVALID;
2392 if (xcpt & float_flag_overflow) {
2393 ret |= FP_OVERFLOW;
2395 if (xcpt & float_flag_underflow) {
2396 ret |= FP_UNDERFLOW;
2398 if (xcpt & float_flag_divbyzero) {
2399 ret |= FP_DIV0;
2401 if (xcpt & float_flag_inexact) {
2402 ret |= FP_INEXACT;
2405 return ret;
2408 static inline void update_fcr31(CPUMIPSState *env, uintptr_t pc)
2410 int tmp = ieee_ex_to_mips(get_float_exception_flags(&env->active_fpu.fp_status));
2412 SET_FP_CAUSE(env->active_fpu.fcr31, tmp);
2414 if (tmp) {
2415 set_float_exception_flags(0, &env->active_fpu.fp_status);
2417 if (GET_FP_ENABLE(env->active_fpu.fcr31) & tmp) {
2418 do_raise_exception(env, EXCP_FPE, pc);
2419 } else {
2420 UPDATE_FP_FLAGS(env->active_fpu.fcr31, tmp);
2425 /* Float support.
2426 Single precision routines have an "s" suffix, double precision a
2427 "d" suffix, 32-bit integer "w", 64-bit integer "l", paired single "ps",
2428 paired single lower "pl", paired single upper "pu". */
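/* For example, helper_float_cvtd_s below converts single precision ("s") to
   double precision ("d"), and helper_float_cvtl_d converts double precision
   to a 64-bit integer ("l"). */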
2430 /* unary operations, modifying fp status */
2431 uint64_t helper_float_sqrt_d(CPUMIPSState *env, uint64_t fdt0)
2433 fdt0 = float64_sqrt(fdt0, &env->active_fpu.fp_status);
2434 update_fcr31(env, GETPC());
2435 return fdt0;
2438 uint32_t helper_float_sqrt_s(CPUMIPSState *env, uint32_t fst0)
2440 fst0 = float32_sqrt(fst0, &env->active_fpu.fp_status);
2441 update_fcr31(env, GETPC());
2442 return fst0;
2445 uint64_t helper_float_cvtd_s(CPUMIPSState *env, uint32_t fst0)
2447 uint64_t fdt2;
2449 fdt2 = float32_to_float64(fst0, &env->active_fpu.fp_status);
2450 update_fcr31(env, GETPC());
2451 return fdt2;
2454 uint64_t helper_float_cvtd_w(CPUMIPSState *env, uint32_t wt0)
2456 uint64_t fdt2;
2458 fdt2 = int32_to_float64(wt0, &env->active_fpu.fp_status);
2459 update_fcr31(env, GETPC());
2460 return fdt2;
2463 uint64_t helper_float_cvtd_l(CPUMIPSState *env, uint64_t dt0)
2465 uint64_t fdt2;
2467 fdt2 = int64_to_float64(dt0, &env->active_fpu.fp_status);
2468 update_fcr31(env, GETPC());
2469 return fdt2;
2472 uint64_t helper_float_cvtl_d(CPUMIPSState *env, uint64_t fdt0)
2474 uint64_t dt2;
2476 dt2 = float64_to_int64(fdt0, &env->active_fpu.fp_status);
2477 if (get_float_exception_flags(&env->active_fpu.fp_status)
2478 & (float_flag_invalid | float_flag_overflow)) {
2479 dt2 = FP_TO_INT64_OVERFLOW;
2481 update_fcr31(env, GETPC());
2482 return dt2;
2485 uint64_t helper_float_cvtl_s(CPUMIPSState *env, uint32_t fst0)
2487 uint64_t dt2;
2489 dt2 = float32_to_int64(fst0, &env->active_fpu.fp_status);
2490 if (get_float_exception_flags(&env->active_fpu.fp_status)
2491 & (float_flag_invalid | float_flag_overflow)) {
2492 dt2 = FP_TO_INT64_OVERFLOW;
2494 update_fcr31(env, GETPC());
2495 return dt2;
2498 uint64_t helper_float_cvtps_pw(CPUMIPSState *env, uint64_t dt0)
2500 uint32_t fst2;
2501 uint32_t fsth2;
2503 fst2 = int32_to_float32(dt0 & 0XFFFFFFFF, &env->active_fpu.fp_status);
2504 fsth2 = int32_to_float32(dt0 >> 32, &env->active_fpu.fp_status);
2505 update_fcr31(env, GETPC());
2506 return ((uint64_t)fsth2 << 32) | fst2;
2509 uint64_t helper_float_cvtpw_ps(CPUMIPSState *env, uint64_t fdt0)
2511 uint32_t wt2;
2512 uint32_t wth2;
2513 int excp, excph;
2515 wt2 = float32_to_int32(fdt0 & 0XFFFFFFFF, &env->active_fpu.fp_status);
2516 excp = get_float_exception_flags(&env->active_fpu.fp_status);
2517 if (excp & (float_flag_overflow | float_flag_invalid)) {
2518 wt2 = FP_TO_INT32_OVERFLOW;
2521 set_float_exception_flags(0, &env->active_fpu.fp_status);
2522 wth2 = float32_to_int32(fdt0 >> 32, &env->active_fpu.fp_status);
2523 excph = get_float_exception_flags(&env->active_fpu.fp_status);
2524 if (excph & (float_flag_overflow | float_flag_invalid)) {
2525 wth2 = FP_TO_INT32_OVERFLOW;
2528 set_float_exception_flags(excp | excph, &env->active_fpu.fp_status);
2529 update_fcr31(env, GETPC());
2531 return ((uint64_t)wth2 << 32) | wt2;
2534 uint32_t helper_float_cvts_d(CPUMIPSState *env, uint64_t fdt0)
2536 uint32_t fst2;
2538 fst2 = float64_to_float32(fdt0, &env->active_fpu.fp_status);
2539 update_fcr31(env, GETPC());
2540 return fst2;
2543 uint32_t helper_float_cvts_w(CPUMIPSState *env, uint32_t wt0)
2545 uint32_t fst2;
2547 fst2 = int32_to_float32(wt0, &env->active_fpu.fp_status);
2548 update_fcr31(env, GETPC());
2549 return fst2;
2552 uint32_t helper_float_cvts_l(CPUMIPSState *env, uint64_t dt0)
2554 uint32_t fst2;
2556 fst2 = int64_to_float32(dt0, &env->active_fpu.fp_status);
2557 update_fcr31(env, GETPC());
2558 return fst2;
2561 uint32_t helper_float_cvts_pl(CPUMIPSState *env, uint32_t wt0)
2563 uint32_t wt2;
2565 wt2 = wt0;
2566 update_fcr31(env, GETPC());
2567 return wt2;
2570 uint32_t helper_float_cvts_pu(CPUMIPSState *env, uint32_t wth0)
2572 uint32_t wt2;
2574 wt2 = wth0;
2575 update_fcr31(env, GETPC());
2576 return wt2;
2579 uint32_t helper_float_cvtw_s(CPUMIPSState *env, uint32_t fst0)
2581 uint32_t wt2;
2583 wt2 = float32_to_int32(fst0, &env->active_fpu.fp_status);
2584 if (get_float_exception_flags(&env->active_fpu.fp_status)
2585 & (float_flag_invalid | float_flag_overflow)) {
2586 wt2 = FP_TO_INT32_OVERFLOW;
2588 update_fcr31(env, GETPC());
2589 return wt2;
2592 uint32_t helper_float_cvtw_d(CPUMIPSState *env, uint64_t fdt0)
2594 uint32_t wt2;
2596 wt2 = float64_to_int32(fdt0, &env->active_fpu.fp_status);
2597 if (get_float_exception_flags(&env->active_fpu.fp_status)
2598 & (float_flag_invalid | float_flag_overflow)) {
2599 wt2 = FP_TO_INT32_OVERFLOW;
2601 update_fcr31(env, GETPC());
2602 return wt2;
2605 uint64_t helper_float_roundl_d(CPUMIPSState *env, uint64_t fdt0)
2607 uint64_t dt2;
2609 set_float_rounding_mode(float_round_nearest_even, &env->active_fpu.fp_status);
2610 dt2 = float64_to_int64(fdt0, &env->active_fpu.fp_status);
2611 restore_rounding_mode(env);
2612 if (get_float_exception_flags(&env->active_fpu.fp_status)
2613 & (float_flag_invalid | float_flag_overflow)) {
2614 dt2 = FP_TO_INT64_OVERFLOW;
2616 update_fcr31(env, GETPC());
2617 return dt2;
2620 uint64_t helper_float_roundl_s(CPUMIPSState *env, uint32_t fst0)
2622 uint64_t dt2;
2624 set_float_rounding_mode(float_round_nearest_even, &env->active_fpu.fp_status);
2625 dt2 = float32_to_int64(fst0, &env->active_fpu.fp_status);
2626 restore_rounding_mode(env);
2627 if (get_float_exception_flags(&env->active_fpu.fp_status)
2628 & (float_flag_invalid | float_flag_overflow)) {
2629 dt2 = FP_TO_INT64_OVERFLOW;
2631 update_fcr31(env, GETPC());
2632 return dt2;
2635 uint32_t helper_float_roundw_d(CPUMIPSState *env, uint64_t fdt0)
2637 uint32_t wt2;
2639 set_float_rounding_mode(float_round_nearest_even, &env->active_fpu.fp_status);
2640 wt2 = float64_to_int32(fdt0, &env->active_fpu.fp_status);
2641 restore_rounding_mode(env);
2642 if (get_float_exception_flags(&env->active_fpu.fp_status)
2643 & (float_flag_invalid | float_flag_overflow)) {
2644 wt2 = FP_TO_INT32_OVERFLOW;
2646 update_fcr31(env, GETPC());
2647 return wt2;
2650 uint32_t helper_float_roundw_s(CPUMIPSState *env, uint32_t fst0)
2652 uint32_t wt2;
2654 set_float_rounding_mode(float_round_nearest_even, &env->active_fpu.fp_status);
2655 wt2 = float32_to_int32(fst0, &env->active_fpu.fp_status);
2656 restore_rounding_mode(env);
2657 if (get_float_exception_flags(&env->active_fpu.fp_status)
2658 & (float_flag_invalid | float_flag_overflow)) {
2659 wt2 = FP_TO_INT32_OVERFLOW;
2661 update_fcr31(env, GETPC());
2662 return wt2;
2665 uint64_t helper_float_truncl_d(CPUMIPSState *env, uint64_t fdt0)
2667 uint64_t dt2;
2669 dt2 = float64_to_int64_round_to_zero(fdt0, &env->active_fpu.fp_status);
2670 if (get_float_exception_flags(&env->active_fpu.fp_status)
2671 & (float_flag_invalid | float_flag_overflow)) {
2672 dt2 = FP_TO_INT64_OVERFLOW;
2674 update_fcr31(env, GETPC());
2675 return dt2;
2678 uint64_t helper_float_truncl_s(CPUMIPSState *env, uint32_t fst0)
2680 uint64_t dt2;
2682 dt2 = float32_to_int64_round_to_zero(fst0, &env->active_fpu.fp_status);
2683 if (get_float_exception_flags(&env->active_fpu.fp_status)
2684 & (float_flag_invalid | float_flag_overflow)) {
2685 dt2 = FP_TO_INT64_OVERFLOW;
2687 update_fcr31(env, GETPC());
2688 return dt2;
2691 uint32_t helper_float_truncw_d(CPUMIPSState *env, uint64_t fdt0)
2693 uint32_t wt2;
2695 wt2 = float64_to_int32_round_to_zero(fdt0, &env->active_fpu.fp_status);
2696 if (get_float_exception_flags(&env->active_fpu.fp_status)
2697 & (float_flag_invalid | float_flag_overflow)) {
2698 wt2 = FP_TO_INT32_OVERFLOW;
2700 update_fcr31(env, GETPC());
2701 return wt2;
2704 uint32_t helper_float_truncw_s(CPUMIPSState *env, uint32_t fst0)
2706 uint32_t wt2;
2708 wt2 = float32_to_int32_round_to_zero(fst0, &env->active_fpu.fp_status);
2709 if (get_float_exception_flags(&env->active_fpu.fp_status)
2710 & (float_flag_invalid | float_flag_overflow)) {
2711 wt2 = FP_TO_INT32_OVERFLOW;
2713 update_fcr31(env, GETPC());
2714 return wt2;
2717 uint64_t helper_float_ceill_d(CPUMIPSState *env, uint64_t fdt0)
2719 uint64_t dt2;
2721 set_float_rounding_mode(float_round_up, &env->active_fpu.fp_status);
2722 dt2 = float64_to_int64(fdt0, &env->active_fpu.fp_status);
2723 restore_rounding_mode(env);
2724 if (get_float_exception_flags(&env->active_fpu.fp_status)
2725 & (float_flag_invalid | float_flag_overflow)) {
2726 dt2 = FP_TO_INT64_OVERFLOW;
2728 update_fcr31(env, GETPC());
2729 return dt2;
2732 uint64_t helper_float_ceill_s(CPUMIPSState *env, uint32_t fst0)
2734 uint64_t dt2;
2736 set_float_rounding_mode(float_round_up, &env->active_fpu.fp_status);
2737 dt2 = float32_to_int64(fst0, &env->active_fpu.fp_status);
2738 restore_rounding_mode(env);
2739 if (get_float_exception_flags(&env->active_fpu.fp_status)
2740 & (float_flag_invalid | float_flag_overflow)) {
2741 dt2 = FP_TO_INT64_OVERFLOW;
2743 update_fcr31(env, GETPC());
2744 return dt2;
2747 uint32_t helper_float_ceilw_d(CPUMIPSState *env, uint64_t fdt0)
2749 uint32_t wt2;
2751 set_float_rounding_mode(float_round_up, &env->active_fpu.fp_status);
2752 wt2 = float64_to_int32(fdt0, &env->active_fpu.fp_status);
2753 restore_rounding_mode(env);
2754 if (get_float_exception_flags(&env->active_fpu.fp_status)
2755 & (float_flag_invalid | float_flag_overflow)) {
2756 wt2 = FP_TO_INT32_OVERFLOW;
2758 update_fcr31(env, GETPC());
2759 return wt2;
2762 uint32_t helper_float_ceilw_s(CPUMIPSState *env, uint32_t fst0)
2764 uint32_t wt2;
2766 set_float_rounding_mode(float_round_up, &env->active_fpu.fp_status);
2767 wt2 = float32_to_int32(fst0, &env->active_fpu.fp_status);
2768 restore_rounding_mode(env);
2769 if (get_float_exception_flags(&env->active_fpu.fp_status)
2770 & (float_flag_invalid | float_flag_overflow)) {
2771 wt2 = FP_TO_INT32_OVERFLOW;
2773 update_fcr31(env, GETPC());
2774 return wt2;
2777 uint64_t helper_float_floorl_d(CPUMIPSState *env, uint64_t fdt0)
2779 uint64_t dt2;
2781 set_float_rounding_mode(float_round_down, &env->active_fpu.fp_status);
2782 dt2 = float64_to_int64(fdt0, &env->active_fpu.fp_status);
2783 restore_rounding_mode(env);
2784 if (get_float_exception_flags(&env->active_fpu.fp_status)
2785 & (float_flag_invalid | float_flag_overflow)) {
2786 dt2 = FP_TO_INT64_OVERFLOW;
2788 update_fcr31(env, GETPC());
2789 return dt2;
2792 uint64_t helper_float_floorl_s(CPUMIPSState *env, uint32_t fst0)
2794 uint64_t dt2;
2796 set_float_rounding_mode(float_round_down, &env->active_fpu.fp_status);
2797 dt2 = float32_to_int64(fst0, &env->active_fpu.fp_status);
2798 restore_rounding_mode(env);
2799 if (get_float_exception_flags(&env->active_fpu.fp_status)
2800 & (float_flag_invalid | float_flag_overflow)) {
2801 dt2 = FP_TO_INT64_OVERFLOW;
2803 update_fcr31(env, GETPC());
2804 return dt2;
2807 uint32_t helper_float_floorw_d(CPUMIPSState *env, uint64_t fdt0)
2809 uint32_t wt2;
2811 set_float_rounding_mode(float_round_down, &env->active_fpu.fp_status);
2812 wt2 = float64_to_int32(fdt0, &env->active_fpu.fp_status);
2813 restore_rounding_mode(env);
2814 if (get_float_exception_flags(&env->active_fpu.fp_status)
2815 & (float_flag_invalid | float_flag_overflow)) {
2816 wt2 = FP_TO_INT32_OVERFLOW;
2818 update_fcr31(env, GETPC());
2819 return wt2;
2822 uint32_t helper_float_floorw_s(CPUMIPSState *env, uint32_t fst0)
2824 uint32_t wt2;
2826 set_float_rounding_mode(float_round_down, &env->active_fpu.fp_status);
2827 wt2 = float32_to_int32(fst0, &env->active_fpu.fp_status);
2828 restore_rounding_mode(env);
2829 if (get_float_exception_flags(&env->active_fpu.fp_status)
2830 & (float_flag_invalid | float_flag_overflow)) {
2831 wt2 = FP_TO_INT32_OVERFLOW;
2833 update_fcr31(env, GETPC());
2834 return wt2;
2837 /* unary operations, not modifying fp status */
2838 #define FLOAT_UNOP(name) \
2839 uint64_t helper_float_ ## name ## _d(uint64_t fdt0) \
2841 return float64_ ## name(fdt0); \
2843 uint32_t helper_float_ ## name ## _s(uint32_t fst0) \
2845 return float32_ ## name(fst0); \
2847 uint64_t helper_float_ ## name ## _ps(uint64_t fdt0) \
2849 uint32_t wt0; \
2850 uint32_t wth0; \
2852 wt0 = float32_ ## name(fdt0 & 0XFFFFFFFF); \
2853 wth0 = float32_ ## name(fdt0 >> 32); \
2854 return ((uint64_t)wth0 << 32) | wt0; \
2856 FLOAT_UNOP(abs)
2857 FLOAT_UNOP(chs)
2858 #undef FLOAT_UNOP
2860 /* MIPS specific unary operations */
2861 uint64_t helper_float_recip_d(CPUMIPSState *env, uint64_t fdt0)
2863 uint64_t fdt2;
2865 fdt2 = float64_div(float64_one, fdt0, &env->active_fpu.fp_status);
2866 update_fcr31(env, GETPC());
2867 return fdt2;
2870 uint32_t helper_float_recip_s(CPUMIPSState *env, uint32_t fst0)
2872 uint32_t fst2;
2874 fst2 = float32_div(float32_one, fst0, &env->active_fpu.fp_status);
2875 update_fcr31(env, GETPC());
2876 return fst2;
2879 uint64_t helper_float_rsqrt_d(CPUMIPSState *env, uint64_t fdt0)
2881 uint64_t fdt2;
2883 fdt2 = float64_sqrt(fdt0, &env->active_fpu.fp_status);
2884 fdt2 = float64_div(float64_one, fdt2, &env->active_fpu.fp_status);
2885 update_fcr31(env, GETPC());
2886 return fdt2;
2889 uint32_t helper_float_rsqrt_s(CPUMIPSState *env, uint32_t fst0)
2891 uint32_t fst2;
2893 fst2 = float32_sqrt(fst0, &env->active_fpu.fp_status);
2894 fst2 = float32_div(float32_one, fst2, &env->active_fpu.fp_status);
2895 update_fcr31(env, GETPC());
2896 return fst2;
2899 uint64_t helper_float_recip1_d(CPUMIPSState *env, uint64_t fdt0)
2901 uint64_t fdt2;
2903 fdt2 = float64_div(float64_one, fdt0, &env->active_fpu.fp_status);
2904 update_fcr31(env, GETPC());
2905 return fdt2;
2908 uint32_t helper_float_recip1_s(CPUMIPSState *env, uint32_t fst0)
2910 uint32_t fst2;
2912 fst2 = float32_div(float32_one, fst0, &env->active_fpu.fp_status);
2913 update_fcr31(env, GETPC());
2914 return fst2;
2917 uint64_t helper_float_recip1_ps(CPUMIPSState *env, uint64_t fdt0)
2919 uint32_t fst2;
2920 uint32_t fsth2;
2922 fst2 = float32_div(float32_one, fdt0 & 0XFFFFFFFF, &env->active_fpu.fp_status);
2923 fsth2 = float32_div(float32_one, fdt0 >> 32, &env->active_fpu.fp_status);
2924 update_fcr31(env, GETPC());
2925 return ((uint64_t)fsth2 << 32) | fst2;
2928 uint64_t helper_float_rsqrt1_d(CPUMIPSState *env, uint64_t fdt0)
2930 uint64_t fdt2;
2932 fdt2 = float64_sqrt(fdt0, &env->active_fpu.fp_status);
2933 fdt2 = float64_div(float64_one, fdt2, &env->active_fpu.fp_status);
2934 update_fcr31(env, GETPC());
2935 return fdt2;
2938 uint32_t helper_float_rsqrt1_s(CPUMIPSState *env, uint32_t fst0)
2940 uint32_t fst2;
2942 fst2 = float32_sqrt(fst0, &env->active_fpu.fp_status);
2943 fst2 = float32_div(float32_one, fst2, &env->active_fpu.fp_status);
2944 update_fcr31(env, GETPC());
2945 return fst2;
2948 uint64_t helper_float_rsqrt1_ps(CPUMIPSState *env, uint64_t fdt0)
2950 uint32_t fst2;
2951 uint32_t fsth2;
2953 fst2 = float32_sqrt(fdt0 & 0XFFFFFFFF, &env->active_fpu.fp_status);
2954 fsth2 = float32_sqrt(fdt0 >> 32, &env->active_fpu.fp_status);
2955 fst2 = float32_div(float32_one, fst2, &env->active_fpu.fp_status);
2956 fsth2 = float32_div(float32_one, fsth2, &env->active_fpu.fp_status);
2957 update_fcr31(env, GETPC());
2958 return ((uint64_t)fsth2 << 32) | fst2;
2961 #define FLOAT_RINT(name, bits) \
2962 uint ## bits ## _t helper_float_ ## name (CPUMIPSState *env, \
2963 uint ## bits ## _t fs) \
2965 uint ## bits ## _t fdret; \
2967 fdret = float ## bits ## _round_to_int(fs, &env->active_fpu.fp_status); \
2968 update_fcr31(env, GETPC()); \
2969 return fdret; \
2972 FLOAT_RINT(rint_s, 32)
2973 FLOAT_RINT(rint_d, 64)
2974 #undef FLOAT_RINT
2976 #define FLOAT_CLASS_SIGNALING_NAN 0x001
2977 #define FLOAT_CLASS_QUIET_NAN 0x002
2978 #define FLOAT_CLASS_NEGATIVE_INFINITY 0x004
2979 #define FLOAT_CLASS_NEGATIVE_NORMAL 0x008
2980 #define FLOAT_CLASS_NEGATIVE_SUBNORMAL 0x010
2981 #define FLOAT_CLASS_NEGATIVE_ZERO 0x020
2982 #define FLOAT_CLASS_POSITIVE_INFINITY 0x040
2983 #define FLOAT_CLASS_POSITIVE_NORMAL 0x080
2984 #define FLOAT_CLASS_POSITIVE_SUBNORMAL 0x100
2985 #define FLOAT_CLASS_POSITIVE_ZERO 0x200
2987 #define FLOAT_CLASS(name, bits) \
2988 uint ## bits ## _t helper_float_ ## name (uint ## bits ## _t arg) \
2990 if (float ## bits ## _is_signaling_nan(arg)) { \
2991 return FLOAT_CLASS_SIGNALING_NAN; \
2992 } else if (float ## bits ## _is_quiet_nan(arg)) { \
2993 return FLOAT_CLASS_QUIET_NAN; \
2994 } else if (float ## bits ## _is_neg(arg)) { \
2995 if (float ## bits ## _is_infinity(arg)) { \
2996 return FLOAT_CLASS_NEGATIVE_INFINITY; \
2997 } else if (float ## bits ## _is_zero(arg)) { \
2998 return FLOAT_CLASS_NEGATIVE_ZERO; \
2999 } else if (float ## bits ## _is_zero_or_denormal(arg)) { \
3000 return FLOAT_CLASS_NEGATIVE_SUBNORMAL; \
3001 } else { \
3002 return FLOAT_CLASS_NEGATIVE_NORMAL; \
3004 } else { \
3005 if (float ## bits ## _is_infinity(arg)) { \
3006 return FLOAT_CLASS_POSITIVE_INFINITY; \
3007 } else if (float ## bits ## _is_zero(arg)) { \
3008 return FLOAT_CLASS_POSITIVE_ZERO; \
3009 } else if (float ## bits ## _is_zero_or_denormal(arg)) { \
3010 return FLOAT_CLASS_POSITIVE_SUBNORMAL; \
3011 } else { \
3012 return FLOAT_CLASS_POSITIVE_NORMAL; \
3017 FLOAT_CLASS(class_s, 32)
3018 FLOAT_CLASS(class_d, 64)
3019 #undef FLOAT_CLASS
3021 /* binary operations */
3022 #define FLOAT_BINOP(name) \
3023 uint64_t helper_float_ ## name ## _d(CPUMIPSState *env, \
3024 uint64_t fdt0, uint64_t fdt1) \
3026 uint64_t dt2; \
3028 dt2 = float64_ ## name (fdt0, fdt1, &env->active_fpu.fp_status); \
3029 update_fcr31(env, GETPC()); \
3030 return dt2; \
3033 uint32_t helper_float_ ## name ## _s(CPUMIPSState *env, \
3034 uint32_t fst0, uint32_t fst1) \
3036 uint32_t wt2; \
3038 wt2 = float32_ ## name (fst0, fst1, &env->active_fpu.fp_status); \
3039 update_fcr31(env, GETPC()); \
3040 return wt2; \
3043 uint64_t helper_float_ ## name ## _ps(CPUMIPSState *env, \
3044 uint64_t fdt0, \
3045 uint64_t fdt1) \
3047 uint32_t fst0 = fdt0 & 0XFFFFFFFF; \
3048 uint32_t fsth0 = fdt0 >> 32; \
3049 uint32_t fst1 = fdt1 & 0XFFFFFFFF; \
3050 uint32_t fsth1 = fdt1 >> 32; \
3051 uint32_t wt2; \
3052 uint32_t wth2; \
3054 wt2 = float32_ ## name (fst0, fst1, &env->active_fpu.fp_status); \
3055 wth2 = float32_ ## name (fsth0, fsth1, &env->active_fpu.fp_status); \
3056 update_fcr31(env, GETPC()); \
3057 return ((uint64_t)wth2 << 32) | wt2; \
3060 FLOAT_BINOP(add)
3061 FLOAT_BINOP(sub)
3062 FLOAT_BINOP(mul)
3063 FLOAT_BINOP(div)
3064 #undef FLOAT_BINOP
3066 /* MIPS specific binary operations */
3067 uint64_t helper_float_recip2_d(CPUMIPSState *env, uint64_t fdt0, uint64_t fdt2)
3069 fdt2 = float64_mul(fdt0, fdt2, &env->active_fpu.fp_status);
3070 fdt2 = float64_chs(float64_sub(fdt2, float64_one, &env->active_fpu.fp_status));
3071 update_fcr31(env, GETPC());
3072 return fdt2;
3075 uint32_t helper_float_recip2_s(CPUMIPSState *env, uint32_t fst0, uint32_t fst2)
3077 fst2 = float32_mul(fst0, fst2, &env->active_fpu.fp_status);
3078 fst2 = float32_chs(float32_sub(fst2, float32_one, &env->active_fpu.fp_status));
3079 update_fcr31(env, GETPC());
3080 return fst2;
3083 uint64_t helper_float_recip2_ps(CPUMIPSState *env, uint64_t fdt0, uint64_t fdt2)
3085 uint32_t fst0 = fdt0 & 0XFFFFFFFF;
3086 uint32_t fsth0 = fdt0 >> 32;
3087 uint32_t fst2 = fdt2 & 0XFFFFFFFF;
3088 uint32_t fsth2 = fdt2 >> 32;
3090 fst2 = float32_mul(fst0, fst2, &env->active_fpu.fp_status);
3091 fsth2 = float32_mul(fsth0, fsth2, &env->active_fpu.fp_status);
3092 fst2 = float32_chs(float32_sub(fst2, float32_one, &env->active_fpu.fp_status));
3093 fsth2 = float32_chs(float32_sub(fsth2, float32_one, &env->active_fpu.fp_status));
3094 update_fcr31(env, GETPC());
3095 return ((uint64_t)fsth2 << 32) | fst2;
3098 uint64_t helper_float_rsqrt2_d(CPUMIPSState *env, uint64_t fdt0, uint64_t fdt2)
3100 fdt2 = float64_mul(fdt0, fdt2, &env->active_fpu.fp_status);
3101 fdt2 = float64_sub(fdt2, float64_one, &env->active_fpu.fp_status);
3102 fdt2 = float64_chs(float64_div(fdt2, FLOAT_TWO64, &env->active_fpu.fp_status));
3103 update_fcr31(env, GETPC());
3104 return fdt2;
3107 uint32_t helper_float_rsqrt2_s(CPUMIPSState *env, uint32_t fst0, uint32_t fst2)
3109 fst2 = float32_mul(fst0, fst2, &env->active_fpu.fp_status);
3110 fst2 = float32_sub(fst2, float32_one, &env->active_fpu.fp_status);
3111 fst2 = float32_chs(float32_div(fst2, FLOAT_TWO32, &env->active_fpu.fp_status));
3112 update_fcr31(env, GETPC());
3113 return fst2;
3116 uint64_t helper_float_rsqrt2_ps(CPUMIPSState *env, uint64_t fdt0, uint64_t fdt2)
3118 uint32_t fst0 = fdt0 & 0XFFFFFFFF;
3119 uint32_t fsth0 = fdt0 >> 32;
3120 uint32_t fst2 = fdt2 & 0XFFFFFFFF;
3121 uint32_t fsth2 = fdt2 >> 32;
3123 fst2 = float32_mul(fst0, fst2, &env->active_fpu.fp_status);
3124 fsth2 = float32_mul(fsth0, fsth2, &env->active_fpu.fp_status);
3125 fst2 = float32_sub(fst2, float32_one, &env->active_fpu.fp_status);
3126 fsth2 = float32_sub(fsth2, float32_one, &env->active_fpu.fp_status);
3127 fst2 = float32_chs(float32_div(fst2, FLOAT_TWO32, &env->active_fpu.fp_status));
3128 fsth2 = float32_chs(float32_div(fsth2, FLOAT_TWO32, &env->active_fpu.fp_status));
3129 update_fcr31(env, GETPC());
3130 return ((uint64_t)fsth2 << 32) | fst2;
3133 uint64_t helper_float_addr_ps(CPUMIPSState *env, uint64_t fdt0, uint64_t fdt1)
3135 uint32_t fst0 = fdt0 & 0XFFFFFFFF;
3136 uint32_t fsth0 = fdt0 >> 32;
3137 uint32_t fst1 = fdt1 & 0XFFFFFFFF;
3138 uint32_t fsth1 = fdt1 >> 32;
3139 uint32_t fst2;
3140 uint32_t fsth2;
3142 fst2 = float32_add (fst0, fsth0, &env->active_fpu.fp_status);
3143 fsth2 = float32_add (fst1, fsth1, &env->active_fpu.fp_status);
3144 update_fcr31(env, GETPC());
3145 return ((uint64_t)fsth2 << 32) | fst2;
3148 uint64_t helper_float_mulr_ps(CPUMIPSState *env, uint64_t fdt0, uint64_t fdt1)
3150 uint32_t fst0 = fdt0 & 0XFFFFFFFF;
3151 uint32_t fsth0 = fdt0 >> 32;
3152 uint32_t fst1 = fdt1 & 0XFFFFFFFF;
3153 uint32_t fsth1 = fdt1 >> 32;
3154 uint32_t fst2;
3155 uint32_t fsth2;
3157 fst2 = float32_mul (fst0, fsth0, &env->active_fpu.fp_status);
3158 fsth2 = float32_mul (fst1, fsth1, &env->active_fpu.fp_status);
3159 update_fcr31(env, GETPC());
3160 return ((uint64_t)fsth2 << 32) | fst2;
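/* The MIN/MAX helpers below map onto softfloat's IEEE 754-2008 operations:
   MIN/MAX use minnum/maxnum, which return the numerical operand when the
   other is a quiet NaN, and the MINA/MAXA variants use minnummag/maxnummag,
   which compare absolute values. */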
3163 #define FLOAT_MINMAX(name, bits, minmaxfunc) \
3164 uint ## bits ## _t helper_float_ ## name (CPUMIPSState *env, \
3165 uint ## bits ## _t fs, \
3166 uint ## bits ## _t ft) \
3168 uint ## bits ## _t fdret; \
3170 fdret = float ## bits ## _ ## minmaxfunc(fs, ft, \
3171 &env->active_fpu.fp_status); \
3172 update_fcr31(env, GETPC()); \
3173 return fdret; \
3176 FLOAT_MINMAX(max_s, 32, maxnum)
3177 FLOAT_MINMAX(max_d, 64, maxnum)
3178 FLOAT_MINMAX(maxa_s, 32, maxnummag)
3179 FLOAT_MINMAX(maxa_d, 64, maxnummag)
3181 FLOAT_MINMAX(min_s, 32, minnum)
3182 FLOAT_MINMAX(min_d, 64, minnum)
3183 FLOAT_MINMAX(mina_s, 32, minnummag)
3184 FLOAT_MINMAX(mina_d, 64, minnummag)
3185 #undef FLOAT_MINMAX
3187 /* ternary operations */
3188 #define UNFUSED_FMA(prefix, a, b, c, flags) \
3190 a = prefix##_mul(a, b, &env->active_fpu.fp_status); \
3191 if ((flags) & float_muladd_negate_c) { \
3192 a = prefix##_sub(a, c, &env->active_fpu.fp_status); \
3193 } else { \
3194 a = prefix##_add(a, c, &env->active_fpu.fp_status); \
3196 if ((flags) & float_muladd_negate_result) { \
3197 a = prefix##_chs(a); \
3201 /* FMA based operations */
3202 #define FLOAT_FMA(name, type) \
3203 uint64_t helper_float_ ## name ## _d(CPUMIPSState *env, \
3204 uint64_t fdt0, uint64_t fdt1, \
3205 uint64_t fdt2) \
3207 UNFUSED_FMA(float64, fdt0, fdt1, fdt2, type); \
3208 update_fcr31(env, GETPC()); \
3209 return fdt0; \
3212 uint32_t helper_float_ ## name ## _s(CPUMIPSState *env, \
3213 uint32_t fst0, uint32_t fst1, \
3214 uint32_t fst2) \
3216 UNFUSED_FMA(float32, fst0, fst1, fst2, type); \
3217 update_fcr31(env, GETPC()); \
3218 return fst0; \
3221 uint64_t helper_float_ ## name ## _ps(CPUMIPSState *env, \
3222 uint64_t fdt0, uint64_t fdt1, \
3223 uint64_t fdt2) \
3225 uint32_t fst0 = fdt0 & 0XFFFFFFFF; \
3226 uint32_t fsth0 = fdt0 >> 32; \
3227 uint32_t fst1 = fdt1 & 0XFFFFFFFF; \
3228 uint32_t fsth1 = fdt1 >> 32; \
3229 uint32_t fst2 = fdt2 & 0XFFFFFFFF; \
3230 uint32_t fsth2 = fdt2 >> 32; \
3232 UNFUSED_FMA(float32, fst0, fst1, fst2, type); \
3233 UNFUSED_FMA(float32, fsth0, fsth1, fsth2, type); \
3234 update_fcr31(env, GETPC()); \
3235 return ((uint64_t)fsth0 << 32) | fst0; \
3237 FLOAT_FMA(madd, 0)
3238 FLOAT_FMA(msub, float_muladd_negate_c)
3239 FLOAT_FMA(nmadd, float_muladd_negate_result)
3240 FLOAT_FMA(nmsub, float_muladd_negate_result | float_muladd_negate_c)
3241 #undef FLOAT_FMA
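/* With the flag combinations above, the unfused helpers compute
   madd:    (a * b) + c
   msub:    (a * b) - c
   nmadd: -((a * b) + c)
   nmsub: -((a * b) - c)
   where a, b, c are the three operands in order, with an intermediate
   rounding after the multiply.  The R6 MADDF/MSUBF helpers below use
   float*_muladd() instead, i.e. a single fused rounding. */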
3243 #define FLOAT_FMADDSUB(name, bits, muladd_arg) \
3244 uint ## bits ## _t helper_float_ ## name (CPUMIPSState *env, \
3245 uint ## bits ## _t fs, \
3246 uint ## bits ## _t ft, \
3247 uint ## bits ## _t fd) \
3249 uint ## bits ## _t fdret; \
3251 fdret = float ## bits ## _muladd(fs, ft, fd, muladd_arg, \
3252 &env->active_fpu.fp_status); \
3253 update_fcr31(env, GETPC()); \
3254 return fdret; \
3257 FLOAT_FMADDSUB(maddf_s, 32, 0)
3258 FLOAT_FMADDSUB(maddf_d, 64, 0)
3259 FLOAT_FMADDSUB(msubf_s, 32, float_muladd_negate_product)
3260 FLOAT_FMADDSUB(msubf_d, 64, float_muladd_negate_product)
3261 #undef FLOAT_FMADDSUB
3263 /* compare operations */
3264 #define FOP_COND_D(op, cond) \
3265 void helper_cmp_d_ ## op(CPUMIPSState *env, uint64_t fdt0, \
3266 uint64_t fdt1, int cc) \
3268 int c; \
3269 c = cond; \
3270 update_fcr31(env, GETPC()); \
3271 if (c) \
3272 SET_FP_COND(cc, env->active_fpu); \
3273 else \
3274 CLEAR_FP_COND(cc, env->active_fpu); \
3276 void helper_cmpabs_d_ ## op(CPUMIPSState *env, uint64_t fdt0, \
3277 uint64_t fdt1, int cc) \
3279 int c; \
3280 fdt0 = float64_abs(fdt0); \
3281 fdt1 = float64_abs(fdt1); \
3282 c = cond; \
3283 update_fcr31(env, GETPC()); \
3284 if (c) \
3285 SET_FP_COND(cc, env->active_fpu); \
3286 else \
3287 CLEAR_FP_COND(cc, env->active_fpu); \
3290 /* NOTE: the comma operator makes "cond" evaluate to false,
3291 * but float64_unordered_quiet() is still called. */
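/* For example, the "f" (always-false) case expands to
   c = (float64_unordered_quiet(fdt1, fdt0, &env->active_fpu.fp_status), 0);
   the comparison runs purely for its exception side effects, c is always 0,
   so the condition code is always cleared while update_fcr31() still sees
   any raised flags. */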
3292 FOP_COND_D(f, (float64_unordered_quiet(fdt1, fdt0, &env->active_fpu.fp_status), 0))
3293 FOP_COND_D(un, float64_unordered_quiet(fdt1, fdt0, &env->active_fpu.fp_status))
3294 FOP_COND_D(eq, float64_eq_quiet(fdt0, fdt1, &env->active_fpu.fp_status))
3295 FOP_COND_D(ueq, float64_unordered_quiet(fdt1, fdt0, &env->active_fpu.fp_status) || float64_eq_quiet(fdt0, fdt1, &env->active_fpu.fp_status))
3296 FOP_COND_D(olt, float64_lt_quiet(fdt0, fdt1, &env->active_fpu.fp_status))
3297 FOP_COND_D(ult, float64_unordered_quiet(fdt1, fdt0, &env->active_fpu.fp_status) || float64_lt_quiet(fdt0, fdt1, &env->active_fpu.fp_status))
3298 FOP_COND_D(ole, float64_le_quiet(fdt0, fdt1, &env->active_fpu.fp_status))
3299 FOP_COND_D(ule, float64_unordered_quiet(fdt1, fdt0, &env->active_fpu.fp_status) || float64_le_quiet(fdt0, fdt1, &env->active_fpu.fp_status))
3300 /* NOTE: the comma operator makes "cond" evaluate to false,
3301 * but float64_unordered() is still called. */
3302 FOP_COND_D(sf, (float64_unordered(fdt1, fdt0, &env->active_fpu.fp_status), 0))
3303 FOP_COND_D(ngle, float64_unordered(fdt1, fdt0, &env->active_fpu.fp_status))
3304 FOP_COND_D(seq, float64_eq(fdt0, fdt1, &env->active_fpu.fp_status))
3305 FOP_COND_D(ngl, float64_unordered(fdt1, fdt0, &env->active_fpu.fp_status) || float64_eq(fdt0, fdt1, &env->active_fpu.fp_status))
3306 FOP_COND_D(lt, float64_lt(fdt0, fdt1, &env->active_fpu.fp_status))
3307 FOP_COND_D(nge, float64_unordered(fdt1, fdt0, &env->active_fpu.fp_status) || float64_lt(fdt0, fdt1, &env->active_fpu.fp_status))
3308 FOP_COND_D(le, float64_le(fdt0, fdt1, &env->active_fpu.fp_status))
3309 FOP_COND_D(ngt, float64_unordered(fdt1, fdt0, &env->active_fpu.fp_status) || float64_le(fdt0, fdt1, &env->active_fpu.fp_status))
3311 #define FOP_COND_S(op, cond) \
3312 void helper_cmp_s_ ## op(CPUMIPSState *env, uint32_t fst0, \
3313 uint32_t fst1, int cc) \
3315 int c; \
3316 c = cond; \
3317 update_fcr31(env, GETPC()); \
3318 if (c) \
3319 SET_FP_COND(cc, env->active_fpu); \
3320 else \
3321 CLEAR_FP_COND(cc, env->active_fpu); \
3323 void helper_cmpabs_s_ ## op(CPUMIPSState *env, uint32_t fst0, \
3324 uint32_t fst1, int cc) \
3326 int c; \
3327 fst0 = float32_abs(fst0); \
3328 fst1 = float32_abs(fst1); \
3329 c = cond; \
3330 update_fcr31(env, GETPC()); \
3331 if (c) \
3332 SET_FP_COND(cc, env->active_fpu); \
3333 else \
3334 CLEAR_FP_COND(cc, env->active_fpu); \
3337 /* NOTE: the comma operator makes "cond" evaluate to false,
3338 * but float32_unordered_quiet() is still called. */
3339 FOP_COND_S(f, (float32_unordered_quiet(fst1, fst0, &env->active_fpu.fp_status), 0))
3340 FOP_COND_S(un, float32_unordered_quiet(fst1, fst0, &env->active_fpu.fp_status))
3341 FOP_COND_S(eq, float32_eq_quiet(fst0, fst1, &env->active_fpu.fp_status))
3342 FOP_COND_S(ueq, float32_unordered_quiet(fst1, fst0, &env->active_fpu.fp_status) || float32_eq_quiet(fst0, fst1, &env->active_fpu.fp_status))
3343 FOP_COND_S(olt, float32_lt_quiet(fst0, fst1, &env->active_fpu.fp_status))
3344 FOP_COND_S(ult, float32_unordered_quiet(fst1, fst0, &env->active_fpu.fp_status) || float32_lt_quiet(fst0, fst1, &env->active_fpu.fp_status))
3345 FOP_COND_S(ole, float32_le_quiet(fst0, fst1, &env->active_fpu.fp_status))
3346 FOP_COND_S(ule, float32_unordered_quiet(fst1, fst0, &env->active_fpu.fp_status) || float32_le_quiet(fst0, fst1, &env->active_fpu.fp_status))
3347 /* NOTE: the comma operator makes "cond" evaluate to false,
3348 * but float32_unordered() is still called. */
3349 FOP_COND_S(sf, (float32_unordered(fst1, fst0, &env->active_fpu.fp_status), 0))
3350 FOP_COND_S(ngle, float32_unordered(fst1, fst0, &env->active_fpu.fp_status))
3351 FOP_COND_S(seq, float32_eq(fst0, fst1, &env->active_fpu.fp_status))
3352 FOP_COND_S(ngl, float32_unordered(fst1, fst0, &env->active_fpu.fp_status) || float32_eq(fst0, fst1, &env->active_fpu.fp_status))
3353 FOP_COND_S(lt, float32_lt(fst0, fst1, &env->active_fpu.fp_status))
3354 FOP_COND_S(nge, float32_unordered(fst1, fst0, &env->active_fpu.fp_status) || float32_lt(fst0, fst1, &env->active_fpu.fp_status))
3355 FOP_COND_S(le, float32_le(fst0, fst1, &env->active_fpu.fp_status))
3356 FOP_COND_S(ngt, float32_unordered(fst1, fst0, &env->active_fpu.fp_status) || float32_le(fst0, fst1, &env->active_fpu.fp_status))
3358 #define FOP_COND_PS(op, condl, condh) \
3359 void helper_cmp_ps_ ## op(CPUMIPSState *env, uint64_t fdt0, \
3360 uint64_t fdt1, int cc) \
3362 uint32_t fst0, fsth0, fst1, fsth1; \
3363 int ch, cl; \
3364 fst0 = fdt0 & 0XFFFFFFFF; \
3365 fsth0 = fdt0 >> 32; \
3366 fst1 = fdt1 & 0XFFFFFFFF; \
3367 fsth1 = fdt1 >> 32; \
3368 cl = condl; \
3369 ch = condh; \
3370 update_fcr31(env, GETPC()); \
3371 if (cl) \
3372 SET_FP_COND(cc, env->active_fpu); \
3373 else \
3374 CLEAR_FP_COND(cc, env->active_fpu); \
3375 if (ch) \
3376 SET_FP_COND(cc + 1, env->active_fpu); \
3377 else \
3378 CLEAR_FP_COND(cc + 1, env->active_fpu); \
3380 void helper_cmpabs_ps_ ## op(CPUMIPSState *env, uint64_t fdt0, \
3381 uint64_t fdt1, int cc) \
3383 uint32_t fst0, fsth0, fst1, fsth1; \
3384 int ch, cl; \
3385 fst0 = float32_abs(fdt0 & 0XFFFFFFFF); \
3386 fsth0 = float32_abs(fdt0 >> 32); \
3387 fst1 = float32_abs(fdt1 & 0XFFFFFFFF); \
3388 fsth1 = float32_abs(fdt1 >> 32); \
3389 cl = condl; \
3390 ch = condh; \
3391 update_fcr31(env, GETPC()); \
3392 if (cl) \
3393 SET_FP_COND(cc, env->active_fpu); \
3394 else \
3395 CLEAR_FP_COND(cc, env->active_fpu); \
3396 if (ch) \
3397 SET_FP_COND(cc + 1, env->active_fpu); \
3398 else \
3399 CLEAR_FP_COND(cc + 1, env->active_fpu); \
3402 /* NOTE: the comma operator makes "cond" evaluate to false,
3403 * but float32_unordered_quiet() is still called. */
3404 FOP_COND_PS(f, (float32_unordered_quiet(fst1, fst0, &env->active_fpu.fp_status), 0),
3405 (float32_unordered_quiet(fsth1, fsth0, &env->active_fpu.fp_status), 0))
3406 FOP_COND_PS(un, float32_unordered_quiet(fst1, fst0, &env->active_fpu.fp_status),
3407 float32_unordered_quiet(fsth1, fsth0, &env->active_fpu.fp_status))
3408 FOP_COND_PS(eq, float32_eq_quiet(fst0, fst1, &env->active_fpu.fp_status),
3409 float32_eq_quiet(fsth0, fsth1, &env->active_fpu.fp_status))
3410 FOP_COND_PS(ueq, float32_unordered_quiet(fst1, fst0, &env->active_fpu.fp_status) || float32_eq_quiet(fst0, fst1, &env->active_fpu.fp_status),
3411 float32_unordered_quiet(fsth1, fsth0, &env->active_fpu.fp_status) || float32_eq_quiet(fsth0, fsth1, &env->active_fpu.fp_status))
3412 FOP_COND_PS(olt, float32_lt_quiet(fst0, fst1, &env->active_fpu.fp_status),
3413 float32_lt_quiet(fsth0, fsth1, &env->active_fpu.fp_status))
3414 FOP_COND_PS(ult, float32_unordered_quiet(fst1, fst0, &env->active_fpu.fp_status) || float32_lt_quiet(fst0, fst1, &env->active_fpu.fp_status),
3415 float32_unordered_quiet(fsth1, fsth0, &env->active_fpu.fp_status) || float32_lt_quiet(fsth0, fsth1, &env->active_fpu.fp_status))
3416 FOP_COND_PS(ole, float32_le_quiet(fst0, fst1, &env->active_fpu.fp_status),
3417 float32_le_quiet(fsth0, fsth1, &env->active_fpu.fp_status))
3418 FOP_COND_PS(ule, float32_unordered_quiet(fst1, fst0, &env->active_fpu.fp_status) || float32_le_quiet(fst0, fst1, &env->active_fpu.fp_status),
3419 float32_unordered_quiet(fsth1, fsth0, &env->active_fpu.fp_status) || float32_le_quiet(fsth0, fsth1, &env->active_fpu.fp_status))
3420 /* NOTE: the comma operator makes "cond" evaluate to false,
3421 * but float32_unordered() is still called. */
3422 FOP_COND_PS(sf, (float32_unordered(fst1, fst0, &env->active_fpu.fp_status), 0),
3423 (float32_unordered(fsth1, fsth0, &env->active_fpu.fp_status), 0))
3424 FOP_COND_PS(ngle, float32_unordered(fst1, fst0, &env->active_fpu.fp_status),
3425 float32_unordered(fsth1, fsth0, &env->active_fpu.fp_status))
3426 FOP_COND_PS(seq, float32_eq(fst0, fst1, &env->active_fpu.fp_status),
3427 float32_eq(fsth0, fsth1, &env->active_fpu.fp_status))
3428 FOP_COND_PS(ngl, float32_unordered(fst1, fst0, &env->active_fpu.fp_status) || float32_eq(fst0, fst1, &env->active_fpu.fp_status),
3429 float32_unordered(fsth1, fsth0, &env->active_fpu.fp_status) || float32_eq(fsth0, fsth1, &env->active_fpu.fp_status))
3430 FOP_COND_PS(lt, float32_lt(fst0, fst1, &env->active_fpu.fp_status),
3431 float32_lt(fsth0, fsth1, &env->active_fpu.fp_status))
3432 FOP_COND_PS(nge, float32_unordered(fst1, fst0, &env->active_fpu.fp_status) || float32_lt(fst0, fst1, &env->active_fpu.fp_status),
3433 float32_unordered(fsth1, fsth0, &env->active_fpu.fp_status) || float32_lt(fsth0, fsth1, &env->active_fpu.fp_status))
3434 FOP_COND_PS(le, float32_le(fst0, fst1, &env->active_fpu.fp_status),
3435 float32_le(fsth0, fsth1, &env->active_fpu.fp_status))
3436 FOP_COND_PS(ngt, float32_unordered(fst1, fst0, &env->active_fpu.fp_status) || float32_le(fst0, fst1, &env->active_fpu.fp_status),
3437 float32_unordered(fsth1, fsth0, &env->active_fpu.fp_status) || float32_le(fsth0, fsth1, &env->active_fpu.fp_status))
3439 /* R6 compare operations */
3440 #define FOP_CONDN_D(op, cond) \
3441 uint64_t helper_r6_cmp_d_ ## op(CPUMIPSState * env, uint64_t fdt0, \
3442 uint64_t fdt1) \
3444 uint64_t c; \
3445 c = cond; \
3446 update_fcr31(env, GETPC()); \
3447 if (c) { \
3448 return -1; \
3449 } else { \
3450 return 0; \
3454 /* NOTE: the comma operator makes "cond" evaluate to false,
3455 * but float64_unordered_quiet() is still called. */
3456 FOP_CONDN_D(af, (float64_unordered_quiet(fdt1, fdt0, &env->active_fpu.fp_status), 0))
3457 FOP_CONDN_D(un, (float64_unordered_quiet(fdt1, fdt0, &env->active_fpu.fp_status)))
3458 FOP_CONDN_D(eq, (float64_eq_quiet(fdt0, fdt1, &env->active_fpu.fp_status)))
3459 FOP_CONDN_D(ueq, (float64_unordered_quiet(fdt1, fdt0, &env->active_fpu.fp_status)
3460 || float64_eq_quiet(fdt0, fdt1, &env->active_fpu.fp_status)))
3461 FOP_CONDN_D(lt, (float64_lt_quiet(fdt0, fdt1, &env->active_fpu.fp_status)))
3462 FOP_CONDN_D(ult, (float64_unordered_quiet(fdt1, fdt0, &env->active_fpu.fp_status)
3463 || float64_lt_quiet(fdt0, fdt1, &env->active_fpu.fp_status)))
3464 FOP_CONDN_D(le, (float64_le_quiet(fdt0, fdt1, &env->active_fpu.fp_status)))
3465 FOP_CONDN_D(ule, (float64_unordered_quiet(fdt1, fdt0, &env->active_fpu.fp_status)
3466 || float64_le_quiet(fdt0, fdt1, &env->active_fpu.fp_status)))
3467 /* NOTE: the comma operator makes "cond" evaluate to false,
3468 * but float64_unordered() is still called. */
3469 FOP_CONDN_D(saf, (float64_unordered(fdt1, fdt0, &env->active_fpu.fp_status), 0))
3470 FOP_CONDN_D(sun, (float64_unordered(fdt1, fdt0, &env->active_fpu.fp_status)))
3471 FOP_CONDN_D(seq, (float64_eq(fdt0, fdt1, &env->active_fpu.fp_status)))
3472 FOP_CONDN_D(sueq, (float64_unordered(fdt1, fdt0, &env->active_fpu.fp_status)
3473 || float64_eq(fdt0, fdt1, &env->active_fpu.fp_status)))
3474 FOP_CONDN_D(slt, (float64_lt(fdt0, fdt1, &env->active_fpu.fp_status)))
3475 FOP_CONDN_D(sult, (float64_unordered(fdt1, fdt0, &env->active_fpu.fp_status)
3476 || float64_lt(fdt0, fdt1, &env->active_fpu.fp_status)))
3477 FOP_CONDN_D(sle, (float64_le(fdt0, fdt1, &env->active_fpu.fp_status)))
3478 FOP_CONDN_D(sule, (float64_unordered(fdt1, fdt0, &env->active_fpu.fp_status)
3479 || float64_le(fdt0, fdt1, &env->active_fpu.fp_status)))
3480 FOP_CONDN_D(or, (float64_le_quiet(fdt1, fdt0, &env->active_fpu.fp_status)
3481 || float64_le_quiet(fdt0, fdt1, &env->active_fpu.fp_status)))
3482 FOP_CONDN_D(une, (float64_unordered_quiet(fdt1, fdt0, &env->active_fpu.fp_status)
3483 || float64_lt_quiet(fdt1, fdt0, &env->active_fpu.fp_status)
3484 || float64_lt_quiet(fdt0, fdt1, &env->active_fpu.fp_status)))
3485 FOP_CONDN_D(ne, (float64_lt_quiet(fdt1, fdt0, &env->active_fpu.fp_status)
3486 || float64_lt_quiet(fdt0, fdt1, &env->active_fpu.fp_status)))
3487 FOP_CONDN_D(sor, (float64_le(fdt1, fdt0, &env->active_fpu.fp_status)
3488 || float64_le(fdt0, fdt1, &env->active_fpu.fp_status)))
3489 FOP_CONDN_D(sune, (float64_unordered(fdt1, fdt0, &env->active_fpu.fp_status)
3490 || float64_lt(fdt1, fdt0, &env->active_fpu.fp_status)
3491 || float64_lt(fdt0, fdt1, &env->active_fpu.fp_status)))
3492 FOP_CONDN_D(sne, (float64_lt(fdt1, fdt0, &env->active_fpu.fp_status)
3493 || float64_lt(fdt0, fdt1, &env->active_fpu.fp_status)))
3495 #define FOP_CONDN_S(op, cond) \
3496 uint32_t helper_r6_cmp_s_ ## op(CPUMIPSState * env, uint32_t fst0, \
3497 uint32_t fst1) \
3499 uint64_t c; \
3500 c = cond; \
3501 update_fcr31(env, GETPC()); \
3502 if (c) { \
3503 return -1; \
3504 } else { \
3505 return 0; \
3509 /* NOTE: the comma operator makes "cond" evaluate to false,
3510 * but float32_unordered_quiet() is still called. */
3511 FOP_CONDN_S(af, (float32_unordered_quiet(fst1, fst0, &env->active_fpu.fp_status), 0))
3512 FOP_CONDN_S(un, (float32_unordered_quiet(fst1, fst0, &env->active_fpu.fp_status)))
3513 FOP_CONDN_S(eq, (float32_eq_quiet(fst0, fst1, &env->active_fpu.fp_status)))
3514 FOP_CONDN_S(ueq, (float32_unordered_quiet(fst1, fst0, &env->active_fpu.fp_status)
3515 || float32_eq_quiet(fst0, fst1, &env->active_fpu.fp_status)))
3516 FOP_CONDN_S(lt, (float32_lt_quiet(fst0, fst1, &env->active_fpu.fp_status)))
3517 FOP_CONDN_S(ult, (float32_unordered_quiet(fst1, fst0, &env->active_fpu.fp_status)
3518 || float32_lt_quiet(fst0, fst1, &env->active_fpu.fp_status)))
3519 FOP_CONDN_S(le, (float32_le_quiet(fst0, fst1, &env->active_fpu.fp_status)))
3520 FOP_CONDN_S(ule, (float32_unordered_quiet(fst1, fst0, &env->active_fpu.fp_status)
3521 || float32_le_quiet(fst0, fst1, &env->active_fpu.fp_status)))
3522 /* NOTE: the comma operator makes "cond" evaluate to false,
3523 * but float32_unordered() is still called. */
3524 FOP_CONDN_S(saf, (float32_unordered(fst1, fst0, &env->active_fpu.fp_status), 0))
3525 FOP_CONDN_S(sun, (float32_unordered(fst1, fst0, &env->active_fpu.fp_status)))
3526 FOP_CONDN_S(seq, (float32_eq(fst0, fst1, &env->active_fpu.fp_status)))
3527 FOP_CONDN_S(sueq, (float32_unordered(fst1, fst0, &env->active_fpu.fp_status)
3528 || float32_eq(fst0, fst1, &env->active_fpu.fp_status)))
3529 FOP_CONDN_S(slt, (float32_lt(fst0, fst1, &env->active_fpu.fp_status)))
3530 FOP_CONDN_S(sult, (float32_unordered(fst1, fst0, &env->active_fpu.fp_status)
3531 || float32_lt(fst0, fst1, &env->active_fpu.fp_status)))
3532 FOP_CONDN_S(sle, (float32_le(fst0, fst1, &env->active_fpu.fp_status)))
3533 FOP_CONDN_S(sule, (float32_unordered(fst1, fst0, &env->active_fpu.fp_status)
3534 || float32_le(fst0, fst1, &env->active_fpu.fp_status)))
3535 FOP_CONDN_S(or, (float32_le_quiet(fst1, fst0, &env->active_fpu.fp_status)
3536 || float32_le_quiet(fst0, fst1, &env->active_fpu.fp_status)))
3537 FOP_CONDN_S(une, (float32_unordered_quiet(fst1, fst0, &env->active_fpu.fp_status)
3538 || float32_lt_quiet(fst1, fst0, &env->active_fpu.fp_status)
3539 || float32_lt_quiet(fst0, fst1, &env->active_fpu.fp_status)))
3540 FOP_CONDN_S(ne, (float32_lt_quiet(fst1, fst0, &env->active_fpu.fp_status)
3541 || float32_lt_quiet(fst0, fst1, &env->active_fpu.fp_status)))
3542 FOP_CONDN_S(sor, (float32_le(fst1, fst0, &env->active_fpu.fp_status)
3543 || float32_le(fst0, fst1, &env->active_fpu.fp_status)))
3544 FOP_CONDN_S(sune, (float32_unordered(fst1, fst0, &env->active_fpu.fp_status)
3545 || float32_lt(fst1, fst0, &env->active_fpu.fp_status)
3546 || float32_lt(fst0, fst1, &env->active_fpu.fp_status)))
3547 FOP_CONDN_S(sne, (float32_lt(fst1, fst0, &env->active_fpu.fp_status)
3548 || float32_lt(fst0, fst1, &env->active_fpu.fp_status)))
3550 /* MSA */
3551 /* Data format min and max values */
3552 #define DF_BITS(df) (1 << ((df) + 3))
3554 /* Element-by-element access macros */
3555 #define DF_ELEMENTS(df) (MSA_WRLEN / DF_BITS(df))
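/* (Illustrative note, assuming the usual DF_BYTE..DF_DOUBLE encoding of 0..3
   and a 128-bit MSA vector register, i.e. MSA_WRLEN == 128: DF_BITS yields
   8, 16, 32 and 64, and DF_ELEMENTS 16, 8, 4 and 2 elements per register.) */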
3557 void helper_msa_ld_df(CPUMIPSState *env, uint32_t df, uint32_t wd, uint32_t rs,
3558 int32_t s10)
3560 wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
3561 target_ulong addr = env->active_tc.gpr[rs] + (s10 << df);
3562 int i;
3564 switch (df) {
3565 case DF_BYTE:
3566 for (i = 0; i < DF_ELEMENTS(DF_BYTE); i++) {
3567 pwd->b[i] = do_lbu(env, addr + (i << DF_BYTE),
3568 env->hflags & MIPS_HFLAG_KSU);
3570 break;
3571 case DF_HALF:
3572 for (i = 0; i < DF_ELEMENTS(DF_HALF); i++) {
3573 pwd->h[i] = do_lhu(env, addr + (i << DF_HALF),
3574 env->hflags & MIPS_HFLAG_KSU);
3576 break;
3577 case DF_WORD:
3578 for (i = 0; i < DF_ELEMENTS(DF_WORD); i++) {
3579 pwd->w[i] = do_lw(env, addr + (i << DF_WORD),
3580 env->hflags & MIPS_HFLAG_KSU);
3582 break;
3583 case DF_DOUBLE:
3584 for (i = 0; i < DF_ELEMENTS(DF_DOUBLE); i++) {
3585 pwd->d[i] = do_ld(env, addr + (i << DF_DOUBLE),
3586 env->hflags & MIPS_HFLAG_KSU);
3588 break;
3592 void helper_msa_st_df(CPUMIPSState *env, uint32_t df, uint32_t wd, uint32_t rs,
3593 int32_t s10)
3595 wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
3596 target_ulong addr = env->active_tc.gpr[rs] + (s10 << df);
3597 int i;
3599 switch (df) {
3600 case DF_BYTE:
3601 for (i = 0; i < DF_ELEMENTS(DF_BYTE); i++) {
3602 do_sb(env, addr + (i << DF_BYTE), pwd->b[i],
3603 env->hflags & MIPS_HFLAG_KSU);
3605 break;
3606 case DF_HALF:
3607 for (i = 0; i < DF_ELEMENTS(DF_HALF); i++) {
3608 do_sh(env, addr + (i << DF_HALF), pwd->h[i],
3609 env->hflags & MIPS_HFLAG_KSU);
3611 break;
3612 case DF_WORD:
3613 for (i = 0; i < DF_ELEMENTS(DF_WORD); i++) {
3614 do_sw(env, addr + (i << DF_WORD), pwd->w[i],
3615 env->hflags & MIPS_HFLAG_KSU);
3617 break;
3618 case DF_DOUBLE:
3619 for (i = 0; i < DF_ELEMENTS(DF_DOUBLE); i++) {
3620 do_sd(env, addr + (i << DF_DOUBLE), pwd->d[i],
3621 env->hflags & MIPS_HFLAG_KSU);
3623 break;