sparc: fix qtest
[qemu.git] / target-mips / op_helper.c
blob5627447953e2d92f7a82b4e80c849c919b0bc792
1 /*
2 * MIPS emulation helpers for qemu.
4 * Copyright (c) 2004-2005 Jocelyn Mayer
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
19 #include <stdlib.h>
20 #include "cpu.h"
21 #include "dyngen-exec.h"
23 #include "host-utils.h"
25 #include "helper.h"
27 #if !defined(CONFIG_USER_ONLY)
28 #include "softmmu_exec.h"
29 #endif /* !defined(CONFIG_USER_ONLY) */
31 #ifndef CONFIG_USER_ONLY
32 static inline void cpu_mips_tlb_flush (CPUMIPSState *env, int flush_global);
33 #endif
35 static inline void compute_hflags(CPUMIPSState *env)
37 env->hflags &= ~(MIPS_HFLAG_COP1X | MIPS_HFLAG_64 | MIPS_HFLAG_CP0 |
38 MIPS_HFLAG_F64 | MIPS_HFLAG_FPU | MIPS_HFLAG_KSU |
39 MIPS_HFLAG_UX);
40 if (!(env->CP0_Status & (1 << CP0St_EXL)) &&
41 !(env->CP0_Status & (1 << CP0St_ERL)) &&
42 !(env->hflags & MIPS_HFLAG_DM)) {
43 env->hflags |= (env->CP0_Status >> CP0St_KSU) & MIPS_HFLAG_KSU;
45 #if defined(TARGET_MIPS64)
46 if (((env->hflags & MIPS_HFLAG_KSU) != MIPS_HFLAG_UM) ||
47 (env->CP0_Status & (1 << CP0St_PX)) ||
48 (env->CP0_Status & (1 << CP0St_UX))) {
49 env->hflags |= MIPS_HFLAG_64;
51 if (env->CP0_Status & (1 << CP0St_UX)) {
52 env->hflags |= MIPS_HFLAG_UX;
54 #endif
55 if ((env->CP0_Status & (1 << CP0St_CU0)) ||
56 !(env->hflags & MIPS_HFLAG_KSU)) {
57 env->hflags |= MIPS_HFLAG_CP0;
59 if (env->CP0_Status & (1 << CP0St_CU1)) {
60 env->hflags |= MIPS_HFLAG_FPU;
62 if (env->CP0_Status & (1 << CP0St_FR)) {
63 env->hflags |= MIPS_HFLAG_F64;
65 if (env->insn_flags & ISA_MIPS32R2) {
66 if (env->active_fpu.fcr0 & (1 << FCR0_F64)) {
67 env->hflags |= MIPS_HFLAG_COP1X;
69 } else if (env->insn_flags & ISA_MIPS32) {
70 if (env->hflags & MIPS_HFLAG_64) {
71 env->hflags |= MIPS_HFLAG_COP1X;
73 } else if (env->insn_flags & ISA_MIPS4) {
74 /* All supported MIPS IV CPUs use the XX (CU3) to enable
75 and disable the MIPS IV extensions to the MIPS III ISA.
76 Some other MIPS IV CPUs ignore the bit, so the check here
77 would be too restrictive for them. */
78 if (env->CP0_Status & (1 << CP0St_CU3)) {
79 env->hflags |= MIPS_HFLAG_COP1X;
84 /*****************************************************************************/
85 /* Exceptions processing helpers */
87 void helper_raise_exception_err (uint32_t exception, int error_code)
89 #if 1
90 if (exception < 0x100)
91 qemu_log("%s: %d %d\n", __func__, exception, error_code);
92 #endif
93 env->exception_index = exception;
94 env->error_code = error_code;
95 cpu_loop_exit(env);
98 void helper_raise_exception (uint32_t exception)
100 helper_raise_exception_err(exception, 0);
103 #if !defined(CONFIG_USER_ONLY)
104 static void do_restore_state(uintptr_t pc)
106 TranslationBlock *tb;
108 tb = tb_find_pc (pc);
109 if (tb) {
110 cpu_restore_state(tb, env, pc);
113 #endif
115 #if defined(CONFIG_USER_ONLY)
116 #define HELPER_LD(name, insn, type) \
117 static inline type do_##name(target_ulong addr, int mem_idx) \
119 return (type) insn##_raw(addr); \
121 #else
122 #define HELPER_LD(name, insn, type) \
123 static inline type do_##name(target_ulong addr, int mem_idx) \
125 switch (mem_idx) \
127 case 0: return (type) insn##_kernel(addr); break; \
128 case 1: return (type) insn##_super(addr); break; \
129 default: \
130 case 2: return (type) insn##_user(addr); break; \
133 #endif
134 HELPER_LD(lbu, ldub, uint8_t)
135 HELPER_LD(lw, ldl, int32_t)
136 #ifdef TARGET_MIPS64
137 HELPER_LD(ld, ldq, int64_t)
138 #endif
139 #undef HELPER_LD
141 #if defined(CONFIG_USER_ONLY)
142 #define HELPER_ST(name, insn, type) \
143 static inline void do_##name(target_ulong addr, type val, int mem_idx) \
145 insn##_raw(addr, val); \
147 #else
148 #define HELPER_ST(name, insn, type) \
149 static inline void do_##name(target_ulong addr, type val, int mem_idx) \
151 switch (mem_idx) \
153 case 0: insn##_kernel(addr, val); break; \
154 case 1: insn##_super(addr, val); break; \
155 default: \
156 case 2: insn##_user(addr, val); break; \
159 #endif
160 HELPER_ST(sb, stb, uint8_t)
161 HELPER_ST(sw, stl, uint32_t)
162 #ifdef TARGET_MIPS64
163 HELPER_ST(sd, stq, uint64_t)
164 #endif
165 #undef HELPER_ST
167 target_ulong helper_clo (target_ulong arg1)
169 return clo32(arg1);
172 target_ulong helper_clz (target_ulong arg1)
174 return clz32(arg1);
177 #if defined(TARGET_MIPS64)
178 target_ulong helper_dclo (target_ulong arg1)
180 return clo64(arg1);
183 target_ulong helper_dclz (target_ulong arg1)
185 return clz64(arg1);
187 #endif /* TARGET_MIPS64 */
189 /* 64 bits arithmetic for 32 bits hosts */
190 static inline uint64_t get_HILO (void)
192 return ((uint64_t)(env->active_tc.HI[0]) << 32) | (uint32_t)env->active_tc.LO[0];
195 static inline void set_HILO (uint64_t HILO)
197 env->active_tc.LO[0] = (int32_t)HILO;
198 env->active_tc.HI[0] = (int32_t)(HILO >> 32);
/* Set HI:LO from HILO and hand the (sign-extended) HI half back through
   arg1.  BUG FIX: this must be a macro, not a static inline function —
   the previous inline version assigned to its by-value arg1 parameter,
   so the assignment was lost and every vr54xx caller returned its
   original, stale arg1 instead of the HI result. */
#define set_HIT0_LO(arg1, HILO)                                         \
    do {                                                                \
        uint64_t hilo_ = (HILO);                                        \
        env->active_tc.LO[0] = (int32_t)(hilo_ & 0xFFFFFFFF);           \
        arg1 = env->active_tc.HI[0] = (int32_t)(hilo_ >> 32);           \
    } while (0)
/* Set HI:LO from HILO and hand the (sign-extended) LO half back through
   arg1.  BUG FIX: macro rather than static inline for the same reason
   as set_HIT0_LO — an inline function's write to the by-value arg1
   parameter never reached the caller, so the vr54xx multiply helpers
   returned a stale result. */
#define set_HI_LOT0(arg1, HILO)                                         \
    do {                                                                \
        uint64_t hilo_ = (HILO);                                        \
        arg1 = env->active_tc.LO[0] = (int32_t)(hilo_ & 0xFFFFFFFF);    \
        env->active_tc.HI[0] = (int32_t)(hilo_ >> 32);                  \
    } while (0)
213 /* Multiplication variants of the vr54xx. */
214 target_ulong helper_muls (target_ulong arg1, target_ulong arg2)
216 set_HI_LOT0(arg1, 0 - ((int64_t)(int32_t)arg1 * (int64_t)(int32_t)arg2));
218 return arg1;
221 target_ulong helper_mulsu (target_ulong arg1, target_ulong arg2)
223 set_HI_LOT0(arg1, 0 - ((uint64_t)(uint32_t)arg1 * (uint64_t)(uint32_t)arg2));
225 return arg1;
228 target_ulong helper_macc (target_ulong arg1, target_ulong arg2)
230 set_HI_LOT0(arg1, ((int64_t)get_HILO()) + ((int64_t)(int32_t)arg1 * (int64_t)(int32_t)arg2));
232 return arg1;
235 target_ulong helper_macchi (target_ulong arg1, target_ulong arg2)
237 set_HIT0_LO(arg1, ((int64_t)get_HILO()) + ((int64_t)(int32_t)arg1 * (int64_t)(int32_t)arg2));
239 return arg1;
242 target_ulong helper_maccu (target_ulong arg1, target_ulong arg2)
244 set_HI_LOT0(arg1, ((uint64_t)get_HILO()) + ((uint64_t)(uint32_t)arg1 * (uint64_t)(uint32_t)arg2));
246 return arg1;
249 target_ulong helper_macchiu (target_ulong arg1, target_ulong arg2)
251 set_HIT0_LO(arg1, ((uint64_t)get_HILO()) + ((uint64_t)(uint32_t)arg1 * (uint64_t)(uint32_t)arg2));
253 return arg1;
256 target_ulong helper_msac (target_ulong arg1, target_ulong arg2)
258 set_HI_LOT0(arg1, ((int64_t)get_HILO()) - ((int64_t)(int32_t)arg1 * (int64_t)(int32_t)arg2));
260 return arg1;
263 target_ulong helper_msachi (target_ulong arg1, target_ulong arg2)
265 set_HIT0_LO(arg1, ((int64_t)get_HILO()) - ((int64_t)(int32_t)arg1 * (int64_t)(int32_t)arg2));
267 return arg1;
270 target_ulong helper_msacu (target_ulong arg1, target_ulong arg2)
272 set_HI_LOT0(arg1, ((uint64_t)get_HILO()) - ((uint64_t)(uint32_t)arg1 * (uint64_t)(uint32_t)arg2));
274 return arg1;
277 target_ulong helper_msachiu (target_ulong arg1, target_ulong arg2)
279 set_HIT0_LO(arg1, ((uint64_t)get_HILO()) - ((uint64_t)(uint32_t)arg1 * (uint64_t)(uint32_t)arg2));
281 return arg1;
284 target_ulong helper_mulhi (target_ulong arg1, target_ulong arg2)
286 set_HIT0_LO(arg1, (int64_t)(int32_t)arg1 * (int64_t)(int32_t)arg2);
288 return arg1;
291 target_ulong helper_mulhiu (target_ulong arg1, target_ulong arg2)
293 set_HIT0_LO(arg1, (uint64_t)(uint32_t)arg1 * (uint64_t)(uint32_t)arg2);
295 return arg1;
298 target_ulong helper_mulshi (target_ulong arg1, target_ulong arg2)
300 set_HIT0_LO(arg1, 0 - ((int64_t)(int32_t)arg1 * (int64_t)(int32_t)arg2));
302 return arg1;
305 target_ulong helper_mulshiu (target_ulong arg1, target_ulong arg2)
307 set_HIT0_LO(arg1, 0 - ((uint64_t)(uint32_t)arg1 * (uint64_t)(uint32_t)arg2));
309 return arg1;
#ifdef TARGET_MIPS64
/* 64x64 -> 128 bit multiply: low half to LO, high half to HI. */
void helper_dmult (target_ulong arg1, target_ulong arg2)
{
    muls64(&(env->active_tc.LO[0]), &(env->active_tc.HI[0]), arg1, arg2);
}

void helper_dmultu (target_ulong arg1, target_ulong arg2)
{
    mulu64(&(env->active_tc.LO[0]), &(env->active_tc.HI[0]), arg1, arg2);
}
#endif
324 #ifndef CONFIG_USER_ONLY
326 static inline target_phys_addr_t do_translate_address(target_ulong address, int rw)
328 target_phys_addr_t lladdr;
330 lladdr = cpu_mips_translate_address(env, address, rw);
332 if (lladdr == -1LL) {
333 cpu_loop_exit(env);
334 } else {
335 return lladdr;
339 #define HELPER_LD_ATOMIC(name, insn) \
340 target_ulong helper_##name(target_ulong arg, int mem_idx) \
342 env->lladdr = do_translate_address(arg, 0); \
343 env->llval = do_##insn(arg, mem_idx); \
344 return env->llval; \
346 HELPER_LD_ATOMIC(ll, lw)
347 #ifdef TARGET_MIPS64
348 HELPER_LD_ATOMIC(lld, ld)
349 #endif
350 #undef HELPER_LD_ATOMIC
352 #define HELPER_ST_ATOMIC(name, ld_insn, st_insn, almask) \
353 target_ulong helper_##name(target_ulong arg1, target_ulong arg2, int mem_idx) \
355 target_long tmp; \
357 if (arg2 & almask) { \
358 env->CP0_BadVAddr = arg2; \
359 helper_raise_exception(EXCP_AdES); \
361 if (do_translate_address(arg2, 1) == env->lladdr) { \
362 tmp = do_##ld_insn(arg2, mem_idx); \
363 if (tmp == env->llval) { \
364 do_##st_insn(arg2, arg1, mem_idx); \
365 return 1; \
368 return 0; \
370 HELPER_ST_ATOMIC(sc, lw, sw, 0x3)
371 #ifdef TARGET_MIPS64
372 HELPER_ST_ATOMIC(scd, ld, sd, 0x7)
373 #endif
374 #undef HELPER_ST_ATOMIC
375 #endif
/* Byte-lane helpers for the unaligned LWL/LWR/SWL/SWR emulation:
   GET_LMASK gives the byte offset within the word (endian-adjusted),
   GET_OFFSET steps toward the rest of the word in memory order. */
#ifdef TARGET_WORDS_BIGENDIAN
#define GET_LMASK(v) ((v) & 3)
#define GET_OFFSET(addr, offset) (addr + (offset))
#else
#define GET_LMASK(v) (((v) & 3) ^ 3)
#define GET_OFFSET(addr, offset) (addr - (offset))
#endif
385 target_ulong helper_lwl(target_ulong arg1, target_ulong arg2, int mem_idx)
387 target_ulong tmp;
389 tmp = do_lbu(arg2, mem_idx);
390 arg1 = (arg1 & 0x00FFFFFF) | (tmp << 24);
392 if (GET_LMASK(arg2) <= 2) {
393 tmp = do_lbu(GET_OFFSET(arg2, 1), mem_idx);
394 arg1 = (arg1 & 0xFF00FFFF) | (tmp << 16);
397 if (GET_LMASK(arg2) <= 1) {
398 tmp = do_lbu(GET_OFFSET(arg2, 2), mem_idx);
399 arg1 = (arg1 & 0xFFFF00FF) | (tmp << 8);
402 if (GET_LMASK(arg2) == 0) {
403 tmp = do_lbu(GET_OFFSET(arg2, 3), mem_idx);
404 arg1 = (arg1 & 0xFFFFFF00) | tmp;
406 return (int32_t)arg1;
409 target_ulong helper_lwr(target_ulong arg1, target_ulong arg2, int mem_idx)
411 target_ulong tmp;
413 tmp = do_lbu(arg2, mem_idx);
414 arg1 = (arg1 & 0xFFFFFF00) | tmp;
416 if (GET_LMASK(arg2) >= 1) {
417 tmp = do_lbu(GET_OFFSET(arg2, -1), mem_idx);
418 arg1 = (arg1 & 0xFFFF00FF) | (tmp << 8);
421 if (GET_LMASK(arg2) >= 2) {
422 tmp = do_lbu(GET_OFFSET(arg2, -2), mem_idx);
423 arg1 = (arg1 & 0xFF00FFFF) | (tmp << 16);
426 if (GET_LMASK(arg2) == 3) {
427 tmp = do_lbu(GET_OFFSET(arg2, -3), mem_idx);
428 arg1 = (arg1 & 0x00FFFFFF) | (tmp << 24);
430 return (int32_t)arg1;
433 void helper_swl(target_ulong arg1, target_ulong arg2, int mem_idx)
435 do_sb(arg2, (uint8_t)(arg1 >> 24), mem_idx);
437 if (GET_LMASK(arg2) <= 2)
438 do_sb(GET_OFFSET(arg2, 1), (uint8_t)(arg1 >> 16), mem_idx);
440 if (GET_LMASK(arg2) <= 1)
441 do_sb(GET_OFFSET(arg2, 2), (uint8_t)(arg1 >> 8), mem_idx);
443 if (GET_LMASK(arg2) == 0)
444 do_sb(GET_OFFSET(arg2, 3), (uint8_t)arg1, mem_idx);
447 void helper_swr(target_ulong arg1, target_ulong arg2, int mem_idx)
449 do_sb(arg2, (uint8_t)arg1, mem_idx);
451 if (GET_LMASK(arg2) >= 1)
452 do_sb(GET_OFFSET(arg2, -1), (uint8_t)(arg1 >> 8), mem_idx);
454 if (GET_LMASK(arg2) >= 2)
455 do_sb(GET_OFFSET(arg2, -2), (uint8_t)(arg1 >> 16), mem_idx);
457 if (GET_LMASK(arg2) == 3)
458 do_sb(GET_OFFSET(arg2, -3), (uint8_t)(arg1 >> 24), mem_idx);
461 #if defined(TARGET_MIPS64)
462 /* "half" load and stores. We must do the memory access inline,
463 or fault handling won't work. */
465 #ifdef TARGET_WORDS_BIGENDIAN
466 #define GET_LMASK64(v) ((v) & 7)
467 #else
468 #define GET_LMASK64(v) (((v) & 7) ^ 7)
469 #endif
471 target_ulong helper_ldl(target_ulong arg1, target_ulong arg2, int mem_idx)
473 uint64_t tmp;
475 tmp = do_lbu(arg2, mem_idx);
476 arg1 = (arg1 & 0x00FFFFFFFFFFFFFFULL) | (tmp << 56);
478 if (GET_LMASK64(arg2) <= 6) {
479 tmp = do_lbu(GET_OFFSET(arg2, 1), mem_idx);
480 arg1 = (arg1 & 0xFF00FFFFFFFFFFFFULL) | (tmp << 48);
483 if (GET_LMASK64(arg2) <= 5) {
484 tmp = do_lbu(GET_OFFSET(arg2, 2), mem_idx);
485 arg1 = (arg1 & 0xFFFF00FFFFFFFFFFULL) | (tmp << 40);
488 if (GET_LMASK64(arg2) <= 4) {
489 tmp = do_lbu(GET_OFFSET(arg2, 3), mem_idx);
490 arg1 = (arg1 & 0xFFFFFF00FFFFFFFFULL) | (tmp << 32);
493 if (GET_LMASK64(arg2) <= 3) {
494 tmp = do_lbu(GET_OFFSET(arg2, 4), mem_idx);
495 arg1 = (arg1 & 0xFFFFFFFF00FFFFFFULL) | (tmp << 24);
498 if (GET_LMASK64(arg2) <= 2) {
499 tmp = do_lbu(GET_OFFSET(arg2, 5), mem_idx);
500 arg1 = (arg1 & 0xFFFFFFFFFF00FFFFULL) | (tmp << 16);
503 if (GET_LMASK64(arg2) <= 1) {
504 tmp = do_lbu(GET_OFFSET(arg2, 6), mem_idx);
505 arg1 = (arg1 & 0xFFFFFFFFFFFF00FFULL) | (tmp << 8);
508 if (GET_LMASK64(arg2) == 0) {
509 tmp = do_lbu(GET_OFFSET(arg2, 7), mem_idx);
510 arg1 = (arg1 & 0xFFFFFFFFFFFFFF00ULL) | tmp;
513 return arg1;
516 target_ulong helper_ldr(target_ulong arg1, target_ulong arg2, int mem_idx)
518 uint64_t tmp;
520 tmp = do_lbu(arg2, mem_idx);
521 arg1 = (arg1 & 0xFFFFFFFFFFFFFF00ULL) | tmp;
523 if (GET_LMASK64(arg2) >= 1) {
524 tmp = do_lbu(GET_OFFSET(arg2, -1), mem_idx);
525 arg1 = (arg1 & 0xFFFFFFFFFFFF00FFULL) | (tmp << 8);
528 if (GET_LMASK64(arg2) >= 2) {
529 tmp = do_lbu(GET_OFFSET(arg2, -2), mem_idx);
530 arg1 = (arg1 & 0xFFFFFFFFFF00FFFFULL) | (tmp << 16);
533 if (GET_LMASK64(arg2) >= 3) {
534 tmp = do_lbu(GET_OFFSET(arg2, -3), mem_idx);
535 arg1 = (arg1 & 0xFFFFFFFF00FFFFFFULL) | (tmp << 24);
538 if (GET_LMASK64(arg2) >= 4) {
539 tmp = do_lbu(GET_OFFSET(arg2, -4), mem_idx);
540 arg1 = (arg1 & 0xFFFFFF00FFFFFFFFULL) | (tmp << 32);
543 if (GET_LMASK64(arg2) >= 5) {
544 tmp = do_lbu(GET_OFFSET(arg2, -5), mem_idx);
545 arg1 = (arg1 & 0xFFFF00FFFFFFFFFFULL) | (tmp << 40);
548 if (GET_LMASK64(arg2) >= 6) {
549 tmp = do_lbu(GET_OFFSET(arg2, -6), mem_idx);
550 arg1 = (arg1 & 0xFF00FFFFFFFFFFFFULL) | (tmp << 48);
553 if (GET_LMASK64(arg2) == 7) {
554 tmp = do_lbu(GET_OFFSET(arg2, -7), mem_idx);
555 arg1 = (arg1 & 0x00FFFFFFFFFFFFFFULL) | (tmp << 56);
558 return arg1;
561 void helper_sdl(target_ulong arg1, target_ulong arg2, int mem_idx)
563 do_sb(arg2, (uint8_t)(arg1 >> 56), mem_idx);
565 if (GET_LMASK64(arg2) <= 6)
566 do_sb(GET_OFFSET(arg2, 1), (uint8_t)(arg1 >> 48), mem_idx);
568 if (GET_LMASK64(arg2) <= 5)
569 do_sb(GET_OFFSET(arg2, 2), (uint8_t)(arg1 >> 40), mem_idx);
571 if (GET_LMASK64(arg2) <= 4)
572 do_sb(GET_OFFSET(arg2, 3), (uint8_t)(arg1 >> 32), mem_idx);
574 if (GET_LMASK64(arg2) <= 3)
575 do_sb(GET_OFFSET(arg2, 4), (uint8_t)(arg1 >> 24), mem_idx);
577 if (GET_LMASK64(arg2) <= 2)
578 do_sb(GET_OFFSET(arg2, 5), (uint8_t)(arg1 >> 16), mem_idx);
580 if (GET_LMASK64(arg2) <= 1)
581 do_sb(GET_OFFSET(arg2, 6), (uint8_t)(arg1 >> 8), mem_idx);
583 if (GET_LMASK64(arg2) <= 0)
584 do_sb(GET_OFFSET(arg2, 7), (uint8_t)arg1, mem_idx);
587 void helper_sdr(target_ulong arg1, target_ulong arg2, int mem_idx)
589 do_sb(arg2, (uint8_t)arg1, mem_idx);
591 if (GET_LMASK64(arg2) >= 1)
592 do_sb(GET_OFFSET(arg2, -1), (uint8_t)(arg1 >> 8), mem_idx);
594 if (GET_LMASK64(arg2) >= 2)
595 do_sb(GET_OFFSET(arg2, -2), (uint8_t)(arg1 >> 16), mem_idx);
597 if (GET_LMASK64(arg2) >= 3)
598 do_sb(GET_OFFSET(arg2, -3), (uint8_t)(arg1 >> 24), mem_idx);
600 if (GET_LMASK64(arg2) >= 4)
601 do_sb(GET_OFFSET(arg2, -4), (uint8_t)(arg1 >> 32), mem_idx);
603 if (GET_LMASK64(arg2) >= 5)
604 do_sb(GET_OFFSET(arg2, -5), (uint8_t)(arg1 >> 40), mem_idx);
606 if (GET_LMASK64(arg2) >= 6)
607 do_sb(GET_OFFSET(arg2, -6), (uint8_t)(arg1 >> 48), mem_idx);
609 if (GET_LMASK64(arg2) == 7)
610 do_sb(GET_OFFSET(arg2, -7), (uint8_t)(arg1 >> 56), mem_idx);
612 #endif /* TARGET_MIPS64 */
614 static const int multiple_regs[] = { 16, 17, 18, 19, 20, 21, 22, 23, 30 };
616 void helper_lwm (target_ulong addr, target_ulong reglist, uint32_t mem_idx)
618 target_ulong base_reglist = reglist & 0xf;
619 target_ulong do_r31 = reglist & 0x10;
620 #ifdef CONFIG_USER_ONLY
621 #undef ldfun
622 #define ldfun ldl_raw
623 #else
624 uint32_t (*ldfun)(target_ulong);
626 switch (mem_idx)
628 case 0: ldfun = ldl_kernel; break;
629 case 1: ldfun = ldl_super; break;
630 default:
631 case 2: ldfun = ldl_user; break;
633 #endif
635 if (base_reglist > 0 && base_reglist <= ARRAY_SIZE (multiple_regs)) {
636 target_ulong i;
638 for (i = 0; i < base_reglist; i++) {
639 env->active_tc.gpr[multiple_regs[i]] = (target_long) ldfun(addr);
640 addr += 4;
644 if (do_r31) {
645 env->active_tc.gpr[31] = (target_long) ldfun(addr);
649 void helper_swm (target_ulong addr, target_ulong reglist, uint32_t mem_idx)
651 target_ulong base_reglist = reglist & 0xf;
652 target_ulong do_r31 = reglist & 0x10;
653 #ifdef CONFIG_USER_ONLY
654 #undef stfun
655 #define stfun stl_raw
656 #else
657 void (*stfun)(target_ulong, uint32_t);
659 switch (mem_idx)
661 case 0: stfun = stl_kernel; break;
662 case 1: stfun = stl_super; break;
663 default:
664 case 2: stfun = stl_user; break;
666 #endif
668 if (base_reglist > 0 && base_reglist <= ARRAY_SIZE (multiple_regs)) {
669 target_ulong i;
671 for (i = 0; i < base_reglist; i++) {
672 stfun(addr, env->active_tc.gpr[multiple_regs[i]]);
673 addr += 4;
677 if (do_r31) {
678 stfun(addr, env->active_tc.gpr[31]);
682 #if defined(TARGET_MIPS64)
683 void helper_ldm (target_ulong addr, target_ulong reglist, uint32_t mem_idx)
685 target_ulong base_reglist = reglist & 0xf;
686 target_ulong do_r31 = reglist & 0x10;
687 #ifdef CONFIG_USER_ONLY
688 #undef ldfun
689 #define ldfun ldq_raw
690 #else
691 uint64_t (*ldfun)(target_ulong);
693 switch (mem_idx)
695 case 0: ldfun = ldq_kernel; break;
696 case 1: ldfun = ldq_super; break;
697 default:
698 case 2: ldfun = ldq_user; break;
700 #endif
702 if (base_reglist > 0 && base_reglist <= ARRAY_SIZE (multiple_regs)) {
703 target_ulong i;
705 for (i = 0; i < base_reglist; i++) {
706 env->active_tc.gpr[multiple_regs[i]] = ldfun(addr);
707 addr += 8;
711 if (do_r31) {
712 env->active_tc.gpr[31] = ldfun(addr);
716 void helper_sdm (target_ulong addr, target_ulong reglist, uint32_t mem_idx)
718 target_ulong base_reglist = reglist & 0xf;
719 target_ulong do_r31 = reglist & 0x10;
720 #ifdef CONFIG_USER_ONLY
721 #undef stfun
722 #define stfun stq_raw
723 #else
724 void (*stfun)(target_ulong, uint64_t);
726 switch (mem_idx)
728 case 0: stfun = stq_kernel; break;
729 case 1: stfun = stq_super; break;
730 default:
731 case 2: stfun = stq_user; break;
733 #endif
735 if (base_reglist > 0 && base_reglist <= ARRAY_SIZE (multiple_regs)) {
736 target_ulong i;
738 for (i = 0; i < base_reglist; i++) {
739 stfun(addr, env->active_tc.gpr[multiple_regs[i]]);
740 addr += 8;
744 if (do_r31) {
745 stfun(addr, env->active_tc.gpr[31]);
748 #endif
750 #ifndef CONFIG_USER_ONLY
751 /* SMP helpers. */
752 static int mips_vpe_is_wfi(CPUMIPSState *c)
754 /* If the VPE is halted but otherwise active, it means it's waiting for
755 an interrupt. */
756 return c->halted && mips_vpe_active(c);
759 static inline void mips_vpe_wake(CPUMIPSState *c)
761 /* Dont set ->halted = 0 directly, let it be done via cpu_has_work
762 because there might be other conditions that state that c should
763 be sleeping. */
764 cpu_interrupt(c, CPU_INTERRUPT_WAKE);
767 static inline void mips_vpe_sleep(CPUMIPSState *c)
769 /* The VPE was shut off, really go to bed.
770 Reset any old _WAKE requests. */
771 c->halted = 1;
772 cpu_reset_interrupt(c, CPU_INTERRUPT_WAKE);
775 static inline void mips_tc_wake(CPUMIPSState *c, int tc)
777 /* FIXME: TC reschedule. */
778 if (mips_vpe_active(c) && !mips_vpe_is_wfi(c)) {
779 mips_vpe_wake(c);
783 static inline void mips_tc_sleep(CPUMIPSState *c, int tc)
785 /* FIXME: TC reschedule. */
786 if (!mips_vpe_active(c)) {
787 mips_vpe_sleep(c);
791 /* tc should point to an int with the value of the global TC index.
792 This function will transform it into a local index within the
793 returned CPUMIPSState.
795 FIXME: This code assumes that all VPEs have the same number of TCs,
796 which depends on runtime setup. Can probably be fixed by
797 walking the list of CPUMIPSStates. */
798 static CPUMIPSState *mips_cpu_map_tc(int *tc)
800 CPUMIPSState *other;
801 int vpe_idx, nr_threads = env->nr_threads;
802 int tc_idx = *tc;
804 if (!(env->CP0_VPEConf0 & (1 << CP0VPEC0_MVP))) {
805 /* Not allowed to address other CPUs. */
806 *tc = env->current_tc;
807 return env;
810 vpe_idx = tc_idx / nr_threads;
811 *tc = tc_idx % nr_threads;
812 other = qemu_get_cpu(vpe_idx);
813 return other ? other : env;
816 /* The per VPE CP0_Status register shares some fields with the per TC
817 CP0_TCStatus registers. These fields are wired to the same registers,
818 so changes to either of them should be reflected on both registers.
820 Also, EntryHi shares the bottom 8 bit ASID with TCStauts.
822 These helper call synchronizes the regs for a given cpu. */
824 /* Called for updates to CP0_Status. */
825 static void sync_c0_status(CPUMIPSState *cpu, int tc)
827 int32_t tcstatus, *tcst;
828 uint32_t v = cpu->CP0_Status;
829 uint32_t cu, mx, asid, ksu;
830 uint32_t mask = ((1 << CP0TCSt_TCU3)
831 | (1 << CP0TCSt_TCU2)
832 | (1 << CP0TCSt_TCU1)
833 | (1 << CP0TCSt_TCU0)
834 | (1 << CP0TCSt_TMX)
835 | (3 << CP0TCSt_TKSU)
836 | (0xff << CP0TCSt_TASID));
838 cu = (v >> CP0St_CU0) & 0xf;
839 mx = (v >> CP0St_MX) & 0x1;
840 ksu = (v >> CP0St_KSU) & 0x3;
841 asid = env->CP0_EntryHi & 0xff;
843 tcstatus = cu << CP0TCSt_TCU0;
844 tcstatus |= mx << CP0TCSt_TMX;
845 tcstatus |= ksu << CP0TCSt_TKSU;
846 tcstatus |= asid;
848 if (tc == cpu->current_tc) {
849 tcst = &cpu->active_tc.CP0_TCStatus;
850 } else {
851 tcst = &cpu->tcs[tc].CP0_TCStatus;
854 *tcst &= ~mask;
855 *tcst |= tcstatus;
856 compute_hflags(cpu);
859 /* Called for updates to CP0_TCStatus. */
860 static void sync_c0_tcstatus(CPUMIPSState *cpu, int tc, target_ulong v)
862 uint32_t status;
863 uint32_t tcu, tmx, tasid, tksu;
864 uint32_t mask = ((1 << CP0St_CU3)
865 | (1 << CP0St_CU2)
866 | (1 << CP0St_CU1)
867 | (1 << CP0St_CU0)
868 | (1 << CP0St_MX)
869 | (3 << CP0St_KSU));
871 tcu = (v >> CP0TCSt_TCU0) & 0xf;
872 tmx = (v >> CP0TCSt_TMX) & 0x1;
873 tasid = v & 0xff;
874 tksu = (v >> CP0TCSt_TKSU) & 0x3;
876 status = tcu << CP0St_CU0;
877 status |= tmx << CP0St_MX;
878 status |= tksu << CP0St_KSU;
880 cpu->CP0_Status &= ~mask;
881 cpu->CP0_Status |= status;
883 /* Sync the TASID with EntryHi. */
884 cpu->CP0_EntryHi &= ~0xff;
885 cpu->CP0_EntryHi = tasid;
887 compute_hflags(cpu);
890 /* Called for updates to CP0_EntryHi. */
891 static void sync_c0_entryhi(CPUMIPSState *cpu, int tc)
893 int32_t *tcst;
894 uint32_t asid, v = cpu->CP0_EntryHi;
896 asid = v & 0xff;
898 if (tc == cpu->current_tc) {
899 tcst = &cpu->active_tc.CP0_TCStatus;
900 } else {
901 tcst = &cpu->tcs[tc].CP0_TCStatus;
904 *tcst &= ~0xff;
905 *tcst |= asid;
908 /* CP0 helpers */
909 target_ulong helper_mfc0_mvpcontrol (void)
911 return env->mvp->CP0_MVPControl;
914 target_ulong helper_mfc0_mvpconf0 (void)
916 return env->mvp->CP0_MVPConf0;
919 target_ulong helper_mfc0_mvpconf1 (void)
921 return env->mvp->CP0_MVPConf1;
924 target_ulong helper_mfc0_random (void)
926 return (int32_t)cpu_mips_get_random(env);
929 target_ulong helper_mfc0_tcstatus (void)
931 return env->active_tc.CP0_TCStatus;
934 target_ulong helper_mftc0_tcstatus(void)
936 int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
937 CPUMIPSState *other = mips_cpu_map_tc(&other_tc);
939 if (other_tc == other->current_tc)
940 return other->active_tc.CP0_TCStatus;
941 else
942 return other->tcs[other_tc].CP0_TCStatus;
945 target_ulong helper_mfc0_tcbind (void)
947 return env->active_tc.CP0_TCBind;
950 target_ulong helper_mftc0_tcbind(void)
952 int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
953 CPUMIPSState *other = mips_cpu_map_tc(&other_tc);
955 if (other_tc == other->current_tc)
956 return other->active_tc.CP0_TCBind;
957 else
958 return other->tcs[other_tc].CP0_TCBind;
961 target_ulong helper_mfc0_tcrestart (void)
963 return env->active_tc.PC;
966 target_ulong helper_mftc0_tcrestart(void)
968 int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
969 CPUMIPSState *other = mips_cpu_map_tc(&other_tc);
971 if (other_tc == other->current_tc)
972 return other->active_tc.PC;
973 else
974 return other->tcs[other_tc].PC;
977 target_ulong helper_mfc0_tchalt (void)
979 return env->active_tc.CP0_TCHalt;
982 target_ulong helper_mftc0_tchalt(void)
984 int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
985 CPUMIPSState *other = mips_cpu_map_tc(&other_tc);
987 if (other_tc == other->current_tc)
988 return other->active_tc.CP0_TCHalt;
989 else
990 return other->tcs[other_tc].CP0_TCHalt;
993 target_ulong helper_mfc0_tccontext (void)
995 return env->active_tc.CP0_TCContext;
998 target_ulong helper_mftc0_tccontext(void)
1000 int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
1001 CPUMIPSState *other = mips_cpu_map_tc(&other_tc);
1003 if (other_tc == other->current_tc)
1004 return other->active_tc.CP0_TCContext;
1005 else
1006 return other->tcs[other_tc].CP0_TCContext;
1009 target_ulong helper_mfc0_tcschedule (void)
1011 return env->active_tc.CP0_TCSchedule;
1014 target_ulong helper_mftc0_tcschedule(void)
1016 int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
1017 CPUMIPSState *other = mips_cpu_map_tc(&other_tc);
1019 if (other_tc == other->current_tc)
1020 return other->active_tc.CP0_TCSchedule;
1021 else
1022 return other->tcs[other_tc].CP0_TCSchedule;
1025 target_ulong helper_mfc0_tcschefback (void)
1027 return env->active_tc.CP0_TCScheFBack;
1030 target_ulong helper_mftc0_tcschefback(void)
1032 int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
1033 CPUMIPSState *other = mips_cpu_map_tc(&other_tc);
1035 if (other_tc == other->current_tc)
1036 return other->active_tc.CP0_TCScheFBack;
1037 else
1038 return other->tcs[other_tc].CP0_TCScheFBack;
1041 target_ulong helper_mfc0_count (void)
1043 return (int32_t)cpu_mips_get_count(env);
1046 target_ulong helper_mftc0_entryhi(void)
1048 int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
1049 CPUMIPSState *other = mips_cpu_map_tc(&other_tc);
1051 return other->CP0_EntryHi;
1054 target_ulong helper_mftc0_cause(void)
1056 int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
1057 int32_t tccause;
1058 CPUMIPSState *other = mips_cpu_map_tc(&other_tc);
1060 if (other_tc == other->current_tc) {
1061 tccause = other->CP0_Cause;
1062 } else {
1063 tccause = other->CP0_Cause;
1066 return tccause;
1069 target_ulong helper_mftc0_status(void)
1071 int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
1072 CPUMIPSState *other = mips_cpu_map_tc(&other_tc);
1074 return other->CP0_Status;
1077 target_ulong helper_mfc0_lladdr (void)
1079 return (int32_t)(env->lladdr >> env->CP0_LLAddr_shift);
1082 target_ulong helper_mfc0_watchlo (uint32_t sel)
1084 return (int32_t)env->CP0_WatchLo[sel];
1087 target_ulong helper_mfc0_watchhi (uint32_t sel)
1089 return env->CP0_WatchHi[sel];
1092 target_ulong helper_mfc0_debug (void)
1094 target_ulong t0 = env->CP0_Debug;
1095 if (env->hflags & MIPS_HFLAG_DM)
1096 t0 |= 1 << CP0DB_DM;
1098 return t0;
1101 target_ulong helper_mftc0_debug(void)
1103 int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
1104 int32_t tcstatus;
1105 CPUMIPSState *other = mips_cpu_map_tc(&other_tc);
1107 if (other_tc == other->current_tc)
1108 tcstatus = other->active_tc.CP0_Debug_tcstatus;
1109 else
1110 tcstatus = other->tcs[other_tc].CP0_Debug_tcstatus;
1112 /* XXX: Might be wrong, check with EJTAG spec. */
1113 return (other->CP0_Debug & ~((1 << CP0DB_SSt) | (1 << CP0DB_Halt))) |
1114 (tcstatus & ((1 << CP0DB_SSt) | (1 << CP0DB_Halt)));
#if defined(TARGET_MIPS64)
/* dmfc0 variants: full 64-bit reads (no sign-extension truncation). */
target_ulong helper_dmfc0_tcrestart (void)
{
    return env->active_tc.PC;
}

target_ulong helper_dmfc0_tchalt (void)
{
    return env->active_tc.CP0_TCHalt;
}

target_ulong helper_dmfc0_tccontext (void)
{
    return env->active_tc.CP0_TCContext;
}

target_ulong helper_dmfc0_tcschedule (void)
{
    return env->active_tc.CP0_TCSchedule;
}

target_ulong helper_dmfc0_tcschefback (void)
{
    return env->active_tc.CP0_TCScheFBack;
}

target_ulong helper_dmfc0_lladdr (void)
{
    return env->lladdr >> env->CP0_LLAddr_shift;
}

target_ulong helper_dmfc0_watchlo (uint32_t sel)
{
    return env->CP0_WatchLo[sel];
}
#endif /* TARGET_MIPS64 */
1154 void helper_mtc0_index (target_ulong arg1)
1156 int num = 1;
1157 unsigned int tmp = env->tlb->nb_tlb;
1159 do {
1160 tmp >>= 1;
1161 num <<= 1;
1162 } while (tmp);
1163 env->CP0_Index = (env->CP0_Index & 0x80000000) | (arg1 & (num - 1));
1166 void helper_mtc0_mvpcontrol (target_ulong arg1)
1168 uint32_t mask = 0;
1169 uint32_t newval;
1171 if (env->CP0_VPEConf0 & (1 << CP0VPEC0_MVP))
1172 mask |= (1 << CP0MVPCo_CPA) | (1 << CP0MVPCo_VPC) |
1173 (1 << CP0MVPCo_EVP);
1174 if (env->mvp->CP0_MVPControl & (1 << CP0MVPCo_VPC))
1175 mask |= (1 << CP0MVPCo_STLB);
1176 newval = (env->mvp->CP0_MVPControl & ~mask) | (arg1 & mask);
1178 // TODO: Enable/disable shared TLB, enable/disable VPEs.
1180 env->mvp->CP0_MVPControl = newval;
1183 void helper_mtc0_vpecontrol (target_ulong arg1)
1185 uint32_t mask;
1186 uint32_t newval;
1188 mask = (1 << CP0VPECo_YSI) | (1 << CP0VPECo_GSI) |
1189 (1 << CP0VPECo_TE) | (0xff << CP0VPECo_TargTC);
1190 newval = (env->CP0_VPEControl & ~mask) | (arg1 & mask);
1192 /* Yield scheduler intercept not implemented. */
1193 /* Gating storage scheduler intercept not implemented. */
1195 // TODO: Enable/disable TCs.
1197 env->CP0_VPEControl = newval;
1200 void helper_mttc0_vpecontrol(target_ulong arg1)
1202 int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
1203 CPUMIPSState *other = mips_cpu_map_tc(&other_tc);
1204 uint32_t mask;
1205 uint32_t newval;
1207 mask = (1 << CP0VPECo_YSI) | (1 << CP0VPECo_GSI) |
1208 (1 << CP0VPECo_TE) | (0xff << CP0VPECo_TargTC);
1209 newval = (other->CP0_VPEControl & ~mask) | (arg1 & mask);
1211 /* TODO: Enable/disable TCs. */
1213 other->CP0_VPEControl = newval;
1216 target_ulong helper_mftc0_vpecontrol(void)
1218 int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
1219 CPUMIPSState *other = mips_cpu_map_tc(&other_tc);
1220 /* FIXME: Mask away return zero on read bits. */
1221 return other->CP0_VPEControl;
1224 target_ulong helper_mftc0_vpeconf0(void)
1226 int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
1227 CPUMIPSState *other = mips_cpu_map_tc(&other_tc);
1229 return other->CP0_VPEConf0;
1232 void helper_mtc0_vpeconf0 (target_ulong arg1)
1234 uint32_t mask = 0;
1235 uint32_t newval;
1237 if (env->CP0_VPEConf0 & (1 << CP0VPEC0_MVP)) {
1238 if (env->CP0_VPEConf0 & (1 << CP0VPEC0_VPA))
1239 mask |= (0xff << CP0VPEC0_XTC);
1240 mask |= (1 << CP0VPEC0_MVP) | (1 << CP0VPEC0_VPA);
1242 newval = (env->CP0_VPEConf0 & ~mask) | (arg1 & mask);
1244 // TODO: TC exclusive handling due to ERL/EXL.
1246 env->CP0_VPEConf0 = newval;
1249 void helper_mttc0_vpeconf0(target_ulong arg1)
1251 int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
1252 CPUMIPSState *other = mips_cpu_map_tc(&other_tc);
1253 uint32_t mask = 0;
1254 uint32_t newval;
1256 mask |= (1 << CP0VPEC0_MVP) | (1 << CP0VPEC0_VPA);
1257 newval = (other->CP0_VPEConf0 & ~mask) | (arg1 & mask);
1259 /* TODO: TC exclusive handling due to ERL/EXL. */
1260 other->CP0_VPEConf0 = newval;
/* Write VPEConf1: the NCX/NCP2/NCP1 fields are only writable while the
   processor is in configuration state (MVPControl.VPC set). */
void helper_mtc0_vpeconf1 (target_ulong arg1)
{
    uint32_t mask = 0;
    uint32_t newval;

    if (env->mvp->CP0_MVPControl & (1 << CP0MVPCo_VPC))
        mask |= (0xff << CP0VPEC1_NCX) | (0xff << CP0VPEC1_NCP2) |
                (0xff << CP0VPEC1_NCP1);
    newval = (env->CP0_VPEConf1 & ~mask) | (arg1 & mask);

    /* UDI not implemented. */
    /* CP2 not implemented. */

    /* TODO: Handle FPU (CP1) binding. */

    env->CP0_VPEConf1 = newval;
}

/* YQMask: yield qualifier inputs are not modelled, register forced to 0. */
void helper_mtc0_yqmask (target_ulong arg1)
{
    /* Yield qualifier inputs not implemented. */
    env->CP0_YQMask = 0x00000000;
}

/* VPEOpt: only the low 16 bits are kept. */
void helper_mtc0_vpeopt (target_ulong arg1)
{
    env->CP0_VPEOpt = arg1 & 0x0000ffff;
}
/* EntryLo0: keep only the implemented PFN/C/D/V/G bits. */
void helper_mtc0_entrylo0 (target_ulong arg1)
{
    /* Large physaddr (PABITS) not implemented */
    /* 1k pages not implemented */
    env->CP0_EntryLo0 = arg1 & 0x3FFFFFFF;
}

/* Write the current TC's TCStatus through the per-CPU writable-bit mask,
   then mirror the change into the shared CP0 Status state. */
void helper_mtc0_tcstatus (target_ulong arg1)
{
    uint32_t mask = env->CP0_TCStatus_rw_bitmask;
    uint32_t newval;

    newval = (env->active_tc.CP0_TCStatus & ~mask) | (arg1 & mask);

    env->active_tc.CP0_TCStatus = newval;
    sync_c0_tcstatus(env, env->current_tc, newval);
}
/* MTTR write of TCStatus on the target TC.  NOTE(review): unlike the
   mtc0 variant this applies no rw-bitmask filtering — confirm intended. */
void helper_mttc0_tcstatus (target_ulong arg1)
{
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
    CPUMIPSState *other = mips_cpu_map_tc(&other_tc);

    if (other_tc == other->current_tc)
        other->active_tc.CP0_TCStatus = arg1;
    else
        other->tcs[other_tc].CP0_TCStatus = arg1;
    sync_c0_tcstatus(other, other_tc, arg1);
}

/* Write the current TC's TCBind; CurVPE is writable only in config state. */
void helper_mtc0_tcbind (target_ulong arg1)
{
    uint32_t mask = (1 << CP0TCBd_TBE);
    uint32_t newval;

    if (env->mvp->CP0_MVPControl & (1 << CP0MVPCo_VPC))
        mask |= (1 << CP0TCBd_CurVPE);
    newval = (env->active_tc.CP0_TCBind & ~mask) | (arg1 & mask);
    env->active_tc.CP0_TCBind = newval;
}

/* MTTR write of TCBind on the target TC, same writability rules. */
void helper_mttc0_tcbind (target_ulong arg1)
{
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
    uint32_t mask = (1 << CP0TCBd_TBE);
    uint32_t newval;
    CPUMIPSState *other = mips_cpu_map_tc(&other_tc);

    if (other->mvp->CP0_MVPControl & (1 << CP0MVPCo_VPC))
        mask |= (1 << CP0TCBd_CurVPE);
    if (other_tc == other->current_tc) {
        newval = (other->active_tc.CP0_TCBind & ~mask) | (arg1 & mask);
        other->active_tc.CP0_TCBind = newval;
    } else {
        newval = (other->tcs[other_tc].CP0_TCBind & ~mask) | (arg1 & mask);
        other->tcs[other_tc].CP0_TCBind = newval;
    }
}
/* TCRestart write: set the TC's restart PC, clear the TDS (dirty) bit and
   drop any pending LL address. */
void helper_mtc0_tcrestart (target_ulong arg1)
{
    env->active_tc.PC = arg1;
    env->active_tc.CP0_TCStatus &= ~(1 << CP0TCSt_TDS);
    env->lladdr = 0ULL;
    /* MIPS16 not implemented. */
}

/* MTTR variant of TCRestart on the TC selected by TargTC. */
void helper_mttc0_tcrestart (target_ulong arg1)
{
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
    CPUMIPSState *other = mips_cpu_map_tc(&other_tc);

    if (other_tc == other->current_tc) {
        other->active_tc.PC = arg1;
        other->active_tc.CP0_TCStatus &= ~(1 << CP0TCSt_TDS);
        other->lladdr = 0ULL;
        /* MIPS16 not implemented. */
    } else {
        other->tcs[other_tc].PC = arg1;
        other->tcs[other_tc].CP0_TCStatus &= ~(1 << CP0TCSt_TDS);
        other->lladdr = 0ULL;
        /* MIPS16 not implemented. */
    }
}
/* TCHalt write: bit 0 halts (sleeps) or resumes (wakes) the current TC. */
void helper_mtc0_tchalt (target_ulong arg1)
{
    env->active_tc.CP0_TCHalt = arg1 & 0x1;

    // TODO: Halt TC / Restart (if allocated+active) TC.
    if (env->active_tc.CP0_TCHalt & 1) {
        mips_tc_sleep(env, env->current_tc);
    } else {
        mips_tc_wake(env, env->current_tc);
    }
}

/* MTTR variant: halt/resume the TC selected by TargTC. */
void helper_mttc0_tchalt (target_ulong arg1)
{
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
    CPUMIPSState *other = mips_cpu_map_tc(&other_tc);

    // TODO: Halt TC / Restart (if allocated+active) TC.

    if (other_tc == other->current_tc)
        other->active_tc.CP0_TCHalt = arg1;
    else
        other->tcs[other_tc].CP0_TCHalt = arg1;

    if (arg1 & 1) {
        mips_tc_sleep(other, other_tc);
    } else {
        mips_tc_wake(other, other_tc);
    }
}
/* TCContext: per-TC scratch register for the OS; stored verbatim. */
void helper_mtc0_tccontext (target_ulong arg1)
{
    env->active_tc.CP0_TCContext = arg1;
}

void helper_mttc0_tccontext (target_ulong arg1)
{
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
    CPUMIPSState *other = mips_cpu_map_tc(&other_tc);

    if (other_tc == other->current_tc)
        other->active_tc.CP0_TCContext = arg1;
    else
        other->tcs[other_tc].CP0_TCContext = arg1;
}

/* TCSchedule: stored verbatim (no scheduling policy is modelled). */
void helper_mtc0_tcschedule (target_ulong arg1)
{
    env->active_tc.CP0_TCSchedule = arg1;
}

void helper_mttc0_tcschedule (target_ulong arg1)
{
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
    CPUMIPSState *other = mips_cpu_map_tc(&other_tc);

    if (other_tc == other->current_tc)
        other->active_tc.CP0_TCSchedule = arg1;
    else
        other->tcs[other_tc].CP0_TCSchedule = arg1;
}

/* TCScheFBack: stored verbatim. */
void helper_mtc0_tcschefback (target_ulong arg1)
{
    env->active_tc.CP0_TCScheFBack = arg1;
}

void helper_mttc0_tcschefback (target_ulong arg1)
{
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
    CPUMIPSState *other = mips_cpu_map_tc(&other_tc);

    if (other_tc == other->current_tc)
        other->active_tc.CP0_TCScheFBack = arg1;
    else
        other->tcs[other_tc].CP0_TCScheFBack = arg1;
}
/* EntryLo1: same implemented-bit mask as EntryLo0. */
void helper_mtc0_entrylo1 (target_ulong arg1)
{
    /* Large physaddr (PABITS) not implemented */
    /* 1k pages not implemented */
    env->CP0_EntryLo1 = arg1 & 0x3FFFFFFF;
}

/* Context: software writes PTEBase; hardware owns the low 23 bits (BadVPN2). */
void helper_mtc0_context (target_ulong arg1)
{
    env->CP0_Context = (env->CP0_Context & 0x007FFFFF) | (arg1 & ~0x007FFFFF);
}

/* PageMask: restrict to the page sizes QEMU supports (no 1k pages). */
void helper_mtc0_pagemask (target_ulong arg1)
{
    /* 1k pages not implemented */
    env->CP0_PageMask = arg1 & (0x1FFFFFFF & (TARGET_PAGE_MASK << 1));
}

/* PageGrain: every feature it would control is unimplemented, so it
   always reads back as zero. */
void helper_mtc0_pagegrain (target_ulong arg1)
{
    /* SmartMIPS not implemented */
    /* Large physaddr (PABITS) not implemented */
    /* 1k pages not implemented */
    env->CP0_PageGrain = 0;
}

/* Wired: wrap into the range of implemented TLB entries. */
void helper_mtc0_wired (target_ulong arg1)
{
    env->CP0_Wired = arg1 % env->tlb->nb_tlb;
}
/* SRSConf0..4: shadow-register-set configuration.  Bits can only be OR-ed
   in (set-once), restricted to the per-CPU writable mask. */
void helper_mtc0_srsconf0 (target_ulong arg1)
{
    env->CP0_SRSConf0 |= arg1 & env->CP0_SRSConf0_rw_bitmask;
}

void helper_mtc0_srsconf1 (target_ulong arg1)
{
    env->CP0_SRSConf1 |= arg1 & env->CP0_SRSConf1_rw_bitmask;
}

void helper_mtc0_srsconf2 (target_ulong arg1)
{
    env->CP0_SRSConf2 |= arg1 & env->CP0_SRSConf2_rw_bitmask;
}

void helper_mtc0_srsconf3 (target_ulong arg1)
{
    env->CP0_SRSConf3 |= arg1 & env->CP0_SRSConf3_rw_bitmask;
}

void helper_mtc0_srsconf4 (target_ulong arg1)
{
    env->CP0_SRSConf4 |= arg1 & env->CP0_SRSConf4_rw_bitmask;
}

/* HWREna: only bits 0-3 (the four standard RDHWR registers) are kept. */
void helper_mtc0_hwrena (target_ulong arg1)
{
    env->CP0_HWREna = arg1 & 0x0000000F;
}

/* Count: delegate to the timer code so the virtual counter is rebased. */
void helper_mtc0_count (target_ulong arg1)
{
    cpu_mips_store_count(env, arg1);
}
/* EntryHi: store VPN2 + ASID; a change of ASID invalidates QEMU's TLB. */
void helper_mtc0_entryhi (target_ulong arg1)
{
    target_ulong old, val;

    /* 1k pages not implemented */
    val = arg1 & ((TARGET_PAGE_MASK << 1) | 0xFF);
#if defined(TARGET_MIPS64)
    val &= env->SEGMask;
#endif
    old = env->CP0_EntryHi;
    env->CP0_EntryHi = val;
    /* With the MT ASE, EntryHi is shared state that must be resynced. */
    if (env->CP0_Config3 & (1 << CP0C3_MT)) {
        sync_c0_entryhi(env, env->current_tc);
    }
    /* If the ASID changes, flush qemu's TLB.  */
    if ((old & 0xFF) != (val & 0xFF))
        cpu_mips_tlb_flush(env, 1);
}

/* MTTR write of EntryHi on the target TC (no masking applied here). */
void helper_mttc0_entryhi(target_ulong arg1)
{
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
    CPUMIPSState *other = mips_cpu_map_tc(&other_tc);

    other->CP0_EntryHi = arg1;
    sync_c0_entryhi(other, other_tc);
}

/* Compare: delegate to the timer code. */
void helper_mtc0_compare (target_ulong arg1)
{
    cpu_mips_store_compare(env, arg1);
}
/* Status: write through the per-CPU writable-bit mask, resync the MT
   shadow state (or recompute hflags), and optionally trace the change. */
void helper_mtc0_status (target_ulong arg1)
{
    uint32_t val, old;
    uint32_t mask = env->CP0_Status_rw_bitmask;

    val = arg1 & mask;
    old = env->CP0_Status;
    env->CP0_Status = (env->CP0_Status & ~mask) | val;
    if (env->CP0_Config3 & (1 << CP0C3_MT)) {
        sync_c0_status(env, env->current_tc);
    } else {
        compute_hflags(env);
    }

    if (qemu_loglevel_mask(CPU_LOG_EXEC)) {
        qemu_log("Status %08x (%08x) => %08x (%08x) Cause %08x",
                old, old & env->CP0_Cause & CP0Ca_IP_mask,
                val, val & env->CP0_Cause & CP0Ca_IP_mask,
                env->CP0_Cause);
        switch (env->hflags & MIPS_HFLAG_KSU) {
        case MIPS_HFLAG_UM: qemu_log(", UM\n"); break;
        case MIPS_HFLAG_SM: qemu_log(", SM\n"); break;
        case MIPS_HFLAG_KM: qemu_log("\n"); break;
        default: cpu_abort(env, "Invalid MMU mode!\n"); break;
        }
    }
}

/* MTTR write of Status on the target TC; a fixed mask strips CU3..CU0,
   RP and the UX/SX bits. */
void helper_mttc0_status(target_ulong arg1)
{
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
    CPUMIPSState *other = mips_cpu_map_tc(&other_tc);

    other->CP0_Status = arg1 & ~0xf1000018;
    sync_c0_status(other, other_tc);
}

/* IntCtl: only bits 5..9 are writable. */
void helper_mtc0_intctl (target_ulong arg1)
{
    /* vectored interrupts not implemented, no performance counters. */
    env->CP0_IntCtl = (env->CP0_IntCtl & ~0x000003e0) | (arg1 & 0x000003e0);
}

/* SRSCtl: only the ESS and PSS fields are writable. */
void helper_mtc0_srsctl (target_ulong arg1)
{
    uint32_t mask = (0xf << CP0SRSCtl_ESS) | (0xf << CP0SRSCtl_PSS);
    env->CP0_SRSCtl = (env->CP0_SRSCtl & ~mask) | (arg1 & mask);
}
/* Common Cause-register write logic, shared by the mtc0 and mttc0 paths:
   update the writable bits, start/stop the Count timer when DC toggles,
   and raise/clear the two software interrupts (IP0/IP1). */
static void mtc0_cause(CPUMIPSState *cpu, target_ulong arg1)
{
    uint32_t mask = 0x00C00300;
    uint32_t old = cpu->CP0_Cause;
    int i;

    /* The DC (disable Count) bit is writable only from MIPS32/64 R2 on. */
    if (cpu->insn_flags & ISA_MIPS32R2) {
        mask |= 1 << CP0Ca_DC;
    }

    cpu->CP0_Cause = (cpu->CP0_Cause & ~mask) | (arg1 & mask);

    if ((old ^ cpu->CP0_Cause) & (1 << CP0Ca_DC)) {
        if (cpu->CP0_Cause & (1 << CP0Ca_DC)) {
            cpu_mips_stop_count(cpu);
        } else {
            cpu_mips_start_count(cpu);
        }
    }

    /* Set/reset software interrupts */
    for (i = 0 ; i < 2 ; i++) {
        if ((old ^ cpu->CP0_Cause) & (1 << (CP0Ca_IP + i))) {
            cpu_mips_soft_irq(cpu, i, cpu->CP0_Cause & (1 << (CP0Ca_IP + i)));
        }
    }
}

void helper_mtc0_cause(target_ulong arg1)
{
    mtc0_cause(env, arg1);
}

void helper_mttc0_cause(target_ulong arg1)
{
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
    CPUMIPSState *other = mips_cpu_map_tc(&other_tc);

    mtc0_cause(other, arg1);
}
/* MFTR read of EPC from the TC selected by TargTC. */
target_ulong helper_mftc0_epc(void)
{
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
    CPUMIPSState *other = mips_cpu_map_tc(&other_tc);

    return other->CP0_EPC;
}

/* MFTR read of EBase from the TC selected by TargTC. */
target_ulong helper_mftc0_ebase(void)
{
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
    CPUMIPSState *other = mips_cpu_map_tc(&other_tc);

    return other->CP0_EBase;
}

/* EBase: only the exception-base field (bits 12..29) is writable. */
void helper_mtc0_ebase (target_ulong arg1)
{
    /* vectored interrupts not implemented */
    env->CP0_EBase = (env->CP0_EBase & ~0x3FFFF000) | (arg1 & 0x3FFFF000);
}

void helper_mttc0_ebase(target_ulong arg1)
{
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
    CPUMIPSState *other = mips_cpu_map_tc(&other_tc);
    other->CP0_EBase = (other->CP0_EBase & ~0x3FFFF000) | (arg1 & 0x3FFFF000);
}
/* MFTR read of ConfigN (selected by idx) from the target TC.
   Reserved selectors (4, 5 and anything above 7) read as zero. */
target_ulong helper_mftc0_configx(target_ulong idx)
{
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
    CPUMIPSState *other = mips_cpu_map_tc(&other_tc);

    switch (idx) {
    case 0: return other->CP0_Config0;
    case 1: return other->CP0_Config1;
    case 2: return other->CP0_Config2;
    case 3: return other->CP0_Config3;
    /* 4 and 5 are reserved.  */
    case 6: return other->CP0_Config6;
    case 7: return other->CP0_Config7;
    default:
        break;
    }
    return 0;
}
/* Config0: only the low three bits (K0 cacheability) are writable. */
void helper_mtc0_config0 (target_ulong arg1)
{
    env->CP0_Config0 = (env->CP0_Config0 & 0x81FFFFF8) | (arg1 & 0x00000007);
}

/* Config2: arg1 is deliberately ignored — every writable field would
   describe the unimplemented L2/L3 caches, so the register is re-masked
   unchanged. */
void helper_mtc0_config2 (target_ulong arg1)
{
    /* tertiary/secondary caches not implemented */
    env->CP0_Config2 = (env->CP0_Config2 & 0x8FFF0FFF);
}

/* LLAddr: shift into physical-address form and merge through the
   per-CPU writable-bit mask. */
void helper_mtc0_lladdr (target_ulong arg1)
{
    target_long mask = env->CP0_LLAddr_rw_bitmask;
    arg1 = arg1 << env->CP0_LLAddr_shift;
    env->lladdr = (env->lladdr & ~mask) | (arg1 & mask);
}
/* WatchLo[sel]: store the watch address; the low 3 enable bits (I/R/W)
   are cleared since watch exceptions are not raised. */
void helper_mtc0_watchlo (target_ulong arg1, uint32_t sel)
{
    /* Watch exceptions for instructions, data loads, data stores
       not implemented. */
    env->CP0_WatchLo[sel] = (arg1 & ~0x7);
}

/* WatchHi[sel]: store writable fields; the low 3 bits (I/R/W status)
   are write-one-to-clear. */
void helper_mtc0_watchhi (target_ulong arg1, uint32_t sel)
{
    env->CP0_WatchHi[sel] = (arg1 & 0x40FF0FF8);
    env->CP0_WatchHi[sel] &= ~(env->CP0_WatchHi[sel] & arg1 & 0x7);
}

/* XContext: software writes the PTEBase part above the SEGBITS-derived
   hardware field. */
void helper_mtc0_xcontext (target_ulong arg1)
{
    target_ulong mask = (1ULL << (env->SEGBITS - 7)) - 1;
    env->CP0_XContext = (env->CP0_XContext & mask) | (arg1 & ~mask);
}

/* Framemask: stored verbatim. */
void helper_mtc0_framemask (target_ulong arg1)
{
    env->CP0_Framemask = arg1; /* XXX */
}
/* Debug: merge the writable bits and keep the DM hflag in sync so the
   translator sees debug-mode state. */
void helper_mtc0_debug (target_ulong arg1)
{
    env->CP0_Debug = (env->CP0_Debug & 0x8C03FC1F) | (arg1 & 0x13300120);
    if (arg1 & (1 << CP0DB_DM))
        env->hflags |= MIPS_HFLAG_DM;
    else
        env->hflags &= ~MIPS_HFLAG_DM;
}

/* MTTR write of Debug on the target TC: SSt and Halt go to the per-TC
   debug status, everything else to the shared Debug register. */
void helper_mttc0_debug(target_ulong arg1)
{
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
    uint32_t val = arg1 & ((1 << CP0DB_SSt) | (1 << CP0DB_Halt));
    CPUMIPSState *other = mips_cpu_map_tc(&other_tc);

    /* XXX: Might be wrong, check with EJTAG spec. */
    if (other_tc == other->current_tc)
        other->active_tc.CP0_Debug_tcstatus = val;
    else
        other->tcs[other_tc].CP0_Debug_tcstatus = val;
    other->CP0_Debug = (other->CP0_Debug &
                     ((1 << CP0DB_SSt) | (1 << CP0DB_Halt))) |
                     (arg1 & ~((1 << CP0DB_SSt) | (1 << CP0DB_Halt)));
}
/* Performance0: keep only the low 11 control bits; no counters modelled. */
void helper_mtc0_performance0 (target_ulong arg1)
{
    env->CP0_Performance0 = arg1 & 0x000007ff;
}

/* Cache tag/data registers: stored with minimal or no filtering; the
   cache arrays themselves are not modelled (hence the XXX markers). */
void helper_mtc0_taglo (target_ulong arg1)
{
    env->CP0_TagLo = arg1 & 0xFFFFFCF6;
}

void helper_mtc0_datalo (target_ulong arg1)
{
    env->CP0_DataLo = arg1; /* XXX */
}

void helper_mtc0_taghi (target_ulong arg1)
{
    env->CP0_TagHi = arg1; /* XXX */
}

void helper_mtc0_datahi (target_ulong arg1)
{
    env->CP0_DataHi = arg1; /* XXX */
}
/* MIPS MT functions */

/* MFTR: read a general-purpose register of the TC selected by TargTC. */
target_ulong helper_mftgpr(uint32_t sel)
{
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
    CPUMIPSState *other = mips_cpu_map_tc(&other_tc);

    if (other_tc == other->current_tc)
        return other->active_tc.gpr[sel];
    else
        return other->tcs[other_tc].gpr[sel];
}

/* MFTR: read LO[sel] of the target TC. */
target_ulong helper_mftlo(uint32_t sel)
{
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
    CPUMIPSState *other = mips_cpu_map_tc(&other_tc);

    if (other_tc == other->current_tc)
        return other->active_tc.LO[sel];
    else
        return other->tcs[other_tc].LO[sel];
}

/* MFTR: read HI[sel] of the target TC. */
target_ulong helper_mfthi(uint32_t sel)
{
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
    CPUMIPSState *other = mips_cpu_map_tc(&other_tc);

    if (other_tc == other->current_tc)
        return other->active_tc.HI[sel];
    else
        return other->tcs[other_tc].HI[sel];
}

/* MFTR: read ACX[sel] of the target TC. */
target_ulong helper_mftacx(uint32_t sel)
{
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
    CPUMIPSState *other = mips_cpu_map_tc(&other_tc);

    if (other_tc == other->current_tc)
        return other->active_tc.ACX[sel];
    else
        return other->tcs[other_tc].ACX[sel];
}

/* MFTR: read DSPControl of the target TC. */
target_ulong helper_mftdsp(void)
{
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
    CPUMIPSState *other = mips_cpu_map_tc(&other_tc);

    if (other_tc == other->current_tc)
        return other->active_tc.DSPControl;
    else
        return other->tcs[other_tc].DSPControl;
}
/* MTTR: write a general-purpose register of the TC selected by TargTC. */
void helper_mttgpr(target_ulong arg1, uint32_t sel)
{
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
    CPUMIPSState *other = mips_cpu_map_tc(&other_tc);

    if (other_tc == other->current_tc)
        other->active_tc.gpr[sel] = arg1;
    else
        other->tcs[other_tc].gpr[sel] = arg1;
}

/* MTTR: write LO[sel] of the target TC. */
void helper_mttlo(target_ulong arg1, uint32_t sel)
{
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
    CPUMIPSState *other = mips_cpu_map_tc(&other_tc);

    if (other_tc == other->current_tc)
        other->active_tc.LO[sel] = arg1;
    else
        other->tcs[other_tc].LO[sel] = arg1;
}

/* MTTR: write HI[sel] of the target TC. */
void helper_mtthi(target_ulong arg1, uint32_t sel)
{
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
    CPUMIPSState *other = mips_cpu_map_tc(&other_tc);

    if (other_tc == other->current_tc)
        other->active_tc.HI[sel] = arg1;
    else
        other->tcs[other_tc].HI[sel] = arg1;
}

/* MTTR: write ACX[sel] of the target TC. */
void helper_mttacx(target_ulong arg1, uint32_t sel)
{
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
    CPUMIPSState *other = mips_cpu_map_tc(&other_tc);

    if (other_tc == other->current_tc)
        other->active_tc.ACX[sel] = arg1;
    else
        other->tcs[other_tc].ACX[sel] = arg1;
}

/* MTTR: write DSPControl of the target TC. */
void helper_mttdsp(target_ulong arg1)
{
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
    CPUMIPSState *other = mips_cpu_map_tc(&other_tc);

    if (other_tc == other->current_tc)
        other->active_tc.DSPControl = arg1;
    else
        other->tcs[other_tc].DSPControl = arg1;
}
/* MIPS MT functions */

/* DMT (disable multi-threading): not implemented, returns 0. */
target_ulong helper_dmt(void)
{
    // TODO
    return 0;
}

/* EMT (enable multi-threading): not implemented, returns 0. */
target_ulong helper_emt(void)
{
    // TODO
    return 0;
}

/* DVPE: disable and put to sleep every VPE except the caller.
   Returns the previous MVPControl value. */
target_ulong helper_dvpe(void)
{
    CPUMIPSState *other_cpu = first_cpu;
    target_ulong prev = env->mvp->CP0_MVPControl;

    do {
        /* Turn off all VPEs except the one executing the dvpe.  */
        if (other_cpu != env) {
            other_cpu->mvp->CP0_MVPControl &= ~(1 << CP0MVPCo_EVP);
            mips_vpe_sleep(other_cpu);
        }
        other_cpu = other_cpu->next_cpu;
    } while (other_cpu);
    return prev;
}
/* EVPE: re-enable and wake every other VPE that is not sleeping in WFI.
   Returns the previous MVPControl value. */
target_ulong helper_evpe(void)
{
    CPUMIPSState *other_cpu = first_cpu;
    target_ulong prev = env->mvp->CP0_MVPControl;

    do {
        if (other_cpu != env
            /* If the VPE is WFI, don't disturb its sleep.  */
            && !mips_vpe_is_wfi(other_cpu)) {
            /* Enable the VPE.  */
            other_cpu->mvp->CP0_MVPControl |= (1 << CP0MVPCo_EVP);
            mips_vpe_wake(other_cpu); /* And wake it up.  */
        }
        other_cpu = other_cpu->next_cpu;
    } while (other_cpu);
    return prev;
}
1942 #endif /* !CONFIG_USER_ONLY */
/* FORK: thread creation is not implemented.  NOTE(review): the assignment
   to arg1 is a dead store to a by-value parameter — it has no effect until
   the TODO (writing the value back to a TC register) is done. */
void helper_fork(target_ulong arg1, target_ulong arg2)
{
    // arg1 = rt, arg2 = rs
    arg1 = 0;
    // TODO: store to TC register
}
/* YIELD: only the exception-raising paths are modelled.  Negative args
   other than -2 and positive args may raise a Thread exception with the
   appropriate EXCPT subcode in VPEControl; no actual rescheduling or TC
   deallocation happens.  Returns YQMask (the rd result of YIELD). */
target_ulong helper_yield(target_ulong arg)
{
    target_long arg1 = arg;

    if (arg1 < 0) {
        /* No scheduling policy implemented. */
        if (arg1 != -2) {
            if (env->CP0_VPEControl & (1 << CP0VPECo_YSI) &&
                env->active_tc.CP0_TCStatus & (1 << CP0TCSt_DT)) {
                env->CP0_VPEControl &= ~(0x7 << CP0VPECo_EXCPT);
                /* EXCPT subcode 4: yield scheduler intercept. */
                env->CP0_VPEControl |= 4 << CP0VPECo_EXCPT;
                helper_raise_exception(EXCP_THREAD);
            }
        }
    } else if (arg1 == 0) {
        if (0 /* TODO: TC underflow */) {
            env->CP0_VPEControl &= ~(0x7 << CP0VPECo_EXCPT);
            helper_raise_exception(EXCP_THREAD);
        } else {
            // TODO: Deallocate TC
        }
    } else if (arg1 > 0) {
        /* Yield qualifier inputs not implemented. */
        env->CP0_VPEControl &= ~(0x7 << CP0VPECo_EXCPT);
        /* EXCPT subcode 2: yield qualifier. */
        env->CP0_VPEControl |= 2 << CP0VPECo_EXCPT;
        helper_raise_exception(EXCP_THREAD);
    }
    return env->CP0_YQMask;
}
1981 #ifndef CONFIG_USER_ONLY
1982 /* TLB management */
/* Flush QEMU's host TLB and forget all shadow (stale guest) entries. */
static void cpu_mips_tlb_flush (CPUMIPSState *env, int flush_global)
{
    /* Flush qemu's TLB and discard all shadowed entries.  */
    tlb_flush (env, flush_global);
    env->tlb->tlb_in_use = env->tlb->nb_tlb;
}

/* Invalidate shadow TLB entries with index >= first. */
static void r4k_mips_tlb_flush_extra (CPUMIPSState *env, int first)
{
    /* Discard entries from env->tlb[first] onwards.  */
    while (env->tlb->tlb_in_use > first) {
        r4k_invalidate_tlb(env, --env->tlb->tlb_in_use, 0);
    }
}
/* Populate guest TLB entry idx from the CP0 EntryHi/PageMask/EntryLo
   registers, decoding the G/V/D/C/PFN fields for both even and odd pages. */
static void r4k_fill_tlb (int idx)
{
    r4k_tlb_t *tlb;

    /* XXX: detect conflicting TLBs and raise a MCHECK exception when needed */
    tlb = &env->tlb->mmu.r4k.tlb[idx];
    tlb->VPN = env->CP0_EntryHi & (TARGET_PAGE_MASK << 1);
#if defined(TARGET_MIPS64)
    tlb->VPN &= env->SEGMask;
#endif
    tlb->ASID = env->CP0_EntryHi & 0xFF;
    tlb->PageMask = env->CP0_PageMask;
    /* Entry is global only if both EntryLo G bits are set. */
    tlb->G = env->CP0_EntryLo0 & env->CP0_EntryLo1 & 1;
    tlb->V0 = (env->CP0_EntryLo0 & 2) != 0;
    tlb->D0 = (env->CP0_EntryLo0 & 4) != 0;
    tlb->C0 = (env->CP0_EntryLo0 >> 3) & 0x7;
    tlb->PFN[0] = (env->CP0_EntryLo0 >> 6) << 12;
    tlb->V1 = (env->CP0_EntryLo1 & 2) != 0;
    tlb->D1 = (env->CP0_EntryLo1 & 4) != 0;
    tlb->C1 = (env->CP0_EntryLo1 >> 3) & 0x7;
    tlb->PFN[1] = (env->CP0_EntryLo1 >> 6) << 12;
}
/* TLBWI: write the TLB entry indexed by CP0_Index (probe-fail bit and
   overflow wrapped away). */
void r4k_helper_tlbwi (void)
{
    int idx;

    idx = (env->CP0_Index & ~0x80000000) % env->tlb->nb_tlb;

    /* Discard cached TLB entries.  We could avoid doing this if the
       tlbwi is just upgrading access permissions on the current entry;
       that might be a further win.  */
    r4k_mips_tlb_flush_extra (env, env->tlb->nb_tlb);

    r4k_invalidate_tlb(env, idx, 0);
    r4k_fill_tlb(idx);
}

/* TLBWR: write a random (non-wired) TLB entry. */
void r4k_helper_tlbwr (void)
{
    int r = cpu_mips_get_random(env);

    r4k_invalidate_tlb(env, r, 1);
    r4k_fill_tlb(r);
}
/* TLBP: probe the TLB for an entry matching EntryHi (VPN + ASID, honoring
   per-entry page mask and the G bit).  On a hit, CP0_Index gets the entry
   index; on a miss, its P bit (bit 31) is set, and any matching shadow
   entries are discarded so QEMU's state stays consistent. */
void r4k_helper_tlbp (void)
{
    r4k_tlb_t *tlb;
    target_ulong mask;
    target_ulong tag;
    target_ulong VPN;
    uint8_t ASID;
    int i;

    ASID = env->CP0_EntryHi & 0xFF;
    for (i = 0; i < env->tlb->nb_tlb; i++) {
        tlb = &env->tlb->mmu.r4k.tlb[i];
        /* 1k pages are not supported. */
        mask = tlb->PageMask | ~(TARGET_PAGE_MASK << 1);
        tag = env->CP0_EntryHi & ~mask;
        VPN = tlb->VPN & ~mask;
        /* Check ASID, virtual page number & size */
        if ((tlb->G == 1 || tlb->ASID == ASID) && VPN == tag) {
            /* TLB match */
            env->CP0_Index = i;
            break;
        }
    }
    if (i == env->tlb->nb_tlb) {
        /* No match.  Discard any shadow entries, if any of them match.  */
        for (i = env->tlb->nb_tlb; i < env->tlb->tlb_in_use; i++) {
            tlb = &env->tlb->mmu.r4k.tlb[i];
            /* 1k pages are not supported. */
            mask = tlb->PageMask | ~(TARGET_PAGE_MASK << 1);
            tag = env->CP0_EntryHi & ~mask;
            VPN = tlb->VPN & ~mask;
            /* Check ASID, virtual page number & size */
            if ((tlb->G == 1 || tlb->ASID == ASID) && VPN == tag) {
                r4k_mips_tlb_flush_extra (env, i);
                break;
            }
        }

        env->CP0_Index |= 0x80000000;
    }
}
/* TLBR: read the TLB entry indexed by CP0_Index back into
   EntryHi/PageMask/EntryLo0/EntryLo1, re-encoding the G/V/D/C/PFN bits. */
void r4k_helper_tlbr (void)
{
    r4k_tlb_t *tlb;
    uint8_t ASID;
    int idx;

    ASID = env->CP0_EntryHi & 0xFF;
    idx = (env->CP0_Index & ~0x80000000) % env->tlb->nb_tlb;
    tlb = &env->tlb->mmu.r4k.tlb[idx];

    /* If this will change the current ASID, flush qemu's TLB.  */
    if (ASID != tlb->ASID)
        cpu_mips_tlb_flush (env, 1);

    r4k_mips_tlb_flush_extra(env, env->tlb->nb_tlb);

    env->CP0_EntryHi = tlb->VPN | tlb->ASID;
    env->CP0_PageMask = tlb->PageMask;
    env->CP0_EntryLo0 = tlb->G | (tlb->V0 << 1) | (tlb->D0 << 2) |
                        (tlb->C0 << 3) | (tlb->PFN[0] >> 6);
    env->CP0_EntryLo1 = tlb->G | (tlb->V1 << 1) | (tlb->D1 << 2) |
                        (tlb->C1 << 3) | (tlb->PFN[1] >> 6);
}
/* Thin dispatchers: forward the TLB instructions to the MMU-model-specific
   implementation installed in env->tlb. */
void helper_tlbwi(void)
{
    env->tlb->helper_tlbwi();
}

void helper_tlbwr(void)
{
    env->tlb->helper_tlbwr();
}

void helper_tlbp(void)
{
    env->tlb->helper_tlbp();
}

void helper_tlbr(void)
{
    env->tlb->helper_tlbr();
}
/* Specials */

/* DI: disable interrupts (clear Status.IE); returns the old Status. */
target_ulong helper_di (void)
{
    target_ulong t0 = env->CP0_Status;

    env->CP0_Status = t0 & ~(1 << CP0St_IE);
    return t0;
}

/* EI: enable interrupts (set Status.IE); returns the old Status. */
target_ulong helper_ei (void)
{
    target_ulong t0 = env->CP0_Status;

    env->CP0_Status = t0 | (1 << CP0St_IE);
    return t0;
}
/* Trace CPU state just before an ERET/DERET (CPU_LOG_EXEC only). */
static void debug_pre_eret (void)
{
    if (qemu_loglevel_mask(CPU_LOG_EXEC)) {
        qemu_log("ERET: PC " TARGET_FMT_lx " EPC " TARGET_FMT_lx,
                env->active_tc.PC, env->CP0_EPC);
        if (env->CP0_Status & (1 << CP0St_ERL))
            qemu_log(" ErrorEPC " TARGET_FMT_lx, env->CP0_ErrorEPC);
        if (env->hflags & MIPS_HFLAG_DM)
            qemu_log(" DEPC " TARGET_FMT_lx, env->CP0_DEPC);
        qemu_log("\n");
    }
}

/* Trace CPU state (including resulting MMU mode) after an ERET/DERET. */
static void debug_post_eret (void)
{
    if (qemu_loglevel_mask(CPU_LOG_EXEC)) {
        qemu_log("  =>  PC " TARGET_FMT_lx " EPC " TARGET_FMT_lx,
                env->active_tc.PC, env->CP0_EPC);
        if (env->CP0_Status & (1 << CP0St_ERL))
            qemu_log(" ErrorEPC " TARGET_FMT_lx, env->CP0_ErrorEPC);
        if (env->hflags & MIPS_HFLAG_DM)
            qemu_log(" DEPC " TARGET_FMT_lx, env->CP0_DEPC);
        switch (env->hflags & MIPS_HFLAG_KSU) {
        case MIPS_HFLAG_UM: qemu_log(", UM\n"); break;
        case MIPS_HFLAG_SM: qemu_log(", SM\n"); break;
        case MIPS_HFLAG_KM: qemu_log("\n"); break;
        default: cpu_abort(env, "Invalid MMU mode!\n"); break;
        }
    }
}
2178 static void set_pc (target_ulong error_pc)
2180 env->active_tc.PC = error_pc & ~(target_ulong)1;
2181 if (error_pc & 1) {
2182 env->hflags |= MIPS_HFLAG_M16;
2183 } else {
2184 env->hflags &= ~(MIPS_HFLAG_M16);
/* ERET: return from exception.  ERL takes priority over EXL: restore from
   ErrorEPC and clear ERL if set, otherwise from EPC clearing EXL.  Finally
   recompute hflags and invalidate any LL/SC link by poisoning lladdr. */
void helper_eret (void)
{
    debug_pre_eret();
    if (env->CP0_Status & (1 << CP0St_ERL)) {
        set_pc(env->CP0_ErrorEPC);
        env->CP0_Status &= ~(1 << CP0St_ERL);
    } else {
        set_pc(env->CP0_EPC);
        env->CP0_Status &= ~(1 << CP0St_EXL);
    }
    compute_hflags(env);
    debug_post_eret();
    /* lladdr = 1 can never match a real (aligned) LL address, so a
       subsequent SC fails — presumably intentional; confirm against the
       SC implementation. */
    env->lladdr = 1;
}
2203 void helper_deret (void)
2205 debug_pre_eret();
2206 set_pc(env->CP0_DEPC);
2208 env->hflags &= MIPS_HFLAG_DM;
2209 compute_hflags(env);
2210 debug_post_eret();
2211 env->lladdr = 1;
2213 #endif /* !CONFIG_USER_ONLY */
/* RDHWR 0 (CPUNum): readable in kernel/CP0 context or when HWREna bit 0
   is set; otherwise raises Reserved Instruction. */
target_ulong helper_rdhwr_cpunum(void)
{
    if ((env->hflags & MIPS_HFLAG_CP0) ||
        (env->CP0_HWREna & (1 << 0)))
        return env->CP0_EBase & 0x3ff;
    else
        helper_raise_exception(EXCP_RI);

    return 0;
}

/* RDHWR 1 (SYNCI_Step): cache-line step for SYNCI, gated by HWREna bit 1. */
target_ulong helper_rdhwr_synci_step(void)
{
    if ((env->hflags & MIPS_HFLAG_CP0) ||
        (env->CP0_HWREna & (1 << 1)))
        return env->SYNCI_Step;
    else
        helper_raise_exception(EXCP_RI);

    return 0;
}

/* RDHWR 2 (CC): the Count register, gated by HWREna bit 2. */
target_ulong helper_rdhwr_cc(void)
{
    if ((env->hflags & MIPS_HFLAG_CP0) ||
        (env->CP0_HWREna & (1 << 2)))
        return env->CP0_Count;
    else
        helper_raise_exception(EXCP_RI);

    return 0;
}

/* RDHWR 3 (CCRes): Count resolution, gated by HWREna bit 3. */
target_ulong helper_rdhwr_ccres(void)
{
    if ((env->hflags & MIPS_HFLAG_CP0) ||
        (env->CP0_HWREna & (1 << 3)))
        return env->CCRes;
    else
        helper_raise_exception(EXCP_RI);

    return 0;
}
/* Minimal PMON monitor-call emulation: character output (putchar / a
   printf that only handles plain strings) and stubbed character input.
   Arguments follow the MIPS o32 convention (a0 = gpr[4], v0 = gpr[2]). */
void helper_pmon (int function)
{
    function /= 2;
    switch (function) {
    case 2: /* TODO: char inbyte(int waitflag); */
        if (env->active_tc.gpr[4] == 0)
            env->active_tc.gpr[2] = -1;
        /* Fall through */
    case 11: /* TODO: char inbyte (void); */
        env->active_tc.gpr[2] = -1;
        break;
    case 3:
    case 12:
        printf("%c", (char)(env->active_tc.gpr[4] & 0xFF));
        break;
    case 17:
        break;
    case 158:
        {
            /* NOTE(review): gpr[4] is used as a host pointer here — this
               only works when the guest address is directly mapped
               (user-mode emulation); confirm callers. */
            unsigned char *fmt = (void *)(uintptr_t)env->active_tc.gpr[4];
            printf("%s", fmt);
        }
        break;
    }
}
/* WAIT: halt the CPU until the next interrupt, leaving the execution
   loop via an EXCP_HLT exception. */
void helper_wait (void)
{
    env->halted = 1;
    cpu_reset_interrupt(env, CPU_INTERRUPT_WAKE);
    helper_raise_exception(EXCP_HLT);
}
2292 #if !defined(CONFIG_USER_ONLY)
2294 static void QEMU_NORETURN do_unaligned_access(target_ulong addr, int is_write,
2295 int is_user, uintptr_t retaddr);
2297 #define MMUSUFFIX _mmu
2298 #define ALIGNED_ONLY
2300 #define SHIFT 0
2301 #include "softmmu_template.h"
2303 #define SHIFT 1
2304 #include "softmmu_template.h"
2306 #define SHIFT 2
2307 #include "softmmu_template.h"
2309 #define SHIFT 3
2310 #include "softmmu_template.h"
/* Softmmu unaligned-access hook: record the faulting address in BadVAddr,
   restore the CPU state from the host return address, and raise an
   address-error exception (AdES for stores, AdEL for loads). */
static void do_unaligned_access(target_ulong addr, int is_write,
                                int is_user, uintptr_t retaddr)
{
    env->CP0_BadVAddr = addr;
    do_restore_state (retaddr);
    helper_raise_exception ((is_write == 1) ? EXCP_AdES : EXCP_AdEL);
}
/* Softmmu TLB-miss hook: try to resolve the guest address; on failure,
   restore the translation state from the host PC (when called from
   generated code) and deliver the MMU fault exception.  Saves/restores
   the global env around the call. */
void tlb_fill(CPUMIPSState *env1, target_ulong addr, int is_write, int mmu_idx,
              uintptr_t retaddr)
{
    TranslationBlock *tb;
    CPUMIPSState *saved_env;
    int ret;

    saved_env = env;
    env = env1;
    ret = cpu_mips_handle_mmu_fault(env, addr, is_write, mmu_idx);
    if (ret) {
        if (retaddr) {
            /* now we have a real cpu fault */
            tb = tb_find_pc(retaddr);
            if (tb) {
                /* the PC is inside the translated code. It means that we have
                   a virtual CPU fault */
                cpu_restore_state(tb, env, retaddr);
            }
        }
        helper_raise_exception_err(env->exception_index, env->error_code);
    }
    env = saved_env;
}
/* Access to an unassigned physical address: raise an instruction or data
   bus error depending on whether it was a fetch. */
void cpu_unassigned_access(CPUMIPSState *env1, target_phys_addr_t addr,
                           int is_write, int is_exec, int unused, int size)
{
    env = env1;

    if (is_exec)
        helper_raise_exception(EXCP_IBE);
    else
        helper_raise_exception(EXCP_DBE);
}
2355 #endif /* !CONFIG_USER_ONLY */
2357 /* Complex FPU operations which may need stack space. */
2359 #define FLOAT_ONE32 make_float32(0x3f8 << 20)
2360 #define FLOAT_ONE64 make_float64(0x3ffULL << 52)
2361 #define FLOAT_TWO32 make_float32(1 << 30)
2362 #define FLOAT_TWO64 make_float64(1ULL << 62)
2363 #define FLOAT_QNAN32 0x7fbfffff
2364 #define FLOAT_QNAN64 0x7ff7ffffffffffffULL
2365 #define FLOAT_SNAN32 0x7fffffff
2366 #define FLOAT_SNAN64 0x7fffffffffffffffULL
/* convert MIPS rounding mode in FCR31 to IEEE library */
/* Indexed by the FCSR RM field (FCR31 bits 1:0). */
static unsigned int ieee_rm[] = {
    float_round_nearest_even,
    float_round_to_zero,
    float_round_up,
    float_round_down
};
2376 #define RESTORE_ROUNDING_MODE \
2377 set_float_rounding_mode(ieee_rm[env->active_fpu.fcr31 & 3], &env->active_fpu.fp_status)
2379 #define RESTORE_FLUSH_MODE \
2380 set_flush_to_zero((env->active_fpu.fcr31 & (1 << 24)) != 0, &env->active_fpu.fp_status);
/* CFC1: read an FPU control register.  Registers 25 (FCCR), 26 (FEXR) and
   28 (FENR) are bit-rearranged views of FCR31; 0 is FIR; anything else
   returns FCR31 itself. */
target_ulong helper_cfc1 (uint32_t reg)
{
    target_ulong arg1;

    switch (reg) {
    case 0:
        arg1 = (int32_t)env->active_fpu.fcr0;
        break;
    case 25:
        /* FCCR: condition codes 7..1 plus CC0. */
        arg1 = ((env->active_fpu.fcr31 >> 24) & 0xfe) | ((env->active_fpu.fcr31 >> 23) & 0x1);
        break;
    case 26:
        /* FEXR: cause and flags fields. */
        arg1 = env->active_fpu.fcr31 & 0x0003f07c;
        break;
    case 28:
        /* FENR: enables, rounding mode and FS bit. */
        arg1 = (env->active_fpu.fcr31 & 0x00000f83) | ((env->active_fpu.fcr31 >> 22) & 0x4);
        break;
    default:
        arg1 = (int32_t)env->active_fpu.fcr31;
        break;
    }

    return arg1;
}
/* CTC1: write an FPU control register.  Views 25/26/28 are merged back
   into FCR31; writes with bits set in reserved fields are ignored.  After
   any accepted write the softfloat rounding and flush-to-zero modes are
   refreshed and an FP exception is raised if an enabled (or unmasked
   Unimplemented) cause bit is pending. */
void helper_ctc1 (target_ulong arg1, uint32_t reg)
{
    switch(reg) {
    case 25:
        if (arg1 & 0xffffff00)
            return;
        env->active_fpu.fcr31 = (env->active_fpu.fcr31 & 0x017fffff) | ((arg1 & 0xfe) << 24) |
                     ((arg1 & 0x1) << 23);
        break;
    case 26:
        if (arg1 & 0x007c0000)
            return;
        env->active_fpu.fcr31 = (env->active_fpu.fcr31 & 0xfffc0f83) | (arg1 & 0x0003f07c);
        break;
    case 28:
        if (arg1 & 0x007c0000)
            return;
        env->active_fpu.fcr31 = (env->active_fpu.fcr31 & 0xfefff07c) | (arg1 & 0x00000f83) |
                     ((arg1 & 0x4) << 22);
        break;
    case 31:
        if (arg1 & 0x007c0000)
            return;
        env->active_fpu.fcr31 = arg1;
        break;
    default:
        return;
    }
    /* set rounding mode */
    RESTORE_ROUNDING_MODE;
    /* set flush-to-zero mode */
    RESTORE_FLUSH_MODE;
    set_float_exception_flags(0, &env->active_fpu.fp_status);
    if ((GET_FP_ENABLE(env->active_fpu.fcr31) | 0x20) & GET_FP_CAUSE(env->active_fpu.fcr31))
        helper_raise_exception(EXCP_FPE);
}
2444 static inline int ieee_ex_to_mips(int xcpt)
2446 int ret = 0;
2447 if (xcpt) {
2448 if (xcpt & float_flag_invalid) {
2449 ret |= FP_INVALID;
2451 if (xcpt & float_flag_overflow) {
2452 ret |= FP_OVERFLOW;
2454 if (xcpt & float_flag_underflow) {
2455 ret |= FP_UNDERFLOW;
2457 if (xcpt & float_flag_divbyzero) {
2458 ret |= FP_DIV0;
2460 if (xcpt & float_flag_inexact) {
2461 ret |= FP_INEXACT;
2464 return ret;
/* Fold the accumulated softfloat exception flags into FCR31: set the
   cause field, then either raise an FP exception (if enabled) or
   accumulate into the sticky flags field. */
static inline void update_fcr31(void)
{
    int tmp = ieee_ex_to_mips(get_float_exception_flags(&env->active_fpu.fp_status));

    SET_FP_CAUSE(env->active_fpu.fcr31, tmp);
    if (GET_FP_ENABLE(env->active_fpu.fcr31) & tmp)
        helper_raise_exception(EXCP_FPE);
    else
        UPDATE_FP_FLAGS(env->active_fpu.fcr31, tmp);
}
2478 /* Float support.
2479 Single precition routines have a "s" suffix, double precision a
2480 "d" suffix, 32bit integer "w", 64bit integer "l", paired single "ps",
2481 paired single lower "pl", paired single upper "pu". */
/* unary operations, modifying fp status */

/* sqrt.d: double-precision square root via softfloat. */
uint64_t helper_float_sqrt_d(uint64_t fdt0)
{
    return float64_sqrt(fdt0, &env->active_fpu.fp_status);
}

/* sqrt.s: single-precision square root via softfloat. */
uint32_t helper_float_sqrt_s(uint32_t fst0)
{
    return float32_sqrt(fst0, &env->active_fpu.fp_status);
}
/* cvt.d.s: single -> double, updating FCR31 from the softfloat flags. */
uint64_t helper_float_cvtd_s(uint32_t fst0)
{
    uint64_t fdt2;

    set_float_exception_flags(0, &env->active_fpu.fp_status);
    fdt2 = float32_to_float64(fst0, &env->active_fpu.fp_status);
    update_fcr31();
    return fdt2;
}

/* cvt.d.w: 32-bit integer -> double. */
uint64_t helper_float_cvtd_w(uint32_t wt0)
{
    uint64_t fdt2;

    set_float_exception_flags(0, &env->active_fpu.fp_status);
    fdt2 = int32_to_float64(wt0, &env->active_fpu.fp_status);
    update_fcr31();
    return fdt2;
}

/* cvt.d.l: 64-bit integer -> double. */
uint64_t helper_float_cvtd_l(uint64_t dt0)
{
    uint64_t fdt2;

    set_float_exception_flags(0, &env->active_fpu.fp_status);
    fdt2 = int64_to_float64(dt0, &env->active_fpu.fp_status);
    update_fcr31();
    return fdt2;
}
2524 uint64_t helper_float_cvtl_d(uint64_t fdt0)
2526 uint64_t dt2;
2528 set_float_exception_flags(0, &env->active_fpu.fp_status);
2529 dt2 = float64_to_int64(fdt0, &env->active_fpu.fp_status);
2530 update_fcr31();
2531 if (GET_FP_CAUSE(env->active_fpu.fcr31) & (FP_OVERFLOW | FP_INVALID))
2532 dt2 = FLOAT_SNAN64;
2533 return dt2;
2536 uint64_t helper_float_cvtl_s(uint32_t fst0)
2538 uint64_t dt2;
2540 set_float_exception_flags(0, &env->active_fpu.fp_status);
2541 dt2 = float32_to_int64(fst0, &env->active_fpu.fp_status);
2542 update_fcr31();
2543 if (GET_FP_CAUSE(env->active_fpu.fcr31) & (FP_OVERFLOW | FP_INVALID))
2544 dt2 = FLOAT_SNAN64;
2545 return dt2;
2548 uint64_t helper_float_cvtps_pw(uint64_t dt0)
2550 uint32_t fst2;
2551 uint32_t fsth2;
2553 set_float_exception_flags(0, &env->active_fpu.fp_status);
2554 fst2 = int32_to_float32(dt0 & 0XFFFFFFFF, &env->active_fpu.fp_status);
2555 fsth2 = int32_to_float32(dt0 >> 32, &env->active_fpu.fp_status);
2556 update_fcr31();
2557 return ((uint64_t)fsth2 << 32) | fst2;
2560 uint64_t helper_float_cvtpw_ps(uint64_t fdt0)
2562 uint32_t wt2;
2563 uint32_t wth2;
2565 set_float_exception_flags(0, &env->active_fpu.fp_status);
2566 wt2 = float32_to_int32(fdt0 & 0XFFFFFFFF, &env->active_fpu.fp_status);
2567 wth2 = float32_to_int32(fdt0 >> 32, &env->active_fpu.fp_status);
2568 update_fcr31();
2569 if (GET_FP_CAUSE(env->active_fpu.fcr31) & (FP_OVERFLOW | FP_INVALID)) {
2570 wt2 = FLOAT_SNAN32;
2571 wth2 = FLOAT_SNAN32;
2573 return ((uint64_t)wth2 << 32) | wt2;
2576 uint32_t helper_float_cvts_d(uint64_t fdt0)
2578 uint32_t fst2;
2580 set_float_exception_flags(0, &env->active_fpu.fp_status);
2581 fst2 = float64_to_float32(fdt0, &env->active_fpu.fp_status);
2582 update_fcr31();
2583 return fst2;
2586 uint32_t helper_float_cvts_w(uint32_t wt0)
2588 uint32_t fst2;
2590 set_float_exception_flags(0, &env->active_fpu.fp_status);
2591 fst2 = int32_to_float32(wt0, &env->active_fpu.fp_status);
2592 update_fcr31();
2593 return fst2;
2596 uint32_t helper_float_cvts_l(uint64_t dt0)
2598 uint32_t fst2;
2600 set_float_exception_flags(0, &env->active_fpu.fp_status);
2601 fst2 = int64_to_float32(dt0, &env->active_fpu.fp_status);
2602 update_fcr31();
2603 return fst2;
2606 uint32_t helper_float_cvts_pl(uint32_t wt0)
2608 uint32_t wt2;
2610 set_float_exception_flags(0, &env->active_fpu.fp_status);
2611 wt2 = wt0;
2612 update_fcr31();
2613 return wt2;
2616 uint32_t helper_float_cvts_pu(uint32_t wth0)
2618 uint32_t wt2;
2620 set_float_exception_flags(0, &env->active_fpu.fp_status);
2621 wt2 = wth0;
2622 update_fcr31();
2623 return wt2;
2626 uint32_t helper_float_cvtw_s(uint32_t fst0)
2628 uint32_t wt2;
2630 set_float_exception_flags(0, &env->active_fpu.fp_status);
2631 wt2 = float32_to_int32(fst0, &env->active_fpu.fp_status);
2632 update_fcr31();
2633 if (GET_FP_CAUSE(env->active_fpu.fcr31) & (FP_OVERFLOW | FP_INVALID))
2634 wt2 = FLOAT_SNAN32;
2635 return wt2;
2638 uint32_t helper_float_cvtw_d(uint64_t fdt0)
2640 uint32_t wt2;
2642 set_float_exception_flags(0, &env->active_fpu.fp_status);
2643 wt2 = float64_to_int32(fdt0, &env->active_fpu.fp_status);
2644 update_fcr31();
2645 if (GET_FP_CAUSE(env->active_fpu.fcr31) & (FP_OVERFLOW | FP_INVALID))
2646 wt2 = FLOAT_SNAN32;
2647 return wt2;
2650 uint64_t helper_float_roundl_d(uint64_t fdt0)
2652 uint64_t dt2;
2654 set_float_exception_flags(0, &env->active_fpu.fp_status);
2655 set_float_rounding_mode(float_round_nearest_even, &env->active_fpu.fp_status);
2656 dt2 = float64_to_int64(fdt0, &env->active_fpu.fp_status);
2657 RESTORE_ROUNDING_MODE;
2658 update_fcr31();
2659 if (GET_FP_CAUSE(env->active_fpu.fcr31) & (FP_OVERFLOW | FP_INVALID))
2660 dt2 = FLOAT_SNAN64;
2661 return dt2;
2664 uint64_t helper_float_roundl_s(uint32_t fst0)
2666 uint64_t dt2;
2668 set_float_exception_flags(0, &env->active_fpu.fp_status);
2669 set_float_rounding_mode(float_round_nearest_even, &env->active_fpu.fp_status);
2670 dt2 = float32_to_int64(fst0, &env->active_fpu.fp_status);
2671 RESTORE_ROUNDING_MODE;
2672 update_fcr31();
2673 if (GET_FP_CAUSE(env->active_fpu.fcr31) & (FP_OVERFLOW | FP_INVALID))
2674 dt2 = FLOAT_SNAN64;
2675 return dt2;
2678 uint32_t helper_float_roundw_d(uint64_t fdt0)
2680 uint32_t wt2;
2682 set_float_exception_flags(0, &env->active_fpu.fp_status);
2683 set_float_rounding_mode(float_round_nearest_even, &env->active_fpu.fp_status);
2684 wt2 = float64_to_int32(fdt0, &env->active_fpu.fp_status);
2685 RESTORE_ROUNDING_MODE;
2686 update_fcr31();
2687 if (GET_FP_CAUSE(env->active_fpu.fcr31) & (FP_OVERFLOW | FP_INVALID))
2688 wt2 = FLOAT_SNAN32;
2689 return wt2;
2692 uint32_t helper_float_roundw_s(uint32_t fst0)
2694 uint32_t wt2;
2696 set_float_exception_flags(0, &env->active_fpu.fp_status);
2697 set_float_rounding_mode(float_round_nearest_even, &env->active_fpu.fp_status);
2698 wt2 = float32_to_int32(fst0, &env->active_fpu.fp_status);
2699 RESTORE_ROUNDING_MODE;
2700 update_fcr31();
2701 if (GET_FP_CAUSE(env->active_fpu.fcr31) & (FP_OVERFLOW | FP_INVALID))
2702 wt2 = FLOAT_SNAN32;
2703 return wt2;
2706 uint64_t helper_float_truncl_d(uint64_t fdt0)
2708 uint64_t dt2;
2710 set_float_exception_flags(0, &env->active_fpu.fp_status);
2711 dt2 = float64_to_int64_round_to_zero(fdt0, &env->active_fpu.fp_status);
2712 update_fcr31();
2713 if (GET_FP_CAUSE(env->active_fpu.fcr31) & (FP_OVERFLOW | FP_INVALID))
2714 dt2 = FLOAT_SNAN64;
2715 return dt2;
2718 uint64_t helper_float_truncl_s(uint32_t fst0)
2720 uint64_t dt2;
2722 set_float_exception_flags(0, &env->active_fpu.fp_status);
2723 dt2 = float32_to_int64_round_to_zero(fst0, &env->active_fpu.fp_status);
2724 update_fcr31();
2725 if (GET_FP_CAUSE(env->active_fpu.fcr31) & (FP_OVERFLOW | FP_INVALID))
2726 dt2 = FLOAT_SNAN64;
2727 return dt2;
2730 uint32_t helper_float_truncw_d(uint64_t fdt0)
2732 uint32_t wt2;
2734 set_float_exception_flags(0, &env->active_fpu.fp_status);
2735 wt2 = float64_to_int32_round_to_zero(fdt0, &env->active_fpu.fp_status);
2736 update_fcr31();
2737 if (GET_FP_CAUSE(env->active_fpu.fcr31) & (FP_OVERFLOW | FP_INVALID))
2738 wt2 = FLOAT_SNAN32;
2739 return wt2;
2742 uint32_t helper_float_truncw_s(uint32_t fst0)
2744 uint32_t wt2;
2746 set_float_exception_flags(0, &env->active_fpu.fp_status);
2747 wt2 = float32_to_int32_round_to_zero(fst0, &env->active_fpu.fp_status);
2748 update_fcr31();
2749 if (GET_FP_CAUSE(env->active_fpu.fcr31) & (FP_OVERFLOW | FP_INVALID))
2750 wt2 = FLOAT_SNAN32;
2751 return wt2;
2754 uint64_t helper_float_ceill_d(uint64_t fdt0)
2756 uint64_t dt2;
2758 set_float_exception_flags(0, &env->active_fpu.fp_status);
2759 set_float_rounding_mode(float_round_up, &env->active_fpu.fp_status);
2760 dt2 = float64_to_int64(fdt0, &env->active_fpu.fp_status);
2761 RESTORE_ROUNDING_MODE;
2762 update_fcr31();
2763 if (GET_FP_CAUSE(env->active_fpu.fcr31) & (FP_OVERFLOW | FP_INVALID))
2764 dt2 = FLOAT_SNAN64;
2765 return dt2;
2768 uint64_t helper_float_ceill_s(uint32_t fst0)
2770 uint64_t dt2;
2772 set_float_exception_flags(0, &env->active_fpu.fp_status);
2773 set_float_rounding_mode(float_round_up, &env->active_fpu.fp_status);
2774 dt2 = float32_to_int64(fst0, &env->active_fpu.fp_status);
2775 RESTORE_ROUNDING_MODE;
2776 update_fcr31();
2777 if (GET_FP_CAUSE(env->active_fpu.fcr31) & (FP_OVERFLOW | FP_INVALID))
2778 dt2 = FLOAT_SNAN64;
2779 return dt2;
2782 uint32_t helper_float_ceilw_d(uint64_t fdt0)
2784 uint32_t wt2;
2786 set_float_exception_flags(0, &env->active_fpu.fp_status);
2787 set_float_rounding_mode(float_round_up, &env->active_fpu.fp_status);
2788 wt2 = float64_to_int32(fdt0, &env->active_fpu.fp_status);
2789 RESTORE_ROUNDING_MODE;
2790 update_fcr31();
2791 if (GET_FP_CAUSE(env->active_fpu.fcr31) & (FP_OVERFLOW | FP_INVALID))
2792 wt2 = FLOAT_SNAN32;
2793 return wt2;
2796 uint32_t helper_float_ceilw_s(uint32_t fst0)
2798 uint32_t wt2;
2800 set_float_exception_flags(0, &env->active_fpu.fp_status);
2801 set_float_rounding_mode(float_round_up, &env->active_fpu.fp_status);
2802 wt2 = float32_to_int32(fst0, &env->active_fpu.fp_status);
2803 RESTORE_ROUNDING_MODE;
2804 update_fcr31();
2805 if (GET_FP_CAUSE(env->active_fpu.fcr31) & (FP_OVERFLOW | FP_INVALID))
2806 wt2 = FLOAT_SNAN32;
2807 return wt2;
2810 uint64_t helper_float_floorl_d(uint64_t fdt0)
2812 uint64_t dt2;
2814 set_float_exception_flags(0, &env->active_fpu.fp_status);
2815 set_float_rounding_mode(float_round_down, &env->active_fpu.fp_status);
2816 dt2 = float64_to_int64(fdt0, &env->active_fpu.fp_status);
2817 RESTORE_ROUNDING_MODE;
2818 update_fcr31();
2819 if (GET_FP_CAUSE(env->active_fpu.fcr31) & (FP_OVERFLOW | FP_INVALID))
2820 dt2 = FLOAT_SNAN64;
2821 return dt2;
2824 uint64_t helper_float_floorl_s(uint32_t fst0)
2826 uint64_t dt2;
2828 set_float_exception_flags(0, &env->active_fpu.fp_status);
2829 set_float_rounding_mode(float_round_down, &env->active_fpu.fp_status);
2830 dt2 = float32_to_int64(fst0, &env->active_fpu.fp_status);
2831 RESTORE_ROUNDING_MODE;
2832 update_fcr31();
2833 if (GET_FP_CAUSE(env->active_fpu.fcr31) & (FP_OVERFLOW | FP_INVALID))
2834 dt2 = FLOAT_SNAN64;
2835 return dt2;
2838 uint32_t helper_float_floorw_d(uint64_t fdt0)
2840 uint32_t wt2;
2842 set_float_exception_flags(0, &env->active_fpu.fp_status);
2843 set_float_rounding_mode(float_round_down, &env->active_fpu.fp_status);
2844 wt2 = float64_to_int32(fdt0, &env->active_fpu.fp_status);
2845 RESTORE_ROUNDING_MODE;
2846 update_fcr31();
2847 if (GET_FP_CAUSE(env->active_fpu.fcr31) & (FP_OVERFLOW | FP_INVALID))
2848 wt2 = FLOAT_SNAN32;
2849 return wt2;
2852 uint32_t helper_float_floorw_s(uint32_t fst0)
2854 uint32_t wt2;
2856 set_float_exception_flags(0, &env->active_fpu.fp_status);
2857 set_float_rounding_mode(float_round_down, &env->active_fpu.fp_status);
2858 wt2 = float32_to_int32(fst0, &env->active_fpu.fp_status);
2859 RESTORE_ROUNDING_MODE;
2860 update_fcr31();
2861 if (GET_FP_CAUSE(env->active_fpu.fcr31) & (FP_OVERFLOW | FP_INVALID))
2862 wt2 = FLOAT_SNAN32;
2863 return wt2;
2866 /* unary operations, not modifying fp status */
/* Sign-manipulation unary ops (abs, chs) for d, s and ps formats.
   These never touch the FP status. */
#define FLOAT_UNOP(name)                                \
uint64_t helper_float_ ## name ## _d(uint64_t fdt0)     \
{                                                       \
    return float64_ ## name(fdt0);                      \
}                                                       \
uint32_t helper_float_ ## name ## _s(uint32_t fst0)     \
{                                                       \
    return float32_ ## name(fst0);                      \
}                                                       \
uint64_t helper_float_ ## name ## _ps(uint64_t fdt0)    \
{                                                       \
    uint32_t lo, hi;                                    \
                                                        \
    lo = float32_ ## name(fdt0 & 0XFFFFFFFF);           \
    hi = float32_ ## name(fdt0 >> 32);                  \
    return ((uint64_t)hi << 32) | lo;                   \
}

FLOAT_UNOP(abs)
FLOAT_UNOP(chs)
#undef FLOAT_UNOP
2889 /* MIPS specific unary operations */
2890 uint64_t helper_float_recip_d(uint64_t fdt0)
2892 uint64_t fdt2;
2894 set_float_exception_flags(0, &env->active_fpu.fp_status);
2895 fdt2 = float64_div(FLOAT_ONE64, fdt0, &env->active_fpu.fp_status);
2896 update_fcr31();
2897 return fdt2;
2900 uint32_t helper_float_recip_s(uint32_t fst0)
2902 uint32_t fst2;
2904 set_float_exception_flags(0, &env->active_fpu.fp_status);
2905 fst2 = float32_div(FLOAT_ONE32, fst0, &env->active_fpu.fp_status);
2906 update_fcr31();
2907 return fst2;
2910 uint64_t helper_float_rsqrt_d(uint64_t fdt0)
2912 uint64_t fdt2;
2914 set_float_exception_flags(0, &env->active_fpu.fp_status);
2915 fdt2 = float64_sqrt(fdt0, &env->active_fpu.fp_status);
2916 fdt2 = float64_div(FLOAT_ONE64, fdt2, &env->active_fpu.fp_status);
2917 update_fcr31();
2918 return fdt2;
2921 uint32_t helper_float_rsqrt_s(uint32_t fst0)
2923 uint32_t fst2;
2925 set_float_exception_flags(0, &env->active_fpu.fp_status);
2926 fst2 = float32_sqrt(fst0, &env->active_fpu.fp_status);
2927 fst2 = float32_div(FLOAT_ONE32, fst2, &env->active_fpu.fp_status);
2928 update_fcr31();
2929 return fst2;
2932 uint64_t helper_float_recip1_d(uint64_t fdt0)
2934 uint64_t fdt2;
2936 set_float_exception_flags(0, &env->active_fpu.fp_status);
2937 fdt2 = float64_div(FLOAT_ONE64, fdt0, &env->active_fpu.fp_status);
2938 update_fcr31();
2939 return fdt2;
2942 uint32_t helper_float_recip1_s(uint32_t fst0)
2944 uint32_t fst2;
2946 set_float_exception_flags(0, &env->active_fpu.fp_status);
2947 fst2 = float32_div(FLOAT_ONE32, fst0, &env->active_fpu.fp_status);
2948 update_fcr31();
2949 return fst2;
2952 uint64_t helper_float_recip1_ps(uint64_t fdt0)
2954 uint32_t fst2;
2955 uint32_t fsth2;
2957 set_float_exception_flags(0, &env->active_fpu.fp_status);
2958 fst2 = float32_div(FLOAT_ONE32, fdt0 & 0XFFFFFFFF, &env->active_fpu.fp_status);
2959 fsth2 = float32_div(FLOAT_ONE32, fdt0 >> 32, &env->active_fpu.fp_status);
2960 update_fcr31();
2961 return ((uint64_t)fsth2 << 32) | fst2;
2964 uint64_t helper_float_rsqrt1_d(uint64_t fdt0)
2966 uint64_t fdt2;
2968 set_float_exception_flags(0, &env->active_fpu.fp_status);
2969 fdt2 = float64_sqrt(fdt0, &env->active_fpu.fp_status);
2970 fdt2 = float64_div(FLOAT_ONE64, fdt2, &env->active_fpu.fp_status);
2971 update_fcr31();
2972 return fdt2;
2975 uint32_t helper_float_rsqrt1_s(uint32_t fst0)
2977 uint32_t fst2;
2979 set_float_exception_flags(0, &env->active_fpu.fp_status);
2980 fst2 = float32_sqrt(fst0, &env->active_fpu.fp_status);
2981 fst2 = float32_div(FLOAT_ONE32, fst2, &env->active_fpu.fp_status);
2982 update_fcr31();
2983 return fst2;
2986 uint64_t helper_float_rsqrt1_ps(uint64_t fdt0)
2988 uint32_t fst2;
2989 uint32_t fsth2;
2991 set_float_exception_flags(0, &env->active_fpu.fp_status);
2992 fst2 = float32_sqrt(fdt0 & 0XFFFFFFFF, &env->active_fpu.fp_status);
2993 fsth2 = float32_sqrt(fdt0 >> 32, &env->active_fpu.fp_status);
2994 fst2 = float32_div(FLOAT_ONE32, fst2, &env->active_fpu.fp_status);
2995 fsth2 = float32_div(FLOAT_ONE32, fsth2, &env->active_fpu.fp_status);
2996 update_fcr31();
2997 return ((uint64_t)fsth2 << 32) | fst2;
3000 #define FLOAT_OP(name, p) void helper_float_##name##_##p(void)
3002 /* binary operations */
3003 #define FLOAT_BINOP(name) \
3004 uint64_t helper_float_ ## name ## _d(uint64_t fdt0, uint64_t fdt1) \
3006 uint64_t dt2; \
3008 set_float_exception_flags(0, &env->active_fpu.fp_status); \
3009 dt2 = float64_ ## name (fdt0, fdt1, &env->active_fpu.fp_status); \
3010 update_fcr31(); \
3011 if (GET_FP_CAUSE(env->active_fpu.fcr31) & FP_INVALID) \
3012 dt2 = FLOAT_QNAN64; \
3013 return dt2; \
3016 uint32_t helper_float_ ## name ## _s(uint32_t fst0, uint32_t fst1) \
3018 uint32_t wt2; \
3020 set_float_exception_flags(0, &env->active_fpu.fp_status); \
3021 wt2 = float32_ ## name (fst0, fst1, &env->active_fpu.fp_status); \
3022 update_fcr31(); \
3023 if (GET_FP_CAUSE(env->active_fpu.fcr31) & FP_INVALID) \
3024 wt2 = FLOAT_QNAN32; \
3025 return wt2; \
3028 uint64_t helper_float_ ## name ## _ps(uint64_t fdt0, uint64_t fdt1) \
3030 uint32_t fst0 = fdt0 & 0XFFFFFFFF; \
3031 uint32_t fsth0 = fdt0 >> 32; \
3032 uint32_t fst1 = fdt1 & 0XFFFFFFFF; \
3033 uint32_t fsth1 = fdt1 >> 32; \
3034 uint32_t wt2; \
3035 uint32_t wth2; \
3037 set_float_exception_flags(0, &env->active_fpu.fp_status); \
3038 wt2 = float32_ ## name (fst0, fst1, &env->active_fpu.fp_status); \
3039 wth2 = float32_ ## name (fsth0, fsth1, &env->active_fpu.fp_status); \
3040 update_fcr31(); \
3041 if (GET_FP_CAUSE(env->active_fpu.fcr31) & FP_INVALID) { \
3042 wt2 = FLOAT_QNAN32; \
3043 wth2 = FLOAT_QNAN32; \
3045 return ((uint64_t)wth2 << 32) | wt2; \
3048 FLOAT_BINOP(add)
3049 FLOAT_BINOP(sub)
3050 FLOAT_BINOP(mul)
3051 FLOAT_BINOP(div)
3052 #undef FLOAT_BINOP
3054 /* ternary operations */
3055 #define FLOAT_TERNOP(name1, name2) \
3056 uint64_t helper_float_ ## name1 ## name2 ## _d(uint64_t fdt0, uint64_t fdt1, \
3057 uint64_t fdt2) \
3059 fdt0 = float64_ ## name1 (fdt0, fdt1, &env->active_fpu.fp_status); \
3060 return float64_ ## name2 (fdt0, fdt2, &env->active_fpu.fp_status); \
3063 uint32_t helper_float_ ## name1 ## name2 ## _s(uint32_t fst0, uint32_t fst1, \
3064 uint32_t fst2) \
3066 fst0 = float32_ ## name1 (fst0, fst1, &env->active_fpu.fp_status); \
3067 return float32_ ## name2 (fst0, fst2, &env->active_fpu.fp_status); \
3070 uint64_t helper_float_ ## name1 ## name2 ## _ps(uint64_t fdt0, uint64_t fdt1, \
3071 uint64_t fdt2) \
3073 uint32_t fst0 = fdt0 & 0XFFFFFFFF; \
3074 uint32_t fsth0 = fdt0 >> 32; \
3075 uint32_t fst1 = fdt1 & 0XFFFFFFFF; \
3076 uint32_t fsth1 = fdt1 >> 32; \
3077 uint32_t fst2 = fdt2 & 0XFFFFFFFF; \
3078 uint32_t fsth2 = fdt2 >> 32; \
3080 fst0 = float32_ ## name1 (fst0, fst1, &env->active_fpu.fp_status); \
3081 fsth0 = float32_ ## name1 (fsth0, fsth1, &env->active_fpu.fp_status); \
3082 fst2 = float32_ ## name2 (fst0, fst2, &env->active_fpu.fp_status); \
3083 fsth2 = float32_ ## name2 (fsth0, fsth2, &env->active_fpu.fp_status); \
3084 return ((uint64_t)fsth2 << 32) | fst2; \
3087 FLOAT_TERNOP(mul, add)
3088 FLOAT_TERNOP(mul, sub)
3089 #undef FLOAT_TERNOP
3091 /* negated ternary operations */
3092 #define FLOAT_NTERNOP(name1, name2) \
3093 uint64_t helper_float_n ## name1 ## name2 ## _d(uint64_t fdt0, uint64_t fdt1, \
3094 uint64_t fdt2) \
3096 fdt0 = float64_ ## name1 (fdt0, fdt1, &env->active_fpu.fp_status); \
3097 fdt2 = float64_ ## name2 (fdt0, fdt2, &env->active_fpu.fp_status); \
3098 return float64_chs(fdt2); \
3101 uint32_t helper_float_n ## name1 ## name2 ## _s(uint32_t fst0, uint32_t fst1, \
3102 uint32_t fst2) \
3104 fst0 = float32_ ## name1 (fst0, fst1, &env->active_fpu.fp_status); \
3105 fst2 = float32_ ## name2 (fst0, fst2, &env->active_fpu.fp_status); \
3106 return float32_chs(fst2); \
3109 uint64_t helper_float_n ## name1 ## name2 ## _ps(uint64_t fdt0, uint64_t fdt1,\
3110 uint64_t fdt2) \
3112 uint32_t fst0 = fdt0 & 0XFFFFFFFF; \
3113 uint32_t fsth0 = fdt0 >> 32; \
3114 uint32_t fst1 = fdt1 & 0XFFFFFFFF; \
3115 uint32_t fsth1 = fdt1 >> 32; \
3116 uint32_t fst2 = fdt2 & 0XFFFFFFFF; \
3117 uint32_t fsth2 = fdt2 >> 32; \
3119 fst0 = float32_ ## name1 (fst0, fst1, &env->active_fpu.fp_status); \
3120 fsth0 = float32_ ## name1 (fsth0, fsth1, &env->active_fpu.fp_status); \
3121 fst2 = float32_ ## name2 (fst0, fst2, &env->active_fpu.fp_status); \
3122 fsth2 = float32_ ## name2 (fsth0, fsth2, &env->active_fpu.fp_status); \
3123 fst2 = float32_chs(fst2); \
3124 fsth2 = float32_chs(fsth2); \
3125 return ((uint64_t)fsth2 << 32) | fst2; \
3128 FLOAT_NTERNOP(mul, add)
3129 FLOAT_NTERNOP(mul, sub)
3130 #undef FLOAT_NTERNOP
3132 /* MIPS specific binary operations */
3133 uint64_t helper_float_recip2_d(uint64_t fdt0, uint64_t fdt2)
3135 set_float_exception_flags(0, &env->active_fpu.fp_status);
3136 fdt2 = float64_mul(fdt0, fdt2, &env->active_fpu.fp_status);
3137 fdt2 = float64_chs(float64_sub(fdt2, FLOAT_ONE64, &env->active_fpu.fp_status));
3138 update_fcr31();
3139 return fdt2;
3142 uint32_t helper_float_recip2_s(uint32_t fst0, uint32_t fst2)
3144 set_float_exception_flags(0, &env->active_fpu.fp_status);
3145 fst2 = float32_mul(fst0, fst2, &env->active_fpu.fp_status);
3146 fst2 = float32_chs(float32_sub(fst2, FLOAT_ONE32, &env->active_fpu.fp_status));
3147 update_fcr31();
3148 return fst2;
3151 uint64_t helper_float_recip2_ps(uint64_t fdt0, uint64_t fdt2)
3153 uint32_t fst0 = fdt0 & 0XFFFFFFFF;
3154 uint32_t fsth0 = fdt0 >> 32;
3155 uint32_t fst2 = fdt2 & 0XFFFFFFFF;
3156 uint32_t fsth2 = fdt2 >> 32;
3158 set_float_exception_flags(0, &env->active_fpu.fp_status);
3159 fst2 = float32_mul(fst0, fst2, &env->active_fpu.fp_status);
3160 fsth2 = float32_mul(fsth0, fsth2, &env->active_fpu.fp_status);
3161 fst2 = float32_chs(float32_sub(fst2, FLOAT_ONE32, &env->active_fpu.fp_status));
3162 fsth2 = float32_chs(float32_sub(fsth2, FLOAT_ONE32, &env->active_fpu.fp_status));
3163 update_fcr31();
3164 return ((uint64_t)fsth2 << 32) | fst2;
3167 uint64_t helper_float_rsqrt2_d(uint64_t fdt0, uint64_t fdt2)
3169 set_float_exception_flags(0, &env->active_fpu.fp_status);
3170 fdt2 = float64_mul(fdt0, fdt2, &env->active_fpu.fp_status);
3171 fdt2 = float64_sub(fdt2, FLOAT_ONE64, &env->active_fpu.fp_status);
3172 fdt2 = float64_chs(float64_div(fdt2, FLOAT_TWO64, &env->active_fpu.fp_status));
3173 update_fcr31();
3174 return fdt2;
3177 uint32_t helper_float_rsqrt2_s(uint32_t fst0, uint32_t fst2)
3179 set_float_exception_flags(0, &env->active_fpu.fp_status);
3180 fst2 = float32_mul(fst0, fst2, &env->active_fpu.fp_status);
3181 fst2 = float32_sub(fst2, FLOAT_ONE32, &env->active_fpu.fp_status);
3182 fst2 = float32_chs(float32_div(fst2, FLOAT_TWO32, &env->active_fpu.fp_status));
3183 update_fcr31();
3184 return fst2;
3187 uint64_t helper_float_rsqrt2_ps(uint64_t fdt0, uint64_t fdt2)
3189 uint32_t fst0 = fdt0 & 0XFFFFFFFF;
3190 uint32_t fsth0 = fdt0 >> 32;
3191 uint32_t fst2 = fdt2 & 0XFFFFFFFF;
3192 uint32_t fsth2 = fdt2 >> 32;
3194 set_float_exception_flags(0, &env->active_fpu.fp_status);
3195 fst2 = float32_mul(fst0, fst2, &env->active_fpu.fp_status);
3196 fsth2 = float32_mul(fsth0, fsth2, &env->active_fpu.fp_status);
3197 fst2 = float32_sub(fst2, FLOAT_ONE32, &env->active_fpu.fp_status);
3198 fsth2 = float32_sub(fsth2, FLOAT_ONE32, &env->active_fpu.fp_status);
3199 fst2 = float32_chs(float32_div(fst2, FLOAT_TWO32, &env->active_fpu.fp_status));
3200 fsth2 = float32_chs(float32_div(fsth2, FLOAT_TWO32, &env->active_fpu.fp_status));
3201 update_fcr31();
3202 return ((uint64_t)fsth2 << 32) | fst2;
3205 uint64_t helper_float_addr_ps(uint64_t fdt0, uint64_t fdt1)
3207 uint32_t fst0 = fdt0 & 0XFFFFFFFF;
3208 uint32_t fsth0 = fdt0 >> 32;
3209 uint32_t fst1 = fdt1 & 0XFFFFFFFF;
3210 uint32_t fsth1 = fdt1 >> 32;
3211 uint32_t fst2;
3212 uint32_t fsth2;
3214 set_float_exception_flags(0, &env->active_fpu.fp_status);
3215 fst2 = float32_add (fst0, fsth0, &env->active_fpu.fp_status);
3216 fsth2 = float32_add (fst1, fsth1, &env->active_fpu.fp_status);
3217 update_fcr31();
3218 return ((uint64_t)fsth2 << 32) | fst2;
3221 uint64_t helper_float_mulr_ps(uint64_t fdt0, uint64_t fdt1)
3223 uint32_t fst0 = fdt0 & 0XFFFFFFFF;
3224 uint32_t fsth0 = fdt0 >> 32;
3225 uint32_t fst1 = fdt1 & 0XFFFFFFFF;
3226 uint32_t fsth1 = fdt1 >> 32;
3227 uint32_t fst2;
3228 uint32_t fsth2;
3230 set_float_exception_flags(0, &env->active_fpu.fp_status);
3231 fst2 = float32_mul (fst0, fsth0, &env->active_fpu.fp_status);
3232 fsth2 = float32_mul (fst1, fsth1, &env->active_fpu.fp_status);
3233 update_fcr31();
3234 return ((uint64_t)fsth2 << 32) | fst2;
3237 /* compare operations */
3238 #define FOP_COND_D(op, cond) \
3239 void helper_cmp_d_ ## op (uint64_t fdt0, uint64_t fdt1, int cc) \
3241 int c; \
3242 set_float_exception_flags(0, &env->active_fpu.fp_status); \
3243 c = cond; \
3244 update_fcr31(); \
3245 if (c) \
3246 SET_FP_COND(cc, env->active_fpu); \
3247 else \
3248 CLEAR_FP_COND(cc, env->active_fpu); \
3250 void helper_cmpabs_d_ ## op (uint64_t fdt0, uint64_t fdt1, int cc) \
3252 int c; \
3253 set_float_exception_flags(0, &env->active_fpu.fp_status); \
3254 fdt0 = float64_abs(fdt0); \
3255 fdt1 = float64_abs(fdt1); \
3256 c = cond; \
3257 update_fcr31(); \
3258 if (c) \
3259 SET_FP_COND(cc, env->active_fpu); \
3260 else \
3261 CLEAR_FP_COND(cc, env->active_fpu); \
3264 /* NOTE: the comma operator will make "cond" to eval to false,
3265 * but float64_unordered_quiet() is still called. */
3266 FOP_COND_D(f, (float64_unordered_quiet(fdt1, fdt0, &env->active_fpu.fp_status), 0))
3267 FOP_COND_D(un, float64_unordered_quiet(fdt1, fdt0, &env->active_fpu.fp_status))
3268 FOP_COND_D(eq, float64_eq_quiet(fdt0, fdt1, &env->active_fpu.fp_status))
3269 FOP_COND_D(ueq, float64_unordered_quiet(fdt1, fdt0, &env->active_fpu.fp_status) || float64_eq_quiet(fdt0, fdt1, &env->active_fpu.fp_status))
3270 FOP_COND_D(olt, float64_lt_quiet(fdt0, fdt1, &env->active_fpu.fp_status))
3271 FOP_COND_D(ult, float64_unordered_quiet(fdt1, fdt0, &env->active_fpu.fp_status) || float64_lt_quiet(fdt0, fdt1, &env->active_fpu.fp_status))
3272 FOP_COND_D(ole, float64_le_quiet(fdt0, fdt1, &env->active_fpu.fp_status))
3273 FOP_COND_D(ule, float64_unordered_quiet(fdt1, fdt0, &env->active_fpu.fp_status) || float64_le_quiet(fdt0, fdt1, &env->active_fpu.fp_status))
3274 /* NOTE: the comma operator will make "cond" to eval to false,
3275 * but float64_unordered() is still called. */
3276 FOP_COND_D(sf, (float64_unordered(fdt1, fdt0, &env->active_fpu.fp_status), 0))
3277 FOP_COND_D(ngle,float64_unordered(fdt1, fdt0, &env->active_fpu.fp_status))
3278 FOP_COND_D(seq, float64_eq(fdt0, fdt1, &env->active_fpu.fp_status))
3279 FOP_COND_D(ngl, float64_unordered(fdt1, fdt0, &env->active_fpu.fp_status) || float64_eq(fdt0, fdt1, &env->active_fpu.fp_status))
3280 FOP_COND_D(lt, float64_lt(fdt0, fdt1, &env->active_fpu.fp_status))
3281 FOP_COND_D(nge, float64_unordered(fdt1, fdt0, &env->active_fpu.fp_status) || float64_lt(fdt0, fdt1, &env->active_fpu.fp_status))
3282 FOP_COND_D(le, float64_le(fdt0, fdt1, &env->active_fpu.fp_status))
3283 FOP_COND_D(ngt, float64_unordered(fdt1, fdt0, &env->active_fpu.fp_status) || float64_le(fdt0, fdt1, &env->active_fpu.fp_status))
3285 #define FOP_COND_S(op, cond) \
3286 void helper_cmp_s_ ## op (uint32_t fst0, uint32_t fst1, int cc) \
3288 int c; \
3289 set_float_exception_flags(0, &env->active_fpu.fp_status); \
3290 c = cond; \
3291 update_fcr31(); \
3292 if (c) \
3293 SET_FP_COND(cc, env->active_fpu); \
3294 else \
3295 CLEAR_FP_COND(cc, env->active_fpu); \
3297 void helper_cmpabs_s_ ## op (uint32_t fst0, uint32_t fst1, int cc) \
3299 int c; \
3300 set_float_exception_flags(0, &env->active_fpu.fp_status); \
3301 fst0 = float32_abs(fst0); \
3302 fst1 = float32_abs(fst1); \
3303 c = cond; \
3304 update_fcr31(); \
3305 if (c) \
3306 SET_FP_COND(cc, env->active_fpu); \
3307 else \
3308 CLEAR_FP_COND(cc, env->active_fpu); \
3311 /* NOTE: the comma operator will make "cond" to eval to false,
3312 * but float32_unordered_quiet() is still called. */
3313 FOP_COND_S(f, (float32_unordered_quiet(fst1, fst0, &env->active_fpu.fp_status), 0))
3314 FOP_COND_S(un, float32_unordered_quiet(fst1, fst0, &env->active_fpu.fp_status))
3315 FOP_COND_S(eq, float32_eq_quiet(fst0, fst1, &env->active_fpu.fp_status))
3316 FOP_COND_S(ueq, float32_unordered_quiet(fst1, fst0, &env->active_fpu.fp_status) || float32_eq_quiet(fst0, fst1, &env->active_fpu.fp_status))
3317 FOP_COND_S(olt, float32_lt_quiet(fst0, fst1, &env->active_fpu.fp_status))
3318 FOP_COND_S(ult, float32_unordered_quiet(fst1, fst0, &env->active_fpu.fp_status) || float32_lt_quiet(fst0, fst1, &env->active_fpu.fp_status))
3319 FOP_COND_S(ole, float32_le_quiet(fst0, fst1, &env->active_fpu.fp_status))
3320 FOP_COND_S(ule, float32_unordered_quiet(fst1, fst0, &env->active_fpu.fp_status) || float32_le_quiet(fst0, fst1, &env->active_fpu.fp_status))
3321 /* NOTE: the comma operator will make "cond" to eval to false,
3322 * but float32_unordered() is still called. */
3323 FOP_COND_S(sf, (float32_unordered(fst1, fst0, &env->active_fpu.fp_status), 0))
3324 FOP_COND_S(ngle,float32_unordered(fst1, fst0, &env->active_fpu.fp_status))
3325 FOP_COND_S(seq, float32_eq(fst0, fst1, &env->active_fpu.fp_status))
3326 FOP_COND_S(ngl, float32_unordered(fst1, fst0, &env->active_fpu.fp_status) || float32_eq(fst0, fst1, &env->active_fpu.fp_status))
3327 FOP_COND_S(lt, float32_lt(fst0, fst1, &env->active_fpu.fp_status))
3328 FOP_COND_S(nge, float32_unordered(fst1, fst0, &env->active_fpu.fp_status) || float32_lt(fst0, fst1, &env->active_fpu.fp_status))
3329 FOP_COND_S(le, float32_le(fst0, fst1, &env->active_fpu.fp_status))
3330 FOP_COND_S(ngt, float32_unordered(fst1, fst0, &env->active_fpu.fp_status) || float32_le(fst0, fst1, &env->active_fpu.fp_status))
3332 #define FOP_COND_PS(op, condl, condh) \
3333 void helper_cmp_ps_ ## op (uint64_t fdt0, uint64_t fdt1, int cc) \
3335 uint32_t fst0, fsth0, fst1, fsth1; \
3336 int ch, cl; \
3337 set_float_exception_flags(0, &env->active_fpu.fp_status); \
3338 fst0 = fdt0 & 0XFFFFFFFF; \
3339 fsth0 = fdt0 >> 32; \
3340 fst1 = fdt1 & 0XFFFFFFFF; \
3341 fsth1 = fdt1 >> 32; \
3342 cl = condl; \
3343 ch = condh; \
3344 update_fcr31(); \
3345 if (cl) \
3346 SET_FP_COND(cc, env->active_fpu); \
3347 else \
3348 CLEAR_FP_COND(cc, env->active_fpu); \
3349 if (ch) \
3350 SET_FP_COND(cc + 1, env->active_fpu); \
3351 else \
3352 CLEAR_FP_COND(cc + 1, env->active_fpu); \
3354 void helper_cmpabs_ps_ ## op (uint64_t fdt0, uint64_t fdt1, int cc) \
3356 uint32_t fst0, fsth0, fst1, fsth1; \
3357 int ch, cl; \
3358 fst0 = float32_abs(fdt0 & 0XFFFFFFFF); \
3359 fsth0 = float32_abs(fdt0 >> 32); \
3360 fst1 = float32_abs(fdt1 & 0XFFFFFFFF); \
3361 fsth1 = float32_abs(fdt1 >> 32); \
3362 cl = condl; \
3363 ch = condh; \
3364 update_fcr31(); \
3365 if (cl) \
3366 SET_FP_COND(cc, env->active_fpu); \
3367 else \
3368 CLEAR_FP_COND(cc, env->active_fpu); \
3369 if (ch) \
3370 SET_FP_COND(cc + 1, env->active_fpu); \
3371 else \
3372 CLEAR_FP_COND(cc + 1, env->active_fpu); \
3375 /* NOTE: the comma operator will make "cond" to eval to false,
3376 * but float32_unordered_quiet() is still called. */
3377 FOP_COND_PS(f, (float32_unordered_quiet(fst1, fst0, &env->active_fpu.fp_status), 0),
3378 (float32_unordered_quiet(fsth1, fsth0, &env->active_fpu.fp_status), 0))
3379 FOP_COND_PS(un, float32_unordered_quiet(fst1, fst0, &env->active_fpu.fp_status),
3380 float32_unordered_quiet(fsth1, fsth0, &env->active_fpu.fp_status))
3381 FOP_COND_PS(eq, float32_eq_quiet(fst0, fst1, &env->active_fpu.fp_status),
3382 float32_eq_quiet(fsth0, fsth1, &env->active_fpu.fp_status))
3383 FOP_COND_PS(ueq, float32_unordered_quiet(fst1, fst0, &env->active_fpu.fp_status) || float32_eq_quiet(fst0, fst1, &env->active_fpu.fp_status),
3384 float32_unordered_quiet(fsth1, fsth0, &env->active_fpu.fp_status) || float32_eq_quiet(fsth0, fsth1, &env->active_fpu.fp_status))
3385 FOP_COND_PS(olt, float32_lt_quiet(fst0, fst1, &env->active_fpu.fp_status),
3386 float32_lt_quiet(fsth0, fsth1, &env->active_fpu.fp_status))
3387 FOP_COND_PS(ult, float32_unordered_quiet(fst1, fst0, &env->active_fpu.fp_status) || float32_lt_quiet(fst0, fst1, &env->active_fpu.fp_status),
3388 float32_unordered_quiet(fsth1, fsth0, &env->active_fpu.fp_status) || float32_lt_quiet(fsth0, fsth1, &env->active_fpu.fp_status))
3389 FOP_COND_PS(ole, float32_le_quiet(fst0, fst1, &env->active_fpu.fp_status),
3390 float32_le_quiet(fsth0, fsth1, &env->active_fpu.fp_status))
3391 FOP_COND_PS(ule, float32_unordered_quiet(fst1, fst0, &env->active_fpu.fp_status) || float32_le_quiet(fst0, fst1, &env->active_fpu.fp_status),
3392 float32_unordered_quiet(fsth1, fsth0, &env->active_fpu.fp_status) || float32_le_quiet(fsth0, fsth1, &env->active_fpu.fp_status))
3393 /* NOTE: the comma operator will make "cond" to eval to false,
3394 * but float32_unordered() is still called. */
3395 FOP_COND_PS(sf, (float32_unordered(fst1, fst0, &env->active_fpu.fp_status), 0),
3396 (float32_unordered(fsth1, fsth0, &env->active_fpu.fp_status), 0))
3397 FOP_COND_PS(ngle,float32_unordered(fst1, fst0, &env->active_fpu.fp_status),
3398 float32_unordered(fsth1, fsth0, &env->active_fpu.fp_status))
3399 FOP_COND_PS(seq, float32_eq(fst0, fst1, &env->active_fpu.fp_status),
3400 float32_eq(fsth0, fsth1, &env->active_fpu.fp_status))
3401 FOP_COND_PS(ngl, float32_unordered(fst1, fst0, &env->active_fpu.fp_status) || float32_eq(fst0, fst1, &env->active_fpu.fp_status),
3402 float32_unordered(fsth1, fsth0, &env->active_fpu.fp_status) || float32_eq(fsth0, fsth1, &env->active_fpu.fp_status))
3403 FOP_COND_PS(lt, float32_lt(fst0, fst1, &env->active_fpu.fp_status),
3404 float32_lt(fsth0, fsth1, &env->active_fpu.fp_status))
3405 FOP_COND_PS(nge, float32_unordered(fst1, fst0, &env->active_fpu.fp_status) || float32_lt(fst0, fst1, &env->active_fpu.fp_status),
3406 float32_unordered(fsth1, fsth0, &env->active_fpu.fp_status) || float32_lt(fsth0, fsth1, &env->active_fpu.fp_status))
3407 FOP_COND_PS(le, float32_le(fst0, fst1, &env->active_fpu.fp_status),
3408 float32_le(fsth0, fsth1, &env->active_fpu.fp_status))
3409 FOP_COND_PS(ngt, float32_unordered(fst1, fst0, &env->active_fpu.fp_status) || float32_le(fst0, fst1, &env->active_fpu.fp_status),
3410 float32_unordered(fsth1, fsth0, &env->active_fpu.fp_status) || float32_le(fsth0, fsth1, &env->active_fpu.fp_status))