kvm: Add kvm_has_pit_state2 helper
[qemu/ar7.git] / target-mips / op_helper.c
blobc51b9cb6f0e950f4edc94252cc98df322f0c762b
1 /*
2 * MIPS emulation helpers for qemu.
4 * Copyright (c) 2004-2005 Jocelyn Mayer
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
19 #include <stdlib.h>
20 #include "cpu.h"
21 #include "dyngen-exec.h"
23 #include "host-utils.h"
25 #include "helper.h"
27 #if !defined(CONFIG_USER_ONLY)
28 #include "softmmu_exec.h"
29 #endif /* !defined(CONFIG_USER_ONLY) */
31 #ifndef CONFIG_USER_ONLY
32 static inline void cpu_mips_tlb_flush (CPUState *env, int flush_global);
33 #endif
35 static inline void compute_hflags(CPUState *env)
37 env->hflags &= ~(MIPS_HFLAG_COP1X | MIPS_HFLAG_64 | MIPS_HFLAG_CP0 |
38 MIPS_HFLAG_F64 | MIPS_HFLAG_FPU | MIPS_HFLAG_KSU |
39 MIPS_HFLAG_UX);
40 if (!(env->CP0_Status & (1 << CP0St_EXL)) &&
41 !(env->CP0_Status & (1 << CP0St_ERL)) &&
42 !(env->hflags & MIPS_HFLAG_DM)) {
43 env->hflags |= (env->CP0_Status >> CP0St_KSU) & MIPS_HFLAG_KSU;
45 #if defined(TARGET_MIPS64)
46 if (((env->hflags & MIPS_HFLAG_KSU) != MIPS_HFLAG_UM) ||
47 (env->CP0_Status & (1 << CP0St_PX)) ||
48 (env->CP0_Status & (1 << CP0St_UX))) {
49 env->hflags |= MIPS_HFLAG_64;
51 if (env->CP0_Status & (1 << CP0St_UX)) {
52 env->hflags |= MIPS_HFLAG_UX;
54 #endif
55 if ((env->CP0_Status & (1 << CP0St_CU0)) ||
56 !(env->hflags & MIPS_HFLAG_KSU)) {
57 env->hflags |= MIPS_HFLAG_CP0;
59 if (env->CP0_Status & (1 << CP0St_CU1)) {
60 env->hflags |= MIPS_HFLAG_FPU;
62 if (env->CP0_Status & (1 << CP0St_FR)) {
63 env->hflags |= MIPS_HFLAG_F64;
65 if (env->insn_flags & ISA_MIPS32R2) {
66 if (env->active_fpu.fcr0 & (1 << FCR0_F64)) {
67 env->hflags |= MIPS_HFLAG_COP1X;
69 } else if (env->insn_flags & ISA_MIPS32) {
70 if (env->hflags & MIPS_HFLAG_64) {
71 env->hflags |= MIPS_HFLAG_COP1X;
73 } else if (env->insn_flags & ISA_MIPS4) {
74 /* All supported MIPS IV CPUs use the XX (CU3) to enable
75 and disable the MIPS IV extensions to the MIPS III ISA.
76 Some other MIPS IV CPUs ignore the bit, so the check here
77 would be too restrictive for them. */
78 if (env->CP0_Status & (1 << CP0St_CU3)) {
79 env->hflags |= MIPS_HFLAG_COP1X;
84 /*****************************************************************************/
85 /* Exceptions processing helpers */
87 void helper_raise_exception_err (uint32_t exception, int error_code)
89 #if 1
90 if (exception < 0x100)
91 qemu_log("%s: %d %d\n", __func__, exception, error_code);
92 #endif
93 env->exception_index = exception;
94 env->error_code = error_code;
95 cpu_loop_exit(env);
98 void helper_raise_exception (uint32_t exception)
100 helper_raise_exception_err(exception, 0);
103 #if !defined(CONFIG_USER_ONLY)
104 static void do_restore_state (void *pc_ptr)
106 TranslationBlock *tb;
107 unsigned long pc = (unsigned long) pc_ptr;
109 tb = tb_find_pc (pc);
110 if (tb) {
111 cpu_restore_state(tb, env, pc);
114 #endif
116 #if defined(CONFIG_USER_ONLY)
117 #define HELPER_LD(name, insn, type) \
118 static inline type do_##name(target_ulong addr, int mem_idx) \
120 return (type) insn##_raw(addr); \
122 #else
123 #define HELPER_LD(name, insn, type) \
124 static inline type do_##name(target_ulong addr, int mem_idx) \
126 switch (mem_idx) \
128 case 0: return (type) insn##_kernel(addr); break; \
129 case 1: return (type) insn##_super(addr); break; \
130 default: \
131 case 2: return (type) insn##_user(addr); break; \
134 #endif
135 HELPER_LD(lbu, ldub, uint8_t)
136 HELPER_LD(lw, ldl, int32_t)
137 #ifdef TARGET_MIPS64
138 HELPER_LD(ld, ldq, int64_t)
139 #endif
140 #undef HELPER_LD
142 #if defined(CONFIG_USER_ONLY)
143 #define HELPER_ST(name, insn, type) \
144 static inline void do_##name(target_ulong addr, type val, int mem_idx) \
146 insn##_raw(addr, val); \
148 #else
149 #define HELPER_ST(name, insn, type) \
150 static inline void do_##name(target_ulong addr, type val, int mem_idx) \
152 switch (mem_idx) \
154 case 0: insn##_kernel(addr, val); break; \
155 case 1: insn##_super(addr, val); break; \
156 default: \
157 case 2: insn##_user(addr, val); break; \
160 #endif
161 HELPER_ST(sb, stb, uint8_t)
162 HELPER_ST(sw, stl, uint32_t)
163 #ifdef TARGET_MIPS64
164 HELPER_ST(sd, stq, uint64_t)
165 #endif
166 #undef HELPER_ST
168 target_ulong helper_clo (target_ulong arg1)
170 return clo32(arg1);
173 target_ulong helper_clz (target_ulong arg1)
175 return clz32(arg1);
178 #if defined(TARGET_MIPS64)
179 target_ulong helper_dclo (target_ulong arg1)
181 return clo64(arg1);
184 target_ulong helper_dclz (target_ulong arg1)
186 return clz64(arg1);
188 #endif /* TARGET_MIPS64 */
190 /* 64 bits arithmetic for 32 bits hosts */
191 static inline uint64_t get_HILO (void)
193 return ((uint64_t)(env->active_tc.HI[0]) << 32) | (uint32_t)env->active_tc.LO[0];
196 static inline void set_HILO (uint64_t HILO)
198 env->active_tc.LO[0] = (int32_t)HILO;
199 env->active_tc.HI[0] = (int32_t)(HILO >> 32);
202 static inline void set_HIT0_LO (target_ulong arg1, uint64_t HILO)
204 env->active_tc.LO[0] = (int32_t)(HILO & 0xFFFFFFFF);
205 arg1 = env->active_tc.HI[0] = (int32_t)(HILO >> 32);
208 static inline void set_HI_LOT0 (target_ulong arg1, uint64_t HILO)
210 arg1 = env->active_tc.LO[0] = (int32_t)(HILO & 0xFFFFFFFF);
211 env->active_tc.HI[0] = (int32_t)(HILO >> 32);
214 /* Multiplication variants of the vr54xx. */
215 target_ulong helper_muls (target_ulong arg1, target_ulong arg2)
217 set_HI_LOT0(arg1, 0 - ((int64_t)(int32_t)arg1 * (int64_t)(int32_t)arg2));
219 return arg1;
222 target_ulong helper_mulsu (target_ulong arg1, target_ulong arg2)
224 set_HI_LOT0(arg1, 0 - ((uint64_t)(uint32_t)arg1 * (uint64_t)(uint32_t)arg2));
226 return arg1;
229 target_ulong helper_macc (target_ulong arg1, target_ulong arg2)
231 set_HI_LOT0(arg1, ((int64_t)get_HILO()) + ((int64_t)(int32_t)arg1 * (int64_t)(int32_t)arg2));
233 return arg1;
236 target_ulong helper_macchi (target_ulong arg1, target_ulong arg2)
238 set_HIT0_LO(arg1, ((int64_t)get_HILO()) + ((int64_t)(int32_t)arg1 * (int64_t)(int32_t)arg2));
240 return arg1;
243 target_ulong helper_maccu (target_ulong arg1, target_ulong arg2)
245 set_HI_LOT0(arg1, ((uint64_t)get_HILO()) + ((uint64_t)(uint32_t)arg1 * (uint64_t)(uint32_t)arg2));
247 return arg1;
250 target_ulong helper_macchiu (target_ulong arg1, target_ulong arg2)
252 set_HIT0_LO(arg1, ((uint64_t)get_HILO()) + ((uint64_t)(uint32_t)arg1 * (uint64_t)(uint32_t)arg2));
254 return arg1;
257 target_ulong helper_msac (target_ulong arg1, target_ulong arg2)
259 set_HI_LOT0(arg1, ((int64_t)get_HILO()) - ((int64_t)(int32_t)arg1 * (int64_t)(int32_t)arg2));
261 return arg1;
264 target_ulong helper_msachi (target_ulong arg1, target_ulong arg2)
266 set_HIT0_LO(arg1, ((int64_t)get_HILO()) - ((int64_t)(int32_t)arg1 * (int64_t)(int32_t)arg2));
268 return arg1;
271 target_ulong helper_msacu (target_ulong arg1, target_ulong arg2)
273 set_HI_LOT0(arg1, ((uint64_t)get_HILO()) - ((uint64_t)(uint32_t)arg1 * (uint64_t)(uint32_t)arg2));
275 return arg1;
278 target_ulong helper_msachiu (target_ulong arg1, target_ulong arg2)
280 set_HIT0_LO(arg1, ((uint64_t)get_HILO()) - ((uint64_t)(uint32_t)arg1 * (uint64_t)(uint32_t)arg2));
282 return arg1;
285 target_ulong helper_mulhi (target_ulong arg1, target_ulong arg2)
287 set_HIT0_LO(arg1, (int64_t)(int32_t)arg1 * (int64_t)(int32_t)arg2);
289 return arg1;
292 target_ulong helper_mulhiu (target_ulong arg1, target_ulong arg2)
294 set_HIT0_LO(arg1, (uint64_t)(uint32_t)arg1 * (uint64_t)(uint32_t)arg2);
296 return arg1;
299 target_ulong helper_mulshi (target_ulong arg1, target_ulong arg2)
301 set_HIT0_LO(arg1, 0 - ((int64_t)(int32_t)arg1 * (int64_t)(int32_t)arg2));
303 return arg1;
306 target_ulong helper_mulshiu (target_ulong arg1, target_ulong arg2)
308 set_HIT0_LO(arg1, 0 - ((uint64_t)(uint32_t)arg1 * (uint64_t)(uint32_t)arg2));
310 return arg1;
#ifdef TARGET_MIPS64
/* DMULT/DMULTU: full signed/unsigned 64x64->128 multiply into HI:LO. */
void helper_dmult (target_ulong arg1, target_ulong arg2)
{
    muls64(&(env->active_tc.LO[0]), &(env->active_tc.HI[0]), arg1, arg2);
}

void helper_dmultu (target_ulong arg1, target_ulong arg2)
{
    mulu64(&(env->active_tc.LO[0]), &(env->active_tc.HI[0]), arg1, arg2);
}
#endif
325 #ifndef CONFIG_USER_ONLY
327 static inline target_phys_addr_t do_translate_address(target_ulong address, int rw)
329 target_phys_addr_t lladdr;
331 lladdr = cpu_mips_translate_address(env, address, rw);
333 if (lladdr == -1LL) {
334 cpu_loop_exit(env);
335 } else {
336 return lladdr;
340 #define HELPER_LD_ATOMIC(name, insn) \
341 target_ulong helper_##name(target_ulong arg, int mem_idx) \
343 env->lladdr = do_translate_address(arg, 0); \
344 env->llval = do_##insn(arg, mem_idx); \
345 return env->llval; \
347 HELPER_LD_ATOMIC(ll, lw)
348 #ifdef TARGET_MIPS64
349 HELPER_LD_ATOMIC(lld, ld)
350 #endif
351 #undef HELPER_LD_ATOMIC
353 #define HELPER_ST_ATOMIC(name, ld_insn, st_insn, almask) \
354 target_ulong helper_##name(target_ulong arg1, target_ulong arg2, int mem_idx) \
356 target_long tmp; \
358 if (arg2 & almask) { \
359 env->CP0_BadVAddr = arg2; \
360 helper_raise_exception(EXCP_AdES); \
362 if (do_translate_address(arg2, 1) == env->lladdr) { \
363 tmp = do_##ld_insn(arg2, mem_idx); \
364 if (tmp == env->llval) { \
365 do_##st_insn(arg2, arg1, mem_idx); \
366 return 1; \
369 return 0; \
371 HELPER_ST_ATOMIC(sc, lw, sw, 0x3)
372 #ifdef TARGET_MIPS64
373 HELPER_ST_ATOMIC(scd, ld, sd, 0x7)
374 #endif
375 #undef HELPER_ST_ATOMIC
376 #endif
/* GET_LMASK(v): index of the addressed byte within its 32-bit word,
   normalized so the unaligned load/store helpers can use the same
   comparisons on both byte orders.
   GET_OFFSET(addr, offset): step from the addressed byte towards the
   other bytes of the word (direction depends on endianness).
   BUG FIX: `addr` was unparenthesized in GET_OFFSET, so an expression
   argument (e.g. a conditional) would bind incorrectly. */
#ifdef TARGET_WORDS_BIGENDIAN
#define GET_LMASK(v) ((v) & 3)
#define GET_OFFSET(addr, offset) ((addr) + (offset))
#else
#define GET_LMASK(v) (((v) & 3) ^ 3)
#define GET_OFFSET(addr, offset) ((addr) - (offset))
#endif
386 target_ulong helper_lwl(target_ulong arg1, target_ulong arg2, int mem_idx)
388 target_ulong tmp;
390 tmp = do_lbu(arg2, mem_idx);
391 arg1 = (arg1 & 0x00FFFFFF) | (tmp << 24);
393 if (GET_LMASK(arg2) <= 2) {
394 tmp = do_lbu(GET_OFFSET(arg2, 1), mem_idx);
395 arg1 = (arg1 & 0xFF00FFFF) | (tmp << 16);
398 if (GET_LMASK(arg2) <= 1) {
399 tmp = do_lbu(GET_OFFSET(arg2, 2), mem_idx);
400 arg1 = (arg1 & 0xFFFF00FF) | (tmp << 8);
403 if (GET_LMASK(arg2) == 0) {
404 tmp = do_lbu(GET_OFFSET(arg2, 3), mem_idx);
405 arg1 = (arg1 & 0xFFFFFF00) | tmp;
407 return (int32_t)arg1;
410 target_ulong helper_lwr(target_ulong arg1, target_ulong arg2, int mem_idx)
412 target_ulong tmp;
414 tmp = do_lbu(arg2, mem_idx);
415 arg1 = (arg1 & 0xFFFFFF00) | tmp;
417 if (GET_LMASK(arg2) >= 1) {
418 tmp = do_lbu(GET_OFFSET(arg2, -1), mem_idx);
419 arg1 = (arg1 & 0xFFFF00FF) | (tmp << 8);
422 if (GET_LMASK(arg2) >= 2) {
423 tmp = do_lbu(GET_OFFSET(arg2, -2), mem_idx);
424 arg1 = (arg1 & 0xFF00FFFF) | (tmp << 16);
427 if (GET_LMASK(arg2) == 3) {
428 tmp = do_lbu(GET_OFFSET(arg2, -3), mem_idx);
429 arg1 = (arg1 & 0x00FFFFFF) | (tmp << 24);
431 return (int32_t)arg1;
434 void helper_swl(target_ulong arg1, target_ulong arg2, int mem_idx)
436 do_sb(arg2, (uint8_t)(arg1 >> 24), mem_idx);
438 if (GET_LMASK(arg2) <= 2)
439 do_sb(GET_OFFSET(arg2, 1), (uint8_t)(arg1 >> 16), mem_idx);
441 if (GET_LMASK(arg2) <= 1)
442 do_sb(GET_OFFSET(arg2, 2), (uint8_t)(arg1 >> 8), mem_idx);
444 if (GET_LMASK(arg2) == 0)
445 do_sb(GET_OFFSET(arg2, 3), (uint8_t)arg1, mem_idx);
448 void helper_swr(target_ulong arg1, target_ulong arg2, int mem_idx)
450 do_sb(arg2, (uint8_t)arg1, mem_idx);
452 if (GET_LMASK(arg2) >= 1)
453 do_sb(GET_OFFSET(arg2, -1), (uint8_t)(arg1 >> 8), mem_idx);
455 if (GET_LMASK(arg2) >= 2)
456 do_sb(GET_OFFSET(arg2, -2), (uint8_t)(arg1 >> 16), mem_idx);
458 if (GET_LMASK(arg2) == 3)
459 do_sb(GET_OFFSET(arg2, -3), (uint8_t)(arg1 >> 24), mem_idx);
462 #if defined(TARGET_MIPS64)
/* "half" load and stores. We must do the memory access inline,
   or fault handling won't work. */

/* GET_LMASK64(v): endian-normalized index (0..7) of the addressed byte
   within its 64-bit doubleword, used by the LDL/LDR/SDL/SDR helpers
   below; mirrors the 32-bit GET_LMASK above. */
#ifdef TARGET_WORDS_BIGENDIAN
#define GET_LMASK64(v) ((v) & 7)
#else
#define GET_LMASK64(v) (((v) & 7) ^ 7)
#endif
472 target_ulong helper_ldl(target_ulong arg1, target_ulong arg2, int mem_idx)
474 uint64_t tmp;
476 tmp = do_lbu(arg2, mem_idx);
477 arg1 = (arg1 & 0x00FFFFFFFFFFFFFFULL) | (tmp << 56);
479 if (GET_LMASK64(arg2) <= 6) {
480 tmp = do_lbu(GET_OFFSET(arg2, 1), mem_idx);
481 arg1 = (arg1 & 0xFF00FFFFFFFFFFFFULL) | (tmp << 48);
484 if (GET_LMASK64(arg2) <= 5) {
485 tmp = do_lbu(GET_OFFSET(arg2, 2), mem_idx);
486 arg1 = (arg1 & 0xFFFF00FFFFFFFFFFULL) | (tmp << 40);
489 if (GET_LMASK64(arg2) <= 4) {
490 tmp = do_lbu(GET_OFFSET(arg2, 3), mem_idx);
491 arg1 = (arg1 & 0xFFFFFF00FFFFFFFFULL) | (tmp << 32);
494 if (GET_LMASK64(arg2) <= 3) {
495 tmp = do_lbu(GET_OFFSET(arg2, 4), mem_idx);
496 arg1 = (arg1 & 0xFFFFFFFF00FFFFFFULL) | (tmp << 24);
499 if (GET_LMASK64(arg2) <= 2) {
500 tmp = do_lbu(GET_OFFSET(arg2, 5), mem_idx);
501 arg1 = (arg1 & 0xFFFFFFFFFF00FFFFULL) | (tmp << 16);
504 if (GET_LMASK64(arg2) <= 1) {
505 tmp = do_lbu(GET_OFFSET(arg2, 6), mem_idx);
506 arg1 = (arg1 & 0xFFFFFFFFFFFF00FFULL) | (tmp << 8);
509 if (GET_LMASK64(arg2) == 0) {
510 tmp = do_lbu(GET_OFFSET(arg2, 7), mem_idx);
511 arg1 = (arg1 & 0xFFFFFFFFFFFFFF00ULL) | tmp;
514 return arg1;
517 target_ulong helper_ldr(target_ulong arg1, target_ulong arg2, int mem_idx)
519 uint64_t tmp;
521 tmp = do_lbu(arg2, mem_idx);
522 arg1 = (arg1 & 0xFFFFFFFFFFFFFF00ULL) | tmp;
524 if (GET_LMASK64(arg2) >= 1) {
525 tmp = do_lbu(GET_OFFSET(arg2, -1), mem_idx);
526 arg1 = (arg1 & 0xFFFFFFFFFFFF00FFULL) | (tmp << 8);
529 if (GET_LMASK64(arg2) >= 2) {
530 tmp = do_lbu(GET_OFFSET(arg2, -2), mem_idx);
531 arg1 = (arg1 & 0xFFFFFFFFFF00FFFFULL) | (tmp << 16);
534 if (GET_LMASK64(arg2) >= 3) {
535 tmp = do_lbu(GET_OFFSET(arg2, -3), mem_idx);
536 arg1 = (arg1 & 0xFFFFFFFF00FFFFFFULL) | (tmp << 24);
539 if (GET_LMASK64(arg2) >= 4) {
540 tmp = do_lbu(GET_OFFSET(arg2, -4), mem_idx);
541 arg1 = (arg1 & 0xFFFFFF00FFFFFFFFULL) | (tmp << 32);
544 if (GET_LMASK64(arg2) >= 5) {
545 tmp = do_lbu(GET_OFFSET(arg2, -5), mem_idx);
546 arg1 = (arg1 & 0xFFFF00FFFFFFFFFFULL) | (tmp << 40);
549 if (GET_LMASK64(arg2) >= 6) {
550 tmp = do_lbu(GET_OFFSET(arg2, -6), mem_idx);
551 arg1 = (arg1 & 0xFF00FFFFFFFFFFFFULL) | (tmp << 48);
554 if (GET_LMASK64(arg2) == 7) {
555 tmp = do_lbu(GET_OFFSET(arg2, -7), mem_idx);
556 arg1 = (arg1 & 0x00FFFFFFFFFFFFFFULL) | (tmp << 56);
559 return arg1;
562 void helper_sdl(target_ulong arg1, target_ulong arg2, int mem_idx)
564 do_sb(arg2, (uint8_t)(arg1 >> 56), mem_idx);
566 if (GET_LMASK64(arg2) <= 6)
567 do_sb(GET_OFFSET(arg2, 1), (uint8_t)(arg1 >> 48), mem_idx);
569 if (GET_LMASK64(arg2) <= 5)
570 do_sb(GET_OFFSET(arg2, 2), (uint8_t)(arg1 >> 40), mem_idx);
572 if (GET_LMASK64(arg2) <= 4)
573 do_sb(GET_OFFSET(arg2, 3), (uint8_t)(arg1 >> 32), mem_idx);
575 if (GET_LMASK64(arg2) <= 3)
576 do_sb(GET_OFFSET(arg2, 4), (uint8_t)(arg1 >> 24), mem_idx);
578 if (GET_LMASK64(arg2) <= 2)
579 do_sb(GET_OFFSET(arg2, 5), (uint8_t)(arg1 >> 16), mem_idx);
581 if (GET_LMASK64(arg2) <= 1)
582 do_sb(GET_OFFSET(arg2, 6), (uint8_t)(arg1 >> 8), mem_idx);
584 if (GET_LMASK64(arg2) <= 0)
585 do_sb(GET_OFFSET(arg2, 7), (uint8_t)arg1, mem_idx);
588 void helper_sdr(target_ulong arg1, target_ulong arg2, int mem_idx)
590 do_sb(arg2, (uint8_t)arg1, mem_idx);
592 if (GET_LMASK64(arg2) >= 1)
593 do_sb(GET_OFFSET(arg2, -1), (uint8_t)(arg1 >> 8), mem_idx);
595 if (GET_LMASK64(arg2) >= 2)
596 do_sb(GET_OFFSET(arg2, -2), (uint8_t)(arg1 >> 16), mem_idx);
598 if (GET_LMASK64(arg2) >= 3)
599 do_sb(GET_OFFSET(arg2, -3), (uint8_t)(arg1 >> 24), mem_idx);
601 if (GET_LMASK64(arg2) >= 4)
602 do_sb(GET_OFFSET(arg2, -4), (uint8_t)(arg1 >> 32), mem_idx);
604 if (GET_LMASK64(arg2) >= 5)
605 do_sb(GET_OFFSET(arg2, -5), (uint8_t)(arg1 >> 40), mem_idx);
607 if (GET_LMASK64(arg2) >= 6)
608 do_sb(GET_OFFSET(arg2, -6), (uint8_t)(arg1 >> 48), mem_idx);
610 if (GET_LMASK64(arg2) == 7)
611 do_sb(GET_OFFSET(arg2, -7), (uint8_t)(arg1 >> 56), mem_idx);
613 #endif /* TARGET_MIPS64 */
/* GPRs moved by the LWM/SWM/LDM/SDM multiple-register helpers below:
   s0-s7 ($16-$23) and s8/fp ($30).  ra ($31) is handled separately via
   bit 4 of the reglist operand. */
static const int multiple_regs[] = { 16, 17, 18, 19, 20, 21, 22, 23, 30 };
617 void helper_lwm (target_ulong addr, target_ulong reglist, uint32_t mem_idx)
619 target_ulong base_reglist = reglist & 0xf;
620 target_ulong do_r31 = reglist & 0x10;
621 #ifdef CONFIG_USER_ONLY
622 #undef ldfun
623 #define ldfun ldl_raw
624 #else
625 uint32_t (*ldfun)(target_ulong);
627 switch (mem_idx)
629 case 0: ldfun = ldl_kernel; break;
630 case 1: ldfun = ldl_super; break;
631 default:
632 case 2: ldfun = ldl_user; break;
634 #endif
636 if (base_reglist > 0 && base_reglist <= ARRAY_SIZE (multiple_regs)) {
637 target_ulong i;
639 for (i = 0; i < base_reglist; i++) {
640 env->active_tc.gpr[multiple_regs[i]] = (target_long) ldfun(addr);
641 addr += 4;
645 if (do_r31) {
646 env->active_tc.gpr[31] = (target_long) ldfun(addr);
650 void helper_swm (target_ulong addr, target_ulong reglist, uint32_t mem_idx)
652 target_ulong base_reglist = reglist & 0xf;
653 target_ulong do_r31 = reglist & 0x10;
654 #ifdef CONFIG_USER_ONLY
655 #undef stfun
656 #define stfun stl_raw
657 #else
658 void (*stfun)(target_ulong, uint32_t);
660 switch (mem_idx)
662 case 0: stfun = stl_kernel; break;
663 case 1: stfun = stl_super; break;
664 default:
665 case 2: stfun = stl_user; break;
667 #endif
669 if (base_reglist > 0 && base_reglist <= ARRAY_SIZE (multiple_regs)) {
670 target_ulong i;
672 for (i = 0; i < base_reglist; i++) {
673 stfun(addr, env->active_tc.gpr[multiple_regs[i]]);
674 addr += 4;
678 if (do_r31) {
679 stfun(addr, env->active_tc.gpr[31]);
683 #if defined(TARGET_MIPS64)
684 void helper_ldm (target_ulong addr, target_ulong reglist, uint32_t mem_idx)
686 target_ulong base_reglist = reglist & 0xf;
687 target_ulong do_r31 = reglist & 0x10;
688 #ifdef CONFIG_USER_ONLY
689 #undef ldfun
690 #define ldfun ldq_raw
691 #else
692 uint64_t (*ldfun)(target_ulong);
694 switch (mem_idx)
696 case 0: ldfun = ldq_kernel; break;
697 case 1: ldfun = ldq_super; break;
698 default:
699 case 2: ldfun = ldq_user; break;
701 #endif
703 if (base_reglist > 0 && base_reglist <= ARRAY_SIZE (multiple_regs)) {
704 target_ulong i;
706 for (i = 0; i < base_reglist; i++) {
707 env->active_tc.gpr[multiple_regs[i]] = ldfun(addr);
708 addr += 8;
712 if (do_r31) {
713 env->active_tc.gpr[31] = ldfun(addr);
717 void helper_sdm (target_ulong addr, target_ulong reglist, uint32_t mem_idx)
719 target_ulong base_reglist = reglist & 0xf;
720 target_ulong do_r31 = reglist & 0x10;
721 #ifdef CONFIG_USER_ONLY
722 #undef stfun
723 #define stfun stq_raw
724 #else
725 void (*stfun)(target_ulong, uint64_t);
727 switch (mem_idx)
729 case 0: stfun = stq_kernel; break;
730 case 1: stfun = stq_super; break;
731 default:
732 case 2: stfun = stq_user; break;
734 #endif
736 if (base_reglist > 0 && base_reglist <= ARRAY_SIZE (multiple_regs)) {
737 target_ulong i;
739 for (i = 0; i < base_reglist; i++) {
740 stfun(addr, env->active_tc.gpr[multiple_regs[i]]);
741 addr += 8;
745 if (do_r31) {
746 stfun(addr, env->active_tc.gpr[31]);
749 #endif
751 #ifndef CONFIG_USER_ONLY
752 /* SMP helpers. */
753 static int mips_vpe_is_wfi(CPUState *c)
755 /* If the VPE is halted but otherwise active, it means it's waiting for
756 an interrupt. */
757 return c->halted && mips_vpe_active(c);
760 static inline void mips_vpe_wake(CPUState *c)
762 /* Dont set ->halted = 0 directly, let it be done via cpu_has_work
763 because there might be other conditions that state that c should
764 be sleeping. */
765 cpu_interrupt(c, CPU_INTERRUPT_WAKE);
768 static inline void mips_vpe_sleep(CPUState *c)
770 /* The VPE was shut off, really go to bed.
771 Reset any old _WAKE requests. */
772 c->halted = 1;
773 cpu_reset_interrupt(c, CPU_INTERRUPT_WAKE);
776 static inline void mips_tc_wake(CPUState *c, int tc)
778 /* FIXME: TC reschedule. */
779 if (mips_vpe_active(c) && !mips_vpe_is_wfi(c)) {
780 mips_vpe_wake(c);
784 static inline void mips_tc_sleep(CPUState *c, int tc)
786 /* FIXME: TC reschedule. */
787 if (!mips_vpe_active(c)) {
788 mips_vpe_sleep(c);
792 /* tc should point to an int with the value of the global TC index.
793 This function will transform it into a local index within the
794 returned CPUState.
796 FIXME: This code assumes that all VPEs have the same number of TCs,
797 which depends on runtime setup. Can probably be fixed by
798 walking the list of CPUStates. */
799 static CPUState *mips_cpu_map_tc(int *tc)
801 CPUState *other;
802 int vpe_idx, nr_threads = env->nr_threads;
803 int tc_idx = *tc;
805 if (!(env->CP0_VPEConf0 & (1 << CP0VPEC0_MVP))) {
806 /* Not allowed to address other CPUs. */
807 *tc = env->current_tc;
808 return env;
811 vpe_idx = tc_idx / nr_threads;
812 *tc = tc_idx % nr_threads;
813 other = qemu_get_cpu(vpe_idx);
814 return other ? other : env;
817 /* The per VPE CP0_Status register shares some fields with the per TC
818 CP0_TCStatus registers. These fields are wired to the same registers,
819 so changes to either of them should be reflected on both registers.
821 Also, EntryHi shares the bottom 8 bit ASID with TCStauts.
823 These helper call synchronizes the regs for a given cpu. */
825 /* Called for updates to CP0_Status. */
826 static void sync_c0_status(CPUState *cpu, int tc)
828 int32_t tcstatus, *tcst;
829 uint32_t v = cpu->CP0_Status;
830 uint32_t cu, mx, asid, ksu;
831 uint32_t mask = ((1 << CP0TCSt_TCU3)
832 | (1 << CP0TCSt_TCU2)
833 | (1 << CP0TCSt_TCU1)
834 | (1 << CP0TCSt_TCU0)
835 | (1 << CP0TCSt_TMX)
836 | (3 << CP0TCSt_TKSU)
837 | (0xff << CP0TCSt_TASID));
839 cu = (v >> CP0St_CU0) & 0xf;
840 mx = (v >> CP0St_MX) & 0x1;
841 ksu = (v >> CP0St_KSU) & 0x3;
842 asid = env->CP0_EntryHi & 0xff;
844 tcstatus = cu << CP0TCSt_TCU0;
845 tcstatus |= mx << CP0TCSt_TMX;
846 tcstatus |= ksu << CP0TCSt_TKSU;
847 tcstatus |= asid;
849 if (tc == cpu->current_tc) {
850 tcst = &cpu->active_tc.CP0_TCStatus;
851 } else {
852 tcst = &cpu->tcs[tc].CP0_TCStatus;
855 *tcst &= ~mask;
856 *tcst |= tcstatus;
857 compute_hflags(cpu);
860 /* Called for updates to CP0_TCStatus. */
861 static void sync_c0_tcstatus(CPUState *cpu, int tc, target_ulong v)
863 uint32_t status;
864 uint32_t tcu, tmx, tasid, tksu;
865 uint32_t mask = ((1 << CP0St_CU3)
866 | (1 << CP0St_CU2)
867 | (1 << CP0St_CU1)
868 | (1 << CP0St_CU0)
869 | (1 << CP0St_MX)
870 | (3 << CP0St_KSU));
872 tcu = (v >> CP0TCSt_TCU0) & 0xf;
873 tmx = (v >> CP0TCSt_TMX) & 0x1;
874 tasid = v & 0xff;
875 tksu = (v >> CP0TCSt_TKSU) & 0x3;
877 status = tcu << CP0St_CU0;
878 status |= tmx << CP0St_MX;
879 status |= tksu << CP0St_KSU;
881 cpu->CP0_Status &= ~mask;
882 cpu->CP0_Status |= status;
884 /* Sync the TASID with EntryHi. */
885 cpu->CP0_EntryHi &= ~0xff;
886 cpu->CP0_EntryHi = tasid;
888 compute_hflags(cpu);
891 /* Called for updates to CP0_EntryHi. */
892 static void sync_c0_entryhi(CPUState *cpu, int tc)
894 int32_t *tcst;
895 uint32_t asid, v = cpu->CP0_EntryHi;
897 asid = v & 0xff;
899 if (tc == cpu->current_tc) {
900 tcst = &cpu->active_tc.CP0_TCStatus;
901 } else {
902 tcst = &cpu->tcs[tc].CP0_TCStatus;
905 *tcst &= ~0xff;
906 *tcst |= asid;
909 /* CP0 helpers */
910 target_ulong helper_mfc0_mvpcontrol (void)
912 return env->mvp->CP0_MVPControl;
915 target_ulong helper_mfc0_mvpconf0 (void)
917 return env->mvp->CP0_MVPConf0;
920 target_ulong helper_mfc0_mvpconf1 (void)
922 return env->mvp->CP0_MVPConf1;
925 target_ulong helper_mfc0_random (void)
927 return (int32_t)cpu_mips_get_random(env);
930 target_ulong helper_mfc0_tcstatus (void)
932 return env->active_tc.CP0_TCStatus;
935 target_ulong helper_mftc0_tcstatus(void)
937 int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
938 CPUState *other = mips_cpu_map_tc(&other_tc);
940 if (other_tc == other->current_tc)
941 return other->active_tc.CP0_TCStatus;
942 else
943 return other->tcs[other_tc].CP0_TCStatus;
946 target_ulong helper_mfc0_tcbind (void)
948 return env->active_tc.CP0_TCBind;
951 target_ulong helper_mftc0_tcbind(void)
953 int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
954 CPUState *other = mips_cpu_map_tc(&other_tc);
956 if (other_tc == other->current_tc)
957 return other->active_tc.CP0_TCBind;
958 else
959 return other->tcs[other_tc].CP0_TCBind;
962 target_ulong helper_mfc0_tcrestart (void)
964 return env->active_tc.PC;
967 target_ulong helper_mftc0_tcrestart(void)
969 int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
970 CPUState *other = mips_cpu_map_tc(&other_tc);
972 if (other_tc == other->current_tc)
973 return other->active_tc.PC;
974 else
975 return other->tcs[other_tc].PC;
978 target_ulong helper_mfc0_tchalt (void)
980 return env->active_tc.CP0_TCHalt;
983 target_ulong helper_mftc0_tchalt(void)
985 int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
986 CPUState *other = mips_cpu_map_tc(&other_tc);
988 if (other_tc == other->current_tc)
989 return other->active_tc.CP0_TCHalt;
990 else
991 return other->tcs[other_tc].CP0_TCHalt;
994 target_ulong helper_mfc0_tccontext (void)
996 return env->active_tc.CP0_TCContext;
999 target_ulong helper_mftc0_tccontext(void)
1001 int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
1002 CPUState *other = mips_cpu_map_tc(&other_tc);
1004 if (other_tc == other->current_tc)
1005 return other->active_tc.CP0_TCContext;
1006 else
1007 return other->tcs[other_tc].CP0_TCContext;
1010 target_ulong helper_mfc0_tcschedule (void)
1012 return env->active_tc.CP0_TCSchedule;
1015 target_ulong helper_mftc0_tcschedule(void)
1017 int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
1018 CPUState *other = mips_cpu_map_tc(&other_tc);
1020 if (other_tc == other->current_tc)
1021 return other->active_tc.CP0_TCSchedule;
1022 else
1023 return other->tcs[other_tc].CP0_TCSchedule;
1026 target_ulong helper_mfc0_tcschefback (void)
1028 return env->active_tc.CP0_TCScheFBack;
1031 target_ulong helper_mftc0_tcschefback(void)
1033 int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
1034 CPUState *other = mips_cpu_map_tc(&other_tc);
1036 if (other_tc == other->current_tc)
1037 return other->active_tc.CP0_TCScheFBack;
1038 else
1039 return other->tcs[other_tc].CP0_TCScheFBack;
1042 target_ulong helper_mfc0_count (void)
1044 return (int32_t)cpu_mips_get_count(env);
1047 target_ulong helper_mftc0_entryhi(void)
1049 int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
1050 CPUState *other = mips_cpu_map_tc(&other_tc);
1052 return other->CP0_EntryHi;
1055 target_ulong helper_mftc0_cause(void)
1057 int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
1058 int32_t tccause;
1059 CPUState *other = mips_cpu_map_tc(&other_tc);
1061 if (other_tc == other->current_tc) {
1062 tccause = other->CP0_Cause;
1063 } else {
1064 tccause = other->CP0_Cause;
1067 return tccause;
1070 target_ulong helper_mftc0_status(void)
1072 int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
1073 CPUState *other = mips_cpu_map_tc(&other_tc);
1075 return other->CP0_Status;
1078 target_ulong helper_mfc0_lladdr (void)
1080 return (int32_t)(env->lladdr >> env->CP0_LLAddr_shift);
1083 target_ulong helper_mfc0_watchlo (uint32_t sel)
1085 return (int32_t)env->CP0_WatchLo[sel];
1088 target_ulong helper_mfc0_watchhi (uint32_t sel)
1090 return env->CP0_WatchHi[sel];
1093 target_ulong helper_mfc0_debug (void)
1095 target_ulong t0 = env->CP0_Debug;
1096 if (env->hflags & MIPS_HFLAG_DM)
1097 t0 |= 1 << CP0DB_DM;
1099 return t0;
1102 target_ulong helper_mftc0_debug(void)
1104 int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
1105 int32_t tcstatus;
1106 CPUState *other = mips_cpu_map_tc(&other_tc);
1108 if (other_tc == other->current_tc)
1109 tcstatus = other->active_tc.CP0_Debug_tcstatus;
1110 else
1111 tcstatus = other->tcs[other_tc].CP0_Debug_tcstatus;
1113 /* XXX: Might be wrong, check with EJTAG spec. */
1114 return (other->CP0_Debug & ~((1 << CP0DB_SSt) | (1 << CP0DB_Halt))) |
1115 (tcstatus & ((1 << CP0DB_SSt) | (1 << CP0DB_Halt)));
#if defined(TARGET_MIPS64)
/* DMFC0 variants: as the MFC0 reads above, but without 32-bit
   truncation/sign-extension. */
target_ulong helper_dmfc0_tcrestart (void)
{
    return env->active_tc.PC;
}

target_ulong helper_dmfc0_tchalt (void)
{
    return env->active_tc.CP0_TCHalt;
}

target_ulong helper_dmfc0_tccontext (void)
{
    return env->active_tc.CP0_TCContext;
}

target_ulong helper_dmfc0_tcschedule (void)
{
    return env->active_tc.CP0_TCSchedule;
}

target_ulong helper_dmfc0_tcschefback (void)
{
    return env->active_tc.CP0_TCScheFBack;
}

target_ulong helper_dmfc0_lladdr (void)
{
    return env->lladdr >> env->CP0_LLAddr_shift;
}

target_ulong helper_dmfc0_watchlo (uint32_t sel)
{
    return env->CP0_WatchLo[sel];
}
#endif /* TARGET_MIPS64 */
1155 void helper_mtc0_index (target_ulong arg1)
1157 int num = 1;
1158 unsigned int tmp = env->tlb->nb_tlb;
1160 do {
1161 tmp >>= 1;
1162 num <<= 1;
1163 } while (tmp);
1164 env->CP0_Index = (env->CP0_Index & 0x80000000) | (arg1 & (num - 1));
1167 void helper_mtc0_mvpcontrol (target_ulong arg1)
1169 uint32_t mask = 0;
1170 uint32_t newval;
1172 if (env->CP0_VPEConf0 & (1 << CP0VPEC0_MVP))
1173 mask |= (1 << CP0MVPCo_CPA) | (1 << CP0MVPCo_VPC) |
1174 (1 << CP0MVPCo_EVP);
1175 if (env->mvp->CP0_MVPControl & (1 << CP0MVPCo_VPC))
1176 mask |= (1 << CP0MVPCo_STLB);
1177 newval = (env->mvp->CP0_MVPControl & ~mask) | (arg1 & mask);
1179 // TODO: Enable/disable shared TLB, enable/disable VPEs.
1181 env->mvp->CP0_MVPControl = newval;
1184 void helper_mtc0_vpecontrol (target_ulong arg1)
1186 uint32_t mask;
1187 uint32_t newval;
1189 mask = (1 << CP0VPECo_YSI) | (1 << CP0VPECo_GSI) |
1190 (1 << CP0VPECo_TE) | (0xff << CP0VPECo_TargTC);
1191 newval = (env->CP0_VPEControl & ~mask) | (arg1 & mask);
1193 /* Yield scheduler intercept not implemented. */
1194 /* Gating storage scheduler intercept not implemented. */
1196 // TODO: Enable/disable TCs.
1198 env->CP0_VPEControl = newval;
1201 void helper_mttc0_vpecontrol(target_ulong arg1)
1203 int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
1204 CPUState *other = mips_cpu_map_tc(&other_tc);
1205 uint32_t mask;
1206 uint32_t newval;
1208 mask = (1 << CP0VPECo_YSI) | (1 << CP0VPECo_GSI) |
1209 (1 << CP0VPECo_TE) | (0xff << CP0VPECo_TargTC);
1210 newval = (other->CP0_VPEControl & ~mask) | (arg1 & mask);
1212 /* TODO: Enable/disable TCs. */
1214 other->CP0_VPEControl = newval;
1217 target_ulong helper_mftc0_vpecontrol(void)
1219 int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
1220 CPUState *other = mips_cpu_map_tc(&other_tc);
1221 /* FIXME: Mask away return zero on read bits. */
1222 return other->CP0_VPEControl;
1225 target_ulong helper_mftc0_vpeconf0(void)
1227 int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
1228 CPUState *other = mips_cpu_map_tc(&other_tc);
1230 return other->CP0_VPEConf0;
1233 void helper_mtc0_vpeconf0 (target_ulong arg1)
1235 uint32_t mask = 0;
1236 uint32_t newval;
1238 if (env->CP0_VPEConf0 & (1 << CP0VPEC0_MVP)) {
1239 if (env->CP0_VPEConf0 & (1 << CP0VPEC0_VPA))
1240 mask |= (0xff << CP0VPEC0_XTC);
1241 mask |= (1 << CP0VPEC0_MVP) | (1 << CP0VPEC0_VPA);
1243 newval = (env->CP0_VPEConf0 & ~mask) | (arg1 & mask);
1245 // TODO: TC exclusive handling due to ERL/EXL.
1247 env->CP0_VPEConf0 = newval;
1250 void helper_mttc0_vpeconf0(target_ulong arg1)
1252 int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
1253 CPUState *other = mips_cpu_map_tc(&other_tc);
1254 uint32_t mask = 0;
1255 uint32_t newval;
1257 mask |= (1 << CP0VPEC0_MVP) | (1 << CP0VPEC0_VPA);
1258 newval = (other->CP0_VPEConf0 & ~mask) | (arg1 & mask);
1260 /* TODO: TC exclusive handling due to ERL/EXL. */
1261 other->CP0_VPEConf0 = newval;
1264 void helper_mtc0_vpeconf1 (target_ulong arg1)
1266 uint32_t mask = 0;
1267 uint32_t newval;
1269 if (env->mvp->CP0_MVPControl & (1 << CP0MVPCo_VPC))
1270 mask |= (0xff << CP0VPEC1_NCX) | (0xff << CP0VPEC1_NCP2) |
1271 (0xff << CP0VPEC1_NCP1);
1272 newval = (env->CP0_VPEConf1 & ~mask) | (arg1 & mask);
1274 /* UDI not implemented. */
1275 /* CP2 not implemented. */
1277 // TODO: Handle FPU (CP1) binding.
1279 env->CP0_VPEConf1 = newval;
1282 void helper_mtc0_yqmask (target_ulong arg1)
1284 /* Yield qualifier inputs not implemented. */
1285 env->CP0_YQMask = 0x00000000;
1288 void helper_mtc0_vpeopt (target_ulong arg1)
1290 env->CP0_VPEOpt = arg1 & 0x0000ffff;
1293 void helper_mtc0_entrylo0 (target_ulong arg1)
1295 /* Large physaddr (PABITS) not implemented */
1296 /* 1k pages not implemented */
1297 env->CP0_EntryLo0 = arg1 & 0x3FFFFFFF;
1300 void helper_mtc0_tcstatus (target_ulong arg1)
1302 uint32_t mask = env->CP0_TCStatus_rw_bitmask;
1303 uint32_t newval;
1305 newval = (env->active_tc.CP0_TCStatus & ~mask) | (arg1 & mask);
1307 env->active_tc.CP0_TCStatus = newval;
1308 sync_c0_tcstatus(env, env->current_tc, newval);
1311 void helper_mttc0_tcstatus (target_ulong arg1)
1313 int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
1314 CPUState *other = mips_cpu_map_tc(&other_tc);
1316 if (other_tc == other->current_tc)
1317 other->active_tc.CP0_TCStatus = arg1;
1318 else
1319 other->tcs[other_tc].CP0_TCStatus = arg1;
1320 sync_c0_tcstatus(other, other_tc, arg1);
1323 void helper_mtc0_tcbind (target_ulong arg1)
1325 uint32_t mask = (1 << CP0TCBd_TBE);
1326 uint32_t newval;
1328 if (env->mvp->CP0_MVPControl & (1 << CP0MVPCo_VPC))
1329 mask |= (1 << CP0TCBd_CurVPE);
1330 newval = (env->active_tc.CP0_TCBind & ~mask) | (arg1 & mask);
1331 env->active_tc.CP0_TCBind = newval;
1334 void helper_mttc0_tcbind (target_ulong arg1)
1336 int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
1337 uint32_t mask = (1 << CP0TCBd_TBE);
1338 uint32_t newval;
1339 CPUState *other = mips_cpu_map_tc(&other_tc);
1341 if (other->mvp->CP0_MVPControl & (1 << CP0MVPCo_VPC))
1342 mask |= (1 << CP0TCBd_CurVPE);
1343 if (other_tc == other->current_tc) {
1344 newval = (other->active_tc.CP0_TCBind & ~mask) | (arg1 & mask);
1345 other->active_tc.CP0_TCBind = newval;
1346 } else {
1347 newval = (other->tcs[other_tc].CP0_TCBind & ~mask) | (arg1 & mask);
1348 other->tcs[other_tc].CP0_TCBind = newval;
1352 void helper_mtc0_tcrestart (target_ulong arg1)
1354 env->active_tc.PC = arg1;
1355 env->active_tc.CP0_TCStatus &= ~(1 << CP0TCSt_TDS);
1356 env->lladdr = 0ULL;
1357 /* MIPS16 not implemented. */
1360 void helper_mttc0_tcrestart (target_ulong arg1)
1362 int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
1363 CPUState *other = mips_cpu_map_tc(&other_tc);
1365 if (other_tc == other->current_tc) {
1366 other->active_tc.PC = arg1;
1367 other->active_tc.CP0_TCStatus &= ~(1 << CP0TCSt_TDS);
1368 other->lladdr = 0ULL;
1369 /* MIPS16 not implemented. */
1370 } else {
1371 other->tcs[other_tc].PC = arg1;
1372 other->tcs[other_tc].CP0_TCStatus &= ~(1 << CP0TCSt_TDS);
1373 other->lladdr = 0ULL;
1374 /* MIPS16 not implemented. */
1378 void helper_mtc0_tchalt (target_ulong arg1)
1380 env->active_tc.CP0_TCHalt = arg1 & 0x1;
1382 // TODO: Halt TC / Restart (if allocated+active) TC.
1383 if (env->active_tc.CP0_TCHalt & 1) {
1384 mips_tc_sleep(env, env->current_tc);
1385 } else {
1386 mips_tc_wake(env, env->current_tc);
1390 void helper_mttc0_tchalt (target_ulong arg1)
1392 int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
1393 CPUState *other = mips_cpu_map_tc(&other_tc);
1395 // TODO: Halt TC / Restart (if allocated+active) TC.
1397 if (other_tc == other->current_tc)
1398 other->active_tc.CP0_TCHalt = arg1;
1399 else
1400 other->tcs[other_tc].CP0_TCHalt = arg1;
1402 if (arg1 & 1) {
1403 mips_tc_sleep(other, other_tc);
1404 } else {
1405 mips_tc_wake(other, other_tc);
1409 void helper_mtc0_tccontext (target_ulong arg1)
1411 env->active_tc.CP0_TCContext = arg1;
1414 void helper_mttc0_tccontext (target_ulong arg1)
1416 int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
1417 CPUState *other = mips_cpu_map_tc(&other_tc);
1419 if (other_tc == other->current_tc)
1420 other->active_tc.CP0_TCContext = arg1;
1421 else
1422 other->tcs[other_tc].CP0_TCContext = arg1;
1425 void helper_mtc0_tcschedule (target_ulong arg1)
1427 env->active_tc.CP0_TCSchedule = arg1;
1430 void helper_mttc0_tcschedule (target_ulong arg1)
1432 int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
1433 CPUState *other = mips_cpu_map_tc(&other_tc);
1435 if (other_tc == other->current_tc)
1436 other->active_tc.CP0_TCSchedule = arg1;
1437 else
1438 other->tcs[other_tc].CP0_TCSchedule = arg1;
1441 void helper_mtc0_tcschefback (target_ulong arg1)
1443 env->active_tc.CP0_TCScheFBack = arg1;
1446 void helper_mttc0_tcschefback (target_ulong arg1)
1448 int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
1449 CPUState *other = mips_cpu_map_tc(&other_tc);
1451 if (other_tc == other->current_tc)
1452 other->active_tc.CP0_TCScheFBack = arg1;
1453 else
1454 other->tcs[other_tc].CP0_TCScheFBack = arg1;
1457 void helper_mtc0_entrylo1 (target_ulong arg1)
1459 /* Large physaddr (PABITS) not implemented */
1460 /* 1k pages not implemented */
1461 env->CP0_EntryLo1 = arg1 & 0x3FFFFFFF;
1464 void helper_mtc0_context (target_ulong arg1)
1466 env->CP0_Context = (env->CP0_Context & 0x007FFFFF) | (arg1 & ~0x007FFFFF);
1469 void helper_mtc0_pagemask (target_ulong arg1)
1471 /* 1k pages not implemented */
1472 env->CP0_PageMask = arg1 & (0x1FFFFFFF & (TARGET_PAGE_MASK << 1));
1475 void helper_mtc0_pagegrain (target_ulong arg1)
1477 /* SmartMIPS not implemented */
1478 /* Large physaddr (PABITS) not implemented */
1479 /* 1k pages not implemented */
1480 env->CP0_PageGrain = 0;
1483 void helper_mtc0_wired (target_ulong arg1)
1485 env->CP0_Wired = arg1 % env->tlb->nb_tlb;
1488 void helper_mtc0_srsconf0 (target_ulong arg1)
1490 env->CP0_SRSConf0 |= arg1 & env->CP0_SRSConf0_rw_bitmask;
1493 void helper_mtc0_srsconf1 (target_ulong arg1)
1495 env->CP0_SRSConf1 |= arg1 & env->CP0_SRSConf1_rw_bitmask;
1498 void helper_mtc0_srsconf2 (target_ulong arg1)
1500 env->CP0_SRSConf2 |= arg1 & env->CP0_SRSConf2_rw_bitmask;
1503 void helper_mtc0_srsconf3 (target_ulong arg1)
1505 env->CP0_SRSConf3 |= arg1 & env->CP0_SRSConf3_rw_bitmask;
1508 void helper_mtc0_srsconf4 (target_ulong arg1)
1510 env->CP0_SRSConf4 |= arg1 & env->CP0_SRSConf4_rw_bitmask;
1513 void helper_mtc0_hwrena (target_ulong arg1)
1515 env->CP0_HWREna = arg1 & 0x0000000F;
1518 void helper_mtc0_count (target_ulong arg1)
1520 cpu_mips_store_count(env, arg1);
1523 void helper_mtc0_entryhi (target_ulong arg1)
1525 target_ulong old, val;
1527 /* 1k pages not implemented */
1528 val = arg1 & ((TARGET_PAGE_MASK << 1) | 0xFF);
1529 #if defined(TARGET_MIPS64)
1530 val &= env->SEGMask;
1531 #endif
1532 old = env->CP0_EntryHi;
1533 env->CP0_EntryHi = val;
1534 if (env->CP0_Config3 & (1 << CP0C3_MT)) {
1535 sync_c0_entryhi(env, env->current_tc);
1537 /* If the ASID changes, flush qemu's TLB. */
1538 if ((old & 0xFF) != (val & 0xFF))
1539 cpu_mips_tlb_flush(env, 1);
1542 void helper_mttc0_entryhi(target_ulong arg1)
1544 int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
1545 CPUState *other = mips_cpu_map_tc(&other_tc);
1547 other->CP0_EntryHi = arg1;
1548 sync_c0_entryhi(other, other_tc);
1551 void helper_mtc0_compare (target_ulong arg1)
1553 cpu_mips_store_compare(env, arg1);
1556 void helper_mtc0_status (target_ulong arg1)
1558 uint32_t val, old;
1559 uint32_t mask = env->CP0_Status_rw_bitmask;
1561 val = arg1 & mask;
1562 old = env->CP0_Status;
1563 env->CP0_Status = (env->CP0_Status & ~mask) | val;
1564 if (env->CP0_Config3 & (1 << CP0C3_MT)) {
1565 sync_c0_status(env, env->current_tc);
1566 } else {
1567 compute_hflags(env);
1570 if (qemu_loglevel_mask(CPU_LOG_EXEC)) {
1571 qemu_log("Status %08x (%08x) => %08x (%08x) Cause %08x",
1572 old, old & env->CP0_Cause & CP0Ca_IP_mask,
1573 val, val & env->CP0_Cause & CP0Ca_IP_mask,
1574 env->CP0_Cause);
1575 switch (env->hflags & MIPS_HFLAG_KSU) {
1576 case MIPS_HFLAG_UM: qemu_log(", UM\n"); break;
1577 case MIPS_HFLAG_SM: qemu_log(", SM\n"); break;
1578 case MIPS_HFLAG_KM: qemu_log("\n"); break;
1579 default: cpu_abort(env, "Invalid MMU mode!\n"); break;
1584 void helper_mttc0_status(target_ulong arg1)
1586 int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
1587 CPUState *other = mips_cpu_map_tc(&other_tc);
1589 other->CP0_Status = arg1 & ~0xf1000018;
1590 sync_c0_status(other, other_tc);
1593 void helper_mtc0_intctl (target_ulong arg1)
1595 /* vectored interrupts not implemented, no performance counters. */
1596 env->CP0_IntCtl = (env->CP0_IntCtl & ~0x000003e0) | (arg1 & 0x000003e0);
1599 void helper_mtc0_srsctl (target_ulong arg1)
1601 uint32_t mask = (0xf << CP0SRSCtl_ESS) | (0xf << CP0SRSCtl_PSS);
1602 env->CP0_SRSCtl = (env->CP0_SRSCtl & ~mask) | (arg1 & mask);
1605 static void mtc0_cause(CPUState *cpu, target_ulong arg1)
1607 uint32_t mask = 0x00C00300;
1608 uint32_t old = cpu->CP0_Cause;
1609 int i;
1611 if (cpu->insn_flags & ISA_MIPS32R2) {
1612 mask |= 1 << CP0Ca_DC;
1615 cpu->CP0_Cause = (cpu->CP0_Cause & ~mask) | (arg1 & mask);
1617 if ((old ^ cpu->CP0_Cause) & (1 << CP0Ca_DC)) {
1618 if (cpu->CP0_Cause & (1 << CP0Ca_DC)) {
1619 cpu_mips_stop_count(cpu);
1620 } else {
1621 cpu_mips_start_count(cpu);
1625 /* Set/reset software interrupts */
1626 for (i = 0 ; i < 2 ; i++) {
1627 if ((old ^ cpu->CP0_Cause) & (1 << (CP0Ca_IP + i))) {
1628 cpu_mips_soft_irq(cpu, i, cpu->CP0_Cause & (1 << (CP0Ca_IP + i)));
1633 void helper_mtc0_cause(target_ulong arg1)
1635 mtc0_cause(env, arg1);
1638 void helper_mttc0_cause(target_ulong arg1)
1640 int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
1641 CPUState *other = mips_cpu_map_tc(&other_tc);
1643 mtc0_cause(other, arg1);
1646 target_ulong helper_mftc0_epc(void)
1648 int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
1649 CPUState *other = mips_cpu_map_tc(&other_tc);
1651 return other->CP0_EPC;
1654 target_ulong helper_mftc0_ebase(void)
1656 int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
1657 CPUState *other = mips_cpu_map_tc(&other_tc);
1659 return other->CP0_EBase;
1662 void helper_mtc0_ebase (target_ulong arg1)
1664 /* vectored interrupts not implemented */
1665 env->CP0_EBase = (env->CP0_EBase & ~0x3FFFF000) | (arg1 & 0x3FFFF000);
1668 void helper_mttc0_ebase(target_ulong arg1)
1670 int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
1671 CPUState *other = mips_cpu_map_tc(&other_tc);
1672 other->CP0_EBase = (other->CP0_EBase & ~0x3FFFF000) | (arg1 & 0x3FFFF000);
1675 target_ulong helper_mftc0_configx(target_ulong idx)
1677 int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
1678 CPUState *other = mips_cpu_map_tc(&other_tc);
1680 switch (idx) {
1681 case 0: return other->CP0_Config0;
1682 case 1: return other->CP0_Config1;
1683 case 2: return other->CP0_Config2;
1684 case 3: return other->CP0_Config3;
1685 /* 4 and 5 are reserved. */
1686 case 6: return other->CP0_Config6;
1687 case 7: return other->CP0_Config7;
1688 default:
1689 break;
1691 return 0;
1694 void helper_mtc0_config0 (target_ulong arg1)
1696 env->CP0_Config0 = (env->CP0_Config0 & 0x81FFFFF8) | (arg1 & 0x00000007);
1699 void helper_mtc0_config2 (target_ulong arg1)
1701 /* tertiary/secondary caches not implemented */
1702 env->CP0_Config2 = (env->CP0_Config2 & 0x8FFF0FFF);
1705 void helper_mtc0_lladdr (target_ulong arg1)
1707 target_long mask = env->CP0_LLAddr_rw_bitmask;
1708 arg1 = arg1 << env->CP0_LLAddr_shift;
1709 env->lladdr = (env->lladdr & ~mask) | (arg1 & mask);
1712 void helper_mtc0_watchlo (target_ulong arg1, uint32_t sel)
1714 /* Watch exceptions for instructions, data loads, data stores
1715 not implemented. */
1716 env->CP0_WatchLo[sel] = (arg1 & ~0x7);
1719 void helper_mtc0_watchhi (target_ulong arg1, uint32_t sel)
1721 env->CP0_WatchHi[sel] = (arg1 & 0x40FF0FF8);
1722 env->CP0_WatchHi[sel] &= ~(env->CP0_WatchHi[sel] & arg1 & 0x7);
1725 void helper_mtc0_xcontext (target_ulong arg1)
1727 target_ulong mask = (1ULL << (env->SEGBITS - 7)) - 1;
1728 env->CP0_XContext = (env->CP0_XContext & mask) | (arg1 & ~mask);
1731 void helper_mtc0_framemask (target_ulong arg1)
1733 env->CP0_Framemask = arg1; /* XXX */
1736 void helper_mtc0_debug (target_ulong arg1)
1738 env->CP0_Debug = (env->CP0_Debug & 0x8C03FC1F) | (arg1 & 0x13300120);
1739 if (arg1 & (1 << CP0DB_DM))
1740 env->hflags |= MIPS_HFLAG_DM;
1741 else
1742 env->hflags &= ~MIPS_HFLAG_DM;
1745 void helper_mttc0_debug(target_ulong arg1)
1747 int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
1748 uint32_t val = arg1 & ((1 << CP0DB_SSt) | (1 << CP0DB_Halt));
1749 CPUState *other = mips_cpu_map_tc(&other_tc);
1751 /* XXX: Might be wrong, check with EJTAG spec. */
1752 if (other_tc == other->current_tc)
1753 other->active_tc.CP0_Debug_tcstatus = val;
1754 else
1755 other->tcs[other_tc].CP0_Debug_tcstatus = val;
1756 other->CP0_Debug = (other->CP0_Debug &
1757 ((1 << CP0DB_SSt) | (1 << CP0DB_Halt))) |
1758 (arg1 & ~((1 << CP0DB_SSt) | (1 << CP0DB_Halt)));
1761 void helper_mtc0_performance0 (target_ulong arg1)
1763 env->CP0_Performance0 = arg1 & 0x000007ff;
1766 void helper_mtc0_taglo (target_ulong arg1)
1768 env->CP0_TagLo = arg1 & 0xFFFFFCF6;
1771 void helper_mtc0_datalo (target_ulong arg1)
1773 env->CP0_DataLo = arg1; /* XXX */
1776 void helper_mtc0_taghi (target_ulong arg1)
1778 env->CP0_TagHi = arg1; /* XXX */
1781 void helper_mtc0_datahi (target_ulong arg1)
1783 env->CP0_DataHi = arg1; /* XXX */
1786 /* MIPS MT functions */
1787 target_ulong helper_mftgpr(uint32_t sel)
1789 int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
1790 CPUState *other = mips_cpu_map_tc(&other_tc);
1792 if (other_tc == other->current_tc)
1793 return other->active_tc.gpr[sel];
1794 else
1795 return other->tcs[other_tc].gpr[sel];
1798 target_ulong helper_mftlo(uint32_t sel)
1800 int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
1801 CPUState *other = mips_cpu_map_tc(&other_tc);
1803 if (other_tc == other->current_tc)
1804 return other->active_tc.LO[sel];
1805 else
1806 return other->tcs[other_tc].LO[sel];
1809 target_ulong helper_mfthi(uint32_t sel)
1811 int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
1812 CPUState *other = mips_cpu_map_tc(&other_tc);
1814 if (other_tc == other->current_tc)
1815 return other->active_tc.HI[sel];
1816 else
1817 return other->tcs[other_tc].HI[sel];
1820 target_ulong helper_mftacx(uint32_t sel)
1822 int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
1823 CPUState *other = mips_cpu_map_tc(&other_tc);
1825 if (other_tc == other->current_tc)
1826 return other->active_tc.ACX[sel];
1827 else
1828 return other->tcs[other_tc].ACX[sel];
1831 target_ulong helper_mftdsp(void)
1833 int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
1834 CPUState *other = mips_cpu_map_tc(&other_tc);
1836 if (other_tc == other->current_tc)
1837 return other->active_tc.DSPControl;
1838 else
1839 return other->tcs[other_tc].DSPControl;
1842 void helper_mttgpr(target_ulong arg1, uint32_t sel)
1844 int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
1845 CPUState *other = mips_cpu_map_tc(&other_tc);
1847 if (other_tc == other->current_tc)
1848 other->active_tc.gpr[sel] = arg1;
1849 else
1850 other->tcs[other_tc].gpr[sel] = arg1;
1853 void helper_mttlo(target_ulong arg1, uint32_t sel)
1855 int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
1856 CPUState *other = mips_cpu_map_tc(&other_tc);
1858 if (other_tc == other->current_tc)
1859 other->active_tc.LO[sel] = arg1;
1860 else
1861 other->tcs[other_tc].LO[sel] = arg1;
1864 void helper_mtthi(target_ulong arg1, uint32_t sel)
1866 int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
1867 CPUState *other = mips_cpu_map_tc(&other_tc);
1869 if (other_tc == other->current_tc)
1870 other->active_tc.HI[sel] = arg1;
1871 else
1872 other->tcs[other_tc].HI[sel] = arg1;
1875 void helper_mttacx(target_ulong arg1, uint32_t sel)
1877 int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
1878 CPUState *other = mips_cpu_map_tc(&other_tc);
1880 if (other_tc == other->current_tc)
1881 other->active_tc.ACX[sel] = arg1;
1882 else
1883 other->tcs[other_tc].ACX[sel] = arg1;
1886 void helper_mttdsp(target_ulong arg1)
1888 int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
1889 CPUState *other = mips_cpu_map_tc(&other_tc);
1891 if (other_tc == other->current_tc)
1892 other->active_tc.DSPControl = arg1;
1893 else
1894 other->tcs[other_tc].DSPControl = arg1;
1897 /* MIPS MT functions */
1898 target_ulong helper_dmt(void)
1900 // TODO
1901 return 0;
1904 target_ulong helper_emt(void)
1906 // TODO
1907 return 0;
1910 target_ulong helper_dvpe(void)
1912 CPUState *other_cpu = first_cpu;
1913 target_ulong prev = env->mvp->CP0_MVPControl;
1915 do {
1916 /* Turn off all VPEs except the one executing the dvpe. */
1917 if (other_cpu != env) {
1918 other_cpu->mvp->CP0_MVPControl &= ~(1 << CP0MVPCo_EVP);
1919 mips_vpe_sleep(other_cpu);
1921 other_cpu = other_cpu->next_cpu;
1922 } while (other_cpu);
1923 return prev;
1926 target_ulong helper_evpe(void)
1928 CPUState *other_cpu = first_cpu;
1929 target_ulong prev = env->mvp->CP0_MVPControl;
1931 do {
1932 if (other_cpu != env
1933 /* If the VPE is WFI, dont distrub it's sleep. */
1934 && !mips_vpe_is_wfi(other_cpu)) {
1935 /* Enable the VPE. */
1936 other_cpu->mvp->CP0_MVPControl |= (1 << CP0MVPCo_EVP);
1937 mips_vpe_wake(other_cpu); /* And wake it up. */
1939 other_cpu = other_cpu->next_cpu;
1940 } while (other_cpu);
1941 return prev;
1943 #endif /* !CONFIG_USER_ONLY */
1945 void helper_fork(target_ulong arg1, target_ulong arg2)
1947 // arg1 = rt, arg2 = rs
1948 arg1 = 0;
1949 // TODO: store to TC register
1952 target_ulong helper_yield(target_ulong arg)
1954 target_long arg1 = arg;
1956 if (arg1 < 0) {
1957 /* No scheduling policy implemented. */
1958 if (arg1 != -2) {
1959 if (env->CP0_VPEControl & (1 << CP0VPECo_YSI) &&
1960 env->active_tc.CP0_TCStatus & (1 << CP0TCSt_DT)) {
1961 env->CP0_VPEControl &= ~(0x7 << CP0VPECo_EXCPT);
1962 env->CP0_VPEControl |= 4 << CP0VPECo_EXCPT;
1963 helper_raise_exception(EXCP_THREAD);
1966 } else if (arg1 == 0) {
1967 if (0 /* TODO: TC underflow */) {
1968 env->CP0_VPEControl &= ~(0x7 << CP0VPECo_EXCPT);
1969 helper_raise_exception(EXCP_THREAD);
1970 } else {
1971 // TODO: Deallocate TC
1973 } else if (arg1 > 0) {
1974 /* Yield qualifier inputs not implemented. */
1975 env->CP0_VPEControl &= ~(0x7 << CP0VPECo_EXCPT);
1976 env->CP0_VPEControl |= 2 << CP0VPECo_EXCPT;
1977 helper_raise_exception(EXCP_THREAD);
1979 return env->CP0_YQMask;
1982 #ifndef CONFIG_USER_ONLY
1983 /* TLB management */
1984 static void cpu_mips_tlb_flush (CPUState *env, int flush_global)
1986 /* Flush qemu's TLB and discard all shadowed entries. */
1987 tlb_flush (env, flush_global);
1988 env->tlb->tlb_in_use = env->tlb->nb_tlb;
1991 static void r4k_mips_tlb_flush_extra (CPUState *env, int first)
1993 /* Discard entries from env->tlb[first] onwards. */
1994 while (env->tlb->tlb_in_use > first) {
1995 r4k_invalidate_tlb(env, --env->tlb->tlb_in_use, 0);
1999 static void r4k_fill_tlb (int idx)
2001 r4k_tlb_t *tlb;
2003 /* XXX: detect conflicting TLBs and raise a MCHECK exception when needed */
2004 tlb = &env->tlb->mmu.r4k.tlb[idx];
2005 tlb->VPN = env->CP0_EntryHi & (TARGET_PAGE_MASK << 1);
2006 #if defined(TARGET_MIPS64)
2007 tlb->VPN &= env->SEGMask;
2008 #endif
2009 tlb->ASID = env->CP0_EntryHi & 0xFF;
2010 tlb->PageMask = env->CP0_PageMask;
2011 tlb->G = env->CP0_EntryLo0 & env->CP0_EntryLo1 & 1;
2012 tlb->V0 = (env->CP0_EntryLo0 & 2) != 0;
2013 tlb->D0 = (env->CP0_EntryLo0 & 4) != 0;
2014 tlb->C0 = (env->CP0_EntryLo0 >> 3) & 0x7;
2015 tlb->PFN[0] = (env->CP0_EntryLo0 >> 6) << 12;
2016 tlb->V1 = (env->CP0_EntryLo1 & 2) != 0;
2017 tlb->D1 = (env->CP0_EntryLo1 & 4) != 0;
2018 tlb->C1 = (env->CP0_EntryLo1 >> 3) & 0x7;
2019 tlb->PFN[1] = (env->CP0_EntryLo1 >> 6) << 12;
2022 void r4k_helper_tlbwi (void)
2024 int idx;
2026 idx = (env->CP0_Index & ~0x80000000) % env->tlb->nb_tlb;
2028 /* Discard cached TLB entries. We could avoid doing this if the
2029 tlbwi is just upgrading access permissions on the current entry;
2030 that might be a further win. */
2031 r4k_mips_tlb_flush_extra (env, env->tlb->nb_tlb);
2033 r4k_invalidate_tlb(env, idx, 0);
2034 r4k_fill_tlb(idx);
2037 void r4k_helper_tlbwr (void)
2039 int r = cpu_mips_get_random(env);
2041 r4k_invalidate_tlb(env, r, 1);
2042 r4k_fill_tlb(r);
2045 void r4k_helper_tlbp (void)
2047 r4k_tlb_t *tlb;
2048 target_ulong mask;
2049 target_ulong tag;
2050 target_ulong VPN;
2051 uint8_t ASID;
2052 int i;
2054 ASID = env->CP0_EntryHi & 0xFF;
2055 for (i = 0; i < env->tlb->nb_tlb; i++) {
2056 tlb = &env->tlb->mmu.r4k.tlb[i];
2057 /* 1k pages are not supported. */
2058 mask = tlb->PageMask | ~(TARGET_PAGE_MASK << 1);
2059 tag = env->CP0_EntryHi & ~mask;
2060 VPN = tlb->VPN & ~mask;
2061 /* Check ASID, virtual page number & size */
2062 if ((tlb->G == 1 || tlb->ASID == ASID) && VPN == tag) {
2063 /* TLB match */
2064 env->CP0_Index = i;
2065 break;
2068 if (i == env->tlb->nb_tlb) {
2069 /* No match. Discard any shadow entries, if any of them match. */
2070 for (i = env->tlb->nb_tlb; i < env->tlb->tlb_in_use; i++) {
2071 tlb = &env->tlb->mmu.r4k.tlb[i];
2072 /* 1k pages are not supported. */
2073 mask = tlb->PageMask | ~(TARGET_PAGE_MASK << 1);
2074 tag = env->CP0_EntryHi & ~mask;
2075 VPN = tlb->VPN & ~mask;
2076 /* Check ASID, virtual page number & size */
2077 if ((tlb->G == 1 || tlb->ASID == ASID) && VPN == tag) {
2078 r4k_mips_tlb_flush_extra (env, i);
2079 break;
2083 env->CP0_Index |= 0x80000000;
2087 void r4k_helper_tlbr (void)
2089 r4k_tlb_t *tlb;
2090 uint8_t ASID;
2091 int idx;
2093 ASID = env->CP0_EntryHi & 0xFF;
2094 idx = (env->CP0_Index & ~0x80000000) % env->tlb->nb_tlb;
2095 tlb = &env->tlb->mmu.r4k.tlb[idx];
2097 /* If this will change the current ASID, flush qemu's TLB. */
2098 if (ASID != tlb->ASID)
2099 cpu_mips_tlb_flush (env, 1);
2101 r4k_mips_tlb_flush_extra(env, env->tlb->nb_tlb);
2103 env->CP0_EntryHi = tlb->VPN | tlb->ASID;
2104 env->CP0_PageMask = tlb->PageMask;
2105 env->CP0_EntryLo0 = tlb->G | (tlb->V0 << 1) | (tlb->D0 << 2) |
2106 (tlb->C0 << 3) | (tlb->PFN[0] >> 6);
2107 env->CP0_EntryLo1 = tlb->G | (tlb->V1 << 1) | (tlb->D1 << 2) |
2108 (tlb->C1 << 3) | (tlb->PFN[1] >> 6);
2111 void helper_tlbwi(void)
2113 env->tlb->helper_tlbwi();
2116 void helper_tlbwr(void)
2118 env->tlb->helper_tlbwr();
2121 void helper_tlbp(void)
2123 env->tlb->helper_tlbp();
2126 void helper_tlbr(void)
2128 env->tlb->helper_tlbr();
2131 /* Specials */
2132 target_ulong helper_di (void)
2134 target_ulong t0 = env->CP0_Status;
2136 env->CP0_Status = t0 & ~(1 << CP0St_IE);
2137 return t0;
2140 target_ulong helper_ei (void)
2142 target_ulong t0 = env->CP0_Status;
2144 env->CP0_Status = t0 | (1 << CP0St_IE);
2145 return t0;
2148 static void debug_pre_eret (void)
2150 if (qemu_loglevel_mask(CPU_LOG_EXEC)) {
2151 qemu_log("ERET: PC " TARGET_FMT_lx " EPC " TARGET_FMT_lx,
2152 env->active_tc.PC, env->CP0_EPC);
2153 if (env->CP0_Status & (1 << CP0St_ERL))
2154 qemu_log(" ErrorEPC " TARGET_FMT_lx, env->CP0_ErrorEPC);
2155 if (env->hflags & MIPS_HFLAG_DM)
2156 qemu_log(" DEPC " TARGET_FMT_lx, env->CP0_DEPC);
2157 qemu_log("\n");
2161 static void debug_post_eret (void)
2163 if (qemu_loglevel_mask(CPU_LOG_EXEC)) {
2164 qemu_log(" => PC " TARGET_FMT_lx " EPC " TARGET_FMT_lx,
2165 env->active_tc.PC, env->CP0_EPC);
2166 if (env->CP0_Status & (1 << CP0St_ERL))
2167 qemu_log(" ErrorEPC " TARGET_FMT_lx, env->CP0_ErrorEPC);
2168 if (env->hflags & MIPS_HFLAG_DM)
2169 qemu_log(" DEPC " TARGET_FMT_lx, env->CP0_DEPC);
2170 switch (env->hflags & MIPS_HFLAG_KSU) {
2171 case MIPS_HFLAG_UM: qemu_log(", UM\n"); break;
2172 case MIPS_HFLAG_SM: qemu_log(", SM\n"); break;
2173 case MIPS_HFLAG_KM: qemu_log("\n"); break;
2174 default: cpu_abort(env, "Invalid MMU mode!\n"); break;
2179 static void set_pc (target_ulong error_pc)
2181 env->active_tc.PC = error_pc & ~(target_ulong)1;
2182 if (error_pc & 1) {
2183 env->hflags |= MIPS_HFLAG_M16;
2184 } else {
2185 env->hflags &= ~(MIPS_HFLAG_M16);
2189 void helper_eret (void)
2191 debug_pre_eret();
2192 if (env->CP0_Status & (1 << CP0St_ERL)) {
2193 set_pc(env->CP0_ErrorEPC);
2194 env->CP0_Status &= ~(1 << CP0St_ERL);
2195 } else {
2196 set_pc(env->CP0_EPC);
2197 env->CP0_Status &= ~(1 << CP0St_EXL);
2199 compute_hflags(env);
2200 debug_post_eret();
2201 env->lladdr = 1;
2204 void helper_deret (void)
2206 debug_pre_eret();
2207 set_pc(env->CP0_DEPC);
2209 env->hflags &= MIPS_HFLAG_DM;
2210 compute_hflags(env);
2211 debug_post_eret();
2212 env->lladdr = 1;
2214 #endif /* !CONFIG_USER_ONLY */
2216 target_ulong helper_rdhwr_cpunum(void)
2218 if ((env->hflags & MIPS_HFLAG_CP0) ||
2219 (env->CP0_HWREna & (1 << 0)))
2220 return env->CP0_EBase & 0x3ff;
2221 else
2222 helper_raise_exception(EXCP_RI);
2224 return 0;
2227 target_ulong helper_rdhwr_synci_step(void)
2229 if ((env->hflags & MIPS_HFLAG_CP0) ||
2230 (env->CP0_HWREna & (1 << 1)))
2231 return env->SYNCI_Step;
2232 else
2233 helper_raise_exception(EXCP_RI);
2235 return 0;
2238 target_ulong helper_rdhwr_cc(void)
2240 if ((env->hflags & MIPS_HFLAG_CP0) ||
2241 (env->CP0_HWREna & (1 << 2)))
2242 return env->CP0_Count;
2243 else
2244 helper_raise_exception(EXCP_RI);
2246 return 0;
2249 target_ulong helper_rdhwr_ccres(void)
2251 if ((env->hflags & MIPS_HFLAG_CP0) ||
2252 (env->CP0_HWREna & (1 << 3)))
2253 return env->CCRes;
2254 else
2255 helper_raise_exception(EXCP_RI);
2257 return 0;
2260 void helper_pmon (int function)
2262 function /= 2;
2263 switch (function) {
2264 case 2: /* TODO: char inbyte(int waitflag); */
2265 if (env->active_tc.gpr[4] == 0)
2266 env->active_tc.gpr[2] = -1;
2267 /* Fall through */
2268 case 11: /* TODO: char inbyte (void); */
2269 env->active_tc.gpr[2] = -1;
2270 break;
2271 case 3:
2272 case 12:
2273 printf("%c", (char)(env->active_tc.gpr[4] & 0xFF));
2274 break;
2275 case 17:
2276 break;
2277 case 158:
2279 unsigned char *fmt = (void *)(unsigned long)env->active_tc.gpr[4];
2280 printf("%s", fmt);
2282 break;
2286 void helper_wait (void)
2288 env->halted = 1;
2289 cpu_reset_interrupt(env, CPU_INTERRUPT_WAKE);
2290 helper_raise_exception(EXCP_HLT);
2293 #if !defined(CONFIG_USER_ONLY)
2295 static void do_unaligned_access (target_ulong addr, int is_write, int is_user, void *retaddr);
2297 #define MMUSUFFIX _mmu
2298 #define ALIGNED_ONLY
2300 #define SHIFT 0
2301 #include "softmmu_template.h"
2303 #define SHIFT 1
2304 #include "softmmu_template.h"
2306 #define SHIFT 2
2307 #include "softmmu_template.h"
2309 #define SHIFT 3
2310 #include "softmmu_template.h"
2312 static void do_unaligned_access (target_ulong addr, int is_write, int is_user, void *retaddr)
2314 env->CP0_BadVAddr = addr;
2315 do_restore_state (retaddr);
2316 helper_raise_exception ((is_write == 1) ? EXCP_AdES : EXCP_AdEL);
2319 void tlb_fill(CPUState *env1, target_ulong addr, int is_write, int mmu_idx,
2320 void *retaddr)
2322 TranslationBlock *tb;
2323 CPUState *saved_env;
2324 unsigned long pc;
2325 int ret;
2327 saved_env = env;
2328 env = env1;
2329 ret = cpu_mips_handle_mmu_fault(env, addr, is_write, mmu_idx);
2330 if (ret) {
2331 if (retaddr) {
2332 /* now we have a real cpu fault */
2333 pc = (unsigned long)retaddr;
2334 tb = tb_find_pc(pc);
2335 if (tb) {
2336 /* the PC is inside the translated code. It means that we have
2337 a virtual CPU fault */
2338 cpu_restore_state(tb, env, pc);
2341 helper_raise_exception_err(env->exception_index, env->error_code);
2343 env = saved_env;
2346 void cpu_unassigned_access(CPUState *env1, target_phys_addr_t addr,
2347 int is_write, int is_exec, int unused, int size)
2349 env = env1;
2351 if (is_exec)
2352 helper_raise_exception(EXCP_IBE);
2353 else
2354 helper_raise_exception(EXCP_DBE);
2356 #endif /* !CONFIG_USER_ONLY */
2358 /* Complex FPU operations which may need stack space. */
2360 #define FLOAT_ONE32 make_float32(0x3f8 << 20)
2361 #define FLOAT_ONE64 make_float64(0x3ffULL << 52)
2362 #define FLOAT_TWO32 make_float32(1 << 30)
2363 #define FLOAT_TWO64 make_float64(1ULL << 62)
2364 #define FLOAT_QNAN32 0x7fbfffff
2365 #define FLOAT_QNAN64 0x7ff7ffffffffffffULL
2366 #define FLOAT_SNAN32 0x7fffffff
2367 #define FLOAT_SNAN64 0x7fffffffffffffffULL
2369 /* convert MIPS rounding mode in FCR31 to IEEE library */
2370 static unsigned int ieee_rm[] = {
2371 float_round_nearest_even,
2372 float_round_to_zero,
2373 float_round_up,
2374 float_round_down
2377 #define RESTORE_ROUNDING_MODE \
2378 set_float_rounding_mode(ieee_rm[env->active_fpu.fcr31 & 3], &env->active_fpu.fp_status)
2380 #define RESTORE_FLUSH_MODE \
2381 set_flush_to_zero((env->active_fpu.fcr31 & (1 << 24)) != 0, &env->active_fpu.fp_status);
2383 target_ulong helper_cfc1 (uint32_t reg)
2385 target_ulong arg1;
2387 switch (reg) {
2388 case 0:
2389 arg1 = (int32_t)env->active_fpu.fcr0;
2390 break;
2391 case 25:
2392 arg1 = ((env->active_fpu.fcr31 >> 24) & 0xfe) | ((env->active_fpu.fcr31 >> 23) & 0x1);
2393 break;
2394 case 26:
2395 arg1 = env->active_fpu.fcr31 & 0x0003f07c;
2396 break;
2397 case 28:
2398 arg1 = (env->active_fpu.fcr31 & 0x00000f83) | ((env->active_fpu.fcr31 >> 22) & 0x4);
2399 break;
2400 default:
2401 arg1 = (int32_t)env->active_fpu.fcr31;
2402 break;
2405 return arg1;
2408 void helper_ctc1 (target_ulong arg1, uint32_t reg)
2410 switch(reg) {
2411 case 25:
2412 if (arg1 & 0xffffff00)
2413 return;
2414 env->active_fpu.fcr31 = (env->active_fpu.fcr31 & 0x017fffff) | ((arg1 & 0xfe) << 24) |
2415 ((arg1 & 0x1) << 23);
2416 break;
2417 case 26:
2418 if (arg1 & 0x007c0000)
2419 return;
2420 env->active_fpu.fcr31 = (env->active_fpu.fcr31 & 0xfffc0f83) | (arg1 & 0x0003f07c);
2421 break;
2422 case 28:
2423 if (arg1 & 0x007c0000)
2424 return;
2425 env->active_fpu.fcr31 = (env->active_fpu.fcr31 & 0xfefff07c) | (arg1 & 0x00000f83) |
2426 ((arg1 & 0x4) << 22);
2427 break;
2428 case 31:
2429 if (arg1 & 0x007c0000)
2430 return;
2431 env->active_fpu.fcr31 = arg1;
2432 break;
2433 default:
2434 return;
2436 /* set rounding mode */
2437 RESTORE_ROUNDING_MODE;
2438 /* set flush-to-zero mode */
2439 RESTORE_FLUSH_MODE;
2440 set_float_exception_flags(0, &env->active_fpu.fp_status);
2441 if ((GET_FP_ENABLE(env->active_fpu.fcr31) | 0x20) & GET_FP_CAUSE(env->active_fpu.fcr31))
2442 helper_raise_exception(EXCP_FPE);
2445 static inline int ieee_ex_to_mips(int xcpt)
2447 int ret = 0;
2448 if (xcpt) {
2449 if (xcpt & float_flag_invalid) {
2450 ret |= FP_INVALID;
2452 if (xcpt & float_flag_overflow) {
2453 ret |= FP_OVERFLOW;
2455 if (xcpt & float_flag_underflow) {
2456 ret |= FP_UNDERFLOW;
2458 if (xcpt & float_flag_divbyzero) {
2459 ret |= FP_DIV0;
2461 if (xcpt & float_flag_inexact) {
2462 ret |= FP_INEXACT;
2465 return ret;
2468 static inline void update_fcr31(void)
2470 int tmp = ieee_ex_to_mips(get_float_exception_flags(&env->active_fpu.fp_status));
2472 SET_FP_CAUSE(env->active_fpu.fcr31, tmp);
2473 if (GET_FP_ENABLE(env->active_fpu.fcr31) & tmp)
2474 helper_raise_exception(EXCP_FPE);
2475 else
2476 UPDATE_FP_FLAGS(env->active_fpu.fcr31, tmp);
2479 /* Float support.
2480 Single precition routines have a "s" suffix, double precision a
2481 "d" suffix, 32bit integer "w", 64bit integer "l", paired single "ps",
2482 paired single lower "pl", paired single upper "pu". */
2484 /* unary operations, modifying fp status */
2485 uint64_t helper_float_sqrt_d(uint64_t fdt0)
2487 return float64_sqrt(fdt0, &env->active_fpu.fp_status);
2490 uint32_t helper_float_sqrt_s(uint32_t fst0)
2492 return float32_sqrt(fst0, &env->active_fpu.fp_status);
/* Conversion helpers.  Each one clears the softfloat exception flags,
   performs the conversion, then folds any new flags into FCR31 via
   update_fcr31().  Float-to-integer conversions return the SNAN bit
   pattern when the operation overflowed or was invalid. */

/* Convert single to double precision. */
uint64_t helper_float_cvtd_s(uint32_t fst0)
{
    uint64_t fdt2;

    set_float_exception_flags(0, &env->active_fpu.fp_status);
    fdt2 = float32_to_float64(fst0, &env->active_fpu.fp_status);
    update_fcr31();
    return fdt2;
}

/* Convert 32-bit signed integer to double precision. */
uint64_t helper_float_cvtd_w(uint32_t wt0)
{
    uint64_t fdt2;

    set_float_exception_flags(0, &env->active_fpu.fp_status);
    fdt2 = int32_to_float64(wt0, &env->active_fpu.fp_status);
    update_fcr31();
    return fdt2;
}

/* Convert 64-bit signed integer to double precision. */
uint64_t helper_float_cvtd_l(uint64_t dt0)
{
    uint64_t fdt2;

    set_float_exception_flags(0, &env->active_fpu.fp_status);
    fdt2 = int64_to_float64(dt0, &env->active_fpu.fp_status);
    update_fcr31();
    return fdt2;
}

/* Convert double precision to 64-bit signed integer (current rounding
   mode); SNAN pattern on overflow/invalid. */
uint64_t helper_float_cvtl_d(uint64_t fdt0)
{
    uint64_t dt2;

    set_float_exception_flags(0, &env->active_fpu.fp_status);
    dt2 = float64_to_int64(fdt0, &env->active_fpu.fp_status);
    update_fcr31();
    if (GET_FP_CAUSE(env->active_fpu.fcr31) & (FP_OVERFLOW | FP_INVALID))
        dt2 = FLOAT_SNAN64;
    return dt2;
}

/* Convert single precision to 64-bit signed integer. */
uint64_t helper_float_cvtl_s(uint32_t fst0)
{
    uint64_t dt2;

    set_float_exception_flags(0, &env->active_fpu.fp_status);
    dt2 = float32_to_int64(fst0, &env->active_fpu.fp_status);
    update_fcr31();
    if (GET_FP_CAUSE(env->active_fpu.fcr31) & (FP_OVERFLOW | FP_INVALID))
        dt2 = FLOAT_SNAN64;
    return dt2;
}

/* Convert a pair of 32-bit integers to paired-single
   (low word in the low half, high word in the high half). */
uint64_t helper_float_cvtps_pw(uint64_t dt0)
{
    uint32_t fst2;
    uint32_t fsth2;

    set_float_exception_flags(0, &env->active_fpu.fp_status);
    fst2 = int32_to_float32(dt0 & 0XFFFFFFFF, &env->active_fpu.fp_status);
    fsth2 = int32_to_float32(dt0 >> 32, &env->active_fpu.fp_status);
    update_fcr31();
    return ((uint64_t)fsth2 << 32) | fst2;
}

/* Convert paired-single to a pair of 32-bit integers; both halves are
   forced to the SNAN pattern if either conversion overflowed or was
   invalid (the cause bits don't distinguish which half failed). */
uint64_t helper_float_cvtpw_ps(uint64_t fdt0)
{
    uint32_t wt2;
    uint32_t wth2;

    set_float_exception_flags(0, &env->active_fpu.fp_status);
    wt2 = float32_to_int32(fdt0 & 0XFFFFFFFF, &env->active_fpu.fp_status);
    wth2 = float32_to_int32(fdt0 >> 32, &env->active_fpu.fp_status);
    update_fcr31();
    if (GET_FP_CAUSE(env->active_fpu.fcr31) & (FP_OVERFLOW | FP_INVALID)) {
        wt2 = FLOAT_SNAN32;
        wth2 = FLOAT_SNAN32;
    }
    return ((uint64_t)wth2 << 32) | wt2;
}

/* Convert double to single precision. */
uint32_t helper_float_cvts_d(uint64_t fdt0)
{
    uint32_t fst2;

    set_float_exception_flags(0, &env->active_fpu.fp_status);
    fst2 = float64_to_float32(fdt0, &env->active_fpu.fp_status);
    update_fcr31();
    return fst2;
}

/* Convert 32-bit signed integer to single precision. */
uint32_t helper_float_cvts_w(uint32_t wt0)
{
    uint32_t fst2;

    set_float_exception_flags(0, &env->active_fpu.fp_status);
    fst2 = int32_to_float32(wt0, &env->active_fpu.fp_status);
    update_fcr31();
    return fst2;
}

/* Convert 64-bit signed integer to single precision. */
uint32_t helper_float_cvts_l(uint64_t dt0)
{
    uint32_t fst2;

    set_float_exception_flags(0, &env->active_fpu.fp_status);
    fst2 = int64_to_float32(dt0, &env->active_fpu.fp_status);
    update_fcr31();
    return fst2;
}

/* Lower half of a paired-single: a plain bit move, but flags are still
   cleared and FCR31 updated for consistency with the other conversions. */
uint32_t helper_float_cvts_pl(uint32_t wt0)
{
    uint32_t wt2;

    set_float_exception_flags(0, &env->active_fpu.fp_status);
    wt2 = wt0;
    update_fcr31();
    return wt2;
}

/* Upper half of a paired-single: plain bit move, see cvts_pl. */
uint32_t helper_float_cvts_pu(uint32_t wth0)
{
    uint32_t wt2;

    set_float_exception_flags(0, &env->active_fpu.fp_status);
    wt2 = wth0;
    update_fcr31();
    return wt2;
}

/* Convert single precision to 32-bit signed integer. */
uint32_t helper_float_cvtw_s(uint32_t fst0)
{
    uint32_t wt2;

    set_float_exception_flags(0, &env->active_fpu.fp_status);
    wt2 = float32_to_int32(fst0, &env->active_fpu.fp_status);
    update_fcr31();
    if (GET_FP_CAUSE(env->active_fpu.fcr31) & (FP_OVERFLOW | FP_INVALID))
        wt2 = FLOAT_SNAN32;
    return wt2;
}

/* Convert double precision to 32-bit signed integer. */
uint32_t helper_float_cvtw_d(uint64_t fdt0)
{
    uint32_t wt2;

    set_float_exception_flags(0, &env->active_fpu.fp_status);
    wt2 = float64_to_int32(fdt0, &env->active_fpu.fp_status);
    update_fcr31();
    if (GET_FP_CAUSE(env->active_fpu.fcr31) & (FP_OVERFLOW | FP_INVALID))
        wt2 = FLOAT_SNAN32;
    return wt2;
}
/* Directed float-to-integer conversions (ROUND/TRUNC/CEIL/FLOOR).
   Pattern: clear flags, force the required rounding mode, convert,
   restore the guest's rounding mode (RESTORE_ROUNDING_MODE), fold the
   flags into FCR31, and return the SNAN pattern on overflow/invalid.
   TRUNC uses the dedicated *_round_to_zero softfloat entry points so no
   mode switch is needed there. */

/* ROUND.L.D: double -> int64, round to nearest-even. */
uint64_t helper_float_roundl_d(uint64_t fdt0)
{
    uint64_t dt2;

    set_float_exception_flags(0, &env->active_fpu.fp_status);
    set_float_rounding_mode(float_round_nearest_even, &env->active_fpu.fp_status);
    dt2 = float64_to_int64(fdt0, &env->active_fpu.fp_status);
    RESTORE_ROUNDING_MODE;
    update_fcr31();
    if (GET_FP_CAUSE(env->active_fpu.fcr31) & (FP_OVERFLOW | FP_INVALID))
        dt2 = FLOAT_SNAN64;
    return dt2;
}

/* ROUND.L.S: single -> int64, round to nearest-even. */
uint64_t helper_float_roundl_s(uint32_t fst0)
{
    uint64_t dt2;

    set_float_exception_flags(0, &env->active_fpu.fp_status);
    set_float_rounding_mode(float_round_nearest_even, &env->active_fpu.fp_status);
    dt2 = float32_to_int64(fst0, &env->active_fpu.fp_status);
    RESTORE_ROUNDING_MODE;
    update_fcr31();
    if (GET_FP_CAUSE(env->active_fpu.fcr31) & (FP_OVERFLOW | FP_INVALID))
        dt2 = FLOAT_SNAN64;
    return dt2;
}

/* ROUND.W.D: double -> int32, round to nearest-even. */
uint32_t helper_float_roundw_d(uint64_t fdt0)
{
    uint32_t wt2;

    set_float_exception_flags(0, &env->active_fpu.fp_status);
    set_float_rounding_mode(float_round_nearest_even, &env->active_fpu.fp_status);
    wt2 = float64_to_int32(fdt0, &env->active_fpu.fp_status);
    RESTORE_ROUNDING_MODE;
    update_fcr31();
    if (GET_FP_CAUSE(env->active_fpu.fcr31) & (FP_OVERFLOW | FP_INVALID))
        wt2 = FLOAT_SNAN32;
    return wt2;
}

/* ROUND.W.S: single -> int32, round to nearest-even. */
uint32_t helper_float_roundw_s(uint32_t fst0)
{
    uint32_t wt2;

    set_float_exception_flags(0, &env->active_fpu.fp_status);
    set_float_rounding_mode(float_round_nearest_even, &env->active_fpu.fp_status);
    wt2 = float32_to_int32(fst0, &env->active_fpu.fp_status);
    RESTORE_ROUNDING_MODE;
    update_fcr31();
    if (GET_FP_CAUSE(env->active_fpu.fcr31) & (FP_OVERFLOW | FP_INVALID))
        wt2 = FLOAT_SNAN32;
    return wt2;
}

/* TRUNC.L.D: double -> int64, round toward zero. */
uint64_t helper_float_truncl_d(uint64_t fdt0)
{
    uint64_t dt2;

    set_float_exception_flags(0, &env->active_fpu.fp_status);
    dt2 = float64_to_int64_round_to_zero(fdt0, &env->active_fpu.fp_status);
    update_fcr31();
    if (GET_FP_CAUSE(env->active_fpu.fcr31) & (FP_OVERFLOW | FP_INVALID))
        dt2 = FLOAT_SNAN64;
    return dt2;
}

/* TRUNC.L.S: single -> int64, round toward zero. */
uint64_t helper_float_truncl_s(uint32_t fst0)
{
    uint64_t dt2;

    set_float_exception_flags(0, &env->active_fpu.fp_status);
    dt2 = float32_to_int64_round_to_zero(fst0, &env->active_fpu.fp_status);
    update_fcr31();
    if (GET_FP_CAUSE(env->active_fpu.fcr31) & (FP_OVERFLOW | FP_INVALID))
        dt2 = FLOAT_SNAN64;
    return dt2;
}

/* TRUNC.W.D: double -> int32, round toward zero. */
uint32_t helper_float_truncw_d(uint64_t fdt0)
{
    uint32_t wt2;

    set_float_exception_flags(0, &env->active_fpu.fp_status);
    wt2 = float64_to_int32_round_to_zero(fdt0, &env->active_fpu.fp_status);
    update_fcr31();
    if (GET_FP_CAUSE(env->active_fpu.fcr31) & (FP_OVERFLOW | FP_INVALID))
        wt2 = FLOAT_SNAN32;
    return wt2;
}

/* TRUNC.W.S: single -> int32, round toward zero. */
uint32_t helper_float_truncw_s(uint32_t fst0)
{
    uint32_t wt2;

    set_float_exception_flags(0, &env->active_fpu.fp_status);
    wt2 = float32_to_int32_round_to_zero(fst0, &env->active_fpu.fp_status);
    update_fcr31();
    if (GET_FP_CAUSE(env->active_fpu.fcr31) & (FP_OVERFLOW | FP_INVALID))
        wt2 = FLOAT_SNAN32;
    return wt2;
}

/* CEIL.L.D: double -> int64, round up. */
uint64_t helper_float_ceill_d(uint64_t fdt0)
{
    uint64_t dt2;

    set_float_exception_flags(0, &env->active_fpu.fp_status);
    set_float_rounding_mode(float_round_up, &env->active_fpu.fp_status);
    dt2 = float64_to_int64(fdt0, &env->active_fpu.fp_status);
    RESTORE_ROUNDING_MODE;
    update_fcr31();
    if (GET_FP_CAUSE(env->active_fpu.fcr31) & (FP_OVERFLOW | FP_INVALID))
        dt2 = FLOAT_SNAN64;
    return dt2;
}

/* CEIL.L.S: single -> int64, round up. */
uint64_t helper_float_ceill_s(uint32_t fst0)
{
    uint64_t dt2;

    set_float_exception_flags(0, &env->active_fpu.fp_status);
    set_float_rounding_mode(float_round_up, &env->active_fpu.fp_status);
    dt2 = float32_to_int64(fst0, &env->active_fpu.fp_status);
    RESTORE_ROUNDING_MODE;
    update_fcr31();
    if (GET_FP_CAUSE(env->active_fpu.fcr31) & (FP_OVERFLOW | FP_INVALID))
        dt2 = FLOAT_SNAN64;
    return dt2;
}

/* CEIL.W.D: double -> int32, round up. */
uint32_t helper_float_ceilw_d(uint64_t fdt0)
{
    uint32_t wt2;

    set_float_exception_flags(0, &env->active_fpu.fp_status);
    set_float_rounding_mode(float_round_up, &env->active_fpu.fp_status);
    wt2 = float64_to_int32(fdt0, &env->active_fpu.fp_status);
    RESTORE_ROUNDING_MODE;
    update_fcr31();
    if (GET_FP_CAUSE(env->active_fpu.fcr31) & (FP_OVERFLOW | FP_INVALID))
        wt2 = FLOAT_SNAN32;
    return wt2;
}

/* CEIL.W.S: single -> int32, round up. */
uint32_t helper_float_ceilw_s(uint32_t fst0)
{
    uint32_t wt2;

    set_float_exception_flags(0, &env->active_fpu.fp_status);
    set_float_rounding_mode(float_round_up, &env->active_fpu.fp_status);
    wt2 = float32_to_int32(fst0, &env->active_fpu.fp_status);
    RESTORE_ROUNDING_MODE;
    update_fcr31();
    if (GET_FP_CAUSE(env->active_fpu.fcr31) & (FP_OVERFLOW | FP_INVALID))
        wt2 = FLOAT_SNAN32;
    return wt2;
}

/* FLOOR.L.D: double -> int64, round down. */
uint64_t helper_float_floorl_d(uint64_t fdt0)
{
    uint64_t dt2;

    set_float_exception_flags(0, &env->active_fpu.fp_status);
    set_float_rounding_mode(float_round_down, &env->active_fpu.fp_status);
    dt2 = float64_to_int64(fdt0, &env->active_fpu.fp_status);
    RESTORE_ROUNDING_MODE;
    update_fcr31();
    if (GET_FP_CAUSE(env->active_fpu.fcr31) & (FP_OVERFLOW | FP_INVALID))
        dt2 = FLOAT_SNAN64;
    return dt2;
}

/* FLOOR.L.S: single -> int64, round down. */
uint64_t helper_float_floorl_s(uint32_t fst0)
{
    uint64_t dt2;

    set_float_exception_flags(0, &env->active_fpu.fp_status);
    set_float_rounding_mode(float_round_down, &env->active_fpu.fp_status);
    dt2 = float32_to_int64(fst0, &env->active_fpu.fp_status);
    RESTORE_ROUNDING_MODE;
    update_fcr31();
    if (GET_FP_CAUSE(env->active_fpu.fcr31) & (FP_OVERFLOW | FP_INVALID))
        dt2 = FLOAT_SNAN64;
    return dt2;
}

/* FLOOR.W.D: double -> int32, round down. */
uint32_t helper_float_floorw_d(uint64_t fdt0)
{
    uint32_t wt2;

    set_float_exception_flags(0, &env->active_fpu.fp_status);
    set_float_rounding_mode(float_round_down, &env->active_fpu.fp_status);
    wt2 = float64_to_int32(fdt0, &env->active_fpu.fp_status);
    RESTORE_ROUNDING_MODE;
    update_fcr31();
    if (GET_FP_CAUSE(env->active_fpu.fcr31) & (FP_OVERFLOW | FP_INVALID))
        wt2 = FLOAT_SNAN32;
    return wt2;
}

/* FLOOR.W.S: single -> int32, round down. */
uint32_t helper_float_floorw_s(uint32_t fst0)
{
    uint32_t wt2;

    set_float_exception_flags(0, &env->active_fpu.fp_status);
    set_float_rounding_mode(float_round_down, &env->active_fpu.fp_status);
    wt2 = float32_to_int32(fst0, &env->active_fpu.fp_status);
    RESTORE_ROUNDING_MODE;
    update_fcr31();
    if (GET_FP_CAUSE(env->active_fpu.fcr31) & (FP_OVERFLOW | FP_INVALID))
        wt2 = FLOAT_SNAN32;
    return wt2;
}
/* unary operations, not modifying fp status */

/* FLOAT_UNOP(name) emits three helpers for a flag-free unary op
   (currently abs and chs, which only touch the sign bit): a double
   variant, a single variant, and a paired-single variant that applies
   the single-precision op to each 32-bit half independently. */
#define FLOAT_UNOP(name) \
uint64_t helper_float_ ## name ## _d(uint64_t fdt0) \
{ \
    return float64_ ## name(fdt0); \
} \
uint32_t helper_float_ ## name ## _s(uint32_t fst0) \
{ \
    return float32_ ## name(fst0); \
} \
uint64_t helper_float_ ## name ## _ps(uint64_t fdt0) \
{ \
    uint32_t wt0; \
    uint32_t wth0; \
 \
    wt0 = float32_ ## name(fdt0 & 0XFFFFFFFF); \
    wth0 = float32_ ## name(fdt0 >> 32); \
    return ((uint64_t)wth0 << 32) | wt0; \
}
FLOAT_UNOP(abs)
FLOAT_UNOP(chs)
#undef FLOAT_UNOP
/* MIPS specific unary operations */

/* RECIP.D: reciprocal, computed as 1.0 / x. */
uint64_t helper_float_recip_d(uint64_t fdt0)
{
    uint64_t fdt2;

    set_float_exception_flags(0, &env->active_fpu.fp_status);
    fdt2 = float64_div(FLOAT_ONE64, fdt0, &env->active_fpu.fp_status);
    update_fcr31();
    return fdt2;
}

/* RECIP.S: single-precision reciprocal. */
uint32_t helper_float_recip_s(uint32_t fst0)
{
    uint32_t fst2;

    set_float_exception_flags(0, &env->active_fpu.fp_status);
    fst2 = float32_div(FLOAT_ONE32, fst0, &env->active_fpu.fp_status);
    update_fcr31();
    return fst2;
}

/* RSQRT.D: reciprocal square root, computed as 1.0 / sqrt(x). */
uint64_t helper_float_rsqrt_d(uint64_t fdt0)
{
    uint64_t fdt2;

    set_float_exception_flags(0, &env->active_fpu.fp_status);
    fdt2 = float64_sqrt(fdt0, &env->active_fpu.fp_status);
    fdt2 = float64_div(FLOAT_ONE64, fdt2, &env->active_fpu.fp_status);
    update_fcr31();
    return fdt2;
}

/* RSQRT.S: single-precision reciprocal square root. */
uint32_t helper_float_rsqrt_s(uint32_t fst0)
{
    uint32_t fst2;

    set_float_exception_flags(0, &env->active_fpu.fp_status);
    fst2 = float32_sqrt(fst0, &env->active_fpu.fp_status);
    fst2 = float32_div(FLOAT_ONE32, fst2, &env->active_fpu.fp_status);
    update_fcr31();
    return fst2;
}

/* RECIP1.D: reduced-precision reciprocal step; implemented here as a
   full-precision divide, same computation as helper_float_recip_d. */
uint64_t helper_float_recip1_d(uint64_t fdt0)
{
    uint64_t fdt2;

    set_float_exception_flags(0, &env->active_fpu.fp_status);
    fdt2 = float64_div(FLOAT_ONE64, fdt0, &env->active_fpu.fp_status);
    update_fcr31();
    return fdt2;
}

/* RECIP1.S: see recip1_d. */
uint32_t helper_float_recip1_s(uint32_t fst0)
{
    uint32_t fst2;

    set_float_exception_flags(0, &env->active_fpu.fp_status);
    fst2 = float32_div(FLOAT_ONE32, fst0, &env->active_fpu.fp_status);
    update_fcr31();
    return fst2;
}

/* RECIP1.PS: per-half reciprocal of a paired-single value.
   The low half is implicitly truncated to 32 bits by the uint32_t
   parameter of float32_div. */
uint64_t helper_float_recip1_ps(uint64_t fdt0)
{
    uint32_t fst2;
    uint32_t fsth2;

    set_float_exception_flags(0, &env->active_fpu.fp_status);
    fst2 = float32_div(FLOAT_ONE32, fdt0 & 0XFFFFFFFF, &env->active_fpu.fp_status);
    fsth2 = float32_div(FLOAT_ONE32, fdt0 >> 32, &env->active_fpu.fp_status);
    update_fcr31();
    return ((uint64_t)fsth2 << 32) | fst2;
}

/* RSQRT1.D: reduced-precision reciprocal-sqrt step; implemented as a
   full-precision 1/sqrt(x), same computation as helper_float_rsqrt_d. */
uint64_t helper_float_rsqrt1_d(uint64_t fdt0)
{
    uint64_t fdt2;

    set_float_exception_flags(0, &env->active_fpu.fp_status);
    fdt2 = float64_sqrt(fdt0, &env->active_fpu.fp_status);
    fdt2 = float64_div(FLOAT_ONE64, fdt2, &env->active_fpu.fp_status);
    update_fcr31();
    return fdt2;
}

/* RSQRT1.S: see rsqrt1_d. */
uint32_t helper_float_rsqrt1_s(uint32_t fst0)
{
    uint32_t fst2;

    set_float_exception_flags(0, &env->active_fpu.fp_status);
    fst2 = float32_sqrt(fst0, &env->active_fpu.fp_status);
    fst2 = float32_div(FLOAT_ONE32, fst2, &env->active_fpu.fp_status);
    update_fcr31();
    return fst2;
}

/* RSQRT1.PS: per-half 1/sqrt of a paired-single value. */
uint64_t helper_float_rsqrt1_ps(uint64_t fdt0)
{
    uint32_t fst2;
    uint32_t fsth2;

    set_float_exception_flags(0, &env->active_fpu.fp_status);
    fst2 = float32_sqrt(fdt0 & 0XFFFFFFFF, &env->active_fpu.fp_status);
    fsth2 = float32_sqrt(fdt0 >> 32, &env->active_fpu.fp_status);
    fst2 = float32_div(FLOAT_ONE32, fst2, &env->active_fpu.fp_status);
    fsth2 = float32_div(FLOAT_ONE32, fsth2, &env->active_fpu.fp_status);
    update_fcr31();
    return ((uint64_t)fsth2 << 32) | fst2;
}
/* NOTE(review): prototype shorthand that appears unused in this part of
   the file -- candidate for removal if nothing else references it. */
#define FLOAT_OP(name, p) void helper_float_##name##_##p(void)
/* binary operations */

/* FLOAT_BINOP(name) emits the d/s/ps helpers for a two-operand op
   (add, sub, mul, div).  Each clears the softfloat flags, performs the
   operation, folds the flags into FCR31, and returns the QNAN pattern
   when the operation raised Invalid.  The ps variant operates on both
   32-bit halves and forces both halves to QNAN on Invalid. */
#define FLOAT_BINOP(name) \
uint64_t helper_float_ ## name ## _d(uint64_t fdt0, uint64_t fdt1) \
{ \
    uint64_t dt2; \
 \
    set_float_exception_flags(0, &env->active_fpu.fp_status); \
    dt2 = float64_ ## name (fdt0, fdt1, &env->active_fpu.fp_status); \
    update_fcr31(); \
    if (GET_FP_CAUSE(env->active_fpu.fcr31) & FP_INVALID) \
        dt2 = FLOAT_QNAN64; \
    return dt2; \
} \
 \
uint32_t helper_float_ ## name ## _s(uint32_t fst0, uint32_t fst1) \
{ \
    uint32_t wt2; \
 \
    set_float_exception_flags(0, &env->active_fpu.fp_status); \
    wt2 = float32_ ## name (fst0, fst1, &env->active_fpu.fp_status); \
    update_fcr31(); \
    if (GET_FP_CAUSE(env->active_fpu.fcr31) & FP_INVALID) \
        wt2 = FLOAT_QNAN32; \
    return wt2; \
} \
 \
uint64_t helper_float_ ## name ## _ps(uint64_t fdt0, uint64_t fdt1) \
{ \
    uint32_t fst0 = fdt0 & 0XFFFFFFFF; \
    uint32_t fsth0 = fdt0 >> 32; \
    uint32_t fst1 = fdt1 & 0XFFFFFFFF; \
    uint32_t fsth1 = fdt1 >> 32; \
    uint32_t wt2; \
    uint32_t wth2; \
 \
    set_float_exception_flags(0, &env->active_fpu.fp_status); \
    wt2 = float32_ ## name (fst0, fst1, &env->active_fpu.fp_status); \
    wth2 = float32_ ## name (fsth0, fsth1, &env->active_fpu.fp_status); \
    update_fcr31(); \
    if (GET_FP_CAUSE(env->active_fpu.fcr31) & FP_INVALID) { \
        wt2 = FLOAT_QNAN32; \
        wth2 = FLOAT_QNAN32; \
    } \
    return ((uint64_t)wth2 << 32) | wt2; \
}

FLOAT_BINOP(add)
FLOAT_BINOP(sub)
FLOAT_BINOP(mul)
FLOAT_BINOP(div)
#undef FLOAT_BINOP
/* ternary operations */

/* FLOAT_TERNOP(name1, name2) emits fused-style helpers computing
   name2(name1(a, b), c), e.g. muladd/mulsub, in d/s/ps flavours.
   NOTE(review): unlike FLOAT_BINOP these neither clear the exception
   flags nor call update_fcr31() -- flags accumulate in fp_status until
   the next op that syncs FCR31; confirm this is intentional. */
#define FLOAT_TERNOP(name1, name2) \
uint64_t helper_float_ ## name1 ## name2 ## _d(uint64_t fdt0, uint64_t fdt1, \
                                               uint64_t fdt2) \
{ \
    fdt0 = float64_ ## name1 (fdt0, fdt1, &env->active_fpu.fp_status); \
    return float64_ ## name2 (fdt0, fdt2, &env->active_fpu.fp_status); \
} \
 \
uint32_t helper_float_ ## name1 ## name2 ## _s(uint32_t fst0, uint32_t fst1, \
                                               uint32_t fst2) \
{ \
    fst0 = float32_ ## name1 (fst0, fst1, &env->active_fpu.fp_status); \
    return float32_ ## name2 (fst0, fst2, &env->active_fpu.fp_status); \
} \
 \
uint64_t helper_float_ ## name1 ## name2 ## _ps(uint64_t fdt0, uint64_t fdt1, \
                                                uint64_t fdt2) \
{ \
    uint32_t fst0 = fdt0 & 0XFFFFFFFF; \
    uint32_t fsth0 = fdt0 >> 32; \
    uint32_t fst1 = fdt1 & 0XFFFFFFFF; \
    uint32_t fsth1 = fdt1 >> 32; \
    uint32_t fst2 = fdt2 & 0XFFFFFFFF; \
    uint32_t fsth2 = fdt2 >> 32; \
 \
    fst0 = float32_ ## name1 (fst0, fst1, &env->active_fpu.fp_status); \
    fsth0 = float32_ ## name1 (fsth0, fsth1, &env->active_fpu.fp_status); \
    fst2 = float32_ ## name2 (fst0, fst2, &env->active_fpu.fp_status); \
    fsth2 = float32_ ## name2 (fsth0, fsth2, &env->active_fpu.fp_status); \
    return ((uint64_t)fsth2 << 32) | fst2; \
}

FLOAT_TERNOP(mul, add)
FLOAT_TERNOP(mul, sub)
#undef FLOAT_TERNOP
/* negated ternary operations */

/* FLOAT_NTERNOP(name1, name2) emits the negated fused-style helpers
   computing -(name2(name1(a, b), c)) (NMADD/NMSUB style); the negation
   is a sign-bit flip via float64_chs/float32_chs.  Like FLOAT_TERNOP,
   these do not clear flags or sync FCR31. */
#define FLOAT_NTERNOP(name1, name2) \
uint64_t helper_float_n ## name1 ## name2 ## _d(uint64_t fdt0, uint64_t fdt1, \
                                                uint64_t fdt2) \
{ \
    fdt0 = float64_ ## name1 (fdt0, fdt1, &env->active_fpu.fp_status); \
    fdt2 = float64_ ## name2 (fdt0, fdt2, &env->active_fpu.fp_status); \
    return float64_chs(fdt2); \
} \
 \
uint32_t helper_float_n ## name1 ## name2 ## _s(uint32_t fst0, uint32_t fst1, \
                                                uint32_t fst2) \
{ \
    fst0 = float32_ ## name1 (fst0, fst1, &env->active_fpu.fp_status); \
    fst2 = float32_ ## name2 (fst0, fst2, &env->active_fpu.fp_status); \
    return float32_chs(fst2); \
} \
 \
uint64_t helper_float_n ## name1 ## name2 ## _ps(uint64_t fdt0, uint64_t fdt1,\
                                                 uint64_t fdt2) \
{ \
    uint32_t fst0 = fdt0 & 0XFFFFFFFF; \
    uint32_t fsth0 = fdt0 >> 32; \
    uint32_t fst1 = fdt1 & 0XFFFFFFFF; \
    uint32_t fsth1 = fdt1 >> 32; \
    uint32_t fst2 = fdt2 & 0XFFFFFFFF; \
    uint32_t fsth2 = fdt2 >> 32; \
 \
    fst0 = float32_ ## name1 (fst0, fst1, &env->active_fpu.fp_status); \
    fsth0 = float32_ ## name1 (fsth0, fsth1, &env->active_fpu.fp_status); \
    fst2 = float32_ ## name2 (fst0, fst2, &env->active_fpu.fp_status); \
    fsth2 = float32_ ## name2 (fsth0, fsth2, &env->active_fpu.fp_status); \
    fst2 = float32_chs(fst2); \
    fsth2 = float32_chs(fsth2); \
    return ((uint64_t)fsth2 << 32) | fst2; \
}

FLOAT_NTERNOP(mul, add)
FLOAT_NTERNOP(mul, sub)
#undef FLOAT_NTERNOP
/* MIPS specific binary operations */

/* RECIP2 step: computes -(a * b - 1). */
uint64_t helper_float_recip2_d(uint64_t fdt0, uint64_t fdt2)
{
    set_float_exception_flags(0, &env->active_fpu.fp_status);
    fdt2 = float64_mul(fdt0, fdt2, &env->active_fpu.fp_status);
    fdt2 = float64_chs(float64_sub(fdt2, FLOAT_ONE64, &env->active_fpu.fp_status));
    update_fcr31();
    return fdt2;
}

/* RECIP2 step, single precision. */
uint32_t helper_float_recip2_s(uint32_t fst0, uint32_t fst2)
{
    set_float_exception_flags(0, &env->active_fpu.fp_status);
    fst2 = float32_mul(fst0, fst2, &env->active_fpu.fp_status);
    fst2 = float32_chs(float32_sub(fst2, FLOAT_ONE32, &env->active_fpu.fp_status));
    update_fcr31();
    return fst2;
}

/* RECIP2 step applied to each half of a paired-single. */
uint64_t helper_float_recip2_ps(uint64_t fdt0, uint64_t fdt2)
{
    uint32_t fst0 = fdt0 & 0XFFFFFFFF;
    uint32_t fsth0 = fdt0 >> 32;
    uint32_t fst2 = fdt2 & 0XFFFFFFFF;
    uint32_t fsth2 = fdt2 >> 32;

    set_float_exception_flags(0, &env->active_fpu.fp_status);
    fst2 = float32_mul(fst0, fst2, &env->active_fpu.fp_status);
    fsth2 = float32_mul(fsth0, fsth2, &env->active_fpu.fp_status);
    fst2 = float32_chs(float32_sub(fst2, FLOAT_ONE32, &env->active_fpu.fp_status));
    fsth2 = float32_chs(float32_sub(fsth2, FLOAT_ONE32, &env->active_fpu.fp_status));
    update_fcr31();
    return ((uint64_t)fsth2 << 32) | fst2;
}

/* RSQRT2 step: computes -((a * b - 1) / 2). */
uint64_t helper_float_rsqrt2_d(uint64_t fdt0, uint64_t fdt2)
{
    set_float_exception_flags(0, &env->active_fpu.fp_status);
    fdt2 = float64_mul(fdt0, fdt2, &env->active_fpu.fp_status);
    fdt2 = float64_sub(fdt2, FLOAT_ONE64, &env->active_fpu.fp_status);
    fdt2 = float64_chs(float64_div(fdt2, FLOAT_TWO64, &env->active_fpu.fp_status));
    update_fcr31();
    return fdt2;
}

/* RSQRT2 step, single precision. */
uint32_t helper_float_rsqrt2_s(uint32_t fst0, uint32_t fst2)
{
    set_float_exception_flags(0, &env->active_fpu.fp_status);
    fst2 = float32_mul(fst0, fst2, &env->active_fpu.fp_status);
    fst2 = float32_sub(fst2, FLOAT_ONE32, &env->active_fpu.fp_status);
    fst2 = float32_chs(float32_div(fst2, FLOAT_TWO32, &env->active_fpu.fp_status));
    update_fcr31();
    return fst2;
}

/* RSQRT2 step applied to each half of a paired-single. */
uint64_t helper_float_rsqrt2_ps(uint64_t fdt0, uint64_t fdt2)
{
    uint32_t fst0 = fdt0 & 0XFFFFFFFF;
    uint32_t fsth0 = fdt0 >> 32;
    uint32_t fst2 = fdt2 & 0XFFFFFFFF;
    uint32_t fsth2 = fdt2 >> 32;

    set_float_exception_flags(0, &env->active_fpu.fp_status);
    fst2 = float32_mul(fst0, fst2, &env->active_fpu.fp_status);
    fsth2 = float32_mul(fsth0, fsth2, &env->active_fpu.fp_status);
    fst2 = float32_sub(fst2, FLOAT_ONE32, &env->active_fpu.fp_status);
    fsth2 = float32_sub(fsth2, FLOAT_ONE32, &env->active_fpu.fp_status);
    fst2 = float32_chs(float32_div(fst2, FLOAT_TWO32, &env->active_fpu.fp_status));
    fsth2 = float32_chs(float32_div(fsth2, FLOAT_TWO32, &env->active_fpu.fp_status));
    update_fcr31();
    return ((uint64_t)fsth2 << 32) | fst2;
}

/* ADDR.PS: cross sums -- low result = low0 + high0,
   high result = low1 + high1. */
uint64_t helper_float_addr_ps(uint64_t fdt0, uint64_t fdt1)
{
    uint32_t fst0 = fdt0 & 0XFFFFFFFF;
    uint32_t fsth0 = fdt0 >> 32;
    uint32_t fst1 = fdt1 & 0XFFFFFFFF;
    uint32_t fsth1 = fdt1 >> 32;
    uint32_t fst2;
    uint32_t fsth2;

    set_float_exception_flags(0, &env->active_fpu.fp_status);
    fst2 = float32_add (fst0, fsth0, &env->active_fpu.fp_status);
    fsth2 = float32_add (fst1, fsth1, &env->active_fpu.fp_status);
    update_fcr31();
    return ((uint64_t)fsth2 << 32) | fst2;
}

/* MULR.PS: cross products, same operand pairing as ADDR.PS. */
uint64_t helper_float_mulr_ps(uint64_t fdt0, uint64_t fdt1)
{
    uint32_t fst0 = fdt0 & 0XFFFFFFFF;
    uint32_t fsth0 = fdt0 >> 32;
    uint32_t fst1 = fdt1 & 0XFFFFFFFF;
    uint32_t fsth1 = fdt1 >> 32;
    uint32_t fst2;
    uint32_t fsth2;

    set_float_exception_flags(0, &env->active_fpu.fp_status);
    fst2 = float32_mul (fst0, fsth0, &env->active_fpu.fp_status);
    fsth2 = float32_mul (fst1, fsth1, &env->active_fpu.fp_status);
    update_fcr31();
    return ((uint64_t)fsth2 << 32) | fst2;
}
/* compare operations */

/* FOP_COND_D(op, cond) emits the C.cond.D helpers: helper_cmp_d_<op>
   evaluates "cond" on the raw operands, helper_cmpabs_d_<op> on their
   absolute values (CABS.cond.D), then sets or clears FP condition code
   "cc" accordingly.  Flags are cleared first so update_fcr31() only
   sees exceptions raised by this comparison. */
#define FOP_COND_D(op, cond) \
void helper_cmp_d_ ## op (uint64_t fdt0, uint64_t fdt1, int cc) \
{ \
    int c; \
    set_float_exception_flags(0, &env->active_fpu.fp_status); \
    c = cond; \
    update_fcr31(); \
    if (c) \
        SET_FP_COND(cc, env->active_fpu); \
    else \
        CLEAR_FP_COND(cc, env->active_fpu); \
} \
void helper_cmpabs_d_ ## op (uint64_t fdt0, uint64_t fdt1, int cc) \
{ \
    int c; \
    set_float_exception_flags(0, &env->active_fpu.fp_status); \
    fdt0 = float64_abs(fdt0); \
    fdt1 = float64_abs(fdt1); \
    c = cond; \
    update_fcr31(); \
    if (c) \
        SET_FP_COND(cc, env->active_fpu); \
    else \
        CLEAR_FP_COND(cc, env->active_fpu); \
}

/* NOTE: the comma operator makes "cond" evaluate to false,
 * but float64_unordered_quiet() is still called. */
FOP_COND_D(f, (float64_unordered_quiet(fdt1, fdt0, &env->active_fpu.fp_status), 0))
FOP_COND_D(un, float64_unordered_quiet(fdt1, fdt0, &env->active_fpu.fp_status))
FOP_COND_D(eq, float64_eq_quiet(fdt0, fdt1, &env->active_fpu.fp_status))
FOP_COND_D(ueq, float64_unordered_quiet(fdt1, fdt0, &env->active_fpu.fp_status) || float64_eq_quiet(fdt0, fdt1, &env->active_fpu.fp_status))
FOP_COND_D(olt, float64_lt_quiet(fdt0, fdt1, &env->active_fpu.fp_status))
FOP_COND_D(ult, float64_unordered_quiet(fdt1, fdt0, &env->active_fpu.fp_status) || float64_lt_quiet(fdt0, fdt1, &env->active_fpu.fp_status))
FOP_COND_D(ole, float64_le_quiet(fdt0, fdt1, &env->active_fpu.fp_status))
FOP_COND_D(ule, float64_unordered_quiet(fdt1, fdt0, &env->active_fpu.fp_status) || float64_le_quiet(fdt0, fdt1, &env->active_fpu.fp_status))
/* NOTE: the comma operator makes "cond" evaluate to false,
 * but the signaling float64_unordered() is still called. */
FOP_COND_D(sf, (float64_unordered(fdt1, fdt0, &env->active_fpu.fp_status), 0))
FOP_COND_D(ngle,float64_unordered(fdt1, fdt0, &env->active_fpu.fp_status))
FOP_COND_D(seq, float64_eq(fdt0, fdt1, &env->active_fpu.fp_status))
FOP_COND_D(ngl, float64_unordered(fdt1, fdt0, &env->active_fpu.fp_status) || float64_eq(fdt0, fdt1, &env->active_fpu.fp_status))
FOP_COND_D(lt, float64_lt(fdt0, fdt1, &env->active_fpu.fp_status))
FOP_COND_D(nge, float64_unordered(fdt1, fdt0, &env->active_fpu.fp_status) || float64_lt(fdt0, fdt1, &env->active_fpu.fp_status))
FOP_COND_D(le, float64_le(fdt0, fdt1, &env->active_fpu.fp_status))
FOP_COND_D(ngt, float64_unordered(fdt1, fdt0, &env->active_fpu.fp_status) || float64_le(fdt0, fdt1, &env->active_fpu.fp_status))
/* FOP_COND_S(op, cond) emits the C.cond.S helpers: helper_cmp_s_<op>
   evaluates "cond" on the raw operands, helper_cmpabs_s_<op> on their
   absolute values (CABS.cond.S), then sets or clears FP condition code
   "cc" accordingly. */
#define FOP_COND_S(op, cond) \
void helper_cmp_s_ ## op (uint32_t fst0, uint32_t fst1, int cc) \
{ \
    int c; \
    set_float_exception_flags(0, &env->active_fpu.fp_status); \
    c = cond; \
    update_fcr31(); \
    if (c) \
        SET_FP_COND(cc, env->active_fpu); \
    else \
        CLEAR_FP_COND(cc, env->active_fpu); \
} \
void helper_cmpabs_s_ ## op (uint32_t fst0, uint32_t fst1, int cc) \
{ \
    int c; \
    set_float_exception_flags(0, &env->active_fpu.fp_status); \
    fst0 = float32_abs(fst0); \
    fst1 = float32_abs(fst1); \
    c = cond; \
    update_fcr31(); \
    if (c) \
        SET_FP_COND(cc, env->active_fpu); \
    else \
        CLEAR_FP_COND(cc, env->active_fpu); \
}

/* NOTE: the comma operator makes "cond" evaluate to false,
 * but float32_unordered_quiet() is still called. */
FOP_COND_S(f, (float32_unordered_quiet(fst1, fst0, &env->active_fpu.fp_status), 0))
FOP_COND_S(un, float32_unordered_quiet(fst1, fst0, &env->active_fpu.fp_status))
FOP_COND_S(eq, float32_eq_quiet(fst0, fst1, &env->active_fpu.fp_status))
FOP_COND_S(ueq, float32_unordered_quiet(fst1, fst0, &env->active_fpu.fp_status) || float32_eq_quiet(fst0, fst1, &env->active_fpu.fp_status))
FOP_COND_S(olt, float32_lt_quiet(fst0, fst1, &env->active_fpu.fp_status))
FOP_COND_S(ult, float32_unordered_quiet(fst1, fst0, &env->active_fpu.fp_status) || float32_lt_quiet(fst0, fst1, &env->active_fpu.fp_status))
FOP_COND_S(ole, float32_le_quiet(fst0, fst1, &env->active_fpu.fp_status))
FOP_COND_S(ule, float32_unordered_quiet(fst1, fst0, &env->active_fpu.fp_status) || float32_le_quiet(fst0, fst1, &env->active_fpu.fp_status))
/* NOTE: the comma operator makes "cond" evaluate to false,
 * but the signaling float32_unordered() is still called. */
FOP_COND_S(sf, (float32_unordered(fst1, fst0, &env->active_fpu.fp_status), 0))
FOP_COND_S(ngle,float32_unordered(fst1, fst0, &env->active_fpu.fp_status))
FOP_COND_S(seq, float32_eq(fst0, fst1, &env->active_fpu.fp_status))
FOP_COND_S(ngl, float32_unordered(fst1, fst0, &env->active_fpu.fp_status) || float32_eq(fst0, fst1, &env->active_fpu.fp_status))
FOP_COND_S(lt, float32_lt(fst0, fst1, &env->active_fpu.fp_status))
FOP_COND_S(nge, float32_unordered(fst1, fst0, &env->active_fpu.fp_status) || float32_lt(fst0, fst1, &env->active_fpu.fp_status))
FOP_COND_S(le, float32_le(fst0, fst1, &env->active_fpu.fp_status))
FOP_COND_S(ngt, float32_unordered(fst1, fst0, &env->active_fpu.fp_status) || float32_le(fst0, fst1, &env->active_fpu.fp_status))
3333 #define FOP_COND_PS(op, condl, condh) \
3334 void helper_cmp_ps_ ## op (uint64_t fdt0, uint64_t fdt1, int cc) \
3336 uint32_t fst0, fsth0, fst1, fsth1; \
3337 int ch, cl; \
3338 set_float_exception_flags(0, &env->active_fpu.fp_status); \
3339 fst0 = fdt0 & 0XFFFFFFFF; \
3340 fsth0 = fdt0 >> 32; \
3341 fst1 = fdt1 & 0XFFFFFFFF; \
3342 fsth1 = fdt1 >> 32; \
3343 cl = condl; \
3344 ch = condh; \
3345 update_fcr31(); \
3346 if (cl) \
3347 SET_FP_COND(cc, env->active_fpu); \
3348 else \
3349 CLEAR_FP_COND(cc, env->active_fpu); \
3350 if (ch) \
3351 SET_FP_COND(cc + 1, env->active_fpu); \
3352 else \
3353 CLEAR_FP_COND(cc + 1, env->active_fpu); \
3355 void helper_cmpabs_ps_ ## op (uint64_t fdt0, uint64_t fdt1, int cc) \
3357 uint32_t fst0, fsth0, fst1, fsth1; \
3358 int ch, cl; \
3359 fst0 = float32_abs(fdt0 & 0XFFFFFFFF); \
3360 fsth0 = float32_abs(fdt0 >> 32); \
3361 fst1 = float32_abs(fdt1 & 0XFFFFFFFF); \
3362 fsth1 = float32_abs(fdt1 >> 32); \
3363 cl = condl; \
3364 ch = condh; \
3365 update_fcr31(); \
3366 if (cl) \
3367 SET_FP_COND(cc, env->active_fpu); \
3368 else \
3369 CLEAR_FP_COND(cc, env->active_fpu); \
3370 if (ch) \
3371 SET_FP_COND(cc + 1, env->active_fpu); \
3372 else \
3373 CLEAR_FP_COND(cc + 1, env->active_fpu); \
3376 /* NOTE: the comma operator will make "cond" to eval to false,
3377 * but float32_unordered_quiet() is still called. */
3378 FOP_COND_PS(f, (float32_unordered_quiet(fst1, fst0, &env->active_fpu.fp_status), 0),
3379 (float32_unordered_quiet(fsth1, fsth0, &env->active_fpu.fp_status), 0))
3380 FOP_COND_PS(un, float32_unordered_quiet(fst1, fst0, &env->active_fpu.fp_status),
3381 float32_unordered_quiet(fsth1, fsth0, &env->active_fpu.fp_status))
3382 FOP_COND_PS(eq, float32_eq_quiet(fst0, fst1, &env->active_fpu.fp_status),
3383 float32_eq_quiet(fsth0, fsth1, &env->active_fpu.fp_status))
3384 FOP_COND_PS(ueq, float32_unordered_quiet(fst1, fst0, &env->active_fpu.fp_status) || float32_eq_quiet(fst0, fst1, &env->active_fpu.fp_status),
3385 float32_unordered_quiet(fsth1, fsth0, &env->active_fpu.fp_status) || float32_eq_quiet(fsth0, fsth1, &env->active_fpu.fp_status))
3386 FOP_COND_PS(olt, float32_lt_quiet(fst0, fst1, &env->active_fpu.fp_status),
3387 float32_lt_quiet(fsth0, fsth1, &env->active_fpu.fp_status))
3388 FOP_COND_PS(ult, float32_unordered_quiet(fst1, fst0, &env->active_fpu.fp_status) || float32_lt_quiet(fst0, fst1, &env->active_fpu.fp_status),
3389 float32_unordered_quiet(fsth1, fsth0, &env->active_fpu.fp_status) || float32_lt_quiet(fsth0, fsth1, &env->active_fpu.fp_status))
3390 FOP_COND_PS(ole, float32_le_quiet(fst0, fst1, &env->active_fpu.fp_status),
3391 float32_le_quiet(fsth0, fsth1, &env->active_fpu.fp_status))
3392 FOP_COND_PS(ule, float32_unordered_quiet(fst1, fst0, &env->active_fpu.fp_status) || float32_le_quiet(fst0, fst1, &env->active_fpu.fp_status),
3393 float32_unordered_quiet(fsth1, fsth0, &env->active_fpu.fp_status) || float32_le_quiet(fsth0, fsth1, &env->active_fpu.fp_status))
3394 /* NOTE: the comma operator will make "cond" to eval to false,
3395 * but float32_unordered() is still called. */
3396 FOP_COND_PS(sf, (float32_unordered(fst1, fst0, &env->active_fpu.fp_status), 0),
3397 (float32_unordered(fsth1, fsth0, &env->active_fpu.fp_status), 0))
3398 FOP_COND_PS(ngle,float32_unordered(fst1, fst0, &env->active_fpu.fp_status),
3399 float32_unordered(fsth1, fsth0, &env->active_fpu.fp_status))
3400 FOP_COND_PS(seq, float32_eq(fst0, fst1, &env->active_fpu.fp_status),
3401 float32_eq(fsth0, fsth1, &env->active_fpu.fp_status))
3402 FOP_COND_PS(ngl, float32_unordered(fst1, fst0, &env->active_fpu.fp_status) || float32_eq(fst0, fst1, &env->active_fpu.fp_status),
3403 float32_unordered(fsth1, fsth0, &env->active_fpu.fp_status) || float32_eq(fsth0, fsth1, &env->active_fpu.fp_status))
3404 FOP_COND_PS(lt, float32_lt(fst0, fst1, &env->active_fpu.fp_status),
3405 float32_lt(fsth0, fsth1, &env->active_fpu.fp_status))
3406 FOP_COND_PS(nge, float32_unordered(fst1, fst0, &env->active_fpu.fp_status) || float32_lt(fst0, fst1, &env->active_fpu.fp_status),
3407 float32_unordered(fsth1, fsth0, &env->active_fpu.fp_status) || float32_lt(fsth0, fsth1, &env->active_fpu.fp_status))
3408 FOP_COND_PS(le, float32_le(fst0, fst1, &env->active_fpu.fp_status),
3409 float32_le(fsth0, fsth1, &env->active_fpu.fp_status))
3410 FOP_COND_PS(ngt, float32_unordered(fst1, fst0, &env->active_fpu.fp_status) || float32_le(fst0, fst1, &env->active_fpu.fp_status),
3411 float32_unordered(fsth1, fsth0, &env->active_fpu.fp_status) || float32_le(fsth0, fsth1, &env->active_fpu.fp_status))