mips: Correct IntCtl write mask for VInt
[qemu/cris-port.git] / target-mips / op_helper.c
blob03a4f1839ae615eee5303e95becd065ab5782fd4
1 /*
2 * MIPS emulation helpers for qemu.
4 * Copyright (c) 2004-2005 Jocelyn Mayer
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
19 #include <stdlib.h>
20 #include "cpu.h"
21 #include "dyngen-exec.h"
23 #include "host-utils.h"
25 #include "helper.h"
27 #if !defined(CONFIG_USER_ONLY)
28 #include "softmmu_exec.h"
29 #endif /* !defined(CONFIG_USER_ONLY) */
31 #ifndef CONFIG_USER_ONLY
32 static inline void cpu_mips_tlb_flush (CPUState *env, int flush_global);
33 #endif
/* Recompute the cached env->hflags bits that depend on CP0 Status,
   Debug mode and the FPU config.  Must be called after any change to
   Status.KSU/EXL/ERL/CU*/PX/UX/FR or to the active FPU.  */
static inline void compute_hflags(CPUState *env)
{
    env->hflags &= ~(MIPS_HFLAG_COP1X | MIPS_HFLAG_64 | MIPS_HFLAG_CP0 |
                     MIPS_HFLAG_F64 | MIPS_HFLAG_FPU | MIPS_HFLAG_KSU |
                     MIPS_HFLAG_UX);
    /* Outside exception level, error level and debug mode, the
       effective privilege level is taken from Status.KSU.  */
    if (!(env->CP0_Status & (1 << CP0St_EXL)) &&
        !(env->CP0_Status & (1 << CP0St_ERL)) &&
        !(env->hflags & MIPS_HFLAG_DM)) {
        env->hflags |= (env->CP0_Status >> CP0St_KSU) & MIPS_HFLAG_KSU;
    }
#if defined(TARGET_MIPS64)
    /* 64-bit operations are enabled outside user mode, or in user mode
       when Status.PX or Status.UX is set.  */
    if (((env->hflags & MIPS_HFLAG_KSU) != MIPS_HFLAG_UM) ||
        (env->CP0_Status & (1 << CP0St_PX)) ||
        (env->CP0_Status & (1 << CP0St_UX))) {
        env->hflags |= MIPS_HFLAG_64;
    }
    if (env->CP0_Status & (1 << CP0St_UX)) {
        env->hflags |= MIPS_HFLAG_UX;  /* 64-bit user addressing */
    }
#endif
    /* CP0 is accessible with Status.CU0 set or outside user mode.  */
    if ((env->CP0_Status & (1 << CP0St_CU0)) ||
        !(env->hflags & MIPS_HFLAG_KSU)) {
        env->hflags |= MIPS_HFLAG_CP0;
    }
    if (env->CP0_Status & (1 << CP0St_CU1)) {
        env->hflags |= MIPS_HFLAG_FPU;
    }
    if (env->CP0_Status & (1 << CP0St_FR)) {
        env->hflags |= MIPS_HFLAG_F64;
    }
    /* Availability of the COP1X (indexed/fused FP) ops depends on the
       ISA generation.  */
    if (env->insn_flags & ISA_MIPS32R2) {
        if (env->active_fpu.fcr0 & (1 << FCR0_F64)) {
            env->hflags |= MIPS_HFLAG_COP1X;
        }
    } else if (env->insn_flags & ISA_MIPS32) {
        if (env->hflags & MIPS_HFLAG_64) {
            env->hflags |= MIPS_HFLAG_COP1X;
        }
    } else if (env->insn_flags & ISA_MIPS4) {
        /* All supported MIPS IV CPUs use the XX (CU3) to enable
           and disable the MIPS IV extensions to the MIPS III ISA.
           Some other MIPS IV CPUs ignore the bit, so the check here
           would be too restrictive for them. */
        if (env->CP0_Status & (1 << CP0St_CU3)) {
            env->hflags |= MIPS_HFLAG_COP1X;
        }
    }
}
84 /*****************************************************************************/
85 /* Exceptions processing helpers */
/* Raise a guest exception with an attached error code and abandon the
   current translation block.  Does not return.  */
void helper_raise_exception_err (uint32_t exception, int error_code)
{
#if 1
    /* Log architectural exceptions only; qemu-internal exception
       numbers start at 0x100.  */
    if (exception < 0x100)
        qemu_log("%s: %d %d\n", __func__, exception, error_code);
#endif
    env->exception_index = exception;
    env->error_code = error_code;
    cpu_loop_exit(env);
}

/* Convenience wrapper: raise an exception with error code 0.  */
void helper_raise_exception (uint32_t exception)
{
    helper_raise_exception_err(exception, 0);
}
#if !defined(CONFIG_USER_ONLY)
/* Restore precise guest CPU state from the host PC of an instruction
   that faulted inside a translation block, so exception state (EPC
   etc.) is accurate before the exception is delivered.  */
static void do_restore_state (void *pc_ptr)
{
    TranslationBlock *tb;
    unsigned long pc = (unsigned long) pc_ptr;

    tb = tb_find_pc (pc);
    if (tb) {
        cpu_restore_state(tb, env, pc);
    }
}
#endif
/* Generate do_<name>() load helpers.  User-mode emulation goes straight
   to the "raw" accessors; system mode dispatches on mem_idx, which
   selects the privilege level of the access (0 = kernel, 1 = supervisor,
   2 = user).  */
#if defined(CONFIG_USER_ONLY)
#define HELPER_LD(name, insn, type)                                     \
static inline type do_##name(target_ulong addr, int mem_idx)            \
{                                                                       \
    return (type) insn##_raw(addr);                                     \
}
#else
#define HELPER_LD(name, insn, type)                                     \
static inline type do_##name(target_ulong addr, int mem_idx)            \
{                                                                       \
    switch (mem_idx)                                                    \
    {                                                                   \
    case 0: return (type) insn##_kernel(addr); break;                   \
    case 1: return (type) insn##_super(addr); break;                    \
    default:                                                            \
    case 2: return (type) insn##_user(addr); break;                     \
    }                                                                   \
}
#endif
HELPER_LD(lbu, ldub, uint8_t)
HELPER_LD(lw, ldl, int32_t)
#ifdef TARGET_MIPS64
HELPER_LD(ld, ldq, int64_t)
#endif
#undef HELPER_LD

/* Generate do_<name>() store helpers, mirroring HELPER_LD above.  */
#if defined(CONFIG_USER_ONLY)
#define HELPER_ST(name, insn, type)                                     \
static inline void do_##name(target_ulong addr, type val, int mem_idx)  \
{                                                                       \
    insn##_raw(addr, val);                                              \
}
#else
#define HELPER_ST(name, insn, type)                                     \
static inline void do_##name(target_ulong addr, type val, int mem_idx)  \
{                                                                       \
    switch (mem_idx)                                                    \
    {                                                                   \
    case 0: insn##_kernel(addr, val); break;                            \
    case 1: insn##_super(addr, val); break;                             \
    default:                                                            \
    case 2: insn##_user(addr, val); break;                              \
    }                                                                   \
}
#endif
HELPER_ST(sb, stb, uint8_t)
HELPER_ST(sw, stl, uint32_t)
#ifdef TARGET_MIPS64
HELPER_ST(sd, stq, uint64_t)
#endif
#undef HELPER_ST
/* CLO: count leading ones in the 32-bit operand.  */
target_ulong helper_clo (target_ulong arg1)
{
    return clo32(arg1);
}

/* CLZ: count leading zeroes in the 32-bit operand.  */
target_ulong helper_clz (target_ulong arg1)
{
    return clz32(arg1);
}

#if defined(TARGET_MIPS64)
/* DCLO: count leading ones in the 64-bit operand.  */
target_ulong helper_dclo (target_ulong arg1)
{
    return clo64(arg1);
}

/* DCLZ: count leading zeroes in the 64-bit operand.  */
target_ulong helper_dclz (target_ulong arg1)
{
    return clz64(arg1);
}
#endif /* TARGET_MIPS64 */
/* 64 bits arithmetic for 32 bits hosts */

/* Read the HI:LO pair of the active TC as one 64-bit value.  */
static inline uint64_t get_HILO (void)
{
    return ((uint64_t)(env->active_tc.HI[0]) << 32) | (uint32_t)env->active_tc.LO[0];
}

/* Split a 64-bit value into the HI:LO pair (each half sign-extended).  */
static inline void set_HILO (uint64_t HILO)
{
    env->active_tc.LO[0] = (int32_t)HILO;
    env->active_tc.HI[0] = (int32_t)(HILO >> 32);
}

/* NOTE(review): in the two helpers below the assignment to arg1 has no
   effect outside the function -- arg1 is passed by value.  Callers that
   do "set_HIT0_LO(arg1, x); return arg1;" therefore return their
   original operand rather than the intended HI (or LO) half of the
   result.  Upstream later fixed this by returning the value.  */
static inline void set_HIT0_LO (target_ulong arg1, uint64_t HILO)
{
    env->active_tc.LO[0] = (int32_t)(HILO & 0xFFFFFFFF);
    arg1 = env->active_tc.HI[0] = (int32_t)(HILO >> 32);
}

static inline void set_HI_LOT0 (target_ulong arg1, uint64_t HILO)
{
    arg1 = env->active_tc.LO[0] = (int32_t)(HILO & 0xFFFFFFFF);
    env->active_tc.HI[0] = (int32_t)(HILO >> 32);
}
214 /* Multiplication variants of the vr54xx. */
215 target_ulong helper_muls (target_ulong arg1, target_ulong arg2)
217 set_HI_LOT0(arg1, 0 - ((int64_t)(int32_t)arg1 * (int64_t)(int32_t)arg2));
219 return arg1;
222 target_ulong helper_mulsu (target_ulong arg1, target_ulong arg2)
224 set_HI_LOT0(arg1, 0 - ((uint64_t)(uint32_t)arg1 * (uint64_t)(uint32_t)arg2));
226 return arg1;
229 target_ulong helper_macc (target_ulong arg1, target_ulong arg2)
231 set_HI_LOT0(arg1, ((int64_t)get_HILO()) + ((int64_t)(int32_t)arg1 * (int64_t)(int32_t)arg2));
233 return arg1;
236 target_ulong helper_macchi (target_ulong arg1, target_ulong arg2)
238 set_HIT0_LO(arg1, ((int64_t)get_HILO()) + ((int64_t)(int32_t)arg1 * (int64_t)(int32_t)arg2));
240 return arg1;
243 target_ulong helper_maccu (target_ulong arg1, target_ulong arg2)
245 set_HI_LOT0(arg1, ((uint64_t)get_HILO()) + ((uint64_t)(uint32_t)arg1 * (uint64_t)(uint32_t)arg2));
247 return arg1;
250 target_ulong helper_macchiu (target_ulong arg1, target_ulong arg2)
252 set_HIT0_LO(arg1, ((uint64_t)get_HILO()) + ((uint64_t)(uint32_t)arg1 * (uint64_t)(uint32_t)arg2));
254 return arg1;
257 target_ulong helper_msac (target_ulong arg1, target_ulong arg2)
259 set_HI_LOT0(arg1, ((int64_t)get_HILO()) - ((int64_t)(int32_t)arg1 * (int64_t)(int32_t)arg2));
261 return arg1;
264 target_ulong helper_msachi (target_ulong arg1, target_ulong arg2)
266 set_HIT0_LO(arg1, ((int64_t)get_HILO()) - ((int64_t)(int32_t)arg1 * (int64_t)(int32_t)arg2));
268 return arg1;
271 target_ulong helper_msacu (target_ulong arg1, target_ulong arg2)
273 set_HI_LOT0(arg1, ((uint64_t)get_HILO()) - ((uint64_t)(uint32_t)arg1 * (uint64_t)(uint32_t)arg2));
275 return arg1;
278 target_ulong helper_msachiu (target_ulong arg1, target_ulong arg2)
280 set_HIT0_LO(arg1, ((uint64_t)get_HILO()) - ((uint64_t)(uint32_t)arg1 * (uint64_t)(uint32_t)arg2));
282 return arg1;
285 target_ulong helper_mulhi (target_ulong arg1, target_ulong arg2)
287 set_HIT0_LO(arg1, (int64_t)(int32_t)arg1 * (int64_t)(int32_t)arg2);
289 return arg1;
292 target_ulong helper_mulhiu (target_ulong arg1, target_ulong arg2)
294 set_HIT0_LO(arg1, (uint64_t)(uint32_t)arg1 * (uint64_t)(uint32_t)arg2);
296 return arg1;
299 target_ulong helper_mulshi (target_ulong arg1, target_ulong arg2)
301 set_HIT0_LO(arg1, 0 - ((int64_t)(int32_t)arg1 * (int64_t)(int32_t)arg2));
303 return arg1;
306 target_ulong helper_mulshiu (target_ulong arg1, target_ulong arg2)
308 set_HIT0_LO(arg1, 0 - ((uint64_t)(uint32_t)arg1 * (uint64_t)(uint32_t)arg2));
310 return arg1;
#ifdef TARGET_MIPS64
/* DMULT: 64x64 -> 128-bit signed multiply; low half goes to LO,
   high half to HI.  */
void helper_dmult (target_ulong arg1, target_ulong arg2)
{
    muls64(&(env->active_tc.LO[0]), &(env->active_tc.HI[0]), arg1, arg2);
}

/* DMULTU: unsigned variant of helper_dmult.  */
void helper_dmultu (target_ulong arg1, target_ulong arg2)
{
    mulu64(&(env->active_tc.LO[0]), &(env->active_tc.HI[0]), arg1, arg2);
}
#endif
#ifndef CONFIG_USER_ONLY

/* Translate a virtual address for an ll/sc access.  On a translation
   failure the MMU fault has already been queued by the translator, so
   just exit the CPU loop to deliver it (the call does not return).  */
static inline target_phys_addr_t do_translate_address(target_ulong address, int rw)
{
    target_phys_addr_t lladdr;

    lladdr = cpu_mips_translate_address(env, address, rw);

    if (lladdr == -1LL) {
        cpu_loop_exit(env);
    } else {
        return lladdr;
    }
}

/* Load-linked: remember the physical address and the loaded value so a
   later sc/scd can detect an intervening store.  */
#define HELPER_LD_ATOMIC(name, insn)                                          \
target_ulong helper_##name(target_ulong arg, int mem_idx)                     \
{                                                                             \
    env->lladdr = do_translate_address(arg, 0);                               \
    env->llval = do_##insn(arg, mem_idx);                                     \
    return env->llval;                                                        \
}
HELPER_LD_ATOMIC(ll, lw)
#ifdef TARGET_MIPS64
HELPER_LD_ATOMIC(lld, ld)
#endif
#undef HELPER_LD_ATOMIC

/* Store-conditional: raise AdES on a misaligned address; otherwise
   store and return 1 only if the linked location still holds the value
   observed by the matching ll -- return 0 on failure.  */
#define HELPER_ST_ATOMIC(name, ld_insn, st_insn, almask)                      \
target_ulong helper_##name(target_ulong arg1, target_ulong arg2, int mem_idx) \
{                                                                             \
    target_long tmp;                                                          \
                                                                              \
    if (arg2 & almask) {                                                      \
        env->CP0_BadVAddr = arg2;                                             \
        helper_raise_exception(EXCP_AdES);                                    \
    }                                                                         \
    if (do_translate_address(arg2, 1) == env->lladdr) {                       \
        tmp = do_##ld_insn(arg2, mem_idx);                                    \
        if (tmp == env->llval) {                                              \
            do_##st_insn(arg2, arg1, mem_idx);                                \
            return 1;                                                         \
        }                                                                     \
    }                                                                         \
    return 0;                                                                 \
}
HELPER_ST_ATOMIC(sc, lw, sw, 0x3)
#ifdef TARGET_MIPS64
HELPER_ST_ATOMIC(scd, ld, sd, 0x7)
#endif
#undef HELPER_ST_ATOMIC
#endif
/* GET_LMASK: offset of the addressed byte within its aligned word,
   normalized so that 0 always means the most significant byte
   regardless of target endianness.  GET_OFFSET steps towards less
   significant bytes.  The addr argument is now parenthesized in the
   expansion (macro-hygiene fix: an expression argument such as
   "a ? b : c" previously bound incorrectly).  */
#ifdef TARGET_WORDS_BIGENDIAN
#define GET_LMASK(v) ((v) & 3)
#define GET_OFFSET(addr, offset) ((addr) + (offset))
#else
#define GET_LMASK(v) (((v) & 3) ^ 3)
#define GET_OFFSET(addr, offset) ((addr) - (offset))
#endif
/* LWL: merge the bytes from addr down to the next word boundary into
   the most significant end of arg1.  The first access is done
   unconditionally so that a fault is taken before arg1 is modified.  */
target_ulong helper_lwl(target_ulong arg1, target_ulong arg2, int mem_idx)
{
    target_ulong tmp;

    tmp = do_lbu(arg2, mem_idx);
    arg1 = (arg1 & 0x00FFFFFF) | (tmp << 24);

    if (GET_LMASK(arg2) <= 2) {
        tmp = do_lbu(GET_OFFSET(arg2, 1), mem_idx);
        arg1 = (arg1 & 0xFF00FFFF) | (tmp << 16);
    }

    if (GET_LMASK(arg2) <= 1) {
        tmp = do_lbu(GET_OFFSET(arg2, 2), mem_idx);
        arg1 = (arg1 & 0xFFFF00FF) | (tmp << 8);
    }

    if (GET_LMASK(arg2) == 0) {
        tmp = do_lbu(GET_OFFSET(arg2, 3), mem_idx);
        arg1 = (arg1 & 0xFFFFFF00) | tmp;
    }
    return (int32_t)arg1;
}

/* LWR: merge the bytes from addr up to the previous word boundary into
   the least significant end of arg1.  */
target_ulong helper_lwr(target_ulong arg1, target_ulong arg2, int mem_idx)
{
    target_ulong tmp;

    tmp = do_lbu(arg2, mem_idx);
    arg1 = (arg1 & 0xFFFFFF00) | tmp;

    if (GET_LMASK(arg2) >= 1) {
        tmp = do_lbu(GET_OFFSET(arg2, -1), mem_idx);
        arg1 = (arg1 & 0xFFFF00FF) | (tmp << 8);
    }

    if (GET_LMASK(arg2) >= 2) {
        tmp = do_lbu(GET_OFFSET(arg2, -2), mem_idx);
        arg1 = (arg1 & 0xFF00FFFF) | (tmp << 16);
    }

    if (GET_LMASK(arg2) == 3) {
        tmp = do_lbu(GET_OFFSET(arg2, -3), mem_idx);
        arg1 = (arg1 & 0x00FFFFFF) | (tmp << 24);
    }
    return (int32_t)arg1;
}
/* SWL: store the most significant bytes of arg1 from addr down to the
   next word boundary (store counterpart of helper_lwl).  */
void helper_swl(target_ulong arg1, target_ulong arg2, int mem_idx)
{
    do_sb(arg2, (uint8_t)(arg1 >> 24), mem_idx);

    if (GET_LMASK(arg2) <= 2)
        do_sb(GET_OFFSET(arg2, 1), (uint8_t)(arg1 >> 16), mem_idx);

    if (GET_LMASK(arg2) <= 1)
        do_sb(GET_OFFSET(arg2, 2), (uint8_t)(arg1 >> 8), mem_idx);

    if (GET_LMASK(arg2) == 0)
        do_sb(GET_OFFSET(arg2, 3), (uint8_t)arg1, mem_idx);
}

/* SWR: store the least significant bytes of arg1 from addr up to the
   previous word boundary (store counterpart of helper_lwr).  */
void helper_swr(target_ulong arg1, target_ulong arg2, int mem_idx)
{
    do_sb(arg2, (uint8_t)arg1, mem_idx);

    if (GET_LMASK(arg2) >= 1)
        do_sb(GET_OFFSET(arg2, -1), (uint8_t)(arg1 >> 8), mem_idx);

    if (GET_LMASK(arg2) >= 2)
        do_sb(GET_OFFSET(arg2, -2), (uint8_t)(arg1 >> 16), mem_idx);

    if (GET_LMASK(arg2) == 3)
        do_sb(GET_OFFSET(arg2, -3), (uint8_t)(arg1 >> 24), mem_idx);
}
#if defined(TARGET_MIPS64)
/* "half" load and stores. We must do the memory access inline,
   or fault handling won't work. */

/* Offset of the addressed byte within its aligned doubleword,
   normalized so 0 means the most significant byte on both
   endiannesses.  */
#ifdef TARGET_WORDS_BIGENDIAN
#define GET_LMASK64(v) ((v) & 7)
#else
#define GET_LMASK64(v) (((v) & 7) ^ 7)
#endif

/* LDL: 64-bit analogue of helper_lwl -- merge bytes into the most
   significant end of arg1.  */
target_ulong helper_ldl(target_ulong arg1, target_ulong arg2, int mem_idx)
{
    uint64_t tmp;

    tmp = do_lbu(arg2, mem_idx);
    arg1 = (arg1 & 0x00FFFFFFFFFFFFFFULL) | (tmp << 56);

    if (GET_LMASK64(arg2) <= 6) {
        tmp = do_lbu(GET_OFFSET(arg2, 1), mem_idx);
        arg1 = (arg1 & 0xFF00FFFFFFFFFFFFULL) | (tmp << 48);
    }

    if (GET_LMASK64(arg2) <= 5) {
        tmp = do_lbu(GET_OFFSET(arg2, 2), mem_idx);
        arg1 = (arg1 & 0xFFFF00FFFFFFFFFFULL) | (tmp << 40);
    }

    if (GET_LMASK64(arg2) <= 4) {
        tmp = do_lbu(GET_OFFSET(arg2, 3), mem_idx);
        arg1 = (arg1 & 0xFFFFFF00FFFFFFFFULL) | (tmp << 32);
    }

    if (GET_LMASK64(arg2) <= 3) {
        tmp = do_lbu(GET_OFFSET(arg2, 4), mem_idx);
        arg1 = (arg1 & 0xFFFFFFFF00FFFFFFULL) | (tmp << 24);
    }

    if (GET_LMASK64(arg2) <= 2) {
        tmp = do_lbu(GET_OFFSET(arg2, 5), mem_idx);
        arg1 = (arg1 & 0xFFFFFFFFFF00FFFFULL) | (tmp << 16);
    }

    if (GET_LMASK64(arg2) <= 1) {
        tmp = do_lbu(GET_OFFSET(arg2, 6), mem_idx);
        arg1 = (arg1 & 0xFFFFFFFFFFFF00FFULL) | (tmp << 8);
    }

    if (GET_LMASK64(arg2) == 0) {
        tmp = do_lbu(GET_OFFSET(arg2, 7), mem_idx);
        arg1 = (arg1 & 0xFFFFFFFFFFFFFF00ULL) | tmp;
    }

    return arg1;
}

/* LDR: 64-bit analogue of helper_lwr -- merge bytes into the least
   significant end of arg1.  */
target_ulong helper_ldr(target_ulong arg1, target_ulong arg2, int mem_idx)
{
    uint64_t tmp;

    tmp = do_lbu(arg2, mem_idx);
    arg1 = (arg1 & 0xFFFFFFFFFFFFFF00ULL) | tmp;

    if (GET_LMASK64(arg2) >= 1) {
        tmp = do_lbu(GET_OFFSET(arg2, -1), mem_idx);
        arg1 = (arg1 & 0xFFFFFFFFFFFF00FFULL) | (tmp << 8);
    }

    if (GET_LMASK64(arg2) >= 2) {
        tmp = do_lbu(GET_OFFSET(arg2, -2), mem_idx);
        arg1 = (arg1 & 0xFFFFFFFFFF00FFFFULL) | (tmp << 16);
    }

    if (GET_LMASK64(arg2) >= 3) {
        tmp = do_lbu(GET_OFFSET(arg2, -3), mem_idx);
        arg1 = (arg1 & 0xFFFFFFFF00FFFFFFULL) | (tmp << 24);
    }

    if (GET_LMASK64(arg2) >= 4) {
        tmp = do_lbu(GET_OFFSET(arg2, -4), mem_idx);
        arg1 = (arg1 & 0xFFFFFF00FFFFFFFFULL) | (tmp << 32);
    }

    if (GET_LMASK64(arg2) >= 5) {
        tmp = do_lbu(GET_OFFSET(arg2, -5), mem_idx);
        arg1 = (arg1 & 0xFFFF00FFFFFFFFFFULL) | (tmp << 40);
    }

    if (GET_LMASK64(arg2) >= 6) {
        tmp = do_lbu(GET_OFFSET(arg2, -6), mem_idx);
        arg1 = (arg1 & 0xFF00FFFFFFFFFFFFULL) | (tmp << 48);
    }

    if (GET_LMASK64(arg2) == 7) {
        tmp = do_lbu(GET_OFFSET(arg2, -7), mem_idx);
        arg1 = (arg1 & 0x00FFFFFFFFFFFFFFULL) | (tmp << 56);
    }

    return arg1;
}

/* SDL: store counterpart of helper_ldl.  */
void helper_sdl(target_ulong arg1, target_ulong arg2, int mem_idx)
{
    do_sb(arg2, (uint8_t)(arg1 >> 56), mem_idx);

    if (GET_LMASK64(arg2) <= 6)
        do_sb(GET_OFFSET(arg2, 1), (uint8_t)(arg1 >> 48), mem_idx);

    if (GET_LMASK64(arg2) <= 5)
        do_sb(GET_OFFSET(arg2, 2), (uint8_t)(arg1 >> 40), mem_idx);

    if (GET_LMASK64(arg2) <= 4)
        do_sb(GET_OFFSET(arg2, 3), (uint8_t)(arg1 >> 32), mem_idx);

    if (GET_LMASK64(arg2) <= 3)
        do_sb(GET_OFFSET(arg2, 4), (uint8_t)(arg1 >> 24), mem_idx);

    if (GET_LMASK64(arg2) <= 2)
        do_sb(GET_OFFSET(arg2, 5), (uint8_t)(arg1 >> 16), mem_idx);

    if (GET_LMASK64(arg2) <= 1)
        do_sb(GET_OFFSET(arg2, 6), (uint8_t)(arg1 >> 8), mem_idx);

    if (GET_LMASK64(arg2) <= 0)  /* mask is unsigned: equivalent to == 0 */
        do_sb(GET_OFFSET(arg2, 7), (uint8_t)arg1, mem_idx);
}

/* SDR: store counterpart of helper_ldr.  */
void helper_sdr(target_ulong arg1, target_ulong arg2, int mem_idx)
{
    do_sb(arg2, (uint8_t)arg1, mem_idx);

    if (GET_LMASK64(arg2) >= 1)
        do_sb(GET_OFFSET(arg2, -1), (uint8_t)(arg1 >> 8), mem_idx);

    if (GET_LMASK64(arg2) >= 2)
        do_sb(GET_OFFSET(arg2, -2), (uint8_t)(arg1 >> 16), mem_idx);

    if (GET_LMASK64(arg2) >= 3)
        do_sb(GET_OFFSET(arg2, -3), (uint8_t)(arg1 >> 24), mem_idx);

    if (GET_LMASK64(arg2) >= 4)
        do_sb(GET_OFFSET(arg2, -4), (uint8_t)(arg1 >> 32), mem_idx);

    if (GET_LMASK64(arg2) >= 5)
        do_sb(GET_OFFSET(arg2, -5), (uint8_t)(arg1 >> 40), mem_idx);

    if (GET_LMASK64(arg2) >= 6)
        do_sb(GET_OFFSET(arg2, -6), (uint8_t)(arg1 >> 48), mem_idx);

    if (GET_LMASK64(arg2) == 7)
        do_sb(GET_OFFSET(arg2, -7), (uint8_t)(arg1 >> 56), mem_idx);
}
#endif /* TARGET_MIPS64 */
/* GPR numbers selectable by the lwm/swm/ldm/sdm reglist encoding:
   s0-s7 plus s8/fp; bit 4 of the reglist additionally selects $31.  */
static const int multiple_regs[] = { 16, 17, 18, 19, 20, 21, 22, 23, 30 };

/* Load multiple registers: read up to 9 consecutive words starting at
   addr into the registers named by reglist, using the privilege level
   given by mem_idx.  */
void helper_lwm (target_ulong addr, target_ulong reglist, uint32_t mem_idx)
{
    target_ulong base_reglist = reglist & 0xf;
    target_ulong do_r31 = reglist & 0x10;
#ifdef CONFIG_USER_ONLY
#undef ldfun
#define ldfun ldl_raw
#else
    uint32_t (*ldfun)(target_ulong);

    switch (mem_idx)
    {
    case 0: ldfun = ldl_kernel; break;
    case 1: ldfun = ldl_super; break;
    default:
    case 2: ldfun = ldl_user; break;
    }
#endif

    if (base_reglist > 0 && base_reglist <= ARRAY_SIZE (multiple_regs)) {
        target_ulong i;

        for (i = 0; i < base_reglist; i++) {
            env->active_tc.gpr[multiple_regs[i]] = (target_long) ldfun(addr);
            addr += 4;
        }
    }

    if (do_r31) {
        env->active_tc.gpr[31] = (target_long) ldfun(addr);
    }
}

/* Store multiple registers: store counterpart of helper_lwm.  */
void helper_swm (target_ulong addr, target_ulong reglist, uint32_t mem_idx)
{
    target_ulong base_reglist = reglist & 0xf;
    target_ulong do_r31 = reglist & 0x10;
#ifdef CONFIG_USER_ONLY
#undef stfun
#define stfun stl_raw
#else
    void (*stfun)(target_ulong, uint32_t);

    switch (mem_idx)
    {
    case 0: stfun = stl_kernel; break;
    case 1: stfun = stl_super; break;
    default:
    case 2: stfun = stl_user; break;
    }
#endif

    if (base_reglist > 0 && base_reglist <= ARRAY_SIZE (multiple_regs)) {
        target_ulong i;

        for (i = 0; i < base_reglist; i++) {
            stfun(addr, env->active_tc.gpr[multiple_regs[i]]);
            addr += 4;
        }
    }

    if (do_r31) {
        stfun(addr, env->active_tc.gpr[31]);
    }
}
#if defined(TARGET_MIPS64)
/* 64-bit variant of helper_lwm: loads doublewords (8-byte stride).  */
void helper_ldm (target_ulong addr, target_ulong reglist, uint32_t mem_idx)
{
    target_ulong base_reglist = reglist & 0xf;
    target_ulong do_r31 = reglist & 0x10;
#ifdef CONFIG_USER_ONLY
#undef ldfun
#define ldfun ldq_raw
#else
    uint64_t (*ldfun)(target_ulong);

    switch (mem_idx)
    {
    case 0: ldfun = ldq_kernel; break;
    case 1: ldfun = ldq_super; break;
    default:
    case 2: ldfun = ldq_user; break;
    }
#endif

    if (base_reglist > 0 && base_reglist <= ARRAY_SIZE (multiple_regs)) {
        target_ulong i;

        for (i = 0; i < base_reglist; i++) {
            env->active_tc.gpr[multiple_regs[i]] = ldfun(addr);
            addr += 8;
        }
    }

    if (do_r31) {
        env->active_tc.gpr[31] = ldfun(addr);
    }
}

/* 64-bit variant of helper_swm: stores doublewords (8-byte stride).  */
void helper_sdm (target_ulong addr, target_ulong reglist, uint32_t mem_idx)
{
    target_ulong base_reglist = reglist & 0xf;
    target_ulong do_r31 = reglist & 0x10;
#ifdef CONFIG_USER_ONLY
#undef stfun
#define stfun stq_raw
#else
    void (*stfun)(target_ulong, uint64_t);

    switch (mem_idx)
    {
    case 0: stfun = stq_kernel; break;
    case 1: stfun = stq_super; break;
    default:
    case 2: stfun = stq_user; break;
    }
#endif

    if (base_reglist > 0 && base_reglist <= ARRAY_SIZE (multiple_regs)) {
        target_ulong i;

        for (i = 0; i < base_reglist; i++) {
            stfun(addr, env->active_tc.gpr[multiple_regs[i]]);
            addr += 8;
        }
    }

    if (do_r31) {
        stfun(addr, env->active_tc.gpr[31]);
    }
}
#endif
#ifndef CONFIG_USER_ONLY
/* tc should point to an int with the value of the global TC index.
   This function will transform it into a local index within the
   returned CPUState.

   FIXME: This code assumes that all VPEs have the same number of TCs,
          which depends on runtime setup. Can probably be fixed by
          walking the list of CPUStates.  */
static CPUState *mips_cpu_map_tc(int *tc)
{
    CPUState *other;
    int vpe_idx, nr_threads = env->nr_threads;
    int tc_idx = *tc;

    if (!(env->CP0_VPEConf0 & (1 << CP0VPEC0_MVP))) {
        /* Not allowed to address other CPUs.  */
        *tc = env->current_tc;
        return env;
    }

    vpe_idx = tc_idx / nr_threads;
    *tc = tc_idx % nr_threads;
    other = qemu_get_cpu(vpe_idx);
    /* vpe_idx may exceed the number of CPUs: fall back to self.  */
    return other ? other : env;
}
777 /* The per VPE CP0_Status register shares some fields with the per TC
778 CP0_TCStatus registers. These fields are wired to the same registers,
779 so changes to either of them should be reflected on both registers.
781 Also, EntryHi shares the bottom 8 bit ASID with TCStauts.
783 These helper call synchronizes the regs for a given cpu. */
785 /* Called for updates to CP0_Status. */
786 static void sync_c0_status(CPUState *cpu, int tc)
788 int32_t tcstatus, *tcst;
789 uint32_t v = cpu->CP0_Status;
790 uint32_t cu, mx, asid, ksu;
791 uint32_t mask = ((1 << CP0TCSt_TCU3)
792 | (1 << CP0TCSt_TCU2)
793 | (1 << CP0TCSt_TCU1)
794 | (1 << CP0TCSt_TCU0)
795 | (1 << CP0TCSt_TMX)
796 | (3 << CP0TCSt_TKSU)
797 | (0xff << CP0TCSt_TASID));
799 cu = (v >> CP0St_CU0) & 0xf;
800 mx = (v >> CP0St_MX) & 0x1;
801 ksu = (v >> CP0St_KSU) & 0x3;
802 asid = env->CP0_EntryHi & 0xff;
804 tcstatus = cu << CP0TCSt_TCU0;
805 tcstatus |= mx << CP0TCSt_TMX;
806 tcstatus |= ksu << CP0TCSt_TKSU;
807 tcstatus |= asid;
809 if (tc == cpu->current_tc) {
810 tcst = &cpu->active_tc.CP0_TCStatus;
811 } else {
812 tcst = &cpu->tcs[tc].CP0_TCStatus;
815 *tcst &= ~mask;
816 *tcst |= tcstatus;
817 compute_hflags(cpu);
820 /* Called for updates to CP0_TCStatus. */
821 static void sync_c0_tcstatus(CPUState *cpu, int tc, target_ulong v)
823 uint32_t status;
824 uint32_t tcu, tmx, tasid, tksu;
825 uint32_t mask = ((1 << CP0St_CU3)
826 | (1 << CP0St_CU2)
827 | (1 << CP0St_CU1)
828 | (1 << CP0St_CU0)
829 | (1 << CP0St_MX)
830 | (3 << CP0St_KSU));
832 tcu = (v >> CP0TCSt_TCU0) & 0xf;
833 tmx = (v >> CP0TCSt_TMX) & 0x1;
834 tasid = v & 0xff;
835 tksu = (v >> CP0TCSt_TKSU) & 0x3;
837 status = tcu << CP0St_CU0;
838 status |= tmx << CP0St_MX;
839 status |= tksu << CP0St_KSU;
841 cpu->CP0_Status &= ~mask;
842 cpu->CP0_Status |= status;
844 /* Sync the TASID with EntryHi. */
845 cpu->CP0_EntryHi &= ~0xff;
846 cpu->CP0_EntryHi = tasid;
848 compute_hflags(cpu);
/* Called for updates to CP0_EntryHi: copy the new ASID into the TASID
   field of the selected TC's TCStatus.  */
static void sync_c0_entryhi(CPUState *cpu, int tc)
{
    int32_t *tcst;
    uint32_t asid, v = cpu->CP0_EntryHi;

    asid = v & 0xff;

    if (tc == cpu->current_tc) {
        tcst = &cpu->active_tc.CP0_TCStatus;
    } else {
        tcst = &cpu->tcs[tc].CP0_TCStatus;
    }

    /* TASID occupies the low 8 bits of TCStatus.  */
    *tcst &= ~0xff;
    *tcst |= asid;
}
/* CP0 helpers */

/* Read the shared (per-MVP) MVPControl register.  */
target_ulong helper_mfc0_mvpcontrol (void)
{
    return env->mvp->CP0_MVPControl;
}

/* Read the shared (per-MVP) MVPConf0 register.  */
target_ulong helper_mfc0_mvpconf0 (void)
{
    return env->mvp->CP0_MVPConf0;
}

/* Read the shared (per-MVP) MVPConf1 register.  */
target_ulong helper_mfc0_mvpconf1 (void)
{
    return env->mvp->CP0_MVPConf1;
}

/* Read CP0_Random (derived from the count timer); sign-extended.  */
target_ulong helper_mfc0_random (void)
{
    return (int32_t)cpu_mips_get_random(env);
}
890 target_ulong helper_mfc0_tcstatus (void)
892 return env->active_tc.CP0_TCStatus;
895 target_ulong helper_mftc0_tcstatus(void)
897 int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
898 CPUState *other = mips_cpu_map_tc(&other_tc);
900 if (other_tc == other->current_tc)
901 return other->active_tc.CP0_TCStatus;
902 else
903 return other->tcs[other_tc].CP0_TCStatus;
906 target_ulong helper_mfc0_tcbind (void)
908 return env->active_tc.CP0_TCBind;
911 target_ulong helper_mftc0_tcbind(void)
913 int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
914 CPUState *other = mips_cpu_map_tc(&other_tc);
916 if (other_tc == other->current_tc)
917 return other->active_tc.CP0_TCBind;
918 else
919 return other->tcs[other_tc].CP0_TCBind;
922 target_ulong helper_mfc0_tcrestart (void)
924 return env->active_tc.PC;
927 target_ulong helper_mftc0_tcrestart(void)
929 int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
930 CPUState *other = mips_cpu_map_tc(&other_tc);
932 if (other_tc == other->current_tc)
933 return other->active_tc.PC;
934 else
935 return other->tcs[other_tc].PC;
938 target_ulong helper_mfc0_tchalt (void)
940 return env->active_tc.CP0_TCHalt;
943 target_ulong helper_mftc0_tchalt(void)
945 int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
946 CPUState *other = mips_cpu_map_tc(&other_tc);
948 if (other_tc == other->current_tc)
949 return other->active_tc.CP0_TCHalt;
950 else
951 return other->tcs[other_tc].CP0_TCHalt;
954 target_ulong helper_mfc0_tccontext (void)
956 return env->active_tc.CP0_TCContext;
959 target_ulong helper_mftc0_tccontext(void)
961 int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
962 CPUState *other = mips_cpu_map_tc(&other_tc);
964 if (other_tc == other->current_tc)
965 return other->active_tc.CP0_TCContext;
966 else
967 return other->tcs[other_tc].CP0_TCContext;
970 target_ulong helper_mfc0_tcschedule (void)
972 return env->active_tc.CP0_TCSchedule;
975 target_ulong helper_mftc0_tcschedule(void)
977 int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
978 CPUState *other = mips_cpu_map_tc(&other_tc);
980 if (other_tc == other->current_tc)
981 return other->active_tc.CP0_TCSchedule;
982 else
983 return other->tcs[other_tc].CP0_TCSchedule;
986 target_ulong helper_mfc0_tcschefback (void)
988 return env->active_tc.CP0_TCScheFBack;
991 target_ulong helper_mftc0_tcschefback(void)
993 int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
994 CPUState *other = mips_cpu_map_tc(&other_tc);
996 if (other_tc == other->current_tc)
997 return other->active_tc.CP0_TCScheFBack;
998 else
999 return other->tcs[other_tc].CP0_TCScheFBack;
1002 target_ulong helper_mfc0_count (void)
1004 return (int32_t)cpu_mips_get_count(env);
1007 target_ulong helper_mftc0_entryhi(void)
1009 int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
1010 CPUState *other = mips_cpu_map_tc(&other_tc);
1012 return other->CP0_EntryHi;
/* Read CP0_Cause of the VPE owning the targeted TC.  */
target_ulong helper_mftc0_cause(void)
{
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
    int32_t tccause;
    CPUState *other = mips_cpu_map_tc(&other_tc);

    /* NOTE(review): both branches are identical -- Cause is per-VPE,
       so there is no per-TC copy to read in the else case.  */
    if (other_tc == other->current_tc) {
        tccause = other->CP0_Cause;
    } else {
        tccause = other->CP0_Cause;
    }

    return tccause;
}
1030 target_ulong helper_mftc0_status(void)
1032 int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
1033 CPUState *other = mips_cpu_map_tc(&other_tc);
1035 return other->CP0_Status;
1038 target_ulong helper_mfc0_lladdr (void)
1040 return (int32_t)(env->lladdr >> env->CP0_LLAddr_shift);
1043 target_ulong helper_mfc0_watchlo (uint32_t sel)
1045 return (int32_t)env->CP0_WatchLo[sel];
1048 target_ulong helper_mfc0_watchhi (uint32_t sel)
1050 return env->CP0_WatchHi[sel];
1053 target_ulong helper_mfc0_debug (void)
1055 target_ulong t0 = env->CP0_Debug;
1056 if (env->hflags & MIPS_HFLAG_DM)
1057 t0 |= 1 << CP0DB_DM;
1059 return t0;
1062 target_ulong helper_mftc0_debug(void)
1064 int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
1065 int32_t tcstatus;
1066 CPUState *other = mips_cpu_map_tc(&other_tc);
1068 if (other_tc == other->current_tc)
1069 tcstatus = other->active_tc.CP0_Debug_tcstatus;
1070 else
1071 tcstatus = other->tcs[other_tc].CP0_Debug_tcstatus;
1073 /* XXX: Might be wrong, check with EJTAG spec. */
1074 return (other->CP0_Debug & ~((1 << CP0DB_SSt) | (1 << CP0DB_Halt))) |
1075 (tcstatus & ((1 << CP0DB_SSt) | (1 << CP0DB_Halt)));
1078 #if defined(TARGET_MIPS64)
1079 target_ulong helper_dmfc0_tcrestart (void)
1081 return env->active_tc.PC;
1084 target_ulong helper_dmfc0_tchalt (void)
1086 return env->active_tc.CP0_TCHalt;
1089 target_ulong helper_dmfc0_tccontext (void)
1091 return env->active_tc.CP0_TCContext;
1094 target_ulong helper_dmfc0_tcschedule (void)
1096 return env->active_tc.CP0_TCSchedule;
1099 target_ulong helper_dmfc0_tcschefback (void)
1101 return env->active_tc.CP0_TCScheFBack;
1104 target_ulong helper_dmfc0_lladdr (void)
1106 return env->lladdr >> env->CP0_LLAddr_shift;
1109 target_ulong helper_dmfc0_watchlo (uint32_t sel)
1111 return env->CP0_WatchLo[sel];
1113 #endif /* TARGET_MIPS64 */
/* Write CP0_Index: preserve the probe-failure bit (bit 31) and mask the
   index with (num - 1), where num is grown to the first power of two
   strictly greater than the TLB size, so any valid index fits.  */
void helper_mtc0_index (target_ulong arg1)
{
    int num = 1;
    unsigned int tmp = env->tlb->nb_tlb;

    do {
        tmp >>= 1;
        num <<= 1;
    } while (tmp);
    env->CP0_Index = (env->CP0_Index & 0x80000000) | (arg1 & (num - 1));
}
1127 void helper_mtc0_mvpcontrol (target_ulong arg1)
1129 uint32_t mask = 0;
1130 uint32_t newval;
1132 if (env->CP0_VPEConf0 & (1 << CP0VPEC0_MVP))
1133 mask |= (1 << CP0MVPCo_CPA) | (1 << CP0MVPCo_VPC) |
1134 (1 << CP0MVPCo_EVP);
1135 if (env->mvp->CP0_MVPControl & (1 << CP0MVPCo_VPC))
1136 mask |= (1 << CP0MVPCo_STLB);
1137 newval = (env->mvp->CP0_MVPControl & ~mask) | (arg1 & mask);
1139 // TODO: Enable/disable shared TLB, enable/disable VPEs.
1141 env->mvp->CP0_MVPControl = newval;
1144 void helper_mtc0_vpecontrol (target_ulong arg1)
1146 uint32_t mask;
1147 uint32_t newval;
1149 mask = (1 << CP0VPECo_YSI) | (1 << CP0VPECo_GSI) |
1150 (1 << CP0VPECo_TE) | (0xff << CP0VPECo_TargTC);
1151 newval = (env->CP0_VPEControl & ~mask) | (arg1 & mask);
1153 /* Yield scheduler intercept not implemented. */
1154 /* Gating storage scheduler intercept not implemented. */
1156 // TODO: Enable/disable TCs.
1158 env->CP0_VPEControl = newval;
1161 void helper_mttc0_vpecontrol(target_ulong arg1)
1163 int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
1164 CPUState *other = mips_cpu_map_tc(&other_tc);
1165 uint32_t mask;
1166 uint32_t newval;
1168 mask = (1 << CP0VPECo_YSI) | (1 << CP0VPECo_GSI) |
1169 (1 << CP0VPECo_TE) | (0xff << CP0VPECo_TargTC);
1170 newval = (other->CP0_VPEControl & ~mask) | (arg1 & mask);
1172 /* TODO: Enable/disable TCs. */
1174 other->CP0_VPEControl = newval;
1177 target_ulong helper_mftc0_vpecontrol(void)
1179 int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
1180 CPUState *other = mips_cpu_map_tc(&other_tc);
1181 /* FIXME: Mask away return zero on read bits. */
1182 return other->CP0_VPEControl;
1185 target_ulong helper_mftc0_vpeconf0(void)
1187 int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
1188 CPUState *other = mips_cpu_map_tc(&other_tc);
1190 return other->CP0_VPEConf0;
1193 void helper_mtc0_vpeconf0 (target_ulong arg1)
1195 uint32_t mask = 0;
1196 uint32_t newval;
1198 if (env->CP0_VPEConf0 & (1 << CP0VPEC0_MVP)) {
1199 if (env->CP0_VPEConf0 & (1 << CP0VPEC0_VPA))
1200 mask |= (0xff << CP0VPEC0_XTC);
1201 mask |= (1 << CP0VPEC0_MVP) | (1 << CP0VPEC0_VPA);
1203 newval = (env->CP0_VPEConf0 & ~mask) | (arg1 & mask);
1205 // TODO: TC exclusive handling due to ERL/EXL.
1207 env->CP0_VPEConf0 = newval;
1210 void helper_mttc0_vpeconf0(target_ulong arg1)
1212 int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
1213 CPUState *other = mips_cpu_map_tc(&other_tc);
1214 uint32_t mask = 0;
1215 uint32_t newval;
1217 mask |= (1 << CP0VPEC0_MVP) | (1 << CP0VPEC0_VPA);
1218 newval = (other->CP0_VPEConf0 & ~mask) | (arg1 & mask);
1220 /* TODO: TC exclusive handling due to ERL/EXL. */
1221 other->CP0_VPEConf0 = newval;
1224 void helper_mtc0_vpeconf1 (target_ulong arg1)
1226 uint32_t mask = 0;
1227 uint32_t newval;
1229 if (env->mvp->CP0_MVPControl & (1 << CP0MVPCo_VPC))
1230 mask |= (0xff << CP0VPEC1_NCX) | (0xff << CP0VPEC1_NCP2) |
1231 (0xff << CP0VPEC1_NCP1);
1232 newval = (env->CP0_VPEConf1 & ~mask) | (arg1 & mask);
1234 /* UDI not implemented. */
1235 /* CP2 not implemented. */
1237 // TODO: Handle FPU (CP1) binding.
1239 env->CP0_VPEConf1 = newval;
1242 void helper_mtc0_yqmask (target_ulong arg1)
1244 /* Yield qualifier inputs not implemented. */
1245 env->CP0_YQMask = 0x00000000;
1248 void helper_mtc0_vpeopt (target_ulong arg1)
1250 env->CP0_VPEOpt = arg1 & 0x0000ffff;
1253 void helper_mtc0_entrylo0 (target_ulong arg1)
1255 /* Large physaddr (PABITS) not implemented */
1256 /* 1k pages not implemented */
1257 env->CP0_EntryLo0 = arg1 & 0x3FFFFFFF;
1260 void helper_mtc0_tcstatus (target_ulong arg1)
1262 uint32_t mask = env->CP0_TCStatus_rw_bitmask;
1263 uint32_t newval;
1265 newval = (env->active_tc.CP0_TCStatus & ~mask) | (arg1 & mask);
1267 env->active_tc.CP0_TCStatus = newval;
1268 sync_c0_tcstatus(env, env->current_tc, newval);
1271 void helper_mttc0_tcstatus (target_ulong arg1)
1273 int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
1274 CPUState *other = mips_cpu_map_tc(&other_tc);
1276 if (other_tc == other->current_tc)
1277 other->active_tc.CP0_TCStatus = arg1;
1278 else
1279 other->tcs[other_tc].CP0_TCStatus = arg1;
1280 sync_c0_tcstatus(other, other_tc, arg1);
1283 void helper_mtc0_tcbind (target_ulong arg1)
1285 uint32_t mask = (1 << CP0TCBd_TBE);
1286 uint32_t newval;
1288 if (env->mvp->CP0_MVPControl & (1 << CP0MVPCo_VPC))
1289 mask |= (1 << CP0TCBd_CurVPE);
1290 newval = (env->active_tc.CP0_TCBind & ~mask) | (arg1 & mask);
1291 env->active_tc.CP0_TCBind = newval;
1294 void helper_mttc0_tcbind (target_ulong arg1)
1296 int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
1297 uint32_t mask = (1 << CP0TCBd_TBE);
1298 uint32_t newval;
1299 CPUState *other = mips_cpu_map_tc(&other_tc);
1301 if (other->mvp->CP0_MVPControl & (1 << CP0MVPCo_VPC))
1302 mask |= (1 << CP0TCBd_CurVPE);
1303 if (other_tc == other->current_tc) {
1304 newval = (other->active_tc.CP0_TCBind & ~mask) | (arg1 & mask);
1305 other->active_tc.CP0_TCBind = newval;
1306 } else {
1307 newval = (other->tcs[other_tc].CP0_TCBind & ~mask) | (arg1 & mask);
1308 other->tcs[other_tc].CP0_TCBind = newval;
1312 void helper_mtc0_tcrestart (target_ulong arg1)
1314 env->active_tc.PC = arg1;
1315 env->active_tc.CP0_TCStatus &= ~(1 << CP0TCSt_TDS);
1316 env->lladdr = 0ULL;
1317 /* MIPS16 not implemented. */
1320 void helper_mttc0_tcrestart (target_ulong arg1)
1322 int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
1323 CPUState *other = mips_cpu_map_tc(&other_tc);
1325 if (other_tc == other->current_tc) {
1326 other->active_tc.PC = arg1;
1327 other->active_tc.CP0_TCStatus &= ~(1 << CP0TCSt_TDS);
1328 other->lladdr = 0ULL;
1329 /* MIPS16 not implemented. */
1330 } else {
1331 other->tcs[other_tc].PC = arg1;
1332 other->tcs[other_tc].CP0_TCStatus &= ~(1 << CP0TCSt_TDS);
1333 other->lladdr = 0ULL;
1334 /* MIPS16 not implemented. */
1338 void helper_mtc0_tchalt (target_ulong arg1)
1340 env->active_tc.CP0_TCHalt = arg1 & 0x1;
1342 // TODO: Halt TC / Restart (if allocated+active) TC.
1345 void helper_mttc0_tchalt (target_ulong arg1)
1347 int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
1348 CPUState *other = mips_cpu_map_tc(&other_tc);
1350 // TODO: Halt TC / Restart (if allocated+active) TC.
1352 if (other_tc == other->current_tc)
1353 other->active_tc.CP0_TCHalt = arg1;
1354 else
1355 other->tcs[other_tc].CP0_TCHalt = arg1;
1358 void helper_mtc0_tccontext (target_ulong arg1)
1360 env->active_tc.CP0_TCContext = arg1;
1363 void helper_mttc0_tccontext (target_ulong arg1)
1365 int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
1366 CPUState *other = mips_cpu_map_tc(&other_tc);
1368 if (other_tc == other->current_tc)
1369 other->active_tc.CP0_TCContext = arg1;
1370 else
1371 other->tcs[other_tc].CP0_TCContext = arg1;
1374 void helper_mtc0_tcschedule (target_ulong arg1)
1376 env->active_tc.CP0_TCSchedule = arg1;
1379 void helper_mttc0_tcschedule (target_ulong arg1)
1381 int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
1382 CPUState *other = mips_cpu_map_tc(&other_tc);
1384 if (other_tc == other->current_tc)
1385 other->active_tc.CP0_TCSchedule = arg1;
1386 else
1387 other->tcs[other_tc].CP0_TCSchedule = arg1;
1390 void helper_mtc0_tcschefback (target_ulong arg1)
1392 env->active_tc.CP0_TCScheFBack = arg1;
1395 void helper_mttc0_tcschefback (target_ulong arg1)
1397 int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
1398 CPUState *other = mips_cpu_map_tc(&other_tc);
1400 if (other_tc == other->current_tc)
1401 other->active_tc.CP0_TCScheFBack = arg1;
1402 else
1403 other->tcs[other_tc].CP0_TCScheFBack = arg1;
void helper_mtc0_entrylo1 (target_ulong arg1)
{
    /* Write EntryLo1; keep only PFN + C/D/V/G (bits 29..0). */
    /* Large physaddr (PABITS) not implemented */
    /* 1k pages not implemented */
    env->CP0_EntryLo1 = arg1 & 0x3FFFFFFF;
}
1413 void helper_mtc0_context (target_ulong arg1)
1415 env->CP0_Context = (env->CP0_Context & 0x007FFFFF) | (arg1 & ~0x007FFFFF);
1418 void helper_mtc0_pagemask (target_ulong arg1)
1420 /* 1k pages not implemented */
1421 env->CP0_PageMask = arg1 & (0x1FFFFFFF & (TARGET_PAGE_MASK << 1));
1424 void helper_mtc0_pagegrain (target_ulong arg1)
1426 /* SmartMIPS not implemented */
1427 /* Large physaddr (PABITS) not implemented */
1428 /* 1k pages not implemented */
1429 env->CP0_PageGrain = 0;
1432 void helper_mtc0_wired (target_ulong arg1)
1434 env->CP0_Wired = arg1 % env->tlb->nb_tlb;
1437 void helper_mtc0_srsconf0 (target_ulong arg1)
1439 env->CP0_SRSConf0 |= arg1 & env->CP0_SRSConf0_rw_bitmask;
1442 void helper_mtc0_srsconf1 (target_ulong arg1)
1444 env->CP0_SRSConf1 |= arg1 & env->CP0_SRSConf1_rw_bitmask;
1447 void helper_mtc0_srsconf2 (target_ulong arg1)
1449 env->CP0_SRSConf2 |= arg1 & env->CP0_SRSConf2_rw_bitmask;
1452 void helper_mtc0_srsconf3 (target_ulong arg1)
1454 env->CP0_SRSConf3 |= arg1 & env->CP0_SRSConf3_rw_bitmask;
1457 void helper_mtc0_srsconf4 (target_ulong arg1)
1459 env->CP0_SRSConf4 |= arg1 & env->CP0_SRSConf4_rw_bitmask;
1462 void helper_mtc0_hwrena (target_ulong arg1)
1464 env->CP0_HWREna = arg1 & 0x0000000F;
void helper_mtc0_count (target_ulong arg1)
{
    /* Writes to Count go through the timer code so that the running
       counter is rebased, not just overwritten. */
    cpu_mips_store_count(env, arg1);
}
void helper_mtc0_entryhi (target_ulong arg1)
{
    /* Write EntryHi: VPN2 plus the 8-bit ASID in bits 7..0. */
    target_ulong old, val;

    /* 1k pages not implemented */
    val = arg1 & ((TARGET_PAGE_MASK << 1) | 0xFF);
#if defined(TARGET_MIPS64)
    val &= env->SEGMask;
#endif
    old = env->CP0_EntryHi;
    env->CP0_EntryHi = val;
    if (env->CP0_Config3 & (1 << CP0C3_MT)) {
        /* MT present: mirror the new EntryHi into the TC state. */
        sync_c0_entryhi(env, env->current_tc);
    }
    /* If the ASID changes, flush qemu's TLB.  */
    if ((old & 0xFF) != (val & 0xFF))
        cpu_mips_tlb_flush(env, 1);
}
1491 void helper_mttc0_entryhi(target_ulong arg1)
1493 int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
1494 CPUState *other = mips_cpu_map_tc(&other_tc);
1496 other->CP0_EntryHi = arg1;
1497 sync_c0_entryhi(other, other_tc);
1500 void helper_mtc0_compare (target_ulong arg1)
1502 cpu_mips_store_compare(env, arg1);
void helper_mtc0_status (target_ulong arg1)
{
    /* Write Status through the per-CPU writable-bit mask, then resync
       either the MT TC state or the cached hflags. */
    uint32_t val, old;
    uint32_t mask = env->CP0_Status_rw_bitmask;

    val = arg1 & mask;
    old = env->CP0_Status;
    env->CP0_Status = (env->CP0_Status & ~mask) | val;
    if (env->CP0_Config3 & (1 << CP0C3_MT)) {
        sync_c0_status(env, env->current_tc);
    } else {
        compute_hflags(env);
    }

    /* Trace old/new Status and the pending-interrupt overlap with Cause. */
    if (qemu_loglevel_mask(CPU_LOG_EXEC)) {
        qemu_log("Status %08x (%08x) => %08x (%08x) Cause %08x",
                old, old & env->CP0_Cause & CP0Ca_IP_mask,
                val, val & env->CP0_Cause & CP0Ca_IP_mask,
                env->CP0_Cause);
        switch (env->hflags & MIPS_HFLAG_KSU) {
        case MIPS_HFLAG_UM: qemu_log(", UM\n"); break;
        case MIPS_HFLAG_SM: qemu_log(", SM\n"); break;
        case MIPS_HFLAG_KM: qemu_log("\n"); break;
        default: cpu_abort(env, "Invalid MMU mode!\n"); break;
        }
    }
}
void helper_mttc0_status(target_ulong arg1)
{
    /* MTTR variant: write Status on the VPE owning the target TC.
       The fixed mask clears CU3..CU0, RP, and other read-only bits;
       NOTE(review): unlike helper_mtc0_status this does not use the
       per-CPU rw bitmask — verify against the MT ASE spec. */
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
    CPUState *other = mips_cpu_map_tc(&other_tc);

    other->CP0_Status = arg1 & ~0xf1000018;
    sync_c0_status(other, other_tc);
}
void helper_mtc0_intctl (target_ulong arg1)
{
    /* vectored interrupts not implemented, no performance counters. */
    /* Only the VS (vector spacing) field, bits 9..5, is writable;
       IPTI/IPPCI and the rest of IntCtl remain read-only. */
    env->CP0_IntCtl = (env->CP0_IntCtl & ~0x000003e0) | (arg1 & 0x000003e0);
}
1548 void helper_mtc0_srsctl (target_ulong arg1)
1550 uint32_t mask = (0xf << CP0SRSCtl_ESS) | (0xf << CP0SRSCtl_PSS);
1551 env->CP0_SRSCtl = (env->CP0_SRSCtl & ~mask) | (arg1 & mask);
static void mtc0_cause(CPUState *cpu, target_ulong arg1)
{
    /* Common Cause-register write path for mtc0/mttc0.
       Writable bits: IV/WP (0x00C00300 covers IP1:IP0, IV, WP area)
       plus DC on R2 cores.  Side effects: toggling DC starts/stops the
       Count timer, and toggling IP1/IP0 raises/clears soft IRQs. */
    uint32_t mask = 0x00C00300;
    uint32_t old = cpu->CP0_Cause;
    int i;

    if (cpu->insn_flags & ISA_MIPS32R2) {
        /* Cause.DC (disable Count) exists from Release 2 onwards. */
        mask |= 1 << CP0Ca_DC;
    }

    cpu->CP0_Cause = (cpu->CP0_Cause & ~mask) | (arg1 & mask);

    if ((old ^ cpu->CP0_Cause) & (1 << CP0Ca_DC)) {
        if (cpu->CP0_Cause & (1 << CP0Ca_DC)) {
            cpu_mips_stop_count(cpu);
        } else {
            cpu_mips_start_count(cpu);
        }
    }

    /* Set/reset software interrupts */
    for (i = 0 ; i < 2 ; i++) {
        if ((old ^ cpu->CP0_Cause) & (1 << (CP0Ca_IP + i))) {
            cpu_mips_soft_irq(cpu, i, cpu->CP0_Cause & (1 << (CP0Ca_IP + i)));
        }
    }
}
1582 void helper_mtc0_cause(target_ulong arg1)
1584 mtc0_cause(env, arg1);
1587 void helper_mttc0_cause(target_ulong arg1)
1589 int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
1590 CPUState *other = mips_cpu_map_tc(&other_tc);
1592 mtc0_cause(other, arg1);
1595 target_ulong helper_mftc0_epc(void)
1597 int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
1598 CPUState *other = mips_cpu_map_tc(&other_tc);
1600 return other->CP0_EPC;
1603 target_ulong helper_mftc0_ebase(void)
1605 int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
1606 CPUState *other = mips_cpu_map_tc(&other_tc);
1608 return other->CP0_EBase;
1611 void helper_mtc0_ebase (target_ulong arg1)
1613 /* vectored interrupts not implemented */
1614 env->CP0_EBase = (env->CP0_EBase & ~0x3FFFF000) | (arg1 & 0x3FFFF000);
1617 void helper_mttc0_ebase(target_ulong arg1)
1619 int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
1620 CPUState *other = mips_cpu_map_tc(&other_tc);
1621 other->CP0_EBase = (other->CP0_EBase & ~0x3FFFF000) | (arg1 & 0x3FFFF000);
1624 target_ulong helper_mftc0_configx(target_ulong idx)
1626 int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
1627 CPUState *other = mips_cpu_map_tc(&other_tc);
1629 switch (idx) {
1630 case 0: return other->CP0_Config0;
1631 case 1: return other->CP0_Config1;
1632 case 2: return other->CP0_Config2;
1633 case 3: return other->CP0_Config3;
1634 /* 4 and 5 are reserved. */
1635 case 6: return other->CP0_Config6;
1636 case 7: return other->CP0_Config7;
1637 default:
1638 break;
1640 return 0;
1643 void helper_mtc0_config0 (target_ulong arg1)
1645 env->CP0_Config0 = (env->CP0_Config0 & 0x81FFFFF8) | (arg1 & 0x00000007);
1648 void helper_mtc0_config2 (target_ulong arg1)
1650 /* tertiary/secondary caches not implemented */
1651 env->CP0_Config2 = (env->CP0_Config2 & 0x8FFF0FFF);
1654 void helper_mtc0_lladdr (target_ulong arg1)
1656 target_long mask = env->CP0_LLAddr_rw_bitmask;
1657 arg1 = arg1 << env->CP0_LLAddr_shift;
1658 env->lladdr = (env->lladdr & ~mask) | (arg1 & mask);
void helper_mtc0_watchlo (target_ulong arg1, uint32_t sel)
{
    /* Write WatchLo[sel]: keep the address, drop the I/R/W enable bits
       (2..0) since watch exceptions are not raised. */
    /* Watch exceptions for instructions, data loads, data stores
       not implemented. */
    env->CP0_WatchLo[sel] = (arg1 & ~0x7);
}
void helper_mtc0_watchhi (target_ulong arg1, uint32_t sel)
{
    /* Write WatchHi[sel] through its writable mask; the low three
       status bits (I/R/W) are write-one-to-clear, hence the second
       statement clears any of them that were written as 1. */
    env->CP0_WatchHi[sel] = (arg1 & 0x40FF0FF8);
    env->CP0_WatchHi[sel] &= ~(env->CP0_WatchHi[sel] & arg1 & 0x7);
}
1674 void helper_mtc0_xcontext (target_ulong arg1)
1676 target_ulong mask = (1ULL << (env->SEGBITS - 7)) - 1;
1677 env->CP0_XContext = (env->CP0_XContext & mask) | (arg1 & ~mask);
1680 void helper_mtc0_framemask (target_ulong arg1)
1682 env->CP0_Framemask = arg1; /* XXX */
1685 void helper_mtc0_debug (target_ulong arg1)
1687 env->CP0_Debug = (env->CP0_Debug & 0x8C03FC1F) | (arg1 & 0x13300120);
1688 if (arg1 & (1 << CP0DB_DM))
1689 env->hflags |= MIPS_HFLAG_DM;
1690 else
1691 env->hflags &= ~MIPS_HFLAG_DM;
1694 void helper_mttc0_debug(target_ulong arg1)
1696 int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
1697 uint32_t val = arg1 & ((1 << CP0DB_SSt) | (1 << CP0DB_Halt));
1698 CPUState *other = mips_cpu_map_tc(&other_tc);
1700 /* XXX: Might be wrong, check with EJTAG spec. */
1701 if (other_tc == other->current_tc)
1702 other->active_tc.CP0_Debug_tcstatus = val;
1703 else
1704 other->tcs[other_tc].CP0_Debug_tcstatus = val;
1705 other->CP0_Debug = (other->CP0_Debug &
1706 ((1 << CP0DB_SSt) | (1 << CP0DB_Halt))) |
1707 (arg1 & ~((1 << CP0DB_SSt) | (1 << CP0DB_Halt)));
1710 void helper_mtc0_performance0 (target_ulong arg1)
1712 env->CP0_Performance0 = arg1 & 0x000007ff;
1715 void helper_mtc0_taglo (target_ulong arg1)
1717 env->CP0_TagLo = arg1 & 0xFFFFFCF6;
1720 void helper_mtc0_datalo (target_ulong arg1)
1722 env->CP0_DataLo = arg1; /* XXX */
1725 void helper_mtc0_taghi (target_ulong arg1)
1727 env->CP0_TagHi = arg1; /* XXX */
1730 void helper_mtc0_datahi (target_ulong arg1)
1732 env->CP0_DataHi = arg1; /* XXX */
1735 /* MIPS MT functions */
1736 target_ulong helper_mftgpr(uint32_t sel)
1738 int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
1739 CPUState *other = mips_cpu_map_tc(&other_tc);
1741 if (other_tc == other->current_tc)
1742 return other->active_tc.gpr[sel];
1743 else
1744 return other->tcs[other_tc].gpr[sel];
1747 target_ulong helper_mftlo(uint32_t sel)
1749 int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
1750 CPUState *other = mips_cpu_map_tc(&other_tc);
1752 if (other_tc == other->current_tc)
1753 return other->active_tc.LO[sel];
1754 else
1755 return other->tcs[other_tc].LO[sel];
1758 target_ulong helper_mfthi(uint32_t sel)
1760 int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
1761 CPUState *other = mips_cpu_map_tc(&other_tc);
1763 if (other_tc == other->current_tc)
1764 return other->active_tc.HI[sel];
1765 else
1766 return other->tcs[other_tc].HI[sel];
1769 target_ulong helper_mftacx(uint32_t sel)
1771 int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
1772 CPUState *other = mips_cpu_map_tc(&other_tc);
1774 if (other_tc == other->current_tc)
1775 return other->active_tc.ACX[sel];
1776 else
1777 return other->tcs[other_tc].ACX[sel];
1780 target_ulong helper_mftdsp(void)
1782 int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
1783 CPUState *other = mips_cpu_map_tc(&other_tc);
1785 if (other_tc == other->current_tc)
1786 return other->active_tc.DSPControl;
1787 else
1788 return other->tcs[other_tc].DSPControl;
1791 void helper_mttgpr(target_ulong arg1, uint32_t sel)
1793 int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
1794 CPUState *other = mips_cpu_map_tc(&other_tc);
1796 if (other_tc == other->current_tc)
1797 other->active_tc.gpr[sel] = arg1;
1798 else
1799 other->tcs[other_tc].gpr[sel] = arg1;
1802 void helper_mttlo(target_ulong arg1, uint32_t sel)
1804 int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
1805 CPUState *other = mips_cpu_map_tc(&other_tc);
1807 if (other_tc == other->current_tc)
1808 other->active_tc.LO[sel] = arg1;
1809 else
1810 other->tcs[other_tc].LO[sel] = arg1;
1813 void helper_mtthi(target_ulong arg1, uint32_t sel)
1815 int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
1816 CPUState *other = mips_cpu_map_tc(&other_tc);
1818 if (other_tc == other->current_tc)
1819 other->active_tc.HI[sel] = arg1;
1820 else
1821 other->tcs[other_tc].HI[sel] = arg1;
1824 void helper_mttacx(target_ulong arg1, uint32_t sel)
1826 int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
1827 CPUState *other = mips_cpu_map_tc(&other_tc);
1829 if (other_tc == other->current_tc)
1830 other->active_tc.ACX[sel] = arg1;
1831 else
1832 other->tcs[other_tc].ACX[sel] = arg1;
1835 void helper_mttdsp(target_ulong arg1)
1837 int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
1838 CPUState *other = mips_cpu_map_tc(&other_tc);
1840 if (other_tc == other->current_tc)
1841 other->active_tc.DSPControl = arg1;
1842 else
1843 other->tcs[other_tc].DSPControl = arg1;
1846 /* MIPS MT functions */
1847 target_ulong helper_dmt(void)
1849 // TODO
1850 return 0;
1853 target_ulong helper_emt(void)
1855 // TODO
1856 return 0;
1859 target_ulong helper_dvpe(void)
1861 // TODO
1862 return 0;
1865 target_ulong helper_evpe(void)
1867 // TODO
1868 return 0;
1870 #endif /* !CONFIG_USER_ONLY */
void helper_fork(target_ulong arg1, target_ulong arg2)
{
    /* MT FORK instruction: stub — allocating a new TC and seeding its
       register from rs is not implemented yet.  The assignment to the
       local parameter is a placeholder with no external effect. */
    // arg1 = rt, arg2 = rs
    arg1 = 0;
    // TODO: store to TC register
}
target_ulong helper_yield(target_ulong arg)
{
    /* MT YIELD instruction.  Negative qualifiers (except -2) may raise
       a Thread exception when YSI is enabled and the TC allows it;
       0 would deallocate the TC (unimplemented); positive qualifiers
       always raise a Thread (gating storage) exception here because
       yield qualifier inputs are not modelled.  Returns YQMask. */
    target_long arg1 = arg;

    if (arg1 < 0) {
        /* No scheduling policy implemented. */
        if (arg1 != -2) {
            if (env->CP0_VPEControl & (1 << CP0VPECo_YSI) &&
                env->active_tc.CP0_TCStatus & (1 << CP0TCSt_DT)) {
                env->CP0_VPEControl &= ~(0x7 << CP0VPECo_EXCPT);
                env->CP0_VPEControl |= 4 << CP0VPECo_EXCPT;
                helper_raise_exception(EXCP_THREAD);
            }
        }
    } else if (arg1 == 0) {
        if (0 /* TODO: TC underflow */) {
            env->CP0_VPEControl &= ~(0x7 << CP0VPECo_EXCPT);
            helper_raise_exception(EXCP_THREAD);
        } else {
            // TODO: Deallocate TC
        }
    } else if (arg1 > 0) {
        /* Yield qualifier inputs not implemented. */
        env->CP0_VPEControl &= ~(0x7 << CP0VPECo_EXCPT);
        env->CP0_VPEControl |= 2 << CP0VPECo_EXCPT;
        helper_raise_exception(EXCP_THREAD);
    }
    return env->CP0_YQMask;
}
1909 #ifndef CONFIG_USER_ONLY
1910 /* TLB management */
static void cpu_mips_tlb_flush (CPUState *env, int flush_global)
{
    /* Flush qemu's TLB and discard all shadowed entries.  */
    tlb_flush (env, flush_global);
    /* Resetting tlb_in_use to nb_tlb drops every shadow copy kept
       beyond the architected entries. */
    env->tlb->tlb_in_use = env->tlb->nb_tlb;
}
1918 static void r4k_mips_tlb_flush_extra (CPUState *env, int first)
1920 /* Discard entries from env->tlb[first] onwards. */
1921 while (env->tlb->tlb_in_use > first) {
1922 r4k_invalidate_tlb(env, --env->tlb->tlb_in_use, 0);
static void r4k_fill_tlb (int idx)
{
    /* Populate soft-TLB entry 'idx' from the CP0 EntryHi/PageMask/
       EntryLo0/EntryLo1 registers, splitting the even/odd page halves. */
    r4k_tlb_t *tlb;

    /* XXX: detect conflicting TLBs and raise a MCHECK exception when needed */
    tlb = &env->tlb->mmu.r4k.tlb[idx];
    tlb->VPN = env->CP0_EntryHi & (TARGET_PAGE_MASK << 1);
#if defined(TARGET_MIPS64)
    tlb->VPN &= env->SEGMask;
#endif
    tlb->ASID = env->CP0_EntryHi & 0xFF;
    tlb->PageMask = env->CP0_PageMask;
    /* Entry is global only if both EntryLo halves have G set. */
    tlb->G = env->CP0_EntryLo0 & env->CP0_EntryLo1 & 1;
    tlb->V0 = (env->CP0_EntryLo0 & 2) != 0;
    tlb->D0 = (env->CP0_EntryLo0 & 4) != 0;
    tlb->C0 = (env->CP0_EntryLo0 >> 3) & 0x7;
    tlb->PFN[0] = (env->CP0_EntryLo0 >> 6) << 12;
    tlb->V1 = (env->CP0_EntryLo1 & 2) != 0;
    tlb->D1 = (env->CP0_EntryLo1 & 4) != 0;
    tlb->C1 = (env->CP0_EntryLo1 >> 3) & 0x7;
    tlb->PFN[1] = (env->CP0_EntryLo1 >> 6) << 12;
}
void r4k_helper_tlbwi (void)
{
    /* TLBWI: write the entry selected by Index (P bit masked off,
       wrapped modulo the TLB size). */
    int idx;

    idx = (env->CP0_Index & ~0x80000000) % env->tlb->nb_tlb;

    /* Discard cached TLB entries.  We could avoid doing this if the
       tlbwi is just upgrading access permissions on the current entry;
       that might be a further win.  */
    r4k_mips_tlb_flush_extra (env, env->tlb->nb_tlb);

    r4k_invalidate_tlb(env, idx, 0);
    r4k_fill_tlb(idx);
}
void r4k_helper_tlbwr (void)
{
    /* TLBWR: write a (pseudo-)random TLB entry outside the wired set. */
    int r = cpu_mips_get_random(env);

    r4k_invalidate_tlb(env, r, 1);
    r4k_fill_tlb(r);
}
void r4k_helper_tlbp (void)
{
    /* TLBP: probe the TLB for an entry matching EntryHi.  On a hit,
       Index is set to the matching slot; on a miss, the P bit (31) is
       set and any matching shadow copies are discarded. */
    r4k_tlb_t *tlb;
    target_ulong mask;
    target_ulong tag;
    target_ulong VPN;
    uint8_t ASID;
    int i;

    ASID = env->CP0_EntryHi & 0xFF;
    for (i = 0; i < env->tlb->nb_tlb; i++) {
        tlb = &env->tlb->mmu.r4k.tlb[i];
        /* 1k pages are not supported. */
        mask = tlb->PageMask | ~(TARGET_PAGE_MASK << 1);
        tag = env->CP0_EntryHi & ~mask;
        VPN = tlb->VPN & ~mask;
        /* Check ASID, virtual page number & size */
        if ((tlb->G == 1 || tlb->ASID == ASID) && VPN == tag) {
            /* TLB match */
            env->CP0_Index = i;
            break;
        }
    }
    if (i == env->tlb->nb_tlb) {
        /* No match.  Discard any shadow entries, if any of them match.  */
        for (i = env->tlb->nb_tlb; i < env->tlb->tlb_in_use; i++) {
            tlb = &env->tlb->mmu.r4k.tlb[i];
            /* 1k pages are not supported. */
            mask = tlb->PageMask | ~(TARGET_PAGE_MASK << 1);
            tag = env->CP0_EntryHi & ~mask;
            VPN = tlb->VPN & ~mask;
            /* Check ASID, virtual page number & size */
            if ((tlb->G == 1 || tlb->ASID == ASID) && VPN == tag) {
                r4k_mips_tlb_flush_extra (env, i);
                break;
            }
        }

        /* Report "no match" via the P bit of Index. */
        env->CP0_Index |= 0x80000000;
    }
}
void r4k_helper_tlbr (void)
{
    /* TLBR: read the entry selected by Index back into EntryHi,
       PageMask, EntryLo0 and EntryLo1. */
    r4k_tlb_t *tlb;
    uint8_t ASID;
    int idx;

    ASID = env->CP0_EntryHi & 0xFF;
    idx = (env->CP0_Index & ~0x80000000) % env->tlb->nb_tlb;
    tlb = &env->tlb->mmu.r4k.tlb[idx];

    /* If this will change the current ASID, flush qemu's TLB.  */
    if (ASID != tlb->ASID)
        cpu_mips_tlb_flush (env, 1);

    /* Drop shadow copies before exposing the entry to software. */
    r4k_mips_tlb_flush_extra(env, env->tlb->nb_tlb);

    env->CP0_EntryHi = tlb->VPN | tlb->ASID;
    env->CP0_PageMask = tlb->PageMask;
    env->CP0_EntryLo0 = tlb->G | (tlb->V0 << 1) | (tlb->D0 << 2) |
                        (tlb->C0 << 3) | (tlb->PFN[0] >> 6);
    env->CP0_EntryLo1 = tlb->G | (tlb->V1 << 1) | (tlb->D1 << 2) |
                        (tlb->C1 << 3) | (tlb->PFN[1] >> 6);
}
2038 void helper_tlbwi(void)
2040 env->tlb->helper_tlbwi();
2043 void helper_tlbwr(void)
2045 env->tlb->helper_tlbwr();
2048 void helper_tlbp(void)
2050 env->tlb->helper_tlbp();
2053 void helper_tlbr(void)
2055 env->tlb->helper_tlbr();
2058 /* Specials */
target_ulong helper_di (void)
{
    /* DI: clear Status.IE, returning the previous Status value. */
    target_ulong t0 = env->CP0_Status;

    env->CP0_Status = t0 & ~(1 << CP0St_IE);
    return t0;
}
target_ulong helper_ei (void)
{
    /* EI: set Status.IE, returning the previous Status value. */
    target_ulong t0 = env->CP0_Status;

    env->CP0_Status = t0 | (1 << CP0St_IE);
    return t0;
}
2075 static void debug_pre_eret (void)
2077 if (qemu_loglevel_mask(CPU_LOG_EXEC)) {
2078 qemu_log("ERET: PC " TARGET_FMT_lx " EPC " TARGET_FMT_lx,
2079 env->active_tc.PC, env->CP0_EPC);
2080 if (env->CP0_Status & (1 << CP0St_ERL))
2081 qemu_log(" ErrorEPC " TARGET_FMT_lx, env->CP0_ErrorEPC);
2082 if (env->hflags & MIPS_HFLAG_DM)
2083 qemu_log(" DEPC " TARGET_FMT_lx, env->CP0_DEPC);
2084 qemu_log("\n");
2088 static void debug_post_eret (void)
2090 if (qemu_loglevel_mask(CPU_LOG_EXEC)) {
2091 qemu_log(" => PC " TARGET_FMT_lx " EPC " TARGET_FMT_lx,
2092 env->active_tc.PC, env->CP0_EPC);
2093 if (env->CP0_Status & (1 << CP0St_ERL))
2094 qemu_log(" ErrorEPC " TARGET_FMT_lx, env->CP0_ErrorEPC);
2095 if (env->hflags & MIPS_HFLAG_DM)
2096 qemu_log(" DEPC " TARGET_FMT_lx, env->CP0_DEPC);
2097 switch (env->hflags & MIPS_HFLAG_KSU) {
2098 case MIPS_HFLAG_UM: qemu_log(", UM\n"); break;
2099 case MIPS_HFLAG_SM: qemu_log(", SM\n"); break;
2100 case MIPS_HFLAG_KM: qemu_log("\n"); break;
2101 default: cpu_abort(env, "Invalid MMU mode!\n"); break;
static void set_pc (target_ulong error_pc)
{
    /* Jump to an exception-return target; bit 0 of the address selects
       MIPS16 mode and is stripped from the PC itself. */
    env->active_tc.PC = error_pc & ~(target_ulong)1;
    if (error_pc & 1) {
        env->hflags |= MIPS_HFLAG_M16;
    } else {
        env->hflags &= ~(MIPS_HFLAG_M16);
    }
}
void helper_eret (void)
{
    /* ERET: return from exception.  ERL takes priority over EXL:
       resume at ErrorEPC and clear ERL, else resume at EPC and
       clear EXL. */
    debug_pre_eret();
    if (env->CP0_Status & (1 << CP0St_ERL)) {
        set_pc(env->CP0_ErrorEPC);
        env->CP0_Status &= ~(1 << CP0St_ERL);
    } else {
        set_pc(env->CP0_EPC);
        env->CP0_Status &= ~(1 << CP0St_EXL);
    }
    compute_hflags(env);
    debug_post_eret();
    /* ERET breaks any LL/SC atomic sequence in progress. */
    env->lladdr = 1;
}
2131 void helper_deret (void)
2133 debug_pre_eret();
2134 set_pc(env->CP0_DEPC);
2136 env->hflags &= MIPS_HFLAG_DM;
2137 compute_hflags(env);
2138 debug_post_eret();
2139 env->lladdr = 1;
2141 #endif /* !CONFIG_USER_ONLY */
2143 target_ulong helper_rdhwr_cpunum(void)
2145 if ((env->hflags & MIPS_HFLAG_CP0) ||
2146 (env->CP0_HWREna & (1 << 0)))
2147 return env->CP0_EBase & 0x3ff;
2148 else
2149 helper_raise_exception(EXCP_RI);
2151 return 0;
2154 target_ulong helper_rdhwr_synci_step(void)
2156 if ((env->hflags & MIPS_HFLAG_CP0) ||
2157 (env->CP0_HWREna & (1 << 1)))
2158 return env->SYNCI_Step;
2159 else
2160 helper_raise_exception(EXCP_RI);
2162 return 0;
2165 target_ulong helper_rdhwr_cc(void)
2167 if ((env->hflags & MIPS_HFLAG_CP0) ||
2168 (env->CP0_HWREna & (1 << 2)))
2169 return env->CP0_Count;
2170 else
2171 helper_raise_exception(EXCP_RI);
2173 return 0;
2176 target_ulong helper_rdhwr_ccres(void)
2178 if ((env->hflags & MIPS_HFLAG_CP0) ||
2179 (env->CP0_HWREna & (1 << 3)))
2180 return env->CCRes;
2181 else
2182 helper_raise_exception(EXCP_RI);
2184 return 0;
2187 void helper_pmon (int function)
2189 function /= 2;
2190 switch (function) {
2191 case 2: /* TODO: char inbyte(int waitflag); */
2192 if (env->active_tc.gpr[4] == 0)
2193 env->active_tc.gpr[2] = -1;
2194 /* Fall through */
2195 case 11: /* TODO: char inbyte (void); */
2196 env->active_tc.gpr[2] = -1;
2197 break;
2198 case 3:
2199 case 12:
2200 printf("%c", (char)(env->active_tc.gpr[4] & 0xFF));
2201 break;
2202 case 17:
2203 break;
2204 case 158:
2206 unsigned char *fmt = (void *)(unsigned long)env->active_tc.gpr[4];
2207 printf("%s", fmt);
2209 break;
void helper_wait (void)
{
    /* WAIT: halt the virtual CPU until the next interrupt. */
    env->halted = 1;
    helper_raise_exception(EXCP_HLT);
}
2219 #if !defined(CONFIG_USER_ONLY)
2221 static void do_unaligned_access (target_ulong addr, int is_write, int is_user, void *retaddr);
2223 #define MMUSUFFIX _mmu
2224 #define ALIGNED_ONLY
2226 #define SHIFT 0
2227 #include "softmmu_template.h"
2229 #define SHIFT 1
2230 #include "softmmu_template.h"
2232 #define SHIFT 2
2233 #include "softmmu_template.h"
2235 #define SHIFT 3
2236 #include "softmmu_template.h"
2238 static void do_unaligned_access (target_ulong addr, int is_write, int is_user, void *retaddr)
2240 env->CP0_BadVAddr = addr;
2241 do_restore_state (retaddr);
2242 helper_raise_exception ((is_write == 1) ? EXCP_AdES : EXCP_AdEL);
2245 void tlb_fill (target_ulong addr, int is_write, int mmu_idx, void *retaddr)
2247 TranslationBlock *tb;
2248 CPUState *saved_env;
2249 unsigned long pc;
2250 int ret;
2252 /* XXX: hack to restore env in all cases, even if not called from
2253 generated code */
2254 saved_env = env;
2255 env = cpu_single_env;
2256 ret = cpu_mips_handle_mmu_fault(env, addr, is_write, mmu_idx);
2257 if (ret) {
2258 if (retaddr) {
2259 /* now we have a real cpu fault */
2260 pc = (unsigned long)retaddr;
2261 tb = tb_find_pc(pc);
2262 if (tb) {
2263 /* the PC is inside the translated code. It means that we have
2264 a virtual CPU fault */
2265 cpu_restore_state(tb, env, pc);
2268 helper_raise_exception_err(env->exception_index, env->error_code);
2270 env = saved_env;
2273 void cpu_unassigned_access(CPUState *env1, target_phys_addr_t addr,
2274 int is_write, int is_exec, int unused, int size)
2276 env = env1;
2278 if (is_exec)
2279 helper_raise_exception(EXCP_IBE);
2280 else
2281 helper_raise_exception(EXCP_DBE);
2283 #endif /* !CONFIG_USER_ONLY */
2285 /* Complex FPU operations which may need stack space. */
2287 #define FLOAT_ONE32 make_float32(0x3f8 << 20)
2288 #define FLOAT_ONE64 make_float64(0x3ffULL << 52)
2289 #define FLOAT_TWO32 make_float32(1 << 30)
2290 #define FLOAT_TWO64 make_float64(1ULL << 62)
2291 #define FLOAT_QNAN32 0x7fbfffff
2292 #define FLOAT_QNAN64 0x7ff7ffffffffffffULL
2293 #define FLOAT_SNAN32 0x7fffffff
2294 #define FLOAT_SNAN64 0x7fffffffffffffffULL
2296 /* convert MIPS rounding mode in FCR31 to IEEE library */
2297 static unsigned int ieee_rm[] = {
2298 float_round_nearest_even,
2299 float_round_to_zero,
2300 float_round_up,
2301 float_round_down
2304 #define RESTORE_ROUNDING_MODE \
2305 set_float_rounding_mode(ieee_rm[env->active_fpu.fcr31 & 3], &env->active_fpu.fp_status)
2307 #define RESTORE_FLUSH_MODE \
2308 set_flush_to_zero((env->active_fpu.fcr31 & (1 << 24)) != 0, &env->active_fpu.fp_status);
target_ulong helper_cfc1 (uint32_t reg)
{
    /* CFC1: read an FPU control register.  Registers 25/26/28 are the
       R2 partial views of FCR31 (FCCR, FEXR, FENR); 0 is FIR; any
       other selector returns the full FCSR (FCR31). */
    target_ulong arg1;

    switch (reg) {
    case 0:
        arg1 = (int32_t)env->active_fpu.fcr0;
        break;
    case 25:
        /* FCCR: condition codes CC7..CC1 (FCSR 31..25) in bits 7..1,
           CC0 (FCSR bit 23) in bit 0. */
        arg1 = ((env->active_fpu.fcr31 >> 24) & 0xfe) | ((env->active_fpu.fcr31 >> 23) & 0x1);
        break;
    case 26:
        /* FEXR: cause and flags fields only. */
        arg1 = env->active_fpu.fcr31 & 0x0003f07c;
        break;
    case 28:
        /* FENR: enables, flags and RM, plus FS (FCSR bit 24) in bit 2. */
        arg1 = (env->active_fpu.fcr31 & 0x00000f83) | ((env->active_fpu.fcr31 >> 22) & 0x4);
        break;
    default:
        arg1 = (int32_t)env->active_fpu.fcr31;
        break;
    }

    return arg1;
}
void helper_ctc1 (target_ulong arg1, uint32_t reg)
{
    /* CTC1: write an FPU control register.  25/26/28 update the FCCR/
       FEXR/FENR views of FCR31; 31 writes FCSR directly; writes with
       reserved bits set, or to other selectors, are ignored.  After a
       valid write the softfloat rounding/flush modes are refreshed and
       a pending enabled (or unimplemented-op) cause raises FPE. */
    switch(reg) {
    case 25:
        if (arg1 & 0xffffff00)
            return;
        /* FCCR view: bits 7..1 -> FCSR 31..25, bit 0 -> FCSR 23. */
        env->active_fpu.fcr31 = (env->active_fpu.fcr31 & 0x017fffff) | ((arg1 & 0xfe) << 24) |
                     ((arg1 & 0x1) << 23);
        break;
    case 26:
        if (arg1 & 0x007c0000)
            return;
        /* FEXR view: cause and flags fields. */
        env->active_fpu.fcr31 = (env->active_fpu.fcr31 & 0xfffc0f83) | (arg1 & 0x0003f07c);
        break;
    case 28:
        if (arg1 & 0x007c0000)
            return;
        /* FENR view: enables/flags/RM, with bit 2 mapping to FS. */
        env->active_fpu.fcr31 = (env->active_fpu.fcr31 & 0xfefff07c) | (arg1 & 0x00000f83) |
                     ((arg1 & 0x4) << 22);
        break;
    case 31:
        if (arg1 & 0x007c0000)
            return;
        env->active_fpu.fcr31 = arg1;
        break;
    default:
        return;
    }
    /* set rounding mode */
    RESTORE_ROUNDING_MODE;
    /* set flush-to-zero mode */
    RESTORE_FLUSH_MODE;
    set_float_exception_flags(0, &env->active_fpu.fp_status);
    /* 0x20 forces the unimplemented-operation cause bit to trap. */
    if ((GET_FP_ENABLE(env->active_fpu.fcr31) | 0x20) & GET_FP_CAUSE(env->active_fpu.fcr31))
        helper_raise_exception(EXCP_FPE);
}
2372 static inline int ieee_ex_to_mips(int xcpt)
2374 int ret = 0;
2375 if (xcpt) {
2376 if (xcpt & float_flag_invalid) {
2377 ret |= FP_INVALID;
2379 if (xcpt & float_flag_overflow) {
2380 ret |= FP_OVERFLOW;
2382 if (xcpt & float_flag_underflow) {
2383 ret |= FP_UNDERFLOW;
2385 if (xcpt & float_flag_divbyzero) {
2386 ret |= FP_DIV0;
2388 if (xcpt & float_flag_inexact) {
2389 ret |= FP_INEXACT;
2392 return ret;
static inline void update_fcr31(void)
{
    /* Fold the accumulated softfloat exception flags into FCSR: record
       them in the cause field, then either raise FPE (if the matching
       enable bit is set) or accumulate them into the sticky flags. */
    int tmp = ieee_ex_to_mips(get_float_exception_flags(&env->active_fpu.fp_status));

    SET_FP_CAUSE(env->active_fpu.fcr31, tmp);
    if (GET_FP_ENABLE(env->active_fpu.fcr31) & tmp)
        helper_raise_exception(EXCP_FPE);
    else
        UPDATE_FP_FLAGS(env->active_fpu.fcr31, tmp);
}
2406 /* Float support.
2407 Single precition routines have a "s" suffix, double precision a
2408 "d" suffix, 32bit integer "w", 64bit integer "l", paired single "ps",
2409 paired single lower "pl", paired single upper "pu". */
2411 /* unary operations, modifying fp status */
2412 uint64_t helper_float_sqrt_d(uint64_t fdt0)
2414 return float64_sqrt(fdt0, &env->active_fpu.fp_status);
2417 uint32_t helper_float_sqrt_s(uint32_t fst0)
2419 return float32_sqrt(fst0, &env->active_fpu.fp_status);
2422 uint64_t helper_float_cvtd_s(uint32_t fst0)
2424 uint64_t fdt2;
2426 set_float_exception_flags(0, &env->active_fpu.fp_status);
2427 fdt2 = float32_to_float64(fst0, &env->active_fpu.fp_status);
2428 update_fcr31();
2429 return fdt2;
2432 uint64_t helper_float_cvtd_w(uint32_t wt0)
2434 uint64_t fdt2;
2436 set_float_exception_flags(0, &env->active_fpu.fp_status);
2437 fdt2 = int32_to_float64(wt0, &env->active_fpu.fp_status);
2438 update_fcr31();
2439 return fdt2;
2442 uint64_t helper_float_cvtd_l(uint64_t dt0)
2444 uint64_t fdt2;
2446 set_float_exception_flags(0, &env->active_fpu.fp_status);
2447 fdt2 = int64_to_float64(dt0, &env->active_fpu.fp_status);
2448 update_fcr31();
2449 return fdt2;
2452 uint64_t helper_float_cvtl_d(uint64_t fdt0)
2454 uint64_t dt2;
2456 set_float_exception_flags(0, &env->active_fpu.fp_status);
2457 dt2 = float64_to_int64(fdt0, &env->active_fpu.fp_status);
2458 update_fcr31();
2459 if (GET_FP_CAUSE(env->active_fpu.fcr31) & (FP_OVERFLOW | FP_INVALID))
2460 dt2 = FLOAT_SNAN64;
2461 return dt2;
2464 uint64_t helper_float_cvtl_s(uint32_t fst0)
2466 uint64_t dt2;
2468 set_float_exception_flags(0, &env->active_fpu.fp_status);
2469 dt2 = float32_to_int64(fst0, &env->active_fpu.fp_status);
2470 update_fcr31();
2471 if (GET_FP_CAUSE(env->active_fpu.fcr31) & (FP_OVERFLOW | FP_INVALID))
2472 dt2 = FLOAT_SNAN64;
2473 return dt2;
2476 uint64_t helper_float_cvtps_pw(uint64_t dt0)
2478 uint32_t fst2;
2479 uint32_t fsth2;
2481 set_float_exception_flags(0, &env->active_fpu.fp_status);
2482 fst2 = int32_to_float32(dt0 & 0XFFFFFFFF, &env->active_fpu.fp_status);
2483 fsth2 = int32_to_float32(dt0 >> 32, &env->active_fpu.fp_status);
2484 update_fcr31();
2485 return ((uint64_t)fsth2 << 32) | fst2;
2488 uint64_t helper_float_cvtpw_ps(uint64_t fdt0)
2490 uint32_t wt2;
2491 uint32_t wth2;
2493 set_float_exception_flags(0, &env->active_fpu.fp_status);
2494 wt2 = float32_to_int32(fdt0 & 0XFFFFFFFF, &env->active_fpu.fp_status);
2495 wth2 = float32_to_int32(fdt0 >> 32, &env->active_fpu.fp_status);
2496 update_fcr31();
2497 if (GET_FP_CAUSE(env->active_fpu.fcr31) & (FP_OVERFLOW | FP_INVALID)) {
2498 wt2 = FLOAT_SNAN32;
2499 wth2 = FLOAT_SNAN32;
2501 return ((uint64_t)wth2 << 32) | wt2;
2504 uint32_t helper_float_cvts_d(uint64_t fdt0)
2506 uint32_t fst2;
2508 set_float_exception_flags(0, &env->active_fpu.fp_status);
2509 fst2 = float64_to_float32(fdt0, &env->active_fpu.fp_status);
2510 update_fcr31();
2511 return fst2;
2514 uint32_t helper_float_cvts_w(uint32_t wt0)
2516 uint32_t fst2;
2518 set_float_exception_flags(0, &env->active_fpu.fp_status);
2519 fst2 = int32_to_float32(wt0, &env->active_fpu.fp_status);
2520 update_fcr31();
2521 return fst2;
2524 uint32_t helper_float_cvts_l(uint64_t dt0)
2526 uint32_t fst2;
2528 set_float_exception_flags(0, &env->active_fpu.fp_status);
2529 fst2 = int64_to_float32(dt0, &env->active_fpu.fp_status);
2530 update_fcr31();
2531 return fst2;
2534 uint32_t helper_float_cvts_pl(uint32_t wt0)
2536 uint32_t wt2;
2538 set_float_exception_flags(0, &env->active_fpu.fp_status);
2539 wt2 = wt0;
2540 update_fcr31();
2541 return wt2;
2544 uint32_t helper_float_cvts_pu(uint32_t wth0)
2546 uint32_t wt2;
2548 set_float_exception_flags(0, &env->active_fpu.fp_status);
2549 wt2 = wth0;
2550 update_fcr31();
2551 return wt2;
2554 uint32_t helper_float_cvtw_s(uint32_t fst0)
2556 uint32_t wt2;
2558 set_float_exception_flags(0, &env->active_fpu.fp_status);
2559 wt2 = float32_to_int32(fst0, &env->active_fpu.fp_status);
2560 update_fcr31();
2561 if (GET_FP_CAUSE(env->active_fpu.fcr31) & (FP_OVERFLOW | FP_INVALID))
2562 wt2 = FLOAT_SNAN32;
2563 return wt2;
2566 uint32_t helper_float_cvtw_d(uint64_t fdt0)
2568 uint32_t wt2;
2570 set_float_exception_flags(0, &env->active_fpu.fp_status);
2571 wt2 = float64_to_int32(fdt0, &env->active_fpu.fp_status);
2572 update_fcr31();
2573 if (GET_FP_CAUSE(env->active_fpu.fcr31) & (FP_OVERFLOW | FP_INVALID))
2574 wt2 = FLOAT_SNAN32;
2575 return wt2;
2578 uint64_t helper_float_roundl_d(uint64_t fdt0)
2580 uint64_t dt2;
2582 set_float_exception_flags(0, &env->active_fpu.fp_status);
2583 set_float_rounding_mode(float_round_nearest_even, &env->active_fpu.fp_status);
2584 dt2 = float64_to_int64(fdt0, &env->active_fpu.fp_status);
2585 RESTORE_ROUNDING_MODE;
2586 update_fcr31();
2587 if (GET_FP_CAUSE(env->active_fpu.fcr31) & (FP_OVERFLOW | FP_INVALID))
2588 dt2 = FLOAT_SNAN64;
2589 return dt2;
2592 uint64_t helper_float_roundl_s(uint32_t fst0)
2594 uint64_t dt2;
2596 set_float_exception_flags(0, &env->active_fpu.fp_status);
2597 set_float_rounding_mode(float_round_nearest_even, &env->active_fpu.fp_status);
2598 dt2 = float32_to_int64(fst0, &env->active_fpu.fp_status);
2599 RESTORE_ROUNDING_MODE;
2600 update_fcr31();
2601 if (GET_FP_CAUSE(env->active_fpu.fcr31) & (FP_OVERFLOW | FP_INVALID))
2602 dt2 = FLOAT_SNAN64;
2603 return dt2;
2606 uint32_t helper_float_roundw_d(uint64_t fdt0)
2608 uint32_t wt2;
2610 set_float_exception_flags(0, &env->active_fpu.fp_status);
2611 set_float_rounding_mode(float_round_nearest_even, &env->active_fpu.fp_status);
2612 wt2 = float64_to_int32(fdt0, &env->active_fpu.fp_status);
2613 RESTORE_ROUNDING_MODE;
2614 update_fcr31();
2615 if (GET_FP_CAUSE(env->active_fpu.fcr31) & (FP_OVERFLOW | FP_INVALID))
2616 wt2 = FLOAT_SNAN32;
2617 return wt2;
2620 uint32_t helper_float_roundw_s(uint32_t fst0)
2622 uint32_t wt2;
2624 set_float_exception_flags(0, &env->active_fpu.fp_status);
2625 set_float_rounding_mode(float_round_nearest_even, &env->active_fpu.fp_status);
2626 wt2 = float32_to_int32(fst0, &env->active_fpu.fp_status);
2627 RESTORE_ROUNDING_MODE;
2628 update_fcr31();
2629 if (GET_FP_CAUSE(env->active_fpu.fcr31) & (FP_OVERFLOW | FP_INVALID))
2630 wt2 = FLOAT_SNAN32;
2631 return wt2;
2634 uint64_t helper_float_truncl_d(uint64_t fdt0)
2636 uint64_t dt2;
2638 set_float_exception_flags(0, &env->active_fpu.fp_status);
2639 dt2 = float64_to_int64_round_to_zero(fdt0, &env->active_fpu.fp_status);
2640 update_fcr31();
2641 if (GET_FP_CAUSE(env->active_fpu.fcr31) & (FP_OVERFLOW | FP_INVALID))
2642 dt2 = FLOAT_SNAN64;
2643 return dt2;
2646 uint64_t helper_float_truncl_s(uint32_t fst0)
2648 uint64_t dt2;
2650 set_float_exception_flags(0, &env->active_fpu.fp_status);
2651 dt2 = float32_to_int64_round_to_zero(fst0, &env->active_fpu.fp_status);
2652 update_fcr31();
2653 if (GET_FP_CAUSE(env->active_fpu.fcr31) & (FP_OVERFLOW | FP_INVALID))
2654 dt2 = FLOAT_SNAN64;
2655 return dt2;
2658 uint32_t helper_float_truncw_d(uint64_t fdt0)
2660 uint32_t wt2;
2662 set_float_exception_flags(0, &env->active_fpu.fp_status);
2663 wt2 = float64_to_int32_round_to_zero(fdt0, &env->active_fpu.fp_status);
2664 update_fcr31();
2665 if (GET_FP_CAUSE(env->active_fpu.fcr31) & (FP_OVERFLOW | FP_INVALID))
2666 wt2 = FLOAT_SNAN32;
2667 return wt2;
2670 uint32_t helper_float_truncw_s(uint32_t fst0)
2672 uint32_t wt2;
2674 set_float_exception_flags(0, &env->active_fpu.fp_status);
2675 wt2 = float32_to_int32_round_to_zero(fst0, &env->active_fpu.fp_status);
2676 update_fcr31();
2677 if (GET_FP_CAUSE(env->active_fpu.fcr31) & (FP_OVERFLOW | FP_INVALID))
2678 wt2 = FLOAT_SNAN32;
2679 return wt2;
2682 uint64_t helper_float_ceill_d(uint64_t fdt0)
2684 uint64_t dt2;
2686 set_float_exception_flags(0, &env->active_fpu.fp_status);
2687 set_float_rounding_mode(float_round_up, &env->active_fpu.fp_status);
2688 dt2 = float64_to_int64(fdt0, &env->active_fpu.fp_status);
2689 RESTORE_ROUNDING_MODE;
2690 update_fcr31();
2691 if (GET_FP_CAUSE(env->active_fpu.fcr31) & (FP_OVERFLOW | FP_INVALID))
2692 dt2 = FLOAT_SNAN64;
2693 return dt2;
2696 uint64_t helper_float_ceill_s(uint32_t fst0)
2698 uint64_t dt2;
2700 set_float_exception_flags(0, &env->active_fpu.fp_status);
2701 set_float_rounding_mode(float_round_up, &env->active_fpu.fp_status);
2702 dt2 = float32_to_int64(fst0, &env->active_fpu.fp_status);
2703 RESTORE_ROUNDING_MODE;
2704 update_fcr31();
2705 if (GET_FP_CAUSE(env->active_fpu.fcr31) & (FP_OVERFLOW | FP_INVALID))
2706 dt2 = FLOAT_SNAN64;
2707 return dt2;
2710 uint32_t helper_float_ceilw_d(uint64_t fdt0)
2712 uint32_t wt2;
2714 set_float_exception_flags(0, &env->active_fpu.fp_status);
2715 set_float_rounding_mode(float_round_up, &env->active_fpu.fp_status);
2716 wt2 = float64_to_int32(fdt0, &env->active_fpu.fp_status);
2717 RESTORE_ROUNDING_MODE;
2718 update_fcr31();
2719 if (GET_FP_CAUSE(env->active_fpu.fcr31) & (FP_OVERFLOW | FP_INVALID))
2720 wt2 = FLOAT_SNAN32;
2721 return wt2;
2724 uint32_t helper_float_ceilw_s(uint32_t fst0)
2726 uint32_t wt2;
2728 set_float_exception_flags(0, &env->active_fpu.fp_status);
2729 set_float_rounding_mode(float_round_up, &env->active_fpu.fp_status);
2730 wt2 = float32_to_int32(fst0, &env->active_fpu.fp_status);
2731 RESTORE_ROUNDING_MODE;
2732 update_fcr31();
2733 if (GET_FP_CAUSE(env->active_fpu.fcr31) & (FP_OVERFLOW | FP_INVALID))
2734 wt2 = FLOAT_SNAN32;
2735 return wt2;
2738 uint64_t helper_float_floorl_d(uint64_t fdt0)
2740 uint64_t dt2;
2742 set_float_exception_flags(0, &env->active_fpu.fp_status);
2743 set_float_rounding_mode(float_round_down, &env->active_fpu.fp_status);
2744 dt2 = float64_to_int64(fdt0, &env->active_fpu.fp_status);
2745 RESTORE_ROUNDING_MODE;
2746 update_fcr31();
2747 if (GET_FP_CAUSE(env->active_fpu.fcr31) & (FP_OVERFLOW | FP_INVALID))
2748 dt2 = FLOAT_SNAN64;
2749 return dt2;
2752 uint64_t helper_float_floorl_s(uint32_t fst0)
2754 uint64_t dt2;
2756 set_float_exception_flags(0, &env->active_fpu.fp_status);
2757 set_float_rounding_mode(float_round_down, &env->active_fpu.fp_status);
2758 dt2 = float32_to_int64(fst0, &env->active_fpu.fp_status);
2759 RESTORE_ROUNDING_MODE;
2760 update_fcr31();
2761 if (GET_FP_CAUSE(env->active_fpu.fcr31) & (FP_OVERFLOW | FP_INVALID))
2762 dt2 = FLOAT_SNAN64;
2763 return dt2;
2766 uint32_t helper_float_floorw_d(uint64_t fdt0)
2768 uint32_t wt2;
2770 set_float_exception_flags(0, &env->active_fpu.fp_status);
2771 set_float_rounding_mode(float_round_down, &env->active_fpu.fp_status);
2772 wt2 = float64_to_int32(fdt0, &env->active_fpu.fp_status);
2773 RESTORE_ROUNDING_MODE;
2774 update_fcr31();
2775 if (GET_FP_CAUSE(env->active_fpu.fcr31) & (FP_OVERFLOW | FP_INVALID))
2776 wt2 = FLOAT_SNAN32;
2777 return wt2;
2780 uint32_t helper_float_floorw_s(uint32_t fst0)
2782 uint32_t wt2;
2784 set_float_exception_flags(0, &env->active_fpu.fp_status);
2785 set_float_rounding_mode(float_round_down, &env->active_fpu.fp_status);
2786 wt2 = float32_to_int32(fst0, &env->active_fpu.fp_status);
2787 RESTORE_ROUNDING_MODE;
2788 update_fcr31();
2789 if (GET_FP_CAUSE(env->active_fpu.fcr31) & (FP_OVERFLOW | FP_INVALID))
2790 wt2 = FLOAT_SNAN32;
2791 return wt2;
2794 /* unary operations, not modifying fp status */
2795 #define FLOAT_UNOP(name) \
2796 uint64_t helper_float_ ## name ## _d(uint64_t fdt0) \
2798 return float64_ ## name(fdt0); \
2800 uint32_t helper_float_ ## name ## _s(uint32_t fst0) \
2802 return float32_ ## name(fst0); \
2804 uint64_t helper_float_ ## name ## _ps(uint64_t fdt0) \
2806 uint32_t wt0; \
2807 uint32_t wth0; \
2809 wt0 = float32_ ## name(fdt0 & 0XFFFFFFFF); \
2810 wth0 = float32_ ## name(fdt0 >> 32); \
2811 return ((uint64_t)wth0 << 32) | wt0; \
2813 FLOAT_UNOP(abs)
2814 FLOAT_UNOP(chs)
2815 #undef FLOAT_UNOP
2817 /* MIPS specific unary operations */
2818 uint64_t helper_float_recip_d(uint64_t fdt0)
2820 uint64_t fdt2;
2822 set_float_exception_flags(0, &env->active_fpu.fp_status);
2823 fdt2 = float64_div(FLOAT_ONE64, fdt0, &env->active_fpu.fp_status);
2824 update_fcr31();
2825 return fdt2;
2828 uint32_t helper_float_recip_s(uint32_t fst0)
2830 uint32_t fst2;
2832 set_float_exception_flags(0, &env->active_fpu.fp_status);
2833 fst2 = float32_div(FLOAT_ONE32, fst0, &env->active_fpu.fp_status);
2834 update_fcr31();
2835 return fst2;
2838 uint64_t helper_float_rsqrt_d(uint64_t fdt0)
2840 uint64_t fdt2;
2842 set_float_exception_flags(0, &env->active_fpu.fp_status);
2843 fdt2 = float64_sqrt(fdt0, &env->active_fpu.fp_status);
2844 fdt2 = float64_div(FLOAT_ONE64, fdt2, &env->active_fpu.fp_status);
2845 update_fcr31();
2846 return fdt2;
2849 uint32_t helper_float_rsqrt_s(uint32_t fst0)
2851 uint32_t fst2;
2853 set_float_exception_flags(0, &env->active_fpu.fp_status);
2854 fst2 = float32_sqrt(fst0, &env->active_fpu.fp_status);
2855 fst2 = float32_div(FLOAT_ONE32, fst2, &env->active_fpu.fp_status);
2856 update_fcr31();
2857 return fst2;
2860 uint64_t helper_float_recip1_d(uint64_t fdt0)
2862 uint64_t fdt2;
2864 set_float_exception_flags(0, &env->active_fpu.fp_status);
2865 fdt2 = float64_div(FLOAT_ONE64, fdt0, &env->active_fpu.fp_status);
2866 update_fcr31();
2867 return fdt2;
2870 uint32_t helper_float_recip1_s(uint32_t fst0)
2872 uint32_t fst2;
2874 set_float_exception_flags(0, &env->active_fpu.fp_status);
2875 fst2 = float32_div(FLOAT_ONE32, fst0, &env->active_fpu.fp_status);
2876 update_fcr31();
2877 return fst2;
2880 uint64_t helper_float_recip1_ps(uint64_t fdt0)
2882 uint32_t fst2;
2883 uint32_t fsth2;
2885 set_float_exception_flags(0, &env->active_fpu.fp_status);
2886 fst2 = float32_div(FLOAT_ONE32, fdt0 & 0XFFFFFFFF, &env->active_fpu.fp_status);
2887 fsth2 = float32_div(FLOAT_ONE32, fdt0 >> 32, &env->active_fpu.fp_status);
2888 update_fcr31();
2889 return ((uint64_t)fsth2 << 32) | fst2;
2892 uint64_t helper_float_rsqrt1_d(uint64_t fdt0)
2894 uint64_t fdt2;
2896 set_float_exception_flags(0, &env->active_fpu.fp_status);
2897 fdt2 = float64_sqrt(fdt0, &env->active_fpu.fp_status);
2898 fdt2 = float64_div(FLOAT_ONE64, fdt2, &env->active_fpu.fp_status);
2899 update_fcr31();
2900 return fdt2;
2903 uint32_t helper_float_rsqrt1_s(uint32_t fst0)
2905 uint32_t fst2;
2907 set_float_exception_flags(0, &env->active_fpu.fp_status);
2908 fst2 = float32_sqrt(fst0, &env->active_fpu.fp_status);
2909 fst2 = float32_div(FLOAT_ONE32, fst2, &env->active_fpu.fp_status);
2910 update_fcr31();
2911 return fst2;
2914 uint64_t helper_float_rsqrt1_ps(uint64_t fdt0)
2916 uint32_t fst2;
2917 uint32_t fsth2;
2919 set_float_exception_flags(0, &env->active_fpu.fp_status);
2920 fst2 = float32_sqrt(fdt0 & 0XFFFFFFFF, &env->active_fpu.fp_status);
2921 fsth2 = float32_sqrt(fdt0 >> 32, &env->active_fpu.fp_status);
2922 fst2 = float32_div(FLOAT_ONE32, fst2, &env->active_fpu.fp_status);
2923 fsth2 = float32_div(FLOAT_ONE32, fsth2, &env->active_fpu.fp_status);
2924 update_fcr31();
2925 return ((uint64_t)fsth2 << 32) | fst2;
2928 #define FLOAT_OP(name, p) void helper_float_##name##_##p(void)
2930 /* binary operations */
2931 #define FLOAT_BINOP(name) \
2932 uint64_t helper_float_ ## name ## _d(uint64_t fdt0, uint64_t fdt1) \
2934 uint64_t dt2; \
2936 set_float_exception_flags(0, &env->active_fpu.fp_status); \
2937 dt2 = float64_ ## name (fdt0, fdt1, &env->active_fpu.fp_status); \
2938 update_fcr31(); \
2939 if (GET_FP_CAUSE(env->active_fpu.fcr31) & FP_INVALID) \
2940 dt2 = FLOAT_QNAN64; \
2941 return dt2; \
2944 uint32_t helper_float_ ## name ## _s(uint32_t fst0, uint32_t fst1) \
2946 uint32_t wt2; \
2948 set_float_exception_flags(0, &env->active_fpu.fp_status); \
2949 wt2 = float32_ ## name (fst0, fst1, &env->active_fpu.fp_status); \
2950 update_fcr31(); \
2951 if (GET_FP_CAUSE(env->active_fpu.fcr31) & FP_INVALID) \
2952 wt2 = FLOAT_QNAN32; \
2953 return wt2; \
2956 uint64_t helper_float_ ## name ## _ps(uint64_t fdt0, uint64_t fdt1) \
2958 uint32_t fst0 = fdt0 & 0XFFFFFFFF; \
2959 uint32_t fsth0 = fdt0 >> 32; \
2960 uint32_t fst1 = fdt1 & 0XFFFFFFFF; \
2961 uint32_t fsth1 = fdt1 >> 32; \
2962 uint32_t wt2; \
2963 uint32_t wth2; \
2965 set_float_exception_flags(0, &env->active_fpu.fp_status); \
2966 wt2 = float32_ ## name (fst0, fst1, &env->active_fpu.fp_status); \
2967 wth2 = float32_ ## name (fsth0, fsth1, &env->active_fpu.fp_status); \
2968 update_fcr31(); \
2969 if (GET_FP_CAUSE(env->active_fpu.fcr31) & FP_INVALID) { \
2970 wt2 = FLOAT_QNAN32; \
2971 wth2 = FLOAT_QNAN32; \
2973 return ((uint64_t)wth2 << 32) | wt2; \
2976 FLOAT_BINOP(add)
2977 FLOAT_BINOP(sub)
2978 FLOAT_BINOP(mul)
2979 FLOAT_BINOP(div)
2980 #undef FLOAT_BINOP
2982 /* ternary operations */
2983 #define FLOAT_TERNOP(name1, name2) \
2984 uint64_t helper_float_ ## name1 ## name2 ## _d(uint64_t fdt0, uint64_t fdt1, \
2985 uint64_t fdt2) \
2987 fdt0 = float64_ ## name1 (fdt0, fdt1, &env->active_fpu.fp_status); \
2988 return float64_ ## name2 (fdt0, fdt2, &env->active_fpu.fp_status); \
2991 uint32_t helper_float_ ## name1 ## name2 ## _s(uint32_t fst0, uint32_t fst1, \
2992 uint32_t fst2) \
2994 fst0 = float32_ ## name1 (fst0, fst1, &env->active_fpu.fp_status); \
2995 return float32_ ## name2 (fst0, fst2, &env->active_fpu.fp_status); \
2998 uint64_t helper_float_ ## name1 ## name2 ## _ps(uint64_t fdt0, uint64_t fdt1, \
2999 uint64_t fdt2) \
3001 uint32_t fst0 = fdt0 & 0XFFFFFFFF; \
3002 uint32_t fsth0 = fdt0 >> 32; \
3003 uint32_t fst1 = fdt1 & 0XFFFFFFFF; \
3004 uint32_t fsth1 = fdt1 >> 32; \
3005 uint32_t fst2 = fdt2 & 0XFFFFFFFF; \
3006 uint32_t fsth2 = fdt2 >> 32; \
3008 fst0 = float32_ ## name1 (fst0, fst1, &env->active_fpu.fp_status); \
3009 fsth0 = float32_ ## name1 (fsth0, fsth1, &env->active_fpu.fp_status); \
3010 fst2 = float32_ ## name2 (fst0, fst2, &env->active_fpu.fp_status); \
3011 fsth2 = float32_ ## name2 (fsth0, fsth2, &env->active_fpu.fp_status); \
3012 return ((uint64_t)fsth2 << 32) | fst2; \
3015 FLOAT_TERNOP(mul, add)
3016 FLOAT_TERNOP(mul, sub)
3017 #undef FLOAT_TERNOP
3019 /* negated ternary operations */
3020 #define FLOAT_NTERNOP(name1, name2) \
3021 uint64_t helper_float_n ## name1 ## name2 ## _d(uint64_t fdt0, uint64_t fdt1, \
3022 uint64_t fdt2) \
3024 fdt0 = float64_ ## name1 (fdt0, fdt1, &env->active_fpu.fp_status); \
3025 fdt2 = float64_ ## name2 (fdt0, fdt2, &env->active_fpu.fp_status); \
3026 return float64_chs(fdt2); \
3029 uint32_t helper_float_n ## name1 ## name2 ## _s(uint32_t fst0, uint32_t fst1, \
3030 uint32_t fst2) \
3032 fst0 = float32_ ## name1 (fst0, fst1, &env->active_fpu.fp_status); \
3033 fst2 = float32_ ## name2 (fst0, fst2, &env->active_fpu.fp_status); \
3034 return float32_chs(fst2); \
3037 uint64_t helper_float_n ## name1 ## name2 ## _ps(uint64_t fdt0, uint64_t fdt1,\
3038 uint64_t fdt2) \
3040 uint32_t fst0 = fdt0 & 0XFFFFFFFF; \
3041 uint32_t fsth0 = fdt0 >> 32; \
3042 uint32_t fst1 = fdt1 & 0XFFFFFFFF; \
3043 uint32_t fsth1 = fdt1 >> 32; \
3044 uint32_t fst2 = fdt2 & 0XFFFFFFFF; \
3045 uint32_t fsth2 = fdt2 >> 32; \
3047 fst0 = float32_ ## name1 (fst0, fst1, &env->active_fpu.fp_status); \
3048 fsth0 = float32_ ## name1 (fsth0, fsth1, &env->active_fpu.fp_status); \
3049 fst2 = float32_ ## name2 (fst0, fst2, &env->active_fpu.fp_status); \
3050 fsth2 = float32_ ## name2 (fsth0, fsth2, &env->active_fpu.fp_status); \
3051 fst2 = float32_chs(fst2); \
3052 fsth2 = float32_chs(fsth2); \
3053 return ((uint64_t)fsth2 << 32) | fst2; \
3056 FLOAT_NTERNOP(mul, add)
3057 FLOAT_NTERNOP(mul, sub)
3058 #undef FLOAT_NTERNOP
3060 /* MIPS specific binary operations */
3061 uint64_t helper_float_recip2_d(uint64_t fdt0, uint64_t fdt2)
3063 set_float_exception_flags(0, &env->active_fpu.fp_status);
3064 fdt2 = float64_mul(fdt0, fdt2, &env->active_fpu.fp_status);
3065 fdt2 = float64_chs(float64_sub(fdt2, FLOAT_ONE64, &env->active_fpu.fp_status));
3066 update_fcr31();
3067 return fdt2;
3070 uint32_t helper_float_recip2_s(uint32_t fst0, uint32_t fst2)
3072 set_float_exception_flags(0, &env->active_fpu.fp_status);
3073 fst2 = float32_mul(fst0, fst2, &env->active_fpu.fp_status);
3074 fst2 = float32_chs(float32_sub(fst2, FLOAT_ONE32, &env->active_fpu.fp_status));
3075 update_fcr31();
3076 return fst2;
3079 uint64_t helper_float_recip2_ps(uint64_t fdt0, uint64_t fdt2)
3081 uint32_t fst0 = fdt0 & 0XFFFFFFFF;
3082 uint32_t fsth0 = fdt0 >> 32;
3083 uint32_t fst2 = fdt2 & 0XFFFFFFFF;
3084 uint32_t fsth2 = fdt2 >> 32;
3086 set_float_exception_flags(0, &env->active_fpu.fp_status);
3087 fst2 = float32_mul(fst0, fst2, &env->active_fpu.fp_status);
3088 fsth2 = float32_mul(fsth0, fsth2, &env->active_fpu.fp_status);
3089 fst2 = float32_chs(float32_sub(fst2, FLOAT_ONE32, &env->active_fpu.fp_status));
3090 fsth2 = float32_chs(float32_sub(fsth2, FLOAT_ONE32, &env->active_fpu.fp_status));
3091 update_fcr31();
3092 return ((uint64_t)fsth2 << 32) | fst2;
3095 uint64_t helper_float_rsqrt2_d(uint64_t fdt0, uint64_t fdt2)
3097 set_float_exception_flags(0, &env->active_fpu.fp_status);
3098 fdt2 = float64_mul(fdt0, fdt2, &env->active_fpu.fp_status);
3099 fdt2 = float64_sub(fdt2, FLOAT_ONE64, &env->active_fpu.fp_status);
3100 fdt2 = float64_chs(float64_div(fdt2, FLOAT_TWO64, &env->active_fpu.fp_status));
3101 update_fcr31();
3102 return fdt2;
3105 uint32_t helper_float_rsqrt2_s(uint32_t fst0, uint32_t fst2)
3107 set_float_exception_flags(0, &env->active_fpu.fp_status);
3108 fst2 = float32_mul(fst0, fst2, &env->active_fpu.fp_status);
3109 fst2 = float32_sub(fst2, FLOAT_ONE32, &env->active_fpu.fp_status);
3110 fst2 = float32_chs(float32_div(fst2, FLOAT_TWO32, &env->active_fpu.fp_status));
3111 update_fcr31();
3112 return fst2;
3115 uint64_t helper_float_rsqrt2_ps(uint64_t fdt0, uint64_t fdt2)
3117 uint32_t fst0 = fdt0 & 0XFFFFFFFF;
3118 uint32_t fsth0 = fdt0 >> 32;
3119 uint32_t fst2 = fdt2 & 0XFFFFFFFF;
3120 uint32_t fsth2 = fdt2 >> 32;
3122 set_float_exception_flags(0, &env->active_fpu.fp_status);
3123 fst2 = float32_mul(fst0, fst2, &env->active_fpu.fp_status);
3124 fsth2 = float32_mul(fsth0, fsth2, &env->active_fpu.fp_status);
3125 fst2 = float32_sub(fst2, FLOAT_ONE32, &env->active_fpu.fp_status);
3126 fsth2 = float32_sub(fsth2, FLOAT_ONE32, &env->active_fpu.fp_status);
3127 fst2 = float32_chs(float32_div(fst2, FLOAT_TWO32, &env->active_fpu.fp_status));
3128 fsth2 = float32_chs(float32_div(fsth2, FLOAT_TWO32, &env->active_fpu.fp_status));
3129 update_fcr31();
3130 return ((uint64_t)fsth2 << 32) | fst2;
3133 uint64_t helper_float_addr_ps(uint64_t fdt0, uint64_t fdt1)
3135 uint32_t fst0 = fdt0 & 0XFFFFFFFF;
3136 uint32_t fsth0 = fdt0 >> 32;
3137 uint32_t fst1 = fdt1 & 0XFFFFFFFF;
3138 uint32_t fsth1 = fdt1 >> 32;
3139 uint32_t fst2;
3140 uint32_t fsth2;
3142 set_float_exception_flags(0, &env->active_fpu.fp_status);
3143 fst2 = float32_add (fst0, fsth0, &env->active_fpu.fp_status);
3144 fsth2 = float32_add (fst1, fsth1, &env->active_fpu.fp_status);
3145 update_fcr31();
3146 return ((uint64_t)fsth2 << 32) | fst2;
3149 uint64_t helper_float_mulr_ps(uint64_t fdt0, uint64_t fdt1)
3151 uint32_t fst0 = fdt0 & 0XFFFFFFFF;
3152 uint32_t fsth0 = fdt0 >> 32;
3153 uint32_t fst1 = fdt1 & 0XFFFFFFFF;
3154 uint32_t fsth1 = fdt1 >> 32;
3155 uint32_t fst2;
3156 uint32_t fsth2;
3158 set_float_exception_flags(0, &env->active_fpu.fp_status);
3159 fst2 = float32_mul (fst0, fsth0, &env->active_fpu.fp_status);
3160 fsth2 = float32_mul (fst1, fsth1, &env->active_fpu.fp_status);
3161 update_fcr31();
3162 return ((uint64_t)fsth2 << 32) | fst2;
3165 /* compare operations */
3166 #define FOP_COND_D(op, cond) \
3167 void helper_cmp_d_ ## op (uint64_t fdt0, uint64_t fdt1, int cc) \
3169 int c; \
3170 set_float_exception_flags(0, &env->active_fpu.fp_status); \
3171 c = cond; \
3172 update_fcr31(); \
3173 if (c) \
3174 SET_FP_COND(cc, env->active_fpu); \
3175 else \
3176 CLEAR_FP_COND(cc, env->active_fpu); \
3178 void helper_cmpabs_d_ ## op (uint64_t fdt0, uint64_t fdt1, int cc) \
3180 int c; \
3181 set_float_exception_flags(0, &env->active_fpu.fp_status); \
3182 fdt0 = float64_abs(fdt0); \
3183 fdt1 = float64_abs(fdt1); \
3184 c = cond; \
3185 update_fcr31(); \
3186 if (c) \
3187 SET_FP_COND(cc, env->active_fpu); \
3188 else \
3189 CLEAR_FP_COND(cc, env->active_fpu); \
3192 /* NOTE: the comma operator will make "cond" to eval to false,
3193 * but float64_unordered_quiet() is still called. */
3194 FOP_COND_D(f, (float64_unordered_quiet(fdt1, fdt0, &env->active_fpu.fp_status), 0))
3195 FOP_COND_D(un, float64_unordered_quiet(fdt1, fdt0, &env->active_fpu.fp_status))
3196 FOP_COND_D(eq, float64_eq_quiet(fdt0, fdt1, &env->active_fpu.fp_status))
3197 FOP_COND_D(ueq, float64_unordered_quiet(fdt1, fdt0, &env->active_fpu.fp_status) || float64_eq_quiet(fdt0, fdt1, &env->active_fpu.fp_status))
3198 FOP_COND_D(olt, float64_lt_quiet(fdt0, fdt1, &env->active_fpu.fp_status))
3199 FOP_COND_D(ult, float64_unordered_quiet(fdt1, fdt0, &env->active_fpu.fp_status) || float64_lt_quiet(fdt0, fdt1, &env->active_fpu.fp_status))
3200 FOP_COND_D(ole, float64_le_quiet(fdt0, fdt1, &env->active_fpu.fp_status))
3201 FOP_COND_D(ule, float64_unordered_quiet(fdt1, fdt0, &env->active_fpu.fp_status) || float64_le_quiet(fdt0, fdt1, &env->active_fpu.fp_status))
3202 /* NOTE: the comma operator will make "cond" to eval to false,
3203 * but float64_unordered() is still called. */
3204 FOP_COND_D(sf, (float64_unordered(fdt1, fdt0, &env->active_fpu.fp_status), 0))
3205 FOP_COND_D(ngle,float64_unordered(fdt1, fdt0, &env->active_fpu.fp_status))
3206 FOP_COND_D(seq, float64_eq(fdt0, fdt1, &env->active_fpu.fp_status))
3207 FOP_COND_D(ngl, float64_unordered(fdt1, fdt0, &env->active_fpu.fp_status) || float64_eq(fdt0, fdt1, &env->active_fpu.fp_status))
3208 FOP_COND_D(lt, float64_lt(fdt0, fdt1, &env->active_fpu.fp_status))
3209 FOP_COND_D(nge, float64_unordered(fdt1, fdt0, &env->active_fpu.fp_status) || float64_lt(fdt0, fdt1, &env->active_fpu.fp_status))
3210 FOP_COND_D(le, float64_le(fdt0, fdt1, &env->active_fpu.fp_status))
3211 FOP_COND_D(ngt, float64_unordered(fdt1, fdt0, &env->active_fpu.fp_status) || float64_le(fdt0, fdt1, &env->active_fpu.fp_status))
3213 #define FOP_COND_S(op, cond) \
3214 void helper_cmp_s_ ## op (uint32_t fst0, uint32_t fst1, int cc) \
3216 int c; \
3217 set_float_exception_flags(0, &env->active_fpu.fp_status); \
3218 c = cond; \
3219 update_fcr31(); \
3220 if (c) \
3221 SET_FP_COND(cc, env->active_fpu); \
3222 else \
3223 CLEAR_FP_COND(cc, env->active_fpu); \
3225 void helper_cmpabs_s_ ## op (uint32_t fst0, uint32_t fst1, int cc) \
3227 int c; \
3228 set_float_exception_flags(0, &env->active_fpu.fp_status); \
3229 fst0 = float32_abs(fst0); \
3230 fst1 = float32_abs(fst1); \
3231 c = cond; \
3232 update_fcr31(); \
3233 if (c) \
3234 SET_FP_COND(cc, env->active_fpu); \
3235 else \
3236 CLEAR_FP_COND(cc, env->active_fpu); \
3239 /* NOTE: the comma operator will make "cond" to eval to false,
3240 * but float32_unordered_quiet() is still called. */
3241 FOP_COND_S(f, (float32_unordered_quiet(fst1, fst0, &env->active_fpu.fp_status), 0))
3242 FOP_COND_S(un, float32_unordered_quiet(fst1, fst0, &env->active_fpu.fp_status))
3243 FOP_COND_S(eq, float32_eq_quiet(fst0, fst1, &env->active_fpu.fp_status))
3244 FOP_COND_S(ueq, float32_unordered_quiet(fst1, fst0, &env->active_fpu.fp_status) || float32_eq_quiet(fst0, fst1, &env->active_fpu.fp_status))
3245 FOP_COND_S(olt, float32_lt_quiet(fst0, fst1, &env->active_fpu.fp_status))
3246 FOP_COND_S(ult, float32_unordered_quiet(fst1, fst0, &env->active_fpu.fp_status) || float32_lt_quiet(fst0, fst1, &env->active_fpu.fp_status))
3247 FOP_COND_S(ole, float32_le_quiet(fst0, fst1, &env->active_fpu.fp_status))
3248 FOP_COND_S(ule, float32_unordered_quiet(fst1, fst0, &env->active_fpu.fp_status) || float32_le_quiet(fst0, fst1, &env->active_fpu.fp_status))
3249 /* NOTE: the comma operator will make "cond" to eval to false,
3250 * but float32_unordered() is still called. */
3251 FOP_COND_S(sf, (float32_unordered(fst1, fst0, &env->active_fpu.fp_status), 0))
3252 FOP_COND_S(ngle,float32_unordered(fst1, fst0, &env->active_fpu.fp_status))
3253 FOP_COND_S(seq, float32_eq(fst0, fst1, &env->active_fpu.fp_status))
3254 FOP_COND_S(ngl, float32_unordered(fst1, fst0, &env->active_fpu.fp_status) || float32_eq(fst0, fst1, &env->active_fpu.fp_status))
3255 FOP_COND_S(lt, float32_lt(fst0, fst1, &env->active_fpu.fp_status))
3256 FOP_COND_S(nge, float32_unordered(fst1, fst0, &env->active_fpu.fp_status) || float32_lt(fst0, fst1, &env->active_fpu.fp_status))
3257 FOP_COND_S(le, float32_le(fst0, fst1, &env->active_fpu.fp_status))
3258 FOP_COND_S(ngt, float32_unordered(fst1, fst0, &env->active_fpu.fp_status) || float32_le(fst0, fst1, &env->active_fpu.fp_status))
3260 #define FOP_COND_PS(op, condl, condh) \
3261 void helper_cmp_ps_ ## op (uint64_t fdt0, uint64_t fdt1, int cc) \
3263 uint32_t fst0, fsth0, fst1, fsth1; \
3264 int ch, cl; \
3265 set_float_exception_flags(0, &env->active_fpu.fp_status); \
3266 fst0 = fdt0 & 0XFFFFFFFF; \
3267 fsth0 = fdt0 >> 32; \
3268 fst1 = fdt1 & 0XFFFFFFFF; \
3269 fsth1 = fdt1 >> 32; \
3270 cl = condl; \
3271 ch = condh; \
3272 update_fcr31(); \
3273 if (cl) \
3274 SET_FP_COND(cc, env->active_fpu); \
3275 else \
3276 CLEAR_FP_COND(cc, env->active_fpu); \
3277 if (ch) \
3278 SET_FP_COND(cc + 1, env->active_fpu); \
3279 else \
3280 CLEAR_FP_COND(cc + 1, env->active_fpu); \
3282 void helper_cmpabs_ps_ ## op (uint64_t fdt0, uint64_t fdt1, int cc) \
3284 uint32_t fst0, fsth0, fst1, fsth1; \
3285 int ch, cl; \
3286 fst0 = float32_abs(fdt0 & 0XFFFFFFFF); \
3287 fsth0 = float32_abs(fdt0 >> 32); \
3288 fst1 = float32_abs(fdt1 & 0XFFFFFFFF); \
3289 fsth1 = float32_abs(fdt1 >> 32); \
3290 cl = condl; \
3291 ch = condh; \
3292 update_fcr31(); \
3293 if (cl) \
3294 SET_FP_COND(cc, env->active_fpu); \
3295 else \
3296 CLEAR_FP_COND(cc, env->active_fpu); \
3297 if (ch) \
3298 SET_FP_COND(cc + 1, env->active_fpu); \
3299 else \
3300 CLEAR_FP_COND(cc + 1, env->active_fpu); \
3303 /* NOTE: the comma operator will make "cond" to eval to false,
3304 * but float32_unordered_quiet() is still called. */
3305 FOP_COND_PS(f, (float32_unordered_quiet(fst1, fst0, &env->active_fpu.fp_status), 0),
3306 (float32_unordered_quiet(fsth1, fsth0, &env->active_fpu.fp_status), 0))
3307 FOP_COND_PS(un, float32_unordered_quiet(fst1, fst0, &env->active_fpu.fp_status),
3308 float32_unordered_quiet(fsth1, fsth0, &env->active_fpu.fp_status))
3309 FOP_COND_PS(eq, float32_eq_quiet(fst0, fst1, &env->active_fpu.fp_status),
3310 float32_eq_quiet(fsth0, fsth1, &env->active_fpu.fp_status))
3311 FOP_COND_PS(ueq, float32_unordered_quiet(fst1, fst0, &env->active_fpu.fp_status) || float32_eq_quiet(fst0, fst1, &env->active_fpu.fp_status),
3312 float32_unordered_quiet(fsth1, fsth0, &env->active_fpu.fp_status) || float32_eq_quiet(fsth0, fsth1, &env->active_fpu.fp_status))
3313 FOP_COND_PS(olt, float32_lt_quiet(fst0, fst1, &env->active_fpu.fp_status),
3314 float32_lt_quiet(fsth0, fsth1, &env->active_fpu.fp_status))
3315 FOP_COND_PS(ult, float32_unordered_quiet(fst1, fst0, &env->active_fpu.fp_status) || float32_lt_quiet(fst0, fst1, &env->active_fpu.fp_status),
3316 float32_unordered_quiet(fsth1, fsth0, &env->active_fpu.fp_status) || float32_lt_quiet(fsth0, fsth1, &env->active_fpu.fp_status))
3317 FOP_COND_PS(ole, float32_le_quiet(fst0, fst1, &env->active_fpu.fp_status),
3318 float32_le_quiet(fsth0, fsth1, &env->active_fpu.fp_status))
3319 FOP_COND_PS(ule, float32_unordered_quiet(fst1, fst0, &env->active_fpu.fp_status) || float32_le_quiet(fst0, fst1, &env->active_fpu.fp_status),
3320 float32_unordered_quiet(fsth1, fsth0, &env->active_fpu.fp_status) || float32_le_quiet(fsth0, fsth1, &env->active_fpu.fp_status))
3321 /* NOTE: the comma operator will make "cond" to eval to false,
3322 * but float32_unordered() is still called. */
3323 FOP_COND_PS(sf, (float32_unordered(fst1, fst0, &env->active_fpu.fp_status), 0),
3324 (float32_unordered(fsth1, fsth0, &env->active_fpu.fp_status), 0))
3325 FOP_COND_PS(ngle,float32_unordered(fst1, fst0, &env->active_fpu.fp_status),
3326 float32_unordered(fsth1, fsth0, &env->active_fpu.fp_status))
3327 FOP_COND_PS(seq, float32_eq(fst0, fst1, &env->active_fpu.fp_status),
3328 float32_eq(fsth0, fsth1, &env->active_fpu.fp_status))
3329 FOP_COND_PS(ngl, float32_unordered(fst1, fst0, &env->active_fpu.fp_status) || float32_eq(fst0, fst1, &env->active_fpu.fp_status),
3330 float32_unordered(fsth1, fsth0, &env->active_fpu.fp_status) || float32_eq(fsth0, fsth1, &env->active_fpu.fp_status))
3331 FOP_COND_PS(lt, float32_lt(fst0, fst1, &env->active_fpu.fp_status),
3332 float32_lt(fsth0, fsth1, &env->active_fpu.fp_status))
3333 FOP_COND_PS(nge, float32_unordered(fst1, fst0, &env->active_fpu.fp_status) || float32_lt(fst0, fst1, &env->active_fpu.fp_status),
3334 float32_unordered(fsth1, fsth0, &env->active_fpu.fp_status) || float32_lt(fsth0, fsth1, &env->active_fpu.fp_status))
3335 FOP_COND_PS(le, float32_le(fst0, fst1, &env->active_fpu.fp_status),
3336 float32_le(fsth0, fsth1, &env->active_fpu.fp_status))
3337 FOP_COND_PS(ngt, float32_unordered(fst1, fst0, &env->active_fpu.fp_status) || float32_le(fst0, fst1, &env->active_fpu.fp_status),
3338 float32_unordered(fsth1, fsth0, &env->active_fpu.fp_status) || float32_le(fsth0, fsth1, &env->active_fpu.fp_status))