hw/9pfs: Fix memleaks in some 9p operation
[qemu/kevin.git] / target-mips / op_helper.c
blob96e40c60186ce2ce98974988454a776cb73691e4
1 /*
2 * MIPS emulation helpers for qemu.
4 * Copyright (c) 2004-2005 Jocelyn Mayer
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
19 #include <stdlib.h>
20 #include "cpu.h"
21 #include "dyngen-exec.h"
23 #include "host-utils.h"
25 #include "helper.h"
27 #if !defined(CONFIG_USER_ONLY)
28 #include "softmmu_exec.h"
29 #endif /* !defined(CONFIG_USER_ONLY) */
31 #ifndef CONFIG_USER_ONLY
32 static inline void cpu_mips_tlb_flush (CPUState *env, int flush_global);
33 #endif
35 static inline void compute_hflags(CPUState *env)
37 env->hflags &= ~(MIPS_HFLAG_COP1X | MIPS_HFLAG_64 | MIPS_HFLAG_CP0 |
38 MIPS_HFLAG_F64 | MIPS_HFLAG_FPU | MIPS_HFLAG_KSU |
39 MIPS_HFLAG_UX);
40 if (!(env->CP0_Status & (1 << CP0St_EXL)) &&
41 !(env->CP0_Status & (1 << CP0St_ERL)) &&
42 !(env->hflags & MIPS_HFLAG_DM)) {
43 env->hflags |= (env->CP0_Status >> CP0St_KSU) & MIPS_HFLAG_KSU;
45 #if defined(TARGET_MIPS64)
46 if (((env->hflags & MIPS_HFLAG_KSU) != MIPS_HFLAG_UM) ||
47 (env->CP0_Status & (1 << CP0St_PX)) ||
48 (env->CP0_Status & (1 << CP0St_UX))) {
49 env->hflags |= MIPS_HFLAG_64;
51 if (env->CP0_Status & (1 << CP0St_UX)) {
52 env->hflags |= MIPS_HFLAG_UX;
54 #endif
55 if ((env->CP0_Status & (1 << CP0St_CU0)) ||
56 !(env->hflags & MIPS_HFLAG_KSU)) {
57 env->hflags |= MIPS_HFLAG_CP0;
59 if (env->CP0_Status & (1 << CP0St_CU1)) {
60 env->hflags |= MIPS_HFLAG_FPU;
62 if (env->CP0_Status & (1 << CP0St_FR)) {
63 env->hflags |= MIPS_HFLAG_F64;
65 if (env->insn_flags & ISA_MIPS32R2) {
66 if (env->active_fpu.fcr0 & (1 << FCR0_F64)) {
67 env->hflags |= MIPS_HFLAG_COP1X;
69 } else if (env->insn_flags & ISA_MIPS32) {
70 if (env->hflags & MIPS_HFLAG_64) {
71 env->hflags |= MIPS_HFLAG_COP1X;
73 } else if (env->insn_flags & ISA_MIPS4) {
74 /* All supported MIPS IV CPUs use the XX (CU3) to enable
75 and disable the MIPS IV extensions to the MIPS III ISA.
76 Some other MIPS IV CPUs ignore the bit, so the check here
77 would be too restrictive for them. */
78 if (env->CP0_Status & (1 << CP0St_CU3)) {
79 env->hflags |= MIPS_HFLAG_COP1X;
84 /*****************************************************************************/
85 /* Exceptions processing helpers */
87 void helper_raise_exception_err (uint32_t exception, int error_code)
89 #if 1
90 if (exception < 0x100)
91 qemu_log("%s: %d %d\n", __func__, exception, error_code);
92 #endif
93 env->exception_index = exception;
94 env->error_code = error_code;
95 cpu_loop_exit(env);
98 void helper_raise_exception (uint32_t exception)
100 helper_raise_exception_err(exception, 0);
103 #if !defined(CONFIG_USER_ONLY)
104 static void do_restore_state (void *pc_ptr)
106 TranslationBlock *tb;
107 unsigned long pc = (unsigned long) pc_ptr;
109 tb = tb_find_pc (pc);
110 if (tb) {
111 cpu_restore_state(tb, env, pc);
114 #endif
116 #if defined(CONFIG_USER_ONLY)
117 #define HELPER_LD(name, insn, type) \
118 static inline type do_##name(target_ulong addr, int mem_idx) \
120 return (type) insn##_raw(addr); \
122 #else
123 #define HELPER_LD(name, insn, type) \
124 static inline type do_##name(target_ulong addr, int mem_idx) \
126 switch (mem_idx) \
128 case 0: return (type) insn##_kernel(addr); break; \
129 case 1: return (type) insn##_super(addr); break; \
130 default: \
131 case 2: return (type) insn##_user(addr); break; \
134 #endif
135 HELPER_LD(lbu, ldub, uint8_t)
136 HELPER_LD(lw, ldl, int32_t)
137 #ifdef TARGET_MIPS64
138 HELPER_LD(ld, ldq, int64_t)
139 #endif
140 #undef HELPER_LD
142 #if defined(CONFIG_USER_ONLY)
143 #define HELPER_ST(name, insn, type) \
144 static inline void do_##name(target_ulong addr, type val, int mem_idx) \
146 insn##_raw(addr, val); \
148 #else
149 #define HELPER_ST(name, insn, type) \
150 static inline void do_##name(target_ulong addr, type val, int mem_idx) \
152 switch (mem_idx) \
154 case 0: insn##_kernel(addr, val); break; \
155 case 1: insn##_super(addr, val); break; \
156 default: \
157 case 2: insn##_user(addr, val); break; \
160 #endif
161 HELPER_ST(sb, stb, uint8_t)
162 HELPER_ST(sw, stl, uint32_t)
163 #ifdef TARGET_MIPS64
164 HELPER_ST(sd, stq, uint64_t)
165 #endif
166 #undef HELPER_ST
168 target_ulong helper_clo (target_ulong arg1)
170 return clo32(arg1);
173 target_ulong helper_clz (target_ulong arg1)
175 return clz32(arg1);
178 #if defined(TARGET_MIPS64)
179 target_ulong helper_dclo (target_ulong arg1)
181 return clo64(arg1);
184 target_ulong helper_dclz (target_ulong arg1)
186 return clz64(arg1);
188 #endif /* TARGET_MIPS64 */
190 /* 64 bits arithmetic for 32 bits hosts */
191 static inline uint64_t get_HILO (void)
193 return ((uint64_t)(env->active_tc.HI[0]) << 32) | (uint32_t)env->active_tc.LO[0];
196 static inline void set_HILO (uint64_t HILO)
198 env->active_tc.LO[0] = (int32_t)HILO;
199 env->active_tc.HI[0] = (int32_t)(HILO >> 32);
202 static inline void set_HIT0_LO (target_ulong arg1, uint64_t HILO)
204 env->active_tc.LO[0] = (int32_t)(HILO & 0xFFFFFFFF);
205 arg1 = env->active_tc.HI[0] = (int32_t)(HILO >> 32);
208 static inline void set_HI_LOT0 (target_ulong arg1, uint64_t HILO)
210 arg1 = env->active_tc.LO[0] = (int32_t)(HILO & 0xFFFFFFFF);
211 env->active_tc.HI[0] = (int32_t)(HILO >> 32);
214 /* Multiplication variants of the vr54xx. */
215 target_ulong helper_muls (target_ulong arg1, target_ulong arg2)
217 set_HI_LOT0(arg1, 0 - ((int64_t)(int32_t)arg1 * (int64_t)(int32_t)arg2));
219 return arg1;
222 target_ulong helper_mulsu (target_ulong arg1, target_ulong arg2)
224 set_HI_LOT0(arg1, 0 - ((uint64_t)(uint32_t)arg1 * (uint64_t)(uint32_t)arg2));
226 return arg1;
229 target_ulong helper_macc (target_ulong arg1, target_ulong arg2)
231 set_HI_LOT0(arg1, ((int64_t)get_HILO()) + ((int64_t)(int32_t)arg1 * (int64_t)(int32_t)arg2));
233 return arg1;
236 target_ulong helper_macchi (target_ulong arg1, target_ulong arg2)
238 set_HIT0_LO(arg1, ((int64_t)get_HILO()) + ((int64_t)(int32_t)arg1 * (int64_t)(int32_t)arg2));
240 return arg1;
243 target_ulong helper_maccu (target_ulong arg1, target_ulong arg2)
245 set_HI_LOT0(arg1, ((uint64_t)get_HILO()) + ((uint64_t)(uint32_t)arg1 * (uint64_t)(uint32_t)arg2));
247 return arg1;
250 target_ulong helper_macchiu (target_ulong arg1, target_ulong arg2)
252 set_HIT0_LO(arg1, ((uint64_t)get_HILO()) + ((uint64_t)(uint32_t)arg1 * (uint64_t)(uint32_t)arg2));
254 return arg1;
257 target_ulong helper_msac (target_ulong arg1, target_ulong arg2)
259 set_HI_LOT0(arg1, ((int64_t)get_HILO()) - ((int64_t)(int32_t)arg1 * (int64_t)(int32_t)arg2));
261 return arg1;
264 target_ulong helper_msachi (target_ulong arg1, target_ulong arg2)
266 set_HIT0_LO(arg1, ((int64_t)get_HILO()) - ((int64_t)(int32_t)arg1 * (int64_t)(int32_t)arg2));
268 return arg1;
271 target_ulong helper_msacu (target_ulong arg1, target_ulong arg2)
273 set_HI_LOT0(arg1, ((uint64_t)get_HILO()) - ((uint64_t)(uint32_t)arg1 * (uint64_t)(uint32_t)arg2));
275 return arg1;
278 target_ulong helper_msachiu (target_ulong arg1, target_ulong arg2)
280 set_HIT0_LO(arg1, ((uint64_t)get_HILO()) - ((uint64_t)(uint32_t)arg1 * (uint64_t)(uint32_t)arg2));
282 return arg1;
285 target_ulong helper_mulhi (target_ulong arg1, target_ulong arg2)
287 set_HIT0_LO(arg1, (int64_t)(int32_t)arg1 * (int64_t)(int32_t)arg2);
289 return arg1;
292 target_ulong helper_mulhiu (target_ulong arg1, target_ulong arg2)
294 set_HIT0_LO(arg1, (uint64_t)(uint32_t)arg1 * (uint64_t)(uint32_t)arg2);
296 return arg1;
299 target_ulong helper_mulshi (target_ulong arg1, target_ulong arg2)
301 set_HIT0_LO(arg1, 0 - ((int64_t)(int32_t)arg1 * (int64_t)(int32_t)arg2));
303 return arg1;
306 target_ulong helper_mulshiu (target_ulong arg1, target_ulong arg2)
308 set_HIT0_LO(arg1, 0 - ((uint64_t)(uint32_t)arg1 * (uint64_t)(uint32_t)arg2));
310 return arg1;
#ifdef TARGET_MIPS64
/* DMULT/DMULTU: full 64x64 -> 128 bit multiply into HI:LO.  */
void helper_dmult (target_ulong arg1, target_ulong arg2)
{
    muls64(&(env->active_tc.LO[0]), &(env->active_tc.HI[0]), arg1, arg2);
}

void helper_dmultu (target_ulong arg1, target_ulong arg2)
{
    mulu64(&(env->active_tc.LO[0]), &(env->active_tc.HI[0]), arg1, arg2);
}
#endif
325 #ifndef CONFIG_USER_ONLY
327 static inline target_phys_addr_t do_translate_address(target_ulong address, int rw)
329 target_phys_addr_t lladdr;
331 lladdr = cpu_mips_translate_address(env, address, rw);
333 if (lladdr == -1LL) {
334 cpu_loop_exit(env);
335 } else {
336 return lladdr;
340 #define HELPER_LD_ATOMIC(name, insn) \
341 target_ulong helper_##name(target_ulong arg, int mem_idx) \
343 env->lladdr = do_translate_address(arg, 0); \
344 env->llval = do_##insn(arg, mem_idx); \
345 return env->llval; \
347 HELPER_LD_ATOMIC(ll, lw)
348 #ifdef TARGET_MIPS64
349 HELPER_LD_ATOMIC(lld, ld)
350 #endif
351 #undef HELPER_LD_ATOMIC
353 #define HELPER_ST_ATOMIC(name, ld_insn, st_insn, almask) \
354 target_ulong helper_##name(target_ulong arg1, target_ulong arg2, int mem_idx) \
356 target_long tmp; \
358 if (arg2 & almask) { \
359 env->CP0_BadVAddr = arg2; \
360 helper_raise_exception(EXCP_AdES); \
362 if (do_translate_address(arg2, 1) == env->lladdr) { \
363 tmp = do_##ld_insn(arg2, mem_idx); \
364 if (tmp == env->llval) { \
365 do_##st_insn(arg2, arg1, mem_idx); \
366 return 1; \
369 return 0; \
371 HELPER_ST_ATOMIC(sc, lw, sw, 0x3)
372 #ifdef TARGET_MIPS64
373 HELPER_ST_ATOMIC(scd, ld, sd, 0x7)
374 #endif
375 #undef HELPER_ST_ATOMIC
376 #endif
/* Byte-lane helpers for the unaligned load/store helpers below:
   GET_LMASK yields the byte offset of the addressed byte within its
   word, and GET_OFFSET steps towards the remaining bytes — both with
   the direction flipped on little-endian targets.  */
#ifdef TARGET_WORDS_BIGENDIAN
#define GET_LMASK(v) ((v) & 3)
#define GET_OFFSET(addr, offset) (addr + (offset))
#else
#define GET_LMASK(v) (((v) & 3) ^ 3)
#define GET_OFFSET(addr, offset) (addr - (offset))
#endif
386 target_ulong helper_lwl(target_ulong arg1, target_ulong arg2, int mem_idx)
388 target_ulong tmp;
390 tmp = do_lbu(arg2, mem_idx);
391 arg1 = (arg1 & 0x00FFFFFF) | (tmp << 24);
393 if (GET_LMASK(arg2) <= 2) {
394 tmp = do_lbu(GET_OFFSET(arg2, 1), mem_idx);
395 arg1 = (arg1 & 0xFF00FFFF) | (tmp << 16);
398 if (GET_LMASK(arg2) <= 1) {
399 tmp = do_lbu(GET_OFFSET(arg2, 2), mem_idx);
400 arg1 = (arg1 & 0xFFFF00FF) | (tmp << 8);
403 if (GET_LMASK(arg2) == 0) {
404 tmp = do_lbu(GET_OFFSET(arg2, 3), mem_idx);
405 arg1 = (arg1 & 0xFFFFFF00) | tmp;
407 return (int32_t)arg1;
410 target_ulong helper_lwr(target_ulong arg1, target_ulong arg2, int mem_idx)
412 target_ulong tmp;
414 tmp = do_lbu(arg2, mem_idx);
415 arg1 = (arg1 & 0xFFFFFF00) | tmp;
417 if (GET_LMASK(arg2) >= 1) {
418 tmp = do_lbu(GET_OFFSET(arg2, -1), mem_idx);
419 arg1 = (arg1 & 0xFFFF00FF) | (tmp << 8);
422 if (GET_LMASK(arg2) >= 2) {
423 tmp = do_lbu(GET_OFFSET(arg2, -2), mem_idx);
424 arg1 = (arg1 & 0xFF00FFFF) | (tmp << 16);
427 if (GET_LMASK(arg2) == 3) {
428 tmp = do_lbu(GET_OFFSET(arg2, -3), mem_idx);
429 arg1 = (arg1 & 0x00FFFFFF) | (tmp << 24);
431 return (int32_t)arg1;
434 void helper_swl(target_ulong arg1, target_ulong arg2, int mem_idx)
436 do_sb(arg2, (uint8_t)(arg1 >> 24), mem_idx);
438 if (GET_LMASK(arg2) <= 2)
439 do_sb(GET_OFFSET(arg2, 1), (uint8_t)(arg1 >> 16), mem_idx);
441 if (GET_LMASK(arg2) <= 1)
442 do_sb(GET_OFFSET(arg2, 2), (uint8_t)(arg1 >> 8), mem_idx);
444 if (GET_LMASK(arg2) == 0)
445 do_sb(GET_OFFSET(arg2, 3), (uint8_t)arg1, mem_idx);
448 void helper_swr(target_ulong arg1, target_ulong arg2, int mem_idx)
450 do_sb(arg2, (uint8_t)arg1, mem_idx);
452 if (GET_LMASK(arg2) >= 1)
453 do_sb(GET_OFFSET(arg2, -1), (uint8_t)(arg1 >> 8), mem_idx);
455 if (GET_LMASK(arg2) >= 2)
456 do_sb(GET_OFFSET(arg2, -2), (uint8_t)(arg1 >> 16), mem_idx);
458 if (GET_LMASK(arg2) == 3)
459 do_sb(GET_OFFSET(arg2, -3), (uint8_t)(arg1 >> 24), mem_idx);
462 #if defined(TARGET_MIPS64)
463 /* "half" load and stores. We must do the memory access inline,
464 or fault handling won't work. */
466 #ifdef TARGET_WORDS_BIGENDIAN
467 #define GET_LMASK64(v) ((v) & 7)
468 #else
469 #define GET_LMASK64(v) (((v) & 7) ^ 7)
470 #endif
472 target_ulong helper_ldl(target_ulong arg1, target_ulong arg2, int mem_idx)
474 uint64_t tmp;
476 tmp = do_lbu(arg2, mem_idx);
477 arg1 = (arg1 & 0x00FFFFFFFFFFFFFFULL) | (tmp << 56);
479 if (GET_LMASK64(arg2) <= 6) {
480 tmp = do_lbu(GET_OFFSET(arg2, 1), mem_idx);
481 arg1 = (arg1 & 0xFF00FFFFFFFFFFFFULL) | (tmp << 48);
484 if (GET_LMASK64(arg2) <= 5) {
485 tmp = do_lbu(GET_OFFSET(arg2, 2), mem_idx);
486 arg1 = (arg1 & 0xFFFF00FFFFFFFFFFULL) | (tmp << 40);
489 if (GET_LMASK64(arg2) <= 4) {
490 tmp = do_lbu(GET_OFFSET(arg2, 3), mem_idx);
491 arg1 = (arg1 & 0xFFFFFF00FFFFFFFFULL) | (tmp << 32);
494 if (GET_LMASK64(arg2) <= 3) {
495 tmp = do_lbu(GET_OFFSET(arg2, 4), mem_idx);
496 arg1 = (arg1 & 0xFFFFFFFF00FFFFFFULL) | (tmp << 24);
499 if (GET_LMASK64(arg2) <= 2) {
500 tmp = do_lbu(GET_OFFSET(arg2, 5), mem_idx);
501 arg1 = (arg1 & 0xFFFFFFFFFF00FFFFULL) | (tmp << 16);
504 if (GET_LMASK64(arg2) <= 1) {
505 tmp = do_lbu(GET_OFFSET(arg2, 6), mem_idx);
506 arg1 = (arg1 & 0xFFFFFFFFFFFF00FFULL) | (tmp << 8);
509 if (GET_LMASK64(arg2) == 0) {
510 tmp = do_lbu(GET_OFFSET(arg2, 7), mem_idx);
511 arg1 = (arg1 & 0xFFFFFFFFFFFFFF00ULL) | tmp;
514 return arg1;
517 target_ulong helper_ldr(target_ulong arg1, target_ulong arg2, int mem_idx)
519 uint64_t tmp;
521 tmp = do_lbu(arg2, mem_idx);
522 arg1 = (arg1 & 0xFFFFFFFFFFFFFF00ULL) | tmp;
524 if (GET_LMASK64(arg2) >= 1) {
525 tmp = do_lbu(GET_OFFSET(arg2, -1), mem_idx);
526 arg1 = (arg1 & 0xFFFFFFFFFFFF00FFULL) | (tmp << 8);
529 if (GET_LMASK64(arg2) >= 2) {
530 tmp = do_lbu(GET_OFFSET(arg2, -2), mem_idx);
531 arg1 = (arg1 & 0xFFFFFFFFFF00FFFFULL) | (tmp << 16);
534 if (GET_LMASK64(arg2) >= 3) {
535 tmp = do_lbu(GET_OFFSET(arg2, -3), mem_idx);
536 arg1 = (arg1 & 0xFFFFFFFF00FFFFFFULL) | (tmp << 24);
539 if (GET_LMASK64(arg2) >= 4) {
540 tmp = do_lbu(GET_OFFSET(arg2, -4), mem_idx);
541 arg1 = (arg1 & 0xFFFFFF00FFFFFFFFULL) | (tmp << 32);
544 if (GET_LMASK64(arg2) >= 5) {
545 tmp = do_lbu(GET_OFFSET(arg2, -5), mem_idx);
546 arg1 = (arg1 & 0xFFFF00FFFFFFFFFFULL) | (tmp << 40);
549 if (GET_LMASK64(arg2) >= 6) {
550 tmp = do_lbu(GET_OFFSET(arg2, -6), mem_idx);
551 arg1 = (arg1 & 0xFF00FFFFFFFFFFFFULL) | (tmp << 48);
554 if (GET_LMASK64(arg2) == 7) {
555 tmp = do_lbu(GET_OFFSET(arg2, -7), mem_idx);
556 arg1 = (arg1 & 0x00FFFFFFFFFFFFFFULL) | (tmp << 56);
559 return arg1;
562 void helper_sdl(target_ulong arg1, target_ulong arg2, int mem_idx)
564 do_sb(arg2, (uint8_t)(arg1 >> 56), mem_idx);
566 if (GET_LMASK64(arg2) <= 6)
567 do_sb(GET_OFFSET(arg2, 1), (uint8_t)(arg1 >> 48), mem_idx);
569 if (GET_LMASK64(arg2) <= 5)
570 do_sb(GET_OFFSET(arg2, 2), (uint8_t)(arg1 >> 40), mem_idx);
572 if (GET_LMASK64(arg2) <= 4)
573 do_sb(GET_OFFSET(arg2, 3), (uint8_t)(arg1 >> 32), mem_idx);
575 if (GET_LMASK64(arg2) <= 3)
576 do_sb(GET_OFFSET(arg2, 4), (uint8_t)(arg1 >> 24), mem_idx);
578 if (GET_LMASK64(arg2) <= 2)
579 do_sb(GET_OFFSET(arg2, 5), (uint8_t)(arg1 >> 16), mem_idx);
581 if (GET_LMASK64(arg2) <= 1)
582 do_sb(GET_OFFSET(arg2, 6), (uint8_t)(arg1 >> 8), mem_idx);
584 if (GET_LMASK64(arg2) <= 0)
585 do_sb(GET_OFFSET(arg2, 7), (uint8_t)arg1, mem_idx);
588 void helper_sdr(target_ulong arg1, target_ulong arg2, int mem_idx)
590 do_sb(arg2, (uint8_t)arg1, mem_idx);
592 if (GET_LMASK64(arg2) >= 1)
593 do_sb(GET_OFFSET(arg2, -1), (uint8_t)(arg1 >> 8), mem_idx);
595 if (GET_LMASK64(arg2) >= 2)
596 do_sb(GET_OFFSET(arg2, -2), (uint8_t)(arg1 >> 16), mem_idx);
598 if (GET_LMASK64(arg2) >= 3)
599 do_sb(GET_OFFSET(arg2, -3), (uint8_t)(arg1 >> 24), mem_idx);
601 if (GET_LMASK64(arg2) >= 4)
602 do_sb(GET_OFFSET(arg2, -4), (uint8_t)(arg1 >> 32), mem_idx);
604 if (GET_LMASK64(arg2) >= 5)
605 do_sb(GET_OFFSET(arg2, -5), (uint8_t)(arg1 >> 40), mem_idx);
607 if (GET_LMASK64(arg2) >= 6)
608 do_sb(GET_OFFSET(arg2, -6), (uint8_t)(arg1 >> 48), mem_idx);
610 if (GET_LMASK64(arg2) == 7)
611 do_sb(GET_OFFSET(arg2, -7), (uint8_t)(arg1 >> 56), mem_idx);
613 #endif /* TARGET_MIPS64 */
/* GPR numbers touched by the LWM/SWM/LDM/SDM register-list helpers:
   s0-s7 then s8/fp.  */
static const int multiple_regs[] = { 16, 17, 18, 19, 20, 21, 22, 23, 30 };
617 void helper_lwm (target_ulong addr, target_ulong reglist, uint32_t mem_idx)
619 target_ulong base_reglist = reglist & 0xf;
620 target_ulong do_r31 = reglist & 0x10;
621 #ifdef CONFIG_USER_ONLY
622 #undef ldfun
623 #define ldfun ldl_raw
624 #else
625 uint32_t (*ldfun)(target_ulong);
627 switch (mem_idx)
629 case 0: ldfun = ldl_kernel; break;
630 case 1: ldfun = ldl_super; break;
631 default:
632 case 2: ldfun = ldl_user; break;
634 #endif
636 if (base_reglist > 0 && base_reglist <= ARRAY_SIZE (multiple_regs)) {
637 target_ulong i;
639 for (i = 0; i < base_reglist; i++) {
640 env->active_tc.gpr[multiple_regs[i]] = (target_long) ldfun(addr);
641 addr += 4;
645 if (do_r31) {
646 env->active_tc.gpr[31] = (target_long) ldfun(addr);
650 void helper_swm (target_ulong addr, target_ulong reglist, uint32_t mem_idx)
652 target_ulong base_reglist = reglist & 0xf;
653 target_ulong do_r31 = reglist & 0x10;
654 #ifdef CONFIG_USER_ONLY
655 #undef stfun
656 #define stfun stl_raw
657 #else
658 void (*stfun)(target_ulong, uint32_t);
660 switch (mem_idx)
662 case 0: stfun = stl_kernel; break;
663 case 1: stfun = stl_super; break;
664 default:
665 case 2: stfun = stl_user; break;
667 #endif
669 if (base_reglist > 0 && base_reglist <= ARRAY_SIZE (multiple_regs)) {
670 target_ulong i;
672 for (i = 0; i < base_reglist; i++) {
673 stfun(addr, env->active_tc.gpr[multiple_regs[i]]);
674 addr += 4;
678 if (do_r31) {
679 stfun(addr, env->active_tc.gpr[31]);
683 #if defined(TARGET_MIPS64)
684 void helper_ldm (target_ulong addr, target_ulong reglist, uint32_t mem_idx)
686 target_ulong base_reglist = reglist & 0xf;
687 target_ulong do_r31 = reglist & 0x10;
688 #ifdef CONFIG_USER_ONLY
689 #undef ldfun
690 #define ldfun ldq_raw
691 #else
692 uint64_t (*ldfun)(target_ulong);
694 switch (mem_idx)
696 case 0: ldfun = ldq_kernel; break;
697 case 1: ldfun = ldq_super; break;
698 default:
699 case 2: ldfun = ldq_user; break;
701 #endif
703 if (base_reglist > 0 && base_reglist <= ARRAY_SIZE (multiple_regs)) {
704 target_ulong i;
706 for (i = 0; i < base_reglist; i++) {
707 env->active_tc.gpr[multiple_regs[i]] = ldfun(addr);
708 addr += 8;
712 if (do_r31) {
713 env->active_tc.gpr[31] = ldfun(addr);
717 void helper_sdm (target_ulong addr, target_ulong reglist, uint32_t mem_idx)
719 target_ulong base_reglist = reglist & 0xf;
720 target_ulong do_r31 = reglist & 0x10;
721 #ifdef CONFIG_USER_ONLY
722 #undef stfun
723 #define stfun stq_raw
724 #else
725 void (*stfun)(target_ulong, uint64_t);
727 switch (mem_idx)
729 case 0: stfun = stq_kernel; break;
730 case 1: stfun = stq_super; break;
731 default:
732 case 2: stfun = stq_user; break;
734 #endif
736 if (base_reglist > 0 && base_reglist <= ARRAY_SIZE (multiple_regs)) {
737 target_ulong i;
739 for (i = 0; i < base_reglist; i++) {
740 stfun(addr, env->active_tc.gpr[multiple_regs[i]]);
741 addr += 8;
745 if (do_r31) {
746 stfun(addr, env->active_tc.gpr[31]);
749 #endif
751 #ifndef CONFIG_USER_ONLY
752 /* SMP helpers. */
753 static int mips_vpe_is_wfi(CPUState *c)
755 /* If the VPE is halted but otherwise active, it means it's waiting for
756 an interrupt. */
757 return c->halted && mips_vpe_active(c);
760 static inline void mips_vpe_wake(CPUState *c)
762 /* Dont set ->halted = 0 directly, let it be done via cpu_has_work
763 because there might be other conditions that state that c should
764 be sleeping. */
765 cpu_interrupt(c, CPU_INTERRUPT_WAKE);
768 static inline void mips_vpe_sleep(CPUState *c)
770 /* The VPE was shut off, really go to bed.
771 Reset any old _WAKE requests. */
772 c->halted = 1;
773 cpu_reset_interrupt(c, CPU_INTERRUPT_WAKE);
776 static inline void mips_tc_wake(CPUState *c, int tc)
778 /* FIXME: TC reschedule. */
779 if (mips_vpe_active(c) && !mips_vpe_is_wfi(c)) {
780 mips_vpe_wake(c);
784 static inline void mips_tc_sleep(CPUState *c, int tc)
786 /* FIXME: TC reschedule. */
787 if (!mips_vpe_active(c)) {
788 mips_vpe_sleep(c);
792 /* tc should point to an int with the value of the global TC index.
793 This function will transform it into a local index within the
794 returned CPUState.
796 FIXME: This code assumes that all VPEs have the same number of TCs,
797 which depends on runtime setup. Can probably be fixed by
798 walking the list of CPUStates. */
799 static CPUState *mips_cpu_map_tc(int *tc)
801 CPUState *other;
802 int vpe_idx, nr_threads = env->nr_threads;
803 int tc_idx = *tc;
805 if (!(env->CP0_VPEConf0 & (1 << CP0VPEC0_MVP))) {
806 /* Not allowed to address other CPUs. */
807 *tc = env->current_tc;
808 return env;
811 vpe_idx = tc_idx / nr_threads;
812 *tc = tc_idx % nr_threads;
813 other = qemu_get_cpu(vpe_idx);
814 return other ? other : env;
817 /* The per VPE CP0_Status register shares some fields with the per TC
818 CP0_TCStatus registers. These fields are wired to the same registers,
819 so changes to either of them should be reflected on both registers.
821 Also, EntryHi shares the bottom 8 bit ASID with TCStauts.
823 These helper call synchronizes the regs for a given cpu. */
825 /* Called for updates to CP0_Status. */
826 static void sync_c0_status(CPUState *cpu, int tc)
828 int32_t tcstatus, *tcst;
829 uint32_t v = cpu->CP0_Status;
830 uint32_t cu, mx, asid, ksu;
831 uint32_t mask = ((1 << CP0TCSt_TCU3)
832 | (1 << CP0TCSt_TCU2)
833 | (1 << CP0TCSt_TCU1)
834 | (1 << CP0TCSt_TCU0)
835 | (1 << CP0TCSt_TMX)
836 | (3 << CP0TCSt_TKSU)
837 | (0xff << CP0TCSt_TASID));
839 cu = (v >> CP0St_CU0) & 0xf;
840 mx = (v >> CP0St_MX) & 0x1;
841 ksu = (v >> CP0St_KSU) & 0x3;
842 asid = env->CP0_EntryHi & 0xff;
844 tcstatus = cu << CP0TCSt_TCU0;
845 tcstatus |= mx << CP0TCSt_TMX;
846 tcstatus |= ksu << CP0TCSt_TKSU;
847 tcstatus |= asid;
849 if (tc == cpu->current_tc) {
850 tcst = &cpu->active_tc.CP0_TCStatus;
851 } else {
852 tcst = &cpu->tcs[tc].CP0_TCStatus;
855 *tcst &= ~mask;
856 *tcst |= tcstatus;
857 compute_hflags(cpu);
860 /* Called for updates to CP0_TCStatus. */
861 static void sync_c0_tcstatus(CPUState *cpu, int tc, target_ulong v)
863 uint32_t status;
864 uint32_t tcu, tmx, tasid, tksu;
865 uint32_t mask = ((1 << CP0St_CU3)
866 | (1 << CP0St_CU2)
867 | (1 << CP0St_CU1)
868 | (1 << CP0St_CU0)
869 | (1 << CP0St_MX)
870 | (3 << CP0St_KSU));
872 tcu = (v >> CP0TCSt_TCU0) & 0xf;
873 tmx = (v >> CP0TCSt_TMX) & 0x1;
874 tasid = v & 0xff;
875 tksu = (v >> CP0TCSt_TKSU) & 0x3;
877 status = tcu << CP0St_CU0;
878 status |= tmx << CP0St_MX;
879 status |= tksu << CP0St_KSU;
881 cpu->CP0_Status &= ~mask;
882 cpu->CP0_Status |= status;
884 /* Sync the TASID with EntryHi. */
885 cpu->CP0_EntryHi &= ~0xff;
886 cpu->CP0_EntryHi = tasid;
888 compute_hflags(cpu);
891 /* Called for updates to CP0_EntryHi. */
892 static void sync_c0_entryhi(CPUState *cpu, int tc)
894 int32_t *tcst;
895 uint32_t asid, v = cpu->CP0_EntryHi;
897 asid = v & 0xff;
899 if (tc == cpu->current_tc) {
900 tcst = &cpu->active_tc.CP0_TCStatus;
901 } else {
902 tcst = &cpu->tcs[tc].CP0_TCStatus;
905 *tcst &= ~0xff;
906 *tcst |= asid;
909 /* CP0 helpers */
910 target_ulong helper_mfc0_mvpcontrol (void)
912 return env->mvp->CP0_MVPControl;
915 target_ulong helper_mfc0_mvpconf0 (void)
917 return env->mvp->CP0_MVPConf0;
920 target_ulong helper_mfc0_mvpconf1 (void)
922 return env->mvp->CP0_MVPConf1;
925 target_ulong helper_mfc0_random (void)
927 return (int32_t)cpu_mips_get_random(env);
930 target_ulong helper_mfc0_tcstatus (void)
932 return env->active_tc.CP0_TCStatus;
935 target_ulong helper_mftc0_tcstatus(void)
937 int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
938 CPUState *other = mips_cpu_map_tc(&other_tc);
940 if (other_tc == other->current_tc)
941 return other->active_tc.CP0_TCStatus;
942 else
943 return other->tcs[other_tc].CP0_TCStatus;
946 target_ulong helper_mfc0_tcbind (void)
948 return env->active_tc.CP0_TCBind;
951 target_ulong helper_mftc0_tcbind(void)
953 int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
954 CPUState *other = mips_cpu_map_tc(&other_tc);
956 if (other_tc == other->current_tc)
957 return other->active_tc.CP0_TCBind;
958 else
959 return other->tcs[other_tc].CP0_TCBind;
962 target_ulong helper_mfc0_tcrestart (void)
964 return env->active_tc.PC;
967 target_ulong helper_mftc0_tcrestart(void)
969 int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
970 CPUState *other = mips_cpu_map_tc(&other_tc);
972 if (other_tc == other->current_tc)
973 return other->active_tc.PC;
974 else
975 return other->tcs[other_tc].PC;
978 target_ulong helper_mfc0_tchalt (void)
980 return env->active_tc.CP0_TCHalt;
983 target_ulong helper_mftc0_tchalt(void)
985 int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
986 CPUState *other = mips_cpu_map_tc(&other_tc);
988 if (other_tc == other->current_tc)
989 return other->active_tc.CP0_TCHalt;
990 else
991 return other->tcs[other_tc].CP0_TCHalt;
994 target_ulong helper_mfc0_tccontext (void)
996 return env->active_tc.CP0_TCContext;
999 target_ulong helper_mftc0_tccontext(void)
1001 int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
1002 CPUState *other = mips_cpu_map_tc(&other_tc);
1004 if (other_tc == other->current_tc)
1005 return other->active_tc.CP0_TCContext;
1006 else
1007 return other->tcs[other_tc].CP0_TCContext;
1010 target_ulong helper_mfc0_tcschedule (void)
1012 return env->active_tc.CP0_TCSchedule;
1015 target_ulong helper_mftc0_tcschedule(void)
1017 int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
1018 CPUState *other = mips_cpu_map_tc(&other_tc);
1020 if (other_tc == other->current_tc)
1021 return other->active_tc.CP0_TCSchedule;
1022 else
1023 return other->tcs[other_tc].CP0_TCSchedule;
1026 target_ulong helper_mfc0_tcschefback (void)
1028 return env->active_tc.CP0_TCScheFBack;
1031 target_ulong helper_mftc0_tcschefback(void)
1033 int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
1034 CPUState *other = mips_cpu_map_tc(&other_tc);
1036 if (other_tc == other->current_tc)
1037 return other->active_tc.CP0_TCScheFBack;
1038 else
1039 return other->tcs[other_tc].CP0_TCScheFBack;
1042 target_ulong helper_mfc0_count (void)
1044 return (int32_t)cpu_mips_get_count(env);
1047 target_ulong helper_mftc0_entryhi(void)
1049 int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
1050 CPUState *other = mips_cpu_map_tc(&other_tc);
1052 return other->CP0_EntryHi;
1055 target_ulong helper_mftc0_cause(void)
1057 int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
1058 int32_t tccause;
1059 CPUState *other = mips_cpu_map_tc(&other_tc);
1061 if (other_tc == other->current_tc) {
1062 tccause = other->CP0_Cause;
1063 } else {
1064 tccause = other->CP0_Cause;
1067 return tccause;
1070 target_ulong helper_mftc0_status(void)
1072 int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
1073 CPUState *other = mips_cpu_map_tc(&other_tc);
1075 return other->CP0_Status;
1078 target_ulong helper_mfc0_lladdr (void)
1080 return (int32_t)(env->lladdr >> env->CP0_LLAddr_shift);
1083 target_ulong helper_mfc0_watchlo (uint32_t sel)
1085 return (int32_t)env->CP0_WatchLo[sel];
1088 target_ulong helper_mfc0_watchhi (uint32_t sel)
1090 return env->CP0_WatchHi[sel];
1093 target_ulong helper_mfc0_debug (void)
1095 target_ulong t0 = env->CP0_Debug;
1096 if (env->hflags & MIPS_HFLAG_DM)
1097 t0 |= 1 << CP0DB_DM;
1099 return t0;
1102 target_ulong helper_mftc0_debug(void)
1104 int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
1105 int32_t tcstatus;
1106 CPUState *other = mips_cpu_map_tc(&other_tc);
1108 if (other_tc == other->current_tc)
1109 tcstatus = other->active_tc.CP0_Debug_tcstatus;
1110 else
1111 tcstatus = other->tcs[other_tc].CP0_Debug_tcstatus;
1113 /* XXX: Might be wrong, check with EJTAG spec. */
1114 return (other->CP0_Debug & ~((1 << CP0DB_SSt) | (1 << CP0DB_Halt))) |
1115 (tcstatus & ((1 << CP0DB_SSt) | (1 << CP0DB_Halt)));
#if defined(TARGET_MIPS64)
/* 64-bit (DMFC0) reads — same sources as the 32-bit versions but
   without sign-extension/truncation.  */
target_ulong helper_dmfc0_tcrestart (void)
{
    return env->active_tc.PC;
}

target_ulong helper_dmfc0_tchalt (void)
{
    return env->active_tc.CP0_TCHalt;
}

target_ulong helper_dmfc0_tccontext (void)
{
    return env->active_tc.CP0_TCContext;
}

target_ulong helper_dmfc0_tcschedule (void)
{
    return env->active_tc.CP0_TCSchedule;
}

target_ulong helper_dmfc0_tcschefback (void)
{
    return env->active_tc.CP0_TCScheFBack;
}

target_ulong helper_dmfc0_lladdr (void)
{
    return env->lladdr >> env->CP0_LLAddr_shift;
}

target_ulong helper_dmfc0_watchlo (uint32_t sel)
{
    return env->CP0_WatchLo[sel];
}
#endif /* TARGET_MIPS64 */
1155 void helper_mtc0_index (target_ulong arg1)
1157 int num = 1;
1158 unsigned int tmp = env->tlb->nb_tlb;
1160 do {
1161 tmp >>= 1;
1162 num <<= 1;
1163 } while (tmp);
1164 env->CP0_Index = (env->CP0_Index & 0x80000000) | (arg1 & (num - 1));
1167 void helper_mtc0_mvpcontrol (target_ulong arg1)
1169 uint32_t mask = 0;
1170 uint32_t newval;
1172 if (env->CP0_VPEConf0 & (1 << CP0VPEC0_MVP))
1173 mask |= (1 << CP0MVPCo_CPA) | (1 << CP0MVPCo_VPC) |
1174 (1 << CP0MVPCo_EVP);
1175 if (env->mvp->CP0_MVPControl & (1 << CP0MVPCo_VPC))
1176 mask |= (1 << CP0MVPCo_STLB);
1177 newval = (env->mvp->CP0_MVPControl & ~mask) | (arg1 & mask);
1179 // TODO: Enable/disable shared TLB, enable/disable VPEs.
1181 env->mvp->CP0_MVPControl = newval;
1184 void helper_mtc0_vpecontrol (target_ulong arg1)
1186 uint32_t mask;
1187 uint32_t newval;
1189 mask = (1 << CP0VPECo_YSI) | (1 << CP0VPECo_GSI) |
1190 (1 << CP0VPECo_TE) | (0xff << CP0VPECo_TargTC);
1191 newval = (env->CP0_VPEControl & ~mask) | (arg1 & mask);
1193 /* Yield scheduler intercept not implemented. */
1194 /* Gating storage scheduler intercept not implemented. */
1196 // TODO: Enable/disable TCs.
1198 env->CP0_VPEControl = newval;
1201 void helper_mttc0_vpecontrol(target_ulong arg1)
1203 int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
1204 CPUState *other = mips_cpu_map_tc(&other_tc);
1205 uint32_t mask;
1206 uint32_t newval;
1208 mask = (1 << CP0VPECo_YSI) | (1 << CP0VPECo_GSI) |
1209 (1 << CP0VPECo_TE) | (0xff << CP0VPECo_TargTC);
1210 newval = (other->CP0_VPEControl & ~mask) | (arg1 & mask);
1212 /* TODO: Enable/disable TCs. */
1214 other->CP0_VPEControl = newval;
1217 target_ulong helper_mftc0_vpecontrol(void)
1219 int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
1220 CPUState *other = mips_cpu_map_tc(&other_tc);
1221 /* FIXME: Mask away return zero on read bits. */
1222 return other->CP0_VPEControl;
1225 target_ulong helper_mftc0_vpeconf0(void)
1227 int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
1228 CPUState *other = mips_cpu_map_tc(&other_tc);
1230 return other->CP0_VPEConf0;
1233 void helper_mtc0_vpeconf0 (target_ulong arg1)
1235 uint32_t mask = 0;
1236 uint32_t newval;
1238 if (env->CP0_VPEConf0 & (1 << CP0VPEC0_MVP)) {
1239 if (env->CP0_VPEConf0 & (1 << CP0VPEC0_VPA))
1240 mask |= (0xff << CP0VPEC0_XTC);
1241 mask |= (1 << CP0VPEC0_MVP) | (1 << CP0VPEC0_VPA);
1243 newval = (env->CP0_VPEConf0 & ~mask) | (arg1 & mask);
1245 // TODO: TC exclusive handling due to ERL/EXL.
1247 env->CP0_VPEConf0 = newval;
1250 void helper_mttc0_vpeconf0(target_ulong arg1)
1252 int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
1253 CPUState *other = mips_cpu_map_tc(&other_tc);
1254 uint32_t mask = 0;
1255 uint32_t newval;
1257 mask |= (1 << CP0VPEC0_MVP) | (1 << CP0VPEC0_VPA);
1258 newval = (other->CP0_VPEConf0 & ~mask) | (arg1 & mask);
1260 /* TODO: TC exclusive handling due to ERL/EXL. */
1261 other->CP0_VPEConf0 = newval;
/* Write VPEConf1: coprocessor/CorExtend resource counts, only writable
   while the processor is in VPE-configuration state (MVPControl.VPC). */
void helper_mtc0_vpeconf1 (target_ulong arg1)
{
    uint32_t mask = 0;
    uint32_t newval;

    if (env->mvp->CP0_MVPControl & (1 << CP0MVPCo_VPC))
        mask |= (0xff << CP0VPEC1_NCX) | (0xff << CP0VPEC1_NCP2) |
                (0xff << CP0VPEC1_NCP1);
    newval = (env->CP0_VPEConf1 & ~mask) | (arg1 & mask);

    /* UDI not implemented. */
    /* CP2 not implemented. */

    // TODO: Handle FPU (CP1) binding.

    env->CP0_VPEConf1 = newval;
}

/* YQMask: yield qualifier inputs are not modelled, so force it to zero. */
void helper_mtc0_yqmask (target_ulong arg1)
{
    /* Yield qualifier inputs not implemented. */
    env->CP0_YQMask = 0x00000000;
}

/* VPEOpt: only the low 16 bits are implemented. */
void helper_mtc0_vpeopt (target_ulong arg1)
{
    env->CP0_VPEOpt = arg1 & 0x0000ffff;
}
/* EntryLo0: mask to the implemented PFN/flag bits. */
void helper_mtc0_entrylo0 (target_ulong arg1)
{
    /* Large physaddr (PABITS) not implemented */
    /* 1k pages not implemented */
    env->CP0_EntryLo0 = arg1 & 0x3FFFFFFF;
}

/* TCStatus of the current TC; propagate relevant bits into Status/EntryHi. */
void helper_mtc0_tcstatus (target_ulong arg1)
{
    uint32_t mask = env->CP0_TCStatus_rw_bitmask;
    uint32_t newval;

    newval = (env->active_tc.CP0_TCStatus & ~mask) | (arg1 & mask);

    env->active_tc.CP0_TCStatus = newval;
    sync_c0_tcstatus(env, env->current_tc, newval);
}
/* MTTR to TCStatus of the targetted TC (may live on another VPE). */
void helper_mttc0_tcstatus (target_ulong arg1)
{
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
    CPUState *other = mips_cpu_map_tc(&other_tc);

    if (other_tc == other->current_tc)
        other->active_tc.CP0_TCStatus = arg1;
    else
        other->tcs[other_tc].CP0_TCStatus = arg1;
    sync_c0_tcstatus(other, other_tc, arg1);
}

/* TCBind of the current TC: CurVPE is only writable in VPE-config state. */
void helper_mtc0_tcbind (target_ulong arg1)
{
    uint32_t mask = (1 << CP0TCBd_TBE);
    uint32_t newval;

    if (env->mvp->CP0_MVPControl & (1 << CP0MVPCo_VPC))
        mask |= (1 << CP0TCBd_CurVPE);
    newval = (env->active_tc.CP0_TCBind & ~mask) | (arg1 & mask);
    env->active_tc.CP0_TCBind = newval;
}
/* MTTR to TCBind of the targetted TC. */
void helper_mttc0_tcbind (target_ulong arg1)
{
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
    uint32_t mask = (1 << CP0TCBd_TBE);
    uint32_t newval;
    CPUState *other = mips_cpu_map_tc(&other_tc);

    if (other->mvp->CP0_MVPControl & (1 << CP0MVPCo_VPC))
        mask |= (1 << CP0TCBd_CurVPE);
    if (other_tc == other->current_tc) {
        newval = (other->active_tc.CP0_TCBind & ~mask) | (arg1 & mask);
        other->active_tc.CP0_TCBind = newval;
    } else {
        newval = (other->tcs[other_tc].CP0_TCBind & ~mask) | (arg1 & mask);
        other->tcs[other_tc].CP0_TCBind = newval;
    }
}
/* TCRestart: set the restart PC of the current TC; clears the dirty bit
   and any outstanding LL/SC reservation. */
void helper_mtc0_tcrestart (target_ulong arg1)
{
    env->active_tc.PC = arg1;
    env->active_tc.CP0_TCStatus &= ~(1 << CP0TCSt_TDS);
    env->lladdr = 0ULL;
    /* MIPS16 not implemented. */
}

/* MTTR to TCRestart of the targetted TC. */
void helper_mttc0_tcrestart (target_ulong arg1)
{
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
    CPUState *other = mips_cpu_map_tc(&other_tc);

    if (other_tc == other->current_tc) {
        other->active_tc.PC = arg1;
        other->active_tc.CP0_TCStatus &= ~(1 << CP0TCSt_TDS);
        other->lladdr = 0ULL;
        /* MIPS16 not implemented. */
    } else {
        other->tcs[other_tc].PC = arg1;
        other->tcs[other_tc].CP0_TCStatus &= ~(1 << CP0TCSt_TDS);
        other->lladdr = 0ULL;
        /* MIPS16 not implemented. */
    }
}
/* TCHalt: halting the current TC puts it to sleep, clearing it wakes it. */
void helper_mtc0_tchalt (target_ulong arg1)
{
    env->active_tc.CP0_TCHalt = arg1 & 0x1;

    // TODO: Halt TC / Restart (if allocated+active) TC.
    if (env->active_tc.CP0_TCHalt & 1) {
        mips_tc_sleep(env, env->current_tc);
    } else {
        mips_tc_wake(env, env->current_tc);
    }
}

/* MTTR to TCHalt of the targetted TC. */
void helper_mttc0_tchalt (target_ulong arg1)
{
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
    CPUState *other = mips_cpu_map_tc(&other_tc);

    // TODO: Halt TC / Restart (if allocated+active) TC.

    if (other_tc == other->current_tc)
        other->active_tc.CP0_TCHalt = arg1;
    else
        other->tcs[other_tc].CP0_TCHalt = arg1;

    if (arg1 & 1) {
        mips_tc_sleep(other, other_tc);
    } else {
        mips_tc_wake(other, other_tc);
    }
}
/* TCContext / TCSchedule / TCScheFBack writers: plain stores to the
   current TC or, for the mttc0 variants, to the targetted TC. */
void helper_mtc0_tccontext (target_ulong arg1)
{
    env->active_tc.CP0_TCContext = arg1;
}

void helper_mttc0_tccontext (target_ulong arg1)
{
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
    CPUState *other = mips_cpu_map_tc(&other_tc);

    if (other_tc == other->current_tc)
        other->active_tc.CP0_TCContext = arg1;
    else
        other->tcs[other_tc].CP0_TCContext = arg1;
}

void helper_mtc0_tcschedule (target_ulong arg1)
{
    env->active_tc.CP0_TCSchedule = arg1;
}

void helper_mttc0_tcschedule (target_ulong arg1)
{
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
    CPUState *other = mips_cpu_map_tc(&other_tc);

    if (other_tc == other->current_tc)
        other->active_tc.CP0_TCSchedule = arg1;
    else
        other->tcs[other_tc].CP0_TCSchedule = arg1;
}

void helper_mtc0_tcschefback (target_ulong arg1)
{
    env->active_tc.CP0_TCScheFBack = arg1;
}

void helper_mttc0_tcschefback (target_ulong arg1)
{
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
    CPUState *other = mips_cpu_map_tc(&other_tc);

    if (other_tc == other->current_tc)
        other->active_tc.CP0_TCScheFBack = arg1;
    else
        other->tcs[other_tc].CP0_TCScheFBack = arg1;
}
/* EntryLo1: mask to the implemented PFN/flag bits. */
void helper_mtc0_entrylo1 (target_ulong arg1)
{
    /* Large physaddr (PABITS) not implemented */
    /* 1k pages not implemented */
    env->CP0_EntryLo1 = arg1 & 0x3FFFFFFF;
}

/* Context: only the PTEBase field (above bit 22) is writable. */
void helper_mtc0_context (target_ulong arg1)
{
    env->CP0_Context = (env->CP0_Context & 0x007FFFFF) | (arg1 & ~0x007FFFFF);
}

void helper_mtc0_pagemask (target_ulong arg1)
{
    /* 1k pages not implemented */
    env->CP0_PageMask = arg1 & (0x1FFFFFFF & (TARGET_PAGE_MASK << 1));
}

void helper_mtc0_pagegrain (target_ulong arg1)
{
    /* SmartMIPS not implemented */
    /* Large physaddr (PABITS) not implemented */
    /* 1k pages not implemented */
    env->CP0_PageGrain = 0;
}

/* Wired: clamp to the number of TLB entries actually modelled. */
void helper_mtc0_wired (target_ulong arg1)
{
    env->CP0_Wired = arg1 % env->tlb->nb_tlb;
}

/* SRSConf0-4: shadow register set configuration; write-one semantics
   restricted to the per-CPU writable bitmask. */
void helper_mtc0_srsconf0 (target_ulong arg1)
{
    env->CP0_SRSConf0 |= arg1 & env->CP0_SRSConf0_rw_bitmask;
}

void helper_mtc0_srsconf1 (target_ulong arg1)
{
    env->CP0_SRSConf1 |= arg1 & env->CP0_SRSConf1_rw_bitmask;
}

void helper_mtc0_srsconf2 (target_ulong arg1)
{
    env->CP0_SRSConf2 |= arg1 & env->CP0_SRSConf2_rw_bitmask;
}

void helper_mtc0_srsconf3 (target_ulong arg1)
{
    env->CP0_SRSConf3 |= arg1 & env->CP0_SRSConf3_rw_bitmask;
}

void helper_mtc0_srsconf4 (target_ulong arg1)
{
    env->CP0_SRSConf4 |= arg1 & env->CP0_SRSConf4_rw_bitmask;
}

/* HWREna: only the four standard RDHWR enable bits are implemented. */
void helper_mtc0_hwrena (target_ulong arg1)
{
    env->CP0_HWREna = arg1 & 0x0000000F;
}
/* Count: delegate to the timer model. */
void helper_mtc0_count (target_ulong arg1)
{
    cpu_mips_store_count(env, arg1);
}

/* EntryHi: VPN2 + ASID.  An ASID change invalidates qemu's cached TLB. */
void helper_mtc0_entryhi (target_ulong arg1)
{
    target_ulong old, val;

    /* 1k pages not implemented */
    val = arg1 & ((TARGET_PAGE_MASK << 1) | 0xFF);
#if defined(TARGET_MIPS64)
    val &= env->SEGMask;
#endif
    old = env->CP0_EntryHi;
    env->CP0_EntryHi = val;
    if (env->CP0_Config3 & (1 << CP0C3_MT)) {
        sync_c0_entryhi(env, env->current_tc);
    }
    /* If the ASID changes, flush qemu's TLB. */
    if ((old & 0xFF) != (val & 0xFF))
        cpu_mips_tlb_flush(env, 1);
}

/* MTTR to EntryHi of the VPE owning the targetted TC. */
void helper_mttc0_entryhi(target_ulong arg1)
{
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
    CPUState *other = mips_cpu_map_tc(&other_tc);

    other->CP0_EntryHi = arg1;
    sync_c0_entryhi(other, other_tc);
}

/* Compare: delegate to the timer model (may clear a pending timer irq). */
void helper_mtc0_compare (target_ulong arg1)
{
    cpu_mips_store_compare(env, arg1);
}
/* Status: apply the writable-bit mask, resync TC state (MT) or recompute
   hflags, and trace the transition when CPU_LOG_EXEC is enabled. */
void helper_mtc0_status (target_ulong arg1)
{
    uint32_t val, old;
    uint32_t mask = env->CP0_Status_rw_bitmask;

    val = arg1 & mask;
    old = env->CP0_Status;
    env->CP0_Status = (env->CP0_Status & ~mask) | val;
    if (env->CP0_Config3 & (1 << CP0C3_MT)) {
        sync_c0_status(env, env->current_tc);
    } else {
        compute_hflags(env);
    }

    if (qemu_loglevel_mask(CPU_LOG_EXEC)) {
        qemu_log("Status %08x (%08x) => %08x (%08x) Cause %08x",
                old, old & env->CP0_Cause & CP0Ca_IP_mask,
                val, val & env->CP0_Cause & CP0Ca_IP_mask,
                env->CP0_Cause);
        switch (env->hflags & MIPS_HFLAG_KSU) {
        case MIPS_HFLAG_UM: qemu_log(", UM\n"); break;
        case MIPS_HFLAG_SM: qemu_log(", SM\n"); break;
        case MIPS_HFLAG_KM: qemu_log("\n"); break;
        default: cpu_abort(env, "Invalid MMU mode!\n"); break;
        }
    }
}
/* MTTR to Status of the VPE owning the targetted TC. */
void helper_mttc0_status(target_ulong arg1)
{
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
    CPUState *other = mips_cpu_map_tc(&other_tc);

    /* Mask out read-only/unsupported bits before storing. */
    other->CP0_Status = arg1 & ~0xf1000018;
    sync_c0_status(other, other_tc);
}

/* IntCtl: only the VS field is writable. */
void helper_mtc0_intctl (target_ulong arg1)
{
    /* vectored interrupts not implemented, no performance counters. */
    env->CP0_IntCtl = (env->CP0_IntCtl & ~0x000003e0) | (arg1 & 0x000003e0);
}

/* SRSCtl: only ESS and PSS are writable. */
void helper_mtc0_srsctl (target_ulong arg1)
{
    uint32_t mask = (0xf << CP0SRSCtl_ESS) | (0xf << CP0SRSCtl_PSS);
    env->CP0_SRSCtl = (env->CP0_SRSCtl & ~mask) | (arg1 & mask);
}
/* Common Cause-register writer: updates writable bits, starts/stops the
   Count timer on a DC-bit change, and raises/clears software interrupts
   on IP0/IP1 changes. */
static void mtc0_cause(CPUState *cpu, target_ulong arg1)
{
    uint32_t mask = 0x00C00300;
    uint32_t old = cpu->CP0_Cause;
    int i;

    /* The DC (disable count) bit only exists from MIPS32R2 on. */
    if (cpu->insn_flags & ISA_MIPS32R2) {
        mask |= 1 << CP0Ca_DC;
    }

    cpu->CP0_Cause = (cpu->CP0_Cause & ~mask) | (arg1 & mask);

    if ((old ^ cpu->CP0_Cause) & (1 << CP0Ca_DC)) {
        if (cpu->CP0_Cause & (1 << CP0Ca_DC)) {
            cpu_mips_stop_count(cpu);
        } else {
            cpu_mips_start_count(cpu);
        }
    }

    /* Set/reset software interrupts */
    for (i = 0 ; i < 2 ; i++) {
        if ((old ^ cpu->CP0_Cause) & (1 << (CP0Ca_IP + i))) {
            cpu_mips_soft_irq(cpu, i, cpu->CP0_Cause & (1 << (CP0Ca_IP + i)));
        }
    }
}

void helper_mtc0_cause(target_ulong arg1)
{
    mtc0_cause(env, arg1);
}

/* MTTR to Cause of the VPE owning the targetted TC. */
void helper_mttc0_cause(target_ulong arg1)
{
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
    CPUState *other = mips_cpu_map_tc(&other_tc);

    mtc0_cause(other, arg1);
}
/* MFTR readers for EPC and EBase of the targetted TC's VPE. */
target_ulong helper_mftc0_epc(void)
{
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
    CPUState *other = mips_cpu_map_tc(&other_tc);

    return other->CP0_EPC;
}

target_ulong helper_mftc0_ebase(void)
{
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
    CPUState *other = mips_cpu_map_tc(&other_tc);

    return other->CP0_EBase;
}

/* EBase: only the exception base field (bits 12-29) is writable. */
void helper_mtc0_ebase (target_ulong arg1)
{
    /* vectored interrupts not implemented */
    env->CP0_EBase = (env->CP0_EBase & ~0x3FFFF000) | (arg1 & 0x3FFFF000);
}

void helper_mttc0_ebase(target_ulong arg1)
{
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
    CPUState *other = mips_cpu_map_tc(&other_tc);
    other->CP0_EBase = (other->CP0_EBase & ~0x3FFFF000) | (arg1 & 0x3FFFF000);
}
/* MFTR from ConfigN (N = idx) of the targetted TC's VPE.
   Returns 0 for reserved selectors (4, 5 and anything above 7). */
target_ulong helper_mftc0_configx(target_ulong idx)
{
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
    CPUState *other = mips_cpu_map_tc(&other_tc);

    switch (idx) {
    case 0: return other->CP0_Config0;
    case 1: return other->CP0_Config1;
    case 2: return other->CP0_Config2;
    case 3: return other->CP0_Config3;
    /* 4 and 5 are reserved. */
    case 6: return other->CP0_Config6;
    case 7: return other->CP0_Config7;
    default:
        break;
    }
    return 0;
}
/* Config0: only K0 (kseg0 cacheability, bits 0-2) is writable. */
void helper_mtc0_config0 (target_ulong arg1)
{
    env->CP0_Config0 = (env->CP0_Config0 & 0x81FFFFF8) | (arg1 & 0x00000007);
}

/* Config2: no writable fields are modelled, so arg1 is deliberately
   ignored and the register is simply masked to its implemented bits. */
void helper_mtc0_config2 (target_ulong arg1)
{
    /* tertiary/secondary caches not implemented */
    env->CP0_Config2 = (env->CP0_Config2 & 0x8FFF0FFF);
}

/* LLAddr: shift the written value into physical form and merge under the
   per-CPU writable-bit mask. */
void helper_mtc0_lladdr (target_ulong arg1)
{
    target_long mask = env->CP0_LLAddr_rw_bitmask;
    arg1 = arg1 << env->CP0_LLAddr_shift;
    env->lladdr = (env->lladdr & ~mask) | (arg1 & mask);
}
/* WatchLo[sel]: store the watch address, dropping the I/R/W enable bits. */
void helper_mtc0_watchlo (target_ulong arg1, uint32_t sel)
{
    /* Watch exceptions for instructions, data loads, data stores
       not implemented. */
    env->CP0_WatchLo[sel] = (arg1 & ~0x7);
}

/* WatchHi[sel]: writable fields plus write-one-to-clear I/R/W status. */
void helper_mtc0_watchhi (target_ulong arg1, uint32_t sel)
{
    env->CP0_WatchHi[sel] = (arg1 & 0x40FF0FF8);
    env->CP0_WatchHi[sel] &= ~(env->CP0_WatchHi[sel] & arg1 & 0x7);
}

/* XContext (MIPS64): only the PTEBase part above the BadVPN2 field is
   writable; the low bits are hardware-maintained. */
void helper_mtc0_xcontext (target_ulong arg1)
{
    target_ulong mask = (1ULL << (env->SEGBITS - 7)) - 1;
    env->CP0_XContext = (env->CP0_XContext & mask) | (arg1 & ~mask);
}
/* Framemask: stored verbatim, not interpreted. */
void helper_mtc0_framemask (target_ulong arg1)
{
    env->CP0_Framemask = arg1; /* XXX */
}

/* Debug: merge writable bits and keep the DM hflag in sync with the
   Debug.DM bit being written. */
void helper_mtc0_debug (target_ulong arg1)
{
    env->CP0_Debug = (env->CP0_Debug & 0x8C03FC1F) | (arg1 & 0x13300120);
    if (arg1 & (1 << CP0DB_DM))
        env->hflags |= MIPS_HFLAG_DM;
    else
        env->hflags &= ~MIPS_HFLAG_DM;
}
/* MTTR to Debug: SSt/Halt go to the targetted TC's per-TC debug status,
   the remaining bits to the shared Debug register. */
void helper_mttc0_debug(target_ulong arg1)
{
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
    uint32_t val = arg1 & ((1 << CP0DB_SSt) | (1 << CP0DB_Halt));
    CPUState *other = mips_cpu_map_tc(&other_tc);

    /* XXX: Might be wrong, check with EJTAG spec. */
    if (other_tc == other->current_tc)
        other->active_tc.CP0_Debug_tcstatus = val;
    else
        other->tcs[other_tc].CP0_Debug_tcstatus = val;
    other->CP0_Debug = (other->CP0_Debug &
                     ((1 << CP0DB_SSt) | (1 << CP0DB_Halt))) |
                     (arg1 & ~((1 << CP0DB_SSt) | (1 << CP0DB_Halt)));
}
/* Performance0 and cache tag/data registers: stored with minimal
   masking; the cache test registers are not interpreted (XXX). */
void helper_mtc0_performance0 (target_ulong arg1)
{
    env->CP0_Performance0 = arg1 & 0x000007ff;
}

void helper_mtc0_taglo (target_ulong arg1)
{
    env->CP0_TagLo = arg1 & 0xFFFFFCF6;
}

void helper_mtc0_datalo (target_ulong arg1)
{
    env->CP0_DataLo = arg1; /* XXX */
}

void helper_mtc0_taghi (target_ulong arg1)
{
    env->CP0_TagHi = arg1; /* XXX */
}

void helper_mtc0_datahi (target_ulong arg1)
{
    env->CP0_DataHi = arg1; /* XXX */
}
/* MIPS MT functions */
/* MFTR readers for the targetted TC's GPRs, LO/HI/ACX accumulators and
   DSPControl.  The active TC's state lives in active_tc, all others in
   the tcs[] array. */
target_ulong helper_mftgpr(uint32_t sel)
{
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
    CPUState *other = mips_cpu_map_tc(&other_tc);

    if (other_tc == other->current_tc)
        return other->active_tc.gpr[sel];
    else
        return other->tcs[other_tc].gpr[sel];
}

target_ulong helper_mftlo(uint32_t sel)
{
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
    CPUState *other = mips_cpu_map_tc(&other_tc);

    if (other_tc == other->current_tc)
        return other->active_tc.LO[sel];
    else
        return other->tcs[other_tc].LO[sel];
}

target_ulong helper_mfthi(uint32_t sel)
{
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
    CPUState *other = mips_cpu_map_tc(&other_tc);

    if (other_tc == other->current_tc)
        return other->active_tc.HI[sel];
    else
        return other->tcs[other_tc].HI[sel];
}

target_ulong helper_mftacx(uint32_t sel)
{
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
    CPUState *other = mips_cpu_map_tc(&other_tc);

    if (other_tc == other->current_tc)
        return other->active_tc.ACX[sel];
    else
        return other->tcs[other_tc].ACX[sel];
}

target_ulong helper_mftdsp(void)
{
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
    CPUState *other = mips_cpu_map_tc(&other_tc);

    if (other_tc == other->current_tc)
        return other->active_tc.DSPControl;
    else
        return other->tcs[other_tc].DSPControl;
}
/* MTTR writers: mirror images of the mft* readers above. */
void helper_mttgpr(target_ulong arg1, uint32_t sel)
{
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
    CPUState *other = mips_cpu_map_tc(&other_tc);

    if (other_tc == other->current_tc)
        other->active_tc.gpr[sel] = arg1;
    else
        other->tcs[other_tc].gpr[sel] = arg1;
}

void helper_mttlo(target_ulong arg1, uint32_t sel)
{
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
    CPUState *other = mips_cpu_map_tc(&other_tc);

    if (other_tc == other->current_tc)
        other->active_tc.LO[sel] = arg1;
    else
        other->tcs[other_tc].LO[sel] = arg1;
}

void helper_mtthi(target_ulong arg1, uint32_t sel)
{
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
    CPUState *other = mips_cpu_map_tc(&other_tc);

    if (other_tc == other->current_tc)
        other->active_tc.HI[sel] = arg1;
    else
        other->tcs[other_tc].HI[sel] = arg1;
}

void helper_mttacx(target_ulong arg1, uint32_t sel)
{
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
    CPUState *other = mips_cpu_map_tc(&other_tc);

    if (other_tc == other->current_tc)
        other->active_tc.ACX[sel] = arg1;
    else
        other->tcs[other_tc].ACX[sel] = arg1;
}

void helper_mttdsp(target_ulong arg1)
{
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
    CPUState *other = mips_cpu_map_tc(&other_tc);

    if (other_tc == other->current_tc)
        other->active_tc.DSPControl = arg1;
    else
        other->tcs[other_tc].DSPControl = arg1;
}
/* MIPS MT functions */
/* DMT/EMT (disable/enable multi-threading): not implemented yet;
   architecturally they return the previous TCStatus value. */
target_ulong helper_dmt(void)
{
    // TODO
    return 0;
}

target_ulong helper_emt(void)
{
    // TODO
    return 0;
}
/* DVPE: disable all VPEs except the one executing the instruction.
   Returns the previous MVPControl value. */
target_ulong helper_dvpe(void)
{
    CPUState *other_cpu = first_cpu;
    target_ulong prev = env->mvp->CP0_MVPControl;

    do {
        /* Turn off all VPEs except the one executing the dvpe. */
        if (other_cpu != env) {
            other_cpu->mvp->CP0_MVPControl &= ~(1 << CP0MVPCo_EVP);
            mips_vpe_sleep(other_cpu);
        }
        other_cpu = other_cpu->next_cpu;
    } while (other_cpu);
    return prev;
}

/* EVPE: re-enable the other VPEs.  Returns the previous MVPControl. */
target_ulong helper_evpe(void)
{
    CPUState *other_cpu = first_cpu;
    target_ulong prev = env->mvp->CP0_MVPControl;

    do {
        if (other_cpu != env
            /* If the VPE is WFI, don't disturb its sleep. */
            && !mips_vpe_is_wfi(other_cpu)) {
            /* Enable the VPE. */
            other_cpu->mvp->CP0_MVPControl |= (1 << CP0MVPCo_EVP);
            mips_vpe_wake(other_cpu); /* And wake it up. */
        }
        other_cpu = other_cpu->next_cpu;
    } while (other_cpu);
    return prev;
}
1943 #endif /* !CONFIG_USER_ONLY */
/* FORK: allocate a new TC.  Not implemented; the store to the new TC's
   register is still a TODO, so this is currently a no-op. */
void helper_fork(target_ulong arg1, target_ulong arg2)
{
    // arg1 = rt, arg2 = rs
    arg1 = 0;
    // TODO: store to TC register
}

/* YIELD: negative qualifier -> possible YSI thread exception; zero ->
   TC deallocation (not implemented); positive -> gated-storage style
   thread exception.  Returns YQMask per the architecture. */
target_ulong helper_yield(target_ulong arg)
{
    target_long arg1 = arg;

    if (arg1 < 0) {
        /* No scheduling policy implemented. */
        if (arg1 != -2) {
            if (env->CP0_VPEControl & (1 << CP0VPECo_YSI) &&
                env->active_tc.CP0_TCStatus & (1 << CP0TCSt_DT)) {
                env->CP0_VPEControl &= ~(0x7 << CP0VPECo_EXCPT);
                env->CP0_VPEControl |= 4 << CP0VPECo_EXCPT;
                helper_raise_exception(EXCP_THREAD);
            }
        }
    } else if (arg1 == 0) {
        if (0 /* TODO: TC underflow */) {
            env->CP0_VPEControl &= ~(0x7 << CP0VPECo_EXCPT);
            helper_raise_exception(EXCP_THREAD);
        } else {
            // TODO: Deallocate TC
        }
    } else if (arg1 > 0) {
        /* Yield qualifier inputs not implemented. */
        env->CP0_VPEControl &= ~(0x7 << CP0VPECo_EXCPT);
        env->CP0_VPEControl |= 2 << CP0VPECo_EXCPT;
        helper_raise_exception(EXCP_THREAD);
    }
    return env->CP0_YQMask;
}
1982 #ifndef CONFIG_USER_ONLY
/* TLB management */
/* Flush qemu's software TLB and reset the shadow-entry high-water mark. */
static void cpu_mips_tlb_flush (CPUState *env, int flush_global)
{
    /* Flush qemu's TLB and discard all shadowed entries. */
    tlb_flush (env, flush_global);
    env->tlb->tlb_in_use = env->tlb->nb_tlb;
}

/* Invalidate shadow TLB entries from index 'first' upwards. */
static void r4k_mips_tlb_flush_extra (CPUState *env, int first)
{
    /* Discard entries from env->tlb[first] onwards. */
    while (env->tlb->tlb_in_use > first) {
        r4k_invalidate_tlb(env, --env->tlb->tlb_in_use, 0);
    }
}
/* Populate TLB entry 'idx' from the current CP0 EntryHi/PageMask/
   EntryLo0/EntryLo1 contents. */
static void r4k_fill_tlb (int idx)
{
    r4k_tlb_t *tlb;

    /* XXX: detect conflicting TLBs and raise a MCHECK exception when needed */
    tlb = &env->tlb->mmu.r4k.tlb[idx];
    tlb->VPN = env->CP0_EntryHi & (TARGET_PAGE_MASK << 1);
#if defined(TARGET_MIPS64)
    tlb->VPN &= env->SEGMask;
#endif
    tlb->ASID = env->CP0_EntryHi & 0xFF;
    tlb->PageMask = env->CP0_PageMask;
    /* Entry is global only if both halves have G set. */
    tlb->G = env->CP0_EntryLo0 & env->CP0_EntryLo1 & 1;
    tlb->V0 = (env->CP0_EntryLo0 & 2) != 0;
    tlb->D0 = (env->CP0_EntryLo0 & 4) != 0;
    tlb->C0 = (env->CP0_EntryLo0 >> 3) & 0x7;
    tlb->PFN[0] = (env->CP0_EntryLo0 >> 6) << 12;
    tlb->V1 = (env->CP0_EntryLo1 & 2) != 0;
    tlb->D1 = (env->CP0_EntryLo1 & 4) != 0;
    tlb->C1 = (env->CP0_EntryLo1 >> 3) & 0x7;
    tlb->PFN[1] = (env->CP0_EntryLo1 >> 6) << 12;
}
/* TLBWI: write the TLB entry selected by CP0 Index. */
void r4k_helper_tlbwi (void)
{
    int idx;

    idx = (env->CP0_Index & ~0x80000000) % env->tlb->nb_tlb;

    /* Discard cached TLB entries.  We could avoid doing this if the
       tlbwi is just upgrading access permissions on the current entry;
       that might be a further win. */
    r4k_mips_tlb_flush_extra (env, env->tlb->nb_tlb);

    r4k_invalidate_tlb(env, idx, 0);
    r4k_fill_tlb(idx);
}

/* TLBWR: write a random (non-wired) TLB entry. */
void r4k_helper_tlbwr (void)
{
    int r = cpu_mips_get_random(env);

    r4k_invalidate_tlb(env, r, 1);
    r4k_fill_tlb(r);
}
/* TLBP: probe for an entry matching EntryHi.  On a hit, Index is set to
   the entry number; on a miss, Index.P (bit 31) is set and any matching
   shadow entries are discarded. */
void r4k_helper_tlbp (void)
{
    r4k_tlb_t *tlb;
    target_ulong mask;
    target_ulong tag;
    target_ulong VPN;
    uint8_t ASID;
    int i;

    ASID = env->CP0_EntryHi & 0xFF;
    for (i = 0; i < env->tlb->nb_tlb; i++) {
        tlb = &env->tlb->mmu.r4k.tlb[i];
        /* 1k pages are not supported. */
        mask = tlb->PageMask | ~(TARGET_PAGE_MASK << 1);
        tag = env->CP0_EntryHi & ~mask;
        VPN = tlb->VPN & ~mask;
        /* Check ASID, virtual page number & size */
        if ((tlb->G == 1 || tlb->ASID == ASID) && VPN == tag) {
            /* TLB match */
            env->CP0_Index = i;
            break;
        }
    }
    if (i == env->tlb->nb_tlb) {
        /* No match.  Discard any shadow entries, if any of them match. */
        for (i = env->tlb->nb_tlb; i < env->tlb->tlb_in_use; i++) {
            tlb = &env->tlb->mmu.r4k.tlb[i];
            /* 1k pages are not supported. */
            mask = tlb->PageMask | ~(TARGET_PAGE_MASK << 1);
            tag = env->CP0_EntryHi & ~mask;
            VPN = tlb->VPN & ~mask;
            /* Check ASID, virtual page number & size */
            if ((tlb->G == 1 || tlb->ASID == ASID) && VPN == tag) {
                r4k_mips_tlb_flush_extra (env, i);
                break;
            }
        }

        env->CP0_Index |= 0x80000000;
    }
}
/* TLBR: read the TLB entry selected by Index back into EntryHi/PageMask/
   EntryLo0/EntryLo1. */
void r4k_helper_tlbr (void)
{
    r4k_tlb_t *tlb;
    uint8_t ASID;
    int idx;

    ASID = env->CP0_EntryHi & 0xFF;
    idx = (env->CP0_Index & ~0x80000000) % env->tlb->nb_tlb;
    tlb = &env->tlb->mmu.r4k.tlb[idx];

    /* If this will change the current ASID, flush qemu's TLB.  */
    if (ASID != tlb->ASID)
        cpu_mips_tlb_flush (env, 1);

    r4k_mips_tlb_flush_extra(env, env->tlb->nb_tlb);

    env->CP0_EntryHi = tlb->VPN | tlb->ASID;
    env->CP0_PageMask = tlb->PageMask;
    env->CP0_EntryLo0 = tlb->G | (tlb->V0 << 1) | (tlb->D0 << 2) |
                        (tlb->C0 << 3) | (tlb->PFN[0] >> 6);
    env->CP0_EntryLo1 = tlb->G | (tlb->V1 << 1) | (tlb->D1 << 2) |
                        (tlb->C1 << 3) | (tlb->PFN[1] >> 6);
}
/* Generic TLB-op entry points: dispatch through the per-MMU-model
   function pointers installed in env->tlb. */
void helper_tlbwi(void)
{
    env->tlb->helper_tlbwi();
}

void helper_tlbwr(void)
{
    env->tlb->helper_tlbwr();
}

void helper_tlbp(void)
{
    env->tlb->helper_tlbp();
}

void helper_tlbr(void)
{
    env->tlb->helper_tlbr();
}
/* Specials */
/* DI: clear Status.IE, returning the previous Status value. */
target_ulong helper_di (void)
{
    target_ulong t0 = env->CP0_Status;

    env->CP0_Status = t0 & ~(1 << CP0St_IE);
    return t0;
}

/* EI: set Status.IE, returning the previous Status value. */
target_ulong helper_ei (void)
{
    target_ulong t0 = env->CP0_Status;

    env->CP0_Status = t0 | (1 << CP0St_IE);
    return t0;
}
/* Trace helpers for ERET/DERET: log the PC/EPC (and ErrorEPC/DEPC when
   relevant) before and after the return, under CPU_LOG_EXEC. */
static void debug_pre_eret (void)
{
    if (qemu_loglevel_mask(CPU_LOG_EXEC)) {
        qemu_log("ERET: PC " TARGET_FMT_lx " EPC " TARGET_FMT_lx,
                env->active_tc.PC, env->CP0_EPC);
        if (env->CP0_Status & (1 << CP0St_ERL))
            qemu_log(" ErrorEPC " TARGET_FMT_lx, env->CP0_ErrorEPC);
        if (env->hflags & MIPS_HFLAG_DM)
            qemu_log(" DEPC " TARGET_FMT_lx, env->CP0_DEPC);
        qemu_log("\n");
    }
}

static void debug_post_eret (void)
{
    if (qemu_loglevel_mask(CPU_LOG_EXEC)) {
        qemu_log("  =>  PC " TARGET_FMT_lx " EPC " TARGET_FMT_lx,
                env->active_tc.PC, env->CP0_EPC);
        if (env->CP0_Status & (1 << CP0St_ERL))
            qemu_log(" ErrorEPC " TARGET_FMT_lx, env->CP0_ErrorEPC);
        if (env->hflags & MIPS_HFLAG_DM)
            qemu_log(" DEPC " TARGET_FMT_lx, env->CP0_DEPC);
        switch (env->hflags & MIPS_HFLAG_KSU) {
        case MIPS_HFLAG_UM: qemu_log(", UM\n"); break;
        case MIPS_HFLAG_SM: qemu_log(", SM\n"); break;
        case MIPS_HFLAG_KM: qemu_log("\n"); break;
        default: cpu_abort(env, "Invalid MMU mode!\n"); break;
        }
    }
}
/* Set the PC for an exception return; bit 0 of the target selects
   MIPS16/microMIPS mode via the M16 hflag. */
static void set_pc (target_ulong error_pc)
{
    env->active_tc.PC = error_pc & ~(target_ulong)1;
    if (error_pc & 1) {
        env->hflags |= MIPS_HFLAG_M16;
    } else {
        env->hflags &= ~(MIPS_HFLAG_M16);
    }
}

/* ERET: return from exception (EPC) or error (ErrorEPC, when ERL is
   set), clearing the corresponding Status bit and breaking any LL/SC
   reservation (lladdr = 1 can never match a real aligned address). */
void helper_eret (void)
{
    debug_pre_eret();
    if (env->CP0_Status & (1 << CP0St_ERL)) {
        set_pc(env->CP0_ErrorEPC);
        env->CP0_Status &= ~(1 << CP0St_ERL);
    } else {
        set_pc(env->CP0_EPC);
        env->CP0_Status &= ~(1 << CP0St_EXL);
    }
    compute_hflags(env);
    debug_post_eret();
    env->lladdr = 1;
}
2204 void helper_deret (void)
2206 debug_pre_eret();
2207 set_pc(env->CP0_DEPC);
2209 env->hflags &= MIPS_HFLAG_DM;
2210 compute_hflags(env);
2211 debug_post_eret();
2212 env->lladdr = 1;
2214 #endif /* !CONFIG_USER_ONLY */
/* RDHWR helpers: each hardware register is readable in kernel mode
   (CP0-accessible) or when the matching HWREna bit is set; otherwise a
   Reserved Instruction exception is raised. */
target_ulong helper_rdhwr_cpunum(void)
{
    if ((env->hflags & MIPS_HFLAG_CP0) ||
        (env->CP0_HWREna & (1 << 0)))
        return env->CP0_EBase & 0x3ff;
    else
        helper_raise_exception(EXCP_RI);

    return 0;
}

target_ulong helper_rdhwr_synci_step(void)
{
    if ((env->hflags & MIPS_HFLAG_CP0) ||
        (env->CP0_HWREna & (1 << 1)))
        return env->SYNCI_Step;
    else
        helper_raise_exception(EXCP_RI);

    return 0;
}

target_ulong helper_rdhwr_cc(void)
{
    if ((env->hflags & MIPS_HFLAG_CP0) ||
        (env->CP0_HWREna & (1 << 2)))
        return env->CP0_Count;
    else
        helper_raise_exception(EXCP_RI);

    return 0;
}

target_ulong helper_rdhwr_ccres(void)
{
    if ((env->hflags & MIPS_HFLAG_CP0) ||
        (env->CP0_HWREna & (1 << 3)))
        return env->CCRes;
    else
        helper_raise_exception(EXCP_RI);

    return 0;
}
/* Minimal PMON (boot monitor) semihosting: emulates a few console calls
   via the guest's argument registers (gpr[4] = a0, gpr[2] = v0). */
void helper_pmon (int function)
{
    function /= 2;
    switch (function) {
    case 2: /* TODO: char inbyte(int waitflag); */
        if (env->active_tc.gpr[4] == 0)
            env->active_tc.gpr[2] = -1;
        /* Fall through */
    case 11: /* TODO: char inbyte (void); */
        env->active_tc.gpr[2] = -1;
        break;
    case 3:
    case 12:
        /* outbyte: print the low byte of a0. */
        printf("%c", (char)(env->active_tc.gpr[4] & 0xFF));
        break;
    case 17:
        break;
    case 158:
        /* Print a NUL-terminated string at guest address a0.
           NOTE(review): treats the guest pointer as directly
           dereferenceable — only valid for user-mode emulation. */
        {
            unsigned char *fmt = (void *)(unsigned long)env->active_tc.gpr[4];
            printf("%s", fmt);
        }
        break;
    }
}
/* WAIT: halt the CPU until the next interrupt; EXCP_HLT unwinds back to
   the main execution loop. */
void helper_wait (void)
{
    env->halted = 1;
    cpu_reset_interrupt(env, CPU_INTERRUPT_WAKE);
    helper_raise_exception(EXCP_HLT);
}
2293 #if !defined(CONFIG_USER_ONLY)
2295 static void do_unaligned_access (target_ulong addr, int is_write, int is_user, void *retaddr);
2297 #define MMUSUFFIX _mmu
2298 #define ALIGNED_ONLY
2300 #define SHIFT 0
2301 #include "softmmu_template.h"
2303 #define SHIFT 1
2304 #include "softmmu_template.h"
2306 #define SHIFT 2
2307 #include "softmmu_template.h"
2309 #define SHIFT 3
2310 #include "softmmu_template.h"
/* Called by the softmmu templates on an unaligned access: record the
   faulting address, restore the guest state from the host return
   address, and raise an address-error exception (store vs. load). */
static void do_unaligned_access (target_ulong addr, int is_write, int is_user, void *retaddr)
{
    env->CP0_BadVAddr = addr;
    do_restore_state (retaddr);
    helper_raise_exception ((is_write == 1) ? EXCP_AdES : EXCP_AdEL);
}
/* Softmmu TLB-miss handler: try to refill from the guest MMU; on failure
   restore the CPU state from the translated-code return address and
   deliver the MMU exception set up by cpu_mips_handle_mmu_fault(). */
void tlb_fill (target_ulong addr, int is_write, int mmu_idx, void *retaddr)
{
    TranslationBlock *tb;
    CPUState *saved_env;
    unsigned long pc;
    int ret;

    /* XXX: hack to restore env in all cases, even if not called from
       generated code */
    saved_env = env;
    env = cpu_single_env;
    ret = cpu_mips_handle_mmu_fault(env, addr, is_write, mmu_idx);
    if (ret) {
        if (retaddr) {
            /* now we have a real cpu fault */
            pc = (unsigned long)retaddr;
            tb = tb_find_pc(pc);
            if (tb) {
                /* the PC is inside the translated code. It means that we have
                   a virtual CPU fault */
                cpu_restore_state(tb, env, pc);
            }
        }
        helper_raise_exception_err(env->exception_index, env->error_code);
    }
    env = saved_env;
}
/* Access to an unassigned physical address: raise an instruction or
   data bus error depending on the access type. */
void cpu_unassigned_access(CPUState *env1, target_phys_addr_t addr,
                           int is_write, int is_exec, int unused, int size)
{
    env = env1;

    if (is_exec)
        helper_raise_exception(EXCP_IBE);
    else
        helper_raise_exception(EXCP_DBE);
}
2357 #endif /* !CONFIG_USER_ONLY */
2359 /* Complex FPU operations which may need stack space. */
2361 #define FLOAT_ONE32 make_float32(0x3f8 << 20)
2362 #define FLOAT_ONE64 make_float64(0x3ffULL << 52)
2363 #define FLOAT_TWO32 make_float32(1 << 30)
2364 #define FLOAT_TWO64 make_float64(1ULL << 62)
2365 #define FLOAT_QNAN32 0x7fbfffff
2366 #define FLOAT_QNAN64 0x7ff7ffffffffffffULL
2367 #define FLOAT_SNAN32 0x7fffffff
2368 #define FLOAT_SNAN64 0x7fffffffffffffffULL
/* convert MIPS rounding mode in FCR31 to IEEE library */
/* Indexed by FCR31.RM (bits 1:0): RN, RZ, RP, RM. */
static unsigned int ieee_rm[] = {
    float_round_nearest_even,
    float_round_to_zero,
    float_round_up,
    float_round_down
};
2378 #define RESTORE_ROUNDING_MODE \
2379 set_float_rounding_mode(ieee_rm[env->active_fpu.fcr31 & 3], &env->active_fpu.fp_status)
2381 #define RESTORE_FLUSH_MODE \
2382 set_flush_to_zero((env->active_fpu.fcr31 & (1 << 24)) != 0, &env->active_fpu.fp_status);
/* CFC1: read an FPU control register.  Registers 25/26/28 are views
   onto subsets of FCR31 (condition codes, cause/flags, enables). */
target_ulong helper_cfc1 (uint32_t reg)
{
    target_ulong arg1;

    switch (reg) {
    case 0:
        /* FIR: implementation/revision register. */
        arg1 = (int32_t)env->active_fpu.fcr0;
        break;
    case 25:
        /* FCCR: the eight condition-code bits gathered together. */
        arg1 = ((env->active_fpu.fcr31 >> 24) & 0xfe) | ((env->active_fpu.fcr31 >> 23) & 0x1);
        break;
    case 26:
        /* FEXR: cause and flag fields. */
        arg1 = env->active_fpu.fcr31 & 0x0003f07c;
        break;
    case 28:
        /* FENR: enables, rounding mode and FS bit. */
        arg1 = (env->active_fpu.fcr31 & 0x00000f83) | ((env->active_fpu.fcr31 >> 22) & 0x4);
        break;
    default:
        /* FCSR itself. */
        arg1 = (int32_t)env->active_fpu.fcr31;
        break;
    }

    return arg1;
}
/* CTC1: write an FPU control register.  Writes with reserved bits set
   are silently ignored; a successful write re-syncs the softfloat
   rounding/flush modes and may raise a pending FP exception. */
void helper_ctc1 (target_ulong arg1, uint32_t reg)
{
    switch(reg) {
    case 25:
        /* FCCR: scatter the condition codes back into FCR31. */
        if (arg1 & 0xffffff00)
            return;
        env->active_fpu.fcr31 = (env->active_fpu.fcr31 & 0x017fffff) | ((arg1 & 0xfe) << 24) |
                     ((arg1 & 0x1) << 23);
        break;
    case 26:
        /* FEXR: cause and flag fields. */
        if (arg1 & 0x007c0000)
            return;
        env->active_fpu.fcr31 = (env->active_fpu.fcr31 & 0xfffc0f83) | (arg1 & 0x0003f07c);
        break;
    case 28:
        /* FENR: enables, rounding mode, FS. */
        if (arg1 & 0x007c0000)
            return;
        env->active_fpu.fcr31 = (env->active_fpu.fcr31 & 0xfefff07c) | (arg1 & 0x00000f83) |
                     ((arg1 & 0x4) << 22);
        break;
    case 31:
        if (arg1 & 0x007c0000)
            return;
        env->active_fpu.fcr31 = arg1;
        break;
    default:
        return;
    }
    /* set rounding mode */
    RESTORE_ROUNDING_MODE;
    /* set flush-to-zero mode */
    RESTORE_FLUSH_MODE;
    set_float_exception_flags(0, &env->active_fpu.fp_status);
    /* Unimplemented-operation (0x20) is always enabled. */
    if ((GET_FP_ENABLE(env->active_fpu.fcr31) | 0x20) & GET_FP_CAUSE(env->active_fpu.fcr31))
        helper_raise_exception(EXCP_FPE);
}
/* Translate softfloat exception flags into the MIPS FCSR cause bits. */
static inline int ieee_ex_to_mips(int xcpt)
{
    int ret = 0;
    if (xcpt) {
        if (xcpt & float_flag_invalid) {
            ret |= FP_INVALID;
        }
        if (xcpt & float_flag_overflow) {
            ret |= FP_OVERFLOW;
        }
        if (xcpt & float_flag_underflow) {
            ret |= FP_UNDERFLOW;
        }
        if (xcpt & float_flag_divbyzero) {
            ret |= FP_DIV0;
        }
        if (xcpt & float_flag_inexact) {
            ret |= FP_INEXACT;
        }
    }
    return ret;
}

/* Fold the accumulated softfloat exceptions into FCR31: set the cause
   field, then either raise an FP exception (if enabled) or accumulate
   into the sticky flag bits. */
static inline void update_fcr31(void)
{
    int tmp = ieee_ex_to_mips(get_float_exception_flags(&env->active_fpu.fp_status));

    SET_FP_CAUSE(env->active_fpu.fcr31, tmp);
    if (GET_FP_ENABLE(env->active_fpu.fcr31) & tmp)
        helper_raise_exception(EXCP_FPE);
    else
        UPDATE_FP_FLAGS(env->active_fpu.fcr31, tmp);
}
/* Float support.
   Single precition routines have a "s" suffix, double precision a
   "d" suffix, 32bit integer "w", 64bit integer "l", paired single "ps",
   paired single lower "pl", paired single upper "pu".  */

/* unary operations, modifying fp status */
uint64_t helper_float_sqrt_d(uint64_t fdt0)
{
    return float64_sqrt(fdt0, &env->active_fpu.fp_status);
}

uint32_t helper_float_sqrt_s(uint32_t fst0)
{
    return float32_sqrt(fst0, &env->active_fpu.fp_status);
}

/* CVT.D.S: single -> double. */
uint64_t helper_float_cvtd_s(uint32_t fst0)
{
    uint64_t fdt2;

    set_float_exception_flags(0, &env->active_fpu.fp_status);
    fdt2 = float32_to_float64(fst0, &env->active_fpu.fp_status);
    update_fcr31();
    return fdt2;
}

/* CVT.D.W: 32-bit integer -> double. */
uint64_t helper_float_cvtd_w(uint32_t wt0)
{
    uint64_t fdt2;

    set_float_exception_flags(0, &env->active_fpu.fp_status);
    fdt2 = int32_to_float64(wt0, &env->active_fpu.fp_status);
    update_fcr31();
    return fdt2;
}

/* CVT.D.L: 64-bit integer -> double. */
uint64_t helper_float_cvtd_l(uint64_t dt0)
{
    uint64_t fdt2;

    set_float_exception_flags(0, &env->active_fpu.fp_status);
    fdt2 = int64_to_float64(dt0, &env->active_fpu.fp_status);
    update_fcr31();
    return fdt2;
}
/* Convert double -> 64-bit integer.  On overflow/invalid the result is
 * replaced by the FLOAT_SNAN64 bit pattern (MIPS architected default). */
uint64_t helper_float_cvtl_d(uint64_t fdt0)
{
    uint64_t dt2;

    set_float_exception_flags(0, &env->active_fpu.fp_status);
    dt2 = float64_to_int64(fdt0, &env->active_fpu.fp_status);
    update_fcr31();
    if (GET_FP_CAUSE(env->active_fpu.fcr31) & (FP_OVERFLOW | FP_INVALID))
        dt2 = FLOAT_SNAN64;
    return dt2;
}

/* Convert single -> 64-bit integer; same default-result rule as above. */
uint64_t helper_float_cvtl_s(uint32_t fst0)
{
    uint64_t dt2;

    set_float_exception_flags(0, &env->active_fpu.fp_status);
    dt2 = float32_to_int64(fst0, &env->active_fpu.fp_status);
    update_fcr31();
    if (GET_FP_CAUSE(env->active_fpu.fcr31) & (FP_OVERFLOW | FP_INVALID))
        dt2 = FLOAT_SNAN64;
    return dt2;
}

/* Convert a pair of 32-bit integers (packed in dt0) to paired-single:
 * low word -> low single, high word -> high single. */
uint64_t helper_float_cvtps_pw(uint64_t dt0)
{
    uint32_t fst2;
    uint32_t fsth2;

    set_float_exception_flags(0, &env->active_fpu.fp_status);
    fst2 = int32_to_float32(dt0 & 0XFFFFFFFF, &env->active_fpu.fp_status);
    fsth2 = int32_to_float32(dt0 >> 32, &env->active_fpu.fp_status);
    update_fcr31();
    return ((uint64_t)fsth2 << 32) | fst2;
}

/* Convert paired-single -> pair of 32-bit integers.  If either half
 * overflows or is invalid, BOTH halves get the SNAN default pattern. */
uint64_t helper_float_cvtpw_ps(uint64_t fdt0)
{
    uint32_t wt2;
    uint32_t wth2;

    set_float_exception_flags(0, &env->active_fpu.fp_status);
    wt2 = float32_to_int32(fdt0 & 0XFFFFFFFF, &env->active_fpu.fp_status);
    wth2 = float32_to_int32(fdt0 >> 32, &env->active_fpu.fp_status);
    update_fcr31();
    if (GET_FP_CAUSE(env->active_fpu.fcr31) & (FP_OVERFLOW | FP_INVALID)) {
        wt2 = FLOAT_SNAN32;
        wth2 = FLOAT_SNAN32;
    }
    return ((uint64_t)wth2 << 32) | wt2;
}
/* Convert double -> single. */
uint32_t helper_float_cvts_d(uint64_t fdt0)
{
    uint32_t fst2;

    set_float_exception_flags(0, &env->active_fpu.fp_status);
    fst2 = float64_to_float32(fdt0, &env->active_fpu.fp_status);
    update_fcr31();
    return fst2;
}

/* Convert 32-bit signed integer -> single. */
uint32_t helper_float_cvts_w(uint32_t wt0)
{
    uint32_t fst2;

    set_float_exception_flags(0, &env->active_fpu.fp_status);
    fst2 = int32_to_float32(wt0, &env->active_fpu.fp_status);
    update_fcr31();
    return fst2;
}

/* Convert 64-bit signed integer -> single. */
uint32_t helper_float_cvts_l(uint64_t dt0)
{
    uint32_t fst2;

    set_float_exception_flags(0, &env->active_fpu.fp_status);
    fst2 = int64_to_float32(dt0, &env->active_fpu.fp_status);
    update_fcr31();
    return fst2;
}

/* CVT.S.PL: lower half of a paired-single is already a single, so this
 * is a plain move — but it still clears flags and updates FCR31. */
uint32_t helper_float_cvts_pl(uint32_t wt0)
{
    uint32_t wt2;

    set_float_exception_flags(0, &env->active_fpu.fp_status);
    wt2 = wt0;
    update_fcr31();
    return wt2;
}

/* CVT.S.PU: upper half of a paired-single; same move semantics. */
uint32_t helper_float_cvts_pu(uint32_t wth0)
{
    uint32_t wt2;

    set_float_exception_flags(0, &env->active_fpu.fp_status);
    wt2 = wth0;
    update_fcr31();
    return wt2;
}
/* Convert single -> 32-bit integer; SNAN32 default on overflow/invalid. */
uint32_t helper_float_cvtw_s(uint32_t fst0)
{
    uint32_t wt2;

    set_float_exception_flags(0, &env->active_fpu.fp_status);
    wt2 = float32_to_int32(fst0, &env->active_fpu.fp_status);
    update_fcr31();
    if (GET_FP_CAUSE(env->active_fpu.fcr31) & (FP_OVERFLOW | FP_INVALID))
        wt2 = FLOAT_SNAN32;
    return wt2;
}

/* Convert double -> 32-bit integer; SNAN32 default on overflow/invalid. */
uint32_t helper_float_cvtw_d(uint64_t fdt0)
{
    uint32_t wt2;

    set_float_exception_flags(0, &env->active_fpu.fp_status);
    wt2 = float64_to_int32(fdt0, &env->active_fpu.fp_status);
    update_fcr31();
    if (GET_FP_CAUSE(env->active_fpu.fcr31) & (FP_OVERFLOW | FP_INVALID))
        wt2 = FLOAT_SNAN32;
    return wt2;
}
/* ROUND.L.D: double -> 64-bit integer, rounding to nearest-even
 * regardless of the current FCR31 rounding mode; the guest mode is
 * restored afterwards via RESTORE_ROUNDING_MODE. */
uint64_t helper_float_roundl_d(uint64_t fdt0)
{
    uint64_t dt2;

    set_float_exception_flags(0, &env->active_fpu.fp_status);
    set_float_rounding_mode(float_round_nearest_even, &env->active_fpu.fp_status);
    dt2 = float64_to_int64(fdt0, &env->active_fpu.fp_status);
    RESTORE_ROUNDING_MODE;
    update_fcr31();
    if (GET_FP_CAUSE(env->active_fpu.fcr31) & (FP_OVERFLOW | FP_INVALID))
        dt2 = FLOAT_SNAN64;
    return dt2;
}

/* ROUND.L.S: single -> 64-bit integer, nearest-even. */
uint64_t helper_float_roundl_s(uint32_t fst0)
{
    uint64_t dt2;

    set_float_exception_flags(0, &env->active_fpu.fp_status);
    set_float_rounding_mode(float_round_nearest_even, &env->active_fpu.fp_status);
    dt2 = float32_to_int64(fst0, &env->active_fpu.fp_status);
    RESTORE_ROUNDING_MODE;
    update_fcr31();
    if (GET_FP_CAUSE(env->active_fpu.fcr31) & (FP_OVERFLOW | FP_INVALID))
        dt2 = FLOAT_SNAN64;
    return dt2;
}

/* ROUND.W.D: double -> 32-bit integer, nearest-even. */
uint32_t helper_float_roundw_d(uint64_t fdt0)
{
    uint32_t wt2;

    set_float_exception_flags(0, &env->active_fpu.fp_status);
    set_float_rounding_mode(float_round_nearest_even, &env->active_fpu.fp_status);
    wt2 = float64_to_int32(fdt0, &env->active_fpu.fp_status);
    RESTORE_ROUNDING_MODE;
    update_fcr31();
    if (GET_FP_CAUSE(env->active_fpu.fcr31) & (FP_OVERFLOW | FP_INVALID))
        wt2 = FLOAT_SNAN32;
    return wt2;
}

/* ROUND.W.S: single -> 32-bit integer, nearest-even. */
uint32_t helper_float_roundw_s(uint32_t fst0)
{
    uint32_t wt2;

    set_float_exception_flags(0, &env->active_fpu.fp_status);
    set_float_rounding_mode(float_round_nearest_even, &env->active_fpu.fp_status);
    wt2 = float32_to_int32(fst0, &env->active_fpu.fp_status);
    RESTORE_ROUNDING_MODE;
    update_fcr31();
    if (GET_FP_CAUSE(env->active_fpu.fcr31) & (FP_OVERFLOW | FP_INVALID))
        wt2 = FLOAT_SNAN32;
    return wt2;
}
/* TRUNC.L.D: double -> 64-bit integer, round toward zero.  Uses the
 * dedicated *_round_to_zero softfloat entry point, so no rounding-mode
 * save/restore is needed. */
uint64_t helper_float_truncl_d(uint64_t fdt0)
{
    uint64_t dt2;

    set_float_exception_flags(0, &env->active_fpu.fp_status);
    dt2 = float64_to_int64_round_to_zero(fdt0, &env->active_fpu.fp_status);
    update_fcr31();
    if (GET_FP_CAUSE(env->active_fpu.fcr31) & (FP_OVERFLOW | FP_INVALID))
        dt2 = FLOAT_SNAN64;
    return dt2;
}

/* TRUNC.L.S: single -> 64-bit integer, round toward zero. */
uint64_t helper_float_truncl_s(uint32_t fst0)
{
    uint64_t dt2;

    set_float_exception_flags(0, &env->active_fpu.fp_status);
    dt2 = float32_to_int64_round_to_zero(fst0, &env->active_fpu.fp_status);
    update_fcr31();
    if (GET_FP_CAUSE(env->active_fpu.fcr31) & (FP_OVERFLOW | FP_INVALID))
        dt2 = FLOAT_SNAN64;
    return dt2;
}

/* TRUNC.W.D: double -> 32-bit integer, round toward zero. */
uint32_t helper_float_truncw_d(uint64_t fdt0)
{
    uint32_t wt2;

    set_float_exception_flags(0, &env->active_fpu.fp_status);
    wt2 = float64_to_int32_round_to_zero(fdt0, &env->active_fpu.fp_status);
    update_fcr31();
    if (GET_FP_CAUSE(env->active_fpu.fcr31) & (FP_OVERFLOW | FP_INVALID))
        wt2 = FLOAT_SNAN32;
    return wt2;
}

/* TRUNC.W.S: single -> 32-bit integer, round toward zero. */
uint32_t helper_float_truncw_s(uint32_t fst0)
{
    uint32_t wt2;

    set_float_exception_flags(0, &env->active_fpu.fp_status);
    wt2 = float32_to_int32_round_to_zero(fst0, &env->active_fpu.fp_status);
    update_fcr31();
    if (GET_FP_CAUSE(env->active_fpu.fcr31) & (FP_OVERFLOW | FP_INVALID))
        wt2 = FLOAT_SNAN32;
    return wt2;
}
/* CEIL.L.D: double -> 64-bit integer, rounding up (+inf); guest
 * rounding mode restored afterwards. */
uint64_t helper_float_ceill_d(uint64_t fdt0)
{
    uint64_t dt2;

    set_float_exception_flags(0, &env->active_fpu.fp_status);
    set_float_rounding_mode(float_round_up, &env->active_fpu.fp_status);
    dt2 = float64_to_int64(fdt0, &env->active_fpu.fp_status);
    RESTORE_ROUNDING_MODE;
    update_fcr31();
    if (GET_FP_CAUSE(env->active_fpu.fcr31) & (FP_OVERFLOW | FP_INVALID))
        dt2 = FLOAT_SNAN64;
    return dt2;
}

/* CEIL.L.S: single -> 64-bit integer, rounding up. */
uint64_t helper_float_ceill_s(uint32_t fst0)
{
    uint64_t dt2;

    set_float_exception_flags(0, &env->active_fpu.fp_status);
    set_float_rounding_mode(float_round_up, &env->active_fpu.fp_status);
    dt2 = float32_to_int64(fst0, &env->active_fpu.fp_status);
    RESTORE_ROUNDING_MODE;
    update_fcr31();
    if (GET_FP_CAUSE(env->active_fpu.fcr31) & (FP_OVERFLOW | FP_INVALID))
        dt2 = FLOAT_SNAN64;
    return dt2;
}

/* CEIL.W.D: double -> 32-bit integer, rounding up. */
uint32_t helper_float_ceilw_d(uint64_t fdt0)
{
    uint32_t wt2;

    set_float_exception_flags(0, &env->active_fpu.fp_status);
    set_float_rounding_mode(float_round_up, &env->active_fpu.fp_status);
    wt2 = float64_to_int32(fdt0, &env->active_fpu.fp_status);
    RESTORE_ROUNDING_MODE;
    update_fcr31();
    if (GET_FP_CAUSE(env->active_fpu.fcr31) & (FP_OVERFLOW | FP_INVALID))
        wt2 = FLOAT_SNAN32;
    return wt2;
}

/* CEIL.W.S: single -> 32-bit integer, rounding up. */
uint32_t helper_float_ceilw_s(uint32_t fst0)
{
    uint32_t wt2;

    set_float_exception_flags(0, &env->active_fpu.fp_status);
    set_float_rounding_mode(float_round_up, &env->active_fpu.fp_status);
    wt2 = float32_to_int32(fst0, &env->active_fpu.fp_status);
    RESTORE_ROUNDING_MODE;
    update_fcr31();
    if (GET_FP_CAUSE(env->active_fpu.fcr31) & (FP_OVERFLOW | FP_INVALID))
        wt2 = FLOAT_SNAN32;
    return wt2;
}
/* FLOOR.L.D: double -> 64-bit integer, rounding down (-inf); guest
 * rounding mode restored afterwards. */
uint64_t helper_float_floorl_d(uint64_t fdt0)
{
    uint64_t dt2;

    set_float_exception_flags(0, &env->active_fpu.fp_status);
    set_float_rounding_mode(float_round_down, &env->active_fpu.fp_status);
    dt2 = float64_to_int64(fdt0, &env->active_fpu.fp_status);
    RESTORE_ROUNDING_MODE;
    update_fcr31();
    if (GET_FP_CAUSE(env->active_fpu.fcr31) & (FP_OVERFLOW | FP_INVALID))
        dt2 = FLOAT_SNAN64;
    return dt2;
}

/* FLOOR.L.S: single -> 64-bit integer, rounding down. */
uint64_t helper_float_floorl_s(uint32_t fst0)
{
    uint64_t dt2;

    set_float_exception_flags(0, &env->active_fpu.fp_status);
    set_float_rounding_mode(float_round_down, &env->active_fpu.fp_status);
    dt2 = float32_to_int64(fst0, &env->active_fpu.fp_status);
    RESTORE_ROUNDING_MODE;
    update_fcr31();
    if (GET_FP_CAUSE(env->active_fpu.fcr31) & (FP_OVERFLOW | FP_INVALID))
        dt2 = FLOAT_SNAN64;
    return dt2;
}

/* FLOOR.W.D: double -> 32-bit integer, rounding down. */
uint32_t helper_float_floorw_d(uint64_t fdt0)
{
    uint32_t wt2;

    set_float_exception_flags(0, &env->active_fpu.fp_status);
    set_float_rounding_mode(float_round_down, &env->active_fpu.fp_status);
    wt2 = float64_to_int32(fdt0, &env->active_fpu.fp_status);
    RESTORE_ROUNDING_MODE;
    update_fcr31();
    if (GET_FP_CAUSE(env->active_fpu.fcr31) & (FP_OVERFLOW | FP_INVALID))
        wt2 = FLOAT_SNAN32;
    return wt2;
}

/* FLOOR.W.S: single -> 32-bit integer, rounding down. */
uint32_t helper_float_floorw_s(uint32_t fst0)
{
    uint32_t wt2;

    set_float_exception_flags(0, &env->active_fpu.fp_status);
    set_float_rounding_mode(float_round_down, &env->active_fpu.fp_status);
    wt2 = float32_to_int32(fst0, &env->active_fpu.fp_status);
    RESTORE_ROUNDING_MODE;
    update_fcr31();
    if (GET_FP_CAUSE(env->active_fpu.fcr31) & (FP_OVERFLOW | FP_INVALID))
        wt2 = FLOAT_SNAN32;
    return wt2;
}
2868 /* unary operations, not modifying fp status */
/* Expand d/s/ps variants of a sign-manipulation op (abs, chs).  These
 * only flip/clear the sign bit, so they never raise IEEE exceptions and
 * take no fp_status argument. */
#define FLOAT_UNOP(name)                                 \
uint64_t helper_float_ ## name ## _d(uint64_t fdt0)      \
{                                                        \
    return float64_ ## name(fdt0);                       \
}                                                        \
uint32_t helper_float_ ## name ## _s(uint32_t fst0)      \
{                                                        \
    return float32_ ## name(fst0);                       \
}                                                        \
uint64_t helper_float_ ## name ## _ps(uint64_t fdt0)     \
{                                                        \
    uint32_t wt0;                                        \
    uint32_t wth0;                                       \
                                                         \
    /* apply to each 32-bit half of the paired-single */ \
    wt0 = float32_ ## name(fdt0 & 0XFFFFFFFF);           \
    wth0 = float32_ ## name(fdt0 >> 32);                 \
    return ((uint64_t)wth0 << 32) | wt0;                 \
}
FLOAT_UNOP(abs)
FLOAT_UNOP(chs)
#undef FLOAT_UNOP
2891 /* MIPS specific unary operations */
/* RECIP.D: full-precision reciprocal, computed as 1.0 / x. */
uint64_t helper_float_recip_d(uint64_t fdt0)
{
    uint64_t fdt2;

    set_float_exception_flags(0, &env->active_fpu.fp_status);
    fdt2 = float64_div(FLOAT_ONE64, fdt0, &env->active_fpu.fp_status);
    update_fcr31();
    return fdt2;
}

/* RECIP.S: single-precision reciprocal. */
uint32_t helper_float_recip_s(uint32_t fst0)
{
    uint32_t fst2;

    set_float_exception_flags(0, &env->active_fpu.fp_status);
    fst2 = float32_div(FLOAT_ONE32, fst0, &env->active_fpu.fp_status);
    update_fcr31();
    return fst2;
}
/* RSQRT.D: reciprocal square root, computed as 1.0 / sqrt(x); both
 * steps accumulate into the same flag set before update_fcr31(). */
uint64_t helper_float_rsqrt_d(uint64_t fdt0)
{
    uint64_t fdt2;

    set_float_exception_flags(0, &env->active_fpu.fp_status);
    fdt2 = float64_sqrt(fdt0, &env->active_fpu.fp_status);
    fdt2 = float64_div(FLOAT_ONE64, fdt2, &env->active_fpu.fp_status);
    update_fcr31();
    return fdt2;
}

/* RSQRT.S: single-precision reciprocal square root. */
uint32_t helper_float_rsqrt_s(uint32_t fst0)
{
    uint32_t fst2;

    set_float_exception_flags(0, &env->active_fpu.fp_status);
    fst2 = float32_sqrt(fst0, &env->active_fpu.fp_status);
    fst2 = float32_div(FLOAT_ONE32, fst2, &env->active_fpu.fp_status);
    update_fcr31();
    return fst2;
}
/* RECIP1.D (MIPS-3D reduced-precision reciprocal step): implemented
 * here at full precision as 1.0 / x. */
uint64_t helper_float_recip1_d(uint64_t fdt0)
{
    uint64_t fdt2;

    set_float_exception_flags(0, &env->active_fpu.fp_status);
    fdt2 = float64_div(FLOAT_ONE64, fdt0, &env->active_fpu.fp_status);
    update_fcr31();
    return fdt2;
}

/* RECIP1.S. */
uint32_t helper_float_recip1_s(uint32_t fst0)
{
    uint32_t fst2;

    set_float_exception_flags(0, &env->active_fpu.fp_status);
    fst2 = float32_div(FLOAT_ONE32, fst0, &env->active_fpu.fp_status);
    update_fcr31();
    return fst2;
}

/* RECIP1.PS: reciprocal of each 32-bit half of a paired-single. */
uint64_t helper_float_recip1_ps(uint64_t fdt0)
{
    uint32_t fst2;
    uint32_t fsth2;

    set_float_exception_flags(0, &env->active_fpu.fp_status);
    fst2 = float32_div(FLOAT_ONE32, fdt0 & 0XFFFFFFFF, &env->active_fpu.fp_status);
    fsth2 = float32_div(FLOAT_ONE32, fdt0 >> 32, &env->active_fpu.fp_status);
    update_fcr31();
    return ((uint64_t)fsth2 << 32) | fst2;
}
/* RSQRT1.D (MIPS-3D reduced-precision rsqrt step): implemented at full
 * precision as 1.0 / sqrt(x). */
uint64_t helper_float_rsqrt1_d(uint64_t fdt0)
{
    uint64_t fdt2;

    set_float_exception_flags(0, &env->active_fpu.fp_status);
    fdt2 = float64_sqrt(fdt0, &env->active_fpu.fp_status);
    fdt2 = float64_div(FLOAT_ONE64, fdt2, &env->active_fpu.fp_status);
    update_fcr31();
    return fdt2;
}

/* RSQRT1.S. */
uint32_t helper_float_rsqrt1_s(uint32_t fst0)
{
    uint32_t fst2;

    set_float_exception_flags(0, &env->active_fpu.fp_status);
    fst2 = float32_sqrt(fst0, &env->active_fpu.fp_status);
    fst2 = float32_div(FLOAT_ONE32, fst2, &env->active_fpu.fp_status);
    update_fcr31();
    return fst2;
}

/* RSQRT1.PS: 1/sqrt of each 32-bit half of a paired-single. */
uint64_t helper_float_rsqrt1_ps(uint64_t fdt0)
{
    uint32_t fst2;
    uint32_t fsth2;

    set_float_exception_flags(0, &env->active_fpu.fp_status);
    fst2 = float32_sqrt(fdt0 & 0XFFFFFFFF, &env->active_fpu.fp_status);
    fsth2 = float32_sqrt(fdt0 >> 32, &env->active_fpu.fp_status);
    fst2 = float32_div(FLOAT_ONE32, fst2, &env->active_fpu.fp_status);
    fsth2 = float32_div(FLOAT_ONE32, fsth2, &env->active_fpu.fp_status);
    update_fcr31();
    return ((uint64_t)fsth2 << 32) | fst2;
}
/* NOTE(review): FLOAT_OP appears unused in this region of the file —
 * confirm whether it is still referenced elsewhere before removing. */
#define FLOAT_OP(name, p) void helper_float_##name##_##p(void)

/* binary operations */
/* Expand d/s/ps variants of an IEEE binary op.  Each helper clears the
 * accumulated flags, performs the op, folds flags into FCR31, and on an
 * invalid operation substitutes the architected QNAN default result
 * (both halves for paired-single). */
#define FLOAT_BINOP(name)                                                  \
uint64_t helper_float_ ## name ## _d(uint64_t fdt0, uint64_t fdt1)         \
{                                                                          \
    uint64_t dt2;                                                          \
                                                                           \
    set_float_exception_flags(0, &env->active_fpu.fp_status);              \
    dt2 = float64_ ## name (fdt0, fdt1, &env->active_fpu.fp_status);       \
    update_fcr31();                                                        \
    if (GET_FP_CAUSE(env->active_fpu.fcr31) & FP_INVALID)                  \
        dt2 = FLOAT_QNAN64;                                                \
    return dt2;                                                            \
}                                                                          \
                                                                           \
uint32_t helper_float_ ## name ## _s(uint32_t fst0, uint32_t fst1)         \
{                                                                          \
    uint32_t wt2;                                                          \
                                                                           \
    set_float_exception_flags(0, &env->active_fpu.fp_status);              \
    wt2 = float32_ ## name (fst0, fst1, &env->active_fpu.fp_status);       \
    update_fcr31();                                                        \
    if (GET_FP_CAUSE(env->active_fpu.fcr31) & FP_INVALID)                  \
        wt2 = FLOAT_QNAN32;                                                \
    return wt2;                                                            \
}                                                                          \
                                                                           \
uint64_t helper_float_ ## name ## _ps(uint64_t fdt0, uint64_t fdt1)        \
{                                                                          \
    uint32_t fst0 = fdt0 & 0XFFFFFFFF;                                     \
    uint32_t fsth0 = fdt0 >> 32;                                           \
    uint32_t fst1 = fdt1 & 0XFFFFFFFF;                                     \
    uint32_t fsth1 = fdt1 >> 32;                                           \
    uint32_t wt2;                                                          \
    uint32_t wth2;                                                         \
                                                                           \
    set_float_exception_flags(0, &env->active_fpu.fp_status);              \
    wt2 = float32_ ## name (fst0, fst1, &env->active_fpu.fp_status);       \
    wth2 = float32_ ## name (fsth0, fsth1, &env->active_fpu.fp_status);    \
    update_fcr31();                                                        \
    if (GET_FP_CAUSE(env->active_fpu.fcr31) & FP_INVALID) {                \
        wt2 = FLOAT_QNAN32;                                                \
        wth2 = FLOAT_QNAN32;                                               \
    }                                                                      \
    return ((uint64_t)wth2 << 32) | wt2;                                   \
}

FLOAT_BINOP(add)
FLOAT_BINOP(sub)
FLOAT_BINOP(mul)
FLOAT_BINOP(div)
#undef FLOAT_BINOP
3056 /* ternary operations */
/* Expand d/s/ps variants of a two-step ternary op such as MADD
 * ((a op1 b) op2 c), with intermediate rounding after each step.
 * NOTE(review): unlike the binary ops these do not clear flags or call
 * update_fcr31() — confirm this is the intended exception behavior. */
#define FLOAT_TERNOP(name1, name2)                                            \
uint64_t helper_float_ ## name1 ## name2 ## _d(uint64_t fdt0, uint64_t fdt1,  \
                                               uint64_t fdt2)                 \
{                                                                             \
    fdt0 = float64_ ## name1 (fdt0, fdt1, &env->active_fpu.fp_status);        \
    return float64_ ## name2 (fdt0, fdt2, &env->active_fpu.fp_status);        \
}                                                                             \
                                                                              \
uint32_t helper_float_ ## name1 ## name2 ## _s(uint32_t fst0, uint32_t fst1,  \
                                               uint32_t fst2)                 \
{                                                                             \
    fst0 = float32_ ## name1 (fst0, fst1, &env->active_fpu.fp_status);        \
    return float32_ ## name2 (fst0, fst2, &env->active_fpu.fp_status);        \
}                                                                             \
                                                                              \
uint64_t helper_float_ ## name1 ## name2 ## _ps(uint64_t fdt0, uint64_t fdt1, \
                                                uint64_t fdt2)                \
{                                                                             \
    uint32_t fst0 = fdt0 & 0XFFFFFFFF;                                        \
    uint32_t fsth0 = fdt0 >> 32;                                              \
    uint32_t fst1 = fdt1 & 0XFFFFFFFF;                                        \
    uint32_t fsth1 = fdt1 >> 32;                                              \
    uint32_t fst2 = fdt2 & 0XFFFFFFFF;                                        \
    uint32_t fsth2 = fdt2 >> 32;                                              \
                                                                              \
    fst0 = float32_ ## name1 (fst0, fst1, &env->active_fpu.fp_status);        \
    fsth0 = float32_ ## name1 (fsth0, fsth1, &env->active_fpu.fp_status);     \
    fst2 = float32_ ## name2 (fst0, fst2, &env->active_fpu.fp_status);        \
    fsth2 = float32_ ## name2 (fsth0, fsth2, &env->active_fpu.fp_status);     \
    return ((uint64_t)fsth2 << 32) | fst2;                                    \
}

FLOAT_TERNOP(mul, add)
FLOAT_TERNOP(mul, sub)
#undef FLOAT_TERNOP
3093 /* negated ternary operations */
/* Expand d/s/ps variants of the negated ternary ops (NMADD/NMSUB):
 * -((a op1 b) op2 c), the final negation done with a sign flip (chs).
 * Same flag-handling caveat as FLOAT_TERNOP. */
#define FLOAT_NTERNOP(name1, name2)                                            \
uint64_t helper_float_n ## name1 ## name2 ## _d(uint64_t fdt0, uint64_t fdt1,  \
                                                uint64_t fdt2)                 \
{                                                                              \
    fdt0 = float64_ ## name1 (fdt0, fdt1, &env->active_fpu.fp_status);         \
    fdt2 = float64_ ## name2 (fdt0, fdt2, &env->active_fpu.fp_status);         \
    return float64_chs(fdt2);                                                  \
}                                                                              \
                                                                               \
uint32_t helper_float_n ## name1 ## name2 ## _s(uint32_t fst0, uint32_t fst1,  \
                                                uint32_t fst2)                 \
{                                                                              \
    fst0 = float32_ ## name1 (fst0, fst1, &env->active_fpu.fp_status);         \
    fst2 = float32_ ## name2 (fst0, fst2, &env->active_fpu.fp_status);         \
    return float32_chs(fst2);                                                  \
}                                                                              \
                                                                               \
uint64_t helper_float_n ## name1 ## name2 ## _ps(uint64_t fdt0, uint64_t fdt1,\
                                                 uint64_t fdt2)                \
{                                                                              \
    uint32_t fst0 = fdt0 & 0XFFFFFFFF;                                         \
    uint32_t fsth0 = fdt0 >> 32;                                               \
    uint32_t fst1 = fdt1 & 0XFFFFFFFF;                                         \
    uint32_t fsth1 = fdt1 >> 32;                                               \
    uint32_t fst2 = fdt2 & 0XFFFFFFFF;                                         \
    uint32_t fsth2 = fdt2 >> 32;                                               \
                                                                               \
    fst0 = float32_ ## name1 (fst0, fst1, &env->active_fpu.fp_status);         \
    fsth0 = float32_ ## name1 (fsth0, fsth1, &env->active_fpu.fp_status);      \
    fst2 = float32_ ## name2 (fst0, fst2, &env->active_fpu.fp_status);         \
    fsth2 = float32_ ## name2 (fsth0, fsth2, &env->active_fpu.fp_status);      \
    fst2 = float32_chs(fst2);                                                  \
    fsth2 = float32_chs(fsth2);                                                \
    return ((uint64_t)fsth2 << 32) | fst2;                                     \
}

FLOAT_NTERNOP(mul, add)
FLOAT_NTERNOP(mul, sub)
#undef FLOAT_NTERNOP
3134 /* MIPS specific binary operations */
/* RECIP2.D (MIPS-3D Newton-Raphson reciprocal step 2):
 * computes -(fdt0 * fdt2 - 1.0). */
uint64_t helper_float_recip2_d(uint64_t fdt0, uint64_t fdt2)
{
    set_float_exception_flags(0, &env->active_fpu.fp_status);
    fdt2 = float64_mul(fdt0, fdt2, &env->active_fpu.fp_status);
    fdt2 = float64_chs(float64_sub(fdt2, FLOAT_ONE64, &env->active_fpu.fp_status));
    update_fcr31();
    return fdt2;
}

/* RECIP2.S. */
uint32_t helper_float_recip2_s(uint32_t fst0, uint32_t fst2)
{
    set_float_exception_flags(0, &env->active_fpu.fp_status);
    fst2 = float32_mul(fst0, fst2, &env->active_fpu.fp_status);
    fst2 = float32_chs(float32_sub(fst2, FLOAT_ONE32, &env->active_fpu.fp_status));
    update_fcr31();
    return fst2;
}

/* RECIP2.PS: the same step applied to each half of a paired-single. */
uint64_t helper_float_recip2_ps(uint64_t fdt0, uint64_t fdt2)
{
    uint32_t fst0 = fdt0 & 0XFFFFFFFF;
    uint32_t fsth0 = fdt0 >> 32;
    uint32_t fst2 = fdt2 & 0XFFFFFFFF;
    uint32_t fsth2 = fdt2 >> 32;

    set_float_exception_flags(0, &env->active_fpu.fp_status);
    fst2 = float32_mul(fst0, fst2, &env->active_fpu.fp_status);
    fsth2 = float32_mul(fsth0, fsth2, &env->active_fpu.fp_status);
    fst2 = float32_chs(float32_sub(fst2, FLOAT_ONE32, &env->active_fpu.fp_status));
    fsth2 = float32_chs(float32_sub(fsth2, FLOAT_ONE32, &env->active_fpu.fp_status));
    update_fcr31();
    return ((uint64_t)fsth2 << 32) | fst2;
}
/* RSQRT2.D (MIPS-3D Newton-Raphson rsqrt step 2):
 * computes -((fdt0 * fdt2 - 1.0) / 2.0). */
uint64_t helper_float_rsqrt2_d(uint64_t fdt0, uint64_t fdt2)
{
    set_float_exception_flags(0, &env->active_fpu.fp_status);
    fdt2 = float64_mul(fdt0, fdt2, &env->active_fpu.fp_status);
    fdt2 = float64_sub(fdt2, FLOAT_ONE64, &env->active_fpu.fp_status);
    fdt2 = float64_chs(float64_div(fdt2, FLOAT_TWO64, &env->active_fpu.fp_status));
    update_fcr31();
    return fdt2;
}

/* RSQRT2.S. */
uint32_t helper_float_rsqrt2_s(uint32_t fst0, uint32_t fst2)
{
    set_float_exception_flags(0, &env->active_fpu.fp_status);
    fst2 = float32_mul(fst0, fst2, &env->active_fpu.fp_status);
    fst2 = float32_sub(fst2, FLOAT_ONE32, &env->active_fpu.fp_status);
    fst2 = float32_chs(float32_div(fst2, FLOAT_TWO32, &env->active_fpu.fp_status));
    update_fcr31();
    return fst2;
}

/* RSQRT2.PS: the same step applied to each half of a paired-single. */
uint64_t helper_float_rsqrt2_ps(uint64_t fdt0, uint64_t fdt2)
{
    uint32_t fst0 = fdt0 & 0XFFFFFFFF;
    uint32_t fsth0 = fdt0 >> 32;
    uint32_t fst2 = fdt2 & 0XFFFFFFFF;
    uint32_t fsth2 = fdt2 >> 32;

    set_float_exception_flags(0, &env->active_fpu.fp_status);
    fst2 = float32_mul(fst0, fst2, &env->active_fpu.fp_status);
    fsth2 = float32_mul(fsth0, fsth2, &env->active_fpu.fp_status);
    fst2 = float32_sub(fst2, FLOAT_ONE32, &env->active_fpu.fp_status);
    fsth2 = float32_sub(fsth2, FLOAT_ONE32, &env->active_fpu.fp_status);
    fst2 = float32_chs(float32_div(fst2, FLOAT_TWO32, &env->active_fpu.fp_status));
    fsth2 = float32_chs(float32_div(fsth2, FLOAT_TWO32, &env->active_fpu.fp_status));
    update_fcr31();
    return ((uint64_t)fsth2 << 32) | fst2;
}
/* ADDR.PS (reduction add): low result = low(fdt0) + high(fdt0),
 * high result = low(fdt1) + high(fdt1). */
uint64_t helper_float_addr_ps(uint64_t fdt0, uint64_t fdt1)
{
    uint32_t fst0 = fdt0 & 0XFFFFFFFF;
    uint32_t fsth0 = fdt0 >> 32;
    uint32_t fst1 = fdt1 & 0XFFFFFFFF;
    uint32_t fsth1 = fdt1 >> 32;
    uint32_t fst2;
    uint32_t fsth2;

    set_float_exception_flags(0, &env->active_fpu.fp_status);
    fst2 = float32_add (fst0, fsth0, &env->active_fpu.fp_status);
    fsth2 = float32_add (fst1, fsth1, &env->active_fpu.fp_status);
    update_fcr31();
    return ((uint64_t)fsth2 << 32) | fst2;
}

/* MULR.PS (reduction multiply): same pairing as ADDR.PS but with
 * multiplication. */
uint64_t helper_float_mulr_ps(uint64_t fdt0, uint64_t fdt1)
{
    uint32_t fst0 = fdt0 & 0XFFFFFFFF;
    uint32_t fsth0 = fdt0 >> 32;
    uint32_t fst1 = fdt1 & 0XFFFFFFFF;
    uint32_t fsth1 = fdt1 >> 32;
    uint32_t fst2;
    uint32_t fsth2;

    set_float_exception_flags(0, &env->active_fpu.fp_status);
    fst2 = float32_mul (fst0, fsth0, &env->active_fpu.fp_status);
    fsth2 = float32_mul (fst1, fsth1, &env->active_fpu.fp_status);
    update_fcr31();
    return ((uint64_t)fsth2 << 32) | fst2;
}
3239 /* compare operations */
/* Expand the double-precision C.cond.D helpers.  Each evaluates 'cond',
 * folds any IEEE flags into FCR31 (quiet predicates only signal on
 * signaling NaNs; the non-quiet ones signal on any NaN), and sets or
 * clears FP condition code 'cc'.  The cmpabs variant compares the
 * absolute values (CABS.cond.D from MIPS-3D). */
#define FOP_COND_D(op, cond)                                       \
void helper_cmp_d_ ## op (uint64_t fdt0, uint64_t fdt1, int cc)    \
{                                                                  \
    int c;                                                         \
    set_float_exception_flags(0, &env->active_fpu.fp_status);      \
    c = cond;                                                      \
    update_fcr31();                                                \
    if (c)                                                         \
        SET_FP_COND(cc, env->active_fpu);                          \
    else                                                           \
        CLEAR_FP_COND(cc, env->active_fpu);                        \
}                                                                  \
void helper_cmpabs_d_ ## op (uint64_t fdt0, uint64_t fdt1, int cc) \
{                                                                  \
    int c;                                                         \
    set_float_exception_flags(0, &env->active_fpu.fp_status);      \
    fdt0 = float64_abs(fdt0);                                      \
    fdt1 = float64_abs(fdt1);                                      \
    c = cond;                                                      \
    update_fcr31();                                                \
    if (c)                                                         \
        SET_FP_COND(cc, env->active_fpu);                          \
    else                                                           \
        CLEAR_FP_COND(cc, env->active_fpu);                        \
}

/* NOTE: the comma operator will make "cond" to eval to false,
 * but float64_unordered_quiet() is still called. */
FOP_COND_D(f,   (float64_unordered_quiet(fdt1, fdt0, &env->active_fpu.fp_status), 0))
FOP_COND_D(un,  float64_unordered_quiet(fdt1, fdt0, &env->active_fpu.fp_status))
FOP_COND_D(eq,  float64_eq_quiet(fdt0, fdt1, &env->active_fpu.fp_status))
FOP_COND_D(ueq, float64_unordered_quiet(fdt1, fdt0, &env->active_fpu.fp_status) || float64_eq_quiet(fdt0, fdt1, &env->active_fpu.fp_status))
FOP_COND_D(olt, float64_lt_quiet(fdt0, fdt1, &env->active_fpu.fp_status))
FOP_COND_D(ult, float64_unordered_quiet(fdt1, fdt0, &env->active_fpu.fp_status) || float64_lt_quiet(fdt0, fdt1, &env->active_fpu.fp_status))
FOP_COND_D(ole, float64_le_quiet(fdt0, fdt1, &env->active_fpu.fp_status))
FOP_COND_D(ule, float64_unordered_quiet(fdt1, fdt0, &env->active_fpu.fp_status) || float64_le_quiet(fdt0, fdt1, &env->active_fpu.fp_status))
/* NOTE: the comma operator will make "cond" to eval to false,
 * but float64_unordered() is still called. */
FOP_COND_D(sf,  (float64_unordered(fdt1, fdt0, &env->active_fpu.fp_status), 0))
FOP_COND_D(ngle,float64_unordered(fdt1, fdt0, &env->active_fpu.fp_status))
FOP_COND_D(seq, float64_eq(fdt0, fdt1, &env->active_fpu.fp_status))
FOP_COND_D(ngl, float64_unordered(fdt1, fdt0, &env->active_fpu.fp_status) || float64_eq(fdt0, fdt1, &env->active_fpu.fp_status))
FOP_COND_D(lt,  float64_lt(fdt0, fdt1, &env->active_fpu.fp_status))
FOP_COND_D(nge, float64_unordered(fdt1, fdt0, &env->active_fpu.fp_status) || float64_lt(fdt0, fdt1, &env->active_fpu.fp_status))
FOP_COND_D(le,  float64_le(fdt0, fdt1, &env->active_fpu.fp_status))
FOP_COND_D(ngt, float64_unordered(fdt1, fdt0, &env->active_fpu.fp_status) || float64_le(fdt0, fdt1, &env->active_fpu.fp_status))
/* Expand the single-precision C.cond.S / CABS.cond.S helpers; same
 * structure as FOP_COND_D above. */
#define FOP_COND_S(op, cond)                                       \
void helper_cmp_s_ ## op (uint32_t fst0, uint32_t fst1, int cc)    \
{                                                                  \
    int c;                                                         \
    set_float_exception_flags(0, &env->active_fpu.fp_status);      \
    c = cond;                                                      \
    update_fcr31();                                                \
    if (c)                                                         \
        SET_FP_COND(cc, env->active_fpu);                          \
    else                                                           \
        CLEAR_FP_COND(cc, env->active_fpu);                        \
}                                                                  \
void helper_cmpabs_s_ ## op (uint32_t fst0, uint32_t fst1, int cc) \
{                                                                  \
    int c;                                                         \
    set_float_exception_flags(0, &env->active_fpu.fp_status);      \
    fst0 = float32_abs(fst0);                                      \
    fst1 = float32_abs(fst1);                                      \
    c = cond;                                                      \
    update_fcr31();                                                \
    if (c)                                                         \
        SET_FP_COND(cc, env->active_fpu);                          \
    else                                                           \
        CLEAR_FP_COND(cc, env->active_fpu);                        \
}

/* NOTE: the comma operator will make "cond" to eval to false,
 * but float32_unordered_quiet() is still called. */
FOP_COND_S(f,   (float32_unordered_quiet(fst1, fst0, &env->active_fpu.fp_status), 0))
FOP_COND_S(un,  float32_unordered_quiet(fst1, fst0, &env->active_fpu.fp_status))
FOP_COND_S(eq,  float32_eq_quiet(fst0, fst1, &env->active_fpu.fp_status))
FOP_COND_S(ueq, float32_unordered_quiet(fst1, fst0, &env->active_fpu.fp_status) || float32_eq_quiet(fst0, fst1, &env->active_fpu.fp_status))
FOP_COND_S(olt, float32_lt_quiet(fst0, fst1, &env->active_fpu.fp_status))
FOP_COND_S(ult, float32_unordered_quiet(fst1, fst0, &env->active_fpu.fp_status) || float32_lt_quiet(fst0, fst1, &env->active_fpu.fp_status))
FOP_COND_S(ole, float32_le_quiet(fst0, fst1, &env->active_fpu.fp_status))
FOP_COND_S(ule, float32_unordered_quiet(fst1, fst0, &env->active_fpu.fp_status) || float32_le_quiet(fst0, fst1, &env->active_fpu.fp_status))
/* NOTE: the comma operator will make "cond" to eval to false,
 * but float32_unordered() is still called. */
FOP_COND_S(sf,  (float32_unordered(fst1, fst0, &env->active_fpu.fp_status), 0))
FOP_COND_S(ngle,float32_unordered(fst1, fst0, &env->active_fpu.fp_status))
FOP_COND_S(seq, float32_eq(fst0, fst1, &env->active_fpu.fp_status))
FOP_COND_S(ngl, float32_unordered(fst1, fst0, &env->active_fpu.fp_status) || float32_eq(fst0, fst1, &env->active_fpu.fp_status))
FOP_COND_S(lt,  float32_lt(fst0, fst1, &env->active_fpu.fp_status))
FOP_COND_S(nge, float32_unordered(fst1, fst0, &env->active_fpu.fp_status) || float32_lt(fst0, fst1, &env->active_fpu.fp_status))
FOP_COND_S(le,  float32_le(fst0, fst1, &env->active_fpu.fp_status))
FOP_COND_S(ngt, float32_unordered(fst1, fst0, &env->active_fpu.fp_status) || float32_le(fst0, fst1, &env->active_fpu.fp_status))
3334 #define FOP_COND_PS(op, condl, condh) \
3335 void helper_cmp_ps_ ## op (uint64_t fdt0, uint64_t fdt1, int cc) \
3337 uint32_t fst0, fsth0, fst1, fsth1; \
3338 int ch, cl; \
3339 set_float_exception_flags(0, &env->active_fpu.fp_status); \
3340 fst0 = fdt0 & 0XFFFFFFFF; \
3341 fsth0 = fdt0 >> 32; \
3342 fst1 = fdt1 & 0XFFFFFFFF; \
3343 fsth1 = fdt1 >> 32; \
3344 cl = condl; \
3345 ch = condh; \
3346 update_fcr31(); \
3347 if (cl) \
3348 SET_FP_COND(cc, env->active_fpu); \
3349 else \
3350 CLEAR_FP_COND(cc, env->active_fpu); \
3351 if (ch) \
3352 SET_FP_COND(cc + 1, env->active_fpu); \
3353 else \
3354 CLEAR_FP_COND(cc + 1, env->active_fpu); \
3356 void helper_cmpabs_ps_ ## op (uint64_t fdt0, uint64_t fdt1, int cc) \
3358 uint32_t fst0, fsth0, fst1, fsth1; \
3359 int ch, cl; \
3360 fst0 = float32_abs(fdt0 & 0XFFFFFFFF); \
3361 fsth0 = float32_abs(fdt0 >> 32); \
3362 fst1 = float32_abs(fdt1 & 0XFFFFFFFF); \
3363 fsth1 = float32_abs(fdt1 >> 32); \
3364 cl = condl; \
3365 ch = condh; \
3366 update_fcr31(); \
3367 if (cl) \
3368 SET_FP_COND(cc, env->active_fpu); \
3369 else \
3370 CLEAR_FP_COND(cc, env->active_fpu); \
3371 if (ch) \
3372 SET_FP_COND(cc + 1, env->active_fpu); \
3373 else \
3374 CLEAR_FP_COND(cc + 1, env->active_fpu); \
3377 /* NOTE: the comma operator will make "cond" to eval to false,
3378 * but float32_unordered_quiet() is still called. */
3379 FOP_COND_PS(f, (float32_unordered_quiet(fst1, fst0, &env->active_fpu.fp_status), 0),
3380 (float32_unordered_quiet(fsth1, fsth0, &env->active_fpu.fp_status), 0))
3381 FOP_COND_PS(un, float32_unordered_quiet(fst1, fst0, &env->active_fpu.fp_status),
3382 float32_unordered_quiet(fsth1, fsth0, &env->active_fpu.fp_status))
3383 FOP_COND_PS(eq, float32_eq_quiet(fst0, fst1, &env->active_fpu.fp_status),
3384 float32_eq_quiet(fsth0, fsth1, &env->active_fpu.fp_status))
3385 FOP_COND_PS(ueq, float32_unordered_quiet(fst1, fst0, &env->active_fpu.fp_status) || float32_eq_quiet(fst0, fst1, &env->active_fpu.fp_status),
3386 float32_unordered_quiet(fsth1, fsth0, &env->active_fpu.fp_status) || float32_eq_quiet(fsth0, fsth1, &env->active_fpu.fp_status))
3387 FOP_COND_PS(olt, float32_lt_quiet(fst0, fst1, &env->active_fpu.fp_status),
3388 float32_lt_quiet(fsth0, fsth1, &env->active_fpu.fp_status))
3389 FOP_COND_PS(ult, float32_unordered_quiet(fst1, fst0, &env->active_fpu.fp_status) || float32_lt_quiet(fst0, fst1, &env->active_fpu.fp_status),
3390 float32_unordered_quiet(fsth1, fsth0, &env->active_fpu.fp_status) || float32_lt_quiet(fsth0, fsth1, &env->active_fpu.fp_status))
3391 FOP_COND_PS(ole, float32_le_quiet(fst0, fst1, &env->active_fpu.fp_status),
3392 float32_le_quiet(fsth0, fsth1, &env->active_fpu.fp_status))
3393 FOP_COND_PS(ule, float32_unordered_quiet(fst1, fst0, &env->active_fpu.fp_status) || float32_le_quiet(fst0, fst1, &env->active_fpu.fp_status),
3394 float32_unordered_quiet(fsth1, fsth0, &env->active_fpu.fp_status) || float32_le_quiet(fsth0, fsth1, &env->active_fpu.fp_status))
3395 /* NOTE: the comma operator will make "cond" to eval to false,
3396 * but float32_unordered() is still called. */
3397 FOP_COND_PS(sf, (float32_unordered(fst1, fst0, &env->active_fpu.fp_status), 0),
3398 (float32_unordered(fsth1, fsth0, &env->active_fpu.fp_status), 0))
3399 FOP_COND_PS(ngle,float32_unordered(fst1, fst0, &env->active_fpu.fp_status),
3400 float32_unordered(fsth1, fsth0, &env->active_fpu.fp_status))
3401 FOP_COND_PS(seq, float32_eq(fst0, fst1, &env->active_fpu.fp_status),
3402 float32_eq(fsth0, fsth1, &env->active_fpu.fp_status))
3403 FOP_COND_PS(ngl, float32_unordered(fst1, fst0, &env->active_fpu.fp_status) || float32_eq(fst0, fst1, &env->active_fpu.fp_status),
3404 float32_unordered(fsth1, fsth0, &env->active_fpu.fp_status) || float32_eq(fsth0, fsth1, &env->active_fpu.fp_status))
3405 FOP_COND_PS(lt, float32_lt(fst0, fst1, &env->active_fpu.fp_status),
3406 float32_lt(fsth0, fsth1, &env->active_fpu.fp_status))
3407 FOP_COND_PS(nge, float32_unordered(fst1, fst0, &env->active_fpu.fp_status) || float32_lt(fst0, fst1, &env->active_fpu.fp_status),
3408 float32_unordered(fsth1, fsth0, &env->active_fpu.fp_status) || float32_lt(fsth0, fsth1, &env->active_fpu.fp_status))
3409 FOP_COND_PS(le, float32_le(fst0, fst1, &env->active_fpu.fp_status),
3410 float32_le(fsth0, fsth1, &env->active_fpu.fp_status))
3411 FOP_COND_PS(ngt, float32_unordered(fst1, fst0, &env->active_fpu.fp_status) || float32_le(fst0, fst1, &env->active_fpu.fp_status),
3412 float32_unordered(fsth1, fsth0, &env->active_fpu.fp_status) || float32_le(fsth0, fsth1, &env->active_fpu.fp_status))