2 * MIPS emulation helpers for qemu.
4 * Copyright (c) 2004-2005 Jocelyn Mayer
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
21 #include "dyngen-exec.h"
23 #include "host-utils.h"
27 #if !defined(CONFIG_USER_ONLY)
28 #include "softmmu_exec.h"
29 #endif /* !defined(CONFIG_USER_ONLY) */
31 #ifndef CONFIG_USER_ONLY
32 static inline void cpu_mips_tlb_flush (CPUMIPSState
*env
, int flush_global
);
35 static inline void compute_hflags(CPUMIPSState
*env
)
37 env
->hflags
&= ~(MIPS_HFLAG_COP1X
| MIPS_HFLAG_64
| MIPS_HFLAG_CP0
|
38 MIPS_HFLAG_F64
| MIPS_HFLAG_FPU
| MIPS_HFLAG_KSU
|
40 if (!(env
->CP0_Status
& (1 << CP0St_EXL
)) &&
41 !(env
->CP0_Status
& (1 << CP0St_ERL
)) &&
42 !(env
->hflags
& MIPS_HFLAG_DM
)) {
43 env
->hflags
|= (env
->CP0_Status
>> CP0St_KSU
) & MIPS_HFLAG_KSU
;
45 #if defined(TARGET_MIPS64)
46 if (((env
->hflags
& MIPS_HFLAG_KSU
) != MIPS_HFLAG_UM
) ||
47 (env
->CP0_Status
& (1 << CP0St_PX
)) ||
48 (env
->CP0_Status
& (1 << CP0St_UX
))) {
49 env
->hflags
|= MIPS_HFLAG_64
;
51 if (env
->CP0_Status
& (1 << CP0St_UX
)) {
52 env
->hflags
|= MIPS_HFLAG_UX
;
55 if ((env
->CP0_Status
& (1 << CP0St_CU0
)) ||
56 !(env
->hflags
& MIPS_HFLAG_KSU
)) {
57 env
->hflags
|= MIPS_HFLAG_CP0
;
59 if (env
->CP0_Status
& (1 << CP0St_CU1
)) {
60 env
->hflags
|= MIPS_HFLAG_FPU
;
62 if (env
->CP0_Status
& (1 << CP0St_FR
)) {
63 env
->hflags
|= MIPS_HFLAG_F64
;
65 if (env
->insn_flags
& ISA_MIPS32R2
) {
66 if (env
->active_fpu
.fcr0
& (1 << FCR0_F64
)) {
67 env
->hflags
|= MIPS_HFLAG_COP1X
;
69 } else if (env
->insn_flags
& ISA_MIPS32
) {
70 if (env
->hflags
& MIPS_HFLAG_64
) {
71 env
->hflags
|= MIPS_HFLAG_COP1X
;
73 } else if (env
->insn_flags
& ISA_MIPS4
) {
74 /* All supported MIPS IV CPUs use the XX (CU3) to enable
75 and disable the MIPS IV extensions to the MIPS III ISA.
76 Some other MIPS IV CPUs ignore the bit, so the check here
77 would be too restrictive for them. */
78 if (env
->CP0_Status
& (1 << CP0St_CU3
)) {
79 env
->hflags
|= MIPS_HFLAG_COP1X
;
84 /*****************************************************************************/
85 /* Exceptions processing helpers */
87 void helper_raise_exception_err (uint32_t exception
, int error_code
)
90 if (exception
< 0x100)
91 qemu_log("%s: %d %d\n", __func__
, exception
, error_code
);
93 env
->exception_index
= exception
;
94 env
->error_code
= error_code
;
98 void helper_raise_exception (uint32_t exception
)
100 helper_raise_exception_err(exception
, 0);
103 #if !defined(CONFIG_USER_ONLY)
104 static void do_restore_state(uintptr_t pc
)
106 TranslationBlock
*tb
;
108 tb
= tb_find_pc (pc
);
110 cpu_restore_state(tb
, env
, pc
);
115 #if defined(CONFIG_USER_ONLY)
116 #define HELPER_LD(name, insn, type) \
117 static inline type do_##name(target_ulong addr, int mem_idx) \
119 return (type) insn##_raw(addr); \
122 #define HELPER_LD(name, insn, type) \
123 static inline type do_##name(target_ulong addr, int mem_idx) \
127 case 0: return (type) insn##_kernel(addr); break; \
128 case 1: return (type) insn##_super(addr); break; \
130 case 2: return (type) insn##_user(addr); break; \
134 HELPER_LD(lbu
, ldub
, uint8_t)
135 HELPER_LD(lw
, ldl
, int32_t)
137 HELPER_LD(ld
, ldq
, int64_t)
141 #if defined(CONFIG_USER_ONLY)
142 #define HELPER_ST(name, insn, type) \
143 static inline void do_##name(target_ulong addr, type val, int mem_idx) \
145 insn##_raw(addr, val); \
148 #define HELPER_ST(name, insn, type) \
149 static inline void do_##name(target_ulong addr, type val, int mem_idx) \
153 case 0: insn##_kernel(addr, val); break; \
154 case 1: insn##_super(addr, val); break; \
156 case 2: insn##_user(addr, val); break; \
160 HELPER_ST(sb
, stb
, uint8_t)
161 HELPER_ST(sw
, stl
, uint32_t)
163 HELPER_ST(sd
, stq
, uint64_t)
167 target_ulong
helper_clo (target_ulong arg1
)
172 target_ulong
helper_clz (target_ulong arg1
)
177 #if defined(TARGET_MIPS64)
178 target_ulong
helper_dclo (target_ulong arg1
)
183 target_ulong
helper_dclz (target_ulong arg1
)
187 #endif /* TARGET_MIPS64 */
189 /* 64 bits arithmetic for 32 bits hosts */
190 static inline uint64_t get_HILO (void)
192 return ((uint64_t)(env
->active_tc
.HI
[0]) << 32) | (uint32_t)env
->active_tc
.LO
[0];
195 static inline void set_HIT0_LO (target_ulong arg1
, uint64_t HILO
)
197 env
->active_tc
.LO
[0] = (int32_t)(HILO
& 0xFFFFFFFF);
198 arg1
= env
->active_tc
.HI
[0] = (int32_t)(HILO
>> 32);
201 static inline void set_HI_LOT0 (target_ulong arg1
, uint64_t HILO
)
203 arg1
= env
->active_tc
.LO
[0] = (int32_t)(HILO
& 0xFFFFFFFF);
204 env
->active_tc
.HI
[0] = (int32_t)(HILO
>> 32);
207 /* Multiplication variants of the vr54xx. */
208 target_ulong
helper_muls (target_ulong arg1
, target_ulong arg2
)
210 set_HI_LOT0(arg1
, 0 - ((int64_t)(int32_t)arg1
* (int64_t)(int32_t)arg2
));
215 target_ulong
helper_mulsu (target_ulong arg1
, target_ulong arg2
)
217 set_HI_LOT0(arg1
, 0 - ((uint64_t)(uint32_t)arg1
* (uint64_t)(uint32_t)arg2
));
222 target_ulong
helper_macc (target_ulong arg1
, target_ulong arg2
)
224 set_HI_LOT0(arg1
, ((int64_t)get_HILO()) + ((int64_t)(int32_t)arg1
* (int64_t)(int32_t)arg2
));
229 target_ulong
helper_macchi (target_ulong arg1
, target_ulong arg2
)
231 set_HIT0_LO(arg1
, ((int64_t)get_HILO()) + ((int64_t)(int32_t)arg1
* (int64_t)(int32_t)arg2
));
236 target_ulong
helper_maccu (target_ulong arg1
, target_ulong arg2
)
238 set_HI_LOT0(arg1
, ((uint64_t)get_HILO()) + ((uint64_t)(uint32_t)arg1
* (uint64_t)(uint32_t)arg2
));
243 target_ulong
helper_macchiu (target_ulong arg1
, target_ulong arg2
)
245 set_HIT0_LO(arg1
, ((uint64_t)get_HILO()) + ((uint64_t)(uint32_t)arg1
* (uint64_t)(uint32_t)arg2
));
250 target_ulong
helper_msac (target_ulong arg1
, target_ulong arg2
)
252 set_HI_LOT0(arg1
, ((int64_t)get_HILO()) - ((int64_t)(int32_t)arg1
* (int64_t)(int32_t)arg2
));
257 target_ulong
helper_msachi (target_ulong arg1
, target_ulong arg2
)
259 set_HIT0_LO(arg1
, ((int64_t)get_HILO()) - ((int64_t)(int32_t)arg1
* (int64_t)(int32_t)arg2
));
264 target_ulong
helper_msacu (target_ulong arg1
, target_ulong arg2
)
266 set_HI_LOT0(arg1
, ((uint64_t)get_HILO()) - ((uint64_t)(uint32_t)arg1
* (uint64_t)(uint32_t)arg2
));
271 target_ulong
helper_msachiu (target_ulong arg1
, target_ulong arg2
)
273 set_HIT0_LO(arg1
, ((uint64_t)get_HILO()) - ((uint64_t)(uint32_t)arg1
* (uint64_t)(uint32_t)arg2
));
278 target_ulong
helper_mulhi (target_ulong arg1
, target_ulong arg2
)
280 set_HIT0_LO(arg1
, (int64_t)(int32_t)arg1
* (int64_t)(int32_t)arg2
);
285 target_ulong
helper_mulhiu (target_ulong arg1
, target_ulong arg2
)
287 set_HIT0_LO(arg1
, (uint64_t)(uint32_t)arg1
* (uint64_t)(uint32_t)arg2
);
292 target_ulong
helper_mulshi (target_ulong arg1
, target_ulong arg2
)
294 set_HIT0_LO(arg1
, 0 - ((int64_t)(int32_t)arg1
* (int64_t)(int32_t)arg2
));
299 target_ulong
helper_mulshiu (target_ulong arg1
, target_ulong arg2
)
301 set_HIT0_LO(arg1
, 0 - ((uint64_t)(uint32_t)arg1
* (uint64_t)(uint32_t)arg2
));
307 void helper_dmult (target_ulong arg1
, target_ulong arg2
)
309 muls64(&(env
->active_tc
.LO
[0]), &(env
->active_tc
.HI
[0]), arg1
, arg2
);
312 void helper_dmultu (target_ulong arg1
, target_ulong arg2
)
314 mulu64(&(env
->active_tc
.LO
[0]), &(env
->active_tc
.HI
[0]), arg1
, arg2
);
318 #ifndef CONFIG_USER_ONLY
320 static inline target_phys_addr_t
do_translate_address(target_ulong address
, int rw
)
322 target_phys_addr_t lladdr
;
324 lladdr
= cpu_mips_translate_address(env
, address
, rw
);
326 if (lladdr
== -1LL) {
333 #define HELPER_LD_ATOMIC(name, insn) \
334 target_ulong helper_##name(target_ulong arg, int mem_idx) \
336 env->lladdr = do_translate_address(arg, 0); \
337 env->llval = do_##insn(arg, mem_idx); \
340 HELPER_LD_ATOMIC(ll
, lw
)
342 HELPER_LD_ATOMIC(lld
, ld
)
344 #undef HELPER_LD_ATOMIC
346 #define HELPER_ST_ATOMIC(name, ld_insn, st_insn, almask) \
347 target_ulong helper_##name(target_ulong arg1, target_ulong arg2, int mem_idx) \
351 if (arg2 & almask) { \
352 env->CP0_BadVAddr = arg2; \
353 helper_raise_exception(EXCP_AdES); \
355 if (do_translate_address(arg2, 1) == env->lladdr) { \
356 tmp = do_##ld_insn(arg2, mem_idx); \
357 if (tmp == env->llval) { \
358 do_##st_insn(arg2, arg1, mem_idx); \
364 HELPER_ST_ATOMIC(sc
, lw
, sw
, 0x3)
366 HELPER_ST_ATOMIC(scd
, ld
, sd
, 0x7)
368 #undef HELPER_ST_ATOMIC
/* Byte-lane mask / offset direction for unaligned 32-bit accesses.
   The #else/#endif lines were missing from the visible text, leaving
   both macro definitions active (a redefinition error); restored. */
#ifdef TARGET_WORDS_BIGENDIAN
#define GET_LMASK(v) ((v) & 3)
#define GET_OFFSET(addr, offset) (addr + (offset))
#else
#define GET_LMASK(v) (((v) & 3) ^ 3)
#define GET_OFFSET(addr, offset) (addr - (offset))
#endif
379 target_ulong
helper_lwl(target_ulong arg1
, target_ulong arg2
, int mem_idx
)
383 tmp
= do_lbu(arg2
, mem_idx
);
384 arg1
= (arg1
& 0x00FFFFFF) | (tmp
<< 24);
386 if (GET_LMASK(arg2
) <= 2) {
387 tmp
= do_lbu(GET_OFFSET(arg2
, 1), mem_idx
);
388 arg1
= (arg1
& 0xFF00FFFF) | (tmp
<< 16);
391 if (GET_LMASK(arg2
) <= 1) {
392 tmp
= do_lbu(GET_OFFSET(arg2
, 2), mem_idx
);
393 arg1
= (arg1
& 0xFFFF00FF) | (tmp
<< 8);
396 if (GET_LMASK(arg2
) == 0) {
397 tmp
= do_lbu(GET_OFFSET(arg2
, 3), mem_idx
);
398 arg1
= (arg1
& 0xFFFFFF00) | tmp
;
400 return (int32_t)arg1
;
403 target_ulong
helper_lwr(target_ulong arg1
, target_ulong arg2
, int mem_idx
)
407 tmp
= do_lbu(arg2
, mem_idx
);
408 arg1
= (arg1
& 0xFFFFFF00) | tmp
;
410 if (GET_LMASK(arg2
) >= 1) {
411 tmp
= do_lbu(GET_OFFSET(arg2
, -1), mem_idx
);
412 arg1
= (arg1
& 0xFFFF00FF) | (tmp
<< 8);
415 if (GET_LMASK(arg2
) >= 2) {
416 tmp
= do_lbu(GET_OFFSET(arg2
, -2), mem_idx
);
417 arg1
= (arg1
& 0xFF00FFFF) | (tmp
<< 16);
420 if (GET_LMASK(arg2
) == 3) {
421 tmp
= do_lbu(GET_OFFSET(arg2
, -3), mem_idx
);
422 arg1
= (arg1
& 0x00FFFFFF) | (tmp
<< 24);
424 return (int32_t)arg1
;
427 void helper_swl(target_ulong arg1
, target_ulong arg2
, int mem_idx
)
429 do_sb(arg2
, (uint8_t)(arg1
>> 24), mem_idx
);
431 if (GET_LMASK(arg2
) <= 2)
432 do_sb(GET_OFFSET(arg2
, 1), (uint8_t)(arg1
>> 16), mem_idx
);
434 if (GET_LMASK(arg2
) <= 1)
435 do_sb(GET_OFFSET(arg2
, 2), (uint8_t)(arg1
>> 8), mem_idx
);
437 if (GET_LMASK(arg2
) == 0)
438 do_sb(GET_OFFSET(arg2
, 3), (uint8_t)arg1
, mem_idx
);
441 void helper_swr(target_ulong arg1
, target_ulong arg2
, int mem_idx
)
443 do_sb(arg2
, (uint8_t)arg1
, mem_idx
);
445 if (GET_LMASK(arg2
) >= 1)
446 do_sb(GET_OFFSET(arg2
, -1), (uint8_t)(arg1
>> 8), mem_idx
);
448 if (GET_LMASK(arg2
) >= 2)
449 do_sb(GET_OFFSET(arg2
, -2), (uint8_t)(arg1
>> 16), mem_idx
);
451 if (GET_LMASK(arg2
) == 3)
452 do_sb(GET_OFFSET(arg2
, -3), (uint8_t)(arg1
>> 24), mem_idx
);
455 #if defined(TARGET_MIPS64)
456 /* "half" load and stores. We must do the memory access inline,
457 or fault handling won't work. */
/* Byte-lane mask for unaligned 64-bit accesses.  The #else/#endif lines
   were missing from the visible text, leaving both macro definitions
   active (a redefinition error); restored. */
#ifdef TARGET_WORDS_BIGENDIAN
#define GET_LMASK64(v) ((v) & 7)
#else
#define GET_LMASK64(v) (((v) & 7) ^ 7)
#endif
465 target_ulong
helper_ldl(target_ulong arg1
, target_ulong arg2
, int mem_idx
)
469 tmp
= do_lbu(arg2
, mem_idx
);
470 arg1
= (arg1
& 0x00FFFFFFFFFFFFFFULL
) | (tmp
<< 56);
472 if (GET_LMASK64(arg2
) <= 6) {
473 tmp
= do_lbu(GET_OFFSET(arg2
, 1), mem_idx
);
474 arg1
= (arg1
& 0xFF00FFFFFFFFFFFFULL
) | (tmp
<< 48);
477 if (GET_LMASK64(arg2
) <= 5) {
478 tmp
= do_lbu(GET_OFFSET(arg2
, 2), mem_idx
);
479 arg1
= (arg1
& 0xFFFF00FFFFFFFFFFULL
) | (tmp
<< 40);
482 if (GET_LMASK64(arg2
) <= 4) {
483 tmp
= do_lbu(GET_OFFSET(arg2
, 3), mem_idx
);
484 arg1
= (arg1
& 0xFFFFFF00FFFFFFFFULL
) | (tmp
<< 32);
487 if (GET_LMASK64(arg2
) <= 3) {
488 tmp
= do_lbu(GET_OFFSET(arg2
, 4), mem_idx
);
489 arg1
= (arg1
& 0xFFFFFFFF00FFFFFFULL
) | (tmp
<< 24);
492 if (GET_LMASK64(arg2
) <= 2) {
493 tmp
= do_lbu(GET_OFFSET(arg2
, 5), mem_idx
);
494 arg1
= (arg1
& 0xFFFFFFFFFF00FFFFULL
) | (tmp
<< 16);
497 if (GET_LMASK64(arg2
) <= 1) {
498 tmp
= do_lbu(GET_OFFSET(arg2
, 6), mem_idx
);
499 arg1
= (arg1
& 0xFFFFFFFFFFFF00FFULL
) | (tmp
<< 8);
502 if (GET_LMASK64(arg2
) == 0) {
503 tmp
= do_lbu(GET_OFFSET(arg2
, 7), mem_idx
);
504 arg1
= (arg1
& 0xFFFFFFFFFFFFFF00ULL
) | tmp
;
510 target_ulong
helper_ldr(target_ulong arg1
, target_ulong arg2
, int mem_idx
)
514 tmp
= do_lbu(arg2
, mem_idx
);
515 arg1
= (arg1
& 0xFFFFFFFFFFFFFF00ULL
) | tmp
;
517 if (GET_LMASK64(arg2
) >= 1) {
518 tmp
= do_lbu(GET_OFFSET(arg2
, -1), mem_idx
);
519 arg1
= (arg1
& 0xFFFFFFFFFFFF00FFULL
) | (tmp
<< 8);
522 if (GET_LMASK64(arg2
) >= 2) {
523 tmp
= do_lbu(GET_OFFSET(arg2
, -2), mem_idx
);
524 arg1
= (arg1
& 0xFFFFFFFFFF00FFFFULL
) | (tmp
<< 16);
527 if (GET_LMASK64(arg2
) >= 3) {
528 tmp
= do_lbu(GET_OFFSET(arg2
, -3), mem_idx
);
529 arg1
= (arg1
& 0xFFFFFFFF00FFFFFFULL
) | (tmp
<< 24);
532 if (GET_LMASK64(arg2
) >= 4) {
533 tmp
= do_lbu(GET_OFFSET(arg2
, -4), mem_idx
);
534 arg1
= (arg1
& 0xFFFFFF00FFFFFFFFULL
) | (tmp
<< 32);
537 if (GET_LMASK64(arg2
) >= 5) {
538 tmp
= do_lbu(GET_OFFSET(arg2
, -5), mem_idx
);
539 arg1
= (arg1
& 0xFFFF00FFFFFFFFFFULL
) | (tmp
<< 40);
542 if (GET_LMASK64(arg2
) >= 6) {
543 tmp
= do_lbu(GET_OFFSET(arg2
, -6), mem_idx
);
544 arg1
= (arg1
& 0xFF00FFFFFFFFFFFFULL
) | (tmp
<< 48);
547 if (GET_LMASK64(arg2
) == 7) {
548 tmp
= do_lbu(GET_OFFSET(arg2
, -7), mem_idx
);
549 arg1
= (arg1
& 0x00FFFFFFFFFFFFFFULL
) | (tmp
<< 56);
555 void helper_sdl(target_ulong arg1
, target_ulong arg2
, int mem_idx
)
557 do_sb(arg2
, (uint8_t)(arg1
>> 56), mem_idx
);
559 if (GET_LMASK64(arg2
) <= 6)
560 do_sb(GET_OFFSET(arg2
, 1), (uint8_t)(arg1
>> 48), mem_idx
);
562 if (GET_LMASK64(arg2
) <= 5)
563 do_sb(GET_OFFSET(arg2
, 2), (uint8_t)(arg1
>> 40), mem_idx
);
565 if (GET_LMASK64(arg2
) <= 4)
566 do_sb(GET_OFFSET(arg2
, 3), (uint8_t)(arg1
>> 32), mem_idx
);
568 if (GET_LMASK64(arg2
) <= 3)
569 do_sb(GET_OFFSET(arg2
, 4), (uint8_t)(arg1
>> 24), mem_idx
);
571 if (GET_LMASK64(arg2
) <= 2)
572 do_sb(GET_OFFSET(arg2
, 5), (uint8_t)(arg1
>> 16), mem_idx
);
574 if (GET_LMASK64(arg2
) <= 1)
575 do_sb(GET_OFFSET(arg2
, 6), (uint8_t)(arg1
>> 8), mem_idx
);
577 if (GET_LMASK64(arg2
) <= 0)
578 do_sb(GET_OFFSET(arg2
, 7), (uint8_t)arg1
, mem_idx
);
581 void helper_sdr(target_ulong arg1
, target_ulong arg2
, int mem_idx
)
583 do_sb(arg2
, (uint8_t)arg1
, mem_idx
);
585 if (GET_LMASK64(arg2
) >= 1)
586 do_sb(GET_OFFSET(arg2
, -1), (uint8_t)(arg1
>> 8), mem_idx
);
588 if (GET_LMASK64(arg2
) >= 2)
589 do_sb(GET_OFFSET(arg2
, -2), (uint8_t)(arg1
>> 16), mem_idx
);
591 if (GET_LMASK64(arg2
) >= 3)
592 do_sb(GET_OFFSET(arg2
, -3), (uint8_t)(arg1
>> 24), mem_idx
);
594 if (GET_LMASK64(arg2
) >= 4)
595 do_sb(GET_OFFSET(arg2
, -4), (uint8_t)(arg1
>> 32), mem_idx
);
597 if (GET_LMASK64(arg2
) >= 5)
598 do_sb(GET_OFFSET(arg2
, -5), (uint8_t)(arg1
>> 40), mem_idx
);
600 if (GET_LMASK64(arg2
) >= 6)
601 do_sb(GET_OFFSET(arg2
, -6), (uint8_t)(arg1
>> 48), mem_idx
);
603 if (GET_LMASK64(arg2
) == 7)
604 do_sb(GET_OFFSET(arg2
, -7), (uint8_t)(arg1
>> 56), mem_idx
);
606 #endif /* TARGET_MIPS64 */
608 static const int multiple_regs
[] = { 16, 17, 18, 19, 20, 21, 22, 23, 30 };
610 void helper_lwm (target_ulong addr
, target_ulong reglist
, uint32_t mem_idx
)
612 target_ulong base_reglist
= reglist
& 0xf;
613 target_ulong do_r31
= reglist
& 0x10;
614 #ifdef CONFIG_USER_ONLY
616 #define ldfun ldl_raw
618 uint32_t (*ldfun
)(target_ulong
);
622 case 0: ldfun
= ldl_kernel
; break;
623 case 1: ldfun
= ldl_super
; break;
625 case 2: ldfun
= ldl_user
; break;
629 if (base_reglist
> 0 && base_reglist
<= ARRAY_SIZE (multiple_regs
)) {
632 for (i
= 0; i
< base_reglist
; i
++) {
633 env
->active_tc
.gpr
[multiple_regs
[i
]] = (target_long
) ldfun(addr
);
639 env
->active_tc
.gpr
[31] = (target_long
) ldfun(addr
);
643 void helper_swm (target_ulong addr
, target_ulong reglist
, uint32_t mem_idx
)
645 target_ulong base_reglist
= reglist
& 0xf;
646 target_ulong do_r31
= reglist
& 0x10;
647 #ifdef CONFIG_USER_ONLY
649 #define stfun stl_raw
651 void (*stfun
)(target_ulong
, uint32_t);
655 case 0: stfun
= stl_kernel
; break;
656 case 1: stfun
= stl_super
; break;
658 case 2: stfun
= stl_user
; break;
662 if (base_reglist
> 0 && base_reglist
<= ARRAY_SIZE (multiple_regs
)) {
665 for (i
= 0; i
< base_reglist
; i
++) {
666 stfun(addr
, env
->active_tc
.gpr
[multiple_regs
[i
]]);
672 stfun(addr
, env
->active_tc
.gpr
[31]);
676 #if defined(TARGET_MIPS64)
677 void helper_ldm (target_ulong addr
, target_ulong reglist
, uint32_t mem_idx
)
679 target_ulong base_reglist
= reglist
& 0xf;
680 target_ulong do_r31
= reglist
& 0x10;
681 #ifdef CONFIG_USER_ONLY
683 #define ldfun ldq_raw
685 uint64_t (*ldfun
)(target_ulong
);
689 case 0: ldfun
= ldq_kernel
; break;
690 case 1: ldfun
= ldq_super
; break;
692 case 2: ldfun
= ldq_user
; break;
696 if (base_reglist
> 0 && base_reglist
<= ARRAY_SIZE (multiple_regs
)) {
699 for (i
= 0; i
< base_reglist
; i
++) {
700 env
->active_tc
.gpr
[multiple_regs
[i
]] = ldfun(addr
);
706 env
->active_tc
.gpr
[31] = ldfun(addr
);
710 void helper_sdm (target_ulong addr
, target_ulong reglist
, uint32_t mem_idx
)
712 target_ulong base_reglist
= reglist
& 0xf;
713 target_ulong do_r31
= reglist
& 0x10;
714 #ifdef CONFIG_USER_ONLY
716 #define stfun stq_raw
718 void (*stfun
)(target_ulong
, uint64_t);
722 case 0: stfun
= stq_kernel
; break;
723 case 1: stfun
= stq_super
; break;
725 case 2: stfun
= stq_user
; break;
729 if (base_reglist
> 0 && base_reglist
<= ARRAY_SIZE (multiple_regs
)) {
732 for (i
= 0; i
< base_reglist
; i
++) {
733 stfun(addr
, env
->active_tc
.gpr
[multiple_regs
[i
]]);
739 stfun(addr
, env
->active_tc
.gpr
[31]);
744 #ifndef CONFIG_USER_ONLY
746 static int mips_vpe_is_wfi(CPUMIPSState
*c
)
748 /* If the VPE is halted but otherwise active, it means it's waiting for
750 return c
->halted
&& mips_vpe_active(c
);
753 static inline void mips_vpe_wake(CPUMIPSState
*c
)
755 /* Dont set ->halted = 0 directly, let it be done via cpu_has_work
756 because there might be other conditions that state that c should
758 cpu_interrupt(c
, CPU_INTERRUPT_WAKE
);
761 static inline void mips_vpe_sleep(CPUMIPSState
*c
)
763 /* The VPE was shut off, really go to bed.
764 Reset any old _WAKE requests. */
766 cpu_reset_interrupt(c
, CPU_INTERRUPT_WAKE
);
769 static inline void mips_tc_wake(CPUMIPSState
*c
, int tc
)
771 /* FIXME: TC reschedule. */
772 if (mips_vpe_active(c
) && !mips_vpe_is_wfi(c
)) {
777 static inline void mips_tc_sleep(CPUMIPSState
*c
, int tc
)
779 /* FIXME: TC reschedule. */
780 if (!mips_vpe_active(c
)) {
785 /* tc should point to an int with the value of the global TC index.
786 This function will transform it into a local index within the
787 returned CPUMIPSState.
789 FIXME: This code assumes that all VPEs have the same number of TCs,
790 which depends on runtime setup. Can probably be fixed by
791 walking the list of CPUMIPSStates. */
792 static CPUMIPSState
*mips_cpu_map_tc(int *tc
)
795 int vpe_idx
, nr_threads
= env
->nr_threads
;
798 if (!(env
->CP0_VPEConf0
& (1 << CP0VPEC0_MVP
))) {
799 /* Not allowed to address other CPUs. */
800 *tc
= env
->current_tc
;
804 vpe_idx
= tc_idx
/ nr_threads
;
805 *tc
= tc_idx
% nr_threads
;
806 other
= qemu_get_cpu(vpe_idx
);
807 return other
? other
: env
;
810 /* The per VPE CP0_Status register shares some fields with the per TC
811 CP0_TCStatus registers. These fields are wired to the same registers,
812 so changes to either of them should be reflected on both registers.
814 Also, EntryHi shares the bottom 8 bit ASID with TCStauts.
816 These helper call synchronizes the regs for a given cpu. */
818 /* Called for updates to CP0_Status. */
819 static void sync_c0_status(CPUMIPSState
*cpu
, int tc
)
821 int32_t tcstatus
, *tcst
;
822 uint32_t v
= cpu
->CP0_Status
;
823 uint32_t cu
, mx
, asid
, ksu
;
824 uint32_t mask
= ((1 << CP0TCSt_TCU3
)
825 | (1 << CP0TCSt_TCU2
)
826 | (1 << CP0TCSt_TCU1
)
827 | (1 << CP0TCSt_TCU0
)
829 | (3 << CP0TCSt_TKSU
)
830 | (0xff << CP0TCSt_TASID
));
832 cu
= (v
>> CP0St_CU0
) & 0xf;
833 mx
= (v
>> CP0St_MX
) & 0x1;
834 ksu
= (v
>> CP0St_KSU
) & 0x3;
835 asid
= env
->CP0_EntryHi
& 0xff;
837 tcstatus
= cu
<< CP0TCSt_TCU0
;
838 tcstatus
|= mx
<< CP0TCSt_TMX
;
839 tcstatus
|= ksu
<< CP0TCSt_TKSU
;
842 if (tc
== cpu
->current_tc
) {
843 tcst
= &cpu
->active_tc
.CP0_TCStatus
;
845 tcst
= &cpu
->tcs
[tc
].CP0_TCStatus
;
853 /* Called for updates to CP0_TCStatus. */
854 static void sync_c0_tcstatus(CPUMIPSState
*cpu
, int tc
, target_ulong v
)
857 uint32_t tcu
, tmx
, tasid
, tksu
;
858 uint32_t mask
= ((1 << CP0St_CU3
)
865 tcu
= (v
>> CP0TCSt_TCU0
) & 0xf;
866 tmx
= (v
>> CP0TCSt_TMX
) & 0x1;
868 tksu
= (v
>> CP0TCSt_TKSU
) & 0x3;
870 status
= tcu
<< CP0St_CU0
;
871 status
|= tmx
<< CP0St_MX
;
872 status
|= tksu
<< CP0St_KSU
;
874 cpu
->CP0_Status
&= ~mask
;
875 cpu
->CP0_Status
|= status
;
877 /* Sync the TASID with EntryHi. */
878 cpu
->CP0_EntryHi
&= ~0xff;
879 cpu
->CP0_EntryHi
= tasid
;
884 /* Called for updates to CP0_EntryHi. */
885 static void sync_c0_entryhi(CPUMIPSState
*cpu
, int tc
)
888 uint32_t asid
, v
= cpu
->CP0_EntryHi
;
892 if (tc
== cpu
->current_tc
) {
893 tcst
= &cpu
->active_tc
.CP0_TCStatus
;
895 tcst
= &cpu
->tcs
[tc
].CP0_TCStatus
;
903 target_ulong
helper_mfc0_mvpcontrol (void)
905 return env
->mvp
->CP0_MVPControl
;
908 target_ulong
helper_mfc0_mvpconf0 (void)
910 return env
->mvp
->CP0_MVPConf0
;
913 target_ulong
helper_mfc0_mvpconf1 (void)
915 return env
->mvp
->CP0_MVPConf1
;
918 target_ulong
helper_mfc0_random (void)
920 return (int32_t)cpu_mips_get_random(env
);
923 target_ulong
helper_mfc0_tcstatus (void)
925 return env
->active_tc
.CP0_TCStatus
;
928 target_ulong
helper_mftc0_tcstatus(void)
930 int other_tc
= env
->CP0_VPEControl
& (0xff << CP0VPECo_TargTC
);
931 CPUMIPSState
*other
= mips_cpu_map_tc(&other_tc
);
933 if (other_tc
== other
->current_tc
)
934 return other
->active_tc
.CP0_TCStatus
;
936 return other
->tcs
[other_tc
].CP0_TCStatus
;
939 target_ulong
helper_mfc0_tcbind (void)
941 return env
->active_tc
.CP0_TCBind
;
944 target_ulong
helper_mftc0_tcbind(void)
946 int other_tc
= env
->CP0_VPEControl
& (0xff << CP0VPECo_TargTC
);
947 CPUMIPSState
*other
= mips_cpu_map_tc(&other_tc
);
949 if (other_tc
== other
->current_tc
)
950 return other
->active_tc
.CP0_TCBind
;
952 return other
->tcs
[other_tc
].CP0_TCBind
;
955 target_ulong
helper_mfc0_tcrestart (void)
957 return env
->active_tc
.PC
;
960 target_ulong
helper_mftc0_tcrestart(void)
962 int other_tc
= env
->CP0_VPEControl
& (0xff << CP0VPECo_TargTC
);
963 CPUMIPSState
*other
= mips_cpu_map_tc(&other_tc
);
965 if (other_tc
== other
->current_tc
)
966 return other
->active_tc
.PC
;
968 return other
->tcs
[other_tc
].PC
;
971 target_ulong
helper_mfc0_tchalt (void)
973 return env
->active_tc
.CP0_TCHalt
;
976 target_ulong
helper_mftc0_tchalt(void)
978 int other_tc
= env
->CP0_VPEControl
& (0xff << CP0VPECo_TargTC
);
979 CPUMIPSState
*other
= mips_cpu_map_tc(&other_tc
);
981 if (other_tc
== other
->current_tc
)
982 return other
->active_tc
.CP0_TCHalt
;
984 return other
->tcs
[other_tc
].CP0_TCHalt
;
987 target_ulong
helper_mfc0_tccontext (void)
989 return env
->active_tc
.CP0_TCContext
;
992 target_ulong
helper_mftc0_tccontext(void)
994 int other_tc
= env
->CP0_VPEControl
& (0xff << CP0VPECo_TargTC
);
995 CPUMIPSState
*other
= mips_cpu_map_tc(&other_tc
);
997 if (other_tc
== other
->current_tc
)
998 return other
->active_tc
.CP0_TCContext
;
1000 return other
->tcs
[other_tc
].CP0_TCContext
;
1003 target_ulong
helper_mfc0_tcschedule (void)
1005 return env
->active_tc
.CP0_TCSchedule
;
1008 target_ulong
helper_mftc0_tcschedule(void)
1010 int other_tc
= env
->CP0_VPEControl
& (0xff << CP0VPECo_TargTC
);
1011 CPUMIPSState
*other
= mips_cpu_map_tc(&other_tc
);
1013 if (other_tc
== other
->current_tc
)
1014 return other
->active_tc
.CP0_TCSchedule
;
1016 return other
->tcs
[other_tc
].CP0_TCSchedule
;
1019 target_ulong
helper_mfc0_tcschefback (void)
1021 return env
->active_tc
.CP0_TCScheFBack
;
1024 target_ulong
helper_mftc0_tcschefback(void)
1026 int other_tc
= env
->CP0_VPEControl
& (0xff << CP0VPECo_TargTC
);
1027 CPUMIPSState
*other
= mips_cpu_map_tc(&other_tc
);
1029 if (other_tc
== other
->current_tc
)
1030 return other
->active_tc
.CP0_TCScheFBack
;
1032 return other
->tcs
[other_tc
].CP0_TCScheFBack
;
1035 target_ulong
helper_mfc0_count (void)
1037 return (int32_t)cpu_mips_get_count(env
);
1040 target_ulong
helper_mftc0_entryhi(void)
1042 int other_tc
= env
->CP0_VPEControl
& (0xff << CP0VPECo_TargTC
);
1043 CPUMIPSState
*other
= mips_cpu_map_tc(&other_tc
);
1045 return other
->CP0_EntryHi
;
1048 target_ulong
helper_mftc0_cause(void)
1050 int other_tc
= env
->CP0_VPEControl
& (0xff << CP0VPECo_TargTC
);
1052 CPUMIPSState
*other
= mips_cpu_map_tc(&other_tc
);
1054 if (other_tc
== other
->current_tc
) {
1055 tccause
= other
->CP0_Cause
;
1057 tccause
= other
->CP0_Cause
;
1063 target_ulong
helper_mftc0_status(void)
1065 int other_tc
= env
->CP0_VPEControl
& (0xff << CP0VPECo_TargTC
);
1066 CPUMIPSState
*other
= mips_cpu_map_tc(&other_tc
);
1068 return other
->CP0_Status
;
1071 target_ulong
helper_mfc0_lladdr (void)
1073 return (int32_t)(env
->lladdr
>> env
->CP0_LLAddr_shift
);
1076 target_ulong
helper_mfc0_watchlo (uint32_t sel
)
1078 return (int32_t)env
->CP0_WatchLo
[sel
];
1081 target_ulong
helper_mfc0_watchhi (uint32_t sel
)
1083 return env
->CP0_WatchHi
[sel
];
1086 target_ulong
helper_mfc0_debug (void)
1088 target_ulong t0
= env
->CP0_Debug
;
1089 if (env
->hflags
& MIPS_HFLAG_DM
)
1090 t0
|= 1 << CP0DB_DM
;
1095 target_ulong
helper_mftc0_debug(void)
1097 int other_tc
= env
->CP0_VPEControl
& (0xff << CP0VPECo_TargTC
);
1099 CPUMIPSState
*other
= mips_cpu_map_tc(&other_tc
);
1101 if (other_tc
== other
->current_tc
)
1102 tcstatus
= other
->active_tc
.CP0_Debug_tcstatus
;
1104 tcstatus
= other
->tcs
[other_tc
].CP0_Debug_tcstatus
;
1106 /* XXX: Might be wrong, check with EJTAG spec. */
1107 return (other
->CP0_Debug
& ~((1 << CP0DB_SSt
) | (1 << CP0DB_Halt
))) |
1108 (tcstatus
& ((1 << CP0DB_SSt
) | (1 << CP0DB_Halt
)));
1111 #if defined(TARGET_MIPS64)
1112 target_ulong
helper_dmfc0_tcrestart (void)
1114 return env
->active_tc
.PC
;
1117 target_ulong
helper_dmfc0_tchalt (void)
1119 return env
->active_tc
.CP0_TCHalt
;
1122 target_ulong
helper_dmfc0_tccontext (void)
1124 return env
->active_tc
.CP0_TCContext
;
1127 target_ulong
helper_dmfc0_tcschedule (void)
1129 return env
->active_tc
.CP0_TCSchedule
;
1132 target_ulong
helper_dmfc0_tcschefback (void)
1134 return env
->active_tc
.CP0_TCScheFBack
;
1137 target_ulong
helper_dmfc0_lladdr (void)
1139 return env
->lladdr
>> env
->CP0_LLAddr_shift
;
1142 target_ulong
helper_dmfc0_watchlo (uint32_t sel
)
1144 return env
->CP0_WatchLo
[sel
];
1146 #endif /* TARGET_MIPS64 */
1148 void helper_mtc0_index (target_ulong arg1
)
1151 unsigned int tmp
= env
->tlb
->nb_tlb
;
1157 env
->CP0_Index
= (env
->CP0_Index
& 0x80000000) | (arg1
& (num
- 1));
1160 void helper_mtc0_mvpcontrol (target_ulong arg1
)
1165 if (env
->CP0_VPEConf0
& (1 << CP0VPEC0_MVP
))
1166 mask
|= (1 << CP0MVPCo_CPA
) | (1 << CP0MVPCo_VPC
) |
1167 (1 << CP0MVPCo_EVP
);
1168 if (env
->mvp
->CP0_MVPControl
& (1 << CP0MVPCo_VPC
))
1169 mask
|= (1 << CP0MVPCo_STLB
);
1170 newval
= (env
->mvp
->CP0_MVPControl
& ~mask
) | (arg1
& mask
);
1172 // TODO: Enable/disable shared TLB, enable/disable VPEs.
1174 env
->mvp
->CP0_MVPControl
= newval
;
1177 void helper_mtc0_vpecontrol (target_ulong arg1
)
1182 mask
= (1 << CP0VPECo_YSI
) | (1 << CP0VPECo_GSI
) |
1183 (1 << CP0VPECo_TE
) | (0xff << CP0VPECo_TargTC
);
1184 newval
= (env
->CP0_VPEControl
& ~mask
) | (arg1
& mask
);
1186 /* Yield scheduler intercept not implemented. */
1187 /* Gating storage scheduler intercept not implemented. */
1189 // TODO: Enable/disable TCs.
1191 env
->CP0_VPEControl
= newval
;
1194 void helper_mttc0_vpecontrol(target_ulong arg1
)
1196 int other_tc
= env
->CP0_VPEControl
& (0xff << CP0VPECo_TargTC
);
1197 CPUMIPSState
*other
= mips_cpu_map_tc(&other_tc
);
1201 mask
= (1 << CP0VPECo_YSI
) | (1 << CP0VPECo_GSI
) |
1202 (1 << CP0VPECo_TE
) | (0xff << CP0VPECo_TargTC
);
1203 newval
= (other
->CP0_VPEControl
& ~mask
) | (arg1
& mask
);
1205 /* TODO: Enable/disable TCs. */
1207 other
->CP0_VPEControl
= newval
;
1210 target_ulong
helper_mftc0_vpecontrol(void)
1212 int other_tc
= env
->CP0_VPEControl
& (0xff << CP0VPECo_TargTC
);
1213 CPUMIPSState
*other
= mips_cpu_map_tc(&other_tc
);
1214 /* FIXME: Mask away return zero on read bits. */
1215 return other
->CP0_VPEControl
;
1218 target_ulong
helper_mftc0_vpeconf0(void)
1220 int other_tc
= env
->CP0_VPEControl
& (0xff << CP0VPECo_TargTC
);
1221 CPUMIPSState
*other
= mips_cpu_map_tc(&other_tc
);
1223 return other
->CP0_VPEConf0
;
1226 void helper_mtc0_vpeconf0 (target_ulong arg1
)
1231 if (env
->CP0_VPEConf0
& (1 << CP0VPEC0_MVP
)) {
1232 if (env
->CP0_VPEConf0
& (1 << CP0VPEC0_VPA
))
1233 mask
|= (0xff << CP0VPEC0_XTC
);
1234 mask
|= (1 << CP0VPEC0_MVP
) | (1 << CP0VPEC0_VPA
);
1236 newval
= (env
->CP0_VPEConf0
& ~mask
) | (arg1
& mask
);
1238 // TODO: TC exclusive handling due to ERL/EXL.
1240 env
->CP0_VPEConf0
= newval
;
1243 void helper_mttc0_vpeconf0(target_ulong arg1
)
1245 int other_tc
= env
->CP0_VPEControl
& (0xff << CP0VPECo_TargTC
);
1246 CPUMIPSState
*other
= mips_cpu_map_tc(&other_tc
);
1250 mask
|= (1 << CP0VPEC0_MVP
) | (1 << CP0VPEC0_VPA
);
1251 newval
= (other
->CP0_VPEConf0
& ~mask
) | (arg1
& mask
);
1253 /* TODO: TC exclusive handling due to ERL/EXL. */
1254 other
->CP0_VPEConf0
= newval
;
1257 void helper_mtc0_vpeconf1 (target_ulong arg1
)
1262 if (env
->mvp
->CP0_MVPControl
& (1 << CP0MVPCo_VPC
))
1263 mask
|= (0xff << CP0VPEC1_NCX
) | (0xff << CP0VPEC1_NCP2
) |
1264 (0xff << CP0VPEC1_NCP1
);
1265 newval
= (env
->CP0_VPEConf1
& ~mask
) | (arg1
& mask
);
1267 /* UDI not implemented. */
1268 /* CP2 not implemented. */
1270 // TODO: Handle FPU (CP1) binding.
1272 env
->CP0_VPEConf1
= newval
;
1275 void helper_mtc0_yqmask (target_ulong arg1
)
1277 /* Yield qualifier inputs not implemented. */
1278 env
->CP0_YQMask
= 0x00000000;
1281 void helper_mtc0_vpeopt (target_ulong arg1
)
1283 env
->CP0_VPEOpt
= arg1
& 0x0000ffff;
1286 void helper_mtc0_entrylo0 (target_ulong arg1
)
1288 /* Large physaddr (PABITS) not implemented */
1289 /* 1k pages not implemented */
1290 env
->CP0_EntryLo0
= arg1
& 0x3FFFFFFF;
1293 void helper_mtc0_tcstatus (target_ulong arg1
)
1295 uint32_t mask
= env
->CP0_TCStatus_rw_bitmask
;
1298 newval
= (env
->active_tc
.CP0_TCStatus
& ~mask
) | (arg1
& mask
);
1300 env
->active_tc
.CP0_TCStatus
= newval
;
1301 sync_c0_tcstatus(env
, env
->current_tc
, newval
);
1304 void helper_mttc0_tcstatus (target_ulong arg1
)
1306 int other_tc
= env
->CP0_VPEControl
& (0xff << CP0VPECo_TargTC
);
1307 CPUMIPSState
*other
= mips_cpu_map_tc(&other_tc
);
1309 if (other_tc
== other
->current_tc
)
1310 other
->active_tc
.CP0_TCStatus
= arg1
;
1312 other
->tcs
[other_tc
].CP0_TCStatus
= arg1
;
1313 sync_c0_tcstatus(other
, other_tc
, arg1
);
1316 void helper_mtc0_tcbind (target_ulong arg1
)
1318 uint32_t mask
= (1 << CP0TCBd_TBE
);
1321 if (env
->mvp
->CP0_MVPControl
& (1 << CP0MVPCo_VPC
))
1322 mask
|= (1 << CP0TCBd_CurVPE
);
1323 newval
= (env
->active_tc
.CP0_TCBind
& ~mask
) | (arg1
& mask
);
1324 env
->active_tc
.CP0_TCBind
= newval
;
1327 void helper_mttc0_tcbind (target_ulong arg1
)
1329 int other_tc
= env
->CP0_VPEControl
& (0xff << CP0VPECo_TargTC
);
1330 uint32_t mask
= (1 << CP0TCBd_TBE
);
1332 CPUMIPSState
*other
= mips_cpu_map_tc(&other_tc
);
1334 if (other
->mvp
->CP0_MVPControl
& (1 << CP0MVPCo_VPC
))
1335 mask
|= (1 << CP0TCBd_CurVPE
);
1336 if (other_tc
== other
->current_tc
) {
1337 newval
= (other
->active_tc
.CP0_TCBind
& ~mask
) | (arg1
& mask
);
1338 other
->active_tc
.CP0_TCBind
= newval
;
1340 newval
= (other
->tcs
[other_tc
].CP0_TCBind
& ~mask
) | (arg1
& mask
);
1341 other
->tcs
[other_tc
].CP0_TCBind
= newval
;
1345 void helper_mtc0_tcrestart (target_ulong arg1
)
1347 env
->active_tc
.PC
= arg1
;
1348 env
->active_tc
.CP0_TCStatus
&= ~(1 << CP0TCSt_TDS
);
1350 /* MIPS16 not implemented. */
1353 void helper_mttc0_tcrestart (target_ulong arg1
)
1355 int other_tc
= env
->CP0_VPEControl
& (0xff << CP0VPECo_TargTC
);
1356 CPUMIPSState
*other
= mips_cpu_map_tc(&other_tc
);
1358 if (other_tc
== other
->current_tc
) {
1359 other
->active_tc
.PC
= arg1
;
1360 other
->active_tc
.CP0_TCStatus
&= ~(1 << CP0TCSt_TDS
);
1361 other
->lladdr
= 0ULL;
1362 /* MIPS16 not implemented. */
1364 other
->tcs
[other_tc
].PC
= arg1
;
1365 other
->tcs
[other_tc
].CP0_TCStatus
&= ~(1 << CP0TCSt_TDS
);
1366 other
->lladdr
= 0ULL;
1367 /* MIPS16 not implemented. */
1371 void helper_mtc0_tchalt (target_ulong arg1
)
1373 env
->active_tc
.CP0_TCHalt
= arg1
& 0x1;
1375 // TODO: Halt TC / Restart (if allocated+active) TC.
1376 if (env
->active_tc
.CP0_TCHalt
& 1) {
1377 mips_tc_sleep(env
, env
->current_tc
);
1379 mips_tc_wake(env
, env
->current_tc
);
1383 void helper_mttc0_tchalt (target_ulong arg1
)
1385 int other_tc
= env
->CP0_VPEControl
& (0xff << CP0VPECo_TargTC
);
1386 CPUMIPSState
*other
= mips_cpu_map_tc(&other_tc
);
1388 // TODO: Halt TC / Restart (if allocated+active) TC.
1390 if (other_tc
== other
->current_tc
)
1391 other
->active_tc
.CP0_TCHalt
= arg1
;
1393 other
->tcs
[other_tc
].CP0_TCHalt
= arg1
;
1396 mips_tc_sleep(other
, other_tc
);
1398 mips_tc_wake(other
, other_tc
);
1402 void helper_mtc0_tccontext (target_ulong arg1
)
1404 env
->active_tc
.CP0_TCContext
= arg1
;
1407 void helper_mttc0_tccontext (target_ulong arg1
)
1409 int other_tc
= env
->CP0_VPEControl
& (0xff << CP0VPECo_TargTC
);
1410 CPUMIPSState
*other
= mips_cpu_map_tc(&other_tc
);
1412 if (other_tc
== other
->current_tc
)
1413 other
->active_tc
.CP0_TCContext
= arg1
;
1415 other
->tcs
[other_tc
].CP0_TCContext
= arg1
;
1418 void helper_mtc0_tcschedule (target_ulong arg1
)
1420 env
->active_tc
.CP0_TCSchedule
= arg1
;
1423 void helper_mttc0_tcschedule (target_ulong arg1
)
1425 int other_tc
= env
->CP0_VPEControl
& (0xff << CP0VPECo_TargTC
);
1426 CPUMIPSState
*other
= mips_cpu_map_tc(&other_tc
);
1428 if (other_tc
== other
->current_tc
)
1429 other
->active_tc
.CP0_TCSchedule
= arg1
;
1431 other
->tcs
[other_tc
].CP0_TCSchedule
= arg1
;
1434 void helper_mtc0_tcschefback (target_ulong arg1
)
1436 env
->active_tc
.CP0_TCScheFBack
= arg1
;
1439 void helper_mttc0_tcschefback (target_ulong arg1
)
1441 int other_tc
= env
->CP0_VPEControl
& (0xff << CP0VPECo_TargTC
);
1442 CPUMIPSState
*other
= mips_cpu_map_tc(&other_tc
);
1444 if (other_tc
== other
->current_tc
)
1445 other
->active_tc
.CP0_TCScheFBack
= arg1
;
1447 other
->tcs
[other_tc
].CP0_TCScheFBack
= arg1
;
1450 void helper_mtc0_entrylo1 (target_ulong arg1
)
1452 /* Large physaddr (PABITS) not implemented */
1453 /* 1k pages not implemented */
1454 env
->CP0_EntryLo1
= arg1
& 0x3FFFFFFF;
1457 void helper_mtc0_context (target_ulong arg1
)
1459 env
->CP0_Context
= (env
->CP0_Context
& 0x007FFFFF) | (arg1
& ~0x007FFFFF);
1462 void helper_mtc0_pagemask (target_ulong arg1
)
1464 /* 1k pages not implemented */
1465 env
->CP0_PageMask
= arg1
& (0x1FFFFFFF & (TARGET_PAGE_MASK
<< 1));
1468 void helper_mtc0_pagegrain (target_ulong arg1
)
1470 /* SmartMIPS not implemented */
1471 /* Large physaddr (PABITS) not implemented */
1472 /* 1k pages not implemented */
1473 env
->CP0_PageGrain
= 0;
1476 void helper_mtc0_wired (target_ulong arg1
)
1478 env
->CP0_Wired
= arg1
% env
->tlb
->nb_tlb
;
1481 void helper_mtc0_srsconf0 (target_ulong arg1
)
1483 env
->CP0_SRSConf0
|= arg1
& env
->CP0_SRSConf0_rw_bitmask
;
1486 void helper_mtc0_srsconf1 (target_ulong arg1
)
1488 env
->CP0_SRSConf1
|= arg1
& env
->CP0_SRSConf1_rw_bitmask
;
1491 void helper_mtc0_srsconf2 (target_ulong arg1
)
1493 env
->CP0_SRSConf2
|= arg1
& env
->CP0_SRSConf2_rw_bitmask
;
1496 void helper_mtc0_srsconf3 (target_ulong arg1
)
1498 env
->CP0_SRSConf3
|= arg1
& env
->CP0_SRSConf3_rw_bitmask
;
1501 void helper_mtc0_srsconf4 (target_ulong arg1
)
1503 env
->CP0_SRSConf4
|= arg1
& env
->CP0_SRSConf4_rw_bitmask
;
1506 void helper_mtc0_hwrena (target_ulong arg1
)
1508 env
->CP0_HWREna
= arg1
& 0x0000000F;
1511 void helper_mtc0_count (target_ulong arg1
)
1513 cpu_mips_store_count(env
, arg1
);
1516 void helper_mtc0_entryhi (target_ulong arg1
)
1518 target_ulong old
, val
;
1520 /* 1k pages not implemented */
1521 val
= arg1
& ((TARGET_PAGE_MASK
<< 1) | 0xFF);
1522 #if defined(TARGET_MIPS64)
1523 val
&= env
->SEGMask
;
1525 old
= env
->CP0_EntryHi
;
1526 env
->CP0_EntryHi
= val
;
1527 if (env
->CP0_Config3
& (1 << CP0C3_MT
)) {
1528 sync_c0_entryhi(env
, env
->current_tc
);
1530 /* If the ASID changes, flush qemu's TLB. */
1531 if ((old
& 0xFF) != (val
& 0xFF))
1532 cpu_mips_tlb_flush(env
, 1);
1535 void helper_mttc0_entryhi(target_ulong arg1
)
1537 int other_tc
= env
->CP0_VPEControl
& (0xff << CP0VPECo_TargTC
);
1538 CPUMIPSState
*other
= mips_cpu_map_tc(&other_tc
);
1540 other
->CP0_EntryHi
= arg1
;
1541 sync_c0_entryhi(other
, other_tc
);
1544 void helper_mtc0_compare (target_ulong arg1
)
1546 cpu_mips_store_compare(env
, arg1
);
1549 void helper_mtc0_status (target_ulong arg1
)
1552 uint32_t mask
= env
->CP0_Status_rw_bitmask
;
1555 old
= env
->CP0_Status
;
1556 env
->CP0_Status
= (env
->CP0_Status
& ~mask
) | val
;
1557 if (env
->CP0_Config3
& (1 << CP0C3_MT
)) {
1558 sync_c0_status(env
, env
->current_tc
);
1560 compute_hflags(env
);
1563 if (qemu_loglevel_mask(CPU_LOG_EXEC
)) {
1564 qemu_log("Status %08x (%08x) => %08x (%08x) Cause %08x",
1565 old
, old
& env
->CP0_Cause
& CP0Ca_IP_mask
,
1566 val
, val
& env
->CP0_Cause
& CP0Ca_IP_mask
,
1568 switch (env
->hflags
& MIPS_HFLAG_KSU
) {
1569 case MIPS_HFLAG_UM
: qemu_log(", UM\n"); break;
1570 case MIPS_HFLAG_SM
: qemu_log(", SM\n"); break;
1571 case MIPS_HFLAG_KM
: qemu_log("\n"); break;
1572 default: cpu_abort(env
, "Invalid MMU mode!\n"); break;
1577 void helper_mttc0_status(target_ulong arg1
)
1579 int other_tc
= env
->CP0_VPEControl
& (0xff << CP0VPECo_TargTC
);
1580 CPUMIPSState
*other
= mips_cpu_map_tc(&other_tc
);
1582 other
->CP0_Status
= arg1
& ~0xf1000018;
1583 sync_c0_status(other
, other_tc
);
1586 void helper_mtc0_intctl (target_ulong arg1
)
1588 /* vectored interrupts not implemented, no performance counters. */
1589 env
->CP0_IntCtl
= (env
->CP0_IntCtl
& ~0x000003e0) | (arg1
& 0x000003e0);
1592 void helper_mtc0_srsctl (target_ulong arg1
)
1594 uint32_t mask
= (0xf << CP0SRSCtl_ESS
) | (0xf << CP0SRSCtl_PSS
);
1595 env
->CP0_SRSCtl
= (env
->CP0_SRSCtl
& ~mask
) | (arg1
& mask
);
1598 static void mtc0_cause(CPUMIPSState
*cpu
, target_ulong arg1
)
1600 uint32_t mask
= 0x00C00300;
1601 uint32_t old
= cpu
->CP0_Cause
;
1604 if (cpu
->insn_flags
& ISA_MIPS32R2
) {
1605 mask
|= 1 << CP0Ca_DC
;
1608 cpu
->CP0_Cause
= (cpu
->CP0_Cause
& ~mask
) | (arg1
& mask
);
1610 if ((old
^ cpu
->CP0_Cause
) & (1 << CP0Ca_DC
)) {
1611 if (cpu
->CP0_Cause
& (1 << CP0Ca_DC
)) {
1612 cpu_mips_stop_count(cpu
);
1614 cpu_mips_start_count(cpu
);
1618 /* Set/reset software interrupts */
1619 for (i
= 0 ; i
< 2 ; i
++) {
1620 if ((old
^ cpu
->CP0_Cause
) & (1 << (CP0Ca_IP
+ i
))) {
1621 cpu_mips_soft_irq(cpu
, i
, cpu
->CP0_Cause
& (1 << (CP0Ca_IP
+ i
)));
1626 void helper_mtc0_cause(target_ulong arg1
)
1628 mtc0_cause(env
, arg1
);
1631 void helper_mttc0_cause(target_ulong arg1
)
1633 int other_tc
= env
->CP0_VPEControl
& (0xff << CP0VPECo_TargTC
);
1634 CPUMIPSState
*other
= mips_cpu_map_tc(&other_tc
);
1636 mtc0_cause(other
, arg1
);
1639 target_ulong
helper_mftc0_epc(void)
1641 int other_tc
= env
->CP0_VPEControl
& (0xff << CP0VPECo_TargTC
);
1642 CPUMIPSState
*other
= mips_cpu_map_tc(&other_tc
);
1644 return other
->CP0_EPC
;
1647 target_ulong
helper_mftc0_ebase(void)
1649 int other_tc
= env
->CP0_VPEControl
& (0xff << CP0VPECo_TargTC
);
1650 CPUMIPSState
*other
= mips_cpu_map_tc(&other_tc
);
1652 return other
->CP0_EBase
;
1655 void helper_mtc0_ebase (target_ulong arg1
)
1657 /* vectored interrupts not implemented */
1658 env
->CP0_EBase
= (env
->CP0_EBase
& ~0x3FFFF000) | (arg1
& 0x3FFFF000);
1661 void helper_mttc0_ebase(target_ulong arg1
)
1663 int other_tc
= env
->CP0_VPEControl
& (0xff << CP0VPECo_TargTC
);
1664 CPUMIPSState
*other
= mips_cpu_map_tc(&other_tc
);
1665 other
->CP0_EBase
= (other
->CP0_EBase
& ~0x3FFFF000) | (arg1
& 0x3FFFF000);
1668 target_ulong
helper_mftc0_configx(target_ulong idx
)
1670 int other_tc
= env
->CP0_VPEControl
& (0xff << CP0VPECo_TargTC
);
1671 CPUMIPSState
*other
= mips_cpu_map_tc(&other_tc
);
1674 case 0: return other
->CP0_Config0
;
1675 case 1: return other
->CP0_Config1
;
1676 case 2: return other
->CP0_Config2
;
1677 case 3: return other
->CP0_Config3
;
1678 /* 4 and 5 are reserved. */
1679 case 6: return other
->CP0_Config6
;
1680 case 7: return other
->CP0_Config7
;
1687 void helper_mtc0_config0 (target_ulong arg1
)
1689 env
->CP0_Config0
= (env
->CP0_Config0
& 0x81FFFFF8) | (arg1
& 0x00000007);
1692 void helper_mtc0_config2 (target_ulong arg1
)
1694 /* tertiary/secondary caches not implemented */
1695 env
->CP0_Config2
= (env
->CP0_Config2
& 0x8FFF0FFF);
1698 void helper_mtc0_lladdr (target_ulong arg1
)
1700 target_long mask
= env
->CP0_LLAddr_rw_bitmask
;
1701 arg1
= arg1
<< env
->CP0_LLAddr_shift
;
1702 env
->lladdr
= (env
->lladdr
& ~mask
) | (arg1
& mask
);
1705 void helper_mtc0_watchlo (target_ulong arg1
, uint32_t sel
)
1707 /* Watch exceptions for instructions, data loads, data stores
1709 env
->CP0_WatchLo
[sel
] = (arg1
& ~0x7);
1712 void helper_mtc0_watchhi (target_ulong arg1
, uint32_t sel
)
1714 env
->CP0_WatchHi
[sel
] = (arg1
& 0x40FF0FF8);
1715 env
->CP0_WatchHi
[sel
] &= ~(env
->CP0_WatchHi
[sel
] & arg1
& 0x7);
1718 void helper_mtc0_xcontext (target_ulong arg1
)
1720 target_ulong mask
= (1ULL << (env
->SEGBITS
- 7)) - 1;
1721 env
->CP0_XContext
= (env
->CP0_XContext
& mask
) | (arg1
& ~mask
);
1724 void helper_mtc0_framemask (target_ulong arg1
)
1726 env
->CP0_Framemask
= arg1
; /* XXX */
1729 void helper_mtc0_debug (target_ulong arg1
)
1731 env
->CP0_Debug
= (env
->CP0_Debug
& 0x8C03FC1F) | (arg1
& 0x13300120);
1732 if (arg1
& (1 << CP0DB_DM
))
1733 env
->hflags
|= MIPS_HFLAG_DM
;
1735 env
->hflags
&= ~MIPS_HFLAG_DM
;
1738 void helper_mttc0_debug(target_ulong arg1
)
1740 int other_tc
= env
->CP0_VPEControl
& (0xff << CP0VPECo_TargTC
);
1741 uint32_t val
= arg1
& ((1 << CP0DB_SSt
) | (1 << CP0DB_Halt
));
1742 CPUMIPSState
*other
= mips_cpu_map_tc(&other_tc
);
1744 /* XXX: Might be wrong, check with EJTAG spec. */
1745 if (other_tc
== other
->current_tc
)
1746 other
->active_tc
.CP0_Debug_tcstatus
= val
;
1748 other
->tcs
[other_tc
].CP0_Debug_tcstatus
= val
;
1749 other
->CP0_Debug
= (other
->CP0_Debug
&
1750 ((1 << CP0DB_SSt
) | (1 << CP0DB_Halt
))) |
1751 (arg1
& ~((1 << CP0DB_SSt
) | (1 << CP0DB_Halt
)));
1754 void helper_mtc0_performance0 (target_ulong arg1
)
1756 env
->CP0_Performance0
= arg1
& 0x000007ff;
1759 void helper_mtc0_taglo (target_ulong arg1
)
1761 env
->CP0_TagLo
= arg1
& 0xFFFFFCF6;
1764 void helper_mtc0_datalo (target_ulong arg1
)
1766 env
->CP0_DataLo
= arg1
; /* XXX */
1769 void helper_mtc0_taghi (target_ulong arg1
)
1771 env
->CP0_TagHi
= arg1
; /* XXX */
1774 void helper_mtc0_datahi (target_ulong arg1
)
1776 env
->CP0_DataHi
= arg1
; /* XXX */
1779 /* MIPS MT functions */
1780 target_ulong
helper_mftgpr(uint32_t sel
)
1782 int other_tc
= env
->CP0_VPEControl
& (0xff << CP0VPECo_TargTC
);
1783 CPUMIPSState
*other
= mips_cpu_map_tc(&other_tc
);
1785 if (other_tc
== other
->current_tc
)
1786 return other
->active_tc
.gpr
[sel
];
1788 return other
->tcs
[other_tc
].gpr
[sel
];
1791 target_ulong
helper_mftlo(uint32_t sel
)
1793 int other_tc
= env
->CP0_VPEControl
& (0xff << CP0VPECo_TargTC
);
1794 CPUMIPSState
*other
= mips_cpu_map_tc(&other_tc
);
1796 if (other_tc
== other
->current_tc
)
1797 return other
->active_tc
.LO
[sel
];
1799 return other
->tcs
[other_tc
].LO
[sel
];
1802 target_ulong
helper_mfthi(uint32_t sel
)
1804 int other_tc
= env
->CP0_VPEControl
& (0xff << CP0VPECo_TargTC
);
1805 CPUMIPSState
*other
= mips_cpu_map_tc(&other_tc
);
1807 if (other_tc
== other
->current_tc
)
1808 return other
->active_tc
.HI
[sel
];
1810 return other
->tcs
[other_tc
].HI
[sel
];
1813 target_ulong
helper_mftacx(uint32_t sel
)
1815 int other_tc
= env
->CP0_VPEControl
& (0xff << CP0VPECo_TargTC
);
1816 CPUMIPSState
*other
= mips_cpu_map_tc(&other_tc
);
1818 if (other_tc
== other
->current_tc
)
1819 return other
->active_tc
.ACX
[sel
];
1821 return other
->tcs
[other_tc
].ACX
[sel
];
1824 target_ulong
helper_mftdsp(void)
1826 int other_tc
= env
->CP0_VPEControl
& (0xff << CP0VPECo_TargTC
);
1827 CPUMIPSState
*other
= mips_cpu_map_tc(&other_tc
);
1829 if (other_tc
== other
->current_tc
)
1830 return other
->active_tc
.DSPControl
;
1832 return other
->tcs
[other_tc
].DSPControl
;
1835 void helper_mttgpr(target_ulong arg1
, uint32_t sel
)
1837 int other_tc
= env
->CP0_VPEControl
& (0xff << CP0VPECo_TargTC
);
1838 CPUMIPSState
*other
= mips_cpu_map_tc(&other_tc
);
1840 if (other_tc
== other
->current_tc
)
1841 other
->active_tc
.gpr
[sel
] = arg1
;
1843 other
->tcs
[other_tc
].gpr
[sel
] = arg1
;
1846 void helper_mttlo(target_ulong arg1
, uint32_t sel
)
1848 int other_tc
= env
->CP0_VPEControl
& (0xff << CP0VPECo_TargTC
);
1849 CPUMIPSState
*other
= mips_cpu_map_tc(&other_tc
);
1851 if (other_tc
== other
->current_tc
)
1852 other
->active_tc
.LO
[sel
] = arg1
;
1854 other
->tcs
[other_tc
].LO
[sel
] = arg1
;
1857 void helper_mtthi(target_ulong arg1
, uint32_t sel
)
1859 int other_tc
= env
->CP0_VPEControl
& (0xff << CP0VPECo_TargTC
);
1860 CPUMIPSState
*other
= mips_cpu_map_tc(&other_tc
);
1862 if (other_tc
== other
->current_tc
)
1863 other
->active_tc
.HI
[sel
] = arg1
;
1865 other
->tcs
[other_tc
].HI
[sel
] = arg1
;
1868 void helper_mttacx(target_ulong arg1
, uint32_t sel
)
1870 int other_tc
= env
->CP0_VPEControl
& (0xff << CP0VPECo_TargTC
);
1871 CPUMIPSState
*other
= mips_cpu_map_tc(&other_tc
);
1873 if (other_tc
== other
->current_tc
)
1874 other
->active_tc
.ACX
[sel
] = arg1
;
1876 other
->tcs
[other_tc
].ACX
[sel
] = arg1
;
1879 void helper_mttdsp(target_ulong arg1
)
1881 int other_tc
= env
->CP0_VPEControl
& (0xff << CP0VPECo_TargTC
);
1882 CPUMIPSState
*other
= mips_cpu_map_tc(&other_tc
);
1884 if (other_tc
== other
->current_tc
)
1885 other
->active_tc
.DSPControl
= arg1
;
1887 other
->tcs
[other_tc
].DSPControl
= arg1
;
1890 /* MIPS MT functions */
1891 target_ulong
helper_dmt(void)
1897 target_ulong
helper_emt(void)
1903 target_ulong
helper_dvpe(void)
1905 CPUMIPSState
*other_cpu
= first_cpu
;
1906 target_ulong prev
= env
->mvp
->CP0_MVPControl
;
1909 /* Turn off all VPEs except the one executing the dvpe. */
1910 if (other_cpu
!= env
) {
1911 other_cpu
->mvp
->CP0_MVPControl
&= ~(1 << CP0MVPCo_EVP
);
1912 mips_vpe_sleep(other_cpu
);
1914 other_cpu
= other_cpu
->next_cpu
;
1915 } while (other_cpu
);
1919 target_ulong
helper_evpe(void)
1921 CPUMIPSState
*other_cpu
= first_cpu
;
1922 target_ulong prev
= env
->mvp
->CP0_MVPControl
;
1925 if (other_cpu
!= env
1926 /* If the VPE is WFI, don't disturb its sleep. */
1927 && !mips_vpe_is_wfi(other_cpu
)) {
1928 /* Enable the VPE. */
1929 other_cpu
->mvp
->CP0_MVPControl
|= (1 << CP0MVPCo_EVP
);
1930 mips_vpe_wake(other_cpu
); /* And wake it up. */
1932 other_cpu
= other_cpu
->next_cpu
;
1933 } while (other_cpu
);
1936 #endif /* !CONFIG_USER_ONLY */
1938 void helper_fork(target_ulong arg1
, target_ulong arg2
)
1940 // arg1 = rt, arg2 = rs
1942 // TODO: store to TC register
1945 target_ulong
helper_yield(target_ulong arg
)
1947 target_long arg1
= arg
;
1950 /* No scheduling policy implemented. */
1952 if (env
->CP0_VPEControl
& (1 << CP0VPECo_YSI
) &&
1953 env
->active_tc
.CP0_TCStatus
& (1 << CP0TCSt_DT
)) {
1954 env
->CP0_VPEControl
&= ~(0x7 << CP0VPECo_EXCPT
);
1955 env
->CP0_VPEControl
|= 4 << CP0VPECo_EXCPT
;
1956 helper_raise_exception(EXCP_THREAD
);
1959 } else if (arg1
== 0) {
1960 if (0 /* TODO: TC underflow */) {
1961 env
->CP0_VPEControl
&= ~(0x7 << CP0VPECo_EXCPT
);
1962 helper_raise_exception(EXCP_THREAD
);
1964 // TODO: Deallocate TC
1966 } else if (arg1
> 0) {
1967 /* Yield qualifier inputs not implemented. */
1968 env
->CP0_VPEControl
&= ~(0x7 << CP0VPECo_EXCPT
);
1969 env
->CP0_VPEControl
|= 2 << CP0VPECo_EXCPT
;
1970 helper_raise_exception(EXCP_THREAD
);
1972 return env
->CP0_YQMask
;
1975 #ifndef CONFIG_USER_ONLY
1976 /* TLB management */
1977 static void cpu_mips_tlb_flush (CPUMIPSState
*env
, int flush_global
)
1979 /* Flush qemu's TLB and discard all shadowed entries. */
1980 tlb_flush (env
, flush_global
);
1981 env
->tlb
->tlb_in_use
= env
->tlb
->nb_tlb
;
1984 static void r4k_mips_tlb_flush_extra (CPUMIPSState
*env
, int first
)
1986 /* Discard entries from env->tlb[first] onwards. */
1987 while (env
->tlb
->tlb_in_use
> first
) {
1988 r4k_invalidate_tlb(env
, --env
->tlb
->tlb_in_use
, 0);
1992 static void r4k_fill_tlb (int idx
)
1996 /* XXX: detect conflicting TLBs and raise a MCHECK exception when needed */
1997 tlb
= &env
->tlb
->mmu
.r4k
.tlb
[idx
];
1998 tlb
->VPN
= env
->CP0_EntryHi
& (TARGET_PAGE_MASK
<< 1);
1999 #if defined(TARGET_MIPS64)
2000 tlb
->VPN
&= env
->SEGMask
;
2002 tlb
->ASID
= env
->CP0_EntryHi
& 0xFF;
2003 tlb
->PageMask
= env
->CP0_PageMask
;
2004 tlb
->G
= env
->CP0_EntryLo0
& env
->CP0_EntryLo1
& 1;
2005 tlb
->V0
= (env
->CP0_EntryLo0
& 2) != 0;
2006 tlb
->D0
= (env
->CP0_EntryLo0
& 4) != 0;
2007 tlb
->C0
= (env
->CP0_EntryLo0
>> 3) & 0x7;
2008 tlb
->PFN
[0] = (env
->CP0_EntryLo0
>> 6) << 12;
2009 tlb
->V1
= (env
->CP0_EntryLo1
& 2) != 0;
2010 tlb
->D1
= (env
->CP0_EntryLo1
& 4) != 0;
2011 tlb
->C1
= (env
->CP0_EntryLo1
>> 3) & 0x7;
2012 tlb
->PFN
[1] = (env
->CP0_EntryLo1
>> 6) << 12;
2015 void r4k_helper_tlbwi (void)
2019 idx
= (env
->CP0_Index
& ~0x80000000) % env
->tlb
->nb_tlb
;
2021 /* Discard cached TLB entries. We could avoid doing this if the
2022 tlbwi is just upgrading access permissions on the current entry;
2023 that might be a further win. */
2024 r4k_mips_tlb_flush_extra (env
, env
->tlb
->nb_tlb
);
2026 r4k_invalidate_tlb(env
, idx
, 0);
2030 void r4k_helper_tlbwr (void)
2032 int r
= cpu_mips_get_random(env
);
2034 r4k_invalidate_tlb(env
, r
, 1);
2038 void r4k_helper_tlbp (void)
2047 ASID
= env
->CP0_EntryHi
& 0xFF;
2048 for (i
= 0; i
< env
->tlb
->nb_tlb
; i
++) {
2049 tlb
= &env
->tlb
->mmu
.r4k
.tlb
[i
];
2050 /* 1k pages are not supported. */
2051 mask
= tlb
->PageMask
| ~(TARGET_PAGE_MASK
<< 1);
2052 tag
= env
->CP0_EntryHi
& ~mask
;
2053 VPN
= tlb
->VPN
& ~mask
;
2054 /* Check ASID, virtual page number & size */
2055 if ((tlb
->G
== 1 || tlb
->ASID
== ASID
) && VPN
== tag
) {
2061 if (i
== env
->tlb
->nb_tlb
) {
2062 /* No match. Discard any shadow entries, if any of them match. */
2063 for (i
= env
->tlb
->nb_tlb
; i
< env
->tlb
->tlb_in_use
; i
++) {
2064 tlb
= &env
->tlb
->mmu
.r4k
.tlb
[i
];
2065 /* 1k pages are not supported. */
2066 mask
= tlb
->PageMask
| ~(TARGET_PAGE_MASK
<< 1);
2067 tag
= env
->CP0_EntryHi
& ~mask
;
2068 VPN
= tlb
->VPN
& ~mask
;
2069 /* Check ASID, virtual page number & size */
2070 if ((tlb
->G
== 1 || tlb
->ASID
== ASID
) && VPN
== tag
) {
2071 r4k_mips_tlb_flush_extra (env
, i
);
2076 env
->CP0_Index
|= 0x80000000;
2080 void r4k_helper_tlbr (void)
2086 ASID
= env
->CP0_EntryHi
& 0xFF;
2087 idx
= (env
->CP0_Index
& ~0x80000000) % env
->tlb
->nb_tlb
;
2088 tlb
= &env
->tlb
->mmu
.r4k
.tlb
[idx
];
2090 /* If this will change the current ASID, flush qemu's TLB. */
2091 if (ASID
!= tlb
->ASID
)
2092 cpu_mips_tlb_flush (env
, 1);
2094 r4k_mips_tlb_flush_extra(env
, env
->tlb
->nb_tlb
);
2096 env
->CP0_EntryHi
= tlb
->VPN
| tlb
->ASID
;
2097 env
->CP0_PageMask
= tlb
->PageMask
;
2098 env
->CP0_EntryLo0
= tlb
->G
| (tlb
->V0
<< 1) | (tlb
->D0
<< 2) |
2099 (tlb
->C0
<< 3) | (tlb
->PFN
[0] >> 6);
2100 env
->CP0_EntryLo1
= tlb
->G
| (tlb
->V1
<< 1) | (tlb
->D1
<< 2) |
2101 (tlb
->C1
<< 3) | (tlb
->PFN
[1] >> 6);
2104 void helper_tlbwi(void)
2106 env
->tlb
->helper_tlbwi();
2109 void helper_tlbwr(void)
2111 env
->tlb
->helper_tlbwr();
2114 void helper_tlbp(void)
2116 env
->tlb
->helper_tlbp();
2119 void helper_tlbr(void)
2121 env
->tlb
->helper_tlbr();
2125 target_ulong
helper_di (void)
2127 target_ulong t0
= env
->CP0_Status
;
2129 env
->CP0_Status
= t0
& ~(1 << CP0St_IE
);
2133 target_ulong
helper_ei (void)
2135 target_ulong t0
= env
->CP0_Status
;
2137 env
->CP0_Status
= t0
| (1 << CP0St_IE
);
2141 static void debug_pre_eret (void)
2143 if (qemu_loglevel_mask(CPU_LOG_EXEC
)) {
2144 qemu_log("ERET: PC " TARGET_FMT_lx
" EPC " TARGET_FMT_lx
,
2145 env
->active_tc
.PC
, env
->CP0_EPC
);
2146 if (env
->CP0_Status
& (1 << CP0St_ERL
))
2147 qemu_log(" ErrorEPC " TARGET_FMT_lx
, env
->CP0_ErrorEPC
);
2148 if (env
->hflags
& MIPS_HFLAG_DM
)
2149 qemu_log(" DEPC " TARGET_FMT_lx
, env
->CP0_DEPC
);
2154 static void debug_post_eret (void)
2156 if (qemu_loglevel_mask(CPU_LOG_EXEC
)) {
2157 qemu_log(" => PC " TARGET_FMT_lx
" EPC " TARGET_FMT_lx
,
2158 env
->active_tc
.PC
, env
->CP0_EPC
);
2159 if (env
->CP0_Status
& (1 << CP0St_ERL
))
2160 qemu_log(" ErrorEPC " TARGET_FMT_lx
, env
->CP0_ErrorEPC
);
2161 if (env
->hflags
& MIPS_HFLAG_DM
)
2162 qemu_log(" DEPC " TARGET_FMT_lx
, env
->CP0_DEPC
);
2163 switch (env
->hflags
& MIPS_HFLAG_KSU
) {
2164 case MIPS_HFLAG_UM
: qemu_log(", UM\n"); break;
2165 case MIPS_HFLAG_SM
: qemu_log(", SM\n"); break;
2166 case MIPS_HFLAG_KM
: qemu_log("\n"); break;
2167 default: cpu_abort(env
, "Invalid MMU mode!\n"); break;
2172 static void set_pc (target_ulong error_pc
)
2174 env
->active_tc
.PC
= error_pc
& ~(target_ulong
)1;
2176 env
->hflags
|= MIPS_HFLAG_M16
;
2178 env
->hflags
&= ~(MIPS_HFLAG_M16
);
2182 void helper_eret (void)
2185 if (env
->CP0_Status
& (1 << CP0St_ERL
)) {
2186 set_pc(env
->CP0_ErrorEPC
);
2187 env
->CP0_Status
&= ~(1 << CP0St_ERL
);
2189 set_pc(env
->CP0_EPC
);
2190 env
->CP0_Status
&= ~(1 << CP0St_EXL
);
2192 compute_hflags(env
);
2197 void helper_deret (void)
2200 set_pc(env
->CP0_DEPC
);
2202 env
->hflags
&= MIPS_HFLAG_DM
;
2203 compute_hflags(env
);
2207 #endif /* !CONFIG_USER_ONLY */
2209 target_ulong
helper_rdhwr_cpunum(void)
2211 if ((env
->hflags
& MIPS_HFLAG_CP0
) ||
2212 (env
->CP0_HWREna
& (1 << 0)))
2213 return env
->CP0_EBase
& 0x3ff;
2215 helper_raise_exception(EXCP_RI
);
2220 target_ulong
helper_rdhwr_synci_step(void)
2222 if ((env
->hflags
& MIPS_HFLAG_CP0
) ||
2223 (env
->CP0_HWREna
& (1 << 1)))
2224 return env
->SYNCI_Step
;
2226 helper_raise_exception(EXCP_RI
);
2231 target_ulong
helper_rdhwr_cc(void)
2233 if ((env
->hflags
& MIPS_HFLAG_CP0
) ||
2234 (env
->CP0_HWREna
& (1 << 2)))
2235 return env
->CP0_Count
;
2237 helper_raise_exception(EXCP_RI
);
2242 target_ulong
helper_rdhwr_ccres(void)
2244 if ((env
->hflags
& MIPS_HFLAG_CP0
) ||
2245 (env
->CP0_HWREna
& (1 << 3)))
2248 helper_raise_exception(EXCP_RI
);
2253 void helper_pmon (int function
)
2257 case 2: /* TODO: char inbyte(int waitflag); */
2258 if (env
->active_tc
.gpr
[4] == 0)
2259 env
->active_tc
.gpr
[2] = -1;
2261 case 11: /* TODO: char inbyte (void); */
2262 env
->active_tc
.gpr
[2] = -1;
2266 printf("%c", (char)(env
->active_tc
.gpr
[4] & 0xFF));
2272 unsigned char *fmt
= (void *)(uintptr_t)env
->active_tc
.gpr
[4];
2279 void helper_wait (void)
2282 cpu_reset_interrupt(env
, CPU_INTERRUPT_WAKE
);
2283 helper_raise_exception(EXCP_HLT
);
2286 #if !defined(CONFIG_USER_ONLY)
2288 static void QEMU_NORETURN
do_unaligned_access(target_ulong addr
, int is_write
,
2289 int is_user
, uintptr_t retaddr
);
2291 #define MMUSUFFIX _mmu
2292 #define ALIGNED_ONLY
2295 #include "softmmu_template.h"
2298 #include "softmmu_template.h"
2301 #include "softmmu_template.h"
2304 #include "softmmu_template.h"
2306 static void do_unaligned_access(target_ulong addr
, int is_write
,
2307 int is_user
, uintptr_t retaddr
)
2309 env
->CP0_BadVAddr
= addr
;
2310 do_restore_state (retaddr
);
2311 helper_raise_exception ((is_write
== 1) ? EXCP_AdES
: EXCP_AdEL
);
2314 void tlb_fill(CPUMIPSState
*env1
, target_ulong addr
, int is_write
, int mmu_idx
,
2317 TranslationBlock
*tb
;
2318 CPUMIPSState
*saved_env
;
2323 ret
= cpu_mips_handle_mmu_fault(env
, addr
, is_write
, mmu_idx
);
2326 /* now we have a real cpu fault */
2327 tb
= tb_find_pc(retaddr
);
2329 /* the PC is inside the translated code. It means that we have
2330 a virtual CPU fault */
2331 cpu_restore_state(tb
, env
, retaddr
);
2334 helper_raise_exception_err(env
->exception_index
, env
->error_code
);
2339 void cpu_unassigned_access(CPUMIPSState
*env1
, target_phys_addr_t addr
,
2340 int is_write
, int is_exec
, int unused
, int size
)
2345 helper_raise_exception(EXCP_IBE
);
2347 helper_raise_exception(EXCP_DBE
);
2349 #endif /* !CONFIG_USER_ONLY */
2351 /* Complex FPU operations which may need stack space. */
2353 #define FLOAT_ONE32 make_float32(0x3f8 << 20)
2354 #define FLOAT_ONE64 make_float64(0x3ffULL << 52)
2355 #define FLOAT_TWO32 make_float32(1 << 30)
2356 #define FLOAT_TWO64 make_float64(1ULL << 62)
2357 #define FLOAT_QNAN32 0x7fbfffff
2358 #define FLOAT_QNAN64 0x7ff7ffffffffffffULL
2359 #define FLOAT_SNAN32 0x7fffffff
2360 #define FLOAT_SNAN64 0x7fffffffffffffffULL
2362 /* convert MIPS rounding mode in FCR31 to IEEE library */
2363 static unsigned int ieee_rm
[] = {
2364 float_round_nearest_even
,
2365 float_round_to_zero
,
2370 #define RESTORE_ROUNDING_MODE \
2371 set_float_rounding_mode(ieee_rm[env->active_fpu.fcr31 & 3], &env->active_fpu.fp_status)
2373 #define RESTORE_FLUSH_MODE \
2374 set_flush_to_zero((env->active_fpu.fcr31 & (1 << 24)) != 0, &env->active_fpu.fp_status);
2376 target_ulong
helper_cfc1 (uint32_t reg
)
2382 arg1
= (int32_t)env
->active_fpu
.fcr0
;
2385 arg1
= ((env
->active_fpu
.fcr31
>> 24) & 0xfe) | ((env
->active_fpu
.fcr31
>> 23) & 0x1);
2388 arg1
= env
->active_fpu
.fcr31
& 0x0003f07c;
2391 arg1
= (env
->active_fpu
.fcr31
& 0x00000f83) | ((env
->active_fpu
.fcr31
>> 22) & 0x4);
2394 arg1
= (int32_t)env
->active_fpu
.fcr31
;
2401 void helper_ctc1 (target_ulong arg1
, uint32_t reg
)
2405 if (arg1
& 0xffffff00)
2407 env
->active_fpu
.fcr31
= (env
->active_fpu
.fcr31
& 0x017fffff) | ((arg1
& 0xfe) << 24) |
2408 ((arg1
& 0x1) << 23);
2411 if (arg1
& 0x007c0000)
2413 env
->active_fpu
.fcr31
= (env
->active_fpu
.fcr31
& 0xfffc0f83) | (arg1
& 0x0003f07c);
2416 if (arg1
& 0x007c0000)
2418 env
->active_fpu
.fcr31
= (env
->active_fpu
.fcr31
& 0xfefff07c) | (arg1
& 0x00000f83) |
2419 ((arg1
& 0x4) << 22);
2422 if (arg1
& 0x007c0000)
2424 env
->active_fpu
.fcr31
= arg1
;
2429 /* set rounding mode */
2430 RESTORE_ROUNDING_MODE
;
2431 /* set flush-to-zero mode */
2433 set_float_exception_flags(0, &env
->active_fpu
.fp_status
);
2434 if ((GET_FP_ENABLE(env
->active_fpu
.fcr31
) | 0x20) & GET_FP_CAUSE(env
->active_fpu
.fcr31
))
2435 helper_raise_exception(EXCP_FPE
);
2438 static inline int ieee_ex_to_mips(int xcpt
)
2442 if (xcpt
& float_flag_invalid
) {
2445 if (xcpt
& float_flag_overflow
) {
2448 if (xcpt
& float_flag_underflow
) {
2449 ret
|= FP_UNDERFLOW
;
2451 if (xcpt
& float_flag_divbyzero
) {
2454 if (xcpt
& float_flag_inexact
) {
2461 static inline void update_fcr31(void)
2463 int tmp
= ieee_ex_to_mips(get_float_exception_flags(&env
->active_fpu
.fp_status
));
2465 SET_FP_CAUSE(env
->active_fpu
.fcr31
, tmp
);
2466 if (GET_FP_ENABLE(env
->active_fpu
.fcr31
) & tmp
)
2467 helper_raise_exception(EXCP_FPE
);
2469 UPDATE_FP_FLAGS(env
->active_fpu
.fcr31
, tmp
);
2473 Single precition routines have a "s" suffix, double precision a
2474 "d" suffix, 32bit integer "w", 64bit integer "l", paired single "ps",
2475 paired single lower "pl", paired single upper "pu". */
2477 /* unary operations, modifying fp status */
2478 uint64_t helper_float_sqrt_d(uint64_t fdt0
)
2480 return float64_sqrt(fdt0
, &env
->active_fpu
.fp_status
);
2483 uint32_t helper_float_sqrt_s(uint32_t fst0
)
2485 return float32_sqrt(fst0
, &env
->active_fpu
.fp_status
);
2488 uint64_t helper_float_cvtd_s(uint32_t fst0
)
2492 set_float_exception_flags(0, &env
->active_fpu
.fp_status
);
2493 fdt2
= float32_to_float64(fst0
, &env
->active_fpu
.fp_status
);
2498 uint64_t helper_float_cvtd_w(uint32_t wt0
)
2502 set_float_exception_flags(0, &env
->active_fpu
.fp_status
);
2503 fdt2
= int32_to_float64(wt0
, &env
->active_fpu
.fp_status
);
2508 uint64_t helper_float_cvtd_l(uint64_t dt0
)
2512 set_float_exception_flags(0, &env
->active_fpu
.fp_status
);
2513 fdt2
= int64_to_float64(dt0
, &env
->active_fpu
.fp_status
);
2518 uint64_t helper_float_cvtl_d(uint64_t fdt0
)
2522 set_float_exception_flags(0, &env
->active_fpu
.fp_status
);
2523 dt2
= float64_to_int64(fdt0
, &env
->active_fpu
.fp_status
);
2525 if (GET_FP_CAUSE(env
->active_fpu
.fcr31
) & (FP_OVERFLOW
| FP_INVALID
))
2530 uint64_t helper_float_cvtl_s(uint32_t fst0
)
2534 set_float_exception_flags(0, &env
->active_fpu
.fp_status
);
2535 dt2
= float32_to_int64(fst0
, &env
->active_fpu
.fp_status
);
2537 if (GET_FP_CAUSE(env
->active_fpu
.fcr31
) & (FP_OVERFLOW
| FP_INVALID
))
2542 uint64_t helper_float_cvtps_pw(uint64_t dt0
)
2547 set_float_exception_flags(0, &env
->active_fpu
.fp_status
);
2548 fst2
= int32_to_float32(dt0
& 0XFFFFFFFF, &env
->active_fpu
.fp_status
);
2549 fsth2
= int32_to_float32(dt0
>> 32, &env
->active_fpu
.fp_status
);
2551 return ((uint64_t)fsth2
<< 32) | fst2
;
2554 uint64_t helper_float_cvtpw_ps(uint64_t fdt0
)
2559 set_float_exception_flags(0, &env
->active_fpu
.fp_status
);
2560 wt2
= float32_to_int32(fdt0
& 0XFFFFFFFF, &env
->active_fpu
.fp_status
);
2561 wth2
= float32_to_int32(fdt0
>> 32, &env
->active_fpu
.fp_status
);
2563 if (GET_FP_CAUSE(env
->active_fpu
.fcr31
) & (FP_OVERFLOW
| FP_INVALID
)) {
2565 wth2
= FLOAT_SNAN32
;
2567 return ((uint64_t)wth2
<< 32) | wt2
;
2570 uint32_t helper_float_cvts_d(uint64_t fdt0
)
2574 set_float_exception_flags(0, &env
->active_fpu
.fp_status
);
2575 fst2
= float64_to_float32(fdt0
, &env
->active_fpu
.fp_status
);
2580 uint32_t helper_float_cvts_w(uint32_t wt0
)
2584 set_float_exception_flags(0, &env
->active_fpu
.fp_status
);
2585 fst2
= int32_to_float32(wt0
, &env
->active_fpu
.fp_status
);
2590 uint32_t helper_float_cvts_l(uint64_t dt0
)
2594 set_float_exception_flags(0, &env
->active_fpu
.fp_status
);
2595 fst2
= int64_to_float32(dt0
, &env
->active_fpu
.fp_status
);
2600 uint32_t helper_float_cvts_pl(uint32_t wt0
)
2604 set_float_exception_flags(0, &env
->active_fpu
.fp_status
);
2610 uint32_t helper_float_cvts_pu(uint32_t wth0
)
2614 set_float_exception_flags(0, &env
->active_fpu
.fp_status
);
2620 uint32_t helper_float_cvtw_s(uint32_t fst0
)
2624 set_float_exception_flags(0, &env
->active_fpu
.fp_status
);
2625 wt2
= float32_to_int32(fst0
, &env
->active_fpu
.fp_status
);
2627 if (GET_FP_CAUSE(env
->active_fpu
.fcr31
) & (FP_OVERFLOW
| FP_INVALID
))
2632 uint32_t helper_float_cvtw_d(uint64_t fdt0
)
2636 set_float_exception_flags(0, &env
->active_fpu
.fp_status
);
2637 wt2
= float64_to_int32(fdt0
, &env
->active_fpu
.fp_status
);
2639 if (GET_FP_CAUSE(env
->active_fpu
.fcr31
) & (FP_OVERFLOW
| FP_INVALID
))
2644 uint64_t helper_float_roundl_d(uint64_t fdt0
)
2648 set_float_exception_flags(0, &env
->active_fpu
.fp_status
);
2649 set_float_rounding_mode(float_round_nearest_even
, &env
->active_fpu
.fp_status
);
2650 dt2
= float64_to_int64(fdt0
, &env
->active_fpu
.fp_status
);
2651 RESTORE_ROUNDING_MODE
;
2653 if (GET_FP_CAUSE(env
->active_fpu
.fcr31
) & (FP_OVERFLOW
| FP_INVALID
))
2658 uint64_t helper_float_roundl_s(uint32_t fst0
)
2662 set_float_exception_flags(0, &env
->active_fpu
.fp_status
);
2663 set_float_rounding_mode(float_round_nearest_even
, &env
->active_fpu
.fp_status
);
2664 dt2
= float32_to_int64(fst0
, &env
->active_fpu
.fp_status
);
2665 RESTORE_ROUNDING_MODE
;
2667 if (GET_FP_CAUSE(env
->active_fpu
.fcr31
) & (FP_OVERFLOW
| FP_INVALID
))
2672 uint32_t helper_float_roundw_d(uint64_t fdt0
)
2676 set_float_exception_flags(0, &env
->active_fpu
.fp_status
);
2677 set_float_rounding_mode(float_round_nearest_even
, &env
->active_fpu
.fp_status
);
2678 wt2
= float64_to_int32(fdt0
, &env
->active_fpu
.fp_status
);
2679 RESTORE_ROUNDING_MODE
;
2681 if (GET_FP_CAUSE(env
->active_fpu
.fcr31
) & (FP_OVERFLOW
| FP_INVALID
))
2686 uint32_t helper_float_roundw_s(uint32_t fst0
)
2690 set_float_exception_flags(0, &env
->active_fpu
.fp_status
);
2691 set_float_rounding_mode(float_round_nearest_even
, &env
->active_fpu
.fp_status
);
2692 wt2
= float32_to_int32(fst0
, &env
->active_fpu
.fp_status
);
2693 RESTORE_ROUNDING_MODE
;
2695 if (GET_FP_CAUSE(env
->active_fpu
.fcr31
) & (FP_OVERFLOW
| FP_INVALID
))
2700 uint64_t helper_float_truncl_d(uint64_t fdt0
)
2704 set_float_exception_flags(0, &env
->active_fpu
.fp_status
);
2705 dt2
= float64_to_int64_round_to_zero(fdt0
, &env
->active_fpu
.fp_status
);
2707 if (GET_FP_CAUSE(env
->active_fpu
.fcr31
) & (FP_OVERFLOW
| FP_INVALID
))
2712 uint64_t helper_float_truncl_s(uint32_t fst0
)
2716 set_float_exception_flags(0, &env
->active_fpu
.fp_status
);
2717 dt2
= float32_to_int64_round_to_zero(fst0
, &env
->active_fpu
.fp_status
);
2719 if (GET_FP_CAUSE(env
->active_fpu
.fcr31
) & (FP_OVERFLOW
| FP_INVALID
))
2724 uint32_t helper_float_truncw_d(uint64_t fdt0
)
2728 set_float_exception_flags(0, &env
->active_fpu
.fp_status
);
2729 wt2
= float64_to_int32_round_to_zero(fdt0
, &env
->active_fpu
.fp_status
);
2731 if (GET_FP_CAUSE(env
->active_fpu
.fcr31
) & (FP_OVERFLOW
| FP_INVALID
))
2736 uint32_t helper_float_truncw_s(uint32_t fst0
)
2740 set_float_exception_flags(0, &env
->active_fpu
.fp_status
);
2741 wt2
= float32_to_int32_round_to_zero(fst0
, &env
->active_fpu
.fp_status
);
2743 if (GET_FP_CAUSE(env
->active_fpu
.fcr31
) & (FP_OVERFLOW
| FP_INVALID
))
2748 uint64_t helper_float_ceill_d(uint64_t fdt0
)
2752 set_float_exception_flags(0, &env
->active_fpu
.fp_status
);
2753 set_float_rounding_mode(float_round_up
, &env
->active_fpu
.fp_status
);
2754 dt2
= float64_to_int64(fdt0
, &env
->active_fpu
.fp_status
);
2755 RESTORE_ROUNDING_MODE
;
2757 if (GET_FP_CAUSE(env
->active_fpu
.fcr31
) & (FP_OVERFLOW
| FP_INVALID
))
2762 uint64_t helper_float_ceill_s(uint32_t fst0
)
2766 set_float_exception_flags(0, &env
->active_fpu
.fp_status
);
2767 set_float_rounding_mode(float_round_up
, &env
->active_fpu
.fp_status
);
2768 dt2
= float32_to_int64(fst0
, &env
->active_fpu
.fp_status
);
2769 RESTORE_ROUNDING_MODE
;
2771 if (GET_FP_CAUSE(env
->active_fpu
.fcr31
) & (FP_OVERFLOW
| FP_INVALID
))
2776 uint32_t helper_float_ceilw_d(uint64_t fdt0
)
2780 set_float_exception_flags(0, &env
->active_fpu
.fp_status
);
2781 set_float_rounding_mode(float_round_up
, &env
->active_fpu
.fp_status
);
2782 wt2
= float64_to_int32(fdt0
, &env
->active_fpu
.fp_status
);
2783 RESTORE_ROUNDING_MODE
;
2785 if (GET_FP_CAUSE(env
->active_fpu
.fcr31
) & (FP_OVERFLOW
| FP_INVALID
))
2790 uint32_t helper_float_ceilw_s(uint32_t fst0
)
2794 set_float_exception_flags(0, &env
->active_fpu
.fp_status
);
2795 set_float_rounding_mode(float_round_up
, &env
->active_fpu
.fp_status
);
2796 wt2
= float32_to_int32(fst0
, &env
->active_fpu
.fp_status
);
2797 RESTORE_ROUNDING_MODE
;
2799 if (GET_FP_CAUSE(env
->active_fpu
.fcr31
) & (FP_OVERFLOW
| FP_INVALID
))
2804 uint64_t helper_float_floorl_d(uint64_t fdt0
)
2808 set_float_exception_flags(0, &env
->active_fpu
.fp_status
);
2809 set_float_rounding_mode(float_round_down
, &env
->active_fpu
.fp_status
);
2810 dt2
= float64_to_int64(fdt0
, &env
->active_fpu
.fp_status
);
2811 RESTORE_ROUNDING_MODE
;
2813 if (GET_FP_CAUSE(env
->active_fpu
.fcr31
) & (FP_OVERFLOW
| FP_INVALID
))
2818 uint64_t helper_float_floorl_s(uint32_t fst0
)
2822 set_float_exception_flags(0, &env
->active_fpu
.fp_status
);
2823 set_float_rounding_mode(float_round_down
, &env
->active_fpu
.fp_status
);
2824 dt2
= float32_to_int64(fst0
, &env
->active_fpu
.fp_status
);
2825 RESTORE_ROUNDING_MODE
;
2827 if (GET_FP_CAUSE(env
->active_fpu
.fcr31
) & (FP_OVERFLOW
| FP_INVALID
))
2832 uint32_t helper_float_floorw_d(uint64_t fdt0
)
2836 set_float_exception_flags(0, &env
->active_fpu
.fp_status
);
2837 set_float_rounding_mode(float_round_down
, &env
->active_fpu
.fp_status
);
2838 wt2
= float64_to_int32(fdt0
, &env
->active_fpu
.fp_status
);
2839 RESTORE_ROUNDING_MODE
;
2841 if (GET_FP_CAUSE(env
->active_fpu
.fcr31
) & (FP_OVERFLOW
| FP_INVALID
))
2846 uint32_t helper_float_floorw_s(uint32_t fst0
)
2850 set_float_exception_flags(0, &env
->active_fpu
.fp_status
);
2851 set_float_rounding_mode(float_round_down
, &env
->active_fpu
.fp_status
);
2852 wt2
= float32_to_int32(fst0
, &env
->active_fpu
.fp_status
);
2853 RESTORE_ROUNDING_MODE
;
2855 if (GET_FP_CAUSE(env
->active_fpu
.fcr31
) & (FP_OVERFLOW
| FP_INVALID
))
/* unary operations, not modifying fp status */
/* NOTE(review): line continuations, braces and the abs/chs
   instantiations were dropped by the extraction; restored per the
   standard QEMU source -- verify upstream.  */
#define FLOAT_UNOP(name)                                           \
uint64_t helper_float_ ## name ## _d(uint64_t fdt0)                \
{                                                                  \
    return float64_ ## name(fdt0);                                 \
}                                                                  \
uint32_t helper_float_ ## name ## _s(uint32_t fst0)                \
{                                                                  \
    return float32_ ## name(fst0);                                 \
}                                                                  \
uint64_t helper_float_ ## name ## _ps(uint64_t fdt0)               \
{                                                                  \
    uint32_t wt0;                                                  \
    uint32_t wth0;                                                 \
                                                                   \
    wt0 = float32_ ## name(fdt0 & 0XFFFFFFFF);                     \
    wth0 = float32_ ## name(fdt0 >> 32);                           \
    return ((uint64_t)wth0 << 32) | wt0;                           \
}
FLOAT_UNOP(abs)
FLOAT_UNOP(chs)
#undef FLOAT_UNOP
2883 /* MIPS specific unary operations */
2884 uint64_t helper_float_recip_d(uint64_t fdt0
)
2888 set_float_exception_flags(0, &env
->active_fpu
.fp_status
);
2889 fdt2
= float64_div(FLOAT_ONE64
, fdt0
, &env
->active_fpu
.fp_status
);
2894 uint32_t helper_float_recip_s(uint32_t fst0
)
2898 set_float_exception_flags(0, &env
->active_fpu
.fp_status
);
2899 fst2
= float32_div(FLOAT_ONE32
, fst0
, &env
->active_fpu
.fp_status
);
2904 uint64_t helper_float_rsqrt_d(uint64_t fdt0
)
2908 set_float_exception_flags(0, &env
->active_fpu
.fp_status
);
2909 fdt2
= float64_sqrt(fdt0
, &env
->active_fpu
.fp_status
);
2910 fdt2
= float64_div(FLOAT_ONE64
, fdt2
, &env
->active_fpu
.fp_status
);
2915 uint32_t helper_float_rsqrt_s(uint32_t fst0
)
2919 set_float_exception_flags(0, &env
->active_fpu
.fp_status
);
2920 fst2
= float32_sqrt(fst0
, &env
->active_fpu
.fp_status
);
2921 fst2
= float32_div(FLOAT_ONE32
, fst2
, &env
->active_fpu
.fp_status
);
2926 uint64_t helper_float_recip1_d(uint64_t fdt0
)
2930 set_float_exception_flags(0, &env
->active_fpu
.fp_status
);
2931 fdt2
= float64_div(FLOAT_ONE64
, fdt0
, &env
->active_fpu
.fp_status
);
2936 uint32_t helper_float_recip1_s(uint32_t fst0
)
2940 set_float_exception_flags(0, &env
->active_fpu
.fp_status
);
2941 fst2
= float32_div(FLOAT_ONE32
, fst0
, &env
->active_fpu
.fp_status
);
2946 uint64_t helper_float_recip1_ps(uint64_t fdt0
)
2951 set_float_exception_flags(0, &env
->active_fpu
.fp_status
);
2952 fst2
= float32_div(FLOAT_ONE32
, fdt0
& 0XFFFFFFFF, &env
->active_fpu
.fp_status
);
2953 fsth2
= float32_div(FLOAT_ONE32
, fdt0
>> 32, &env
->active_fpu
.fp_status
);
2955 return ((uint64_t)fsth2
<< 32) | fst2
;
2958 uint64_t helper_float_rsqrt1_d(uint64_t fdt0
)
2962 set_float_exception_flags(0, &env
->active_fpu
.fp_status
);
2963 fdt2
= float64_sqrt(fdt0
, &env
->active_fpu
.fp_status
);
2964 fdt2
= float64_div(FLOAT_ONE64
, fdt2
, &env
->active_fpu
.fp_status
);
2969 uint32_t helper_float_rsqrt1_s(uint32_t fst0
)
2973 set_float_exception_flags(0, &env
->active_fpu
.fp_status
);
2974 fst2
= float32_sqrt(fst0
, &env
->active_fpu
.fp_status
);
2975 fst2
= float32_div(FLOAT_ONE32
, fst2
, &env
->active_fpu
.fp_status
);
2980 uint64_t helper_float_rsqrt1_ps(uint64_t fdt0
)
2985 set_float_exception_flags(0, &env
->active_fpu
.fp_status
);
2986 fst2
= float32_sqrt(fdt0
& 0XFFFFFFFF, &env
->active_fpu
.fp_status
);
2987 fsth2
= float32_sqrt(fdt0
>> 32, &env
->active_fpu
.fp_status
);
2988 fst2
= float32_div(FLOAT_ONE32
, fst2
, &env
->active_fpu
.fp_status
);
2989 fsth2
= float32_div(FLOAT_ONE32
, fsth2
, &env
->active_fpu
.fp_status
);
2991 return ((uint64_t)fsth2
<< 32) | fst2
;
2994 #define FLOAT_OP(name, p) void helper_float_##name##_##p(void)
2996 /* binary operations */
2997 #define FLOAT_BINOP(name) \
2998 uint64_t helper_float_ ## name ## _d(uint64_t fdt0, uint64_t fdt1) \
3002 set_float_exception_flags(0, &env->active_fpu.fp_status); \
3003 dt2 = float64_ ## name (fdt0, fdt1, &env->active_fpu.fp_status); \
3005 if (GET_FP_CAUSE(env->active_fpu.fcr31) & FP_INVALID) \
3006 dt2 = FLOAT_QNAN64; \
3010 uint32_t helper_float_ ## name ## _s(uint32_t fst0, uint32_t fst1) \
3014 set_float_exception_flags(0, &env->active_fpu.fp_status); \
3015 wt2 = float32_ ## name (fst0, fst1, &env->active_fpu.fp_status); \
3017 if (GET_FP_CAUSE(env->active_fpu.fcr31) & FP_INVALID) \
3018 wt2 = FLOAT_QNAN32; \
3022 uint64_t helper_float_ ## name ## _ps(uint64_t fdt0, uint64_t fdt1) \
3024 uint32_t fst0 = fdt0 & 0XFFFFFFFF; \
3025 uint32_t fsth0 = fdt0 >> 32; \
3026 uint32_t fst1 = fdt1 & 0XFFFFFFFF; \
3027 uint32_t fsth1 = fdt1 >> 32; \
3031 set_float_exception_flags(0, &env->active_fpu.fp_status); \
3032 wt2 = float32_ ## name (fst0, fst1, &env->active_fpu.fp_status); \
3033 wth2 = float32_ ## name (fsth0, fsth1, &env->active_fpu.fp_status); \
3035 if (GET_FP_CAUSE(env->active_fpu.fcr31) & FP_INVALID) { \
3036 wt2 = FLOAT_QNAN32; \
3037 wth2 = FLOAT_QNAN32; \
3039 return ((uint64_t)wth2 << 32) | wt2; \
3048 /* ternary operations */
3049 #define FLOAT_TERNOP(name1, name2) \
3050 uint64_t helper_float_ ## name1 ## name2 ## _d(uint64_t fdt0, uint64_t fdt1, \
3053 fdt0 = float64_ ## name1 (fdt0, fdt1, &env->active_fpu.fp_status); \
3054 return float64_ ## name2 (fdt0, fdt2, &env->active_fpu.fp_status); \
3057 uint32_t helper_float_ ## name1 ## name2 ## _s(uint32_t fst0, uint32_t fst1, \
3060 fst0 = float32_ ## name1 (fst0, fst1, &env->active_fpu.fp_status); \
3061 return float32_ ## name2 (fst0, fst2, &env->active_fpu.fp_status); \
3064 uint64_t helper_float_ ## name1 ## name2 ## _ps(uint64_t fdt0, uint64_t fdt1, \
3067 uint32_t fst0 = fdt0 & 0XFFFFFFFF; \
3068 uint32_t fsth0 = fdt0 >> 32; \
3069 uint32_t fst1 = fdt1 & 0XFFFFFFFF; \
3070 uint32_t fsth1 = fdt1 >> 32; \
3071 uint32_t fst2 = fdt2 & 0XFFFFFFFF; \
3072 uint32_t fsth2 = fdt2 >> 32; \
3074 fst0 = float32_ ## name1 (fst0, fst1, &env->active_fpu.fp_status); \
3075 fsth0 = float32_ ## name1 (fsth0, fsth1, &env->active_fpu.fp_status); \
3076 fst2 = float32_ ## name2 (fst0, fst2, &env->active_fpu.fp_status); \
3077 fsth2 = float32_ ## name2 (fsth0, fsth2, &env->active_fpu.fp_status); \
3078 return ((uint64_t)fsth2 << 32) | fst2; \
3081 FLOAT_TERNOP(mul
, add
)
3082 FLOAT_TERNOP(mul
, sub
)
3085 /* negated ternary operations */
3086 #define FLOAT_NTERNOP(name1, name2) \
3087 uint64_t helper_float_n ## name1 ## name2 ## _d(uint64_t fdt0, uint64_t fdt1, \
3090 fdt0 = float64_ ## name1 (fdt0, fdt1, &env->active_fpu.fp_status); \
3091 fdt2 = float64_ ## name2 (fdt0, fdt2, &env->active_fpu.fp_status); \
3092 return float64_chs(fdt2); \
3095 uint32_t helper_float_n ## name1 ## name2 ## _s(uint32_t fst0, uint32_t fst1, \
3098 fst0 = float32_ ## name1 (fst0, fst1, &env->active_fpu.fp_status); \
3099 fst2 = float32_ ## name2 (fst0, fst2, &env->active_fpu.fp_status); \
3100 return float32_chs(fst2); \
3103 uint64_t helper_float_n ## name1 ## name2 ## _ps(uint64_t fdt0, uint64_t fdt1,\
3106 uint32_t fst0 = fdt0 & 0XFFFFFFFF; \
3107 uint32_t fsth0 = fdt0 >> 32; \
3108 uint32_t fst1 = fdt1 & 0XFFFFFFFF; \
3109 uint32_t fsth1 = fdt1 >> 32; \
3110 uint32_t fst2 = fdt2 & 0XFFFFFFFF; \
3111 uint32_t fsth2 = fdt2 >> 32; \
3113 fst0 = float32_ ## name1 (fst0, fst1, &env->active_fpu.fp_status); \
3114 fsth0 = float32_ ## name1 (fsth0, fsth1, &env->active_fpu.fp_status); \
3115 fst2 = float32_ ## name2 (fst0, fst2, &env->active_fpu.fp_status); \
3116 fsth2 = float32_ ## name2 (fsth0, fsth2, &env->active_fpu.fp_status); \
3117 fst2 = float32_chs(fst2); \
3118 fsth2 = float32_chs(fsth2); \
3119 return ((uint64_t)fsth2 << 32) | fst2; \
3122 FLOAT_NTERNOP(mul
, add
)
3123 FLOAT_NTERNOP(mul
, sub
)
3124 #undef FLOAT_NTERNOP
3126 /* MIPS specific binary operations */
3127 uint64_t helper_float_recip2_d(uint64_t fdt0
, uint64_t fdt2
)
3129 set_float_exception_flags(0, &env
->active_fpu
.fp_status
);
3130 fdt2
= float64_mul(fdt0
, fdt2
, &env
->active_fpu
.fp_status
);
3131 fdt2
= float64_chs(float64_sub(fdt2
, FLOAT_ONE64
, &env
->active_fpu
.fp_status
));
3136 uint32_t helper_float_recip2_s(uint32_t fst0
, uint32_t fst2
)
3138 set_float_exception_flags(0, &env
->active_fpu
.fp_status
);
3139 fst2
= float32_mul(fst0
, fst2
, &env
->active_fpu
.fp_status
);
3140 fst2
= float32_chs(float32_sub(fst2
, FLOAT_ONE32
, &env
->active_fpu
.fp_status
));
3145 uint64_t helper_float_recip2_ps(uint64_t fdt0
, uint64_t fdt2
)
3147 uint32_t fst0
= fdt0
& 0XFFFFFFFF;
3148 uint32_t fsth0
= fdt0
>> 32;
3149 uint32_t fst2
= fdt2
& 0XFFFFFFFF;
3150 uint32_t fsth2
= fdt2
>> 32;
3152 set_float_exception_flags(0, &env
->active_fpu
.fp_status
);
3153 fst2
= float32_mul(fst0
, fst2
, &env
->active_fpu
.fp_status
);
3154 fsth2
= float32_mul(fsth0
, fsth2
, &env
->active_fpu
.fp_status
);
3155 fst2
= float32_chs(float32_sub(fst2
, FLOAT_ONE32
, &env
->active_fpu
.fp_status
));
3156 fsth2
= float32_chs(float32_sub(fsth2
, FLOAT_ONE32
, &env
->active_fpu
.fp_status
));
3158 return ((uint64_t)fsth2
<< 32) | fst2
;
3161 uint64_t helper_float_rsqrt2_d(uint64_t fdt0
, uint64_t fdt2
)
3163 set_float_exception_flags(0, &env
->active_fpu
.fp_status
);
3164 fdt2
= float64_mul(fdt0
, fdt2
, &env
->active_fpu
.fp_status
);
3165 fdt2
= float64_sub(fdt2
, FLOAT_ONE64
, &env
->active_fpu
.fp_status
);
3166 fdt2
= float64_chs(float64_div(fdt2
, FLOAT_TWO64
, &env
->active_fpu
.fp_status
));
3171 uint32_t helper_float_rsqrt2_s(uint32_t fst0
, uint32_t fst2
)
3173 set_float_exception_flags(0, &env
->active_fpu
.fp_status
);
3174 fst2
= float32_mul(fst0
, fst2
, &env
->active_fpu
.fp_status
);
3175 fst2
= float32_sub(fst2
, FLOAT_ONE32
, &env
->active_fpu
.fp_status
);
3176 fst2
= float32_chs(float32_div(fst2
, FLOAT_TWO32
, &env
->active_fpu
.fp_status
));
3181 uint64_t helper_float_rsqrt2_ps(uint64_t fdt0
, uint64_t fdt2
)
3183 uint32_t fst0
= fdt0
& 0XFFFFFFFF;
3184 uint32_t fsth0
= fdt0
>> 32;
3185 uint32_t fst2
= fdt2
& 0XFFFFFFFF;
3186 uint32_t fsth2
= fdt2
>> 32;
3188 set_float_exception_flags(0, &env
->active_fpu
.fp_status
);
3189 fst2
= float32_mul(fst0
, fst2
, &env
->active_fpu
.fp_status
);
3190 fsth2
= float32_mul(fsth0
, fsth2
, &env
->active_fpu
.fp_status
);
3191 fst2
= float32_sub(fst2
, FLOAT_ONE32
, &env
->active_fpu
.fp_status
);
3192 fsth2
= float32_sub(fsth2
, FLOAT_ONE32
, &env
->active_fpu
.fp_status
);
3193 fst2
= float32_chs(float32_div(fst2
, FLOAT_TWO32
, &env
->active_fpu
.fp_status
));
3194 fsth2
= float32_chs(float32_div(fsth2
, FLOAT_TWO32
, &env
->active_fpu
.fp_status
));
3196 return ((uint64_t)fsth2
<< 32) | fst2
;
3199 uint64_t helper_float_addr_ps(uint64_t fdt0
, uint64_t fdt1
)
3201 uint32_t fst0
= fdt0
& 0XFFFFFFFF;
3202 uint32_t fsth0
= fdt0
>> 32;
3203 uint32_t fst1
= fdt1
& 0XFFFFFFFF;
3204 uint32_t fsth1
= fdt1
>> 32;
3208 set_float_exception_flags(0, &env
->active_fpu
.fp_status
);
3209 fst2
= float32_add (fst0
, fsth0
, &env
->active_fpu
.fp_status
);
3210 fsth2
= float32_add (fst1
, fsth1
, &env
->active_fpu
.fp_status
);
3212 return ((uint64_t)fsth2
<< 32) | fst2
;
3215 uint64_t helper_float_mulr_ps(uint64_t fdt0
, uint64_t fdt1
)
3217 uint32_t fst0
= fdt0
& 0XFFFFFFFF;
3218 uint32_t fsth0
= fdt0
>> 32;
3219 uint32_t fst1
= fdt1
& 0XFFFFFFFF;
3220 uint32_t fsth1
= fdt1
>> 32;
3224 set_float_exception_flags(0, &env
->active_fpu
.fp_status
);
3225 fst2
= float32_mul (fst0
, fsth0
, &env
->active_fpu
.fp_status
);
3226 fsth2
= float32_mul (fst1
, fsth1
, &env
->active_fpu
.fp_status
);
3228 return ((uint64_t)fsth2
<< 32) | fst2
;
3231 /* compare operations */
3232 #define FOP_COND_D(op, cond) \
3233 void helper_cmp_d_ ## op (uint64_t fdt0, uint64_t fdt1, int cc) \
3236 set_float_exception_flags(0, &env->active_fpu.fp_status); \
3240 SET_FP_COND(cc, env->active_fpu); \
3242 CLEAR_FP_COND(cc, env->active_fpu); \
3244 void helper_cmpabs_d_ ## op (uint64_t fdt0, uint64_t fdt1, int cc) \
3247 set_float_exception_flags(0, &env->active_fpu.fp_status); \
3248 fdt0 = float64_abs(fdt0); \
3249 fdt1 = float64_abs(fdt1); \
3253 SET_FP_COND(cc, env->active_fpu); \
3255 CLEAR_FP_COND(cc, env->active_fpu); \
3258 /* NOTE: the comma operator will make "cond" to eval to false,
3259 * but float64_unordered_quiet() is still called. */
3260 FOP_COND_D(f
, (float64_unordered_quiet(fdt1
, fdt0
, &env
->active_fpu
.fp_status
), 0))
3261 FOP_COND_D(un
, float64_unordered_quiet(fdt1
, fdt0
, &env
->active_fpu
.fp_status
))
3262 FOP_COND_D(eq
, float64_eq_quiet(fdt0
, fdt1
, &env
->active_fpu
.fp_status
))
3263 FOP_COND_D(ueq
, float64_unordered_quiet(fdt1
, fdt0
, &env
->active_fpu
.fp_status
) || float64_eq_quiet(fdt0
, fdt1
, &env
->active_fpu
.fp_status
))
3264 FOP_COND_D(olt
, float64_lt_quiet(fdt0
, fdt1
, &env
->active_fpu
.fp_status
))
3265 FOP_COND_D(ult
, float64_unordered_quiet(fdt1
, fdt0
, &env
->active_fpu
.fp_status
) || float64_lt_quiet(fdt0
, fdt1
, &env
->active_fpu
.fp_status
))
3266 FOP_COND_D(ole
, float64_le_quiet(fdt0
, fdt1
, &env
->active_fpu
.fp_status
))
3267 FOP_COND_D(ule
, float64_unordered_quiet(fdt1
, fdt0
, &env
->active_fpu
.fp_status
) || float64_le_quiet(fdt0
, fdt1
, &env
->active_fpu
.fp_status
))
3268 /* NOTE: the comma operator will make "cond" to eval to false,
3269 * but float64_unordered() is still called. */
3270 FOP_COND_D(sf
, (float64_unordered(fdt1
, fdt0
, &env
->active_fpu
.fp_status
), 0))
3271 FOP_COND_D(ngle
,float64_unordered(fdt1
, fdt0
, &env
->active_fpu
.fp_status
))
3272 FOP_COND_D(seq
, float64_eq(fdt0
, fdt1
, &env
->active_fpu
.fp_status
))
3273 FOP_COND_D(ngl
, float64_unordered(fdt1
, fdt0
, &env
->active_fpu
.fp_status
) || float64_eq(fdt0
, fdt1
, &env
->active_fpu
.fp_status
))
3274 FOP_COND_D(lt
, float64_lt(fdt0
, fdt1
, &env
->active_fpu
.fp_status
))
3275 FOP_COND_D(nge
, float64_unordered(fdt1
, fdt0
, &env
->active_fpu
.fp_status
) || float64_lt(fdt0
, fdt1
, &env
->active_fpu
.fp_status
))
3276 FOP_COND_D(le
, float64_le(fdt0
, fdt1
, &env
->active_fpu
.fp_status
))
3277 FOP_COND_D(ngt
, float64_unordered(fdt1
, fdt0
, &env
->active_fpu
.fp_status
) || float64_le(fdt0
, fdt1
, &env
->active_fpu
.fp_status
))
3279 #define FOP_COND_S(op, cond) \
3280 void helper_cmp_s_ ## op (uint32_t fst0, uint32_t fst1, int cc) \
3283 set_float_exception_flags(0, &env->active_fpu.fp_status); \
3287 SET_FP_COND(cc, env->active_fpu); \
3289 CLEAR_FP_COND(cc, env->active_fpu); \
3291 void helper_cmpabs_s_ ## op (uint32_t fst0, uint32_t fst1, int cc) \
3294 set_float_exception_flags(0, &env->active_fpu.fp_status); \
3295 fst0 = float32_abs(fst0); \
3296 fst1 = float32_abs(fst1); \
3300 SET_FP_COND(cc, env->active_fpu); \
3302 CLEAR_FP_COND(cc, env->active_fpu); \
3305 /* NOTE: the comma operator will make "cond" to eval to false,
3306 * but float32_unordered_quiet() is still called. */
3307 FOP_COND_S(f
, (float32_unordered_quiet(fst1
, fst0
, &env
->active_fpu
.fp_status
), 0))
3308 FOP_COND_S(un
, float32_unordered_quiet(fst1
, fst0
, &env
->active_fpu
.fp_status
))
3309 FOP_COND_S(eq
, float32_eq_quiet(fst0
, fst1
, &env
->active_fpu
.fp_status
))
3310 FOP_COND_S(ueq
, float32_unordered_quiet(fst1
, fst0
, &env
->active_fpu
.fp_status
) || float32_eq_quiet(fst0
, fst1
, &env
->active_fpu
.fp_status
))
3311 FOP_COND_S(olt
, float32_lt_quiet(fst0
, fst1
, &env
->active_fpu
.fp_status
))
3312 FOP_COND_S(ult
, float32_unordered_quiet(fst1
, fst0
, &env
->active_fpu
.fp_status
) || float32_lt_quiet(fst0
, fst1
, &env
->active_fpu
.fp_status
))
3313 FOP_COND_S(ole
, float32_le_quiet(fst0
, fst1
, &env
->active_fpu
.fp_status
))
3314 FOP_COND_S(ule
, float32_unordered_quiet(fst1
, fst0
, &env
->active_fpu
.fp_status
) || float32_le_quiet(fst0
, fst1
, &env
->active_fpu
.fp_status
))
3315 /* NOTE: the comma operator will make "cond" to eval to false,
3316 * but float32_unordered() is still called. */
3317 FOP_COND_S(sf
, (float32_unordered(fst1
, fst0
, &env
->active_fpu
.fp_status
), 0))
3318 FOP_COND_S(ngle
,float32_unordered(fst1
, fst0
, &env
->active_fpu
.fp_status
))
3319 FOP_COND_S(seq
, float32_eq(fst0
, fst1
, &env
->active_fpu
.fp_status
))
3320 FOP_COND_S(ngl
, float32_unordered(fst1
, fst0
, &env
->active_fpu
.fp_status
) || float32_eq(fst0
, fst1
, &env
->active_fpu
.fp_status
))
3321 FOP_COND_S(lt
, float32_lt(fst0
, fst1
, &env
->active_fpu
.fp_status
))
3322 FOP_COND_S(nge
, float32_unordered(fst1
, fst0
, &env
->active_fpu
.fp_status
) || float32_lt(fst0
, fst1
, &env
->active_fpu
.fp_status
))
3323 FOP_COND_S(le
, float32_le(fst0
, fst1
, &env
->active_fpu
.fp_status
))
3324 FOP_COND_S(ngt
, float32_unordered(fst1
, fst0
, &env
->active_fpu
.fp_status
) || float32_le(fst0
, fst1
, &env
->active_fpu
.fp_status
))
3326 #define FOP_COND_PS(op, condl, condh) \
3327 void helper_cmp_ps_ ## op (uint64_t fdt0, uint64_t fdt1, int cc) \
3329 uint32_t fst0, fsth0, fst1, fsth1; \
3331 set_float_exception_flags(0, &env->active_fpu.fp_status); \
3332 fst0 = fdt0 & 0XFFFFFFFF; \
3333 fsth0 = fdt0 >> 32; \
3334 fst1 = fdt1 & 0XFFFFFFFF; \
3335 fsth1 = fdt1 >> 32; \
3340 SET_FP_COND(cc, env->active_fpu); \
3342 CLEAR_FP_COND(cc, env->active_fpu); \
3344 SET_FP_COND(cc + 1, env->active_fpu); \
3346 CLEAR_FP_COND(cc + 1, env->active_fpu); \
3348 void helper_cmpabs_ps_ ## op (uint64_t fdt0, uint64_t fdt1, int cc) \
3350 uint32_t fst0, fsth0, fst1, fsth1; \
3352 fst0 = float32_abs(fdt0 & 0XFFFFFFFF); \
3353 fsth0 = float32_abs(fdt0 >> 32); \
3354 fst1 = float32_abs(fdt1 & 0XFFFFFFFF); \
3355 fsth1 = float32_abs(fdt1 >> 32); \
3360 SET_FP_COND(cc, env->active_fpu); \
3362 CLEAR_FP_COND(cc, env->active_fpu); \
3364 SET_FP_COND(cc + 1, env->active_fpu); \
3366 CLEAR_FP_COND(cc + 1, env->active_fpu); \
3369 /* NOTE: the comma operator will make "cond" to eval to false,
3370 * but float32_unordered_quiet() is still called. */
3371 FOP_COND_PS(f
, (float32_unordered_quiet(fst1
, fst0
, &env
->active_fpu
.fp_status
), 0),
3372 (float32_unordered_quiet(fsth1
, fsth0
, &env
->active_fpu
.fp_status
), 0))
3373 FOP_COND_PS(un
, float32_unordered_quiet(fst1
, fst0
, &env
->active_fpu
.fp_status
),
3374 float32_unordered_quiet(fsth1
, fsth0
, &env
->active_fpu
.fp_status
))
3375 FOP_COND_PS(eq
, float32_eq_quiet(fst0
, fst1
, &env
->active_fpu
.fp_status
),
3376 float32_eq_quiet(fsth0
, fsth1
, &env
->active_fpu
.fp_status
))
3377 FOP_COND_PS(ueq
, float32_unordered_quiet(fst1
, fst0
, &env
->active_fpu
.fp_status
) || float32_eq_quiet(fst0
, fst1
, &env
->active_fpu
.fp_status
),
3378 float32_unordered_quiet(fsth1
, fsth0
, &env
->active_fpu
.fp_status
) || float32_eq_quiet(fsth0
, fsth1
, &env
->active_fpu
.fp_status
))
3379 FOP_COND_PS(olt
, float32_lt_quiet(fst0
, fst1
, &env
->active_fpu
.fp_status
),
3380 float32_lt_quiet(fsth0
, fsth1
, &env
->active_fpu
.fp_status
))
3381 FOP_COND_PS(ult
, float32_unordered_quiet(fst1
, fst0
, &env
->active_fpu
.fp_status
) || float32_lt_quiet(fst0
, fst1
, &env
->active_fpu
.fp_status
),
3382 float32_unordered_quiet(fsth1
, fsth0
, &env
->active_fpu
.fp_status
) || float32_lt_quiet(fsth0
, fsth1
, &env
->active_fpu
.fp_status
))
3383 FOP_COND_PS(ole
, float32_le_quiet(fst0
, fst1
, &env
->active_fpu
.fp_status
),
3384 float32_le_quiet(fsth0
, fsth1
, &env
->active_fpu
.fp_status
))
3385 FOP_COND_PS(ule
, float32_unordered_quiet(fst1
, fst0
, &env
->active_fpu
.fp_status
) || float32_le_quiet(fst0
, fst1
, &env
->active_fpu
.fp_status
),
3386 float32_unordered_quiet(fsth1
, fsth0
, &env
->active_fpu
.fp_status
) || float32_le_quiet(fsth0
, fsth1
, &env
->active_fpu
.fp_status
))
3387 /* NOTE: the comma operator will make "cond" to eval to false,
3388 * but float32_unordered() is still called. */
3389 FOP_COND_PS(sf
, (float32_unordered(fst1
, fst0
, &env
->active_fpu
.fp_status
), 0),
3390 (float32_unordered(fsth1
, fsth0
, &env
->active_fpu
.fp_status
), 0))
3391 FOP_COND_PS(ngle
,float32_unordered(fst1
, fst0
, &env
->active_fpu
.fp_status
),
3392 float32_unordered(fsth1
, fsth0
, &env
->active_fpu
.fp_status
))
3393 FOP_COND_PS(seq
, float32_eq(fst0
, fst1
, &env
->active_fpu
.fp_status
),
3394 float32_eq(fsth0
, fsth1
, &env
->active_fpu
.fp_status
))
3395 FOP_COND_PS(ngl
, float32_unordered(fst1
, fst0
, &env
->active_fpu
.fp_status
) || float32_eq(fst0
, fst1
, &env
->active_fpu
.fp_status
),
3396 float32_unordered(fsth1
, fsth0
, &env
->active_fpu
.fp_status
) || float32_eq(fsth0
, fsth1
, &env
->active_fpu
.fp_status
))
3397 FOP_COND_PS(lt
, float32_lt(fst0
, fst1
, &env
->active_fpu
.fp_status
),
3398 float32_lt(fsth0
, fsth1
, &env
->active_fpu
.fp_status
))
3399 FOP_COND_PS(nge
, float32_unordered(fst1
, fst0
, &env
->active_fpu
.fp_status
) || float32_lt(fst0
, fst1
, &env
->active_fpu
.fp_status
),
3400 float32_unordered(fsth1
, fsth0
, &env
->active_fpu
.fp_status
) || float32_lt(fsth0
, fsth1
, &env
->active_fpu
.fp_status
))
3401 FOP_COND_PS(le
, float32_le(fst0
, fst1
, &env
->active_fpu
.fp_status
),
3402 float32_le(fsth0
, fsth1
, &env
->active_fpu
.fp_status
))
3403 FOP_COND_PS(ngt
, float32_unordered(fst1
, fst0
, &env
->active_fpu
.fp_status
) || float32_le(fst0
, fst1
, &env
->active_fpu
.fp_status
),
3404 float32_unordered(fsth1
, fsth0
, &env
->active_fpu
.fp_status
) || float32_le(fsth0
, fsth1
, &env
->active_fpu
.fp_status
))