2 * MIPS emulation helpers for qemu.
4 * Copyright (c) 2004-2005 Jocelyn Mayer
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
22 #include "host-utils.h"
26 #ifndef CONFIG_USER_ONLY
27 static inline void cpu_mips_tlb_flush (CPUState
*env
, int flush_global
);
30 /*****************************************************************************/
31 /* Exceptions processing helpers */
33 void helper_raise_exception_err (uint32_t exception
, int error_code
)
36 if (exception
< 0x100)
37 qemu_log("%s: %d %d\n", __func__
, exception
, error_code
);
39 env
->exception_index
= exception
;
40 env
->error_code
= error_code
;
44 void helper_raise_exception (uint32_t exception
)
46 helper_raise_exception_err(exception
, 0);
49 void helper_interrupt_restart (void)
51 if (!(env
->CP0_Status
& (1 << CP0St_EXL
)) &&
52 !(env
->CP0_Status
& (1 << CP0St_ERL
)) &&
53 !(env
->hflags
& MIPS_HFLAG_DM
) &&
54 (env
->CP0_Status
& (1 << CP0St_IE
)) &&
55 (env
->CP0_Status
& env
->CP0_Cause
& CP0Ca_IP_mask
)) {
56 env
->CP0_Cause
&= ~(0x1f << CP0Ca_EC
);
57 helper_raise_exception(EXCP_EXT_INTERRUPT
);
61 #if !defined(CONFIG_USER_ONLY)
62 static void do_restore_state (void *pc_ptr
)
65 unsigned long pc
= (unsigned long) pc_ptr
;
69 cpu_restore_state (tb
, env
, pc
, NULL
);
74 #if defined(CONFIG_USER_ONLY)
75 #define HELPER_LD(name, insn, type) \
76 static inline type do_##name(target_ulong addr, int mem_idx) \
78 return (type) insn##_raw(addr); \
81 #define HELPER_LD(name, insn, type) \
82 static inline type do_##name(target_ulong addr, int mem_idx) \
86 case 0: return (type) insn##_kernel(addr); break; \
87 case 1: return (type) insn##_super(addr); break; \
89 case 2: return (type) insn##_user(addr); break; \
93 HELPER_LD(lbu
, ldub
, uint8_t)
94 HELPER_LD(lw
, ldl
, int32_t)
96 HELPER_LD(ld
, ldq
, int64_t)
100 #if defined(CONFIG_USER_ONLY)
101 #define HELPER_ST(name, insn, type) \
102 static inline void do_##name(target_ulong addr, type val, int mem_idx) \
104 insn##_raw(addr, val); \
107 #define HELPER_ST(name, insn, type) \
108 static inline void do_##name(target_ulong addr, type val, int mem_idx) \
112 case 0: insn##_kernel(addr, val); break; \
113 case 1: insn##_super(addr, val); break; \
115 case 2: insn##_user(addr, val); break; \
119 HELPER_ST(sb
, stb
, uint8_t)
120 HELPER_ST(sw
, stl
, uint32_t)
122 HELPER_ST(sd
, stq
, uint64_t)
126 target_ulong
helper_clo (target_ulong arg1
)
131 target_ulong
helper_clz (target_ulong arg1
)
136 #if defined(TARGET_MIPS64)
137 target_ulong
helper_dclo (target_ulong arg1
)
142 target_ulong
helper_dclz (target_ulong arg1
)
146 #endif /* TARGET_MIPS64 */
148 /* 64 bits arithmetic for 32 bits hosts */
149 static inline uint64_t get_HILO (void)
151 return ((uint64_t)(env
->active_tc
.HI
[0]) << 32) | (uint32_t)env
->active_tc
.LO
[0];
154 static inline void set_HILO (uint64_t HILO
)
156 env
->active_tc
.LO
[0] = (int32_t)HILO
;
157 env
->active_tc
.HI
[0] = (int32_t)(HILO
>> 32);
160 static inline void set_HIT0_LO (target_ulong arg1
, uint64_t HILO
)
162 env
->active_tc
.LO
[0] = (int32_t)(HILO
& 0xFFFFFFFF);
163 arg1
= env
->active_tc
.HI
[0] = (int32_t)(HILO
>> 32);
166 static inline void set_HI_LOT0 (target_ulong arg1
, uint64_t HILO
)
168 arg1
= env
->active_tc
.LO
[0] = (int32_t)(HILO
& 0xFFFFFFFF);
169 env
->active_tc
.HI
[0] = (int32_t)(HILO
>> 32);
172 /* Multiplication variants of the vr54xx. */
173 target_ulong
helper_muls (target_ulong arg1
, target_ulong arg2
)
175 set_HI_LOT0(arg1
, 0 - ((int64_t)(int32_t)arg1
* (int64_t)(int32_t)arg2
));
180 target_ulong
helper_mulsu (target_ulong arg1
, target_ulong arg2
)
182 set_HI_LOT0(arg1
, 0 - ((uint64_t)(uint32_t)arg1
* (uint64_t)(uint32_t)arg2
));
187 target_ulong
helper_macc (target_ulong arg1
, target_ulong arg2
)
189 set_HI_LOT0(arg1
, ((int64_t)get_HILO()) + ((int64_t)(int32_t)arg1
* (int64_t)(int32_t)arg2
));
194 target_ulong
helper_macchi (target_ulong arg1
, target_ulong arg2
)
196 set_HIT0_LO(arg1
, ((int64_t)get_HILO()) + ((int64_t)(int32_t)arg1
* (int64_t)(int32_t)arg2
));
201 target_ulong
helper_maccu (target_ulong arg1
, target_ulong arg2
)
203 set_HI_LOT0(arg1
, ((uint64_t)get_HILO()) + ((uint64_t)(uint32_t)arg1
* (uint64_t)(uint32_t)arg2
));
208 target_ulong
helper_macchiu (target_ulong arg1
, target_ulong arg2
)
210 set_HIT0_LO(arg1
, ((uint64_t)get_HILO()) + ((uint64_t)(uint32_t)arg1
* (uint64_t)(uint32_t)arg2
));
215 target_ulong
helper_msac (target_ulong arg1
, target_ulong arg2
)
217 set_HI_LOT0(arg1
, ((int64_t)get_HILO()) - ((int64_t)(int32_t)arg1
* (int64_t)(int32_t)arg2
));
222 target_ulong
helper_msachi (target_ulong arg1
, target_ulong arg2
)
224 set_HIT0_LO(arg1
, ((int64_t)get_HILO()) - ((int64_t)(int32_t)arg1
* (int64_t)(int32_t)arg2
));
229 target_ulong
helper_msacu (target_ulong arg1
, target_ulong arg2
)
231 set_HI_LOT0(arg1
, ((uint64_t)get_HILO()) - ((uint64_t)(uint32_t)arg1
* (uint64_t)(uint32_t)arg2
));
236 target_ulong
helper_msachiu (target_ulong arg1
, target_ulong arg2
)
238 set_HIT0_LO(arg1
, ((uint64_t)get_HILO()) - ((uint64_t)(uint32_t)arg1
* (uint64_t)(uint32_t)arg2
));
243 target_ulong
helper_mulhi (target_ulong arg1
, target_ulong arg2
)
245 set_HIT0_LO(arg1
, (int64_t)(int32_t)arg1
* (int64_t)(int32_t)arg2
);
250 target_ulong
helper_mulhiu (target_ulong arg1
, target_ulong arg2
)
252 set_HIT0_LO(arg1
, (uint64_t)(uint32_t)arg1
* (uint64_t)(uint32_t)arg2
);
257 target_ulong
helper_mulshi (target_ulong arg1
, target_ulong arg2
)
259 set_HIT0_LO(arg1
, 0 - ((int64_t)(int32_t)arg1
* (int64_t)(int32_t)arg2
));
264 target_ulong
helper_mulshiu (target_ulong arg1
, target_ulong arg2
)
266 set_HIT0_LO(arg1
, 0 - ((uint64_t)(uint32_t)arg1
* (uint64_t)(uint32_t)arg2
));
272 void helper_dmult (target_ulong arg1
, target_ulong arg2
)
274 muls64(&(env
->active_tc
.LO
[0]), &(env
->active_tc
.HI
[0]), arg1
, arg2
);
277 void helper_dmultu (target_ulong arg1
, target_ulong arg2
)
279 mulu64(&(env
->active_tc
.LO
[0]), &(env
->active_tc
.HI
[0]), arg1
, arg2
);
283 #ifndef CONFIG_USER_ONLY
285 static inline target_phys_addr_t
do_translate_address(target_ulong address
, int rw
)
287 target_phys_addr_t lladdr
;
289 lladdr
= cpu_mips_translate_address(env
, address
, rw
);
291 if (lladdr
== -1LL) {
298 #define HELPER_LD_ATOMIC(name, insn) \
299 target_ulong helper_##name(target_ulong arg, int mem_idx) \
301 env->lladdr = do_translate_address(arg, 0); \
302 env->llval = do_##insn(arg, mem_idx); \
305 HELPER_LD_ATOMIC(ll
, lw
)
307 HELPER_LD_ATOMIC(lld
, ld
)
309 #undef HELPER_LD_ATOMIC
311 #define HELPER_ST_ATOMIC(name, ld_insn, st_insn, almask) \
312 target_ulong helper_##name(target_ulong arg1, target_ulong arg2, int mem_idx) \
316 if (arg2 & almask) { \
317 env->CP0_BadVAddr = arg2; \
318 helper_raise_exception(EXCP_AdES); \
320 if (do_translate_address(arg2, 1) == env->lladdr) { \
321 tmp = do_##ld_insn(arg2, mem_idx); \
322 if (tmp == env->llval) { \
323 do_##st_insn(arg2, arg1, mem_idx); \
329 HELPER_ST_ATOMIC(sc
, lw
, sw
, 0x3)
331 HELPER_ST_ATOMIC(scd
, ld
, sd
, 0x7)
333 #undef HELPER_ST_ATOMIC
336 #ifdef TARGET_WORDS_BIGENDIAN
337 #define GET_LMASK(v) ((v) & 3)
338 #define GET_OFFSET(addr, offset) (addr + (offset))
340 #define GET_LMASK(v) (((v) & 3) ^ 3)
341 #define GET_OFFSET(addr, offset) (addr - (offset))
344 target_ulong
helper_lwl(target_ulong arg1
, target_ulong arg2
, int mem_idx
)
348 tmp
= do_lbu(arg2
, mem_idx
);
349 arg1
= (arg1
& 0x00FFFFFF) | (tmp
<< 24);
351 if (GET_LMASK(arg2
) <= 2) {
352 tmp
= do_lbu(GET_OFFSET(arg2
, 1), mem_idx
);
353 arg1
= (arg1
& 0xFF00FFFF) | (tmp
<< 16);
356 if (GET_LMASK(arg2
) <= 1) {
357 tmp
= do_lbu(GET_OFFSET(arg2
, 2), mem_idx
);
358 arg1
= (arg1
& 0xFFFF00FF) | (tmp
<< 8);
361 if (GET_LMASK(arg2
) == 0) {
362 tmp
= do_lbu(GET_OFFSET(arg2
, 3), mem_idx
);
363 arg1
= (arg1
& 0xFFFFFF00) | tmp
;
365 return (int32_t)arg1
;
368 target_ulong
helper_lwr(target_ulong arg1
, target_ulong arg2
, int mem_idx
)
372 tmp
= do_lbu(arg2
, mem_idx
);
373 arg1
= (arg1
& 0xFFFFFF00) | tmp
;
375 if (GET_LMASK(arg2
) >= 1) {
376 tmp
= do_lbu(GET_OFFSET(arg2
, -1), mem_idx
);
377 arg1
= (arg1
& 0xFFFF00FF) | (tmp
<< 8);
380 if (GET_LMASK(arg2
) >= 2) {
381 tmp
= do_lbu(GET_OFFSET(arg2
, -2), mem_idx
);
382 arg1
= (arg1
& 0xFF00FFFF) | (tmp
<< 16);
385 if (GET_LMASK(arg2
) == 3) {
386 tmp
= do_lbu(GET_OFFSET(arg2
, -3), mem_idx
);
387 arg1
= (arg1
& 0x00FFFFFF) | (tmp
<< 24);
389 return (int32_t)arg1
;
392 void helper_swl(target_ulong arg1
, target_ulong arg2
, int mem_idx
)
394 do_sb(arg2
, (uint8_t)(arg1
>> 24), mem_idx
);
396 if (GET_LMASK(arg2
) <= 2)
397 do_sb(GET_OFFSET(arg2
, 1), (uint8_t)(arg1
>> 16), mem_idx
);
399 if (GET_LMASK(arg2
) <= 1)
400 do_sb(GET_OFFSET(arg2
, 2), (uint8_t)(arg1
>> 8), mem_idx
);
402 if (GET_LMASK(arg2
) == 0)
403 do_sb(GET_OFFSET(arg2
, 3), (uint8_t)arg1
, mem_idx
);
406 void helper_swr(target_ulong arg1
, target_ulong arg2
, int mem_idx
)
408 do_sb(arg2
, (uint8_t)arg1
, mem_idx
);
410 if (GET_LMASK(arg2
) >= 1)
411 do_sb(GET_OFFSET(arg2
, -1), (uint8_t)(arg1
>> 8), mem_idx
);
413 if (GET_LMASK(arg2
) >= 2)
414 do_sb(GET_OFFSET(arg2
, -2), (uint8_t)(arg1
>> 16), mem_idx
);
416 if (GET_LMASK(arg2
) == 3)
417 do_sb(GET_OFFSET(arg2
, -3), (uint8_t)(arg1
>> 24), mem_idx
);
420 #if defined(TARGET_MIPS64)
421 /* "half" load and stores. We must do the memory access inline,
422 or fault handling won't work. */
424 #ifdef TARGET_WORDS_BIGENDIAN
425 #define GET_LMASK64(v) ((v) & 7)
427 #define GET_LMASK64(v) (((v) & 7) ^ 7)
430 target_ulong
helper_ldl(target_ulong arg1
, target_ulong arg2
, int mem_idx
)
434 tmp
= do_lbu(arg2
, mem_idx
);
435 arg1
= (arg1
& 0x00FFFFFFFFFFFFFFULL
) | (tmp
<< 56);
437 if (GET_LMASK64(arg2
) <= 6) {
438 tmp
= do_lbu(GET_OFFSET(arg2
, 1), mem_idx
);
439 arg1
= (arg1
& 0xFF00FFFFFFFFFFFFULL
) | (tmp
<< 48);
442 if (GET_LMASK64(arg2
) <= 5) {
443 tmp
= do_lbu(GET_OFFSET(arg2
, 2), mem_idx
);
444 arg1
= (arg1
& 0xFFFF00FFFFFFFFFFULL
) | (tmp
<< 40);
447 if (GET_LMASK64(arg2
) <= 4) {
448 tmp
= do_lbu(GET_OFFSET(arg2
, 3), mem_idx
);
449 arg1
= (arg1
& 0xFFFFFF00FFFFFFFFULL
) | (tmp
<< 32);
452 if (GET_LMASK64(arg2
) <= 3) {
453 tmp
= do_lbu(GET_OFFSET(arg2
, 4), mem_idx
);
454 arg1
= (arg1
& 0xFFFFFFFF00FFFFFFULL
) | (tmp
<< 24);
457 if (GET_LMASK64(arg2
) <= 2) {
458 tmp
= do_lbu(GET_OFFSET(arg2
, 5), mem_idx
);
459 arg1
= (arg1
& 0xFFFFFFFFFF00FFFFULL
) | (tmp
<< 16);
462 if (GET_LMASK64(arg2
) <= 1) {
463 tmp
= do_lbu(GET_OFFSET(arg2
, 6), mem_idx
);
464 arg1
= (arg1
& 0xFFFFFFFFFFFF00FFULL
) | (tmp
<< 8);
467 if (GET_LMASK64(arg2
) == 0) {
468 tmp
= do_lbu(GET_OFFSET(arg2
, 7), mem_idx
);
469 arg1
= (arg1
& 0xFFFFFFFFFFFFFF00ULL
) | tmp
;
475 target_ulong
helper_ldr(target_ulong arg1
, target_ulong arg2
, int mem_idx
)
479 tmp
= do_lbu(arg2
, mem_idx
);
480 arg1
= (arg1
& 0xFFFFFFFFFFFFFF00ULL
) | tmp
;
482 if (GET_LMASK64(arg2
) >= 1) {
483 tmp
= do_lbu(GET_OFFSET(arg2
, -1), mem_idx
);
484 arg1
= (arg1
& 0xFFFFFFFFFFFF00FFULL
) | (tmp
<< 8);
487 if (GET_LMASK64(arg2
) >= 2) {
488 tmp
= do_lbu(GET_OFFSET(arg2
, -2), mem_idx
);
489 arg1
= (arg1
& 0xFFFFFFFFFF00FFFFULL
) | (tmp
<< 16);
492 if (GET_LMASK64(arg2
) >= 3) {
493 tmp
= do_lbu(GET_OFFSET(arg2
, -3), mem_idx
);
494 arg1
= (arg1
& 0xFFFFFFFF00FFFFFFULL
) | (tmp
<< 24);
497 if (GET_LMASK64(arg2
) >= 4) {
498 tmp
= do_lbu(GET_OFFSET(arg2
, -4), mem_idx
);
499 arg1
= (arg1
& 0xFFFFFF00FFFFFFFFULL
) | (tmp
<< 32);
502 if (GET_LMASK64(arg2
) >= 5) {
503 tmp
= do_lbu(GET_OFFSET(arg2
, -5), mem_idx
);
504 arg1
= (arg1
& 0xFFFF00FFFFFFFFFFULL
) | (tmp
<< 40);
507 if (GET_LMASK64(arg2
) >= 6) {
508 tmp
= do_lbu(GET_OFFSET(arg2
, -6), mem_idx
);
509 arg1
= (arg1
& 0xFF00FFFFFFFFFFFFULL
) | (tmp
<< 48);
512 if (GET_LMASK64(arg2
) == 7) {
513 tmp
= do_lbu(GET_OFFSET(arg2
, -7), mem_idx
);
514 arg1
= (arg1
& 0x00FFFFFFFFFFFFFFULL
) | (tmp
<< 56);
520 void helper_sdl(target_ulong arg1
, target_ulong arg2
, int mem_idx
)
522 do_sb(arg2
, (uint8_t)(arg1
>> 56), mem_idx
);
524 if (GET_LMASK64(arg2
) <= 6)
525 do_sb(GET_OFFSET(arg2
, 1), (uint8_t)(arg1
>> 48), mem_idx
);
527 if (GET_LMASK64(arg2
) <= 5)
528 do_sb(GET_OFFSET(arg2
, 2), (uint8_t)(arg1
>> 40), mem_idx
);
530 if (GET_LMASK64(arg2
) <= 4)
531 do_sb(GET_OFFSET(arg2
, 3), (uint8_t)(arg1
>> 32), mem_idx
);
533 if (GET_LMASK64(arg2
) <= 3)
534 do_sb(GET_OFFSET(arg2
, 4), (uint8_t)(arg1
>> 24), mem_idx
);
536 if (GET_LMASK64(arg2
) <= 2)
537 do_sb(GET_OFFSET(arg2
, 5), (uint8_t)(arg1
>> 16), mem_idx
);
539 if (GET_LMASK64(arg2
) <= 1)
540 do_sb(GET_OFFSET(arg2
, 6), (uint8_t)(arg1
>> 8), mem_idx
);
542 if (GET_LMASK64(arg2
) <= 0)
543 do_sb(GET_OFFSET(arg2
, 7), (uint8_t)arg1
, mem_idx
);
546 void helper_sdr(target_ulong arg1
, target_ulong arg2
, int mem_idx
)
548 do_sb(arg2
, (uint8_t)arg1
, mem_idx
);
550 if (GET_LMASK64(arg2
) >= 1)
551 do_sb(GET_OFFSET(arg2
, -1), (uint8_t)(arg1
>> 8), mem_idx
);
553 if (GET_LMASK64(arg2
) >= 2)
554 do_sb(GET_OFFSET(arg2
, -2), (uint8_t)(arg1
>> 16), mem_idx
);
556 if (GET_LMASK64(arg2
) >= 3)
557 do_sb(GET_OFFSET(arg2
, -3), (uint8_t)(arg1
>> 24), mem_idx
);
559 if (GET_LMASK64(arg2
) >= 4)
560 do_sb(GET_OFFSET(arg2
, -4), (uint8_t)(arg1
>> 32), mem_idx
);
562 if (GET_LMASK64(arg2
) >= 5)
563 do_sb(GET_OFFSET(arg2
, -5), (uint8_t)(arg1
>> 40), mem_idx
);
565 if (GET_LMASK64(arg2
) >= 6)
566 do_sb(GET_OFFSET(arg2
, -6), (uint8_t)(arg1
>> 48), mem_idx
);
568 if (GET_LMASK64(arg2
) == 7)
569 do_sb(GET_OFFSET(arg2
, -7), (uint8_t)(arg1
>> 56), mem_idx
);
571 #endif /* TARGET_MIPS64 */
573 static const int multiple_regs
[] = { 16, 17, 18, 19, 20, 21, 22, 23, 30 };
575 void helper_lwm (target_ulong addr
, target_ulong reglist
, uint32_t mem_idx
)
577 target_ulong base_reglist
= reglist
& 0xf;
578 target_ulong do_r31
= reglist
& 0x10;
579 #ifdef CONFIG_USER_ONLY
581 #define ldfun ldl_raw
583 uint32_t (*ldfun
)(target_ulong
);
587 case 0: ldfun
= ldl_kernel
; break;
588 case 1: ldfun
= ldl_super
; break;
590 case 2: ldfun
= ldl_user
; break;
594 if (base_reglist
> 0 && base_reglist
<= ARRAY_SIZE (multiple_regs
)) {
597 for (i
= 0; i
< base_reglist
; i
++) {
598 env
->active_tc
.gpr
[multiple_regs
[i
]] = (target_long
) ldfun(addr
);
604 env
->active_tc
.gpr
[31] = (target_long
) ldfun(addr
);
608 void helper_swm (target_ulong addr
, target_ulong reglist
, uint32_t mem_idx
)
610 target_ulong base_reglist
= reglist
& 0xf;
611 target_ulong do_r31
= reglist
& 0x10;
612 #ifdef CONFIG_USER_ONLY
614 #define stfun stl_raw
616 void (*stfun
)(target_ulong
, uint32_t);
620 case 0: stfun
= stl_kernel
; break;
621 case 1: stfun
= stl_super
; break;
623 case 2: stfun
= stl_user
; break;
627 if (base_reglist
> 0 && base_reglist
<= ARRAY_SIZE (multiple_regs
)) {
630 for (i
= 0; i
< base_reglist
; i
++) {
631 stfun(addr
, env
->active_tc
.gpr
[multiple_regs
[i
]]);
637 stfun(addr
, env
->active_tc
.gpr
[31]);
#if defined(TARGET_MIPS64)
/* microMIPS LDM: 64-bit variant of LWM (8-byte stride).  Dropped
   switch/increment/guard lines restored — confirm against the original. */
void helper_ldm (target_ulong addr, target_ulong reglist, uint32_t mem_idx)
{
    target_ulong base_reglist = reglist & 0xf;
    target_ulong do_r31 = reglist & 0x10;
#ifdef CONFIG_USER_ONLY
#undef ldfun
#define ldfun ldq_raw
#else
    uint64_t (*ldfun)(target_ulong);

    switch (mem_idx)
    {
    case 0: ldfun = ldq_kernel; break;
    case 1: ldfun = ldq_super; break;
    default:
    case 2: ldfun = ldq_user; break;
    }
#endif

    if (base_reglist > 0 && base_reglist <= ARRAY_SIZE (multiple_regs)) {
        target_ulong i;

        for (i = 0; i < base_reglist; i++) {
            env->active_tc.gpr[multiple_regs[i]] = ldfun(addr);
            addr += 8;
        }
    }

    if (do_r31) {
        env->active_tc.gpr[31] = ldfun(addr);
    }
}

/* microMIPS SDM: 64-bit variant of SWM (8-byte stride). */
void helper_sdm (target_ulong addr, target_ulong reglist, uint32_t mem_idx)
{
    target_ulong base_reglist = reglist & 0xf;
    target_ulong do_r31 = reglist & 0x10;
#ifdef CONFIG_USER_ONLY
#undef stfun
#define stfun stq_raw
#else
    void (*stfun)(target_ulong, uint64_t);

    switch (mem_idx)
    {
    case 0: stfun = stq_kernel; break;
    case 1: stfun = stq_super; break;
    default:
    case 2: stfun = stq_user; break;
    }
#endif

    if (base_reglist > 0 && base_reglist <= ARRAY_SIZE (multiple_regs)) {
        target_ulong i;

        for (i = 0; i < base_reglist; i++) {
            stfun(addr, env->active_tc.gpr[multiple_regs[i]]);
            addr += 8;
        }
    }

    if (do_r31) {
        stfun(addr, env->active_tc.gpr[31]);
    }
}
#endif /* TARGET_MIPS64 */
709 #ifndef CONFIG_USER_ONLY
711 target_ulong
helper_mfc0_mvpcontrol (void)
713 return env
->mvp
->CP0_MVPControl
;
716 target_ulong
helper_mfc0_mvpconf0 (void)
718 return env
->mvp
->CP0_MVPConf0
;
721 target_ulong
helper_mfc0_mvpconf1 (void)
723 return env
->mvp
->CP0_MVPConf1
;
726 target_ulong
helper_mfc0_random (void)
728 return (int32_t)cpu_mips_get_random(env
);
731 target_ulong
helper_mfc0_tcstatus (void)
733 return env
->active_tc
.CP0_TCStatus
;
736 target_ulong
helper_mftc0_tcstatus(void)
738 int other_tc
= env
->CP0_VPEControl
& (0xff << CP0VPECo_TargTC
);
740 if (other_tc
== env
->current_tc
)
741 return env
->active_tc
.CP0_TCStatus
;
743 return env
->tcs
[other_tc
].CP0_TCStatus
;
746 target_ulong
helper_mfc0_tcbind (void)
748 return env
->active_tc
.CP0_TCBind
;
751 target_ulong
helper_mftc0_tcbind(void)
753 int other_tc
= env
->CP0_VPEControl
& (0xff << CP0VPECo_TargTC
);
755 if (other_tc
== env
->current_tc
)
756 return env
->active_tc
.CP0_TCBind
;
758 return env
->tcs
[other_tc
].CP0_TCBind
;
761 target_ulong
helper_mfc0_tcrestart (void)
763 return env
->active_tc
.PC
;
766 target_ulong
helper_mftc0_tcrestart(void)
768 int other_tc
= env
->CP0_VPEControl
& (0xff << CP0VPECo_TargTC
);
770 if (other_tc
== env
->current_tc
)
771 return env
->active_tc
.PC
;
773 return env
->tcs
[other_tc
].PC
;
776 target_ulong
helper_mfc0_tchalt (void)
778 return env
->active_tc
.CP0_TCHalt
;
781 target_ulong
helper_mftc0_tchalt(void)
783 int other_tc
= env
->CP0_VPEControl
& (0xff << CP0VPECo_TargTC
);
785 if (other_tc
== env
->current_tc
)
786 return env
->active_tc
.CP0_TCHalt
;
788 return env
->tcs
[other_tc
].CP0_TCHalt
;
791 target_ulong
helper_mfc0_tccontext (void)
793 return env
->active_tc
.CP0_TCContext
;
796 target_ulong
helper_mftc0_tccontext(void)
798 int other_tc
= env
->CP0_VPEControl
& (0xff << CP0VPECo_TargTC
);
800 if (other_tc
== env
->current_tc
)
801 return env
->active_tc
.CP0_TCContext
;
803 return env
->tcs
[other_tc
].CP0_TCContext
;
806 target_ulong
helper_mfc0_tcschedule (void)
808 return env
->active_tc
.CP0_TCSchedule
;
811 target_ulong
helper_mftc0_tcschedule(void)
813 int other_tc
= env
->CP0_VPEControl
& (0xff << CP0VPECo_TargTC
);
815 if (other_tc
== env
->current_tc
)
816 return env
->active_tc
.CP0_TCSchedule
;
818 return env
->tcs
[other_tc
].CP0_TCSchedule
;
821 target_ulong
helper_mfc0_tcschefback (void)
823 return env
->active_tc
.CP0_TCScheFBack
;
826 target_ulong
helper_mftc0_tcschefback(void)
828 int other_tc
= env
->CP0_VPEControl
& (0xff << CP0VPECo_TargTC
);
830 if (other_tc
== env
->current_tc
)
831 return env
->active_tc
.CP0_TCScheFBack
;
833 return env
->tcs
[other_tc
].CP0_TCScheFBack
;
836 target_ulong
helper_mfc0_count (void)
838 return (int32_t)cpu_mips_get_count(env
);
841 target_ulong
helper_mftc0_entryhi(void)
843 int other_tc
= env
->CP0_VPEControl
& (0xff << CP0VPECo_TargTC
);
846 if (other_tc
== env
->current_tc
)
847 tcstatus
= env
->active_tc
.CP0_TCStatus
;
849 tcstatus
= env
->tcs
[other_tc
].CP0_TCStatus
;
851 return (env
->CP0_EntryHi
& ~0xff) | (tcstatus
& 0xff);
854 target_ulong
helper_mftc0_status(void)
856 int other_tc
= env
->CP0_VPEControl
& (0xff << CP0VPECo_TargTC
);
860 if (other_tc
== env
->current_tc
)
861 tcstatus
= env
->active_tc
.CP0_TCStatus
;
863 tcstatus
= env
->tcs
[other_tc
].CP0_TCStatus
;
865 t0
= env
->CP0_Status
& ~0xf1000018;
866 t0
|= tcstatus
& (0xf << CP0TCSt_TCU0
);
867 t0
|= (tcstatus
& (1 << CP0TCSt_TMX
)) >> (CP0TCSt_TMX
- CP0St_MX
);
868 t0
|= (tcstatus
& (0x3 << CP0TCSt_TKSU
)) >> (CP0TCSt_TKSU
- CP0St_KSU
);
873 target_ulong
helper_mfc0_lladdr (void)
875 return (int32_t)(env
->lladdr
>> env
->CP0_LLAddr_shift
);
878 target_ulong
helper_mfc0_watchlo (uint32_t sel
)
880 return (int32_t)env
->CP0_WatchLo
[sel
];
883 target_ulong
helper_mfc0_watchhi (uint32_t sel
)
885 return env
->CP0_WatchHi
[sel
];
888 target_ulong
helper_mfc0_debug (void)
890 target_ulong t0
= env
->CP0_Debug
;
891 if (env
->hflags
& MIPS_HFLAG_DM
)
897 target_ulong
helper_mftc0_debug(void)
899 int other_tc
= env
->CP0_VPEControl
& (0xff << CP0VPECo_TargTC
);
902 if (other_tc
== env
->current_tc
)
903 tcstatus
= env
->active_tc
.CP0_Debug_tcstatus
;
905 tcstatus
= env
->tcs
[other_tc
].CP0_Debug_tcstatus
;
907 /* XXX: Might be wrong, check with EJTAG spec. */
908 return (env
->CP0_Debug
& ~((1 << CP0DB_SSt
) | (1 << CP0DB_Halt
))) |
909 (tcstatus
& ((1 << CP0DB_SSt
) | (1 << CP0DB_Halt
)));
912 #if defined(TARGET_MIPS64)
913 target_ulong
helper_dmfc0_tcrestart (void)
915 return env
->active_tc
.PC
;
918 target_ulong
helper_dmfc0_tchalt (void)
920 return env
->active_tc
.CP0_TCHalt
;
923 target_ulong
helper_dmfc0_tccontext (void)
925 return env
->active_tc
.CP0_TCContext
;
928 target_ulong
helper_dmfc0_tcschedule (void)
930 return env
->active_tc
.CP0_TCSchedule
;
933 target_ulong
helper_dmfc0_tcschefback (void)
935 return env
->active_tc
.CP0_TCScheFBack
;
938 target_ulong
helper_dmfc0_lladdr (void)
940 return env
->lladdr
>> env
->CP0_LLAddr_shift
;
943 target_ulong
helper_dmfc0_watchlo (uint32_t sel
)
945 return env
->CP0_WatchLo
[sel
];
947 #endif /* TARGET_MIPS64 */
949 void helper_mtc0_index (target_ulong arg1
)
952 unsigned int tmp
= env
->tlb
->nb_tlb
;
958 env
->CP0_Index
= (env
->CP0_Index
& 0x80000000) | (arg1
& (num
- 1));
961 void helper_mtc0_mvpcontrol (target_ulong arg1
)
966 if (env
->CP0_VPEConf0
& (1 << CP0VPEC0_MVP
))
967 mask
|= (1 << CP0MVPCo_CPA
) | (1 << CP0MVPCo_VPC
) |
969 if (env
->mvp
->CP0_MVPControl
& (1 << CP0MVPCo_VPC
))
970 mask
|= (1 << CP0MVPCo_STLB
);
971 newval
= (env
->mvp
->CP0_MVPControl
& ~mask
) | (arg1
& mask
);
973 // TODO: Enable/disable shared TLB, enable/disable VPEs.
975 env
->mvp
->CP0_MVPControl
= newval
;
978 void helper_mtc0_vpecontrol (target_ulong arg1
)
983 mask
= (1 << CP0VPECo_YSI
) | (1 << CP0VPECo_GSI
) |
984 (1 << CP0VPECo_TE
) | (0xff << CP0VPECo_TargTC
);
985 newval
= (env
->CP0_VPEControl
& ~mask
) | (arg1
& mask
);
987 /* Yield scheduler intercept not implemented. */
988 /* Gating storage scheduler intercept not implemented. */
990 // TODO: Enable/disable TCs.
992 env
->CP0_VPEControl
= newval
;
995 void helper_mtc0_vpeconf0 (target_ulong arg1
)
1000 if (env
->CP0_VPEConf0
& (1 << CP0VPEC0_MVP
)) {
1001 if (env
->CP0_VPEConf0
& (1 << CP0VPEC0_VPA
))
1002 mask
|= (0xff << CP0VPEC0_XTC
);
1003 mask
|= (1 << CP0VPEC0_MVP
) | (1 << CP0VPEC0_VPA
);
1005 newval
= (env
->CP0_VPEConf0
& ~mask
) | (arg1
& mask
);
1007 // TODO: TC exclusive handling due to ERL/EXL.
1009 env
->CP0_VPEConf0
= newval
;
1012 void helper_mtc0_vpeconf1 (target_ulong arg1
)
1017 if (env
->mvp
->CP0_MVPControl
& (1 << CP0MVPCo_VPC
))
1018 mask
|= (0xff << CP0VPEC1_NCX
) | (0xff << CP0VPEC1_NCP2
) |
1019 (0xff << CP0VPEC1_NCP1
);
1020 newval
= (env
->CP0_VPEConf1
& ~mask
) | (arg1
& mask
);
1022 /* UDI not implemented. */
1023 /* CP2 not implemented. */
1025 // TODO: Handle FPU (CP1) binding.
1027 env
->CP0_VPEConf1
= newval
;
1030 void helper_mtc0_yqmask (target_ulong arg1
)
1032 /* Yield qualifier inputs not implemented. */
1033 env
->CP0_YQMask
= 0x00000000;
1036 void helper_mtc0_vpeopt (target_ulong arg1
)
1038 env
->CP0_VPEOpt
= arg1
& 0x0000ffff;
1041 void helper_mtc0_entrylo0 (target_ulong arg1
)
1043 /* Large physaddr (PABITS) not implemented */
1044 /* 1k pages not implemented */
1045 env
->CP0_EntryLo0
= arg1
& 0x3FFFFFFF;
1048 void helper_mtc0_tcstatus (target_ulong arg1
)
1050 uint32_t mask
= env
->CP0_TCStatus_rw_bitmask
;
1053 newval
= (env
->active_tc
.CP0_TCStatus
& ~mask
) | (arg1
& mask
);
1055 // TODO: Sync with CP0_Status.
1057 env
->active_tc
.CP0_TCStatus
= newval
;
1060 void helper_mttc0_tcstatus (target_ulong arg1
)
1062 int other_tc
= env
->CP0_VPEControl
& (0xff << CP0VPECo_TargTC
);
1064 // TODO: Sync with CP0_Status.
1066 if (other_tc
== env
->current_tc
)
1067 env
->active_tc
.CP0_TCStatus
= arg1
;
1069 env
->tcs
[other_tc
].CP0_TCStatus
= arg1
;
1072 void helper_mtc0_tcbind (target_ulong arg1
)
1074 uint32_t mask
= (1 << CP0TCBd_TBE
);
1077 if (env
->mvp
->CP0_MVPControl
& (1 << CP0MVPCo_VPC
))
1078 mask
|= (1 << CP0TCBd_CurVPE
);
1079 newval
= (env
->active_tc
.CP0_TCBind
& ~mask
) | (arg1
& mask
);
1080 env
->active_tc
.CP0_TCBind
= newval
;
1083 void helper_mttc0_tcbind (target_ulong arg1
)
1085 int other_tc
= env
->CP0_VPEControl
& (0xff << CP0VPECo_TargTC
);
1086 uint32_t mask
= (1 << CP0TCBd_TBE
);
1089 if (env
->mvp
->CP0_MVPControl
& (1 << CP0MVPCo_VPC
))
1090 mask
|= (1 << CP0TCBd_CurVPE
);
1091 if (other_tc
== env
->current_tc
) {
1092 newval
= (env
->active_tc
.CP0_TCBind
& ~mask
) | (arg1
& mask
);
1093 env
->active_tc
.CP0_TCBind
= newval
;
1095 newval
= (env
->tcs
[other_tc
].CP0_TCBind
& ~mask
) | (arg1
& mask
);
1096 env
->tcs
[other_tc
].CP0_TCBind
= newval
;
1100 void helper_mtc0_tcrestart (target_ulong arg1
)
1102 env
->active_tc
.PC
= arg1
;
1103 env
->active_tc
.CP0_TCStatus
&= ~(1 << CP0TCSt_TDS
);
1105 /* MIPS16 not implemented. */
1108 void helper_mttc0_tcrestart (target_ulong arg1
)
1110 int other_tc
= env
->CP0_VPEControl
& (0xff << CP0VPECo_TargTC
);
1112 if (other_tc
== env
->current_tc
) {
1113 env
->active_tc
.PC
= arg1
;
1114 env
->active_tc
.CP0_TCStatus
&= ~(1 << CP0TCSt_TDS
);
1116 /* MIPS16 not implemented. */
1118 env
->tcs
[other_tc
].PC
= arg1
;
1119 env
->tcs
[other_tc
].CP0_TCStatus
&= ~(1 << CP0TCSt_TDS
);
1121 /* MIPS16 not implemented. */
1125 void helper_mtc0_tchalt (target_ulong arg1
)
1127 env
->active_tc
.CP0_TCHalt
= arg1
& 0x1;
1129 // TODO: Halt TC / Restart (if allocated+active) TC.
1132 void helper_mttc0_tchalt (target_ulong arg1
)
1134 int other_tc
= env
->CP0_VPEControl
& (0xff << CP0VPECo_TargTC
);
1136 // TODO: Halt TC / Restart (if allocated+active) TC.
1138 if (other_tc
== env
->current_tc
)
1139 env
->active_tc
.CP0_TCHalt
= arg1
;
1141 env
->tcs
[other_tc
].CP0_TCHalt
= arg1
;
1144 void helper_mtc0_tccontext (target_ulong arg1
)
1146 env
->active_tc
.CP0_TCContext
= arg1
;
1149 void helper_mttc0_tccontext (target_ulong arg1
)
1151 int other_tc
= env
->CP0_VPEControl
& (0xff << CP0VPECo_TargTC
);
1153 if (other_tc
== env
->current_tc
)
1154 env
->active_tc
.CP0_TCContext
= arg1
;
1156 env
->tcs
[other_tc
].CP0_TCContext
= arg1
;
1159 void helper_mtc0_tcschedule (target_ulong arg1
)
1161 env
->active_tc
.CP0_TCSchedule
= arg1
;
1164 void helper_mttc0_tcschedule (target_ulong arg1
)
1166 int other_tc
= env
->CP0_VPEControl
& (0xff << CP0VPECo_TargTC
);
1168 if (other_tc
== env
->current_tc
)
1169 env
->active_tc
.CP0_TCSchedule
= arg1
;
1171 env
->tcs
[other_tc
].CP0_TCSchedule
= arg1
;
1174 void helper_mtc0_tcschefback (target_ulong arg1
)
1176 env
->active_tc
.CP0_TCScheFBack
= arg1
;
1179 void helper_mttc0_tcschefback (target_ulong arg1
)
1181 int other_tc
= env
->CP0_VPEControl
& (0xff << CP0VPECo_TargTC
);
1183 if (other_tc
== env
->current_tc
)
1184 env
->active_tc
.CP0_TCScheFBack
= arg1
;
1186 env
->tcs
[other_tc
].CP0_TCScheFBack
= arg1
;
1189 void helper_mtc0_entrylo1 (target_ulong arg1
)
1191 /* Large physaddr (PABITS) not implemented */
1192 /* 1k pages not implemented */
1193 env
->CP0_EntryLo1
= arg1
& 0x3FFFFFFF;
1196 void helper_mtc0_context (target_ulong arg1
)
1198 env
->CP0_Context
= (env
->CP0_Context
& 0x007FFFFF) | (arg1
& ~0x007FFFFF);
1201 void helper_mtc0_pagemask (target_ulong arg1
)
1203 /* 1k pages not implemented */
1204 env
->CP0_PageMask
= arg1
& (0x1FFFFFFF & (TARGET_PAGE_MASK
<< 1));
1207 void helper_mtc0_pagegrain (target_ulong arg1
)
1209 /* SmartMIPS not implemented */
1210 /* Large physaddr (PABITS) not implemented */
1211 /* 1k pages not implemented */
1212 env
->CP0_PageGrain
= 0;
1215 void helper_mtc0_wired (target_ulong arg1
)
1217 env
->CP0_Wired
= arg1
% env
->tlb
->nb_tlb
;
1220 void helper_mtc0_srsconf0 (target_ulong arg1
)
1222 env
->CP0_SRSConf0
|= arg1
& env
->CP0_SRSConf0_rw_bitmask
;
1225 void helper_mtc0_srsconf1 (target_ulong arg1
)
1227 env
->CP0_SRSConf1
|= arg1
& env
->CP0_SRSConf1_rw_bitmask
;
1230 void helper_mtc0_srsconf2 (target_ulong arg1
)
1232 env
->CP0_SRSConf2
|= arg1
& env
->CP0_SRSConf2_rw_bitmask
;
1235 void helper_mtc0_srsconf3 (target_ulong arg1
)
1237 env
->CP0_SRSConf3
|= arg1
& env
->CP0_SRSConf3_rw_bitmask
;
1240 void helper_mtc0_srsconf4 (target_ulong arg1
)
1242 env
->CP0_SRSConf4
|= arg1
& env
->CP0_SRSConf4_rw_bitmask
;
1245 void helper_mtc0_hwrena (target_ulong arg1
)
1247 env
->CP0_HWREna
= arg1
& 0x0000000F;
1250 void helper_mtc0_count (target_ulong arg1
)
1252 cpu_mips_store_count(env
, arg1
);
1255 void helper_mtc0_entryhi (target_ulong arg1
)
1257 target_ulong old
, val
;
1259 /* 1k pages not implemented */
1260 val
= arg1
& ((TARGET_PAGE_MASK
<< 1) | 0xFF);
1261 #if defined(TARGET_MIPS64)
1262 val
&= env
->SEGMask
;
1264 old
= env
->CP0_EntryHi
;
1265 env
->CP0_EntryHi
= val
;
1266 if (env
->CP0_Config3
& (1 << CP0C3_MT
)) {
1267 uint32_t tcst
= env
->active_tc
.CP0_TCStatus
& ~0xff;
1268 env
->active_tc
.CP0_TCStatus
= tcst
| (val
& 0xff);
1270 /* If the ASID changes, flush qemu's TLB. */
1271 if ((old
& 0xFF) != (val
& 0xFF))
1272 cpu_mips_tlb_flush(env
, 1);
1275 void helper_mttc0_entryhi(target_ulong arg1
)
1277 int other_tc
= env
->CP0_VPEControl
& (0xff << CP0VPECo_TargTC
);
1280 env
->CP0_EntryHi
= (env
->CP0_EntryHi
& 0xff) | (arg1
& ~0xff);
1281 if (other_tc
== env
->current_tc
) {
1282 tcstatus
= (env
->active_tc
.CP0_TCStatus
& ~0xff) | (arg1
& 0xff);
1283 env
->active_tc
.CP0_TCStatus
= tcstatus
;
1285 tcstatus
= (env
->tcs
[other_tc
].CP0_TCStatus
& ~0xff) | (arg1
& 0xff);
1286 env
->tcs
[other_tc
].CP0_TCStatus
= tcstatus
;
1290 void helper_mtc0_compare (target_ulong arg1
)
1292 cpu_mips_store_compare(env
, arg1
);
1295 void helper_mtc0_status (target_ulong arg1
)
1298 uint32_t mask
= env
->CP0_Status_rw_bitmask
;
1301 old
= env
->CP0_Status
;
1302 env
->CP0_Status
= (env
->CP0_Status
& ~mask
) | val
;
1303 compute_hflags(env
);
1304 if (qemu_loglevel_mask(CPU_LOG_EXEC
)) {
1305 qemu_log("Status %08x (%08x) => %08x (%08x) Cause %08x",
1306 old
, old
& env
->CP0_Cause
& CP0Ca_IP_mask
,
1307 val
, val
& env
->CP0_Cause
& CP0Ca_IP_mask
,
1309 switch (env
->hflags
& MIPS_HFLAG_KSU
) {
1310 case MIPS_HFLAG_UM
: qemu_log(", UM\n"); break;
1311 case MIPS_HFLAG_SM
: qemu_log(", SM\n"); break;
1312 case MIPS_HFLAG_KM
: qemu_log("\n"); break;
1313 default: cpu_abort(env
, "Invalid MMU mode!\n"); break;
1316 cpu_mips_update_irq(env
);
1319 void helper_mttc0_status(target_ulong arg1
)
1321 int other_tc
= env
->CP0_VPEControl
& (0xff << CP0VPECo_TargTC
);
1322 int32_t tcstatus
= env
->tcs
[other_tc
].CP0_TCStatus
;
1324 env
->CP0_Status
= arg1
& ~0xf1000018;
1325 tcstatus
= (tcstatus
& ~(0xf << CP0TCSt_TCU0
)) | (arg1
& (0xf << CP0St_CU0
));
1326 tcstatus
= (tcstatus
& ~(1 << CP0TCSt_TMX
)) | ((arg1
& (1 << CP0St_MX
)) << (CP0TCSt_TMX
- CP0St_MX
));
1327 tcstatus
= (tcstatus
& ~(0x3 << CP0TCSt_TKSU
)) | ((arg1
& (0x3 << CP0St_KSU
)) << (CP0TCSt_TKSU
- CP0St_KSU
));
1328 if (other_tc
== env
->current_tc
)
1329 env
->active_tc
.CP0_TCStatus
= tcstatus
;
1331 env
->tcs
[other_tc
].CP0_TCStatus
= tcstatus
;
1334 void helper_mtc0_intctl (target_ulong arg1
)
1336 /* vectored interrupts not implemented, no performance counters. */
1337 env
->CP0_IntCtl
= (env
->CP0_IntCtl
& ~0x000002e0) | (arg1
& 0x000002e0);
1340 void helper_mtc0_srsctl (target_ulong arg1
)
1342 uint32_t mask
= (0xf << CP0SRSCtl_ESS
) | (0xf << CP0SRSCtl_PSS
);
1343 env
->CP0_SRSCtl
= (env
->CP0_SRSCtl
& ~mask
) | (arg1
& mask
);
1346 void helper_mtc0_cause (target_ulong arg1
)
1348 uint32_t mask
= 0x00C00300;
1349 uint32_t old
= env
->CP0_Cause
;
1351 if (env
->insn_flags
& ISA_MIPS32R2
)
1352 mask
|= 1 << CP0Ca_DC
;
1354 env
->CP0_Cause
= (env
->CP0_Cause
& ~mask
) | (arg1
& mask
);
1356 if ((old
^ env
->CP0_Cause
) & (1 << CP0Ca_DC
)) {
1357 if (env
->CP0_Cause
& (1 << CP0Ca_DC
))
1358 cpu_mips_stop_count(env
);
1360 cpu_mips_start_count(env
);
1363 /* Handle the software interrupt as an hardware one, as they
1365 if (arg1
& CP0Ca_IP_mask
) {
1366 cpu_mips_update_irq(env
);
1370 void helper_mtc0_ebase (target_ulong arg1
)
1372 /* vectored interrupts not implemented */
1373 /* Multi-CPU not implemented */
1374 env
->CP0_EBase
= 0x80000000 | (arg1
& 0x3FFFF000);
1377 void helper_mtc0_config0 (target_ulong arg1
)
1379 env
->CP0_Config0
= (env
->CP0_Config0
& 0x81FFFFF8) | (arg1
& 0x00000007);
1382 void helper_mtc0_config2 (target_ulong arg1
)
1384 /* tertiary/secondary caches not implemented */
1385 env
->CP0_Config2
= (env
->CP0_Config2
& 0x8FFF0FFF);
1388 void helper_mtc0_lladdr (target_ulong arg1
)
1390 target_long mask
= env
->CP0_LLAddr_rw_bitmask
;
1391 arg1
= arg1
<< env
->CP0_LLAddr_shift
;
1392 env
->lladdr
= (env
->lladdr
& ~mask
) | (arg1
& mask
);
1395 void helper_mtc0_watchlo (target_ulong arg1
, uint32_t sel
)
1397 /* Watch exceptions for instructions, data loads, data stores
1399 env
->CP0_WatchLo
[sel
] = (arg1
& ~0x7);
1402 void helper_mtc0_watchhi (target_ulong arg1
, uint32_t sel
)
1404 env
->CP0_WatchHi
[sel
] = (arg1
& 0x40FF0FF8);
1405 env
->CP0_WatchHi
[sel
] &= ~(env
->CP0_WatchHi
[sel
] & arg1
& 0x7);
1408 void helper_mtc0_xcontext (target_ulong arg1
)
1410 target_ulong mask
= (1ULL << (env
->SEGBITS
- 7)) - 1;
1411 env
->CP0_XContext
= (env
->CP0_XContext
& mask
) | (arg1
& ~mask
);
1414 void helper_mtc0_framemask (target_ulong arg1
)
1416 env
->CP0_Framemask
= arg1
; /* XXX */
1419 void helper_mtc0_debug (target_ulong arg1
)
1421 env
->CP0_Debug
= (env
->CP0_Debug
& 0x8C03FC1F) | (arg1
& 0x13300120);
1422 if (arg1
& (1 << CP0DB_DM
))
1423 env
->hflags
|= MIPS_HFLAG_DM
;
1425 env
->hflags
&= ~MIPS_HFLAG_DM
;
1428 void helper_mttc0_debug(target_ulong arg1
)
1430 int other_tc
= env
->CP0_VPEControl
& (0xff << CP0VPECo_TargTC
);
1431 uint32_t val
= arg1
& ((1 << CP0DB_SSt
) | (1 << CP0DB_Halt
));
1433 /* XXX: Might be wrong, check with EJTAG spec. */
1434 if (other_tc
== env
->current_tc
)
1435 env
->active_tc
.CP0_Debug_tcstatus
= val
;
1437 env
->tcs
[other_tc
].CP0_Debug_tcstatus
= val
;
1438 env
->CP0_Debug
= (env
->CP0_Debug
& ((1 << CP0DB_SSt
) | (1 << CP0DB_Halt
))) |
1439 (arg1
& ~((1 << CP0DB_SSt
) | (1 << CP0DB_Halt
)));
1442 void helper_mtc0_performance0 (target_ulong arg1
)
1444 env
->CP0_Performance0
= arg1
& 0x000007ff;
1447 void helper_mtc0_taglo (target_ulong arg1
)
1449 env
->CP0_TagLo
= arg1
& 0xFFFFFCF6;
1452 void helper_mtc0_datalo (target_ulong arg1
)
1454 env
->CP0_DataLo
= arg1
; /* XXX */
1457 void helper_mtc0_taghi (target_ulong arg1
)
1459 env
->CP0_TagHi
= arg1
; /* XXX */
1462 void helper_mtc0_datahi (target_ulong arg1
)
1464 env
->CP0_DataHi
= arg1
; /* XXX */
1467 /* MIPS MT functions */
1468 target_ulong
helper_mftgpr(uint32_t sel
)
1470 int other_tc
= env
->CP0_VPEControl
& (0xff << CP0VPECo_TargTC
);
1472 if (other_tc
== env
->current_tc
)
1473 return env
->active_tc
.gpr
[sel
];
1475 return env
->tcs
[other_tc
].gpr
[sel
];
1478 target_ulong
helper_mftlo(uint32_t sel
)
1480 int other_tc
= env
->CP0_VPEControl
& (0xff << CP0VPECo_TargTC
);
1482 if (other_tc
== env
->current_tc
)
1483 return env
->active_tc
.LO
[sel
];
1485 return env
->tcs
[other_tc
].LO
[sel
];
1488 target_ulong
helper_mfthi(uint32_t sel
)
1490 int other_tc
= env
->CP0_VPEControl
& (0xff << CP0VPECo_TargTC
);
1492 if (other_tc
== env
->current_tc
)
1493 return env
->active_tc
.HI
[sel
];
1495 return env
->tcs
[other_tc
].HI
[sel
];
1498 target_ulong
helper_mftacx(uint32_t sel
)
1500 int other_tc
= env
->CP0_VPEControl
& (0xff << CP0VPECo_TargTC
);
1502 if (other_tc
== env
->current_tc
)
1503 return env
->active_tc
.ACX
[sel
];
1505 return env
->tcs
[other_tc
].ACX
[sel
];
1508 target_ulong
helper_mftdsp(void)
1510 int other_tc
= env
->CP0_VPEControl
& (0xff << CP0VPECo_TargTC
);
1512 if (other_tc
== env
->current_tc
)
1513 return env
->active_tc
.DSPControl
;
1515 return env
->tcs
[other_tc
].DSPControl
;
1518 void helper_mttgpr(target_ulong arg1
, uint32_t sel
)
1520 int other_tc
= env
->CP0_VPEControl
& (0xff << CP0VPECo_TargTC
);
1522 if (other_tc
== env
->current_tc
)
1523 env
->active_tc
.gpr
[sel
] = arg1
;
1525 env
->tcs
[other_tc
].gpr
[sel
] = arg1
;
1528 void helper_mttlo(target_ulong arg1
, uint32_t sel
)
1530 int other_tc
= env
->CP0_VPEControl
& (0xff << CP0VPECo_TargTC
);
1532 if (other_tc
== env
->current_tc
)
1533 env
->active_tc
.LO
[sel
] = arg1
;
1535 env
->tcs
[other_tc
].LO
[sel
] = arg1
;
1538 void helper_mtthi(target_ulong arg1
, uint32_t sel
)
1540 int other_tc
= env
->CP0_VPEControl
& (0xff << CP0VPECo_TargTC
);
1542 if (other_tc
== env
->current_tc
)
1543 env
->active_tc
.HI
[sel
] = arg1
;
1545 env
->tcs
[other_tc
].HI
[sel
] = arg1
;
1548 void helper_mttacx(target_ulong arg1
, uint32_t sel
)
1550 int other_tc
= env
->CP0_VPEControl
& (0xff << CP0VPECo_TargTC
);
1552 if (other_tc
== env
->current_tc
)
1553 env
->active_tc
.ACX
[sel
] = arg1
;
1555 env
->tcs
[other_tc
].ACX
[sel
] = arg1
;
1558 void helper_mttdsp(target_ulong arg1
)
1560 int other_tc
= env
->CP0_VPEControl
& (0xff << CP0VPECo_TargTC
);
1562 if (other_tc
== env
->current_tc
)
1563 env
->active_tc
.DSPControl
= arg1
;
1565 env
->tcs
[other_tc
].DSPControl
= arg1
;
1568 /* MIPS MT functions */
1569 target_ulong
helper_dmt(target_ulong arg1
)
1578 target_ulong
helper_emt(target_ulong arg1
)
1587 target_ulong
helper_dvpe(target_ulong arg1
)
1596 target_ulong
helper_evpe(target_ulong arg1
)
1604 #endif /* !CONFIG_USER_ONLY */
1606 void helper_fork(target_ulong arg1
, target_ulong arg2
)
1608 // arg1 = rt, arg2 = rs
1610 // TODO: store to TC register
1613 target_ulong
helper_yield(target_ulong arg1
)
1616 /* No scheduling policy implemented. */
1618 if (env
->CP0_VPEControl
& (1 << CP0VPECo_YSI
) &&
1619 env
->active_tc
.CP0_TCStatus
& (1 << CP0TCSt_DT
)) {
1620 env
->CP0_VPEControl
&= ~(0x7 << CP0VPECo_EXCPT
);
1621 env
->CP0_VPEControl
|= 4 << CP0VPECo_EXCPT
;
1622 helper_raise_exception(EXCP_THREAD
);
1625 } else if (arg1
== 0) {
1626 if (0 /* TODO: TC underflow */) {
1627 env
->CP0_VPEControl
&= ~(0x7 << CP0VPECo_EXCPT
);
1628 helper_raise_exception(EXCP_THREAD
);
1630 // TODO: Deallocate TC
1632 } else if (arg1
> 0) {
1633 /* Yield qualifier inputs not implemented. */
1634 env
->CP0_VPEControl
&= ~(0x7 << CP0VPECo_EXCPT
);
1635 env
->CP0_VPEControl
|= 2 << CP0VPECo_EXCPT
;
1636 helper_raise_exception(EXCP_THREAD
);
1638 return env
->CP0_YQMask
;
1641 #ifndef CONFIG_USER_ONLY
1642 /* TLB management */
1643 static void cpu_mips_tlb_flush (CPUState
*env
, int flush_global
)
1645 /* Flush qemu's TLB and discard all shadowed entries. */
1646 tlb_flush (env
, flush_global
);
1647 env
->tlb
->tlb_in_use
= env
->tlb
->nb_tlb
;
1650 static void r4k_mips_tlb_flush_extra (CPUState
*env
, int first
)
1652 /* Discard entries from env->tlb[first] onwards. */
1653 while (env
->tlb
->tlb_in_use
> first
) {
1654 r4k_invalidate_tlb(env
, --env
->tlb
->tlb_in_use
, 0);
1658 static void r4k_fill_tlb (int idx
)
1662 /* XXX: detect conflicting TLBs and raise a MCHECK exception when needed */
1663 tlb
= &env
->tlb
->mmu
.r4k
.tlb
[idx
];
1664 tlb
->VPN
= env
->CP0_EntryHi
& (TARGET_PAGE_MASK
<< 1);
1665 #if defined(TARGET_MIPS64)
1666 tlb
->VPN
&= env
->SEGMask
;
1668 tlb
->ASID
= env
->CP0_EntryHi
& 0xFF;
1669 tlb
->PageMask
= env
->CP0_PageMask
;
1670 tlb
->G
= env
->CP0_EntryLo0
& env
->CP0_EntryLo1
& 1;
1671 tlb
->V0
= (env
->CP0_EntryLo0
& 2) != 0;
1672 tlb
->D0
= (env
->CP0_EntryLo0
& 4) != 0;
1673 tlb
->C0
= (env
->CP0_EntryLo0
>> 3) & 0x7;
1674 tlb
->PFN
[0] = (env
->CP0_EntryLo0
>> 6) << 12;
1675 tlb
->V1
= (env
->CP0_EntryLo1
& 2) != 0;
1676 tlb
->D1
= (env
->CP0_EntryLo1
& 4) != 0;
1677 tlb
->C1
= (env
->CP0_EntryLo1
>> 3) & 0x7;
1678 tlb
->PFN
[1] = (env
->CP0_EntryLo1
>> 6) << 12;
1681 void r4k_helper_tlbwi (void)
1685 idx
= (env
->CP0_Index
& ~0x80000000) % env
->tlb
->nb_tlb
;
1687 /* Discard cached TLB entries. We could avoid doing this if the
1688 tlbwi is just upgrading access permissions on the current entry;
1689 that might be a further win. */
1690 r4k_mips_tlb_flush_extra (env
, env
->tlb
->nb_tlb
);
1692 r4k_invalidate_tlb(env
, idx
, 0);
1696 void r4k_helper_tlbwr (void)
1698 int r
= cpu_mips_get_random(env
);
1700 r4k_invalidate_tlb(env
, r
, 1);
1704 void r4k_helper_tlbp (void)
1713 ASID
= env
->CP0_EntryHi
& 0xFF;
1714 for (i
= 0; i
< env
->tlb
->nb_tlb
; i
++) {
1715 tlb
= &env
->tlb
->mmu
.r4k
.tlb
[i
];
1716 /* 1k pages are not supported. */
1717 mask
= tlb
->PageMask
| ~(TARGET_PAGE_MASK
<< 1);
1718 tag
= env
->CP0_EntryHi
& ~mask
;
1719 VPN
= tlb
->VPN
& ~mask
;
1720 /* Check ASID, virtual page number & size */
1721 if ((tlb
->G
== 1 || tlb
->ASID
== ASID
) && VPN
== tag
) {
1727 if (i
== env
->tlb
->nb_tlb
) {
1728 /* No match. Discard any shadow entries, if any of them match. */
1729 for (i
= env
->tlb
->nb_tlb
; i
< env
->tlb
->tlb_in_use
; i
++) {
1730 tlb
= &env
->tlb
->mmu
.r4k
.tlb
[i
];
1731 /* 1k pages are not supported. */
1732 mask
= tlb
->PageMask
| ~(TARGET_PAGE_MASK
<< 1);
1733 tag
= env
->CP0_EntryHi
& ~mask
;
1734 VPN
= tlb
->VPN
& ~mask
;
1735 /* Check ASID, virtual page number & size */
1736 if ((tlb
->G
== 1 || tlb
->ASID
== ASID
) && VPN
== tag
) {
1737 r4k_mips_tlb_flush_extra (env
, i
);
1742 env
->CP0_Index
|= 0x80000000;
1746 void r4k_helper_tlbr (void)
1752 ASID
= env
->CP0_EntryHi
& 0xFF;
1753 idx
= (env
->CP0_Index
& ~0x80000000) % env
->tlb
->nb_tlb
;
1754 tlb
= &env
->tlb
->mmu
.r4k
.tlb
[idx
];
1756 /* If this will change the current ASID, flush qemu's TLB. */
1757 if (ASID
!= tlb
->ASID
)
1758 cpu_mips_tlb_flush (env
, 1);
1760 r4k_mips_tlb_flush_extra(env
, env
->tlb
->nb_tlb
);
1762 env
->CP0_EntryHi
= tlb
->VPN
| tlb
->ASID
;
1763 env
->CP0_PageMask
= tlb
->PageMask
;
1764 env
->CP0_EntryLo0
= tlb
->G
| (tlb
->V0
<< 1) | (tlb
->D0
<< 2) |
1765 (tlb
->C0
<< 3) | (tlb
->PFN
[0] >> 6);
1766 env
->CP0_EntryLo1
= tlb
->G
| (tlb
->V1
<< 1) | (tlb
->D1
<< 2) |
1767 (tlb
->C1
<< 3) | (tlb
->PFN
[1] >> 6);
1770 void helper_tlbwi(void)
1772 env
->tlb
->helper_tlbwi();
1775 void helper_tlbwr(void)
1777 env
->tlb
->helper_tlbwr();
1780 void helper_tlbp(void)
1782 env
->tlb
->helper_tlbp();
1785 void helper_tlbr(void)
1787 env
->tlb
->helper_tlbr();
1791 target_ulong
helper_di (void)
1793 target_ulong t0
= env
->CP0_Status
;
1795 env
->CP0_Status
= t0
& ~(1 << CP0St_IE
);
1796 cpu_mips_update_irq(env
);
1801 target_ulong
helper_ei (void)
1803 target_ulong t0
= env
->CP0_Status
;
1805 env
->CP0_Status
= t0
| (1 << CP0St_IE
);
1806 cpu_mips_update_irq(env
);
1811 static void debug_pre_eret (void)
1813 if (qemu_loglevel_mask(CPU_LOG_EXEC
)) {
1814 qemu_log("ERET: PC " TARGET_FMT_lx
" EPC " TARGET_FMT_lx
,
1815 env
->active_tc
.PC
, env
->CP0_EPC
);
1816 if (env
->CP0_Status
& (1 << CP0St_ERL
))
1817 qemu_log(" ErrorEPC " TARGET_FMT_lx
, env
->CP0_ErrorEPC
);
1818 if (env
->hflags
& MIPS_HFLAG_DM
)
1819 qemu_log(" DEPC " TARGET_FMT_lx
, env
->CP0_DEPC
);
1824 static void debug_post_eret (void)
1826 if (qemu_loglevel_mask(CPU_LOG_EXEC
)) {
1827 qemu_log(" => PC " TARGET_FMT_lx
" EPC " TARGET_FMT_lx
,
1828 env
->active_tc
.PC
, env
->CP0_EPC
);
1829 if (env
->CP0_Status
& (1 << CP0St_ERL
))
1830 qemu_log(" ErrorEPC " TARGET_FMT_lx
, env
->CP0_ErrorEPC
);
1831 if (env
->hflags
& MIPS_HFLAG_DM
)
1832 qemu_log(" DEPC " TARGET_FMT_lx
, env
->CP0_DEPC
);
1833 switch (env
->hflags
& MIPS_HFLAG_KSU
) {
1834 case MIPS_HFLAG_UM
: qemu_log(", UM\n"); break;
1835 case MIPS_HFLAG_SM
: qemu_log(", SM\n"); break;
1836 case MIPS_HFLAG_KM
: qemu_log("\n"); break;
1837 default: cpu_abort(env
, "Invalid MMU mode!\n"); break;
1842 static void set_pc (target_ulong error_pc
)
1844 env
->active_tc
.PC
= error_pc
& ~(target_ulong
)1;
1846 env
->hflags
|= MIPS_HFLAG_M16
;
1848 env
->hflags
&= ~(MIPS_HFLAG_M16
);
1852 void helper_eret (void)
1855 if (env
->CP0_Status
& (1 << CP0St_ERL
)) {
1856 set_pc(env
->CP0_ErrorEPC
);
1857 env
->CP0_Status
&= ~(1 << CP0St_ERL
);
1859 set_pc(env
->CP0_EPC
);
1860 env
->CP0_Status
&= ~(1 << CP0St_EXL
);
1862 compute_hflags(env
);
1867 void helper_deret (void)
1870 set_pc(env
->CP0_DEPC
);
1872 env
->hflags
&= MIPS_HFLAG_DM
;
1873 compute_hflags(env
);
1877 #endif /* !CONFIG_USER_ONLY */
1879 target_ulong
helper_rdhwr_cpunum(void)
1881 if ((env
->hflags
& MIPS_HFLAG_CP0
) ||
1882 (env
->CP0_HWREna
& (1 << 0)))
1883 return env
->CP0_EBase
& 0x3ff;
1885 helper_raise_exception(EXCP_RI
);
1890 target_ulong
helper_rdhwr_synci_step(void)
1892 if ((env
->hflags
& MIPS_HFLAG_CP0
) ||
1893 (env
->CP0_HWREna
& (1 << 1)))
1894 return env
->SYNCI_Step
;
1896 helper_raise_exception(EXCP_RI
);
1901 target_ulong
helper_rdhwr_cc(void)
1903 if ((env
->hflags
& MIPS_HFLAG_CP0
) ||
1904 (env
->CP0_HWREna
& (1 << 2)))
1905 return env
->CP0_Count
;
1907 helper_raise_exception(EXCP_RI
);
1912 target_ulong
helper_rdhwr_ccres(void)
1914 if ((env
->hflags
& MIPS_HFLAG_CP0
) ||
1915 (env
->CP0_HWREna
& (1 << 3)))
1918 helper_raise_exception(EXCP_RI
);
1923 void helper_pmon (int function
)
1927 case 2: /* TODO: char inbyte(int waitflag); */
1928 if (env
->active_tc
.gpr
[4] == 0)
1929 env
->active_tc
.gpr
[2] = -1;
1931 case 11: /* TODO: char inbyte (void); */
1932 env
->active_tc
.gpr
[2] = -1;
1936 printf("%c", (char)(env
->active_tc
.gpr
[4] & 0xFF));
1942 unsigned char *fmt
= (void *)(unsigned long)env
->active_tc
.gpr
[4];
1949 void helper_wait (void)
1952 helper_raise_exception(EXCP_HLT
);
1955 #if !defined(CONFIG_USER_ONLY)
1957 static void do_unaligned_access (target_ulong addr
, int is_write
, int is_user
, void *retaddr
);
1959 #define MMUSUFFIX _mmu
1960 #define ALIGNED_ONLY
1963 #include "softmmu_template.h"
1966 #include "softmmu_template.h"
1969 #include "softmmu_template.h"
1972 #include "softmmu_template.h"
1974 static void do_unaligned_access (target_ulong addr
, int is_write
, int is_user
, void *retaddr
)
1976 env
->CP0_BadVAddr
= addr
;
1977 do_restore_state (retaddr
);
1978 helper_raise_exception ((is_write
== 1) ? EXCP_AdES
: EXCP_AdEL
);
1981 void tlb_fill (target_ulong addr
, int is_write
, int mmu_idx
, void *retaddr
)
1983 TranslationBlock
*tb
;
1984 CPUState
*saved_env
;
1988 /* XXX: hack to restore env in all cases, even if not called from
1991 env
= cpu_single_env
;
1992 ret
= cpu_mips_handle_mmu_fault(env
, addr
, is_write
, mmu_idx
, 1);
1995 /* now we have a real cpu fault */
1996 pc
= (unsigned long)retaddr
;
1997 tb
= tb_find_pc(pc
);
1999 /* the PC is inside the translated code. It means that we have
2000 a virtual CPU fault */
2001 cpu_restore_state(tb
, env
, pc
, NULL
);
2004 helper_raise_exception_err(env
->exception_index
, env
->error_code
);
2009 void do_unassigned_access(target_phys_addr_t addr
, int is_write
, int is_exec
,
2010 int unused
, int size
)
2013 helper_raise_exception(EXCP_IBE
);
2015 helper_raise_exception(EXCP_DBE
);
2017 #endif /* !CONFIG_USER_ONLY */
2019 /* Complex FPU operations which may need stack space. */
2021 #define FLOAT_ONE32 make_float32(0x3f8 << 20)
2022 #define FLOAT_ONE64 make_float64(0x3ffULL << 52)
2023 #define FLOAT_TWO32 make_float32(1 << 30)
2024 #define FLOAT_TWO64 make_float64(1ULL << 62)
2025 #define FLOAT_QNAN32 0x7fbfffff
2026 #define FLOAT_QNAN64 0x7ff7ffffffffffffULL
2027 #define FLOAT_SNAN32 0x7fffffff
2028 #define FLOAT_SNAN64 0x7fffffffffffffffULL
2030 /* convert MIPS rounding mode in FCR31 to IEEE library */
2031 static unsigned int ieee_rm
[] = {
2032 float_round_nearest_even
,
2033 float_round_to_zero
,
2038 #define RESTORE_ROUNDING_MODE \
2039 set_float_rounding_mode(ieee_rm[env->active_fpu.fcr31 & 3], &env->active_fpu.fp_status)
2041 #define RESTORE_FLUSH_MODE \
2042 set_flush_to_zero((env->active_fpu.fcr31 & (1 << 24)) != 0, &env->active_fpu.fp_status);
2044 target_ulong
helper_cfc1 (uint32_t reg
)
2050 arg1
= (int32_t)env
->active_fpu
.fcr0
;
2053 arg1
= ((env
->active_fpu
.fcr31
>> 24) & 0xfe) | ((env
->active_fpu
.fcr31
>> 23) & 0x1);
2056 arg1
= env
->active_fpu
.fcr31
& 0x0003f07c;
2059 arg1
= (env
->active_fpu
.fcr31
& 0x00000f83) | ((env
->active_fpu
.fcr31
>> 22) & 0x4);
2062 arg1
= (int32_t)env
->active_fpu
.fcr31
;
2069 void helper_ctc1 (target_ulong arg1
, uint32_t reg
)
2073 if (arg1
& 0xffffff00)
2075 env
->active_fpu
.fcr31
= (env
->active_fpu
.fcr31
& 0x017fffff) | ((arg1
& 0xfe) << 24) |
2076 ((arg1
& 0x1) << 23);
2079 if (arg1
& 0x007c0000)
2081 env
->active_fpu
.fcr31
= (env
->active_fpu
.fcr31
& 0xfffc0f83) | (arg1
& 0x0003f07c);
2084 if (arg1
& 0x007c0000)
2086 env
->active_fpu
.fcr31
= (env
->active_fpu
.fcr31
& 0xfefff07c) | (arg1
& 0x00000f83) |
2087 ((arg1
& 0x4) << 22);
2090 if (arg1
& 0x007c0000)
2092 env
->active_fpu
.fcr31
= arg1
;
2097 /* set rounding mode */
2098 RESTORE_ROUNDING_MODE
;
2099 /* set flush-to-zero mode */
2101 set_float_exception_flags(0, &env
->active_fpu
.fp_status
);
2102 if ((GET_FP_ENABLE(env
->active_fpu
.fcr31
) | 0x20) & GET_FP_CAUSE(env
->active_fpu
.fcr31
))
2103 helper_raise_exception(EXCP_FPE
);
2106 static inline char ieee_ex_to_mips(char xcpt
)
2108 return (xcpt
& float_flag_inexact
) >> 5 |
2109 (xcpt
& float_flag_underflow
) >> 3 |
2110 (xcpt
& float_flag_overflow
) >> 1 |
2111 (xcpt
& float_flag_divbyzero
) << 1 |
2112 (xcpt
& float_flag_invalid
) << 4;
2115 static inline char mips_ex_to_ieee(char xcpt
)
2117 return (xcpt
& FP_INEXACT
) << 5 |
2118 (xcpt
& FP_UNDERFLOW
) << 3 |
2119 (xcpt
& FP_OVERFLOW
) << 1 |
2120 (xcpt
& FP_DIV0
) >> 1 |
2121 (xcpt
& FP_INVALID
) >> 4;
2124 static inline void update_fcr31(void)
2126 int tmp
= ieee_ex_to_mips(get_float_exception_flags(&env
->active_fpu
.fp_status
));
2128 SET_FP_CAUSE(env
->active_fpu
.fcr31
, tmp
);
2129 if (GET_FP_ENABLE(env
->active_fpu
.fcr31
) & tmp
)
2130 helper_raise_exception(EXCP_FPE
);
2132 UPDATE_FP_FLAGS(env
->active_fpu
.fcr31
, tmp
);
2136 Single precition routines have a "s" suffix, double precision a
2137 "d" suffix, 32bit integer "w", 64bit integer "l", paired single "ps",
2138 paired single lower "pl", paired single upper "pu". */
2140 /* unary operations, modifying fp status */
2141 uint64_t helper_float_sqrt_d(uint64_t fdt0
)
2143 return float64_sqrt(fdt0
, &env
->active_fpu
.fp_status
);
2146 uint32_t helper_float_sqrt_s(uint32_t fst0
)
2148 return float32_sqrt(fst0
, &env
->active_fpu
.fp_status
);
2151 uint64_t helper_float_cvtd_s(uint32_t fst0
)
2155 set_float_exception_flags(0, &env
->active_fpu
.fp_status
);
2156 fdt2
= float32_to_float64(fst0
, &env
->active_fpu
.fp_status
);
2161 uint64_t helper_float_cvtd_w(uint32_t wt0
)
2165 set_float_exception_flags(0, &env
->active_fpu
.fp_status
);
2166 fdt2
= int32_to_float64(wt0
, &env
->active_fpu
.fp_status
);
2171 uint64_t helper_float_cvtd_l(uint64_t dt0
)
2175 set_float_exception_flags(0, &env
->active_fpu
.fp_status
);
2176 fdt2
= int64_to_float64(dt0
, &env
->active_fpu
.fp_status
);
2181 uint64_t helper_float_cvtl_d(uint64_t fdt0
)
2185 set_float_exception_flags(0, &env
->active_fpu
.fp_status
);
2186 dt2
= float64_to_int64(fdt0
, &env
->active_fpu
.fp_status
);
2188 if (GET_FP_CAUSE(env
->active_fpu
.fcr31
) & (FP_OVERFLOW
| FP_INVALID
))
2193 uint64_t helper_float_cvtl_s(uint32_t fst0
)
2197 set_float_exception_flags(0, &env
->active_fpu
.fp_status
);
2198 dt2
= float32_to_int64(fst0
, &env
->active_fpu
.fp_status
);
2200 if (GET_FP_CAUSE(env
->active_fpu
.fcr31
) & (FP_OVERFLOW
| FP_INVALID
))
2205 uint64_t helper_float_cvtps_pw(uint64_t dt0
)
2210 set_float_exception_flags(0, &env
->active_fpu
.fp_status
);
2211 fst2
= int32_to_float32(dt0
& 0XFFFFFFFF, &env
->active_fpu
.fp_status
);
2212 fsth2
= int32_to_float32(dt0
>> 32, &env
->active_fpu
.fp_status
);
2214 return ((uint64_t)fsth2
<< 32) | fst2
;
2217 uint64_t helper_float_cvtpw_ps(uint64_t fdt0
)
2222 set_float_exception_flags(0, &env
->active_fpu
.fp_status
);
2223 wt2
= float32_to_int32(fdt0
& 0XFFFFFFFF, &env
->active_fpu
.fp_status
);
2224 wth2
= float32_to_int32(fdt0
>> 32, &env
->active_fpu
.fp_status
);
2226 if (GET_FP_CAUSE(env
->active_fpu
.fcr31
) & (FP_OVERFLOW
| FP_INVALID
)) {
2228 wth2
= FLOAT_SNAN32
;
2230 return ((uint64_t)wth2
<< 32) | wt2
;
2233 uint32_t helper_float_cvts_d(uint64_t fdt0
)
2237 set_float_exception_flags(0, &env
->active_fpu
.fp_status
);
2238 fst2
= float64_to_float32(fdt0
, &env
->active_fpu
.fp_status
);
2243 uint32_t helper_float_cvts_w(uint32_t wt0
)
2247 set_float_exception_flags(0, &env
->active_fpu
.fp_status
);
2248 fst2
= int32_to_float32(wt0
, &env
->active_fpu
.fp_status
);
2253 uint32_t helper_float_cvts_l(uint64_t dt0
)
2257 set_float_exception_flags(0, &env
->active_fpu
.fp_status
);
2258 fst2
= int64_to_float32(dt0
, &env
->active_fpu
.fp_status
);
2263 uint32_t helper_float_cvts_pl(uint32_t wt0
)
2267 set_float_exception_flags(0, &env
->active_fpu
.fp_status
);
2273 uint32_t helper_float_cvts_pu(uint32_t wth0
)
2277 set_float_exception_flags(0, &env
->active_fpu
.fp_status
);
2283 uint32_t helper_float_cvtw_s(uint32_t fst0
)
2287 set_float_exception_flags(0, &env
->active_fpu
.fp_status
);
2288 wt2
= float32_to_int32(fst0
, &env
->active_fpu
.fp_status
);
2290 if (GET_FP_CAUSE(env
->active_fpu
.fcr31
) & (FP_OVERFLOW
| FP_INVALID
))
2295 uint32_t helper_float_cvtw_d(uint64_t fdt0
)
2299 set_float_exception_flags(0, &env
->active_fpu
.fp_status
);
2300 wt2
= float64_to_int32(fdt0
, &env
->active_fpu
.fp_status
);
2302 if (GET_FP_CAUSE(env
->active_fpu
.fcr31
) & (FP_OVERFLOW
| FP_INVALID
))
2307 uint64_t helper_float_roundl_d(uint64_t fdt0
)
2311 set_float_rounding_mode(float_round_nearest_even
, &env
->active_fpu
.fp_status
);
2312 dt2
= float64_to_int64(fdt0
, &env
->active_fpu
.fp_status
);
2313 RESTORE_ROUNDING_MODE
;
2315 if (GET_FP_CAUSE(env
->active_fpu
.fcr31
) & (FP_OVERFLOW
| FP_INVALID
))
2320 uint64_t helper_float_roundl_s(uint32_t fst0
)
2324 set_float_rounding_mode(float_round_nearest_even
, &env
->active_fpu
.fp_status
);
2325 dt2
= float32_to_int64(fst0
, &env
->active_fpu
.fp_status
);
2326 RESTORE_ROUNDING_MODE
;
2328 if (GET_FP_CAUSE(env
->active_fpu
.fcr31
) & (FP_OVERFLOW
| FP_INVALID
))
2333 uint32_t helper_float_roundw_d(uint64_t fdt0
)
2337 set_float_rounding_mode(float_round_nearest_even
, &env
->active_fpu
.fp_status
);
2338 wt2
= float64_to_int32(fdt0
, &env
->active_fpu
.fp_status
);
2339 RESTORE_ROUNDING_MODE
;
2341 if (GET_FP_CAUSE(env
->active_fpu
.fcr31
) & (FP_OVERFLOW
| FP_INVALID
))
2346 uint32_t helper_float_roundw_s(uint32_t fst0
)
2350 set_float_rounding_mode(float_round_nearest_even
, &env
->active_fpu
.fp_status
);
2351 wt2
= float32_to_int32(fst0
, &env
->active_fpu
.fp_status
);
2352 RESTORE_ROUNDING_MODE
;
2354 if (GET_FP_CAUSE(env
->active_fpu
.fcr31
) & (FP_OVERFLOW
| FP_INVALID
))
2359 uint64_t helper_float_truncl_d(uint64_t fdt0
)
2363 dt2
= float64_to_int64_round_to_zero(fdt0
, &env
->active_fpu
.fp_status
);
2365 if (GET_FP_CAUSE(env
->active_fpu
.fcr31
) & (FP_OVERFLOW
| FP_INVALID
))
2370 uint64_t helper_float_truncl_s(uint32_t fst0
)
2374 dt2
= float32_to_int64_round_to_zero(fst0
, &env
->active_fpu
.fp_status
);
2376 if (GET_FP_CAUSE(env
->active_fpu
.fcr31
) & (FP_OVERFLOW
| FP_INVALID
))
2381 uint32_t helper_float_truncw_d(uint64_t fdt0
)
2385 wt2
= float64_to_int32_round_to_zero(fdt0
, &env
->active_fpu
.fp_status
);
2387 if (GET_FP_CAUSE(env
->active_fpu
.fcr31
) & (FP_OVERFLOW
| FP_INVALID
))
2392 uint32_t helper_float_truncw_s(uint32_t fst0
)
2396 wt2
= float32_to_int32_round_to_zero(fst0
, &env
->active_fpu
.fp_status
);
2398 if (GET_FP_CAUSE(env
->active_fpu
.fcr31
) & (FP_OVERFLOW
| FP_INVALID
))
2403 uint64_t helper_float_ceill_d(uint64_t fdt0
)
2407 set_float_rounding_mode(float_round_up
, &env
->active_fpu
.fp_status
);
2408 dt2
= float64_to_int64(fdt0
, &env
->active_fpu
.fp_status
);
2409 RESTORE_ROUNDING_MODE
;
2411 if (GET_FP_CAUSE(env
->active_fpu
.fcr31
) & (FP_OVERFLOW
| FP_INVALID
))
2416 uint64_t helper_float_ceill_s(uint32_t fst0
)
2420 set_float_rounding_mode(float_round_up
, &env
->active_fpu
.fp_status
);
2421 dt2
= float32_to_int64(fst0
, &env
->active_fpu
.fp_status
);
2422 RESTORE_ROUNDING_MODE
;
2424 if (GET_FP_CAUSE(env
->active_fpu
.fcr31
) & (FP_OVERFLOW
| FP_INVALID
))
2429 uint32_t helper_float_ceilw_d(uint64_t fdt0
)
2433 set_float_rounding_mode(float_round_up
, &env
->active_fpu
.fp_status
);
2434 wt2
= float64_to_int32(fdt0
, &env
->active_fpu
.fp_status
);
2435 RESTORE_ROUNDING_MODE
;
2437 if (GET_FP_CAUSE(env
->active_fpu
.fcr31
) & (FP_OVERFLOW
| FP_INVALID
))
2442 uint32_t helper_float_ceilw_s(uint32_t fst0
)
2446 set_float_rounding_mode(float_round_up
, &env
->active_fpu
.fp_status
);
2447 wt2
= float32_to_int32(fst0
, &env
->active_fpu
.fp_status
);
2448 RESTORE_ROUNDING_MODE
;
2450 if (GET_FP_CAUSE(env
->active_fpu
.fcr31
) & (FP_OVERFLOW
| FP_INVALID
))
2455 uint64_t helper_float_floorl_d(uint64_t fdt0
)
2459 set_float_rounding_mode(float_round_down
, &env
->active_fpu
.fp_status
);
2460 dt2
= float64_to_int64(fdt0
, &env
->active_fpu
.fp_status
);
2461 RESTORE_ROUNDING_MODE
;
2463 if (GET_FP_CAUSE(env
->active_fpu
.fcr31
) & (FP_OVERFLOW
| FP_INVALID
))
2468 uint64_t helper_float_floorl_s(uint32_t fst0
)
2472 set_float_rounding_mode(float_round_down
, &env
->active_fpu
.fp_status
);
2473 dt2
= float32_to_int64(fst0
, &env
->active_fpu
.fp_status
);
2474 RESTORE_ROUNDING_MODE
;
2476 if (GET_FP_CAUSE(env
->active_fpu
.fcr31
) & (FP_OVERFLOW
| FP_INVALID
))
2481 uint32_t helper_float_floorw_d(uint64_t fdt0
)
2485 set_float_rounding_mode(float_round_down
, &env
->active_fpu
.fp_status
);
2486 wt2
= float64_to_int32(fdt0
, &env
->active_fpu
.fp_status
);
2487 RESTORE_ROUNDING_MODE
;
2489 if (GET_FP_CAUSE(env
->active_fpu
.fcr31
) & (FP_OVERFLOW
| FP_INVALID
))
2494 uint32_t helper_float_floorw_s(uint32_t fst0
)
2498 set_float_rounding_mode(float_round_down
, &env
->active_fpu
.fp_status
);
2499 wt2
= float32_to_int32(fst0
, &env
->active_fpu
.fp_status
);
2500 RESTORE_ROUNDING_MODE
;
2502 if (GET_FP_CAUSE(env
->active_fpu
.fcr31
) & (FP_OVERFLOW
| FP_INVALID
))
2507 /* unary operations, not modifying fp status */
2508 #define FLOAT_UNOP(name) \
2509 uint64_t helper_float_ ## name ## _d(uint64_t fdt0) \
2511 return float64_ ## name(fdt0); \
2513 uint32_t helper_float_ ## name ## _s(uint32_t fst0) \
2515 return float32_ ## name(fst0); \
2517 uint64_t helper_float_ ## name ## _ps(uint64_t fdt0) \
2522 wt0 = float32_ ## name(fdt0 & 0XFFFFFFFF); \
2523 wth0 = float32_ ## name(fdt0 >> 32); \
2524 return ((uint64_t)wth0 << 32) | wt0; \
2530 /* MIPS specific unary operations */
2531 uint64_t helper_float_recip_d(uint64_t fdt0
)
2535 set_float_exception_flags(0, &env
->active_fpu
.fp_status
);
2536 fdt2
= float64_div(FLOAT_ONE64
, fdt0
, &env
->active_fpu
.fp_status
);
2541 uint32_t helper_float_recip_s(uint32_t fst0
)
2545 set_float_exception_flags(0, &env
->active_fpu
.fp_status
);
2546 fst2
= float32_div(FLOAT_ONE32
, fst0
, &env
->active_fpu
.fp_status
);
2551 uint64_t helper_float_rsqrt_d(uint64_t fdt0
)
2555 set_float_exception_flags(0, &env
->active_fpu
.fp_status
);
2556 fdt2
= float64_sqrt(fdt0
, &env
->active_fpu
.fp_status
);
2557 fdt2
= float64_div(FLOAT_ONE64
, fdt2
, &env
->active_fpu
.fp_status
);
2562 uint32_t helper_float_rsqrt_s(uint32_t fst0
)
2566 set_float_exception_flags(0, &env
->active_fpu
.fp_status
);
2567 fst2
= float32_sqrt(fst0
, &env
->active_fpu
.fp_status
);
2568 fst2
= float32_div(FLOAT_ONE32
, fst2
, &env
->active_fpu
.fp_status
);
2573 uint64_t helper_float_recip1_d(uint64_t fdt0
)
2577 set_float_exception_flags(0, &env
->active_fpu
.fp_status
);
2578 fdt2
= float64_div(FLOAT_ONE64
, fdt0
, &env
->active_fpu
.fp_status
);
2583 uint32_t helper_float_recip1_s(uint32_t fst0
)
2587 set_float_exception_flags(0, &env
->active_fpu
.fp_status
);
2588 fst2
= float32_div(FLOAT_ONE32
, fst0
, &env
->active_fpu
.fp_status
);
2593 uint64_t helper_float_recip1_ps(uint64_t fdt0
)
2598 set_float_exception_flags(0, &env
->active_fpu
.fp_status
);
2599 fst2
= float32_div(FLOAT_ONE32
, fdt0
& 0XFFFFFFFF, &env
->active_fpu
.fp_status
);
2600 fsth2
= float32_div(FLOAT_ONE32
, fdt0
>> 32, &env
->active_fpu
.fp_status
);
2602 return ((uint64_t)fsth2
<< 32) | fst2
;
2605 uint64_t helper_float_rsqrt1_d(uint64_t fdt0
)
2609 set_float_exception_flags(0, &env
->active_fpu
.fp_status
);
2610 fdt2
= float64_sqrt(fdt0
, &env
->active_fpu
.fp_status
);
2611 fdt2
= float64_div(FLOAT_ONE64
, fdt2
, &env
->active_fpu
.fp_status
);
2616 uint32_t helper_float_rsqrt1_s(uint32_t fst0
)
2620 set_float_exception_flags(0, &env
->active_fpu
.fp_status
);
2621 fst2
= float32_sqrt(fst0
, &env
->active_fpu
.fp_status
);
2622 fst2
= float32_div(FLOAT_ONE32
, fst2
, &env
->active_fpu
.fp_status
);
2627 uint64_t helper_float_rsqrt1_ps(uint64_t fdt0
)
2632 set_float_exception_flags(0, &env
->active_fpu
.fp_status
);
2633 fst2
= float32_sqrt(fdt0
& 0XFFFFFFFF, &env
->active_fpu
.fp_status
);
2634 fsth2
= float32_sqrt(fdt0
>> 32, &env
->active_fpu
.fp_status
);
2635 fst2
= float32_div(FLOAT_ONE32
, fst2
, &env
->active_fpu
.fp_status
);
2636 fsth2
= float32_div(FLOAT_ONE32
, fsth2
, &env
->active_fpu
.fp_status
);
2638 return ((uint64_t)fsth2
<< 32) | fst2
;
2641 #define FLOAT_OP(name, p) void helper_float_##name##_##p(void)
2643 /* binary operations */
2644 #define FLOAT_BINOP(name) \
2645 uint64_t helper_float_ ## name ## _d(uint64_t fdt0, uint64_t fdt1) \
2649 set_float_exception_flags(0, &env->active_fpu.fp_status); \
2650 dt2 = float64_ ## name (fdt0, fdt1, &env->active_fpu.fp_status); \
2652 if (GET_FP_CAUSE(env->active_fpu.fcr31) & FP_INVALID) \
2653 dt2 = FLOAT_QNAN64; \
2657 uint32_t helper_float_ ## name ## _s(uint32_t fst0, uint32_t fst1) \
2661 set_float_exception_flags(0, &env->active_fpu.fp_status); \
2662 wt2 = float32_ ## name (fst0, fst1, &env->active_fpu.fp_status); \
2664 if (GET_FP_CAUSE(env->active_fpu.fcr31) & FP_INVALID) \
2665 wt2 = FLOAT_QNAN32; \
2669 uint64_t helper_float_ ## name ## _ps(uint64_t fdt0, uint64_t fdt1) \
2671 uint32_t fst0 = fdt0 & 0XFFFFFFFF; \
2672 uint32_t fsth0 = fdt0 >> 32; \
2673 uint32_t fst1 = fdt1 & 0XFFFFFFFF; \
2674 uint32_t fsth1 = fdt1 >> 32; \
2678 set_float_exception_flags(0, &env->active_fpu.fp_status); \
2679 wt2 = float32_ ## name (fst0, fst1, &env->active_fpu.fp_status); \
2680 wth2 = float32_ ## name (fsth0, fsth1, &env->active_fpu.fp_status); \
2682 if (GET_FP_CAUSE(env->active_fpu.fcr31) & FP_INVALID) { \
2683 wt2 = FLOAT_QNAN32; \
2684 wth2 = FLOAT_QNAN32; \
2686 return ((uint64_t)wth2 << 32) | wt2; \
2695 /* ternary operations */
/* Ternary FP helpers: computes (a <name1> b) <name2> c in double, single
 * and paired-single formats.  Each step rounds separately (non-fused);
 * exception flags accumulate in env->active_fpu.fp_status. */
#define FLOAT_TERNOP(name1, name2)                                        \
uint64_t helper_float_ ## name1 ## name2 ## _d(uint64_t fdt0, uint64_t fdt1, \
                                               uint64_t fdt2)             \
{                                                                         \
    fdt0 = float64_ ## name1 (fdt0, fdt1, &env->active_fpu.fp_status);    \
    return float64_ ## name2 (fdt0, fdt2, &env->active_fpu.fp_status);    \
}                                                                         \
                                                                          \
uint32_t helper_float_ ## name1 ## name2 ## _s(uint32_t fst0, uint32_t fst1, \
                                               uint32_t fst2)             \
{                                                                         \
    fst0 = float32_ ## name1 (fst0, fst1, &env->active_fpu.fp_status);    \
    return float32_ ## name2 (fst0, fst2, &env->active_fpu.fp_status);    \
}                                                                         \
                                                                          \
uint64_t helper_float_ ## name1 ## name2 ## _ps(uint64_t fdt0, uint64_t fdt1, \
                                                uint64_t fdt2)            \
{                                                                         \
    /* Paired-single: low word in bits 31..0, high word in bits 63..32 */ \
    uint32_t fst0 = fdt0 & 0XFFFFFFFF;                                    \
    uint32_t fsth0 = fdt0 >> 32;                                          \
    uint32_t fst1 = fdt1 & 0XFFFFFFFF;                                    \
    uint32_t fsth1 = fdt1 >> 32;                                          \
    uint32_t fst2 = fdt2 & 0XFFFFFFFF;                                    \
    uint32_t fsth2 = fdt2 >> 32;                                          \
                                                                          \
    fst0 = float32_ ## name1 (fst0, fst1, &env->active_fpu.fp_status);    \
    fsth0 = float32_ ## name1 (fsth0, fsth1, &env->active_fpu.fp_status); \
    fst2 = float32_ ## name2 (fst0, fst2, &env->active_fpu.fp_status);    \
    fsth2 = float32_ ## name2 (fsth0, fsth2, &env->active_fpu.fp_status); \
    return ((uint64_t)fsth2 << 32) | fst2;                                \
}
2728 FLOAT_TERNOP(mul
, add
)
2729 FLOAT_TERNOP(mul
, sub
)
2732 /* negated ternary operations */
/* Negated ternary FP helpers: computes -((a <name1> b) <name2> c) in
 * double, single and paired-single formats (NMADD/NMSUB style).  The
 * final negation is a sign-bit flip (float*_chs), which never rounds. */
#define FLOAT_NTERNOP(name1, name2)                                       \
uint64_t helper_float_n ## name1 ## name2 ## _d(uint64_t fdt0, uint64_t fdt1, \
                                                uint64_t fdt2)            \
{                                                                         \
    fdt0 = float64_ ## name1 (fdt0, fdt1, &env->active_fpu.fp_status);    \
    fdt2 = float64_ ## name2 (fdt0, fdt2, &env->active_fpu.fp_status);    \
    return float64_chs(fdt2);                                             \
}                                                                         \
                                                                          \
uint32_t helper_float_n ## name1 ## name2 ## _s(uint32_t fst0, uint32_t fst1, \
                                                uint32_t fst2)            \
{                                                                         \
    fst0 = float32_ ## name1 (fst0, fst1, &env->active_fpu.fp_status);    \
    fst2 = float32_ ## name2 (fst0, fst2, &env->active_fpu.fp_status);    \
    return float32_chs(fst2);                                             \
}                                                                         \
                                                                          \
uint64_t helper_float_n ## name1 ## name2 ## _ps(uint64_t fdt0, uint64_t fdt1,\
                                                 uint64_t fdt2)           \
{                                                                         \
    /* Paired-single: low word in bits 31..0, high word in bits 63..32 */ \
    uint32_t fst0 = fdt0 & 0XFFFFFFFF;                                    \
    uint32_t fsth0 = fdt0 >> 32;                                          \
    uint32_t fst1 = fdt1 & 0XFFFFFFFF;                                    \
    uint32_t fsth1 = fdt1 >> 32;                                          \
    uint32_t fst2 = fdt2 & 0XFFFFFFFF;                                    \
    uint32_t fsth2 = fdt2 >> 32;                                          \
                                                                          \
    fst0 = float32_ ## name1 (fst0, fst1, &env->active_fpu.fp_status);    \
    fsth0 = float32_ ## name1 (fsth0, fsth1, &env->active_fpu.fp_status); \
    fst2 = float32_ ## name2 (fst0, fst2, &env->active_fpu.fp_status);    \
    fsth2 = float32_ ## name2 (fsth0, fsth2, &env->active_fpu.fp_status); \
    fst2 = float32_chs(fst2);                                             \
    fsth2 = float32_chs(fsth2);                                           \
    return ((uint64_t)fsth2 << 32) | fst2;                                \
}
2769 FLOAT_NTERNOP(mul
, add
)
2770 FLOAT_NTERNOP(mul
, sub
)
2771 #undef FLOAT_NTERNOP
2773 /* MIPS specific binary operations */
2774 uint64_t helper_float_recip2_d(uint64_t fdt0
, uint64_t fdt2
)
2776 set_float_exception_flags(0, &env
->active_fpu
.fp_status
);
2777 fdt2
= float64_mul(fdt0
, fdt2
, &env
->active_fpu
.fp_status
);
2778 fdt2
= float64_chs(float64_sub(fdt2
, FLOAT_ONE64
, &env
->active_fpu
.fp_status
));
2783 uint32_t helper_float_recip2_s(uint32_t fst0
, uint32_t fst2
)
2785 set_float_exception_flags(0, &env
->active_fpu
.fp_status
);
2786 fst2
= float32_mul(fst0
, fst2
, &env
->active_fpu
.fp_status
);
2787 fst2
= float32_chs(float32_sub(fst2
, FLOAT_ONE32
, &env
->active_fpu
.fp_status
));
2792 uint64_t helper_float_recip2_ps(uint64_t fdt0
, uint64_t fdt2
)
2794 uint32_t fst0
= fdt0
& 0XFFFFFFFF;
2795 uint32_t fsth0
= fdt0
>> 32;
2796 uint32_t fst2
= fdt2
& 0XFFFFFFFF;
2797 uint32_t fsth2
= fdt2
>> 32;
2799 set_float_exception_flags(0, &env
->active_fpu
.fp_status
);
2800 fst2
= float32_mul(fst0
, fst2
, &env
->active_fpu
.fp_status
);
2801 fsth2
= float32_mul(fsth0
, fsth2
, &env
->active_fpu
.fp_status
);
2802 fst2
= float32_chs(float32_sub(fst2
, FLOAT_ONE32
, &env
->active_fpu
.fp_status
));
2803 fsth2
= float32_chs(float32_sub(fsth2
, FLOAT_ONE32
, &env
->active_fpu
.fp_status
));
2805 return ((uint64_t)fsth2
<< 32) | fst2
;
2808 uint64_t helper_float_rsqrt2_d(uint64_t fdt0
, uint64_t fdt2
)
2810 set_float_exception_flags(0, &env
->active_fpu
.fp_status
);
2811 fdt2
= float64_mul(fdt0
, fdt2
, &env
->active_fpu
.fp_status
);
2812 fdt2
= float64_sub(fdt2
, FLOAT_ONE64
, &env
->active_fpu
.fp_status
);
2813 fdt2
= float64_chs(float64_div(fdt2
, FLOAT_TWO64
, &env
->active_fpu
.fp_status
));
2818 uint32_t helper_float_rsqrt2_s(uint32_t fst0
, uint32_t fst2
)
2820 set_float_exception_flags(0, &env
->active_fpu
.fp_status
);
2821 fst2
= float32_mul(fst0
, fst2
, &env
->active_fpu
.fp_status
);
2822 fst2
= float32_sub(fst2
, FLOAT_ONE32
, &env
->active_fpu
.fp_status
);
2823 fst2
= float32_chs(float32_div(fst2
, FLOAT_TWO32
, &env
->active_fpu
.fp_status
));
2828 uint64_t helper_float_rsqrt2_ps(uint64_t fdt0
, uint64_t fdt2
)
2830 uint32_t fst0
= fdt0
& 0XFFFFFFFF;
2831 uint32_t fsth0
= fdt0
>> 32;
2832 uint32_t fst2
= fdt2
& 0XFFFFFFFF;
2833 uint32_t fsth2
= fdt2
>> 32;
2835 set_float_exception_flags(0, &env
->active_fpu
.fp_status
);
2836 fst2
= float32_mul(fst0
, fst2
, &env
->active_fpu
.fp_status
);
2837 fsth2
= float32_mul(fsth0
, fsth2
, &env
->active_fpu
.fp_status
);
2838 fst2
= float32_sub(fst2
, FLOAT_ONE32
, &env
->active_fpu
.fp_status
);
2839 fsth2
= float32_sub(fsth2
, FLOAT_ONE32
, &env
->active_fpu
.fp_status
);
2840 fst2
= float32_chs(float32_div(fst2
, FLOAT_TWO32
, &env
->active_fpu
.fp_status
));
2841 fsth2
= float32_chs(float32_div(fsth2
, FLOAT_TWO32
, &env
->active_fpu
.fp_status
));
2843 return ((uint64_t)fsth2
<< 32) | fst2
;
2846 uint64_t helper_float_addr_ps(uint64_t fdt0
, uint64_t fdt1
)
2848 uint32_t fst0
= fdt0
& 0XFFFFFFFF;
2849 uint32_t fsth0
= fdt0
>> 32;
2850 uint32_t fst1
= fdt1
& 0XFFFFFFFF;
2851 uint32_t fsth1
= fdt1
>> 32;
2855 set_float_exception_flags(0, &env
->active_fpu
.fp_status
);
2856 fst2
= float32_add (fst0
, fsth0
, &env
->active_fpu
.fp_status
);
2857 fsth2
= float32_add (fst1
, fsth1
, &env
->active_fpu
.fp_status
);
2859 return ((uint64_t)fsth2
<< 32) | fst2
;
2862 uint64_t helper_float_mulr_ps(uint64_t fdt0
, uint64_t fdt1
)
2864 uint32_t fst0
= fdt0
& 0XFFFFFFFF;
2865 uint32_t fsth0
= fdt0
>> 32;
2866 uint32_t fst1
= fdt1
& 0XFFFFFFFF;
2867 uint32_t fsth1
= fdt1
>> 32;
2871 set_float_exception_flags(0, &env
->active_fpu
.fp_status
);
2872 fst2
= float32_mul (fst0
, fsth0
, &env
->active_fpu
.fp_status
);
2873 fsth2
= float32_mul (fst1
, fsth1
, &env
->active_fpu
.fp_status
);
2875 return ((uint64_t)fsth2
<< 32) | fst2
;
2878 /* compare operations */
/* C.cond.D / CABS.cond.D helpers: evaluate "cond" and set or clear FP
 * condition code "cc".  The CABS variant (MIPS-3D) compares magnitudes.
 * "cond" must be evaluated before update_fcr31() so the comparison's
 * softfloat flags are folded into fcr31. */
#define FOP_COND_D(op, cond)                                   \
void helper_cmp_d_ ## op (uint64_t fdt0, uint64_t fdt1, int cc)    \
{                                                              \
    int c = cond;                                              \
    update_fcr31();                                            \
    if (c)                                                     \
        SET_FP_COND(cc, env->active_fpu);                      \
    else                                                       \
        CLEAR_FP_COND(cc, env->active_fpu);                    \
}                                                              \
void helper_cmpabs_d_ ## op (uint64_t fdt0, uint64_t fdt1, int cc) \
{                                                              \
    int c;                                                     \
    fdt0 = float64_abs(fdt0);                                  \
    fdt1 = float64_abs(fdt1);                                  \
    c = cond;                                                  \
    update_fcr31();                                            \
    if (c)                                                     \
        SET_FP_COND(cc, env->active_fpu);                      \
    else                                                       \
        CLEAR_FP_COND(cc, env->active_fpu);                    \
}
2902 static int float64_is_unordered(int sig
, float64 a
, float64 b STATUS_PARAM
)
2904 if (float64_is_signaling_nan(a
) ||
2905 float64_is_signaling_nan(b
) ||
2906 (sig
&& (float64_is_nan(a
) || float64_is_nan(b
)))) {
2907 float_raise(float_flag_invalid
, status
);
2909 } else if (float64_is_nan(a
) || float64_is_nan(b
)) {
2916 /* NOTE: the comma operator will make "cond" to eval to false,
2917 * but float*_is_unordered() is still called. */
2918 FOP_COND_D(f
, (float64_is_unordered(0, fdt1
, fdt0
, &env
->active_fpu
.fp_status
), 0))
2919 FOP_COND_D(un
, float64_is_unordered(0, fdt1
, fdt0
, &env
->active_fpu
.fp_status
))
2920 FOP_COND_D(eq
, !float64_is_unordered(0, fdt1
, fdt0
, &env
->active_fpu
.fp_status
) && float64_eq(fdt0
, fdt1
, &env
->active_fpu
.fp_status
))
2921 FOP_COND_D(ueq
, float64_is_unordered(0, fdt1
, fdt0
, &env
->active_fpu
.fp_status
) || float64_eq(fdt0
, fdt1
, &env
->active_fpu
.fp_status
))
2922 FOP_COND_D(olt
, !float64_is_unordered(0, fdt1
, fdt0
, &env
->active_fpu
.fp_status
) && float64_lt(fdt0
, fdt1
, &env
->active_fpu
.fp_status
))
2923 FOP_COND_D(ult
, float64_is_unordered(0, fdt1
, fdt0
, &env
->active_fpu
.fp_status
) || float64_lt(fdt0
, fdt1
, &env
->active_fpu
.fp_status
))
2924 FOP_COND_D(ole
, !float64_is_unordered(0, fdt1
, fdt0
, &env
->active_fpu
.fp_status
) && float64_le(fdt0
, fdt1
, &env
->active_fpu
.fp_status
))
2925 FOP_COND_D(ule
, float64_is_unordered(0, fdt1
, fdt0
, &env
->active_fpu
.fp_status
) || float64_le(fdt0
, fdt1
, &env
->active_fpu
.fp_status
))
2926 /* NOTE: the comma operator will make "cond" to eval to false,
2927 * but float*_is_unordered() is still called. */
2928 FOP_COND_D(sf
, (float64_is_unordered(1, fdt1
, fdt0
, &env
->active_fpu
.fp_status
), 0))
2929 FOP_COND_D(ngle
,float64_is_unordered(1, fdt1
, fdt0
, &env
->active_fpu
.fp_status
))
2930 FOP_COND_D(seq
, !float64_is_unordered(1, fdt1
, fdt0
, &env
->active_fpu
.fp_status
) && float64_eq(fdt0
, fdt1
, &env
->active_fpu
.fp_status
))
2931 FOP_COND_D(ngl
, float64_is_unordered(1, fdt1
, fdt0
, &env
->active_fpu
.fp_status
) || float64_eq(fdt0
, fdt1
, &env
->active_fpu
.fp_status
))
2932 FOP_COND_D(lt
, !float64_is_unordered(1, fdt1
, fdt0
, &env
->active_fpu
.fp_status
) && float64_lt(fdt0
, fdt1
, &env
->active_fpu
.fp_status
))
2933 FOP_COND_D(nge
, float64_is_unordered(1, fdt1
, fdt0
, &env
->active_fpu
.fp_status
) || float64_lt(fdt0
, fdt1
, &env
->active_fpu
.fp_status
))
2934 FOP_COND_D(le
, !float64_is_unordered(1, fdt1
, fdt0
, &env
->active_fpu
.fp_status
) && float64_le(fdt0
, fdt1
, &env
->active_fpu
.fp_status
))
2935 FOP_COND_D(ngt
, float64_is_unordered(1, fdt1
, fdt0
, &env
->active_fpu
.fp_status
) || float64_le(fdt0
, fdt1
, &env
->active_fpu
.fp_status
))
/* C.cond.S / CABS.cond.S helpers: evaluate "cond" and set or clear FP
 * condition code "cc".  The CABS variant (MIPS-3D) compares magnitudes.
 * "cond" must be evaluated before update_fcr31() so the comparison's
 * softfloat flags are folded into fcr31. */
#define FOP_COND_S(op, cond)                                   \
void helper_cmp_s_ ## op (uint32_t fst0, uint32_t fst1, int cc)    \
{                                                              \
    int c = cond;                                              \
    update_fcr31();                                            \
    if (c)                                                     \
        SET_FP_COND(cc, env->active_fpu);                      \
    else                                                       \
        CLEAR_FP_COND(cc, env->active_fpu);                    \
}                                                              \
void helper_cmpabs_s_ ## op (uint32_t fst0, uint32_t fst1, int cc) \
{                                                              \
    int c;                                                     \
    fst0 = float32_abs(fst0);                                  \
    fst1 = float32_abs(fst1);                                  \
    c = cond;                                                  \
    update_fcr31();                                            \
    if (c)                                                     \
        SET_FP_COND(cc, env->active_fpu);                      \
    else                                                       \
        CLEAR_FP_COND(cc, env->active_fpu);                    \
}
2960 static flag
float32_is_unordered(int sig
, float32 a
, float32 b STATUS_PARAM
)
2962 if (float32_is_signaling_nan(a
) ||
2963 float32_is_signaling_nan(b
) ||
2964 (sig
&& (float32_is_nan(a
) || float32_is_nan(b
)))) {
2965 float_raise(float_flag_invalid
, status
);
2967 } else if (float32_is_nan(a
) || float32_is_nan(b
)) {
2974 /* NOTE: the comma operator will make "cond" to eval to false,
2975 * but float*_is_unordered() is still called. */
2976 FOP_COND_S(f
, (float32_is_unordered(0, fst1
, fst0
, &env
->active_fpu
.fp_status
), 0))
2977 FOP_COND_S(un
, float32_is_unordered(0, fst1
, fst0
, &env
->active_fpu
.fp_status
))
2978 FOP_COND_S(eq
, !float32_is_unordered(0, fst1
, fst0
, &env
->active_fpu
.fp_status
) && float32_eq(fst0
, fst1
, &env
->active_fpu
.fp_status
))
2979 FOP_COND_S(ueq
, float32_is_unordered(0, fst1
, fst0
, &env
->active_fpu
.fp_status
) || float32_eq(fst0
, fst1
, &env
->active_fpu
.fp_status
))
2980 FOP_COND_S(olt
, !float32_is_unordered(0, fst1
, fst0
, &env
->active_fpu
.fp_status
) && float32_lt(fst0
, fst1
, &env
->active_fpu
.fp_status
))
2981 FOP_COND_S(ult
, float32_is_unordered(0, fst1
, fst0
, &env
->active_fpu
.fp_status
) || float32_lt(fst0
, fst1
, &env
->active_fpu
.fp_status
))
2982 FOP_COND_S(ole
, !float32_is_unordered(0, fst1
, fst0
, &env
->active_fpu
.fp_status
) && float32_le(fst0
, fst1
, &env
->active_fpu
.fp_status
))
2983 FOP_COND_S(ule
, float32_is_unordered(0, fst1
, fst0
, &env
->active_fpu
.fp_status
) || float32_le(fst0
, fst1
, &env
->active_fpu
.fp_status
))
2984 /* NOTE: the comma operator will make "cond" to eval to false,
2985 * but float*_is_unordered() is still called. */
2986 FOP_COND_S(sf
, (float32_is_unordered(1, fst1
, fst0
, &env
->active_fpu
.fp_status
), 0))
2987 FOP_COND_S(ngle
,float32_is_unordered(1, fst1
, fst0
, &env
->active_fpu
.fp_status
))
2988 FOP_COND_S(seq
, !float32_is_unordered(1, fst1
, fst0
, &env
->active_fpu
.fp_status
) && float32_eq(fst0
, fst1
, &env
->active_fpu
.fp_status
))
2989 FOP_COND_S(ngl
, float32_is_unordered(1, fst1
, fst0
, &env
->active_fpu
.fp_status
) || float32_eq(fst0
, fst1
, &env
->active_fpu
.fp_status
))
2990 FOP_COND_S(lt
, !float32_is_unordered(1, fst1
, fst0
, &env
->active_fpu
.fp_status
) && float32_lt(fst0
, fst1
, &env
->active_fpu
.fp_status
))
2991 FOP_COND_S(nge
, float32_is_unordered(1, fst1
, fst0
, &env
->active_fpu
.fp_status
) || float32_lt(fst0
, fst1
, &env
->active_fpu
.fp_status
))
2992 FOP_COND_S(le
, !float32_is_unordered(1, fst1
, fst0
, &env
->active_fpu
.fp_status
) && float32_le(fst0
, fst1
, &env
->active_fpu
.fp_status
))
2993 FOP_COND_S(ngt
, float32_is_unordered(1, fst1
, fst0
, &env
->active_fpu
.fp_status
) || float32_le(fst0
, fst1
, &env
->active_fpu
.fp_status
))
/* C.cond.PS / CABS.cond.PS helpers: evaluate "condl"/"condh" on the low
 * and high paired-single halves and set/clear condition codes cc and cc+1.
 *
 * BUG FIX: the plain helper_cmp_ps_* variant previously took
 * float32_abs() of its operands, which is the CABS.cond semantics.
 * C.cond.PS must compare the operands as-is — consistent with
 * FOP_COND_D / FOP_COND_S above, where only the cmpabs variant uses
 * float*_abs(). */
#define FOP_COND_PS(op, condl, condh)                    \
void helper_cmp_ps_ ## op (uint64_t fdt0, uint64_t fdt1, int cc)    \
{                                                        \
    uint32_t fst0 = fdt0 & 0XFFFFFFFF;                   \
    uint32_t fsth0 = fdt0 >> 32;                         \
    uint32_t fst1 = fdt1 & 0XFFFFFFFF;                   \
    uint32_t fsth1 = fdt1 >> 32;                         \
    int cl = condl;                                      \
    int ch = condh;                                      \
                                                         \
    update_fcr31();                                      \
    if (cl)                                              \
        SET_FP_COND(cc, env->active_fpu);                \
    else                                                 \
        CLEAR_FP_COND(cc, env->active_fpu);              \
    if (ch)                                              \
        SET_FP_COND(cc + 1, env->active_fpu);            \
    else                                                 \
        CLEAR_FP_COND(cc + 1, env->active_fpu);          \
}                                                        \
void helper_cmpabs_ps_ ## op (uint64_t fdt0, uint64_t fdt1, int cc) \
{                                                        \
    uint32_t fst0 = float32_abs(fdt0 & 0XFFFFFFFF);      \
    uint32_t fsth0 = float32_abs(fdt0 >> 32);            \
    uint32_t fst1 = float32_abs(fdt1 & 0XFFFFFFFF);      \
    uint32_t fsth1 = float32_abs(fdt1 >> 32);            \
    int cl = condl;                                      \
    int ch = condh;                                      \
                                                         \
    update_fcr31();                                      \
    if (cl)                                              \
        SET_FP_COND(cc, env->active_fpu);                \
    else                                                 \
        CLEAR_FP_COND(cc, env->active_fpu);              \
    if (ch)                                              \
        SET_FP_COND(cc + 1, env->active_fpu);            \
    else                                                 \
        CLEAR_FP_COND(cc + 1, env->active_fpu);          \
}
3035 /* NOTE: the comma operator will make "cond" to eval to false,
3036 * but float*_is_unordered() is still called. */
3037 FOP_COND_PS(f
, (float32_is_unordered(0, fst1
, fst0
, &env
->active_fpu
.fp_status
), 0),
3038 (float32_is_unordered(0, fsth1
, fsth0
, &env
->active_fpu
.fp_status
), 0))
3039 FOP_COND_PS(un
, float32_is_unordered(0, fst1
, fst0
, &env
->active_fpu
.fp_status
),
3040 float32_is_unordered(0, fsth1
, fsth0
, &env
->active_fpu
.fp_status
))
3041 FOP_COND_PS(eq
, !float32_is_unordered(0, fst1
, fst0
, &env
->active_fpu
.fp_status
) && float32_eq(fst0
, fst1
, &env
->active_fpu
.fp_status
),
3042 !float32_is_unordered(0, fsth1
, fsth0
, &env
->active_fpu
.fp_status
) && float32_eq(fsth0
, fsth1
, &env
->active_fpu
.fp_status
))
3043 FOP_COND_PS(ueq
, float32_is_unordered(0, fst1
, fst0
, &env
->active_fpu
.fp_status
) || float32_eq(fst0
, fst1
, &env
->active_fpu
.fp_status
),
3044 float32_is_unordered(0, fsth1
, fsth0
, &env
->active_fpu
.fp_status
) || float32_eq(fsth0
, fsth1
, &env
->active_fpu
.fp_status
))
3045 FOP_COND_PS(olt
, !float32_is_unordered(0, fst1
, fst0
, &env
->active_fpu
.fp_status
) && float32_lt(fst0
, fst1
, &env
->active_fpu
.fp_status
),
3046 !float32_is_unordered(0, fsth1
, fsth0
, &env
->active_fpu
.fp_status
) && float32_lt(fsth0
, fsth1
, &env
->active_fpu
.fp_status
))
3047 FOP_COND_PS(ult
, float32_is_unordered(0, fst1
, fst0
, &env
->active_fpu
.fp_status
) || float32_lt(fst0
, fst1
, &env
->active_fpu
.fp_status
),
3048 float32_is_unordered(0, fsth1
, fsth0
, &env
->active_fpu
.fp_status
) || float32_lt(fsth0
, fsth1
, &env
->active_fpu
.fp_status
))
3049 FOP_COND_PS(ole
, !float32_is_unordered(0, fst1
, fst0
, &env
->active_fpu
.fp_status
) && float32_le(fst0
, fst1
, &env
->active_fpu
.fp_status
),
3050 !float32_is_unordered(0, fsth1
, fsth0
, &env
->active_fpu
.fp_status
) && float32_le(fsth0
, fsth1
, &env
->active_fpu
.fp_status
))
3051 FOP_COND_PS(ule
, float32_is_unordered(0, fst1
, fst0
, &env
->active_fpu
.fp_status
) || float32_le(fst0
, fst1
, &env
->active_fpu
.fp_status
),
3052 float32_is_unordered(0, fsth1
, fsth0
, &env
->active_fpu
.fp_status
) || float32_le(fsth0
, fsth1
, &env
->active_fpu
.fp_status
))
3053 /* NOTE: the comma operator will make "cond" to eval to false,
3054 * but float*_is_unordered() is still called. */
3055 FOP_COND_PS(sf
, (float32_is_unordered(1, fst1
, fst0
, &env
->active_fpu
.fp_status
), 0),
3056 (float32_is_unordered(1, fsth1
, fsth0
, &env
->active_fpu
.fp_status
), 0))
3057 FOP_COND_PS(ngle
,float32_is_unordered(1, fst1
, fst0
, &env
->active_fpu
.fp_status
),
3058 float32_is_unordered(1, fsth1
, fsth0
, &env
->active_fpu
.fp_status
))
3059 FOP_COND_PS(seq
, !float32_is_unordered(1, fst1
, fst0
, &env
->active_fpu
.fp_status
) && float32_eq(fst0
, fst1
, &env
->active_fpu
.fp_status
),
3060 !float32_is_unordered(1, fsth1
, fsth0
, &env
->active_fpu
.fp_status
) && float32_eq(fsth0
, fsth1
, &env
->active_fpu
.fp_status
))
3061 FOP_COND_PS(ngl
, float32_is_unordered(1, fst1
, fst0
, &env
->active_fpu
.fp_status
) || float32_eq(fst0
, fst1
, &env
->active_fpu
.fp_status
),
3062 float32_is_unordered(1, fsth1
, fsth0
, &env
->active_fpu
.fp_status
) || float32_eq(fsth0
, fsth1
, &env
->active_fpu
.fp_status
))
3063 FOP_COND_PS(lt
, !float32_is_unordered(1, fst1
, fst0
, &env
->active_fpu
.fp_status
) && float32_lt(fst0
, fst1
, &env
->active_fpu
.fp_status
),
3064 !float32_is_unordered(1, fsth1
, fsth0
, &env
->active_fpu
.fp_status
) && float32_lt(fsth0
, fsth1
, &env
->active_fpu
.fp_status
))
3065 FOP_COND_PS(nge
, float32_is_unordered(1, fst1
, fst0
, &env
->active_fpu
.fp_status
) || float32_lt(fst0
, fst1
, &env
->active_fpu
.fp_status
),
3066 float32_is_unordered(1, fsth1
, fsth0
, &env
->active_fpu
.fp_status
) || float32_lt(fsth0
, fsth1
, &env
->active_fpu
.fp_status
))
3067 FOP_COND_PS(le
, !float32_is_unordered(1, fst1
, fst0
, &env
->active_fpu
.fp_status
) && float32_le(fst0
, fst1
, &env
->active_fpu
.fp_status
),
3068 !float32_is_unordered(1, fsth1
, fsth0
, &env
->active_fpu
.fp_status
) && float32_le(fsth0
, fsth1
, &env
->active_fpu
.fp_status
))
3069 FOP_COND_PS(ngt
, float32_is_unordered(1, fst1
, fst0
, &env
->active_fpu
.fp_status
) || float32_le(fst0
, fst1
, &env
->active_fpu
.fp_status
),
3070 float32_is_unordered(1, fsth1
, fsth0
, &env
->active_fpu
.fp_status
) || float32_le(fsth0
, fsth1
, &env
->active_fpu
.fp_status
))