qemu-kvm/fedora.git: target-mips/op_helper.c
/*
 *  MIPS emulation helpers for qemu.
 *
 *  Copyright (c) 2004-2005 Jocelyn Mayer
 *
 *  This library is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU Lesser General Public
 *  License as published by the Free Software Foundation; either
 *  version 2 of the License, or (at your option) any later version.
 *
 *  This library is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 *  Lesser General Public License for more details.
 *
 *  You should have received a copy of the GNU Lesser General Public
 *  License along with this library; if not, write to the Free Software
 *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */
#include <stdlib.h>
#include "exec.h"

#include "host-utils.h"
/*****************************************************************************/
/* Exceptions processing helpers */

void do_raise_exception_err (uint32_t exception, int error_code)
{
#if 1
    if (logfile && exception < 0x100)
        fprintf(logfile, "%s: %d %d\n", __func__, exception, error_code);
#endif
    env->exception_index = exception;
    env->error_code = error_code;
    T0 = 0;
    cpu_loop_exit();
}

void do_raise_exception (uint32_t exception)
{
    do_raise_exception_err(exception, 0);
}

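/* Re-check for a pending hardware interrupt after a state change: the
   condition below mirrors when interrupts may be taken (EXL, ERL and debug
   mode all clear, IE set, and at least one unmasked IP bit pending); if it
   holds, Cause.ExcCode is cleared and EXCP_EXT_INTERRUPT is raised. */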
void do_interrupt_restart (void)
{
    if (!(env->CP0_Status & (1 << CP0St_EXL)) &&
        !(env->CP0_Status & (1 << CP0St_ERL)) &&
        !(env->hflags & MIPS_HFLAG_DM) &&
        (env->CP0_Status & (1 << CP0St_IE)) &&
        (env->CP0_Status & env->CP0_Cause & CP0Ca_IP_mask)) {
        env->CP0_Cause &= ~(0x1f << CP0Ca_EC);
        do_raise_exception(EXCP_EXT_INTERRUPT);
    }
}

void do_restore_state (void *pc_ptr)
{
    TranslationBlock *tb;
    unsigned long pc = (unsigned long) pc_ptr;

    tb = tb_find_pc (pc);
    if (tb) {
        cpu_restore_state (tb, env, pc, NULL);
    }
}

void do_clo (void)
{
    T0 = clo32(T0);
}

void do_clz (void)
{
    T0 = clz32(T0);
}

#if defined(TARGET_MIPS64)
#if TARGET_LONG_BITS > HOST_LONG_BITS
/* Those might call libgcc functions.  */
void do_dsll (void)
{
    T0 = T0 << T1;
}

void do_dsll32 (void)
{
    T0 = T0 << (T1 + 32);
}

void do_dsra (void)
{
    T0 = (int64_t)T0 >> T1;
}

void do_dsra32 (void)
{
    T0 = (int64_t)T0 >> (T1 + 32);
}

void do_dsrl (void)
{
    T0 = T0 >> T1;
}

void do_dsrl32 (void)
{
    T0 = T0 >> (T1 + 32);
}

void do_drotr (void)
{
    target_ulong tmp;

    if (T1) {
        tmp = T0 << (0x40 - T1);
        T0 = (T0 >> T1) | tmp;
    }
}

void do_drotr32 (void)
{
    target_ulong tmp;

    tmp = T0 << (0x40 - (32 + T1));
    T0 = (T0 >> (32 + T1)) | tmp;
}

void do_dsllv (void)
{
    T0 = T1 << (T0 & 0x3F);
}

void do_dsrav (void)
{
    T0 = (int64_t)T1 >> (T0 & 0x3F);
}

void do_dsrlv (void)
{
    T0 = T1 >> (T0 & 0x3F);
}

void do_drotrv (void)
{
    target_ulong tmp;

    T0 &= 0x3F;
    if (T0) {
        tmp = T1 << (0x40 - T0);
        T0 = (T1 >> T0) | tmp;
    } else
        T0 = T1;
}
#endif /* TARGET_LONG_BITS > HOST_LONG_BITS */

void do_dclo (void)
{
    T0 = clo64(T0);
}

void do_dclz (void)
{
    T0 = clz64(T0);
}
#endif /* TARGET_MIPS64 */

/* 64 bits arithmetic for 32 bits hosts */
#if TARGET_LONG_BITS > HOST_LONG_BITS
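/* The 64-bit HI/LO pair of the current TC is handled here as one host
   uint64_t: get_HILO() packs HI into the upper and LO into the lower 32
   bits, and the set_* variants unpack it again (set_HIT0_LO/set_HI_LOT0
   additionally copy one half to T0 for the vr54xx multiply variants). */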
static always_inline uint64_t get_HILO (void)
{
    return (env->HI[env->current_tc][0] << 32) | (uint32_t)env->LO[env->current_tc][0];
}

static always_inline void set_HILO (uint64_t HILO)
{
    env->LO[env->current_tc][0] = (int32_t)HILO;
    env->HI[env->current_tc][0] = (int32_t)(HILO >> 32);
}

static always_inline void set_HIT0_LO (uint64_t HILO)
{
    env->LO[env->current_tc][0] = (int32_t)(HILO & 0xFFFFFFFF);
    T0 = env->HI[env->current_tc][0] = (int32_t)(HILO >> 32);
}

static always_inline void set_HI_LOT0 (uint64_t HILO)
{
    T0 = env->LO[env->current_tc][0] = (int32_t)(HILO & 0xFFFFFFFF);
    env->HI[env->current_tc][0] = (int32_t)(HILO >> 32);
}

void do_mult (void)
{
    set_HILO((int64_t)(int32_t)T0 * (int64_t)(int32_t)T1);
}

void do_multu (void)
{
    set_HILO((uint64_t)(uint32_t)T0 * (uint64_t)(uint32_t)T1);
}

void do_madd (void)
{
    int64_t tmp;

    tmp = ((int64_t)(int32_t)T0 * (int64_t)(int32_t)T1);
    set_HILO((int64_t)get_HILO() + tmp);
}

void do_maddu (void)
{
    uint64_t tmp;

    tmp = ((uint64_t)(uint32_t)T0 * (uint64_t)(uint32_t)T1);
    set_HILO(get_HILO() + tmp);
}

void do_msub (void)
{
    int64_t tmp;

    tmp = ((int64_t)(int32_t)T0 * (int64_t)(int32_t)T1);
    set_HILO((int64_t)get_HILO() - tmp);
}

void do_msubu (void)
{
    uint64_t tmp;

    tmp = ((uint64_t)(uint32_t)T0 * (uint64_t)(uint32_t)T1);
    set_HILO(get_HILO() - tmp);
}

/* Multiplication variants of the vr54xx. */
void do_muls (void)
{
    set_HI_LOT0(0 - ((int64_t)(int32_t)T0 * (int64_t)(int32_t)T1));
}

void do_mulsu (void)
{
    set_HI_LOT0(0 - ((uint64_t)(uint32_t)T0 * (uint64_t)(uint32_t)T1));
}

void do_macc (void)
{
    set_HI_LOT0(((int64_t)get_HILO()) + ((int64_t)(int32_t)T0 * (int64_t)(int32_t)T1));
}

void do_macchi (void)
{
    set_HIT0_LO(((int64_t)get_HILO()) + ((int64_t)(int32_t)T0 * (int64_t)(int32_t)T1));
}

void do_maccu (void)
{
    set_HI_LOT0(((uint64_t)get_HILO()) + ((uint64_t)(uint32_t)T0 * (uint64_t)(uint32_t)T1));
}

void do_macchiu (void)
{
    set_HIT0_LO(((uint64_t)get_HILO()) + ((uint64_t)(uint32_t)T0 * (uint64_t)(uint32_t)T1));
}

void do_msac (void)
{
    set_HI_LOT0(((int64_t)get_HILO()) - ((int64_t)(int32_t)T0 * (int64_t)(int32_t)T1));
}

void do_msachi (void)
{
    set_HIT0_LO(((int64_t)get_HILO()) - ((int64_t)(int32_t)T0 * (int64_t)(int32_t)T1));
}

void do_msacu (void)
{
    set_HI_LOT0(((uint64_t)get_HILO()) - ((uint64_t)(uint32_t)T0 * (uint64_t)(uint32_t)T1));
}

void do_msachiu (void)
{
    set_HIT0_LO(((uint64_t)get_HILO()) - ((uint64_t)(uint32_t)T0 * (uint64_t)(uint32_t)T1));
}

void do_mulhi (void)
{
    set_HIT0_LO((int64_t)(int32_t)T0 * (int64_t)(int32_t)T1);
}

void do_mulhiu (void)
{
    set_HIT0_LO((uint64_t)(uint32_t)T0 * (uint64_t)(uint32_t)T1);
}

void do_mulshi (void)
{
    set_HIT0_LO(0 - ((int64_t)(int32_t)T0 * (int64_t)(int32_t)T1));
}

void do_mulshiu (void)
{
    set_HIT0_LO(0 - ((uint64_t)(uint32_t)T0 * (uint64_t)(uint32_t)T1));
}
#endif /* TARGET_LONG_BITS > HOST_LONG_BITS */

#if defined(CONFIG_USER_ONLY)
void do_mfc0_random (void)
{
    cpu_abort(env, "mfc0 random\n");
}

void do_mfc0_count (void)
{
    cpu_abort(env, "mfc0 count\n");
}

void cpu_mips_store_count(CPUState *env, uint32_t value)
{
    cpu_abort(env, "mtc0 count\n");
}

void cpu_mips_store_compare(CPUState *env, uint32_t value)
{
    cpu_abort(env, "mtc0 compare\n");
}

void cpu_mips_start_count(CPUState *env)
{
    cpu_abort(env, "start count\n");
}

void cpu_mips_stop_count(CPUState *env)
{
    cpu_abort(env, "stop count\n");
}

void cpu_mips_update_irq(CPUState *env)
{
    cpu_abort(env, "mtc0 status / mtc0 cause\n");
}

void do_mtc0_status_debug(uint32_t old, uint32_t val)
{
    cpu_abort(env, "mtc0 status debug\n");
}

void do_mtc0_status_irqraise_debug (void)
{
    cpu_abort(env, "mtc0 status irqraise debug\n");
}

void cpu_mips_tlb_flush (CPUState *env, int flush_global)
{
    cpu_abort(env, "mips_tlb_flush\n");
}

#else

/* CP0 helpers */
void do_mfc0_random (void)
{
    T0 = (int32_t)cpu_mips_get_random(env);
}

void do_mfc0_count (void)
{
    T0 = (int32_t)cpu_mips_get_count(env);
}

void do_mtc0_status_debug(uint32_t old, uint32_t val)
{
    fprintf(logfile, "Status %08x (%08x) => %08x (%08x) Cause %08x",
            old, old & env->CP0_Cause & CP0Ca_IP_mask,
            val, val & env->CP0_Cause & CP0Ca_IP_mask,
            env->CP0_Cause);
    switch (env->hflags & MIPS_HFLAG_KSU) {
    case MIPS_HFLAG_UM: fputs(", UM\n", logfile); break;
    case MIPS_HFLAG_SM: fputs(", SM\n", logfile); break;
    case MIPS_HFLAG_KM: fputs("\n", logfile); break;
    default: cpu_abort(env, "Invalid MMU mode!\n"); break;
    }
}

void do_mtc0_status_irqraise_debug(void)
{
    fprintf(logfile, "Raise pending IRQs\n");
}

void fpu_handle_exception(void)
{
#ifdef CONFIG_SOFTFLOAT
    int flags = get_float_exception_flags(&env->fpu->fp_status);
    unsigned int cpuflags = 0, enable, cause = 0;

    enable = GET_FP_ENABLE(env->fpu->fcr31);

    /* determine current flags */
    if (flags & float_flag_invalid) {
        cpuflags |= FP_INVALID;
        cause |= FP_INVALID & enable;
    }
    if (flags & float_flag_divbyzero) {
        cpuflags |= FP_DIV0;
        cause |= FP_DIV0 & enable;
    }
    if (flags & float_flag_overflow) {
        cpuflags |= FP_OVERFLOW;
        cause |= FP_OVERFLOW & enable;
    }
    if (flags & float_flag_underflow) {
        cpuflags |= FP_UNDERFLOW;
        cause |= FP_UNDERFLOW & enable;
    }
    if (flags & float_flag_inexact) {
        cpuflags |= FP_INEXACT;
        cause |= FP_INEXACT & enable;
    }
    SET_FP_FLAGS(env->fpu->fcr31, cpuflags);
    SET_FP_CAUSE(env->fpu->fcr31, cause);
#else
    SET_FP_FLAGS(env->fpu->fcr31, 0);
    SET_FP_CAUSE(env->fpu->fcr31, 0);
#endif
}

/* TLB management */
void cpu_mips_tlb_flush (CPUState *env, int flush_global)
{
    /* Flush qemu's TLB and discard all shadowed entries. */
    tlb_flush (env, flush_global);
    env->tlb->tlb_in_use = env->tlb->nb_tlb;
}

static void r4k_mips_tlb_flush_extra (CPUState *env, int first)
{
    /* Discard entries from env->tlb[first] onwards. */
    while (env->tlb->tlb_in_use > first) {
        r4k_invalidate_tlb(env, --env->tlb->tlb_in_use, 0);
    }
}

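/* Copy the current CP0 EntryHi/EntryLo0/EntryLo1/PageMask contents into
   software TLB entry idx; the G bit is the AND of both EntryLo G bits and
   the V/D/C/PFN fields are extracted from their architectural positions. */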
static void r4k_fill_tlb (int idx)
{
    r4k_tlb_t *tlb;

    /* XXX: detect conflicting TLBs and raise a MCHECK exception when needed */
    tlb = &env->tlb->mmu.r4k.tlb[idx];
    tlb->VPN = env->CP0_EntryHi & (TARGET_PAGE_MASK << 1);
#if defined(TARGET_MIPS64)
    tlb->VPN &= env->SEGMask;
#endif
    tlb->ASID = env->CP0_EntryHi & 0xFF;
    tlb->PageMask = env->CP0_PageMask;
    tlb->G = env->CP0_EntryLo0 & env->CP0_EntryLo1 & 1;
    tlb->V0 = (env->CP0_EntryLo0 & 2) != 0;
    tlb->D0 = (env->CP0_EntryLo0 & 4) != 0;
    tlb->C0 = (env->CP0_EntryLo0 >> 3) & 0x7;
    tlb->PFN[0] = (env->CP0_EntryLo0 >> 6) << 12;
    tlb->V1 = (env->CP0_EntryLo1 & 2) != 0;
    tlb->D1 = (env->CP0_EntryLo1 & 4) != 0;
    tlb->C1 = (env->CP0_EntryLo1 >> 3) & 0x7;
    tlb->PFN[1] = (env->CP0_EntryLo1 >> 6) << 12;
}

void r4k_do_tlbwi (void)
{
    /* Discard cached TLB entries.  We could avoid doing this if the
       tlbwi is just upgrading access permissions on the current entry;
       that might be a further win. */
    r4k_mips_tlb_flush_extra (env, env->tlb->nb_tlb);

    r4k_invalidate_tlb(env, env->CP0_Index % env->tlb->nb_tlb, 0);
    r4k_fill_tlb(env->CP0_Index % env->tlb->nb_tlb);
}

void r4k_do_tlbwr (void)
{
    int r = cpu_mips_get_random(env);

    r4k_invalidate_tlb(env, r, 1);
    r4k_fill_tlb(r);
}

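/* TLBP: probe the TLB for an entry matching EntryHi.  On a hit the matching
   index is written to CP0 Index; on a miss, any matching shadow entries are
   discarded and bit 31 (the probe failure bit) of Index is set. */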
void r4k_do_tlbp (void)
{
    r4k_tlb_t *tlb;
    target_ulong mask;
    target_ulong tag;
    target_ulong VPN;
    uint8_t ASID;
    int i;

    ASID = env->CP0_EntryHi & 0xFF;
    for (i = 0; i < env->tlb->nb_tlb; i++) {
        tlb = &env->tlb->mmu.r4k.tlb[i];
        /* 1k pages are not supported. */
        mask = tlb->PageMask | ~(TARGET_PAGE_MASK << 1);
        tag = env->CP0_EntryHi & ~mask;
        VPN = tlb->VPN & ~mask;
        /* Check ASID, virtual page number & size */
        if ((tlb->G == 1 || tlb->ASID == ASID) && VPN == tag) {
            /* TLB match */
            env->CP0_Index = i;
            break;
        }
    }
    if (i == env->tlb->nb_tlb) {
        /* No match.  Discard any shadow entries, if any of them match. */
        for (i = env->tlb->nb_tlb; i < env->tlb->tlb_in_use; i++) {
            tlb = &env->tlb->mmu.r4k.tlb[i];
            /* 1k pages are not supported. */
            mask = tlb->PageMask | ~(TARGET_PAGE_MASK << 1);
            tag = env->CP0_EntryHi & ~mask;
            VPN = tlb->VPN & ~mask;
            /* Check ASID, virtual page number & size */
            if ((tlb->G == 1 || tlb->ASID == ASID) && VPN == tag) {
                r4k_mips_tlb_flush_extra (env, i);
                break;
            }
        }

        env->CP0_Index |= 0x80000000;
    }
}

void r4k_do_tlbr (void)
{
    r4k_tlb_t *tlb;
    uint8_t ASID;

    ASID = env->CP0_EntryHi & 0xFF;
    tlb = &env->tlb->mmu.r4k.tlb[env->CP0_Index % env->tlb->nb_tlb];

    /* If this will change the current ASID, flush qemu's TLB. */
    if (ASID != tlb->ASID)
        cpu_mips_tlb_flush (env, 1);

    r4k_mips_tlb_flush_extra(env, env->tlb->nb_tlb);

    env->CP0_EntryHi = tlb->VPN | tlb->ASID;
    env->CP0_PageMask = tlb->PageMask;
    env->CP0_EntryLo0 = tlb->G | (tlb->V0 << 1) | (tlb->D0 << 2) |
                        (tlb->C0 << 3) | (tlb->PFN[0] >> 6);
    env->CP0_EntryLo1 = tlb->G | (tlb->V1 << 1) | (tlb->D1 << 2) |
                        (tlb->C1 << 3) | (tlb->PFN[1] >> 6);
}
#endif /* !CONFIG_USER_ONLY */

void dump_ldst (const unsigned char *func)
{
    if (loglevel)
        fprintf(logfile, "%s => " TARGET_FMT_lx " " TARGET_FMT_lx "\n", __func__, T0, T1);
}

void dump_sc (void)
{
    if (loglevel) {
        fprintf(logfile, "%s " TARGET_FMT_lx " at " TARGET_FMT_lx " (" TARGET_FMT_lx ")\n", __func__,
                T1, T0, env->CP0_LLAddr);
    }
}

void debug_pre_eret (void)
{
    fprintf(logfile, "ERET: PC " TARGET_FMT_lx " EPC " TARGET_FMT_lx,
            env->PC[env->current_tc], env->CP0_EPC);
    if (env->CP0_Status & (1 << CP0St_ERL))
        fprintf(logfile, " ErrorEPC " TARGET_FMT_lx, env->CP0_ErrorEPC);
    if (env->hflags & MIPS_HFLAG_DM)
        fprintf(logfile, " DEPC " TARGET_FMT_lx, env->CP0_DEPC);
    fputs("\n", logfile);
}

void debug_post_eret (void)
{
    fprintf(logfile, " => PC " TARGET_FMT_lx " EPC " TARGET_FMT_lx,
            env->PC[env->current_tc], env->CP0_EPC);
    if (env->CP0_Status & (1 << CP0St_ERL))
        fprintf(logfile, " ErrorEPC " TARGET_FMT_lx, env->CP0_ErrorEPC);
    if (env->hflags & MIPS_HFLAG_DM)
        fprintf(logfile, " DEPC " TARGET_FMT_lx, env->CP0_DEPC);
    switch (env->hflags & MIPS_HFLAG_KSU) {
    case MIPS_HFLAG_UM: fputs(", UM\n", logfile); break;
    case MIPS_HFLAG_SM: fputs(", SM\n", logfile); break;
    case MIPS_HFLAG_KM: fputs("\n", logfile); break;
    default: cpu_abort(env, "Invalid MMU mode!\n"); break;
    }
}

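/* Minimal emulation of PMON firmware calls.  The function code (apparently
   passed pre-multiplied by two, hence the division) selects console-style
   services: 2/11 read a character (always reported as unavailable here),
   3/12 print the character in $a0, and 158 prints the string whose address
   is in $a0. */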
void do_pmon (int function)
{
    function /= 2;
    switch (function) {
    case 2: /* TODO: char inbyte(int waitflag); */
        if (env->gpr[env->current_tc][4] == 0)
            env->gpr[env->current_tc][2] = -1;
        /* Fall through */
    case 11: /* TODO: char inbyte (void); */
        env->gpr[env->current_tc][2] = -1;
        break;
    case 3:
    case 12:
        printf("%c", (char)(env->gpr[env->current_tc][4] & 0xFF));
        break;
    case 17:
        break;
    case 158:
        {
            unsigned char *fmt = (void *)(unsigned long)env->gpr[env->current_tc][4];
            printf("%s", fmt);
        }
        break;
    }
}

#if !defined(CONFIG_USER_ONLY)

static void do_unaligned_access (target_ulong addr, int is_write, int is_user, void *retaddr);

#define MMUSUFFIX _mmu
#define ALIGNED_ONLY

#define SHIFT 0
#include "softmmu_template.h"

#define SHIFT 1
#include "softmmu_template.h"

#define SHIFT 2
#include "softmmu_template.h"

#define SHIFT 3
#include "softmmu_template.h"

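/* Each inclusion of softmmu_template.h above instantiates the softmmu
   load/store helpers for one access size (SHIFT 0..3, i.e. 8/16/32/64 bit);
   with ALIGNED_ONLY defined they call do_unaligned_access() below on
   misaligned addresses. */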
static void do_unaligned_access (target_ulong addr, int is_write, int is_user, void *retaddr)
{
    env->CP0_BadVAddr = addr;
    do_restore_state (retaddr);
    do_raise_exception ((is_write == 1) ? EXCP_AdES : EXCP_AdEL);
}

void tlb_fill (target_ulong addr, int is_write, int mmu_idx, void *retaddr)
{
    TranslationBlock *tb;
    CPUState *saved_env;
    unsigned long pc;
    int ret;

    /* XXX: hack to restore env in all cases, even if not called from
       generated code */
    saved_env = env;
    env = cpu_single_env;
    ret = cpu_mips_handle_mmu_fault(env, addr, is_write, mmu_idx, 1);
    if (ret) {
        if (retaddr) {
            /* now we have a real cpu fault */
            pc = (unsigned long)retaddr;
            tb = tb_find_pc(pc);
            if (tb) {
                /* the PC is inside the translated code. It means that we have
                   a virtual CPU fault */
                cpu_restore_state(tb, env, pc, NULL);
            }
        }
        do_raise_exception_err(env->exception_index, env->error_code);
    }
    env = saved_env;
}

void do_unassigned_access(target_phys_addr_t addr, int is_write, int is_exec,
                          int unused)
{
    if (is_exec)
        do_raise_exception(EXCP_IBE);
    else
        do_raise_exception(EXCP_DBE);
}
#endif

/* Complex FPU operations which may need stack space. */

#define FLOAT_ONE32 make_float32(0x3f8 << 20)
#define FLOAT_ONE64 make_float64(0x3ffULL << 52)
#define FLOAT_TWO32 make_float32(1 << 30)
#define FLOAT_TWO64 make_float64(1ULL << 62)
#define FLOAT_QNAN32 0x7fbfffff
#define FLOAT_QNAN64 0x7ff7ffffffffffffULL
#define FLOAT_SNAN32 0x7fffffff
#define FLOAT_SNAN64 0x7fffffffffffffffULL
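/* FLOAT_ONE* and FLOAT_TWO* are the IEEE-754 bit patterns for 1.0 and 2.0
   in single and double precision; the *QNAN*/*SNAN* values are the default
   NaN results substituted when an operation or conversion below raises
   Invalid or Overflow. */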
/* convert MIPS rounding mode in FCR31 to IEEE library */
unsigned int ieee_rm[] = {
    float_round_nearest_even,
    float_round_to_zero,
    float_round_up,
    float_round_down
};

#define RESTORE_ROUNDING_MODE \
    set_float_rounding_mode(ieee_rm[env->fpu->fcr31 & 3], &env->fpu->fp_status)

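/* do_cfc1/do_ctc1 implement CFC1/CTC1 accesses to the CP1 control
   registers: 0 is FIR, 25 FCCR, 26 FEXR, 28 FENR and 31 FCSR; reads of
   other numbers fall back to FCSR and writes to them are ignored.  A write
   re-derives the rounding mode and may immediately raise a deferred FP
   exception if an enabled cause bit is set. */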
void do_cfc1 (int reg)
{
    switch (reg) {
    case 0:
        T0 = (int32_t)env->fpu->fcr0;
        break;
    case 25:
        T0 = ((env->fpu->fcr31 >> 24) & 0xfe) | ((env->fpu->fcr31 >> 23) & 0x1);
        break;
    case 26:
        T0 = env->fpu->fcr31 & 0x0003f07c;
        break;
    case 28:
        T0 = (env->fpu->fcr31 & 0x00000f83) | ((env->fpu->fcr31 >> 22) & 0x4);
        break;
    default:
        T0 = (int32_t)env->fpu->fcr31;
        break;
    }
}

void do_ctc1 (int reg)
{
    switch(reg) {
    case 25:
        if (T0 & 0xffffff00)
            return;
        env->fpu->fcr31 = (env->fpu->fcr31 & 0x017fffff) | ((T0 & 0xfe) << 24) |
                          ((T0 & 0x1) << 23);
        break;
    case 26:
        if (T0 & 0x007c0000)
            return;
        env->fpu->fcr31 = (env->fpu->fcr31 & 0xfffc0f83) | (T0 & 0x0003f07c);
        break;
    case 28:
        if (T0 & 0x007c0000)
            return;
        env->fpu->fcr31 = (env->fpu->fcr31 & 0xfefff07c) | (T0 & 0x00000f83) |
                          ((T0 & 0x4) << 22);
        break;
    case 31:
        if (T0 & 0x007c0000)
            return;
        env->fpu->fcr31 = T0;
        break;
    default:
        return;
    }
    /* set rounding mode */
    RESTORE_ROUNDING_MODE;
    set_float_exception_flags(0, &env->fpu->fp_status);
    if ((GET_FP_ENABLE(env->fpu->fcr31) | 0x20) & GET_FP_CAUSE(env->fpu->fcr31))
        do_raise_exception(EXCP_FPE);
}

static always_inline char ieee_ex_to_mips(char xcpt)
{
    return (xcpt & float_flag_inexact) >> 5 |
           (xcpt & float_flag_underflow) >> 3 |
           (xcpt & float_flag_overflow) >> 1 |
           (xcpt & float_flag_divbyzero) << 1 |
           (xcpt & float_flag_invalid) << 4;
}

static always_inline char mips_ex_to_ieee(char xcpt)
{
    return (xcpt & FP_INEXACT) << 5 |
           (xcpt & FP_UNDERFLOW) << 3 |
           (xcpt & FP_OVERFLOW) << 1 |
           (xcpt & FP_DIV0) >> 1 |
           (xcpt & FP_INVALID) >> 4;
}

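/* update_fcr31() is called after every softfloat operation: it converts the
   accumulated softfloat exception flags into FCR31 cause bits, raises
   EXCP_FPE at once if any of them is enabled, and otherwise folds them into
   the sticky flag bits. */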
static always_inline void update_fcr31(void)
{
    int tmp = ieee_ex_to_mips(get_float_exception_flags(&env->fpu->fp_status));

    SET_FP_CAUSE(env->fpu->fcr31, tmp);
    if (GET_FP_ENABLE(env->fpu->fcr31) & tmp)
        do_raise_exception(EXCP_FPE);
    else
        UPDATE_FP_FLAGS(env->fpu->fcr31, tmp);
}

#define FLOAT_OP(name, p) void do_float_##name##_##p(void)

FLOAT_OP(cvtd, s)
{
    set_float_exception_flags(0, &env->fpu->fp_status);
    FDT2 = float32_to_float64(FST0, &env->fpu->fp_status);
    update_fcr31();
}
FLOAT_OP(cvtd, w)
{
    set_float_exception_flags(0, &env->fpu->fp_status);
    FDT2 = int32_to_float64(WT0, &env->fpu->fp_status);
    update_fcr31();
}
FLOAT_OP(cvtd, l)
{
    set_float_exception_flags(0, &env->fpu->fp_status);
    FDT2 = int64_to_float64(DT0, &env->fpu->fp_status);
    update_fcr31();
}
FLOAT_OP(cvtl, d)
{
    set_float_exception_flags(0, &env->fpu->fp_status);
    DT2 = float64_to_int64(FDT0, &env->fpu->fp_status);
    update_fcr31();
    if (GET_FP_CAUSE(env->fpu->fcr31) & (FP_OVERFLOW | FP_INVALID))
        DT2 = FLOAT_SNAN64;
}
FLOAT_OP(cvtl, s)
{
    set_float_exception_flags(0, &env->fpu->fp_status);
    DT2 = float32_to_int64(FST0, &env->fpu->fp_status);
    update_fcr31();
    if (GET_FP_CAUSE(env->fpu->fcr31) & (FP_OVERFLOW | FP_INVALID))
        DT2 = FLOAT_SNAN64;
}

FLOAT_OP(cvtps, pw)
{
    set_float_exception_flags(0, &env->fpu->fp_status);
    FST2 = int32_to_float32(WT0, &env->fpu->fp_status);
    FSTH2 = int32_to_float32(WTH0, &env->fpu->fp_status);
    update_fcr31();
}
FLOAT_OP(cvtpw, ps)
{
    set_float_exception_flags(0, &env->fpu->fp_status);
    WT2 = float32_to_int32(FST0, &env->fpu->fp_status);
    WTH2 = float32_to_int32(FSTH0, &env->fpu->fp_status);
    update_fcr31();
    if (GET_FP_CAUSE(env->fpu->fcr31) & (FP_OVERFLOW | FP_INVALID))
        WT2 = FLOAT_SNAN32;
}
FLOAT_OP(cvts, d)
{
    set_float_exception_flags(0, &env->fpu->fp_status);
    FST2 = float64_to_float32(FDT0, &env->fpu->fp_status);
    update_fcr31();
}
FLOAT_OP(cvts, w)
{
    set_float_exception_flags(0, &env->fpu->fp_status);
    FST2 = int32_to_float32(WT0, &env->fpu->fp_status);
    update_fcr31();
}
FLOAT_OP(cvts, l)
{
    set_float_exception_flags(0, &env->fpu->fp_status);
    FST2 = int64_to_float32(DT0, &env->fpu->fp_status);
    update_fcr31();
}
FLOAT_OP(cvts, pl)
{
    set_float_exception_flags(0, &env->fpu->fp_status);
    WT2 = WT0;
    update_fcr31();
}
FLOAT_OP(cvts, pu)
{
    set_float_exception_flags(0, &env->fpu->fp_status);
    WT2 = WTH0;
    update_fcr31();
}
FLOAT_OP(cvtw, s)
{
    set_float_exception_flags(0, &env->fpu->fp_status);
    WT2 = float32_to_int32(FST0, &env->fpu->fp_status);
    update_fcr31();
    if (GET_FP_CAUSE(env->fpu->fcr31) & (FP_OVERFLOW | FP_INVALID))
        WT2 = FLOAT_SNAN32;
}
FLOAT_OP(cvtw, d)
{
    set_float_exception_flags(0, &env->fpu->fp_status);
    WT2 = float64_to_int32(FDT0, &env->fpu->fp_status);
    update_fcr31();
    if (GET_FP_CAUSE(env->fpu->fcr31) & (FP_OVERFLOW | FP_INVALID))
        WT2 = FLOAT_SNAN32;
}

FLOAT_OP(roundl, d)
{
    set_float_rounding_mode(float_round_nearest_even, &env->fpu->fp_status);
    DT2 = float64_to_int64(FDT0, &env->fpu->fp_status);
    RESTORE_ROUNDING_MODE;
    update_fcr31();
    if (GET_FP_CAUSE(env->fpu->fcr31) & (FP_OVERFLOW | FP_INVALID))
        DT2 = FLOAT_SNAN64;
}
FLOAT_OP(roundl, s)
{
    set_float_rounding_mode(float_round_nearest_even, &env->fpu->fp_status);
    DT2 = float32_to_int64(FST0, &env->fpu->fp_status);
    RESTORE_ROUNDING_MODE;
    update_fcr31();
    if (GET_FP_CAUSE(env->fpu->fcr31) & (FP_OVERFLOW | FP_INVALID))
        DT2 = FLOAT_SNAN64;
}
FLOAT_OP(roundw, d)
{
    set_float_rounding_mode(float_round_nearest_even, &env->fpu->fp_status);
    WT2 = float64_to_int32(FDT0, &env->fpu->fp_status);
    RESTORE_ROUNDING_MODE;
    update_fcr31();
    if (GET_FP_CAUSE(env->fpu->fcr31) & (FP_OVERFLOW | FP_INVALID))
        WT2 = FLOAT_SNAN32;
}
FLOAT_OP(roundw, s)
{
    set_float_rounding_mode(float_round_nearest_even, &env->fpu->fp_status);
    WT2 = float32_to_int32(FST0, &env->fpu->fp_status);
    RESTORE_ROUNDING_MODE;
    update_fcr31();
    if (GET_FP_CAUSE(env->fpu->fcr31) & (FP_OVERFLOW | FP_INVALID))
        WT2 = FLOAT_SNAN32;
}

FLOAT_OP(truncl, d)
{
    DT2 = float64_to_int64_round_to_zero(FDT0, &env->fpu->fp_status);
    update_fcr31();
    if (GET_FP_CAUSE(env->fpu->fcr31) & (FP_OVERFLOW | FP_INVALID))
        DT2 = FLOAT_SNAN64;
}
FLOAT_OP(truncl, s)
{
    DT2 = float32_to_int64_round_to_zero(FST0, &env->fpu->fp_status);
    update_fcr31();
    if (GET_FP_CAUSE(env->fpu->fcr31) & (FP_OVERFLOW | FP_INVALID))
        DT2 = FLOAT_SNAN64;
}
FLOAT_OP(truncw, d)
{
    WT2 = float64_to_int32_round_to_zero(FDT0, &env->fpu->fp_status);
    update_fcr31();
    if (GET_FP_CAUSE(env->fpu->fcr31) & (FP_OVERFLOW | FP_INVALID))
        WT2 = FLOAT_SNAN32;
}
FLOAT_OP(truncw, s)
{
    WT2 = float32_to_int32_round_to_zero(FST0, &env->fpu->fp_status);
    update_fcr31();
    if (GET_FP_CAUSE(env->fpu->fcr31) & (FP_OVERFLOW | FP_INVALID))
        WT2 = FLOAT_SNAN32;
}

FLOAT_OP(ceill, d)
{
    set_float_rounding_mode(float_round_up, &env->fpu->fp_status);
    DT2 = float64_to_int64(FDT0, &env->fpu->fp_status);
    RESTORE_ROUNDING_MODE;
    update_fcr31();
    if (GET_FP_CAUSE(env->fpu->fcr31) & (FP_OVERFLOW | FP_INVALID))
        DT2 = FLOAT_SNAN64;
}
FLOAT_OP(ceill, s)
{
    set_float_rounding_mode(float_round_up, &env->fpu->fp_status);
    DT2 = float32_to_int64(FST0, &env->fpu->fp_status);
    RESTORE_ROUNDING_MODE;
    update_fcr31();
    if (GET_FP_CAUSE(env->fpu->fcr31) & (FP_OVERFLOW | FP_INVALID))
        DT2 = FLOAT_SNAN64;
}
FLOAT_OP(ceilw, d)
{
    set_float_rounding_mode(float_round_up, &env->fpu->fp_status);
    WT2 = float64_to_int32(FDT0, &env->fpu->fp_status);
    RESTORE_ROUNDING_MODE;
    update_fcr31();
    if (GET_FP_CAUSE(env->fpu->fcr31) & (FP_OVERFLOW | FP_INVALID))
        WT2 = FLOAT_SNAN32;
}
FLOAT_OP(ceilw, s)
{
    set_float_rounding_mode(float_round_up, &env->fpu->fp_status);
    WT2 = float32_to_int32(FST0, &env->fpu->fp_status);
    RESTORE_ROUNDING_MODE;
    update_fcr31();
    if (GET_FP_CAUSE(env->fpu->fcr31) & (FP_OVERFLOW | FP_INVALID))
        WT2 = FLOAT_SNAN32;
}

FLOAT_OP(floorl, d)
{
    set_float_rounding_mode(float_round_down, &env->fpu->fp_status);
    DT2 = float64_to_int64(FDT0, &env->fpu->fp_status);
    RESTORE_ROUNDING_MODE;
    update_fcr31();
    if (GET_FP_CAUSE(env->fpu->fcr31) & (FP_OVERFLOW | FP_INVALID))
        DT2 = FLOAT_SNAN64;
}
FLOAT_OP(floorl, s)
{
    set_float_rounding_mode(float_round_down, &env->fpu->fp_status);
    DT2 = float32_to_int64(FST0, &env->fpu->fp_status);
    RESTORE_ROUNDING_MODE;
    update_fcr31();
    if (GET_FP_CAUSE(env->fpu->fcr31) & (FP_OVERFLOW | FP_INVALID))
        DT2 = FLOAT_SNAN64;
}
FLOAT_OP(floorw, d)
{
    set_float_rounding_mode(float_round_down, &env->fpu->fp_status);
    WT2 = float64_to_int32(FDT0, &env->fpu->fp_status);
    RESTORE_ROUNDING_MODE;
    update_fcr31();
    if (GET_FP_CAUSE(env->fpu->fcr31) & (FP_OVERFLOW | FP_INVALID))
        WT2 = FLOAT_SNAN32;
}
FLOAT_OP(floorw, s)
{
    set_float_rounding_mode(float_round_down, &env->fpu->fp_status);
    WT2 = float32_to_int32(FST0, &env->fpu->fp_status);
    RESTORE_ROUNDING_MODE;
    update_fcr31();
    if (GET_FP_CAUSE(env->fpu->fcr31) & (FP_OVERFLOW | FP_INVALID))
        WT2 = FLOAT_SNAN32;
}

/* MIPS specific unary operations */
FLOAT_OP(recip, d)
{
    set_float_exception_flags(0, &env->fpu->fp_status);
    FDT2 = float64_div(FLOAT_ONE64, FDT0, &env->fpu->fp_status);
    update_fcr31();
}
FLOAT_OP(recip, s)
{
    set_float_exception_flags(0, &env->fpu->fp_status);
    FST2 = float32_div(FLOAT_ONE32, FST0, &env->fpu->fp_status);
    update_fcr31();
}

FLOAT_OP(rsqrt, d)
{
    set_float_exception_flags(0, &env->fpu->fp_status);
    FDT2 = float64_sqrt(FDT0, &env->fpu->fp_status);
    FDT2 = float64_div(FLOAT_ONE64, FDT2, &env->fpu->fp_status);
    update_fcr31();
}
FLOAT_OP(rsqrt, s)
{
    set_float_exception_flags(0, &env->fpu->fp_status);
    FST2 = float32_sqrt(FST0, &env->fpu->fp_status);
    FST2 = float32_div(FLOAT_ONE32, FST2, &env->fpu->fp_status);
    update_fcr31();
}

FLOAT_OP(recip1, d)
{
    set_float_exception_flags(0, &env->fpu->fp_status);
    FDT2 = float64_div(FLOAT_ONE64, FDT0, &env->fpu->fp_status);
    update_fcr31();
}
FLOAT_OP(recip1, s)
{
    set_float_exception_flags(0, &env->fpu->fp_status);
    FST2 = float32_div(FLOAT_ONE32, FST0, &env->fpu->fp_status);
    update_fcr31();
}
FLOAT_OP(recip1, ps)
{
    set_float_exception_flags(0, &env->fpu->fp_status);
    FST2 = float32_div(FLOAT_ONE32, FST0, &env->fpu->fp_status);
    FSTH2 = float32_div(FLOAT_ONE32, FSTH0, &env->fpu->fp_status);
    update_fcr31();
}

FLOAT_OP(rsqrt1, d)
{
    set_float_exception_flags(0, &env->fpu->fp_status);
    FDT2 = float64_sqrt(FDT0, &env->fpu->fp_status);
    FDT2 = float64_div(FLOAT_ONE64, FDT2, &env->fpu->fp_status);
    update_fcr31();
}
FLOAT_OP(rsqrt1, s)
{
    set_float_exception_flags(0, &env->fpu->fp_status);
    FST2 = float32_sqrt(FST0, &env->fpu->fp_status);
    FST2 = float32_div(FLOAT_ONE32, FST2, &env->fpu->fp_status);
    update_fcr31();
}
FLOAT_OP(rsqrt1, ps)
{
    set_float_exception_flags(0, &env->fpu->fp_status);
    FST2 = float32_sqrt(FST0, &env->fpu->fp_status);
    FSTH2 = float32_sqrt(FSTH0, &env->fpu->fp_status);
    FST2 = float32_div(FLOAT_ONE32, FST2, &env->fpu->fp_status);
    FSTH2 = float32_div(FLOAT_ONE32, FSTH2, &env->fpu->fp_status);
    update_fcr31();
}

/* binary operations */
#define FLOAT_BINOP(name)                                          \
FLOAT_OP(name, d)                                                  \
{                                                                  \
    set_float_exception_flags(0, &env->fpu->fp_status);            \
    FDT2 = float64_ ## name (FDT0, FDT1, &env->fpu->fp_status);    \
    update_fcr31();                                                \
    if (GET_FP_CAUSE(env->fpu->fcr31) & FP_INVALID)                \
        DT2 = FLOAT_QNAN64;                                        \
}                                                                  \
FLOAT_OP(name, s)                                                  \
{                                                                  \
    set_float_exception_flags(0, &env->fpu->fp_status);            \
    FST2 = float32_ ## name (FST0, FST1, &env->fpu->fp_status);    \
    update_fcr31();                                                \
    if (GET_FP_CAUSE(env->fpu->fcr31) & FP_INVALID)                \
        WT2 = FLOAT_QNAN32;                                        \
}                                                                  \
FLOAT_OP(name, ps)                                                 \
{                                                                  \
    set_float_exception_flags(0, &env->fpu->fp_status);            \
    FST2 = float32_ ## name (FST0, FST1, &env->fpu->fp_status);    \
    FSTH2 = float32_ ## name (FSTH0, FSTH1, &env->fpu->fp_status); \
    update_fcr31();                                                \
    if (GET_FP_CAUSE(env->fpu->fcr31) & FP_INVALID) {              \
        WT2 = FLOAT_QNAN32;                                        \
        WTH2 = FLOAT_QNAN32;                                       \
    }                                                              \
}

FLOAT_BINOP(add)
FLOAT_BINOP(sub)
FLOAT_BINOP(mul)
FLOAT_BINOP(div)
#undef FLOAT_BINOP

/* MIPS specific binary operations */
FLOAT_OP(recip2, d)
{
    set_float_exception_flags(0, &env->fpu->fp_status);
    FDT2 = float64_mul(FDT0, FDT2, &env->fpu->fp_status);
    FDT2 = float64_chs(float64_sub(FDT2, FLOAT_ONE64, &env->fpu->fp_status));
    update_fcr31();
}
FLOAT_OP(recip2, s)
{
    set_float_exception_flags(0, &env->fpu->fp_status);
    FST2 = float32_mul(FST0, FST2, &env->fpu->fp_status);
    FST2 = float32_chs(float32_sub(FST2, FLOAT_ONE32, &env->fpu->fp_status));
    update_fcr31();
}
FLOAT_OP(recip2, ps)
{
    set_float_exception_flags(0, &env->fpu->fp_status);
    FST2 = float32_mul(FST0, FST2, &env->fpu->fp_status);
    FSTH2 = float32_mul(FSTH0, FSTH2, &env->fpu->fp_status);
    FST2 = float32_chs(float32_sub(FST2, FLOAT_ONE32, &env->fpu->fp_status));
    FSTH2 = float32_chs(float32_sub(FSTH2, FLOAT_ONE32, &env->fpu->fp_status));
    update_fcr31();
}

FLOAT_OP(rsqrt2, d)
{
    set_float_exception_flags(0, &env->fpu->fp_status);
    FDT2 = float64_mul(FDT0, FDT2, &env->fpu->fp_status);
    FDT2 = float64_sub(FDT2, FLOAT_ONE64, &env->fpu->fp_status);
    FDT2 = float64_chs(float64_div(FDT2, FLOAT_TWO64, &env->fpu->fp_status));
    update_fcr31();
}
FLOAT_OP(rsqrt2, s)
{
    set_float_exception_flags(0, &env->fpu->fp_status);
    FST2 = float32_mul(FST0, FST2, &env->fpu->fp_status);
    FST2 = float32_sub(FST2, FLOAT_ONE32, &env->fpu->fp_status);
    FST2 = float32_chs(float32_div(FST2, FLOAT_TWO32, &env->fpu->fp_status));
    update_fcr31();
}
FLOAT_OP(rsqrt2, ps)
{
    set_float_exception_flags(0, &env->fpu->fp_status);
    FST2 = float32_mul(FST0, FST2, &env->fpu->fp_status);
    FSTH2 = float32_mul(FSTH0, FSTH2, &env->fpu->fp_status);
    FST2 = float32_sub(FST2, FLOAT_ONE32, &env->fpu->fp_status);
    FSTH2 = float32_sub(FSTH2, FLOAT_ONE32, &env->fpu->fp_status);
    FST2 = float32_chs(float32_div(FST2, FLOAT_TWO32, &env->fpu->fp_status));
    FSTH2 = float32_chs(float32_div(FSTH2, FLOAT_TWO32, &env->fpu->fp_status));
    update_fcr31();
}

FLOAT_OP(addr, ps)
{
    set_float_exception_flags(0, &env->fpu->fp_status);
    FST2 = float32_add (FST0, FSTH0, &env->fpu->fp_status);
    FSTH2 = float32_add (FST1, FSTH1, &env->fpu->fp_status);
    update_fcr31();
}

FLOAT_OP(mulr, ps)
{
    set_float_exception_flags(0, &env->fpu->fp_status);
    FST2 = float32_mul (FST0, FSTH0, &env->fpu->fp_status);
    FSTH2 = float32_mul (FST1, FSTH1, &env->fpu->fp_status);
    update_fcr31();
}

/* compare operations */
#define FOP_COND_D(op, cond)                       \
void do_cmp_d_ ## op (long cc)                     \
{                                                  \
    int c = cond;                                  \
    update_fcr31();                                \
    if (c)                                         \
        SET_FP_COND(cc, env->fpu);                 \
    else                                           \
        CLEAR_FP_COND(cc, env->fpu);               \
}                                                  \
void do_cmpabs_d_ ## op (long cc)                  \
{                                                  \
    int c;                                         \
    FDT0 = float64_abs(FDT0);                      \
    FDT1 = float64_abs(FDT1);                      \
    c = cond;                                      \
    update_fcr31();                                \
    if (c)                                         \
        SET_FP_COND(cc, env->fpu);                 \
    else                                           \
        CLEAR_FP_COND(cc, env->fpu);               \
}

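/* Each FOP_COND_* expansion provides the helpers behind the c.cond.fmt
   comparisons: do_cmp_<fmt>_<cond> evaluates the condition on the operands
   as given, do_cmpabs_<fmt>_<cond> on their absolute values, and the result
   sets or clears FP condition code cc (and cc+1 for paired single). */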
int float64_is_unordered(int sig, float64 a, float64 b STATUS_PARAM)
{
    if (float64_is_signaling_nan(a) ||
        float64_is_signaling_nan(b) ||
        (sig && (float64_is_nan(a) || float64_is_nan(b)))) {
        float_raise(float_flag_invalid, status);
        return 1;
    } else if (float64_is_nan(a) || float64_is_nan(b)) {
        return 1;
    } else {
        return 0;
    }
}

/* NOTE: the comma operator will make "cond" evaluate to false,
 * but float*_is_unordered() is still called. */
FOP_COND_D(f, (float64_is_unordered(0, FDT1, FDT0, &env->fpu->fp_status), 0))
FOP_COND_D(un, float64_is_unordered(0, FDT1, FDT0, &env->fpu->fp_status))
FOP_COND_D(eq, !float64_is_unordered(0, FDT1, FDT0, &env->fpu->fp_status) && float64_eq(FDT0, FDT1, &env->fpu->fp_status))
FOP_COND_D(ueq, float64_is_unordered(0, FDT1, FDT0, &env->fpu->fp_status) || float64_eq(FDT0, FDT1, &env->fpu->fp_status))
FOP_COND_D(olt, !float64_is_unordered(0, FDT1, FDT0, &env->fpu->fp_status) && float64_lt(FDT0, FDT1, &env->fpu->fp_status))
FOP_COND_D(ult, float64_is_unordered(0, FDT1, FDT0, &env->fpu->fp_status) || float64_lt(FDT0, FDT1, &env->fpu->fp_status))
FOP_COND_D(ole, !float64_is_unordered(0, FDT1, FDT0, &env->fpu->fp_status) && float64_le(FDT0, FDT1, &env->fpu->fp_status))
FOP_COND_D(ule, float64_is_unordered(0, FDT1, FDT0, &env->fpu->fp_status) || float64_le(FDT0, FDT1, &env->fpu->fp_status))
/* NOTE: the comma operator will make "cond" evaluate to false,
 * but float*_is_unordered() is still called. */
FOP_COND_D(sf, (float64_is_unordered(1, FDT1, FDT0, &env->fpu->fp_status), 0))
FOP_COND_D(ngle,float64_is_unordered(1, FDT1, FDT0, &env->fpu->fp_status))
FOP_COND_D(seq, !float64_is_unordered(1, FDT1, FDT0, &env->fpu->fp_status) && float64_eq(FDT0, FDT1, &env->fpu->fp_status))
FOP_COND_D(ngl, float64_is_unordered(1, FDT1, FDT0, &env->fpu->fp_status) || float64_eq(FDT0, FDT1, &env->fpu->fp_status))
FOP_COND_D(lt, !float64_is_unordered(1, FDT1, FDT0, &env->fpu->fp_status) && float64_lt(FDT0, FDT1, &env->fpu->fp_status))
FOP_COND_D(nge, float64_is_unordered(1, FDT1, FDT0, &env->fpu->fp_status) || float64_lt(FDT0, FDT1, &env->fpu->fp_status))
FOP_COND_D(le, !float64_is_unordered(1, FDT1, FDT0, &env->fpu->fp_status) && float64_le(FDT0, FDT1, &env->fpu->fp_status))
FOP_COND_D(ngt, float64_is_unordered(1, FDT1, FDT0, &env->fpu->fp_status) || float64_le(FDT0, FDT1, &env->fpu->fp_status))

#define FOP_COND_S(op, cond)                       \
void do_cmp_s_ ## op (long cc)                     \
{                                                  \
    int c = cond;                                  \
    update_fcr31();                                \
    if (c)                                         \
        SET_FP_COND(cc, env->fpu);                 \
    else                                           \
        CLEAR_FP_COND(cc, env->fpu);               \
}                                                  \
void do_cmpabs_s_ ## op (long cc)                  \
{                                                  \
    int c;                                         \
    FST0 = float32_abs(FST0);                      \
    FST1 = float32_abs(FST1);                      \
    c = cond;                                      \
    update_fcr31();                                \
    if (c)                                         \
        SET_FP_COND(cc, env->fpu);                 \
    else                                           \
        CLEAR_FP_COND(cc, env->fpu);               \
}

flag float32_is_unordered(int sig, float32 a, float32 b STATUS_PARAM)
{
    if (float32_is_signaling_nan(a) ||
        float32_is_signaling_nan(b) ||
        (sig && (float32_is_nan(a) || float32_is_nan(b)))) {
        float_raise(float_flag_invalid, status);
        return 1;
    } else if (float32_is_nan(a) || float32_is_nan(b)) {
        return 1;
    } else {
        return 0;
    }
}

/* NOTE: the comma operator will make "cond" evaluate to false,
 * but float*_is_unordered() is still called. */
FOP_COND_S(f, (float32_is_unordered(0, FST1, FST0, &env->fpu->fp_status), 0))
FOP_COND_S(un, float32_is_unordered(0, FST1, FST0, &env->fpu->fp_status))
FOP_COND_S(eq, !float32_is_unordered(0, FST1, FST0, &env->fpu->fp_status) && float32_eq(FST0, FST1, &env->fpu->fp_status))
FOP_COND_S(ueq, float32_is_unordered(0, FST1, FST0, &env->fpu->fp_status) || float32_eq(FST0, FST1, &env->fpu->fp_status))
FOP_COND_S(olt, !float32_is_unordered(0, FST1, FST0, &env->fpu->fp_status) && float32_lt(FST0, FST1, &env->fpu->fp_status))
FOP_COND_S(ult, float32_is_unordered(0, FST1, FST0, &env->fpu->fp_status) || float32_lt(FST0, FST1, &env->fpu->fp_status))
FOP_COND_S(ole, !float32_is_unordered(0, FST1, FST0, &env->fpu->fp_status) && float32_le(FST0, FST1, &env->fpu->fp_status))
FOP_COND_S(ule, float32_is_unordered(0, FST1, FST0, &env->fpu->fp_status) || float32_le(FST0, FST1, &env->fpu->fp_status))
/* NOTE: the comma operator will make "cond" evaluate to false,
 * but float*_is_unordered() is still called. */
FOP_COND_S(sf, (float32_is_unordered(1, FST1, FST0, &env->fpu->fp_status), 0))
FOP_COND_S(ngle,float32_is_unordered(1, FST1, FST0, &env->fpu->fp_status))
FOP_COND_S(seq, !float32_is_unordered(1, FST1, FST0, &env->fpu->fp_status) && float32_eq(FST0, FST1, &env->fpu->fp_status))
FOP_COND_S(ngl, float32_is_unordered(1, FST1, FST0, &env->fpu->fp_status) || float32_eq(FST0, FST1, &env->fpu->fp_status))
FOP_COND_S(lt, !float32_is_unordered(1, FST1, FST0, &env->fpu->fp_status) && float32_lt(FST0, FST1, &env->fpu->fp_status))
FOP_COND_S(nge, float32_is_unordered(1, FST1, FST0, &env->fpu->fp_status) || float32_lt(FST0, FST1, &env->fpu->fp_status))
FOP_COND_S(le, !float32_is_unordered(1, FST1, FST0, &env->fpu->fp_status) && float32_le(FST0, FST1, &env->fpu->fp_status))
FOP_COND_S(ngt, float32_is_unordered(1, FST1, FST0, &env->fpu->fp_status) || float32_le(FST0, FST1, &env->fpu->fp_status))

#define FOP_COND_PS(op, condl, condh)              \
void do_cmp_ps_ ## op (long cc)                    \
{                                                  \
    int cl = condl;                                \
    int ch = condh;                                \
    update_fcr31();                                \
    if (cl)                                        \
        SET_FP_COND(cc, env->fpu);                 \
    else                                           \
        CLEAR_FP_COND(cc, env->fpu);               \
    if (ch)                                        \
        SET_FP_COND(cc + 1, env->fpu);             \
    else                                           \
        CLEAR_FP_COND(cc + 1, env->fpu);           \
}                                                  \
void do_cmpabs_ps_ ## op (long cc)                 \
{                                                  \
    int cl, ch;                                    \
    FST0 = float32_abs(FST0);                      \
    FSTH0 = float32_abs(FSTH0);                    \
    FST1 = float32_abs(FST1);                      \
    FSTH1 = float32_abs(FSTH1);                    \
    cl = condl;                                    \
    ch = condh;                                    \
    update_fcr31();                                \
    if (cl)                                        \
        SET_FP_COND(cc, env->fpu);                 \
    else                                           \
        CLEAR_FP_COND(cc, env->fpu);               \
    if (ch)                                        \
        SET_FP_COND(cc + 1, env->fpu);             \
    else                                           \
        CLEAR_FP_COND(cc + 1, env->fpu);           \
}

/* NOTE: the comma operator will make "cond" evaluate to false,
 * but float*_is_unordered() is still called. */
FOP_COND_PS(f, (float32_is_unordered(0, FST1, FST0, &env->fpu->fp_status), 0),
               (float32_is_unordered(0, FSTH1, FSTH0, &env->fpu->fp_status), 0))
FOP_COND_PS(un, float32_is_unordered(0, FST1, FST0, &env->fpu->fp_status),
                float32_is_unordered(0, FSTH1, FSTH0, &env->fpu->fp_status))
FOP_COND_PS(eq, !float32_is_unordered(0, FST1, FST0, &env->fpu->fp_status) && float32_eq(FST0, FST1, &env->fpu->fp_status),
                !float32_is_unordered(0, FSTH1, FSTH0, &env->fpu->fp_status) && float32_eq(FSTH0, FSTH1, &env->fpu->fp_status))
FOP_COND_PS(ueq, float32_is_unordered(0, FST1, FST0, &env->fpu->fp_status) || float32_eq(FST0, FST1, &env->fpu->fp_status),
                 float32_is_unordered(0, FSTH1, FSTH0, &env->fpu->fp_status) || float32_eq(FSTH0, FSTH1, &env->fpu->fp_status))
FOP_COND_PS(olt, !float32_is_unordered(0, FST1, FST0, &env->fpu->fp_status) && float32_lt(FST0, FST1, &env->fpu->fp_status),
                 !float32_is_unordered(0, FSTH1, FSTH0, &env->fpu->fp_status) && float32_lt(FSTH0, FSTH1, &env->fpu->fp_status))
FOP_COND_PS(ult, float32_is_unordered(0, FST1, FST0, &env->fpu->fp_status) || float32_lt(FST0, FST1, &env->fpu->fp_status),
                 float32_is_unordered(0, FSTH1, FSTH0, &env->fpu->fp_status) || float32_lt(FSTH0, FSTH1, &env->fpu->fp_status))
FOP_COND_PS(ole, !float32_is_unordered(0, FST1, FST0, &env->fpu->fp_status) && float32_le(FST0, FST1, &env->fpu->fp_status),
                 !float32_is_unordered(0, FSTH1, FSTH0, &env->fpu->fp_status) && float32_le(FSTH0, FSTH1, &env->fpu->fp_status))
FOP_COND_PS(ule, float32_is_unordered(0, FST1, FST0, &env->fpu->fp_status) || float32_le(FST0, FST1, &env->fpu->fp_status),
                 float32_is_unordered(0, FSTH1, FSTH0, &env->fpu->fp_status) || float32_le(FSTH0, FSTH1, &env->fpu->fp_status))
/* NOTE: the comma operator will make "cond" evaluate to false,
 * but float*_is_unordered() is still called. */
FOP_COND_PS(sf, (float32_is_unordered(1, FST1, FST0, &env->fpu->fp_status), 0),
                (float32_is_unordered(1, FSTH1, FSTH0, &env->fpu->fp_status), 0))
FOP_COND_PS(ngle,float32_is_unordered(1, FST1, FST0, &env->fpu->fp_status),
                 float32_is_unordered(1, FSTH1, FSTH0, &env->fpu->fp_status))
FOP_COND_PS(seq, !float32_is_unordered(1, FST1, FST0, &env->fpu->fp_status) && float32_eq(FST0, FST1, &env->fpu->fp_status),
                 !float32_is_unordered(1, FSTH1, FSTH0, &env->fpu->fp_status) && float32_eq(FSTH0, FSTH1, &env->fpu->fp_status))
FOP_COND_PS(ngl, float32_is_unordered(1, FST1, FST0, &env->fpu->fp_status) || float32_eq(FST0, FST1, &env->fpu->fp_status),
                 float32_is_unordered(1, FSTH1, FSTH0, &env->fpu->fp_status) || float32_eq(FSTH0, FSTH1, &env->fpu->fp_status))
FOP_COND_PS(lt, !float32_is_unordered(1, FST1, FST0, &env->fpu->fp_status) && float32_lt(FST0, FST1, &env->fpu->fp_status),
                !float32_is_unordered(1, FSTH1, FSTH0, &env->fpu->fp_status) && float32_lt(FSTH0, FSTH1, &env->fpu->fp_status))
FOP_COND_PS(nge, float32_is_unordered(1, FST1, FST0, &env->fpu->fp_status) || float32_lt(FST0, FST1, &env->fpu->fp_status),
                 float32_is_unordered(1, FSTH1, FSTH0, &env->fpu->fp_status) || float32_lt(FSTH0, FSTH1, &env->fpu->fp_status))
FOP_COND_PS(le, !float32_is_unordered(1, FST1, FST0, &env->fpu->fp_status) && float32_le(FST0, FST1, &env->fpu->fp_status),
                !float32_is_unordered(1, FSTH1, FSTH0, &env->fpu->fp_status) && float32_le(FSTH0, FSTH1, &env->fpu->fp_status))
FOP_COND_PS(ngt, float32_is_unordered(1, FST1, FST0, &env->fpu->fp_status) || float32_le(FST0, FST1, &env->fpu->fp_status),
                 float32_is_unordered(1, FSTH1, FSTH0, &env->fpu->fp_status) || float32_le(FSTH0, FSTH1, &env->fpu->fp_status))