/*
 * MIPS emulation helpers for qemu.
 *
 * Copyright (c) 2004-2005 Jocelyn Mayer
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */
#include "exec.h"

#define GETPC() (__builtin_return_address(0))
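/* GETPC() is the host return address inside the current helper; the
   do_raise_exception_direct*() helpers below feed it to do_restore_state()
   so the guest CPU state can be resynchronized before the exception is
   raised. */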
/*****************************************************************************/
/* Exceptions processing helpers */
void do_raise_exception_err (uint32_t exception, int error_code)
{
    if (logfile && exception < 0x100)
        fprintf(logfile, "%s: %d %d\n", __func__, exception, error_code);
    env->exception_index = exception;
    env->error_code = error_code;
    cpu_loop_exit();
}

void do_raise_exception (uint32_t exception)
{
    do_raise_exception_err(exception, 0);
}
void do_restore_state (void *pc_ptr)
{
    TranslationBlock *tb;
    unsigned long pc = (unsigned long) pc_ptr;

    tb = tb_find_pc (pc);
    cpu_restore_state (tb, env, pc, NULL);
}
void do_raise_exception_direct_err (uint32_t exception, int error_code)
{
    do_restore_state (GETPC ());
    do_raise_exception_err (exception, error_code);
}

void do_raise_exception_direct (uint32_t exception)
{
    do_raise_exception_direct_err (exception, 0);
}
#if defined(TARGET_MIPSN32) || defined(TARGET_MIPS64)
#if TARGET_LONG_BITS > HOST_LONG_BITS
/* Those might call libgcc functions. */
void do_dsra (void)
{
    T0 = (int64_t)T0 >> T1;
}

void do_dsra32 (void)
{
    T0 = (int64_t)T0 >> (T1 + 32);
}

void do_drotr (void)
{
    target_ulong tmp;

    if (T1) {
        tmp = T0 << (0x40 - T1);
        T0 = (T0 >> T1) | tmp;
    }
}

void do_drotr32 (void)
{
    target_ulong tmp;

    if (T1) {
        tmp = T0 << (0x40 - (32 + T1));
        T0 = (T0 >> (32 + T1)) | tmp;
    }
}

void do_dsllv (void)
{
    T0 = T1 << (T0 & 0x3F);
}

void do_dsrav (void)
{
    T0 = (int64_t)T1 >> (T0 & 0x3F);
}

void do_dsrlv (void)
{
    T0 = T1 >> (T0 & 0x3F);
}

void do_drotrv (void)
{
    target_ulong tmp;

    T0 &= 0x3F;
    if (T0) {
        tmp = T1 << (0x40 - T0);
        T0 = (T1 >> T0) | tmp;
    } else
        T0 = T1;
}
#endif /* TARGET_LONG_BITS > HOST_LONG_BITS */
#endif /* TARGET_MIPSN32 || TARGET_MIPS64 */
/* 64 bits arithmetic for 32 bits hosts */
#if TARGET_LONG_BITS > HOST_LONG_BITS
static always_inline uint64_t get_HILO (void)
{
    return (env->HI[0][env->current_tc] << 32) | (uint32_t)env->LO[0][env->current_tc];
}

static always_inline void set_HILO (uint64_t HILO)
{
    env->LO[0][env->current_tc] = (int32_t)HILO;
    env->HI[0][env->current_tc] = (int32_t)(HILO >> 32);
}
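/* HI:LO are handled here as one packed 64-bit value: HI in the upper 32 bits,
   LO in the lower 32 bits.  As an illustration, MADD with HI:LO =
   0x00000001_00000000 and 32-bit operands 3 and 4 leaves
   HI:LO = 0x00000001_0000000C. */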
void do_mult (void)
{
    set_HILO((int64_t)(int32_t)T0 * (int64_t)(int32_t)T1);
}

void do_multu (void)
{
    set_HILO((uint64_t)(uint32_t)T0 * (uint64_t)(uint32_t)T1);
}

void do_madd (void)
{
    int64_t tmp;

    tmp = ((int64_t)(int32_t)T0 * (int64_t)(int32_t)T1);
    set_HILO((int64_t)get_HILO() + tmp);
}

void do_maddu (void)
{
    uint64_t tmp;

    tmp = ((uint64_t)(uint32_t)T0 * (uint64_t)(uint32_t)T1);
    set_HILO(get_HILO() + tmp);
}

void do_msub (void)
{
    int64_t tmp;

    tmp = ((int64_t)(int32_t)T0 * (int64_t)(int32_t)T1);
    set_HILO((int64_t)get_HILO() - tmp);
}

void do_msubu (void)
{
    uint64_t tmp;

    tmp = ((uint64_t)(uint32_t)T0 * (uint64_t)(uint32_t)T1);
    set_HILO(get_HILO() - tmp);
}
#endif /* TARGET_LONG_BITS > HOST_LONG_BITS */
#if HOST_LONG_BITS < 64
void do_div (void)
{
    /* 64bit datatypes because we may see overflow/underflow. */
    if (T1 != 0) {
        env->LO[0][env->current_tc] = (int32_t)((int64_t)(int32_t)T0 / (int32_t)T1);
        env->HI[0][env->current_tc] = (int32_t)((int64_t)(int32_t)T0 % (int32_t)T1);
    }
}
#endif
#if defined(TARGET_MIPSN32) || defined(TARGET_MIPS64)
void do_ddiv (void)
{
    if (T1 != 0) {
        lldiv_t res = lldiv((int64_t)T0, (int64_t)T1);
        env->LO[0][env->current_tc] = res.quot;
        env->HI[0][env->current_tc] = res.rem;
    }
}

#if TARGET_LONG_BITS > HOST_LONG_BITS
void do_ddivu (void)
{
    if (T1 != 0) {
        env->LO[0][env->current_tc] = T0 / T1;
        env->HI[0][env->current_tc] = T0 % T1;
    }
}
#endif
#endif /* TARGET_MIPSN32 || TARGET_MIPS64 */
#if defined(CONFIG_USER_ONLY)
void do_mfc0_random (void)
{
    cpu_abort(env, "mfc0 random\n");
}

void do_mfc0_count (void)
{
    cpu_abort(env, "mfc0 count\n");
}

void cpu_mips_store_count(CPUState *env, uint32_t value)
{
    cpu_abort(env, "mtc0 count\n");
}

void cpu_mips_store_compare(CPUState *env, uint32_t value)
{
    cpu_abort(env, "mtc0 compare\n");
}

void cpu_mips_start_count(CPUState *env)
{
    cpu_abort(env, "start count\n");
}

void cpu_mips_stop_count(CPUState *env)
{
    cpu_abort(env, "stop count\n");
}

void cpu_mips_update_irq(CPUState *env)
{
    cpu_abort(env, "mtc0 status / mtc0 cause\n");
}

void do_mtc0_status_debug(uint32_t old, uint32_t val)
{
    cpu_abort(env, "mtc0 status debug\n");
}

void do_mtc0_status_irqraise_debug (void)
{
    cpu_abort(env, "mtc0 status irqraise debug\n");
}

void cpu_mips_tlb_flush (CPUState *env, int flush_global)
{
    cpu_abort(env, "mips_tlb_flush\n");
}
#else

void do_mfc0_random (void)
{
    T0 = (int32_t)cpu_mips_get_random(env);
}

void do_mfc0_count (void)
{
    T0 = (int32_t)cpu_mips_get_count(env);
}

void do_mtc0_status_debug(uint32_t old, uint32_t val)
{
    fprintf(logfile, "Status %08x (%08x) => %08x (%08x) Cause %08x",
            old, old & env->CP0_Cause & CP0Ca_IP_mask,
            val, val & env->CP0_Cause & CP0Ca_IP_mask,
            env->CP0_Cause);
    (env->hflags & MIPS_HFLAG_UM) ? fputs(", UM\n", logfile)
                                  : fputs("\n", logfile);
}

void do_mtc0_status_irqraise_debug(void)
{
    fprintf(logfile, "Raise pending IRQs\n");
}
void fpu_handle_exception(void)
{
#ifdef CONFIG_SOFTFLOAT
    int flags = get_float_exception_flags(&env->fpu->fp_status);
    unsigned int cpuflags = 0, enable, cause = 0;

    enable = GET_FP_ENABLE(env->fpu->fcr31);

    /* determine current flags */
    if (flags & float_flag_invalid) {
        cpuflags |= FP_INVALID;
        cause |= FP_INVALID & enable;
    }
    if (flags & float_flag_divbyzero) {
        cpuflags |= FP_DIV0;
        cause |= FP_DIV0 & enable;
    }
    if (flags & float_flag_overflow) {
        cpuflags |= FP_OVERFLOW;
        cause |= FP_OVERFLOW & enable;
    }
    if (flags & float_flag_underflow) {
        cpuflags |= FP_UNDERFLOW;
        cause |= FP_UNDERFLOW & enable;
    }
    if (flags & float_flag_inexact) {
        cpuflags |= FP_INEXACT;
        cause |= FP_INEXACT & enable;
    }
    SET_FP_FLAGS(env->fpu->fcr31, cpuflags);
    SET_FP_CAUSE(env->fpu->fcr31, cause);
#else
    SET_FP_FLAGS(env->fpu->fcr31, 0);
    SET_FP_CAUSE(env->fpu->fcr31, 0);
#endif
}
void cpu_mips_tlb_flush (CPUState *env, int flush_global)
{
    /* Flush qemu's TLB and discard all shadowed entries. */
    tlb_flush (env, flush_global);
    env->tlb->tlb_in_use = env->tlb->nb_tlb;
}
static void r4k_mips_tlb_flush_extra (CPUState *env, int first)
{
    /* Discard entries from env->tlb[first] onwards. */
    while (env->tlb->tlb_in_use > first) {
        r4k_invalidate_tlb(env, --env->tlb->tlb_in_use, 0);
    }
}
static void r4k_fill_tlb (int idx)
{
    r4k_tlb_t *tlb;

    /* XXX: detect conflicting TLBs and raise a MCHECK exception when needed */
    tlb = &env->tlb->mmu.r4k.tlb[idx];
    tlb->VPN = env->CP0_EntryHi & (TARGET_PAGE_MASK << 1);
#if defined(TARGET_MIPSN32) || defined(TARGET_MIPS64)
    tlb->VPN &= env->SEGMask;
#endif
    tlb->ASID = env->CP0_EntryHi & 0xFF;
    tlb->PageMask = env->CP0_PageMask;
    tlb->G = env->CP0_EntryLo0 & env->CP0_EntryLo1 & 1;
    tlb->V0 = (env->CP0_EntryLo0 & 2) != 0;
    tlb->D0 = (env->CP0_EntryLo0 & 4) != 0;
    tlb->C0 = (env->CP0_EntryLo0 >> 3) & 0x7;
    tlb->PFN[0] = (env->CP0_EntryLo0 >> 6) << 12;
    tlb->V1 = (env->CP0_EntryLo1 & 2) != 0;
    tlb->D1 = (env->CP0_EntryLo1 & 4) != 0;
    tlb->C1 = (env->CP0_EntryLo1 >> 3) & 0x7;
    tlb->PFN[1] = (env->CP0_EntryLo1 >> 6) << 12;
}
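/* EntryLo layout used above: bit 0 = G, bit 1 = V, bit 2 = D, bits 5..3 = C
   (cache coherency attribute), and bits 6 and up hold the PFN; shifting the
   PFN left by 12 turns it back into a physical byte address. */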
void r4k_do_tlbwi (void)
{
    /* Discard cached TLB entries.  We could avoid doing this if the
       tlbwi is just upgrading access permissions on the current entry;
       that might be a further win. */
    r4k_mips_tlb_flush_extra (env, env->tlb->nb_tlb);

    r4k_invalidate_tlb(env, env->CP0_Index % env->tlb->nb_tlb, 0);
    r4k_fill_tlb(env->CP0_Index % env->tlb->nb_tlb);
}
void r4k_do_tlbwr (void)
{
    int r = cpu_mips_get_random(env);

    r4k_invalidate_tlb(env, r, 1);
    r4k_fill_tlb(r);
}
void r4k_do_tlbp (void)
{
    r4k_tlb_t *tlb;
    target_ulong mask;
    target_ulong tag;
    target_ulong VPN;
    uint8_t ASID;
    int i;

    ASID = env->CP0_EntryHi & 0xFF;
    for (i = 0; i < env->tlb->nb_tlb; i++) {
        tlb = &env->tlb->mmu.r4k.tlb[i];
        /* 1k pages are not supported. */
        mask = tlb->PageMask | ~(TARGET_PAGE_MASK << 1);
        tag = env->CP0_EntryHi & ~mask;
        VPN = tlb->VPN & ~mask;
        /* Check ASID, virtual page number & size */
        if ((tlb->G == 1 || tlb->ASID == ASID) && VPN == tag) {
            /* TLB match */
            env->CP0_Index = i;
            break;
        }
    }
    if (i == env->tlb->nb_tlb) {
        /* No match.  Discard any shadow entries, if any of them match. */
        for (i = env->tlb->nb_tlb; i < env->tlb->tlb_in_use; i++) {
            tlb = &env->tlb->mmu.r4k.tlb[i];
            /* 1k pages are not supported. */
            mask = tlb->PageMask | ~(TARGET_PAGE_MASK << 1);
            tag = env->CP0_EntryHi & ~mask;
            VPN = tlb->VPN & ~mask;
            /* Check ASID, virtual page number & size */
            if ((tlb->G == 1 || tlb->ASID == ASID) && VPN == tag) {
                r4k_mips_tlb_flush_extra (env, i);
                break;
            }
        }

        env->CP0_Index |= 0x80000000;
    }
}
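/* Setting bit 31 of CP0 Index above is how TLBP reports "no matching entry"
   to the guest; a successful probe leaves the matching index there instead. */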
void r4k_do_tlbr (void)
{
    r4k_tlb_t *tlb;
    uint8_t ASID;

    ASID = env->CP0_EntryHi & 0xFF;
    tlb = &env->tlb->mmu.r4k.tlb[env->CP0_Index % env->tlb->nb_tlb];

    /* If this will change the current ASID, flush qemu's TLB. */
    if (ASID != tlb->ASID)
        cpu_mips_tlb_flush (env, 1);

    r4k_mips_tlb_flush_extra(env, env->tlb->nb_tlb);

    env->CP0_EntryHi = tlb->VPN | tlb->ASID;
    env->CP0_PageMask = tlb->PageMask;
    env->CP0_EntryLo0 = tlb->G | (tlb->V0 << 1) | (tlb->D0 << 2) |
                        (tlb->C0 << 3) | (tlb->PFN[0] >> 6);
    env->CP0_EntryLo1 = tlb->G | (tlb->V1 << 1) | (tlb->D1 << 2) |
                        (tlb->C1 << 3) | (tlb->PFN[1] >> 6);
}
#endif /* !CONFIG_USER_ONLY */
void dump_ldst (const unsigned char *func)
{
    if (loglevel)
        fprintf(logfile, "%s => " TARGET_FMT_lx " " TARGET_FMT_lx "\n", __func__, T0, T1);
}

void dump_sc (void)
{
    if (loglevel) {
        fprintf(logfile, "%s " TARGET_FMT_lx " at " TARGET_FMT_lx " (" TARGET_FMT_lx ")\n", __func__,
                T1, T0, env->CP0_LLAddr);
    }
}
void debug_pre_eret (void)
{
    fprintf(logfile, "ERET: PC " TARGET_FMT_lx " EPC " TARGET_FMT_lx,
            env->PC[env->current_tc], env->CP0_EPC);
    if (env->CP0_Status & (1 << CP0St_ERL))
        fprintf(logfile, " ErrorEPC " TARGET_FMT_lx, env->CP0_ErrorEPC);
    if (env->hflags & MIPS_HFLAG_DM)
        fprintf(logfile, " DEPC " TARGET_FMT_lx, env->CP0_DEPC);
    fputs("\n", logfile);
}
void debug_post_eret (void)
{
    fprintf(logfile, " => PC " TARGET_FMT_lx " EPC " TARGET_FMT_lx,
            env->PC[env->current_tc], env->CP0_EPC);
    if (env->CP0_Status & (1 << CP0St_ERL))
        fprintf(logfile, " ErrorEPC " TARGET_FMT_lx, env->CP0_ErrorEPC);
    if (env->hflags & MIPS_HFLAG_DM)
        fprintf(logfile, " DEPC " TARGET_FMT_lx, env->CP0_DEPC);
    if (env->hflags & MIPS_HFLAG_UM)
        fputs(", UM\n", logfile);
    else
        fputs("\n", logfile);
}
void do_pmon (int function)
{
    function /= 2;
    switch (function) {
    case 2: /* TODO: char inbyte(int waitflag); */
        if (env->gpr[4][env->current_tc] == 0)
            env->gpr[2][env->current_tc] = -1;
        /* Fall through */
    case 11: /* TODO: char inbyte (void); */
        env->gpr[2][env->current_tc] = -1;
        break;
    case 3:
    case 12:
        printf("%c", (char)(env->gpr[4][env->current_tc] & 0xFF));
        break;
    case 17:
        break;
    case 158:
        {
            unsigned char *fmt = (void *)(unsigned long)env->gpr[4][env->current_tc];
            printf("%s", fmt);
        }
        break;
    }
}
#if !defined(CONFIG_USER_ONLY)

static void do_unaligned_access (target_ulong addr, int is_write, int is_user, void *retaddr);

#define MMUSUFFIX _mmu
#define ALIGNED_ONLY

#define SHIFT 0
#include "softmmu_template.h"

#define SHIFT 1
#include "softmmu_template.h"

#define SHIFT 2
#include "softmmu_template.h"

#define SHIFT 3
#include "softmmu_template.h"
static void do_unaligned_access (target_ulong addr, int is_write, int is_user, void *retaddr)
{
    env->CP0_BadVAddr = addr;
    do_restore_state (retaddr);
    do_raise_exception ((is_write == 1) ? EXCP_AdES : EXCP_AdEL);
}
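/* Address error on store (AdES) is raised for writes and address error on
   load/fetch (AdEL) otherwise, with CP0 BadVAddr already pointing at the
   faulting address. */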
void tlb_fill (target_ulong addr, int is_write, int mmu_idx, void *retaddr)
{
    TranslationBlock *tb;
    CPUState *saved_env;
    unsigned long pc;
    int ret;

    /* XXX: hack to restore env in all cases, even if not called from
       generated code */
    saved_env = env;
    env = cpu_single_env;
    ret = cpu_mips_handle_mmu_fault(env, addr, is_write, mmu_idx, 1);
    if (ret) {
        if (retaddr) {
            /* now we have a real cpu fault */
            pc = (unsigned long)retaddr;
            tb = tb_find_pc(pc);
            if (tb) {
                /* the PC is inside the translated code. It means that we have
                   a virtual CPU fault */
                cpu_restore_state(tb, env, pc, NULL);
            }
        }
        do_raise_exception_err(env->exception_index, env->error_code);
    }
    env = saved_env;
}
/* Complex FPU operations which may need stack space. */

#define FLOAT_SIGN32 (1 << 31)
#define FLOAT_SIGN64 (1ULL << 63)
#define FLOAT_ONE32 (0x3f8 << 20)
#define FLOAT_ONE64 (0x3ffULL << 52)
#define FLOAT_TWO32 (1 << 30)
#define FLOAT_TWO64 (1ULL << 62)
#define FLOAT_QNAN32 0x7fbfffff
#define FLOAT_QNAN64 0x7ff7ffffffffffffULL
#define FLOAT_SNAN32 0x7fffffff
#define FLOAT_SNAN64 0x7fffffffffffffffULL
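/* FLOAT_ONE32/FLOAT_ONE64 and FLOAT_TWO32/FLOAT_TWO64 are the raw IEEE-754
   encodings of 1.0 and 2.0; the QNAN/SNAN values are canned NaN bit patterns
   used where the helpers below need to force a NaN result. */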
/* convert MIPS rounding mode in FCR31 to IEEE library */
unsigned int ieee_rm[] = {
    float_round_nearest_even,
    float_round_to_zero,
    float_round_up,
    float_round_down
};

#define RESTORE_ROUNDING_MODE \
    set_float_rounding_mode(ieee_rm[env->fpu->fcr31 & 3], &env->fpu->fp_status)
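/* The RM field in FCR31 (bits 1:0) encodes 0 = round to nearest,
   1 = toward zero, 2 = toward +infinity, 3 = toward -infinity; ieee_rm[]
   maps that encoding onto the softfloat constants, and RESTORE_ROUNDING_MODE
   re-applies whatever mode the guest last programmed. */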
void do_cfc1 (int reg)
{
    switch (reg) {
    case 0:
        T0 = (int32_t)env->fpu->fcr0;
        break;
    case 25:
        T0 = ((env->fpu->fcr31 >> 24) & 0xfe) | ((env->fpu->fcr31 >> 23) & 0x1);
        break;
    case 26:
        T0 = env->fpu->fcr31 & 0x0003f07c;
        break;
    case 28:
        T0 = (env->fpu->fcr31 & 0x00000f83) | ((env->fpu->fcr31 >> 22) & 0x4);
        break;
    default:
        T0 = (int32_t)env->fpu->fcr31;
        break;
    }
}
void do_ctc1 (int reg)
{
    switch (reg) {
    case 25:
        if (T0 & 0xffffff00)
            return;
        env->fpu->fcr31 = (env->fpu->fcr31 & 0x017fffff) | ((T0 & 0xfe) << 24) |
                          ((T0 & 0x1) << 23);
        break;
    case 26:
        if (T0 & 0x007c0000)
            return;
        env->fpu->fcr31 = (env->fpu->fcr31 & 0xfffc0f83) | (T0 & 0x0003f07c);
        break;
    case 28:
        if (T0 & 0x007c0000)
            return;
        env->fpu->fcr31 = (env->fpu->fcr31 & 0xfefff07c) | (T0 & 0x00000f83) |
                          ((T0 & 0x4) << 22);
        break;
    case 31:
        if (T0 & 0x007c0000)
            return;
        env->fpu->fcr31 = T0;
        break;
    default:
        return;
    }
    /* set rounding mode */
    RESTORE_ROUNDING_MODE;
    set_float_exception_flags(0, &env->fpu->fp_status);
    if ((GET_FP_ENABLE(env->fpu->fcr31) | 0x20) & GET_FP_CAUSE(env->fpu->fcr31))
        do_raise_exception(EXCP_FPE);
}
static always_inline char ieee_ex_to_mips(char xcpt)
{
    return (xcpt & float_flag_inexact) >> 5 |
           (xcpt & float_flag_underflow) >> 3 |
           (xcpt & float_flag_overflow) >> 1 |
           (xcpt & float_flag_divbyzero) << 1 |
           (xcpt & float_flag_invalid) << 4;
}

static always_inline char mips_ex_to_ieee(char xcpt)
{
    return (xcpt & FP_INEXACT) << 5 |
           (xcpt & FP_UNDERFLOW) << 3 |
           (xcpt & FP_OVERFLOW) << 1 |
           (xcpt & FP_DIV0) >> 1 |
           (xcpt & FP_INVALID) >> 4;
}
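/* The shifts above simply move each softfloat status bit to the position of
   the corresponding MIPS FCSR cause/flag bit (inexact, underflow, overflow,
   divide-by-zero, invalid), and mips_ex_to_ieee() performs the inverse
   mapping. */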
static always_inline void update_fcr31(void)
{
    int tmp = ieee_ex_to_mips(get_float_exception_flags(&env->fpu->fp_status));

    SET_FP_CAUSE(env->fpu->fcr31, tmp);
    if (GET_FP_ENABLE(env->fpu->fcr31) & tmp)
        do_raise_exception(EXCP_FPE);
    else
        UPDATE_FP_FLAGS(env->fpu->fcr31, tmp);
}
#define FLOAT_OP(name, p) void do_float_##name##_##p(void)

FLOAT_OP(cvtd, s)
{
    set_float_exception_flags(0, &env->fpu->fp_status);
    FDT2 = float32_to_float64(FST0, &env->fpu->fp_status);
    update_fcr31();
}

FLOAT_OP(cvtd, w)
{
    set_float_exception_flags(0, &env->fpu->fp_status);
    FDT2 = int32_to_float64(WT0, &env->fpu->fp_status);
    update_fcr31();
}

FLOAT_OP(cvtd, l)
{
    set_float_exception_flags(0, &env->fpu->fp_status);
    FDT2 = int64_to_float64(DT0, &env->fpu->fp_status);
    update_fcr31();
}

FLOAT_OP(cvtl, d)
{
    set_float_exception_flags(0, &env->fpu->fp_status);
    DT2 = float64_to_int64(FDT0, &env->fpu->fp_status);
    update_fcr31();
    if (GET_FP_CAUSE(env->fpu->fcr31) & (FP_OVERFLOW | FP_INVALID))
        DT2 = FLOAT_SNAN64;
}

FLOAT_OP(cvtl, s)
{
    set_float_exception_flags(0, &env->fpu->fp_status);
    DT2 = float32_to_int64(FST0, &env->fpu->fp_status);
    update_fcr31();
    if (GET_FP_CAUSE(env->fpu->fcr31) & (FP_OVERFLOW | FP_INVALID))
        DT2 = FLOAT_SNAN64;
}

FLOAT_OP(cvtps, pw)
{
    set_float_exception_flags(0, &env->fpu->fp_status);
    FST2 = int32_to_float32(WT0, &env->fpu->fp_status);
    FSTH2 = int32_to_float32(WTH0, &env->fpu->fp_status);
    update_fcr31();
}

FLOAT_OP(cvtpw, ps)
{
    set_float_exception_flags(0, &env->fpu->fp_status);
    WT2 = float32_to_int32(FST0, &env->fpu->fp_status);
    WTH2 = float32_to_int32(FSTH0, &env->fpu->fp_status);
    update_fcr31();
    if (GET_FP_CAUSE(env->fpu->fcr31) & (FP_OVERFLOW | FP_INVALID))
        WT2 = FLOAT_SNAN32;
}

FLOAT_OP(cvts, d)
{
    set_float_exception_flags(0, &env->fpu->fp_status);
    FST2 = float64_to_float32(FDT0, &env->fpu->fp_status);
    update_fcr31();
}

FLOAT_OP(cvts, w)
{
    set_float_exception_flags(0, &env->fpu->fp_status);
    FST2 = int32_to_float32(WT0, &env->fpu->fp_status);
    update_fcr31();
}

FLOAT_OP(cvts, l)
{
    set_float_exception_flags(0, &env->fpu->fp_status);
    FST2 = int64_to_float32(DT0, &env->fpu->fp_status);
    update_fcr31();
}
FLOAT_OP(cvts, pl)
{
    set_float_exception_flags(0, &env->fpu->fp_status);
    WT2 = WT0;
    update_fcr31();
}

FLOAT_OP(cvts, pu)
{
    set_float_exception_flags(0, &env->fpu->fp_status);
    WT2 = WTH0;
    update_fcr31();
}
FLOAT_OP(cvtw, s)
{
    set_float_exception_flags(0, &env->fpu->fp_status);
    WT2 = float32_to_int32(FST0, &env->fpu->fp_status);
    update_fcr31();
    if (GET_FP_CAUSE(env->fpu->fcr31) & (FP_OVERFLOW | FP_INVALID))
        WT2 = FLOAT_SNAN32;
}

FLOAT_OP(cvtw, d)
{
    set_float_exception_flags(0, &env->fpu->fp_status);
    WT2 = float64_to_int32(FDT0, &env->fpu->fp_status);
    update_fcr31();
    if (GET_FP_CAUSE(env->fpu->fcr31) & (FP_OVERFLOW | FP_INVALID))
        WT2 = FLOAT_SNAN32;
}
FLOAT_OP(roundl, d)
{
    set_float_rounding_mode(float_round_nearest_even, &env->fpu->fp_status);
    DT2 = float64_to_int64(FDT0, &env->fpu->fp_status);
    RESTORE_ROUNDING_MODE;
    update_fcr31();
    if (GET_FP_CAUSE(env->fpu->fcr31) & (FP_OVERFLOW | FP_INVALID))
        DT2 = FLOAT_SNAN64;
}

FLOAT_OP(roundl, s)
{
    set_float_rounding_mode(float_round_nearest_even, &env->fpu->fp_status);
    DT2 = float32_to_int64(FST0, &env->fpu->fp_status);
    RESTORE_ROUNDING_MODE;
    update_fcr31();
    if (GET_FP_CAUSE(env->fpu->fcr31) & (FP_OVERFLOW | FP_INVALID))
        DT2 = FLOAT_SNAN64;
}

FLOAT_OP(roundw, d)
{
    set_float_rounding_mode(float_round_nearest_even, &env->fpu->fp_status);
    WT2 = float64_to_int32(FDT0, &env->fpu->fp_status);
    RESTORE_ROUNDING_MODE;
    update_fcr31();
    if (GET_FP_CAUSE(env->fpu->fcr31) & (FP_OVERFLOW | FP_INVALID))
        WT2 = FLOAT_SNAN32;
}

FLOAT_OP(roundw, s)
{
    set_float_rounding_mode(float_round_nearest_even, &env->fpu->fp_status);
    WT2 = float32_to_int32(FST0, &env->fpu->fp_status);
    RESTORE_ROUNDING_MODE;
    update_fcr31();
    if (GET_FP_CAUSE(env->fpu->fcr31) & (FP_OVERFLOW | FP_INVALID))
        WT2 = FLOAT_SNAN32;
}

FLOAT_OP(truncl, d)
{
    DT2 = float64_to_int64_round_to_zero(FDT0, &env->fpu->fp_status);
    update_fcr31();
    if (GET_FP_CAUSE(env->fpu->fcr31) & (FP_OVERFLOW | FP_INVALID))
        DT2 = FLOAT_SNAN64;
}

FLOAT_OP(truncl, s)
{
    DT2 = float32_to_int64_round_to_zero(FST0, &env->fpu->fp_status);
    update_fcr31();
    if (GET_FP_CAUSE(env->fpu->fcr31) & (FP_OVERFLOW | FP_INVALID))
        DT2 = FLOAT_SNAN64;
}

FLOAT_OP(truncw, d)
{
    WT2 = float64_to_int32_round_to_zero(FDT0, &env->fpu->fp_status);
    update_fcr31();
    if (GET_FP_CAUSE(env->fpu->fcr31) & (FP_OVERFLOW | FP_INVALID))
        WT2 = FLOAT_SNAN32;
}

FLOAT_OP(truncw, s)
{
    WT2 = float32_to_int32_round_to_zero(FST0, &env->fpu->fp_status);
    update_fcr31();
    if (GET_FP_CAUSE(env->fpu->fcr31) & (FP_OVERFLOW | FP_INVALID))
        WT2 = FLOAT_SNAN32;
}

FLOAT_OP(ceill, d)
{
    set_float_rounding_mode(float_round_up, &env->fpu->fp_status);
    DT2 = float64_to_int64(FDT0, &env->fpu->fp_status);
    RESTORE_ROUNDING_MODE;
    update_fcr31();
    if (GET_FP_CAUSE(env->fpu->fcr31) & (FP_OVERFLOW | FP_INVALID))
        DT2 = FLOAT_SNAN64;
}

FLOAT_OP(ceill, s)
{
    set_float_rounding_mode(float_round_up, &env->fpu->fp_status);
    DT2 = float32_to_int64(FST0, &env->fpu->fp_status);
    RESTORE_ROUNDING_MODE;
    update_fcr31();
    if (GET_FP_CAUSE(env->fpu->fcr31) & (FP_OVERFLOW | FP_INVALID))
        DT2 = FLOAT_SNAN64;
}

FLOAT_OP(ceilw, d)
{
    set_float_rounding_mode(float_round_up, &env->fpu->fp_status);
    WT2 = float64_to_int32(FDT0, &env->fpu->fp_status);
    RESTORE_ROUNDING_MODE;
    update_fcr31();
    if (GET_FP_CAUSE(env->fpu->fcr31) & (FP_OVERFLOW | FP_INVALID))
        WT2 = FLOAT_SNAN32;
}

FLOAT_OP(ceilw, s)
{
    set_float_rounding_mode(float_round_up, &env->fpu->fp_status);
    WT2 = float32_to_int32(FST0, &env->fpu->fp_status);
    RESTORE_ROUNDING_MODE;
    update_fcr31();
    if (GET_FP_CAUSE(env->fpu->fcr31) & (FP_OVERFLOW | FP_INVALID))
        WT2 = FLOAT_SNAN32;
}

FLOAT_OP(floorl, d)
{
    set_float_rounding_mode(float_round_down, &env->fpu->fp_status);
    DT2 = float64_to_int64(FDT0, &env->fpu->fp_status);
    RESTORE_ROUNDING_MODE;
    update_fcr31();
    if (GET_FP_CAUSE(env->fpu->fcr31) & (FP_OVERFLOW | FP_INVALID))
        DT2 = FLOAT_SNAN64;
}

FLOAT_OP(floorl, s)
{
    set_float_rounding_mode(float_round_down, &env->fpu->fp_status);
    DT2 = float32_to_int64(FST0, &env->fpu->fp_status);
    RESTORE_ROUNDING_MODE;
    update_fcr31();
    if (GET_FP_CAUSE(env->fpu->fcr31) & (FP_OVERFLOW | FP_INVALID))
        DT2 = FLOAT_SNAN64;
}

FLOAT_OP(floorw, d)
{
    set_float_rounding_mode(float_round_down, &env->fpu->fp_status);
    WT2 = float64_to_int32(FDT0, &env->fpu->fp_status);
    RESTORE_ROUNDING_MODE;
    update_fcr31();
    if (GET_FP_CAUSE(env->fpu->fcr31) & (FP_OVERFLOW | FP_INVALID))
        WT2 = FLOAT_SNAN32;
}

FLOAT_OP(floorw, s)
{
    set_float_rounding_mode(float_round_down, &env->fpu->fp_status);
    WT2 = float32_to_int32(FST0, &env->fpu->fp_status);
    RESTORE_ROUNDING_MODE;
    update_fcr31();
    if (GET_FP_CAUSE(env->fpu->fcr31) & (FP_OVERFLOW | FP_INVALID))
        WT2 = FLOAT_SNAN32;
}
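/* The round/trunc/ceil/floor helpers above temporarily force the softfloat
   rounding mode for the conversion, after which RESTORE_ROUNDING_MODE puts
   back whatever the guest has programmed in FCR31. */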
/* MIPS specific unary operations */
FLOAT_OP(recip, d)
{
    set_float_exception_flags(0, &env->fpu->fp_status);
    FDT2 = float64_div(FLOAT_ONE64, FDT0, &env->fpu->fp_status);
    update_fcr31();
}

FLOAT_OP(recip, s)
{
    set_float_exception_flags(0, &env->fpu->fp_status);
    FST2 = float32_div(FLOAT_ONE32, FST0, &env->fpu->fp_status);
    update_fcr31();
}

FLOAT_OP(rsqrt, d)
{
    set_float_exception_flags(0, &env->fpu->fp_status);
    FDT2 = float64_sqrt(FDT0, &env->fpu->fp_status);
    FDT2 = float64_div(FLOAT_ONE64, FDT2, &env->fpu->fp_status);
    update_fcr31();
}

FLOAT_OP(rsqrt, s)
{
    set_float_exception_flags(0, &env->fpu->fp_status);
    FST2 = float32_sqrt(FST0, &env->fpu->fp_status);
    FST2 = float32_div(FLOAT_ONE32, FST2, &env->fpu->fp_status);
    update_fcr31();
}

FLOAT_OP(recip1, d)
{
    set_float_exception_flags(0, &env->fpu->fp_status);
    FDT2 = float64_div(FLOAT_ONE64, FDT0, &env->fpu->fp_status);
    update_fcr31();
}

FLOAT_OP(recip1, s)
{
    set_float_exception_flags(0, &env->fpu->fp_status);
    FST2 = float32_div(FLOAT_ONE32, FST0, &env->fpu->fp_status);
    update_fcr31();
}

FLOAT_OP(recip1, ps)
{
    set_float_exception_flags(0, &env->fpu->fp_status);
    FST2 = float32_div(FLOAT_ONE32, FST0, &env->fpu->fp_status);
    FSTH2 = float32_div(FLOAT_ONE32, FSTH0, &env->fpu->fp_status);
    update_fcr31();
}

FLOAT_OP(rsqrt1, d)
{
    set_float_exception_flags(0, &env->fpu->fp_status);
    FDT2 = float64_sqrt(FDT0, &env->fpu->fp_status);
    FDT2 = float64_div(FLOAT_ONE64, FDT2, &env->fpu->fp_status);
    update_fcr31();
}

FLOAT_OP(rsqrt1, s)
{
    set_float_exception_flags(0, &env->fpu->fp_status);
    FST2 = float32_sqrt(FST0, &env->fpu->fp_status);
    FST2 = float32_div(FLOAT_ONE32, FST2, &env->fpu->fp_status);
    update_fcr31();
}

FLOAT_OP(rsqrt1, ps)
{
    set_float_exception_flags(0, &env->fpu->fp_status);
    FST2 = float32_sqrt(FST0, &env->fpu->fp_status);
    FSTH2 = float32_sqrt(FSTH0, &env->fpu->fp_status);
    FST2 = float32_div(FLOAT_ONE32, FST2, &env->fpu->fp_status);
    FSTH2 = float32_div(FLOAT_ONE32, FSTH2, &env->fpu->fp_status);
    update_fcr31();
}
/* binary operations */
#define FLOAT_BINOP(name) \
FLOAT_OP(name, d)         \
{                         \
    set_float_exception_flags(0, &env->fpu->fp_status);            \
    FDT2 = float64_ ## name (FDT0, FDT1, &env->fpu->fp_status);    \
    update_fcr31();                                                \
    if (GET_FP_CAUSE(env->fpu->fcr31) & FP_INVALID)                \
        FDT2 = FLOAT_QNAN64;                                       \
}                         \
FLOAT_OP(name, s)         \
{                         \
    set_float_exception_flags(0, &env->fpu->fp_status);            \
    FST2 = float32_ ## name (FST0, FST1, &env->fpu->fp_status);    \
    update_fcr31();                                                \
    if (GET_FP_CAUSE(env->fpu->fcr31) & FP_INVALID)                \
        FST2 = FLOAT_QNAN32;                                       \
}                         \
FLOAT_OP(name, ps)        \
{                         \
    set_float_exception_flags(0, &env->fpu->fp_status);            \
    FST2 = float32_ ## name (FST0, FST1, &env->fpu->fp_status);    \
    FSTH2 = float32_ ## name (FSTH0, FSTH1, &env->fpu->fp_status); \
    update_fcr31();                                                \
    if (GET_FP_CAUSE(env->fpu->fcr31) & FP_INVALID) {              \
        FST2 = FLOAT_QNAN32;                                       \
        FSTH2 = FLOAT_QNAN32;                                      \
    }                                                              \
}

FLOAT_BINOP(add)
FLOAT_BINOP(sub)
FLOAT_BINOP(mul)
FLOAT_BINOP(div)
#undef FLOAT_BINOP
/* MIPS specific binary operations */
FLOAT_OP(recip2, d)
{
    set_float_exception_flags(0, &env->fpu->fp_status);
    FDT2 = float64_mul(FDT0, FDT2, &env->fpu->fp_status);
    FDT2 = float64_sub(FDT2, FLOAT_ONE64, &env->fpu->fp_status) ^ FLOAT_SIGN64;
    update_fcr31();
}

FLOAT_OP(recip2, s)
{
    set_float_exception_flags(0, &env->fpu->fp_status);
    FST2 = float32_mul(FST0, FST2, &env->fpu->fp_status);
    FST2 = float32_sub(FST2, FLOAT_ONE32, &env->fpu->fp_status) ^ FLOAT_SIGN32;
    update_fcr31();
}

FLOAT_OP(recip2, ps)
{
    set_float_exception_flags(0, &env->fpu->fp_status);
    FST2 = float32_mul(FST0, FST2, &env->fpu->fp_status);
    FSTH2 = float32_mul(FSTH0, FSTH2, &env->fpu->fp_status);
    FST2 = float32_sub(FST2, FLOAT_ONE32, &env->fpu->fp_status) ^ FLOAT_SIGN32;
    FSTH2 = float32_sub(FSTH2, FLOAT_ONE32, &env->fpu->fp_status) ^ FLOAT_SIGN32;
    update_fcr31();
}

FLOAT_OP(rsqrt2, d)
{
    set_float_exception_flags(0, &env->fpu->fp_status);
    FDT2 = float64_mul(FDT0, FDT2, &env->fpu->fp_status);
    FDT2 = float64_sub(FDT2, FLOAT_ONE64, &env->fpu->fp_status);
    FDT2 = float64_div(FDT2, FLOAT_TWO64, &env->fpu->fp_status) ^ FLOAT_SIGN64;
    update_fcr31();
}

FLOAT_OP(rsqrt2, s)
{
    set_float_exception_flags(0, &env->fpu->fp_status);
    FST2 = float32_mul(FST0, FST2, &env->fpu->fp_status);
    FST2 = float32_sub(FST2, FLOAT_ONE32, &env->fpu->fp_status);
    FST2 = float32_div(FST2, FLOAT_TWO32, &env->fpu->fp_status) ^ FLOAT_SIGN32;
    update_fcr31();
}

FLOAT_OP(rsqrt2, ps)
{
    set_float_exception_flags(0, &env->fpu->fp_status);
    FST2 = float32_mul(FST0, FST2, &env->fpu->fp_status);
    FSTH2 = float32_mul(FSTH0, FSTH2, &env->fpu->fp_status);
    FST2 = float32_sub(FST2, FLOAT_ONE32, &env->fpu->fp_status);
    FSTH2 = float32_sub(FSTH2, FLOAT_ONE32, &env->fpu->fp_status);
    FST2 = float32_div(FST2, FLOAT_TWO32, &env->fpu->fp_status) ^ FLOAT_SIGN32;
    FSTH2 = float32_div(FSTH2, FLOAT_TWO32, &env->fpu->fp_status) ^ FLOAT_SIGN32;
    update_fcr31();
}

FLOAT_OP(addr, ps)
{
    set_float_exception_flags(0, &env->fpu->fp_status);
    FST2 = float32_add (FST0, FSTH0, &env->fpu->fp_status);
    FSTH2 = float32_add (FST1, FSTH1, &env->fpu->fp_status);
    update_fcr31();
}

FLOAT_OP(mulr, ps)
{
    set_float_exception_flags(0, &env->fpu->fp_status);
    FST2 = float32_mul (FST0, FSTH0, &env->fpu->fp_status);
    FSTH2 = float32_mul (FST1, FSTH1, &env->fpu->fp_status);
    update_fcr31();
}
/* compare operations */
#define FOP_COND_D(op, cond)                   \
void do_cmp_d_ ## op (long cc)                 \
{                                              \
    int c = cond;                              \
    update_fcr31();                            \
    if (c)                                     \
        SET_FP_COND(cc, env->fpu);             \
    else                                       \
        CLEAR_FP_COND(cc, env->fpu);           \
}                                              \
void do_cmpabs_d_ ## op (long cc)              \
{                                              \
    int c;                                     \
    FDT0 &= ~FLOAT_SIGN64;                     \
    FDT1 &= ~FLOAT_SIGN64;                     \
    c = cond;                                  \
    update_fcr31();                            \
    if (c)                                     \
        SET_FP_COND(cc, env->fpu);             \
    else                                       \
        CLEAR_FP_COND(cc, env->fpu);           \
}
int float64_is_unordered(int sig, float64 a, float64 b STATUS_PARAM)
{
    if (float64_is_signaling_nan(a) ||
        float64_is_signaling_nan(b) ||
        (sig && (float64_is_nan(a) || float64_is_nan(b)))) {
        float_raise(float_flag_invalid, status);
        return 1;
    } else if (float64_is_nan(a) || float64_is_nan(b)) {
        return 1;
    } else {
        return 0;
    }
}
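/* When "sig" is non-zero the comparison is a signaling one: quiet NaN
   operands also raise the IEEE invalid exception.  This is what distinguishes
   the sf/ngle/seq/ngl/lt/nge/le/ngt predicates below from the quiet
   f/un/eq/ueq/olt/ult/ole/ule ones. */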
/* NOTE: the comma operator will make "cond" evaluate to false,
 * but float*_is_unordered() is still called. */
FOP_COND_D(f,   (float64_is_unordered(0, FDT1, FDT0, &env->fpu->fp_status), 0))
FOP_COND_D(un,  float64_is_unordered(0, FDT1, FDT0, &env->fpu->fp_status))
FOP_COND_D(eq,  !float64_is_unordered(0, FDT1, FDT0, &env->fpu->fp_status) && float64_eq(FDT0, FDT1, &env->fpu->fp_status))
FOP_COND_D(ueq, float64_is_unordered(0, FDT1, FDT0, &env->fpu->fp_status)  || float64_eq(FDT0, FDT1, &env->fpu->fp_status))
FOP_COND_D(olt, !float64_is_unordered(0, FDT1, FDT0, &env->fpu->fp_status) && float64_lt(FDT0, FDT1, &env->fpu->fp_status))
FOP_COND_D(ult, float64_is_unordered(0, FDT1, FDT0, &env->fpu->fp_status)  || float64_lt(FDT0, FDT1, &env->fpu->fp_status))
FOP_COND_D(ole, !float64_is_unordered(0, FDT1, FDT0, &env->fpu->fp_status) && float64_le(FDT0, FDT1, &env->fpu->fp_status))
FOP_COND_D(ule, float64_is_unordered(0, FDT1, FDT0, &env->fpu->fp_status)  || float64_le(FDT0, FDT1, &env->fpu->fp_status))
/* NOTE: the comma operator will make "cond" evaluate to false,
 * but float*_is_unordered() is still called. */
FOP_COND_D(sf,   (float64_is_unordered(1, FDT1, FDT0, &env->fpu->fp_status), 0))
FOP_COND_D(ngle, float64_is_unordered(1, FDT1, FDT0, &env->fpu->fp_status))
FOP_COND_D(seq, !float64_is_unordered(1, FDT1, FDT0, &env->fpu->fp_status) && float64_eq(FDT0, FDT1, &env->fpu->fp_status))
FOP_COND_D(ngl, float64_is_unordered(1, FDT1, FDT0, &env->fpu->fp_status)  || float64_eq(FDT0, FDT1, &env->fpu->fp_status))
FOP_COND_D(lt,  !float64_is_unordered(1, FDT1, FDT0, &env->fpu->fp_status) && float64_lt(FDT0, FDT1, &env->fpu->fp_status))
FOP_COND_D(nge, float64_is_unordered(1, FDT1, FDT0, &env->fpu->fp_status)  || float64_lt(FDT0, FDT1, &env->fpu->fp_status))
FOP_COND_D(le,  !float64_is_unordered(1, FDT1, FDT0, &env->fpu->fp_status) && float64_le(FDT0, FDT1, &env->fpu->fp_status))
FOP_COND_D(ngt, float64_is_unordered(1, FDT1, FDT0, &env->fpu->fp_status)  || float64_le(FDT0, FDT1, &env->fpu->fp_status))
#define FOP_COND_S(op, cond)                   \
void do_cmp_s_ ## op (long cc)                 \
{                                              \
    int c = cond;                              \
    update_fcr31();                            \
    if (c)                                     \
        SET_FP_COND(cc, env->fpu);             \
    else                                       \
        CLEAR_FP_COND(cc, env->fpu);           \
}                                              \
void do_cmpabs_s_ ## op (long cc)              \
{                                              \
    int c;                                     \
    FST0 &= ~FLOAT_SIGN32;                     \
    FST1 &= ~FLOAT_SIGN32;                     \
    c = cond;                                  \
    update_fcr31();                            \
    if (c)                                     \
        SET_FP_COND(cc, env->fpu);             \
    else                                       \
        CLEAR_FP_COND(cc, env->fpu);           \
}
flag float32_is_unordered(int sig, float32 a, float32 b STATUS_PARAM)
{
    if (float32_is_signaling_nan(a) ||
        float32_is_signaling_nan(b) ||
        (sig && (float32_is_nan(a) || float32_is_nan(b)))) {
        float_raise(float_flag_invalid, status);
        return 1;
    } else if (float32_is_nan(a) || float32_is_nan(b)) {
        return 1;
    } else {
        return 0;
    }
}
/* NOTE: the comma operator will make "cond" evaluate to false,
 * but float*_is_unordered() is still called. */
FOP_COND_S(f,   (float32_is_unordered(0, FST1, FST0, &env->fpu->fp_status), 0))
FOP_COND_S(un,  float32_is_unordered(0, FST1, FST0, &env->fpu->fp_status))
FOP_COND_S(eq,  !float32_is_unordered(0, FST1, FST0, &env->fpu->fp_status) && float32_eq(FST0, FST1, &env->fpu->fp_status))
FOP_COND_S(ueq, float32_is_unordered(0, FST1, FST0, &env->fpu->fp_status)  || float32_eq(FST0, FST1, &env->fpu->fp_status))
FOP_COND_S(olt, !float32_is_unordered(0, FST1, FST0, &env->fpu->fp_status) && float32_lt(FST0, FST1, &env->fpu->fp_status))
FOP_COND_S(ult, float32_is_unordered(0, FST1, FST0, &env->fpu->fp_status)  || float32_lt(FST0, FST1, &env->fpu->fp_status))
FOP_COND_S(ole, !float32_is_unordered(0, FST1, FST0, &env->fpu->fp_status) && float32_le(FST0, FST1, &env->fpu->fp_status))
FOP_COND_S(ule, float32_is_unordered(0, FST1, FST0, &env->fpu->fp_status)  || float32_le(FST0, FST1, &env->fpu->fp_status))
/* NOTE: the comma operator will make "cond" evaluate to false,
 * but float*_is_unordered() is still called. */
FOP_COND_S(sf,   (float32_is_unordered(1, FST1, FST0, &env->fpu->fp_status), 0))
FOP_COND_S(ngle, float32_is_unordered(1, FST1, FST0, &env->fpu->fp_status))
FOP_COND_S(seq, !float32_is_unordered(1, FST1, FST0, &env->fpu->fp_status) && float32_eq(FST0, FST1, &env->fpu->fp_status))
FOP_COND_S(ngl, float32_is_unordered(1, FST1, FST0, &env->fpu->fp_status)  || float32_eq(FST0, FST1, &env->fpu->fp_status))
FOP_COND_S(lt,  !float32_is_unordered(1, FST1, FST0, &env->fpu->fp_status) && float32_lt(FST0, FST1, &env->fpu->fp_status))
FOP_COND_S(nge, float32_is_unordered(1, FST1, FST0, &env->fpu->fp_status)  || float32_lt(FST0, FST1, &env->fpu->fp_status))
FOP_COND_S(le,  !float32_is_unordered(1, FST1, FST0, &env->fpu->fp_status) && float32_le(FST0, FST1, &env->fpu->fp_status))
FOP_COND_S(ngt, float32_is_unordered(1, FST1, FST0, &env->fpu->fp_status)  || float32_le(FST0, FST1, &env->fpu->fp_status))
#define FOP_COND_PS(op, condl, condh)          \
void do_cmp_ps_ ## op (long cc)                \
{                                              \
    int cl = condl;                            \
    int ch = condh;                            \
    update_fcr31();                            \
    if (cl)                                    \
        SET_FP_COND(cc, env->fpu);             \
    else                                       \
        CLEAR_FP_COND(cc, env->fpu);           \
    if (ch)                                    \
        SET_FP_COND(cc + 1, env->fpu);         \
    else                                       \
        CLEAR_FP_COND(cc + 1, env->fpu);       \
}                                              \
void do_cmpabs_ps_ ## op (long cc)             \
{                                              \
    int cl, ch;                                \
    FST0 &= ~FLOAT_SIGN32;                     \
    FSTH0 &= ~FLOAT_SIGN32;                    \
    FST1 &= ~FLOAT_SIGN32;                     \
    FSTH1 &= ~FLOAT_SIGN32;                    \
    cl = condl;                                \
    ch = condh;                                \
    update_fcr31();                            \
    if (cl)                                    \
        SET_FP_COND(cc, env->fpu);             \
    else                                       \
        CLEAR_FP_COND(cc, env->fpu);           \
    if (ch)                                    \
        SET_FP_COND(cc + 1, env->fpu);         \
    else                                       \
        CLEAR_FP_COND(cc + 1, env->fpu);       \
}
/* NOTE: the comma operator will make "cond" evaluate to false,
 * but float*_is_unordered() is still called. */
FOP_COND_PS(f,   (float32_is_unordered(0, FST1, FST0, &env->fpu->fp_status), 0),
                 (float32_is_unordered(0, FSTH1, FSTH0, &env->fpu->fp_status), 0))
FOP_COND_PS(un,  float32_is_unordered(0, FST1, FST0, &env->fpu->fp_status),
                 float32_is_unordered(0, FSTH1, FSTH0, &env->fpu->fp_status))
FOP_COND_PS(eq,  !float32_is_unordered(0, FST1, FST0, &env->fpu->fp_status) && float32_eq(FST0, FST1, &env->fpu->fp_status),
                 !float32_is_unordered(0, FSTH1, FSTH0, &env->fpu->fp_status) && float32_eq(FSTH0, FSTH1, &env->fpu->fp_status))
FOP_COND_PS(ueq, float32_is_unordered(0, FST1, FST0, &env->fpu->fp_status)  || float32_eq(FST0, FST1, &env->fpu->fp_status),
                 float32_is_unordered(0, FSTH1, FSTH0, &env->fpu->fp_status)  || float32_eq(FSTH0, FSTH1, &env->fpu->fp_status))
FOP_COND_PS(olt, !float32_is_unordered(0, FST1, FST0, &env->fpu->fp_status) && float32_lt(FST0, FST1, &env->fpu->fp_status),
                 !float32_is_unordered(0, FSTH1, FSTH0, &env->fpu->fp_status) && float32_lt(FSTH0, FSTH1, &env->fpu->fp_status))
FOP_COND_PS(ult, float32_is_unordered(0, FST1, FST0, &env->fpu->fp_status)  || float32_lt(FST0, FST1, &env->fpu->fp_status),
                 float32_is_unordered(0, FSTH1, FSTH0, &env->fpu->fp_status)  || float32_lt(FSTH0, FSTH1, &env->fpu->fp_status))
FOP_COND_PS(ole, !float32_is_unordered(0, FST1, FST0, &env->fpu->fp_status) && float32_le(FST0, FST1, &env->fpu->fp_status),
                 !float32_is_unordered(0, FSTH1, FSTH0, &env->fpu->fp_status) && float32_le(FSTH0, FSTH1, &env->fpu->fp_status))
FOP_COND_PS(ule, float32_is_unordered(0, FST1, FST0, &env->fpu->fp_status)  || float32_le(FST0, FST1, &env->fpu->fp_status),
                 float32_is_unordered(0, FSTH1, FSTH0, &env->fpu->fp_status)  || float32_le(FSTH0, FSTH1, &env->fpu->fp_status))
/* NOTE: the comma operator will make "cond" evaluate to false,
 * but float*_is_unordered() is still called. */
FOP_COND_PS(sf,   (float32_is_unordered(1, FST1, FST0, &env->fpu->fp_status), 0),
                  (float32_is_unordered(1, FSTH1, FSTH0, &env->fpu->fp_status), 0))
FOP_COND_PS(ngle, float32_is_unordered(1, FST1, FST0, &env->fpu->fp_status),
                  float32_is_unordered(1, FSTH1, FSTH0, &env->fpu->fp_status))
FOP_COND_PS(seq, !float32_is_unordered(1, FST1, FST0, &env->fpu->fp_status) && float32_eq(FST0, FST1, &env->fpu->fp_status),
                 !float32_is_unordered(1, FSTH1, FSTH0, &env->fpu->fp_status) && float32_eq(FSTH0, FSTH1, &env->fpu->fp_status))
FOP_COND_PS(ngl, float32_is_unordered(1, FST1, FST0, &env->fpu->fp_status)  || float32_eq(FST0, FST1, &env->fpu->fp_status),
                 float32_is_unordered(1, FSTH1, FSTH0, &env->fpu->fp_status)  || float32_eq(FSTH0, FSTH1, &env->fpu->fp_status))
FOP_COND_PS(lt,  !float32_is_unordered(1, FST1, FST0, &env->fpu->fp_status) && float32_lt(FST0, FST1, &env->fpu->fp_status),
                 !float32_is_unordered(1, FSTH1, FSTH0, &env->fpu->fp_status) && float32_lt(FSTH0, FSTH1, &env->fpu->fp_status))
FOP_COND_PS(nge, float32_is_unordered(1, FST1, FST0, &env->fpu->fp_status)  || float32_lt(FST0, FST1, &env->fpu->fp_status),
                 float32_is_unordered(1, FSTH1, FSTH0, &env->fpu->fp_status)  || float32_lt(FSTH0, FSTH1, &env->fpu->fp_status))
FOP_COND_PS(le,  !float32_is_unordered(1, FST1, FST0, &env->fpu->fp_status) && float32_le(FST0, FST1, &env->fpu->fp_status),
                 !float32_is_unordered(1, FSTH1, FSTH0, &env->fpu->fp_status) && float32_le(FSTH0, FSTH1, &env->fpu->fp_status))
FOP_COND_PS(ngt, float32_is_unordered(1, FST1, FST0, &env->fpu->fp_status)  || float32_le(FST0, FST1, &env->fpu->fp_status),
                 float32_is_unordered(1, FSTH1, FSTH0, &env->fpu->fp_status)  || float32_le(FSTH0, FSTH1, &env->fpu->fp_status))