2 * MIPS emulation helpers for qemu.
4 * Copyright (c) 2004-2005 Jocelyn Mayer
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
23 #include "host-utils.h"
26 # define GETPC() ((void*)((unsigned long)__builtin_return_address(0) & 0x7fffffffUL))
28 # define GETPC() (__builtin_return_address(0))
31 /*****************************************************************************/
32 /* Exceptions processing helpers */
34 void do_raise_exception_err (uint32_t exception
, int error_code
)
37 if (logfile
&& exception
< 0x100)
38 fprintf(logfile
, "%s: %d %d\n", __func__
, exception
, error_code
);
40 env
->exception_index
= exception
;
41 env
->error_code
= error_code
;
46 void do_raise_exception (uint32_t exception
)
48 do_raise_exception_err(exception
, 0);
51 void do_restore_state (void *pc_ptr
)
54 unsigned long pc
= (unsigned long) pc_ptr
;
57 cpu_restore_state (tb
, env
, pc
, NULL
);
60 void do_raise_exception_direct_err (uint32_t exception
, int error_code
)
62 do_restore_state (GETPC ());
63 do_raise_exception_err (exception
, error_code
);
66 void do_raise_exception_direct (uint32_t exception
)
68 do_raise_exception_direct_err (exception
, 0);
71 #if defined(TARGET_MIPS64)
72 #if TARGET_LONG_BITS > HOST_LONG_BITS
73 /* Those might call libgcc functions. */
86 T0
= (int64_t)T0
>> T1
;
91 T0
= (int64_t)T0
>> (T1
+ 32);
101 T0
= T0
>> (T1
+ 32);
109 tmp
= T0
<< (0x40 - T1
);
110 T0
= (T0
>> T1
) | tmp
;
114 void do_drotr32 (void)
118 tmp
= T0
<< (0x40 - (32 + T1
));
119 T0
= (T0
>> (32 + T1
)) | tmp
;
124 T0
= T1
<< (T0
& 0x3F);
129 T0
= (int64_t)T1
>> (T0
& 0x3F);
134 T0
= T1
>> (T0
& 0x3F);
137 void do_drotrv (void)
143 tmp
= T1
<< (0x40 - T0
);
144 T0
= (T1
>> T0
) | tmp
;
159 #endif /* TARGET_LONG_BITS > HOST_LONG_BITS */
160 #endif /* TARGET_MIPS64 */
162 /* 64 bits arithmetic for 32 bits hosts */
163 #if TARGET_LONG_BITS > HOST_LONG_BITS
164 static always_inline
uint64_t get_HILO (void)
166 return (env
->HI
[0][env
->current_tc
] << 32) | (uint32_t)env
->LO
[0][env
->current_tc
];
169 static always_inline
void set_HILO (uint64_t HILO
)
171 env
->LO
[0][env
->current_tc
] = (int32_t)HILO
;
172 env
->HI
[0][env
->current_tc
] = (int32_t)(HILO
>> 32);
177 set_HILO((int64_t)(int32_t)T0
* (int64_t)(int32_t)T1
);
182 set_HILO((uint64_t)(uint32_t)T0
* (uint64_t)(uint32_t)T1
);
189 tmp
= ((int64_t)(int32_t)T0
* (int64_t)(int32_t)T1
);
190 set_HILO((int64_t)get_HILO() + tmp
);
197 tmp
= ((uint64_t)(uint32_t)T0
* (uint64_t)(uint32_t)T1
);
198 set_HILO(get_HILO() + tmp
);
205 tmp
= ((int64_t)(int32_t)T0
* (int64_t)(int32_t)T1
);
206 set_HILO((int64_t)get_HILO() - tmp
);
213 tmp
= ((uint64_t)(uint32_t)T0
* (uint64_t)(uint32_t)T1
);
214 set_HILO(get_HILO() - tmp
);
218 #if HOST_LONG_BITS < 64
221 /* 64bit datatypes because we may see overflow/underflow. */
223 env
->LO
[0][env
->current_tc
] = (int32_t)((int64_t)(int32_t)T0
/ (int32_t)T1
);
224 env
->HI
[0][env
->current_tc
] = (int32_t)((int64_t)(int32_t)T0
% (int32_t)T1
);
229 #if defined(TARGET_MIPS64)
233 lldiv_t res
= lldiv((int64_t)T0
, (int64_t)T1
);
234 env
->LO
[0][env
->current_tc
] = res
.quot
;
235 env
->HI
[0][env
->current_tc
] = res
.rem
;
239 #if TARGET_LONG_BITS > HOST_LONG_BITS
243 env
->LO
[0][env
->current_tc
] = T0
/ T1
;
244 env
->HI
[0][env
->current_tc
] = T0
% T1
;
248 #endif /* TARGET_MIPS64 */
250 #if defined(CONFIG_USER_ONLY)
251 void do_mfc0_random (void)
253 cpu_abort(env
, "mfc0 random\n");
256 void do_mfc0_count (void)
258 cpu_abort(env
, "mfc0 count\n");
261 void cpu_mips_store_count(CPUState
*env
, uint32_t value
)
263 cpu_abort(env
, "mtc0 count\n");
266 void cpu_mips_store_compare(CPUState
*env
, uint32_t value
)
268 cpu_abort(env
, "mtc0 compare\n");
271 void cpu_mips_start_count(CPUState
*env
)
273 cpu_abort(env
, "start count\n");
276 void cpu_mips_stop_count(CPUState
*env
)
278 cpu_abort(env
, "stop count\n");
281 void cpu_mips_update_irq(CPUState
*env
)
283 cpu_abort(env
, "mtc0 status / mtc0 cause\n");
286 void do_mtc0_status_debug(uint32_t old
, uint32_t val
)
288 cpu_abort(env
, "mtc0 status debug\n");
291 void do_mtc0_status_irqraise_debug (void)
293 cpu_abort(env
, "mtc0 status irqraise debug\n");
296 void cpu_mips_tlb_flush (CPUState
*env
, int flush_global
)
298 cpu_abort(env
, "mips_tlb_flush\n");
304 void do_mfc0_random (void)
306 T0
= (int32_t)cpu_mips_get_random(env
);
309 void do_mfc0_count (void)
311 T0
= (int32_t)cpu_mips_get_count(env
);
314 void do_mtc0_status_debug(uint32_t old
, uint32_t val
)
316 fprintf(logfile
, "Status %08x (%08x) => %08x (%08x) Cause %08x",
317 old
, old
& env
->CP0_Cause
& CP0Ca_IP_mask
,
318 val
, val
& env
->CP0_Cause
& CP0Ca_IP_mask
,
320 switch (env
->hflags
& MIPS_HFLAG_KSU
) {
321 case MIPS_HFLAG_UM
: fputs(", UM\n", logfile
); break;
322 case MIPS_HFLAG_SM
: fputs(", SM\n", logfile
); break;
323 case MIPS_HFLAG_KM
: fputs("\n", logfile
); break;
324 default: cpu_abort(env
, "Invalid MMU mode!\n"); break;
328 void do_mtc0_status_irqraise_debug(void)
330 fprintf(logfile
, "Raise pending IRQs\n");
333 void fpu_handle_exception(void)
335 #ifdef CONFIG_SOFTFLOAT
336 int flags
= get_float_exception_flags(&env
->fpu
->fp_status
);
337 unsigned int cpuflags
= 0, enable
, cause
= 0;
339 enable
= GET_FP_ENABLE(env
->fpu
->fcr31
);
341 /* determine current flags */
342 if (flags
& float_flag_invalid
) {
343 cpuflags
|= FP_INVALID
;
344 cause
|= FP_INVALID
& enable
;
346 if (flags
& float_flag_divbyzero
) {
348 cause
|= FP_DIV0
& enable
;
350 if (flags
& float_flag_overflow
) {
351 cpuflags
|= FP_OVERFLOW
;
352 cause
|= FP_OVERFLOW
& enable
;
354 if (flags
& float_flag_underflow
) {
355 cpuflags
|= FP_UNDERFLOW
;
356 cause
|= FP_UNDERFLOW
& enable
;
358 if (flags
& float_flag_inexact
) {
359 cpuflags
|= FP_INEXACT
;
360 cause
|= FP_INEXACT
& enable
;
362 SET_FP_FLAGS(env
->fpu
->fcr31
, cpuflags
);
363 SET_FP_CAUSE(env
->fpu
->fcr31
, cause
);
365 SET_FP_FLAGS(env
->fpu
->fcr31
, 0);
366 SET_FP_CAUSE(env
->fpu
->fcr31
, 0);
371 void cpu_mips_tlb_flush (CPUState
*env
, int flush_global
)
373 /* Flush qemu's TLB and discard all shadowed entries. */
374 tlb_flush (env
, flush_global
);
375 env
->tlb
->tlb_in_use
= env
->tlb
->nb_tlb
;
378 static void r4k_mips_tlb_flush_extra (CPUState
*env
, int first
)
380 /* Discard entries from env->tlb[first] onwards. */
381 while (env
->tlb
->tlb_in_use
> first
) {
382 r4k_invalidate_tlb(env
, --env
->tlb
->tlb_in_use
, 0);
386 static void r4k_fill_tlb (int idx
)
390 /* XXX: detect conflicting TLBs and raise a MCHECK exception when needed */
391 tlb
= &env
->tlb
->mmu
.r4k
.tlb
[idx
];
392 tlb
->VPN
= env
->CP0_EntryHi
& (TARGET_PAGE_MASK
<< 1);
393 #if defined(TARGET_MIPS64)
394 tlb
->VPN
&= env
->SEGMask
;
396 tlb
->ASID
= env
->CP0_EntryHi
& 0xFF;
397 tlb
->PageMask
= env
->CP0_PageMask
;
398 tlb
->G
= env
->CP0_EntryLo0
& env
->CP0_EntryLo1
& 1;
399 tlb
->V0
= (env
->CP0_EntryLo0
& 2) != 0;
400 tlb
->D0
= (env
->CP0_EntryLo0
& 4) != 0;
401 tlb
->C0
= (env
->CP0_EntryLo0
>> 3) & 0x7;
402 tlb
->PFN
[0] = (env
->CP0_EntryLo0
>> 6) << 12;
403 tlb
->V1
= (env
->CP0_EntryLo1
& 2) != 0;
404 tlb
->D1
= (env
->CP0_EntryLo1
& 4) != 0;
405 tlb
->C1
= (env
->CP0_EntryLo1
>> 3) & 0x7;
406 tlb
->PFN
[1] = (env
->CP0_EntryLo1
>> 6) << 12;
409 void r4k_do_tlbwi (void)
411 /* Discard cached TLB entries. We could avoid doing this if the
412 tlbwi is just upgrading access permissions on the current entry;
413 that might be a further win. */
414 r4k_mips_tlb_flush_extra (env
, env
->tlb
->nb_tlb
);
416 r4k_invalidate_tlb(env
, env
->CP0_Index
% env
->tlb
->nb_tlb
, 0);
417 r4k_fill_tlb(env
->CP0_Index
% env
->tlb
->nb_tlb
);
420 void r4k_do_tlbwr (void)
422 int r
= cpu_mips_get_random(env
);
424 r4k_invalidate_tlb(env
, r
, 1);
428 void r4k_do_tlbp (void)
437 ASID
= env
->CP0_EntryHi
& 0xFF;
438 for (i
= 0; i
< env
->tlb
->nb_tlb
; i
++) {
439 tlb
= &env
->tlb
->mmu
.r4k
.tlb
[i
];
440 /* 1k pages are not supported. */
441 mask
= tlb
->PageMask
| ~(TARGET_PAGE_MASK
<< 1);
442 tag
= env
->CP0_EntryHi
& ~mask
;
443 VPN
= tlb
->VPN
& ~mask
;
444 /* Check ASID, virtual page number & size */
445 if ((tlb
->G
== 1 || tlb
->ASID
== ASID
) && VPN
== tag
) {
451 if (i
== env
->tlb
->nb_tlb
) {
452 /* No match. Discard any shadow entries, if any of them match. */
453 for (i
= env
->tlb
->nb_tlb
; i
< env
->tlb
->tlb_in_use
; i
++) {
454 tlb
= &env
->tlb
->mmu
.r4k
.tlb
[i
];
455 /* 1k pages are not supported. */
456 mask
= tlb
->PageMask
| ~(TARGET_PAGE_MASK
<< 1);
457 tag
= env
->CP0_EntryHi
& ~mask
;
458 VPN
= tlb
->VPN
& ~mask
;
459 /* Check ASID, virtual page number & size */
460 if ((tlb
->G
== 1 || tlb
->ASID
== ASID
) && VPN
== tag
) {
461 r4k_mips_tlb_flush_extra (env
, i
);
466 env
->CP0_Index
|= 0x80000000;
470 void r4k_do_tlbr (void)
475 ASID
= env
->CP0_EntryHi
& 0xFF;
476 tlb
= &env
->tlb
->mmu
.r4k
.tlb
[env
->CP0_Index
% env
->tlb
->nb_tlb
];
478 /* If this will change the current ASID, flush qemu's TLB. */
479 if (ASID
!= tlb
->ASID
)
480 cpu_mips_tlb_flush (env
, 1);
482 r4k_mips_tlb_flush_extra(env
, env
->tlb
->nb_tlb
);
484 env
->CP0_EntryHi
= tlb
->VPN
| tlb
->ASID
;
485 env
->CP0_PageMask
= tlb
->PageMask
;
486 env
->CP0_EntryLo0
= tlb
->G
| (tlb
->V0
<< 1) | (tlb
->D0
<< 2) |
487 (tlb
->C0
<< 3) | (tlb
->PFN
[0] >> 6);
488 env
->CP0_EntryLo1
= tlb
->G
| (tlb
->V1
<< 1) | (tlb
->D1
<< 2) |
489 (tlb
->C1
<< 3) | (tlb
->PFN
[1] >> 6);
492 #endif /* !CONFIG_USER_ONLY */
494 void dump_ldst (const unsigned char *func
)
497 fprintf(logfile
, "%s => " TARGET_FMT_lx
" " TARGET_FMT_lx
"\n", __func__
, T0
, T1
);
503 fprintf(logfile
, "%s " TARGET_FMT_lx
" at " TARGET_FMT_lx
" (" TARGET_FMT_lx
")\n", __func__
,
504 T1
, T0
, env
->CP0_LLAddr
);
508 void debug_pre_eret (void)
510 fprintf(logfile
, "ERET: PC " TARGET_FMT_lx
" EPC " TARGET_FMT_lx
,
511 env
->PC
[env
->current_tc
], env
->CP0_EPC
);
512 if (env
->CP0_Status
& (1 << CP0St_ERL
))
513 fprintf(logfile
, " ErrorEPC " TARGET_FMT_lx
, env
->CP0_ErrorEPC
);
514 if (env
->hflags
& MIPS_HFLAG_DM
)
515 fprintf(logfile
, " DEPC " TARGET_FMT_lx
, env
->CP0_DEPC
);
516 fputs("\n", logfile
);
519 void debug_post_eret (void)
521 fprintf(logfile
, " => PC " TARGET_FMT_lx
" EPC " TARGET_FMT_lx
,
522 env
->PC
[env
->current_tc
], env
->CP0_EPC
);
523 if (env
->CP0_Status
& (1 << CP0St_ERL
))
524 fprintf(logfile
, " ErrorEPC " TARGET_FMT_lx
, env
->CP0_ErrorEPC
);
525 if (env
->hflags
& MIPS_HFLAG_DM
)
526 fprintf(logfile
, " DEPC " TARGET_FMT_lx
, env
->CP0_DEPC
);
527 switch (env
->hflags
& MIPS_HFLAG_KSU
) {
528 case MIPS_HFLAG_UM
: fputs(", UM\n", logfile
); break;
529 case MIPS_HFLAG_SM
: fputs(", SM\n", logfile
); break;
530 case MIPS_HFLAG_KM
: fputs("\n", logfile
); break;
531 default: cpu_abort(env
, "Invalid MMU mode!\n"); break;
535 void do_pmon (int function
)
539 case 2: /* TODO: char inbyte(int waitflag); */
540 if (env
->gpr
[4][env
->current_tc
] == 0)
541 env
->gpr
[2][env
->current_tc
] = -1;
543 case 11: /* TODO: char inbyte (void); */
544 env
->gpr
[2][env
->current_tc
] = -1;
548 printf("%c", (char)(env
->gpr
[4][env
->current_tc
] & 0xFF));
554 unsigned char *fmt
= (void *)(unsigned long)env
->gpr
[4][env
->current_tc
];
561 #if !defined(CONFIG_USER_ONLY)
563 static void do_unaligned_access (target_ulong addr
, int is_write
, int is_user
, void *retaddr
);
565 #define MMUSUFFIX _mmu
569 #include "softmmu_template.h"
572 #include "softmmu_template.h"
575 #include "softmmu_template.h"
578 #include "softmmu_template.h"
580 static void do_unaligned_access (target_ulong addr
, int is_write
, int is_user
, void *retaddr
)
582 env
->CP0_BadVAddr
= addr
;
583 do_restore_state (retaddr
);
584 do_raise_exception ((is_write
== 1) ? EXCP_AdES
: EXCP_AdEL
);
587 void tlb_fill (target_ulong addr
, int is_write
, int mmu_idx
, void *retaddr
)
589 TranslationBlock
*tb
;
594 /* XXX: hack to restore env in all cases, even if not called from
597 env
= cpu_single_env
;
598 ret
= cpu_mips_handle_mmu_fault(env
, addr
, is_write
, mmu_idx
, 1);
601 /* now we have a real cpu fault */
602 pc
= (unsigned long)retaddr
;
605 /* the PC is inside the translated code. It means that we have
606 a virtual CPU fault */
607 cpu_restore_state(tb
, env
, pc
, NULL
);
610 do_raise_exception_err(env
->exception_index
, env
->error_code
);
615 void do_unassigned_access(target_phys_addr_t addr
, int is_write
, int is_exec
,
619 do_raise_exception(EXCP_IBE
);
621 do_raise_exception(EXCP_DBE
);
625 /* Complex FPU operations which may need stack space. */
627 #define FLOAT_ONE32 make_float32(0x3f8 << 20)
628 #define FLOAT_ONE64 make_float64(0x3ffULL << 52)
629 #define FLOAT_TWO32 make_float32(1 << 30)
630 #define FLOAT_TWO64 make_float64(1ULL << 62)
631 #define FLOAT_QNAN32 0x7fbfffff
632 #define FLOAT_QNAN64 0x7ff7ffffffffffffULL
633 #define FLOAT_SNAN32 0x7fffffff
634 #define FLOAT_SNAN64 0x7fffffffffffffffULL
636 /* convert MIPS rounding mode in FCR31 to IEEE library */
637 unsigned int ieee_rm
[] = {
638 float_round_nearest_even
,
644 #define RESTORE_ROUNDING_MODE \
645 set_float_rounding_mode(ieee_rm[env->fpu->fcr31 & 3], &env->fpu->fp_status)
647 void do_cfc1 (int reg
)
651 T0
= (int32_t)env
->fpu
->fcr0
;
654 T0
= ((env
->fpu
->fcr31
>> 24) & 0xfe) | ((env
->fpu
->fcr31
>> 23) & 0x1);
657 T0
= env
->fpu
->fcr31
& 0x0003f07c;
660 T0
= (env
->fpu
->fcr31
& 0x00000f83) | ((env
->fpu
->fcr31
>> 22) & 0x4);
663 T0
= (int32_t)env
->fpu
->fcr31
;
668 void do_ctc1 (int reg
)
674 env
->fpu
->fcr31
= (env
->fpu
->fcr31
& 0x017fffff) | ((T0
& 0xfe) << 24) |
680 env
->fpu
->fcr31
= (env
->fpu
->fcr31
& 0xfffc0f83) | (T0
& 0x0003f07c);
685 env
->fpu
->fcr31
= (env
->fpu
->fcr31
& 0xfefff07c) | (T0
& 0x00000f83) |
691 env
->fpu
->fcr31
= T0
;
696 /* set rounding mode */
697 RESTORE_ROUNDING_MODE
;
698 set_float_exception_flags(0, &env
->fpu
->fp_status
);
699 if ((GET_FP_ENABLE(env
->fpu
->fcr31
) | 0x20) & GET_FP_CAUSE(env
->fpu
->fcr31
))
700 do_raise_exception(EXCP_FPE
);
703 static always_inline
char ieee_ex_to_mips(char xcpt
)
705 return (xcpt
& float_flag_inexact
) >> 5 |
706 (xcpt
& float_flag_underflow
) >> 3 |
707 (xcpt
& float_flag_overflow
) >> 1 |
708 (xcpt
& float_flag_divbyzero
) << 1 |
709 (xcpt
& float_flag_invalid
) << 4;
712 static always_inline
char mips_ex_to_ieee(char xcpt
)
714 return (xcpt
& FP_INEXACT
) << 5 |
715 (xcpt
& FP_UNDERFLOW
) << 3 |
716 (xcpt
& FP_OVERFLOW
) << 1 |
717 (xcpt
& FP_DIV0
) >> 1 |
718 (xcpt
& FP_INVALID
) >> 4;
721 static always_inline
void update_fcr31(void)
723 int tmp
= ieee_ex_to_mips(get_float_exception_flags(&env
->fpu
->fp_status
));
725 SET_FP_CAUSE(env
->fpu
->fcr31
, tmp
);
726 if (GET_FP_ENABLE(env
->fpu
->fcr31
) & tmp
)
727 do_raise_exception(EXCP_FPE
);
729 UPDATE_FP_FLAGS(env
->fpu
->fcr31
, tmp
);
732 #define FLOAT_OP(name, p) void do_float_##name##_##p(void)
736 set_float_exception_flags(0, &env
->fpu
->fp_status
);
737 FDT2
= float32_to_float64(FST0
, &env
->fpu
->fp_status
);
742 set_float_exception_flags(0, &env
->fpu
->fp_status
);
743 FDT2
= int32_to_float64(WT0
, &env
->fpu
->fp_status
);
748 set_float_exception_flags(0, &env
->fpu
->fp_status
);
749 FDT2
= int64_to_float64(DT0
, &env
->fpu
->fp_status
);
754 set_float_exception_flags(0, &env
->fpu
->fp_status
);
755 DT2
= float64_to_int64(FDT0
, &env
->fpu
->fp_status
);
757 if (GET_FP_CAUSE(env
->fpu
->fcr31
) & (FP_OVERFLOW
| FP_INVALID
))
762 set_float_exception_flags(0, &env
->fpu
->fp_status
);
763 DT2
= float32_to_int64(FST0
, &env
->fpu
->fp_status
);
765 if (GET_FP_CAUSE(env
->fpu
->fcr31
) & (FP_OVERFLOW
| FP_INVALID
))
771 set_float_exception_flags(0, &env
->fpu
->fp_status
);
772 FST2
= int32_to_float32(WT0
, &env
->fpu
->fp_status
);
773 FSTH2
= int32_to_float32(WTH0
, &env
->fpu
->fp_status
);
778 set_float_exception_flags(0, &env
->fpu
->fp_status
);
779 WT2
= float32_to_int32(FST0
, &env
->fpu
->fp_status
);
780 WTH2
= float32_to_int32(FSTH0
, &env
->fpu
->fp_status
);
782 if (GET_FP_CAUSE(env
->fpu
->fcr31
) & (FP_OVERFLOW
| FP_INVALID
))
787 set_float_exception_flags(0, &env
->fpu
->fp_status
);
788 FST2
= float64_to_float32(FDT0
, &env
->fpu
->fp_status
);
793 set_float_exception_flags(0, &env
->fpu
->fp_status
);
794 FST2
= int32_to_float32(WT0
, &env
->fpu
->fp_status
);
799 set_float_exception_flags(0, &env
->fpu
->fp_status
);
800 FST2
= int64_to_float32(DT0
, &env
->fpu
->fp_status
);
805 set_float_exception_flags(0, &env
->fpu
->fp_status
);
811 set_float_exception_flags(0, &env
->fpu
->fp_status
);
817 set_float_exception_flags(0, &env
->fpu
->fp_status
);
818 WT2
= float32_to_int32(FST0
, &env
->fpu
->fp_status
);
820 if (GET_FP_CAUSE(env
->fpu
->fcr31
) & (FP_OVERFLOW
| FP_INVALID
))
825 set_float_exception_flags(0, &env
->fpu
->fp_status
);
826 WT2
= float64_to_int32(FDT0
, &env
->fpu
->fp_status
);
828 if (GET_FP_CAUSE(env
->fpu
->fcr31
) & (FP_OVERFLOW
| FP_INVALID
))
834 set_float_rounding_mode(float_round_nearest_even
, &env
->fpu
->fp_status
);
835 DT2
= float64_to_int64(FDT0
, &env
->fpu
->fp_status
);
836 RESTORE_ROUNDING_MODE
;
838 if (GET_FP_CAUSE(env
->fpu
->fcr31
) & (FP_OVERFLOW
| FP_INVALID
))
843 set_float_rounding_mode(float_round_nearest_even
, &env
->fpu
->fp_status
);
844 DT2
= float32_to_int64(FST0
, &env
->fpu
->fp_status
);
845 RESTORE_ROUNDING_MODE
;
847 if (GET_FP_CAUSE(env
->fpu
->fcr31
) & (FP_OVERFLOW
| FP_INVALID
))
852 set_float_rounding_mode(float_round_nearest_even
, &env
->fpu
->fp_status
);
853 WT2
= float64_to_int32(FDT0
, &env
->fpu
->fp_status
);
854 RESTORE_ROUNDING_MODE
;
856 if (GET_FP_CAUSE(env
->fpu
->fcr31
) & (FP_OVERFLOW
| FP_INVALID
))
861 set_float_rounding_mode(float_round_nearest_even
, &env
->fpu
->fp_status
);
862 WT2
= float32_to_int32(FST0
, &env
->fpu
->fp_status
);
863 RESTORE_ROUNDING_MODE
;
865 if (GET_FP_CAUSE(env
->fpu
->fcr31
) & (FP_OVERFLOW
| FP_INVALID
))
871 DT2
= float64_to_int64_round_to_zero(FDT0
, &env
->fpu
->fp_status
);
873 if (GET_FP_CAUSE(env
->fpu
->fcr31
) & (FP_OVERFLOW
| FP_INVALID
))
878 DT2
= float32_to_int64_round_to_zero(FST0
, &env
->fpu
->fp_status
);
880 if (GET_FP_CAUSE(env
->fpu
->fcr31
) & (FP_OVERFLOW
| FP_INVALID
))
885 WT2
= float64_to_int32_round_to_zero(FDT0
, &env
->fpu
->fp_status
);
887 if (GET_FP_CAUSE(env
->fpu
->fcr31
) & (FP_OVERFLOW
| FP_INVALID
))
892 WT2
= float32_to_int32_round_to_zero(FST0
, &env
->fpu
->fp_status
);
894 if (GET_FP_CAUSE(env
->fpu
->fcr31
) & (FP_OVERFLOW
| FP_INVALID
))
900 set_float_rounding_mode(float_round_up
, &env
->fpu
->fp_status
);
901 DT2
= float64_to_int64(FDT0
, &env
->fpu
->fp_status
);
902 RESTORE_ROUNDING_MODE
;
904 if (GET_FP_CAUSE(env
->fpu
->fcr31
) & (FP_OVERFLOW
| FP_INVALID
))
909 set_float_rounding_mode(float_round_up
, &env
->fpu
->fp_status
);
910 DT2
= float32_to_int64(FST0
, &env
->fpu
->fp_status
);
911 RESTORE_ROUNDING_MODE
;
913 if (GET_FP_CAUSE(env
->fpu
->fcr31
) & (FP_OVERFLOW
| FP_INVALID
))
918 set_float_rounding_mode(float_round_up
, &env
->fpu
->fp_status
);
919 WT2
= float64_to_int32(FDT0
, &env
->fpu
->fp_status
);
920 RESTORE_ROUNDING_MODE
;
922 if (GET_FP_CAUSE(env
->fpu
->fcr31
) & (FP_OVERFLOW
| FP_INVALID
))
927 set_float_rounding_mode(float_round_up
, &env
->fpu
->fp_status
);
928 WT2
= float32_to_int32(FST0
, &env
->fpu
->fp_status
);
929 RESTORE_ROUNDING_MODE
;
931 if (GET_FP_CAUSE(env
->fpu
->fcr31
) & (FP_OVERFLOW
| FP_INVALID
))
937 set_float_rounding_mode(float_round_down
, &env
->fpu
->fp_status
);
938 DT2
= float64_to_int64(FDT0
, &env
->fpu
->fp_status
);
939 RESTORE_ROUNDING_MODE
;
941 if (GET_FP_CAUSE(env
->fpu
->fcr31
) & (FP_OVERFLOW
| FP_INVALID
))
946 set_float_rounding_mode(float_round_down
, &env
->fpu
->fp_status
);
947 DT2
= float32_to_int64(FST0
, &env
->fpu
->fp_status
);
948 RESTORE_ROUNDING_MODE
;
950 if (GET_FP_CAUSE(env
->fpu
->fcr31
) & (FP_OVERFLOW
| FP_INVALID
))
955 set_float_rounding_mode(float_round_down
, &env
->fpu
->fp_status
);
956 WT2
= float64_to_int32(FDT0
, &env
->fpu
->fp_status
);
957 RESTORE_ROUNDING_MODE
;
959 if (GET_FP_CAUSE(env
->fpu
->fcr31
) & (FP_OVERFLOW
| FP_INVALID
))
964 set_float_rounding_mode(float_round_down
, &env
->fpu
->fp_status
);
965 WT2
= float32_to_int32(FST0
, &env
->fpu
->fp_status
);
966 RESTORE_ROUNDING_MODE
;
968 if (GET_FP_CAUSE(env
->fpu
->fcr31
) & (FP_OVERFLOW
| FP_INVALID
))
972 /* MIPS specific unary operations */
975 set_float_exception_flags(0, &env
->fpu
->fp_status
);
976 FDT2
= float64_div(FLOAT_ONE64
, FDT0
, &env
->fpu
->fp_status
);
981 set_float_exception_flags(0, &env
->fpu
->fp_status
);
982 FST2
= float32_div(FLOAT_ONE32
, FST0
, &env
->fpu
->fp_status
);
988 set_float_exception_flags(0, &env
->fpu
->fp_status
);
989 FDT2
= float64_sqrt(FDT0
, &env
->fpu
->fp_status
);
990 FDT2
= float64_div(FLOAT_ONE64
, FDT2
, &env
->fpu
->fp_status
);
995 set_float_exception_flags(0, &env
->fpu
->fp_status
);
996 FST2
= float32_sqrt(FST0
, &env
->fpu
->fp_status
);
997 FST2
= float32_div(FLOAT_ONE32
, FST2
, &env
->fpu
->fp_status
);
1003 set_float_exception_flags(0, &env
->fpu
->fp_status
);
1004 FDT2
= float64_div(FLOAT_ONE64
, FDT0
, &env
->fpu
->fp_status
);
1009 set_float_exception_flags(0, &env
->fpu
->fp_status
);
1010 FST2
= float32_div(FLOAT_ONE32
, FST0
, &env
->fpu
->fp_status
);
1013 FLOAT_OP(recip1
, ps
)
1015 set_float_exception_flags(0, &env
->fpu
->fp_status
);
1016 FST2
= float32_div(FLOAT_ONE32
, FST0
, &env
->fpu
->fp_status
);
1017 FSTH2
= float32_div(FLOAT_ONE32
, FSTH0
, &env
->fpu
->fp_status
);
1023 set_float_exception_flags(0, &env
->fpu
->fp_status
);
1024 FDT2
= float64_sqrt(FDT0
, &env
->fpu
->fp_status
);
1025 FDT2
= float64_div(FLOAT_ONE64
, FDT2
, &env
->fpu
->fp_status
);
1030 set_float_exception_flags(0, &env
->fpu
->fp_status
);
1031 FST2
= float32_sqrt(FST0
, &env
->fpu
->fp_status
);
1032 FST2
= float32_div(FLOAT_ONE32
, FST2
, &env
->fpu
->fp_status
);
1035 FLOAT_OP(rsqrt1
, ps
)
1037 set_float_exception_flags(0, &env
->fpu
->fp_status
);
1038 FST2
= float32_sqrt(FST0
, &env
->fpu
->fp_status
);
1039 FSTH2
= float32_sqrt(FSTH0
, &env
->fpu
->fp_status
);
1040 FST2
= float32_div(FLOAT_ONE32
, FST2
, &env
->fpu
->fp_status
);
1041 FSTH2
= float32_div(FLOAT_ONE32
, FSTH2
, &env
->fpu
->fp_status
);
1045 /* binary operations */
1046 #define FLOAT_BINOP(name) \
1049 set_float_exception_flags(0, &env->fpu->fp_status); \
1050 FDT2 = float64_ ## name (FDT0, FDT1, &env->fpu->fp_status); \
1052 if (GET_FP_CAUSE(env->fpu->fcr31) & FP_INVALID) \
1053 DT2 = FLOAT_QNAN64; \
1057 set_float_exception_flags(0, &env->fpu->fp_status); \
1058 FST2 = float32_ ## name (FST0, FST1, &env->fpu->fp_status); \
1060 if (GET_FP_CAUSE(env->fpu->fcr31) & FP_INVALID) \
1061 WT2 = FLOAT_QNAN32; \
1063 FLOAT_OP(name, ps) \
1065 set_float_exception_flags(0, &env->fpu->fp_status); \
1066 FST2 = float32_ ## name (FST0, FST1, &env->fpu->fp_status); \
1067 FSTH2 = float32_ ## name (FSTH0, FSTH1, &env->fpu->fp_status); \
1069 if (GET_FP_CAUSE(env->fpu->fcr31) & FP_INVALID) { \
1070 WT2 = FLOAT_QNAN32; \
1071 WTH2 = FLOAT_QNAN32; \
1080 /* MIPS specific binary operations */
1083 set_float_exception_flags(0, &env
->fpu
->fp_status
);
1084 FDT2
= float64_mul(FDT0
, FDT2
, &env
->fpu
->fp_status
);
1085 FDT2
= float64_chs(float64_sub(FDT2
, FLOAT_ONE64
, &env
->fpu
->fp_status
));
1090 set_float_exception_flags(0, &env
->fpu
->fp_status
);
1091 FST2
= float32_mul(FST0
, FST2
, &env
->fpu
->fp_status
);
1092 FST2
= float32_chs(float32_sub(FST2
, FLOAT_ONE32
, &env
->fpu
->fp_status
));
1095 FLOAT_OP(recip2
, ps
)
1097 set_float_exception_flags(0, &env
->fpu
->fp_status
);
1098 FST2
= float32_mul(FST0
, FST2
, &env
->fpu
->fp_status
);
1099 FSTH2
= float32_mul(FSTH0
, FSTH2
, &env
->fpu
->fp_status
);
1100 FST2
= float32_chs(float32_sub(FST2
, FLOAT_ONE32
, &env
->fpu
->fp_status
));
1101 FSTH2
= float32_chs(float32_sub(FSTH2
, FLOAT_ONE32
, &env
->fpu
->fp_status
));
1107 set_float_exception_flags(0, &env
->fpu
->fp_status
);
1108 FDT2
= float64_mul(FDT0
, FDT2
, &env
->fpu
->fp_status
);
1109 FDT2
= float64_sub(FDT2
, FLOAT_ONE64
, &env
->fpu
->fp_status
);
1110 FDT2
= float64_chs(float64_div(FDT2
, FLOAT_TWO64
, &env
->fpu
->fp_status
));
1115 set_float_exception_flags(0, &env
->fpu
->fp_status
);
1116 FST2
= float32_mul(FST0
, FST2
, &env
->fpu
->fp_status
);
1117 FST2
= float32_sub(FST2
, FLOAT_ONE32
, &env
->fpu
->fp_status
);
1118 FST2
= float32_chs(float32_div(FST2
, FLOAT_TWO32
, &env
->fpu
->fp_status
));
1121 FLOAT_OP(rsqrt2
, ps
)
1123 set_float_exception_flags(0, &env
->fpu
->fp_status
);
1124 FST2
= float32_mul(FST0
, FST2
, &env
->fpu
->fp_status
);
1125 FSTH2
= float32_mul(FSTH0
, FSTH2
, &env
->fpu
->fp_status
);
1126 FST2
= float32_sub(FST2
, FLOAT_ONE32
, &env
->fpu
->fp_status
);
1127 FSTH2
= float32_sub(FSTH2
, FLOAT_ONE32
, &env
->fpu
->fp_status
);
1128 FST2
= float32_chs(float32_div(FST2
, FLOAT_TWO32
, &env
->fpu
->fp_status
));
1129 FSTH2
= float32_chs(float32_div(FSTH2
, FLOAT_TWO32
, &env
->fpu
->fp_status
));
1135 set_float_exception_flags(0, &env
->fpu
->fp_status
);
1136 FST2
= float32_add (FST0
, FSTH0
, &env
->fpu
->fp_status
);
1137 FSTH2
= float32_add (FST1
, FSTH1
, &env
->fpu
->fp_status
);
1143 set_float_exception_flags(0, &env
->fpu
->fp_status
);
1144 FST2
= float32_mul (FST0
, FSTH0
, &env
->fpu
->fp_status
);
1145 FSTH2
= float32_mul (FST1
, FSTH1
, &env
->fpu
->fp_status
);
1149 /* compare operations */
1150 #define FOP_COND_D(op, cond) \
1151 void do_cmp_d_ ## op (long cc) \
1156 SET_FP_COND(cc, env->fpu); \
1158 CLEAR_FP_COND(cc, env->fpu); \
1160 void do_cmpabs_d_ ## op (long cc) \
1163 FDT0 = float64_chs(FDT0); \
1164 FDT1 = float64_chs(FDT1); \
1168 SET_FP_COND(cc, env->fpu); \
1170 CLEAR_FP_COND(cc, env->fpu); \
1173 int float64_is_unordered(int sig
, float64 a
, float64 b STATUS_PARAM
)
1175 if (float64_is_signaling_nan(a
) ||
1176 float64_is_signaling_nan(b
) ||
1177 (sig
&& (float64_is_nan(a
) || float64_is_nan(b
)))) {
1178 float_raise(float_flag_invalid
, status
);
1180 } else if (float64_is_nan(a
) || float64_is_nan(b
)) {
1187 /* NOTE: the comma operator will make "cond" to eval to false,
1188 * but float*_is_unordered() is still called. */
1189 FOP_COND_D(f
, (float64_is_unordered(0, FDT1
, FDT0
, &env
->fpu
->fp_status
), 0))
1190 FOP_COND_D(un
, float64_is_unordered(0, FDT1
, FDT0
, &env
->fpu
->fp_status
))
1191 FOP_COND_D(eq
, !float64_is_unordered(0, FDT1
, FDT0
, &env
->fpu
->fp_status
) && float64_eq(FDT0
, FDT1
, &env
->fpu
->fp_status
))
1192 FOP_COND_D(ueq
, float64_is_unordered(0, FDT1
, FDT0
, &env
->fpu
->fp_status
) || float64_eq(FDT0
, FDT1
, &env
->fpu
->fp_status
))
1193 FOP_COND_D(olt
, !float64_is_unordered(0, FDT1
, FDT0
, &env
->fpu
->fp_status
) && float64_lt(FDT0
, FDT1
, &env
->fpu
->fp_status
))
1194 FOP_COND_D(ult
, float64_is_unordered(0, FDT1
, FDT0
, &env
->fpu
->fp_status
) || float64_lt(FDT0
, FDT1
, &env
->fpu
->fp_status
))
1195 FOP_COND_D(ole
, !float64_is_unordered(0, FDT1
, FDT0
, &env
->fpu
->fp_status
) && float64_le(FDT0
, FDT1
, &env
->fpu
->fp_status
))
1196 FOP_COND_D(ule
, float64_is_unordered(0, FDT1
, FDT0
, &env
->fpu
->fp_status
) || float64_le(FDT0
, FDT1
, &env
->fpu
->fp_status
))
1197 /* NOTE: the comma operator will make "cond" to eval to false,
1198 * but float*_is_unordered() is still called. */
1199 FOP_COND_D(sf
, (float64_is_unordered(1, FDT1
, FDT0
, &env
->fpu
->fp_status
), 0))
1200 FOP_COND_D(ngle
,float64_is_unordered(1, FDT1
, FDT0
, &env
->fpu
->fp_status
))
1201 FOP_COND_D(seq
, !float64_is_unordered(1, FDT1
, FDT0
, &env
->fpu
->fp_status
) && float64_eq(FDT0
, FDT1
, &env
->fpu
->fp_status
))
1202 FOP_COND_D(ngl
, float64_is_unordered(1, FDT1
, FDT0
, &env
->fpu
->fp_status
) || float64_eq(FDT0
, FDT1
, &env
->fpu
->fp_status
))
1203 FOP_COND_D(lt
, !float64_is_unordered(1, FDT1
, FDT0
, &env
->fpu
->fp_status
) && float64_lt(FDT0
, FDT1
, &env
->fpu
->fp_status
))
1204 FOP_COND_D(nge
, float64_is_unordered(1, FDT1
, FDT0
, &env
->fpu
->fp_status
) || float64_lt(FDT0
, FDT1
, &env
->fpu
->fp_status
))
1205 FOP_COND_D(le
, !float64_is_unordered(1, FDT1
, FDT0
, &env
->fpu
->fp_status
) && float64_le(FDT0
, FDT1
, &env
->fpu
->fp_status
))
1206 FOP_COND_D(ngt
, float64_is_unordered(1, FDT1
, FDT0
, &env
->fpu
->fp_status
) || float64_le(FDT0
, FDT1
, &env
->fpu
->fp_status
))
1208 #define FOP_COND_S(op, cond) \
1209 void do_cmp_s_ ## op (long cc) \
1214 SET_FP_COND(cc, env->fpu); \
1216 CLEAR_FP_COND(cc, env->fpu); \
1218 void do_cmpabs_s_ ## op (long cc) \
1221 FST0 = float32_abs(FST0); \
1222 FST1 = float32_abs(FST1); \
1226 SET_FP_COND(cc, env->fpu); \
1228 CLEAR_FP_COND(cc, env->fpu); \
1231 flag
float32_is_unordered(int sig
, float32 a
, float32 b STATUS_PARAM
)
1233 if (float32_is_signaling_nan(a
) ||
1234 float32_is_signaling_nan(b
) ||
1235 (sig
&& (float32_is_nan(a
) || float32_is_nan(b
)))) {
1236 float_raise(float_flag_invalid
, status
);
1238 } else if (float32_is_nan(a
) || float32_is_nan(b
)) {
1245 /* NOTE: the comma operator will make "cond" to eval to false,
1246 * but float*_is_unordered() is still called. */
1247 FOP_COND_S(f
, (float32_is_unordered(0, FST1
, FST0
, &env
->fpu
->fp_status
), 0))
1248 FOP_COND_S(un
, float32_is_unordered(0, FST1
, FST0
, &env
->fpu
->fp_status
))
1249 FOP_COND_S(eq
, !float32_is_unordered(0, FST1
, FST0
, &env
->fpu
->fp_status
) && float32_eq(FST0
, FST1
, &env
->fpu
->fp_status
))
1250 FOP_COND_S(ueq
, float32_is_unordered(0, FST1
, FST0
, &env
->fpu
->fp_status
) || float32_eq(FST0
, FST1
, &env
->fpu
->fp_status
))
1251 FOP_COND_S(olt
, !float32_is_unordered(0, FST1
, FST0
, &env
->fpu
->fp_status
) && float32_lt(FST0
, FST1
, &env
->fpu
->fp_status
))
1252 FOP_COND_S(ult
, float32_is_unordered(0, FST1
, FST0
, &env
->fpu
->fp_status
) || float32_lt(FST0
, FST1
, &env
->fpu
->fp_status
))
1253 FOP_COND_S(ole
, !float32_is_unordered(0, FST1
, FST0
, &env
->fpu
->fp_status
) && float32_le(FST0
, FST1
, &env
->fpu
->fp_status
))
1254 FOP_COND_S(ule
, float32_is_unordered(0, FST1
, FST0
, &env
->fpu
->fp_status
) || float32_le(FST0
, FST1
, &env
->fpu
->fp_status
))
1255 /* NOTE: the comma operator will make "cond" to eval to false,
1256 * but float*_is_unordered() is still called. */
1257 FOP_COND_S(sf
, (float32_is_unordered(1, FST1
, FST0
, &env
->fpu
->fp_status
), 0))
1258 FOP_COND_S(ngle
,float32_is_unordered(1, FST1
, FST0
, &env
->fpu
->fp_status
))
1259 FOP_COND_S(seq
, !float32_is_unordered(1, FST1
, FST0
, &env
->fpu
->fp_status
) && float32_eq(FST0
, FST1
, &env
->fpu
->fp_status
))
1260 FOP_COND_S(ngl
, float32_is_unordered(1, FST1
, FST0
, &env
->fpu
->fp_status
) || float32_eq(FST0
, FST1
, &env
->fpu
->fp_status
))
1261 FOP_COND_S(lt
, !float32_is_unordered(1, FST1
, FST0
, &env
->fpu
->fp_status
) && float32_lt(FST0
, FST1
, &env
->fpu
->fp_status
))
1262 FOP_COND_S(nge
, float32_is_unordered(1, FST1
, FST0
, &env
->fpu
->fp_status
) || float32_lt(FST0
, FST1
, &env
->fpu
->fp_status
))
1263 FOP_COND_S(le
, !float32_is_unordered(1, FST1
, FST0
, &env
->fpu
->fp_status
) && float32_le(FST0
, FST1
, &env
->fpu
->fp_status
))
1264 FOP_COND_S(ngt
, float32_is_unordered(1, FST1
, FST0
, &env
->fpu
->fp_status
) || float32_le(FST0
, FST1
, &env
->fpu
->fp_status
))
/*
 * FOP_COND_PS(op, condl, condh): instantiates the paired-single
 * compare helpers do_cmp_ps_<op>(cc) and do_cmpabs_ps_<op>(cc).
 * condl tests the low single (FST0/FST1) and condh the high single
 * (FSTH0/FSTH1); the results drive SET_FP_COND / CLEAR_FP_COND on
 * condition codes cc and cc + 1 (presumably condl -> cc and
 * condh -> cc + 1 -- confirm against the full file, see note below).
 * The "abs" variant first strips the sign of all four operands with
 * float32_abs() so the comparison is performed on magnitudes.
 *
 * NOTE(review): several interior lines of this macro (the function
 * bodies' braces, local declarations, the evaluation of condl/condh
 * and the if/else scaffolding around SET/CLEAR_FP_COND) appear to be
 * missing from this extract; as shown it does not preprocess to valid
 * C. Restore the missing lines from the original file before relying
 * on this text.
 */
#define FOP_COND_PS(op, condl, condh) \
void do_cmp_ps_ ## op (long cc) \
        SET_FP_COND(cc, env->fpu); \
        CLEAR_FP_COND(cc, env->fpu); \
        SET_FP_COND(cc + 1, env->fpu); \
        CLEAR_FP_COND(cc + 1, env->fpu); \
void do_cmpabs_ps_ ## op (long cc) \
    FST0 = float32_abs(FST0); \
    FSTH0 = float32_abs(FSTH0); \
    FST1 = float32_abs(FST1); \
    FSTH1 = float32_abs(FSTH1); \
        SET_FP_COND(cc, env->fpu); \
        CLEAR_FP_COND(cc, env->fpu); \
        SET_FP_COND(cc + 1, env->fpu); \
        CLEAR_FP_COND(cc + 1, env->fpu); \
/*
 * Paired-single (c.cond.ps) compare helpers, quiet predicates: in each
 * invocation the first condition expression tests the low singles
 * FST0/FST1 and the second the high singles FSTH0/FSTH1 (see
 * FOP_COND_PS above).  The leading 0 passed to float32_is_unordered()
 * selects the quiet unordered test.
 */
/* NOTE: the comma operator will make "cond" to eval to false,
 * but float*_is_unordered() is still called (for its side effects). */
FOP_COND_PS(f, (float32_is_unordered(0, FST1, FST0, &env->fpu->fp_status), 0), /* always false */
               (float32_is_unordered(0, FSTH1, FSTH0, &env->fpu->fp_status), 0))
FOP_COND_PS(un, float32_is_unordered(0, FST1, FST0, &env->fpu->fp_status), /* unordered */
                float32_is_unordered(0, FSTH1, FSTH0, &env->fpu->fp_status))
FOP_COND_PS(eq, !float32_is_unordered(0, FST1, FST0, &env->fpu->fp_status) && float32_eq(FST0, FST1, &env->fpu->fp_status), /* ordered equal */
                !float32_is_unordered(0, FSTH1, FSTH0, &env->fpu->fp_status) && float32_eq(FSTH0, FSTH1, &env->fpu->fp_status))
FOP_COND_PS(ueq, float32_is_unordered(0, FST1, FST0, &env->fpu->fp_status) || float32_eq(FST0, FST1, &env->fpu->fp_status), /* unordered or equal */
                 float32_is_unordered(0, FSTH1, FSTH0, &env->fpu->fp_status) || float32_eq(FSTH0, FSTH1, &env->fpu->fp_status))
FOP_COND_PS(olt, !float32_is_unordered(0, FST1, FST0, &env->fpu->fp_status) && float32_lt(FST0, FST1, &env->fpu->fp_status), /* ordered less-than */
                 !float32_is_unordered(0, FSTH1, FSTH0, &env->fpu->fp_status) && float32_lt(FSTH0, FSTH1, &env->fpu->fp_status))
FOP_COND_PS(ult, float32_is_unordered(0, FST1, FST0, &env->fpu->fp_status) || float32_lt(FST0, FST1, &env->fpu->fp_status), /* unordered or less-than */
                 float32_is_unordered(0, FSTH1, FSTH0, &env->fpu->fp_status) || float32_lt(FSTH0, FSTH1, &env->fpu->fp_status))
FOP_COND_PS(ole, !float32_is_unordered(0, FST1, FST0, &env->fpu->fp_status) && float32_le(FST0, FST1, &env->fpu->fp_status), /* ordered less-or-equal */
                 !float32_is_unordered(0, FSTH1, FSTH0, &env->fpu->fp_status) && float32_le(FSTH0, FSTH1, &env->fpu->fp_status))
FOP_COND_PS(ule, float32_is_unordered(0, FST1, FST0, &env->fpu->fp_status) || float32_le(FST0, FST1, &env->fpu->fp_status), /* unordered or less-or-equal */
                 float32_is_unordered(0, FSTH1, FSTH0, &env->fpu->fp_status) || float32_le(FSTH0, FSTH1, &env->fpu->fp_status))
/*
 * Paired-single (c.cond.ps) compare helpers, signaling predicates:
 * same low/high tests as the quiet group above, but with the first
 * argument of float32_is_unordered() set to 1 (signaling unordered
 * test).
 */
/* NOTE: the comma operator will make "cond" to eval to false,
 * but float*_is_unordered() is still called (for its side effects). */
FOP_COND_PS(sf, (float32_is_unordered(1, FST1, FST0, &env->fpu->fp_status), 0), /* signaling false */
                (float32_is_unordered(1, FSTH1, FSTH0, &env->fpu->fp_status), 0))
FOP_COND_PS(ngle, float32_is_unordered(1, FST1, FST0, &env->fpu->fp_status), /* not greater, less or equal (unordered) */
                  float32_is_unordered(1, FSTH1, FSTH0, &env->fpu->fp_status))
FOP_COND_PS(seq, !float32_is_unordered(1, FST1, FST0, &env->fpu->fp_status) && float32_eq(FST0, FST1, &env->fpu->fp_status), /* signaling equal */
                 !float32_is_unordered(1, FSTH1, FSTH0, &env->fpu->fp_status) && float32_eq(FSTH0, FSTH1, &env->fpu->fp_status))
FOP_COND_PS(ngl, float32_is_unordered(1, FST1, FST0, &env->fpu->fp_status) || float32_eq(FST0, FST1, &env->fpu->fp_status), /* not greater or less */
                 float32_is_unordered(1, FSTH1, FSTH0, &env->fpu->fp_status) || float32_eq(FSTH0, FSTH1, &env->fpu->fp_status))
FOP_COND_PS(lt, !float32_is_unordered(1, FST1, FST0, &env->fpu->fp_status) && float32_lt(FST0, FST1, &env->fpu->fp_status), /* less-than */
                !float32_is_unordered(1, FSTH1, FSTH0, &env->fpu->fp_status) && float32_lt(FSTH0, FSTH1, &env->fpu->fp_status))
FOP_COND_PS(nge, float32_is_unordered(1, FST1, FST0, &env->fpu->fp_status) || float32_lt(FST0, FST1, &env->fpu->fp_status), /* not greater-or-equal */
                 float32_is_unordered(1, FSTH1, FSTH0, &env->fpu->fp_status) || float32_lt(FSTH0, FSTH1, &env->fpu->fp_status))
FOP_COND_PS(le, !float32_is_unordered(1, FST1, FST0, &env->fpu->fp_status) && float32_le(FST0, FST1, &env->fpu->fp_status), /* less-or-equal */
                !float32_is_unordered(1, FSTH1, FSTH0, &env->fpu->fp_status) && float32_le(FSTH0, FSTH1, &env->fpu->fp_status))
FOP_COND_PS(ngt, float32_is_unordered(1, FST1, FST0, &env->fpu->fp_status) || float32_le(FST0, FST1, &env->fpu->fp_status), /* not greater-than */
                 float32_is_unordered(1, FSTH1, FSTH0, &env->fpu->fp_status) || float32_le(FSTH0, FSTH1, &env->fpu->fp_status))