/*
 *  Alpha emulation cpu micro-operations helpers for qemu.
 *
 *  Copyright (c) 2007 Jocelyn Mayer
 *
 *  This library is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU Lesser General Public
 *  License as published by the Free Software Foundation; either
 *  version 2 of the License, or (at your option) any later version.
 *
 *  This library is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 *  Lesser General Public License for more details.
 *
 *  You should have received a copy of the GNU Lesser General Public
 *  License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
21 #include "dyngen-exec.h"
22 #include "host-utils.h"
23 #include "softfloat.h"
25 #include "qemu-timer.h"
27 #define FP_STATUS (env->fp_status)
29 /*****************************************************************************/
30 /* Exceptions processing helpers */
32 /* This should only be called from translate, via gen_excp.
33 We expect that ENV->PC has already been updated. */
34 void QEMU_NORETURN
helper_excp(int excp
, int error
)
36 env
->exception_index
= excp
;
37 env
->error_code
= error
;
41 static void do_restore_state(void *retaddr
)
43 unsigned long pc
= (unsigned long)retaddr
;
46 TranslationBlock
*tb
= tb_find_pc(pc
);
48 cpu_restore_state(tb
, env
, pc
);
53 /* This may be called from any of the helpers to set up EXCEPTION_INDEX. */
54 static void QEMU_NORETURN
dynamic_excp(int excp
, int error
)
56 env
->exception_index
= excp
;
57 env
->error_code
= error
;
58 do_restore_state(GETPC());
62 static void QEMU_NORETURN
arith_excp(int exc
, uint64_t mask
)
65 env
->trap_arg1
= mask
;
66 dynamic_excp(EXCP_ARITH
, 0);
69 uint64_t helper_load_pcc (void)
71 #ifndef CONFIG_USER_ONLY
72 /* In system mode we have access to a decent high-resolution clock.
73 In order to make OS-level time accounting work with the RPCC,
74 present it with a well-timed clock fixed at 250MHz. */
75 return (((uint64_t)env
->pcc_ofs
<< 32)
76 | (uint32_t)(qemu_get_clock_ns(vm_clock
) >> 2));
78 /* In user-mode, vm_clock doesn't exist. Just pass through the host cpu
79 clock ticks. Also, don't bother taking PCC_OFS into account. */
80 return (uint32_t)cpu_get_real_ticks();
84 uint64_t helper_load_fpcr (void)
86 return cpu_alpha_load_fpcr (env
);
89 void helper_store_fpcr (uint64_t val
)
91 cpu_alpha_store_fpcr (env
, val
);
94 uint64_t helper_addqv (uint64_t op1
, uint64_t op2
)
98 if (unlikely((tmp
^ op2
^ (-1ULL)) & (tmp
^ op1
) & (1ULL << 63))) {
99 arith_excp(EXC_M_IOV
, 0);
104 uint64_t helper_addlv (uint64_t op1
, uint64_t op2
)
107 op1
= (uint32_t)(op1
+ op2
);
108 if (unlikely((tmp
^ op2
^ (-1UL)) & (tmp
^ op1
) & (1UL << 31))) {
109 arith_excp(EXC_M_IOV
, 0);
114 uint64_t helper_subqv (uint64_t op1
, uint64_t op2
)
118 if (unlikely((op1
^ op2
) & (res
^ op1
) & (1ULL << 63))) {
119 arith_excp(EXC_M_IOV
, 0);
124 uint64_t helper_sublv (uint64_t op1
, uint64_t op2
)
128 if (unlikely((op1
^ op2
) & (res
^ op1
) & (1UL << 31))) {
129 arith_excp(EXC_M_IOV
, 0);
134 uint64_t helper_mullv (uint64_t op1
, uint64_t op2
)
136 int64_t res
= (int64_t)op1
* (int64_t)op2
;
138 if (unlikely((int32_t)res
!= res
)) {
139 arith_excp(EXC_M_IOV
, 0);
141 return (int64_t)((int32_t)res
);
144 uint64_t helper_mulqv (uint64_t op1
, uint64_t op2
)
148 muls64(&tl
, &th
, op1
, op2
);
149 /* If th != 0 && th != -1, then we had an overflow */
150 if (unlikely((th
+ 1) > 1)) {
151 arith_excp(EXC_M_IOV
, 0);
156 uint64_t helper_umulh (uint64_t op1
, uint64_t op2
)
160 mulu64(&tl
, &th
, op1
, op2
);
164 uint64_t helper_ctpop (uint64_t arg
)
169 uint64_t helper_ctlz (uint64_t arg
)
174 uint64_t helper_cttz (uint64_t arg
)
/* Clear the bytes of OP selected by the low 8 bits of MSKB: bit i of
   MSKB set means byte i of the result is zeroed.  */
static inline uint64_t byte_zap(uint64_t op, uint8_t mskb)
{
    uint64_t mask;

    mask = 0;
    /* Expand each mask bit to a full byte of ones.  */
    mask |= ((mskb >> 0) & 1) * 0x00000000000000FFULL;
    mask |= ((mskb >> 1) & 1) * 0x000000000000FF00ULL;
    mask |= ((mskb >> 2) & 1) * 0x0000000000FF0000ULL;
    mask |= ((mskb >> 3) & 1) * 0x00000000FF000000ULL;
    mask |= ((mskb >> 4) & 1) * 0x000000FF00000000ULL;
    mask |= ((mskb >> 5) & 1) * 0x0000FF0000000000ULL;
    mask |= ((mskb >> 6) & 1) * 0x00FF000000000000ULL;
    mask |= ((mskb >> 7) & 1) * 0xFF00000000000000ULL;

    return op & ~mask;
}

/* ZAP: zero the bytes selected by MASK.  */
uint64_t helper_zap(uint64_t val, uint64_t mask)
{
    return byte_zap(val, mask);
}

/* ZAPNOT: zero the bytes NOT selected by MASK.  */
uint64_t helper_zapnot(uint64_t val, uint64_t mask)
{
    return byte_zap(val, ~mask);
}
/* CMPBGE: compare the 8 unsigned bytes of OP1 and OP2 in parallel;
   bit i of the result is set when byte i of OP1 >= byte i of OP2.  */
uint64_t helper_cmpbge (uint64_t op1, uint64_t op2)
{
    uint8_t opa, opb, res;
    int i;

    res = 0;
    for (i = 0; i < 8; i++) {
        opa = op1 >> (i * 8);
        opb = op2 >> (i * 8);
        if (opa >= opb)
            res |= 1 << i;
    }
    return res;
}
/* MINUB8: per-byte unsigned minimum.  */
uint64_t helper_minub8 (uint64_t op1, uint64_t op2)
{
    uint64_t res = 0;
    uint8_t opa, opb, opr;
    int i;

    for (i = 0; i < 8; ++i) {
        opa = op1 >> (i * 8);
        opb = op2 >> (i * 8);
        opr = opa < opb ? opa : opb;
        res |= (uint64_t)opr << (i * 8);
    }
    return res;
}
/* MINSB8: per-byte signed minimum.  */
uint64_t helper_minsb8 (uint64_t op1, uint64_t op2)
{
    uint64_t res = 0;
    int8_t opa, opb;
    uint8_t opr;
    int i;

    for (i = 0; i < 8; ++i) {
        opa = op1 >> (i * 8);
        opb = op2 >> (i * 8);
        opr = opa < opb ? opa : opb;
        res |= (uint64_t)opr << (i * 8);
    }
    return res;
}
/* MINUW4: per-word (16-bit) unsigned minimum.  */
uint64_t helper_minuw4 (uint64_t op1, uint64_t op2)
{
    uint64_t res = 0;
    uint16_t opa, opb, opr;
    int i;

    for (i = 0; i < 4; ++i) {
        opa = op1 >> (i * 16);
        opb = op2 >> (i * 16);
        opr = opa < opb ? opa : opb;
        res |= (uint64_t)opr << (i * 16);
    }
    return res;
}
/* MINSW4: per-word (16-bit) signed minimum.  */
uint64_t helper_minsw4 (uint64_t op1, uint64_t op2)
{
    uint64_t res = 0;
    int16_t opa, opb;
    uint16_t opr;
    int i;

    for (i = 0; i < 4; ++i) {
        opa = op1 >> (i * 16);
        opb = op2 >> (i * 16);
        opr = opa < opb ? opa : opb;
        res |= (uint64_t)opr << (i * 16);
    }
    return res;
}
/* MAXUB8: per-byte unsigned maximum.  */
uint64_t helper_maxub8 (uint64_t op1, uint64_t op2)
{
    uint64_t res = 0;
    uint8_t opa, opb, opr;
    int i;

    for (i = 0; i < 8; ++i) {
        opa = op1 >> (i * 8);
        opb = op2 >> (i * 8);
        opr = opa > opb ? opa : opb;
        res |= (uint64_t)opr << (i * 8);
    }
    return res;
}
/* MAXSB8: per-byte signed maximum.  */
uint64_t helper_maxsb8 (uint64_t op1, uint64_t op2)
{
    uint64_t res = 0;
    int8_t opa, opb;
    uint8_t opr;
    int i;

    for (i = 0; i < 8; ++i) {
        opa = op1 >> (i * 8);
        opb = op2 >> (i * 8);
        opr = opa > opb ? opa : opb;
        res |= (uint64_t)opr << (i * 8);
    }
    return res;
}
/* MAXUW4: per-word (16-bit) unsigned maximum.  */
uint64_t helper_maxuw4 (uint64_t op1, uint64_t op2)
{
    uint64_t res = 0;
    uint16_t opa, opb, opr;
    int i;

    for (i = 0; i < 4; ++i) {
        opa = op1 >> (i * 16);
        opb = op2 >> (i * 16);
        opr = opa > opb ? opa : opb;
        res |= (uint64_t)opr << (i * 16);
    }
    return res;
}
/* MAXSW4: per-word (16-bit) signed maximum.  */
uint64_t helper_maxsw4 (uint64_t op1, uint64_t op2)
{
    uint64_t res = 0;
    int16_t opa, opb;
    uint16_t opr;
    int i;

    for (i = 0; i < 4; ++i) {
        opa = op1 >> (i * 16);
        opb = op2 >> (i * 16);
        opr = opa > opb ? opa : opb;
        res |= (uint64_t)opr << (i * 16);
    }
    return res;
}
/* PERR: sum of absolute differences of the 8 unsigned byte pairs.  */
uint64_t helper_perr (uint64_t op1, uint64_t op2)
{
    uint64_t res = 0;
    uint8_t opa, opb, opr;
    int i;

    for (i = 0; i < 8; ++i) {
        opa = op1 >> (i * 8);
        opb = op2 >> (i * 8);
        if (opa >= opb)
            opr = opa - opb;
        else
            opr = opb - opa;
        res += opr;
    }
    return res;
}
/* PKLB: pack the low bytes of the two longwords into the low word.  */
uint64_t helper_pklb (uint64_t op1)
{
    return (op1 & 0xff) | ((op1 >> 24) & 0xff00);
}

/* PKWB: pack the low bytes of the four words into the low longword.  */
uint64_t helper_pkwb (uint64_t op1)
{
    return ((op1 & 0xff)
            | ((op1 >> 8) & 0xff00)
            | ((op1 >> 16) & 0xff0000)
            | ((op1 >> 24) & 0xff000000));
}

/* UNPKBL: unpack the two low bytes into longword positions.  */
uint64_t helper_unpkbl (uint64_t op1)
{
    return (op1 & 0xff) | ((op1 & 0xff00) << 24);
}

/* UNPKBW: unpack the four low bytes into word positions.  */
uint64_t helper_unpkbw (uint64_t op1)
{
    return ((op1 & 0xff)
            | ((op1 & 0xff00) << 8)
            | ((op1 & 0xff0000) << 16)
            | ((op1 & 0xff000000) << 24));
}
389 /* Floating point helpers */
391 void helper_setroundmode (uint32_t val
)
393 set_float_rounding_mode(val
, &FP_STATUS
);
396 void helper_setflushzero (uint32_t val
)
398 set_flush_to_zero(val
, &FP_STATUS
);
401 void helper_fp_exc_clear (void)
403 set_float_exception_flags(0, &FP_STATUS
);
406 uint32_t helper_fp_exc_get (void)
408 return get_float_exception_flags(&FP_STATUS
);
411 /* Raise exceptions for ieee fp insns without software completion.
412 In that case there are no exceptions that don't trap; the mask
414 void helper_fp_exc_raise(uint32_t exc
, uint32_t regno
)
419 if (exc
& float_flag_invalid
) {
422 if (exc
& float_flag_divbyzero
) {
425 if (exc
& float_flag_overflow
) {
428 if (exc
& float_flag_underflow
) {
431 if (exc
& float_flag_inexact
) {
435 arith_excp(hw_exc
, 1ull << regno
);
439 /* Raise exceptions for ieee fp insns with software completion. */
440 void helper_fp_exc_raise_s(uint32_t exc
, uint32_t regno
)
443 env
->fpcr_exc_status
|= exc
;
445 exc
&= ~env
->fpcr_exc_mask
;
447 helper_fp_exc_raise(exc
, regno
);
452 /* Input remapping without software completion. Handle denormal-map-to-zero
453 and trap for all other non-finite numbers. */
454 uint64_t helper_ieee_input(uint64_t val
)
456 uint32_t exp
= (uint32_t)(val
>> 52) & 0x7ff;
457 uint64_t frac
= val
& 0xfffffffffffffull
;
461 /* If DNZ is set flush denormals to zero on input. */
465 arith_excp(EXC_M_UNF
, 0);
468 } else if (exp
== 0x7ff) {
469 /* Infinity or NaN. */
470 /* ??? I'm not sure these exception bit flags are correct. I do
471 know that the Linux kernel, at least, doesn't rely on them and
472 just emulates the insn to figure out what exception to use. */
473 arith_excp(frac
? EXC_M_INV
: EXC_M_FOV
, 0);
478 /* Similar, but does not trap for infinities. Used for comparisons. */
479 uint64_t helper_ieee_input_cmp(uint64_t val
)
481 uint32_t exp
= (uint32_t)(val
>> 52) & 0x7ff;
482 uint64_t frac
= val
& 0xfffffffffffffull
;
486 /* If DNZ is set flush denormals to zero on input. */
490 arith_excp(EXC_M_UNF
, 0);
493 } else if (exp
== 0x7ff && frac
) {
495 arith_excp(EXC_M_INV
, 0);
500 /* Input remapping with software completion enabled. All we have to do
501 is handle denormal-map-to-zero; all other inputs get exceptions as
502 needed from the actual operation. */
503 uint64_t helper_ieee_input_s(uint64_t val
)
506 uint32_t exp
= (uint32_t)(val
>> 52) & 0x7ff;
514 /* F floating (VAX) */
515 static inline uint64_t float32_to_f(float32 fa
)
517 uint64_t r
, exp
, mant
, sig
;
521 sig
= ((uint64_t)a
.l
& 0x80000000) << 32;
522 exp
= (a
.l
>> 23) & 0xff;
523 mant
= ((uint64_t)a
.l
& 0x007fffff) << 29;
526 /* NaN or infinity */
527 r
= 1; /* VAX dirty zero */
528 } else if (exp
== 0) {
534 r
= sig
| ((exp
+ 1) << 52) | mant
;
539 r
= 1; /* VAX dirty zero */
541 r
= sig
| ((exp
+ 2) << 52);
548 static inline float32
f_to_float32(uint64_t a
)
550 uint32_t exp
, mant_sig
;
553 exp
= ((a
>> 55) & 0x80) | ((a
>> 52) & 0x7f);
554 mant_sig
= ((a
>> 32) & 0x80000000) | ((a
>> 29) & 0x007fffff);
556 if (unlikely(!exp
&& mant_sig
)) {
557 /* Reserved operands / Dirty zero */
558 dynamic_excp(EXCP_OPCDEC
, 0);
565 r
.l
= ((exp
- 2) << 23) | mant_sig
;
571 uint32_t helper_f_to_memory (uint64_t a
)
574 r
= (a
& 0x00001fffe0000000ull
) >> 13;
575 r
|= (a
& 0x07ffe00000000000ull
) >> 45;
576 r
|= (a
& 0xc000000000000000ull
) >> 48;
580 uint64_t helper_memory_to_f (uint32_t a
)
583 r
= ((uint64_t)(a
& 0x0000c000)) << 48;
584 r
|= ((uint64_t)(a
& 0x003fffff)) << 45;
585 r
|= ((uint64_t)(a
& 0xffff0000)) << 13;
586 if (!(a
& 0x00004000))
591 /* ??? Emulating VAX arithmetic with IEEE arithmetic is wrong. We should
592 either implement VAX arithmetic properly or just signal invalid opcode. */
594 uint64_t helper_addf (uint64_t a
, uint64_t b
)
598 fa
= f_to_float32(a
);
599 fb
= f_to_float32(b
);
600 fr
= float32_add(fa
, fb
, &FP_STATUS
);
601 return float32_to_f(fr
);
604 uint64_t helper_subf (uint64_t a
, uint64_t b
)
608 fa
= f_to_float32(a
);
609 fb
= f_to_float32(b
);
610 fr
= float32_sub(fa
, fb
, &FP_STATUS
);
611 return float32_to_f(fr
);
614 uint64_t helper_mulf (uint64_t a
, uint64_t b
)
618 fa
= f_to_float32(a
);
619 fb
= f_to_float32(b
);
620 fr
= float32_mul(fa
, fb
, &FP_STATUS
);
621 return float32_to_f(fr
);
624 uint64_t helper_divf (uint64_t a
, uint64_t b
)
628 fa
= f_to_float32(a
);
629 fb
= f_to_float32(b
);
630 fr
= float32_div(fa
, fb
, &FP_STATUS
);
631 return float32_to_f(fr
);
634 uint64_t helper_sqrtf (uint64_t t
)
638 ft
= f_to_float32(t
);
639 fr
= float32_sqrt(ft
, &FP_STATUS
);
640 return float32_to_f(fr
);
644 /* G floating (VAX) */
645 static inline uint64_t float64_to_g(float64 fa
)
647 uint64_t r
, exp
, mant
, sig
;
651 sig
= a
.ll
& 0x8000000000000000ull
;
652 exp
= (a
.ll
>> 52) & 0x7ff;
653 mant
= a
.ll
& 0x000fffffffffffffull
;
656 /* NaN or infinity */
657 r
= 1; /* VAX dirty zero */
658 } else if (exp
== 0) {
664 r
= sig
| ((exp
+ 1) << 52) | mant
;
669 r
= 1; /* VAX dirty zero */
671 r
= sig
| ((exp
+ 2) << 52);
678 static inline float64
g_to_float64(uint64_t a
)
680 uint64_t exp
, mant_sig
;
683 exp
= (a
>> 52) & 0x7ff;
684 mant_sig
= a
& 0x800fffffffffffffull
;
686 if (!exp
&& mant_sig
) {
687 /* Reserved operands / Dirty zero */
688 dynamic_excp(EXCP_OPCDEC
, 0);
695 r
.ll
= ((exp
- 2) << 52) | mant_sig
;
/* Swap the four 16-bit words: register G format <-> VAX memory order.  */
uint64_t helper_g_to_memory (uint64_t a)
{
    uint64_t r;
    r =  (a & 0x000000000000ffffull) << 48;
    r |= (a & 0x00000000ffff0000ull) << 16;
    r |= (a & 0x0000ffff00000000ull) >> 16;
    r |= (a & 0xffff000000000000ull) >> 48;
    return r;
}

/* Inverse of helper_g_to_memory (the swap is self-inverse).  */
uint64_t helper_memory_to_g (uint64_t a)
{
    uint64_t r;
    r =  (a & 0x000000000000ffffull) << 48;
    r |= (a & 0x00000000ffff0000ull) << 16;
    r |= (a & 0x0000ffff00000000ull) >> 16;
    r |= (a & 0xffff000000000000ull) >> 48;
    return r;
}
721 uint64_t helper_addg (uint64_t a
, uint64_t b
)
725 fa
= g_to_float64(a
);
726 fb
= g_to_float64(b
);
727 fr
= float64_add(fa
, fb
, &FP_STATUS
);
728 return float64_to_g(fr
);
731 uint64_t helper_subg (uint64_t a
, uint64_t b
)
735 fa
= g_to_float64(a
);
736 fb
= g_to_float64(b
);
737 fr
= float64_sub(fa
, fb
, &FP_STATUS
);
738 return float64_to_g(fr
);
741 uint64_t helper_mulg (uint64_t a
, uint64_t b
)
745 fa
= g_to_float64(a
);
746 fb
= g_to_float64(b
);
747 fr
= float64_mul(fa
, fb
, &FP_STATUS
);
748 return float64_to_g(fr
);
751 uint64_t helper_divg (uint64_t a
, uint64_t b
)
755 fa
= g_to_float64(a
);
756 fb
= g_to_float64(b
);
757 fr
= float64_div(fa
, fb
, &FP_STATUS
);
758 return float64_to_g(fr
);
761 uint64_t helper_sqrtg (uint64_t a
)
765 fa
= g_to_float64(a
);
766 fr
= float64_sqrt(fa
, &FP_STATUS
);
767 return float64_to_g(fr
);
/* S floating (single) */

/* Taken from linux/arch/alpha/kernel/traps.c, s_mem_to_reg.
   Expand an IEEE single in memory format to the T-register format.  */
static inline uint64_t float32_to_s_int(uint32_t fi)
{
    uint32_t frac = fi & 0x7fffff;
    uint32_t sign = fi >> 31;
    uint32_t exp_msb = (fi >> 30) & 1;
    uint32_t exp_low = (fi >> 23) & 0x7f;
    uint32_t exp;

    exp = (exp_msb << 10) | exp_low;
    if (exp_msb) {
        if (exp_low == 0x7f)
            exp = 0x7ff;
    } else {
        if (exp_low != 0x00)
            exp |= 0x380;
    }

    return (((uint64_t)sign << 63)
            | ((uint64_t)exp << 52)
            | ((uint64_t)frac << 29));
}
796 static inline uint64_t float32_to_s(float32 fa
)
800 return float32_to_s_int(a
.l
);
/* Compress the S-in-register (T) format back to IEEE single memory bits.  */
static inline uint32_t s_to_float32_int(uint64_t a)
{
    return ((a >> 32) & 0xc0000000) | ((a >> 29) & 0x3fffffff);
}
808 static inline float32
s_to_float32(uint64_t a
)
811 r
.l
= s_to_float32_int(a
);
815 uint32_t helper_s_to_memory (uint64_t a
)
817 return s_to_float32_int(a
);
820 uint64_t helper_memory_to_s (uint32_t a
)
822 return float32_to_s_int(a
);
825 uint64_t helper_adds (uint64_t a
, uint64_t b
)
829 fa
= s_to_float32(a
);
830 fb
= s_to_float32(b
);
831 fr
= float32_add(fa
, fb
, &FP_STATUS
);
832 return float32_to_s(fr
);
835 uint64_t helper_subs (uint64_t a
, uint64_t b
)
839 fa
= s_to_float32(a
);
840 fb
= s_to_float32(b
);
841 fr
= float32_sub(fa
, fb
, &FP_STATUS
);
842 return float32_to_s(fr
);
845 uint64_t helper_muls (uint64_t a
, uint64_t b
)
849 fa
= s_to_float32(a
);
850 fb
= s_to_float32(b
);
851 fr
= float32_mul(fa
, fb
, &FP_STATUS
);
852 return float32_to_s(fr
);
855 uint64_t helper_divs (uint64_t a
, uint64_t b
)
859 fa
= s_to_float32(a
);
860 fb
= s_to_float32(b
);
861 fr
= float32_div(fa
, fb
, &FP_STATUS
);
862 return float32_to_s(fr
);
865 uint64_t helper_sqrts (uint64_t a
)
869 fa
= s_to_float32(a
);
870 fr
= float32_sqrt(fa
, &FP_STATUS
);
871 return float32_to_s(fr
);
875 /* T floating (double) */
876 static inline float64
t_to_float64(uint64_t a
)
878 /* Memory format is the same as float64 */
884 static inline uint64_t float64_to_t(float64 fa
)
886 /* Memory format is the same as float64 */
892 uint64_t helper_addt (uint64_t a
, uint64_t b
)
896 fa
= t_to_float64(a
);
897 fb
= t_to_float64(b
);
898 fr
= float64_add(fa
, fb
, &FP_STATUS
);
899 return float64_to_t(fr
);
902 uint64_t helper_subt (uint64_t a
, uint64_t b
)
906 fa
= t_to_float64(a
);
907 fb
= t_to_float64(b
);
908 fr
= float64_sub(fa
, fb
, &FP_STATUS
);
909 return float64_to_t(fr
);
912 uint64_t helper_mult (uint64_t a
, uint64_t b
)
916 fa
= t_to_float64(a
);
917 fb
= t_to_float64(b
);
918 fr
= float64_mul(fa
, fb
, &FP_STATUS
);
919 return float64_to_t(fr
);
922 uint64_t helper_divt (uint64_t a
, uint64_t b
)
926 fa
= t_to_float64(a
);
927 fb
= t_to_float64(b
);
928 fr
= float64_div(fa
, fb
, &FP_STATUS
);
929 return float64_to_t(fr
);
932 uint64_t helper_sqrtt (uint64_t a
)
936 fa
= t_to_float64(a
);
937 fr
= float64_sqrt(fa
, &FP_STATUS
);
938 return float64_to_t(fr
);
942 uint64_t helper_cmptun (uint64_t a
, uint64_t b
)
946 fa
= t_to_float64(a
);
947 fb
= t_to_float64(b
);
949 if (float64_unordered_quiet(fa
, fb
, &FP_STATUS
)) {
950 return 0x4000000000000000ULL
;
956 uint64_t helper_cmpteq(uint64_t a
, uint64_t b
)
960 fa
= t_to_float64(a
);
961 fb
= t_to_float64(b
);
963 if (float64_eq_quiet(fa
, fb
, &FP_STATUS
))
964 return 0x4000000000000000ULL
;
969 uint64_t helper_cmptle(uint64_t a
, uint64_t b
)
973 fa
= t_to_float64(a
);
974 fb
= t_to_float64(b
);
976 if (float64_le(fa
, fb
, &FP_STATUS
))
977 return 0x4000000000000000ULL
;
982 uint64_t helper_cmptlt(uint64_t a
, uint64_t b
)
986 fa
= t_to_float64(a
);
987 fb
= t_to_float64(b
);
989 if (float64_lt(fa
, fb
, &FP_STATUS
))
990 return 0x4000000000000000ULL
;
995 uint64_t helper_cmpgeq(uint64_t a
, uint64_t b
)
999 fa
= g_to_float64(a
);
1000 fb
= g_to_float64(b
);
1002 if (float64_eq_quiet(fa
, fb
, &FP_STATUS
))
1003 return 0x4000000000000000ULL
;
1008 uint64_t helper_cmpgle(uint64_t a
, uint64_t b
)
1012 fa
= g_to_float64(a
);
1013 fb
= g_to_float64(b
);
1015 if (float64_le(fa
, fb
, &FP_STATUS
))
1016 return 0x4000000000000000ULL
;
1021 uint64_t helper_cmpglt(uint64_t a
, uint64_t b
)
1025 fa
= g_to_float64(a
);
1026 fb
= g_to_float64(b
);
1028 if (float64_lt(fa
, fb
, &FP_STATUS
))
1029 return 0x4000000000000000ULL
;
1034 /* Floating point format conversion */
1035 uint64_t helper_cvtts (uint64_t a
)
1040 fa
= t_to_float64(a
);
1041 fr
= float64_to_float32(fa
, &FP_STATUS
);
1042 return float32_to_s(fr
);
1045 uint64_t helper_cvtst (uint64_t a
)
1050 fa
= s_to_float32(a
);
1051 fr
= float32_to_float64(fa
, &FP_STATUS
);
1052 return float64_to_t(fr
);
1055 uint64_t helper_cvtqs (uint64_t a
)
1057 float32 fr
= int64_to_float32(a
, &FP_STATUS
);
1058 return float32_to_s(fr
);
1061 /* Implement float64 to uint64 conversion without saturation -- we must
1062 supply the truncated result. This behaviour is used by the compiler
1063 to get unsigned conversion for free with the same instruction.
1065 The VI flag is set when overflow or inexact exceptions should be raised. */
1067 static inline uint64_t helper_cvttq_internal(uint64_t a
, int roundmode
, int VI
)
1069 uint64_t frac
, ret
= 0;
1070 uint32_t exp
, sign
, exc
= 0;
1074 exp
= (uint32_t)(a
>> 52) & 0x7ff;
1075 frac
= a
& 0xfffffffffffffull
;
1078 if (unlikely(frac
!= 0)) {
1081 } else if (exp
== 0x7ff) {
1082 exc
= (frac
? float_flag_invalid
: VI
? float_flag_overflow
: 0);
1084 /* Restore implicit bit. */
1085 frac
|= 0x10000000000000ull
;
1087 shift
= exp
- 1023 - 52;
1089 /* In this case the number is so large that we must shift
1090 the fraction left. There is no rounding to do. */
1092 ret
= frac
<< shift
;
1093 if (VI
&& (ret
>> shift
) != frac
) {
1094 exc
= float_flag_overflow
;
1100 /* In this case the number is smaller than the fraction as
1101 represented by the 52 bit number. Here we must think
1102 about rounding the result. Handle this by shifting the
1103 fractional part of the number into the high bits of ROUND.
1104 This will let us efficiently handle round-to-nearest. */
1107 ret
= frac
>> shift
;
1108 round
= frac
<< (64 - shift
);
1110 /* The exponent is so small we shift out everything.
1111 Leave a sticky bit for proper rounding below. */
1117 exc
= (VI
? float_flag_inexact
: 0);
1118 switch (roundmode
) {
1119 case float_round_nearest_even
:
1120 if (round
== (1ull << 63)) {
1121 /* Fraction is exactly 0.5; round to even. */
1123 } else if (round
> (1ull << 63)) {
1127 case float_round_to_zero
:
1129 case float_round_up
:
1132 case float_round_down
:
1142 if (unlikely(exc
)) {
1143 float_raise(exc
, &FP_STATUS
);
1149 uint64_t helper_cvttq(uint64_t a
)
1151 return helper_cvttq_internal(a
, FP_STATUS
.float_rounding_mode
, 1);
1154 uint64_t helper_cvttq_c(uint64_t a
)
1156 return helper_cvttq_internal(a
, float_round_to_zero
, 0);
1159 uint64_t helper_cvttq_svic(uint64_t a
)
1161 return helper_cvttq_internal(a
, float_round_to_zero
, 1);
1164 uint64_t helper_cvtqt (uint64_t a
)
1166 float64 fr
= int64_to_float64(a
, &FP_STATUS
);
1167 return float64_to_t(fr
);
1170 uint64_t helper_cvtqf (uint64_t a
)
1172 float32 fr
= int64_to_float32(a
, &FP_STATUS
);
1173 return float32_to_f(fr
);
1176 uint64_t helper_cvtgf (uint64_t a
)
1181 fa
= g_to_float64(a
);
1182 fr
= float64_to_float32(fa
, &FP_STATUS
);
1183 return float32_to_f(fr
);
1186 uint64_t helper_cvtgq (uint64_t a
)
1188 float64 fa
= g_to_float64(a
);
1189 return float64_to_int64_round_to_zero(fa
, &FP_STATUS
);
1192 uint64_t helper_cvtqg (uint64_t a
)
1195 fr
= int64_to_float64(a
, &FP_STATUS
);
1196 return float64_to_g(fr
);
1199 /* PALcode support special instructions */
1200 #if !defined (CONFIG_USER_ONLY)
1201 void helper_hw_ret (uint64_t a
)
1205 env
->lock_addr
= -1;
1208 swap_shadow_regs(env
);
1212 void helper_tbia(void)
1217 void helper_tbis(uint64_t p
)
1219 tlb_flush_page(env
, p
);
1223 /*****************************************************************************/
1224 /* Softmmu support */
1225 #if !defined (CONFIG_USER_ONLY)
1226 uint64_t helper_ldl_phys(uint64_t p
)
1228 return (int32_t)ldl_phys(p
);
1231 uint64_t helper_ldq_phys(uint64_t p
)
1236 uint64_t helper_ldl_l_phys(uint64_t p
)
1239 return env
->lock_value
= (int32_t)ldl_phys(p
);
1242 uint64_t helper_ldq_l_phys(uint64_t p
)
1245 return env
->lock_value
= ldl_phys(p
);
1248 void helper_stl_phys(uint64_t p
, uint64_t v
)
1253 void helper_stq_phys(uint64_t p
, uint64_t v
)
1258 uint64_t helper_stl_c_phys(uint64_t p
, uint64_t v
)
1262 if (p
== env
->lock_addr
) {
1263 int32_t old
= ldl_phys(p
);
1264 if (old
== (int32_t)env
->lock_value
) {
1269 env
->lock_addr
= -1;
1274 uint64_t helper_stq_c_phys(uint64_t p
, uint64_t v
)
1278 if (p
== env
->lock_addr
) {
1279 uint64_t old
= ldq_phys(p
);
1280 if (old
== env
->lock_value
) {
1285 env
->lock_addr
= -1;
1290 static void QEMU_NORETURN
do_unaligned_access(target_ulong addr
, int is_write
,
1291 int is_user
, void *retaddr
)
1296 do_restore_state(retaddr
);
1299 insn
= ldl_code(pc
);
1301 env
->trap_arg0
= addr
;
1302 env
->trap_arg1
= insn
>> 26; /* opcode */
1303 env
->trap_arg2
= (insn
>> 21) & 31; /* dest regno */
1304 helper_excp(EXCP_UNALIGN
, 0);
1307 void QEMU_NORETURN
cpu_unassigned_access(CPUState
*env1
,
1308 target_phys_addr_t addr
, int is_write
,
1309 int is_exec
, int unused
, int size
)
1312 env
->trap_arg0
= addr
;
1313 env
->trap_arg1
= is_write
;
1314 dynamic_excp(EXCP_MCHK
, 0);
1317 #include "softmmu_exec.h"
1319 #define MMUSUFFIX _mmu
1320 #define ALIGNED_ONLY
1323 #include "softmmu_template.h"
1326 #include "softmmu_template.h"
1329 #include "softmmu_template.h"
1332 #include "softmmu_template.h"
1334 /* try to fill the TLB and return an exception if error. If retaddr is
1335 NULL, it means that the function was called in C code (i.e. not
1336 from generated code or from helper.c) */
1337 /* XXX: fix it to restore all registers */
1338 void tlb_fill (target_ulong addr
, int is_write
, int mmu_idx
, void *retaddr
)
1340 CPUState
*saved_env
;
1343 /* XXX: hack to restore env in all cases, even if not called from
1346 env
= cpu_single_env
;
1347 ret
= cpu_alpha_handle_mmu_fault(env
, addr
, is_write
, mmu_idx
);
1348 if (unlikely(ret
!= 0)) {
1349 do_restore_state(retaddr
);
1350 /* Exception index and error code are already set */