2 * Alpha emulation cpu micro-operations helpers for qemu.
4 * Copyright (c) 2007 Jocelyn Mayer
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
21 #include "dyngen-exec.h"
22 #include "host-utils.h"
23 #include "softfloat.h"
26 #include "qemu-timer.h"
28 #define FP_STATUS (env->fp_status)
30 /*****************************************************************************/
31 /* Exceptions processing helpers */
33 /* This should only be called from translate, via gen_excp.
34 We expect that ENV->PC has already been updated. */
35 void QEMU_NORETURN
helper_excp(int excp
, int error
)
37 env
->exception_index
= excp
;
38 env
->error_code
= error
;
42 static void do_restore_state(void *retaddr
)
44 unsigned long pc
= (unsigned long)retaddr
;
47 TranslationBlock
*tb
= tb_find_pc(pc
);
49 cpu_restore_state(tb
, env
, pc
);
54 /* This may be called from any of the helpers to set up EXCEPTION_INDEX. */
55 static void QEMU_NORETURN
dynamic_excp(int excp
, int error
)
57 env
->exception_index
= excp
;
58 env
->error_code
= error
;
59 do_restore_state(GETPC());
63 static void QEMU_NORETURN
arith_excp(int exc
, uint64_t mask
)
66 env
->trap_arg1
= mask
;
67 dynamic_excp(EXCP_ARITH
, 0);
70 uint64_t helper_load_pcc (void)
72 #ifndef CONFIG_USER_ONLY
73 /* In system mode we have access to a decent high-resolution clock.
74 In order to make OS-level time accounting work with the RPCC,
75 present it with a well-timed clock fixed at 250MHz. */
76 return (((uint64_t)env
->pcc_ofs
<< 32)
77 | (uint32_t)(qemu_get_clock_ns(vm_clock
) >> 2));
79 /* In user-mode, vm_clock doesn't exist. Just pass through the host cpu
80 clock ticks. Also, don't bother taking PCC_OFS into account. */
81 return (uint32_t)cpu_get_real_ticks();
85 uint64_t helper_load_fpcr (void)
87 return cpu_alpha_load_fpcr (env
);
90 void helper_store_fpcr (uint64_t val
)
92 cpu_alpha_store_fpcr (env
, val
);
95 uint64_t helper_addqv (uint64_t op1
, uint64_t op2
)
99 if (unlikely((tmp
^ op2
^ (-1ULL)) & (tmp
^ op1
) & (1ULL << 63))) {
100 arith_excp(EXC_M_IOV
, 0);
105 uint64_t helper_addlv (uint64_t op1
, uint64_t op2
)
108 op1
= (uint32_t)(op1
+ op2
);
109 if (unlikely((tmp
^ op2
^ (-1UL)) & (tmp
^ op1
) & (1UL << 31))) {
110 arith_excp(EXC_M_IOV
, 0);
115 uint64_t helper_subqv (uint64_t op1
, uint64_t op2
)
119 if (unlikely((op1
^ op2
) & (res
^ op1
) & (1ULL << 63))) {
120 arith_excp(EXC_M_IOV
, 0);
125 uint64_t helper_sublv (uint64_t op1
, uint64_t op2
)
129 if (unlikely((op1
^ op2
) & (res
^ op1
) & (1UL << 31))) {
130 arith_excp(EXC_M_IOV
, 0);
135 uint64_t helper_mullv (uint64_t op1
, uint64_t op2
)
137 int64_t res
= (int64_t)op1
* (int64_t)op2
;
139 if (unlikely((int32_t)res
!= res
)) {
140 arith_excp(EXC_M_IOV
, 0);
142 return (int64_t)((int32_t)res
);
145 uint64_t helper_mulqv (uint64_t op1
, uint64_t op2
)
149 muls64(&tl
, &th
, op1
, op2
);
150 /* If th != 0 && th != -1, then we had an overflow */
151 if (unlikely((th
+ 1) > 1)) {
152 arith_excp(EXC_M_IOV
, 0);
/* UMULH: return the high 64 bits of the unsigned 128-bit product.  */
uint64_t helper_umulh (uint64_t op1, uint64_t op2)
{
    uint64_t tl, th;

    mulu64(&tl, &th, op1, op2);
    return th;
}
165 uint64_t helper_ctpop (uint64_t arg
)
170 uint64_t helper_ctlz (uint64_t arg
)
175 uint64_t helper_cttz (uint64_t arg
)
/* Clear the bytes of OP selected by MSKB: bit i of MSKB set means byte i
   of the result is zeroed; clear bits leave the byte unchanged.  */
static inline uint64_t byte_zap(uint64_t op, uint8_t mskb)
{
    uint64_t mask;

    mask = 0;
    mask |= ((mskb >> 0) & 1) * 0x00000000000000FFULL;
    mask |= ((mskb >> 1) & 1) * 0x000000000000FF00ULL;
    mask |= ((mskb >> 2) & 1) * 0x0000000000FF0000ULL;
    mask |= ((mskb >> 3) & 1) * 0x00000000FF000000ULL;
    mask |= ((mskb >> 4) & 1) * 0x000000FF00000000ULL;
    mask |= ((mskb >> 5) & 1) * 0x0000FF0000000000ULL;
    mask |= ((mskb >> 6) & 1) * 0x00FF000000000000ULL;
    mask |= ((mskb >> 7) & 1) * 0xFF00000000000000ULL;

    return op & ~mask;
}
/* ZAP: clear the bytes of VAL selected by the low 8 bits of MASK
   (byte_zap takes a uint8_t, so the upper mask bits are ignored).  */
uint64_t helper_zap(uint64_t val, uint64_t mask)
{
    return byte_zap(val, mask);
}
/* ZAPNOT: keep the bytes of VAL selected by MASK, clearing the rest
   (the complement of the mask is handed to byte_zap).  */
uint64_t helper_zapnot(uint64_t val, uint64_t mask)
{
    return byte_zap(val, ~mask);
}
/* CMPBGE: compare the eight bytes of the operands as unsigned values.
   Bit i of the result is set when byte i of OP1 >= byte i of OP2.
   NOTE(review): the per-byte result accumulation was reconstructed from
   mangled source -- confirm against the original.  */
uint64_t helper_cmpbge (uint64_t op1, uint64_t op2)
{
    uint8_t opa, opb, res;
    int i;

    res = 0;
    for (i = 0; i < 8; i++) {
        opa = op1 >> (i * 8);
        opb = op2 >> (i * 8);
        if (opa >= opb) {
            res |= 1 << i;
        }
    }
    return res;
}
/* MINUB8: per-byte unsigned minimum of the two operands.  */
uint64_t helper_minub8 (uint64_t op1, uint64_t op2)
{
    uint64_t res = 0;
    uint8_t opa, opb, opr;
    int i;

    for (i = 0; i < 8; ++i) {
        opa = op1 >> (i * 8);
        opb = op2 >> (i * 8);
        opr = opa < opb ? opa : opb;
        res |= (uint64_t)opr << (i * 8);
    }
    return res;
}
/* MINSB8: per-byte signed minimum of the two operands.
   OPR is unsigned so the widening cast below zero-extends the selected
   byte instead of smearing its sign bit across the result.  */
uint64_t helper_minsb8 (uint64_t op1, uint64_t op2)
{
    uint64_t res = 0;
    int8_t opa, opb;
    uint8_t opr;
    int i;

    for (i = 0; i < 8; ++i) {
        opa = op1 >> (i * 8);
        opb = op2 >> (i * 8);
        opr = opa < opb ? opa : opb;
        res |= (uint64_t)opr << (i * 8);
    }
    return res;
}
/* MINUW4: per-16-bit-word unsigned minimum of the two operands.  */
uint64_t helper_minuw4 (uint64_t op1, uint64_t op2)
{
    uint64_t res = 0;
    uint16_t opa, opb, opr;
    int i;

    for (i = 0; i < 4; ++i) {
        opa = op1 >> (i * 16);
        opb = op2 >> (i * 16);
        opr = opa < opb ? opa : opb;
        res |= (uint64_t)opr << (i * 16);
    }
    return res;
}
/* MINSW4: per-16-bit-word signed minimum of the two operands.
   OPR is unsigned so the widening cast does not sign-extend.  */
uint64_t helper_minsw4 (uint64_t op1, uint64_t op2)
{
    uint64_t res = 0;
    int16_t opa, opb;
    uint16_t opr;
    int i;

    for (i = 0; i < 4; ++i) {
        opa = op1 >> (i * 16);
        opb = op2 >> (i * 16);
        opr = opa < opb ? opa : opb;
        res |= (uint64_t)opr << (i * 16);
    }
    return res;
}
/* MAXUB8: per-byte unsigned maximum of the two operands.  */
uint64_t helper_maxub8 (uint64_t op1, uint64_t op2)
{
    uint64_t res = 0;
    uint8_t opa, opb, opr;
    int i;

    for (i = 0; i < 8; ++i) {
        opa = op1 >> (i * 8);
        opb = op2 >> (i * 8);
        opr = opa > opb ? opa : opb;
        res |= (uint64_t)opr << (i * 8);
    }
    return res;
}
/* MAXSB8: per-byte signed maximum of the two operands.
   OPR is unsigned so the widening cast does not sign-extend.  */
uint64_t helper_maxsb8 (uint64_t op1, uint64_t op2)
{
    uint64_t res = 0;
    int8_t opa, opb;
    uint8_t opr;
    int i;

    for (i = 0; i < 8; ++i) {
        opa = op1 >> (i * 8);
        opb = op2 >> (i * 8);
        opr = opa > opb ? opa : opb;
        res |= (uint64_t)opr << (i * 8);
    }
    return res;
}
/* MAXUW4: per-16-bit-word unsigned maximum of the two operands.  */
uint64_t helper_maxuw4 (uint64_t op1, uint64_t op2)
{
    uint64_t res = 0;
    uint16_t opa, opb, opr;
    int i;

    for (i = 0; i < 4; ++i) {
        opa = op1 >> (i * 16);
        opb = op2 >> (i * 16);
        opr = opa > opb ? opa : opb;
        res |= (uint64_t)opr << (i * 16);
    }
    return res;
}
/* MAXSW4: per-16-bit-word signed maximum of the two operands.
   OPR is unsigned so the widening cast does not sign-extend.  */
uint64_t helper_maxsw4 (uint64_t op1, uint64_t op2)
{
    uint64_t res = 0;
    int16_t opa, opb;
    uint16_t opr;
    int i;

    for (i = 0; i < 4; ++i) {
        opa = op1 >> (i * 16);
        opb = op2 >> (i * 16);
        opr = opa > opb ? opa : opb;
        res |= (uint64_t)opr << (i * 16);
    }
    return res;
}
/* PERR: pixel error -- sum of the absolute differences of the eight
   unsigned byte pairs.  NOTE(review): the per-byte absolute-difference
   step was reconstructed from mangled source -- confirm against the
   original.  */
uint64_t helper_perr (uint64_t op1, uint64_t op2)
{
    uint64_t res = 0;
    uint8_t opa, opb, opr;
    int i;

    for (i = 0; i < 8; ++i) {
        opa = op1 >> (i * 8);
        opb = op2 >> (i * 8);
        if (opa >= opb) {
            opr = opa - opb;
        } else {
            opr = opb - opa;
        }
        res += opr;
    }
    return res;
}
/* PKLB: pack the low bytes of the two longwords of OP1 into the low
   two bytes of the result.  */
uint64_t helper_pklb (uint64_t op1)
{
    return (op1 & 0xff) | ((op1 >> 24) & 0xff00);
}
/* PKWB: pack the low bytes of the four words of OP1 into the low four
   bytes of the result.  NOTE(review): the first term was reconstructed
   by symmetry with the other three -- confirm against the original.  */
uint64_t helper_pkwb (uint64_t op1)
{
    return ((op1 & 0xff)
            | ((op1 >> 8) & 0xff00)
            | ((op1 >> 16) & 0xff0000)
            | ((op1 >> 24) & 0xff000000));
}
/* UNPKBL: unpack the low two bytes of OP1 into the low bytes of two
   longwords (inverse of PKLB).  */
uint64_t helper_unpkbl (uint64_t op1)
{
    return (op1 & 0xff) | ((op1 & 0xff00) << 24);
}
/* UNPKBW: unpack the low four bytes of OP1 into the low bytes of four
   words (inverse of PKWB).  NOTE(review): the first term was
   reconstructed by symmetry with the other three -- confirm against
   the original.  */
uint64_t helper_unpkbw (uint64_t op1)
{
    return ((op1 & 0xff)
            | ((op1 & 0xff00) << 8)
            | ((op1 & 0xff0000) << 16)
            | ((op1 & 0xff000000) << 24));
}
390 /* Floating point helpers */
392 void helper_setroundmode (uint32_t val
)
394 set_float_rounding_mode(val
, &FP_STATUS
);
397 void helper_setflushzero (uint32_t val
)
399 set_flush_to_zero(val
, &FP_STATUS
);
402 void helper_fp_exc_clear (void)
404 set_float_exception_flags(0, &FP_STATUS
);
407 uint32_t helper_fp_exc_get (void)
409 return get_float_exception_flags(&FP_STATUS
);
412 /* Raise exceptions for ieee fp insns without software completion.
413 In that case there are no exceptions that don't trap; the mask
415 void helper_fp_exc_raise(uint32_t exc
, uint32_t regno
)
420 if (exc
& float_flag_invalid
) {
423 if (exc
& float_flag_divbyzero
) {
426 if (exc
& float_flag_overflow
) {
429 if (exc
& float_flag_underflow
) {
432 if (exc
& float_flag_inexact
) {
436 arith_excp(hw_exc
, 1ull << regno
);
440 /* Raise exceptions for ieee fp insns with software completion. */
441 void helper_fp_exc_raise_s(uint32_t exc
, uint32_t regno
)
444 env
->fpcr_exc_status
|= exc
;
446 exc
&= ~env
->fpcr_exc_mask
;
448 helper_fp_exc_raise(exc
, regno
);
453 /* Input remapping without software completion. Handle denormal-map-to-zero
454 and trap for all other non-finite numbers. */
455 uint64_t helper_ieee_input(uint64_t val
)
457 uint32_t exp
= (uint32_t)(val
>> 52) & 0x7ff;
458 uint64_t frac
= val
& 0xfffffffffffffull
;
462 /* If DNZ is set flush denormals to zero on input. */
466 arith_excp(EXC_M_UNF
, 0);
469 } else if (exp
== 0x7ff) {
470 /* Infinity or NaN. */
471 /* ??? I'm not sure these exception bit flags are correct. I do
472 know that the Linux kernel, at least, doesn't rely on them and
473 just emulates the insn to figure out what exception to use. */
474 arith_excp(frac
? EXC_M_INV
: EXC_M_FOV
, 0);
479 /* Similar, but does not trap for infinities. Used for comparisons. */
480 uint64_t helper_ieee_input_cmp(uint64_t val
)
482 uint32_t exp
= (uint32_t)(val
>> 52) & 0x7ff;
483 uint64_t frac
= val
& 0xfffffffffffffull
;
487 /* If DNZ is set flush denormals to zero on input. */
491 arith_excp(EXC_M_UNF
, 0);
494 } else if (exp
== 0x7ff && frac
) {
496 arith_excp(EXC_M_INV
, 0);
501 /* Input remapping with software completion enabled. All we have to do
502 is handle denormal-map-to-zero; all other inputs get exceptions as
503 needed from the actual operation. */
504 uint64_t helper_ieee_input_s(uint64_t val
)
507 uint32_t exp
= (uint32_t)(val
>> 52) & 0x7ff;
515 /* F floating (VAX) */
516 static inline uint64_t float32_to_f(float32 fa
)
518 uint64_t r
, exp
, mant
, sig
;
522 sig
= ((uint64_t)a
.l
& 0x80000000) << 32;
523 exp
= (a
.l
>> 23) & 0xff;
524 mant
= ((uint64_t)a
.l
& 0x007fffff) << 29;
527 /* NaN or infinity */
528 r
= 1; /* VAX dirty zero */
529 } else if (exp
== 0) {
535 r
= sig
| ((exp
+ 1) << 52) | mant
;
540 r
= 1; /* VAX dirty zero */
542 r
= sig
| ((exp
+ 2) << 52);
549 static inline float32
f_to_float32(uint64_t a
)
551 uint32_t exp
, mant_sig
;
554 exp
= ((a
>> 55) & 0x80) | ((a
>> 52) & 0x7f);
555 mant_sig
= ((a
>> 32) & 0x80000000) | ((a
>> 29) & 0x007fffff);
557 if (unlikely(!exp
&& mant_sig
)) {
558 /* Reserved operands / Dirty zero */
559 dynamic_excp(EXCP_OPCDEC
, 0);
566 r
.l
= ((exp
- 2) << 23) | mant_sig
;
/* Convert a VAX F-float from the 64-bit register format to its 32-bit
   memory format (the 16-bit halves swap position and the exponent
   narrows).  */
uint32_t helper_f_to_memory (uint64_t a)
{
    uint32_t r;

    r =  (a & 0x00001fffe0000000ull) >> 13;
    r |= (a & 0x07ffe00000000000ull) >> 45;
    r |= (a & 0xc000000000000000ull) >> 48;
    return r;
}
/* Convert a VAX F-float from its 32-bit memory format to the 64-bit
   register format: the 16-bit halves swap position and the 8-bit
   exponent widens to 11 bits.  */
uint64_t helper_memory_to_f (uint32_t a)
{
    uint64_t r;

    r =  ((uint64_t)(a & 0x0000c000)) << 48;
    r |= ((uint64_t)(a & 0x003fffff)) << 45;
    r |= ((uint64_t)(a & 0xffff0000)) << 13;
    /* NOTE(review): reconstructed line -- when the exponent MSB (memory
       bit 14) is clear, the widened exponent field is padded with ones;
       confirm against the original source.  */
    if (!(a & 0x00004000)) {
        r |= 0x7ll << 59;
    }
    return r;
}
592 /* ??? Emulating VAX arithmetic with IEEE arithmetic is wrong. We should
593 either implement VAX arithmetic properly or just signal invalid opcode. */
595 uint64_t helper_addf (uint64_t a
, uint64_t b
)
599 fa
= f_to_float32(a
);
600 fb
= f_to_float32(b
);
601 fr
= float32_add(fa
, fb
, &FP_STATUS
);
602 return float32_to_f(fr
);
605 uint64_t helper_subf (uint64_t a
, uint64_t b
)
609 fa
= f_to_float32(a
);
610 fb
= f_to_float32(b
);
611 fr
= float32_sub(fa
, fb
, &FP_STATUS
);
612 return float32_to_f(fr
);
615 uint64_t helper_mulf (uint64_t a
, uint64_t b
)
619 fa
= f_to_float32(a
);
620 fb
= f_to_float32(b
);
621 fr
= float32_mul(fa
, fb
, &FP_STATUS
);
622 return float32_to_f(fr
);
625 uint64_t helper_divf (uint64_t a
, uint64_t b
)
629 fa
= f_to_float32(a
);
630 fb
= f_to_float32(b
);
631 fr
= float32_div(fa
, fb
, &FP_STATUS
);
632 return float32_to_f(fr
);
635 uint64_t helper_sqrtf (uint64_t t
)
639 ft
= f_to_float32(t
);
640 fr
= float32_sqrt(ft
, &FP_STATUS
);
641 return float32_to_f(fr
);
645 /* G floating (VAX) */
646 static inline uint64_t float64_to_g(float64 fa
)
648 uint64_t r
, exp
, mant
, sig
;
652 sig
= a
.ll
& 0x8000000000000000ull
;
653 exp
= (a
.ll
>> 52) & 0x7ff;
654 mant
= a
.ll
& 0x000fffffffffffffull
;
657 /* NaN or infinity */
658 r
= 1; /* VAX dirty zero */
659 } else if (exp
== 0) {
665 r
= sig
| ((exp
+ 1) << 52) | mant
;
670 r
= 1; /* VAX dirty zero */
672 r
= sig
| ((exp
+ 2) << 52);
679 static inline float64
g_to_float64(uint64_t a
)
681 uint64_t exp
, mant_sig
;
684 exp
= (a
>> 52) & 0x7ff;
685 mant_sig
= a
& 0x800fffffffffffffull
;
687 if (!exp
&& mant_sig
) {
688 /* Reserved operands / Dirty zero */
689 dynamic_excp(EXCP_OPCDEC
, 0);
696 r
.ll
= ((exp
- 2) << 52) | mant_sig
;
/* Convert a VAX G-float from the 64-bit register format to its memory
   format by reversing the order of the four 16-bit words.  */
uint64_t helper_g_to_memory (uint64_t a)
{
    uint64_t r;

    r =  (a & 0x000000000000ffffull) << 48;
    r |= (a & 0x00000000ffff0000ull) << 16;
    r |= (a & 0x0000ffff00000000ull) >> 16;
    r |= (a & 0xffff000000000000ull) >> 48;
    return r;
}
/* Convert a VAX G-float from its memory format to the 64-bit register
   format.  The transformation is its own inverse: reverse the order of
   the four 16-bit words.  */
uint64_t helper_memory_to_g (uint64_t a)
{
    uint64_t r;

    r =  (a & 0x000000000000ffffull) << 48;
    r |= (a & 0x00000000ffff0000ull) << 16;
    r |= (a & 0x0000ffff00000000ull) >> 16;
    r |= (a & 0xffff000000000000ull) >> 48;
    return r;
}
722 uint64_t helper_addg (uint64_t a
, uint64_t b
)
726 fa
= g_to_float64(a
);
727 fb
= g_to_float64(b
);
728 fr
= float64_add(fa
, fb
, &FP_STATUS
);
729 return float64_to_g(fr
);
732 uint64_t helper_subg (uint64_t a
, uint64_t b
)
736 fa
= g_to_float64(a
);
737 fb
= g_to_float64(b
);
738 fr
= float64_sub(fa
, fb
, &FP_STATUS
);
739 return float64_to_g(fr
);
742 uint64_t helper_mulg (uint64_t a
, uint64_t b
)
746 fa
= g_to_float64(a
);
747 fb
= g_to_float64(b
);
748 fr
= float64_mul(fa
, fb
, &FP_STATUS
);
749 return float64_to_g(fr
);
752 uint64_t helper_divg (uint64_t a
, uint64_t b
)
756 fa
= g_to_float64(a
);
757 fb
= g_to_float64(b
);
758 fr
= float64_div(fa
, fb
, &FP_STATUS
);
759 return float64_to_g(fr
);
762 uint64_t helper_sqrtg (uint64_t a
)
766 fa
= g_to_float64(a
);
767 fr
= float64_sqrt(fa
, &FP_STATUS
);
768 return float64_to_g(fr
);
772 /* S floating (single) */
774 /* Taken from linux/arch/alpha/kernel/traps.c, s_mem_to_reg. */
/* Taken from linux/arch/alpha/kernel/traps.c, s_mem_to_reg.
   Expand an IEEE single in its 32-bit memory layout into the 64-bit
   register (double) layout: the 8-bit exponent is rebiased into the
   11-bit field and the fraction shifts into the high bits.
   NOTE(review): the exponent-remap branches below were reconstructed
   from mangled source -- confirm against the original.  */
static inline uint64_t float32_to_s_int(uint32_t fi)
{
    uint32_t frac = fi & 0x7fffff;
    uint32_t sign = fi >> 31;
    uint32_t exp_msb = (fi >> 30) & 1;
    uint32_t exp_low = (fi >> 23) & 0x7f;
    uint32_t exp;

    exp = (exp_msb << 10) | exp_low;
    if (exp_msb) {
        if (exp_low == 0x7f) {
            exp = 0x7ff;            /* Inf/NaN maps to max exponent.  */
        }
    } else {
        if (exp_low == 0x00) {
            exp = 0x000;            /* Zero/denormal keeps exponent 0.  */
        } else {
            exp |= 0x380;           /* Rebias the normal-number range.  */
        }
    }

    return (((uint64_t)sign << 63)
            | ((uint64_t)exp << 52)
            | ((uint64_t)frac << 29));
}
797 static inline uint64_t float32_to_s(float32 fa
)
801 return float32_to_s_int(a
.l
);
/* Compress the 64-bit register (double) layout of an S-float back to
   the IEEE single 32-bit memory layout: sign plus top two exponent bits
   from bits 62-63, remaining exponent and fraction from bits 29-58.  */
static inline uint32_t s_to_float32_int(uint64_t a)
{
    return ((a >> 32) & 0xc0000000) | ((a >> 29) & 0x3fffffff);
}
809 static inline float32
s_to_float32(uint64_t a
)
812 r
.l
= s_to_float32_int(a
);
816 uint32_t helper_s_to_memory (uint64_t a
)
818 return s_to_float32_int(a
);
821 uint64_t helper_memory_to_s (uint32_t a
)
823 return float32_to_s_int(a
);
826 uint64_t helper_adds (uint64_t a
, uint64_t b
)
830 fa
= s_to_float32(a
);
831 fb
= s_to_float32(b
);
832 fr
= float32_add(fa
, fb
, &FP_STATUS
);
833 return float32_to_s(fr
);
836 uint64_t helper_subs (uint64_t a
, uint64_t b
)
840 fa
= s_to_float32(a
);
841 fb
= s_to_float32(b
);
842 fr
= float32_sub(fa
, fb
, &FP_STATUS
);
843 return float32_to_s(fr
);
846 uint64_t helper_muls (uint64_t a
, uint64_t b
)
850 fa
= s_to_float32(a
);
851 fb
= s_to_float32(b
);
852 fr
= float32_mul(fa
, fb
, &FP_STATUS
);
853 return float32_to_s(fr
);
856 uint64_t helper_divs (uint64_t a
, uint64_t b
)
860 fa
= s_to_float32(a
);
861 fb
= s_to_float32(b
);
862 fr
= float32_div(fa
, fb
, &FP_STATUS
);
863 return float32_to_s(fr
);
866 uint64_t helper_sqrts (uint64_t a
)
870 fa
= s_to_float32(a
);
871 fr
= float32_sqrt(fa
, &FP_STATUS
);
872 return float32_to_s(fr
);
876 /* T floating (double) */
877 static inline float64
t_to_float64(uint64_t a
)
879 /* Memory format is the same as float64 */
885 static inline uint64_t float64_to_t(float64 fa
)
887 /* Memory format is the same as float64 */
893 uint64_t helper_addt (uint64_t a
, uint64_t b
)
897 fa
= t_to_float64(a
);
898 fb
= t_to_float64(b
);
899 fr
= float64_add(fa
, fb
, &FP_STATUS
);
900 return float64_to_t(fr
);
903 uint64_t helper_subt (uint64_t a
, uint64_t b
)
907 fa
= t_to_float64(a
);
908 fb
= t_to_float64(b
);
909 fr
= float64_sub(fa
, fb
, &FP_STATUS
);
910 return float64_to_t(fr
);
913 uint64_t helper_mult (uint64_t a
, uint64_t b
)
917 fa
= t_to_float64(a
);
918 fb
= t_to_float64(b
);
919 fr
= float64_mul(fa
, fb
, &FP_STATUS
);
920 return float64_to_t(fr
);
923 uint64_t helper_divt (uint64_t a
, uint64_t b
)
927 fa
= t_to_float64(a
);
928 fb
= t_to_float64(b
);
929 fr
= float64_div(fa
, fb
, &FP_STATUS
);
930 return float64_to_t(fr
);
933 uint64_t helper_sqrtt (uint64_t a
)
937 fa
= t_to_float64(a
);
938 fr
= float64_sqrt(fa
, &FP_STATUS
);
939 return float64_to_t(fr
);
943 uint64_t helper_cmptun (uint64_t a
, uint64_t b
)
947 fa
= t_to_float64(a
);
948 fb
= t_to_float64(b
);
950 if (float64_unordered_quiet(fa
, fb
, &FP_STATUS
)) {
951 return 0x4000000000000000ULL
;
957 uint64_t helper_cmpteq(uint64_t a
, uint64_t b
)
961 fa
= t_to_float64(a
);
962 fb
= t_to_float64(b
);
964 if (float64_eq_quiet(fa
, fb
, &FP_STATUS
))
965 return 0x4000000000000000ULL
;
970 uint64_t helper_cmptle(uint64_t a
, uint64_t b
)
974 fa
= t_to_float64(a
);
975 fb
= t_to_float64(b
);
977 if (float64_le(fa
, fb
, &FP_STATUS
))
978 return 0x4000000000000000ULL
;
983 uint64_t helper_cmptlt(uint64_t a
, uint64_t b
)
987 fa
= t_to_float64(a
);
988 fb
= t_to_float64(b
);
990 if (float64_lt(fa
, fb
, &FP_STATUS
))
991 return 0x4000000000000000ULL
;
996 uint64_t helper_cmpgeq(uint64_t a
, uint64_t b
)
1000 fa
= g_to_float64(a
);
1001 fb
= g_to_float64(b
);
1003 if (float64_eq_quiet(fa
, fb
, &FP_STATUS
))
1004 return 0x4000000000000000ULL
;
1009 uint64_t helper_cmpgle(uint64_t a
, uint64_t b
)
1013 fa
= g_to_float64(a
);
1014 fb
= g_to_float64(b
);
1016 if (float64_le(fa
, fb
, &FP_STATUS
))
1017 return 0x4000000000000000ULL
;
1022 uint64_t helper_cmpglt(uint64_t a
, uint64_t b
)
1026 fa
= g_to_float64(a
);
1027 fb
= g_to_float64(b
);
1029 if (float64_lt(fa
, fb
, &FP_STATUS
))
1030 return 0x4000000000000000ULL
;
1035 /* Floating point format conversion */
1036 uint64_t helper_cvtts (uint64_t a
)
1041 fa
= t_to_float64(a
);
1042 fr
= float64_to_float32(fa
, &FP_STATUS
);
1043 return float32_to_s(fr
);
1046 uint64_t helper_cvtst (uint64_t a
)
1051 fa
= s_to_float32(a
);
1052 fr
= float32_to_float64(fa
, &FP_STATUS
);
1053 return float64_to_t(fr
);
1056 uint64_t helper_cvtqs (uint64_t a
)
1058 float32 fr
= int64_to_float32(a
, &FP_STATUS
);
1059 return float32_to_s(fr
);
1062 /* Implement float64 to uint64 conversion without saturation -- we must
1063 supply the truncated result. This behaviour is used by the compiler
1064 to get unsigned conversion for free with the same instruction.
1066 The VI flag is set when overflow or inexact exceptions should be raised. */
1068 static inline uint64_t helper_cvttq_internal(uint64_t a
, int roundmode
, int VI
)
1070 uint64_t frac
, ret
= 0;
1071 uint32_t exp
, sign
, exc
= 0;
1075 exp
= (uint32_t)(a
>> 52) & 0x7ff;
1076 frac
= a
& 0xfffffffffffffull
;
1079 if (unlikely(frac
!= 0)) {
1082 } else if (exp
== 0x7ff) {
1083 exc
= (frac
? float_flag_invalid
: VI
? float_flag_overflow
: 0);
1085 /* Restore implicit bit. */
1086 frac
|= 0x10000000000000ull
;
1088 shift
= exp
- 1023 - 52;
1090 /* In this case the number is so large that we must shift
1091 the fraction left. There is no rounding to do. */
1093 ret
= frac
<< shift
;
1094 if (VI
&& (ret
>> shift
) != frac
) {
1095 exc
= float_flag_overflow
;
1101 /* In this case the number is smaller than the fraction as
1102 represented by the 52 bit number. Here we must think
1103 about rounding the result. Handle this by shifting the
1104 fractional part of the number into the high bits of ROUND.
1105 This will let us efficiently handle round-to-nearest. */
1108 ret
= frac
>> shift
;
1109 round
= frac
<< (64 - shift
);
1111 /* The exponent is so small we shift out everything.
1112 Leave a sticky bit for proper rounding below. */
1118 exc
= (VI
? float_flag_inexact
: 0);
1119 switch (roundmode
) {
1120 case float_round_nearest_even
:
1121 if (round
== (1ull << 63)) {
1122 /* Fraction is exactly 0.5; round to even. */
1124 } else if (round
> (1ull << 63)) {
1128 case float_round_to_zero
:
1130 case float_round_up
:
1133 case float_round_down
:
1143 if (unlikely(exc
)) {
1144 float_raise(exc
, &FP_STATUS
);
1150 uint64_t helper_cvttq(uint64_t a
)
1152 return helper_cvttq_internal(a
, FP_STATUS
.float_rounding_mode
, 1);
1155 uint64_t helper_cvttq_c(uint64_t a
)
1157 return helper_cvttq_internal(a
, float_round_to_zero
, 0);
1160 uint64_t helper_cvttq_svic(uint64_t a
)
1162 return helper_cvttq_internal(a
, float_round_to_zero
, 1);
1165 uint64_t helper_cvtqt (uint64_t a
)
1167 float64 fr
= int64_to_float64(a
, &FP_STATUS
);
1168 return float64_to_t(fr
);
1171 uint64_t helper_cvtqf (uint64_t a
)
1173 float32 fr
= int64_to_float32(a
, &FP_STATUS
);
1174 return float32_to_f(fr
);
1177 uint64_t helper_cvtgf (uint64_t a
)
1182 fa
= g_to_float64(a
);
1183 fr
= float64_to_float32(fa
, &FP_STATUS
);
1184 return float32_to_f(fr
);
1187 uint64_t helper_cvtgq (uint64_t a
)
1189 float64 fa
= g_to_float64(a
);
1190 return float64_to_int64_round_to_zero(fa
, &FP_STATUS
);
1193 uint64_t helper_cvtqg (uint64_t a
)
1196 fr
= int64_to_float64(a
, &FP_STATUS
);
1197 return float64_to_g(fr
);
1200 /* PALcode support special instructions */
1201 #if !defined (CONFIG_USER_ONLY)
1202 void helper_hw_ret (uint64_t a
)
1206 env
->lock_addr
= -1;
1209 swap_shadow_regs(env
);
1213 void helper_tbia(void)
1218 void helper_tbis(uint64_t p
)
1220 tlb_flush_page(env
, p
);
1223 void helper_halt(uint64_t restart
)
1226 qemu_system_reset_request();
1228 qemu_system_shutdown_request();
1232 uint64_t helper_get_time(void)
1234 return qemu_get_clock_ns(rtc_clock
);
1237 void helper_set_alarm(uint64_t expire
)
1240 env
->alarm_expire
= expire
;
1241 qemu_mod_timer(env
->alarm_timer
, expire
);
1243 qemu_del_timer(env
->alarm_timer
);
1248 /*****************************************************************************/
1249 /* Softmmu support */
1250 #if !defined (CONFIG_USER_ONLY)
1251 uint64_t helper_ldl_phys(uint64_t p
)
1253 return (int32_t)ldl_phys(p
);
1256 uint64_t helper_ldq_phys(uint64_t p
)
1261 uint64_t helper_ldl_l_phys(uint64_t p
)
1264 return env
->lock_value
= (int32_t)ldl_phys(p
);
1267 uint64_t helper_ldq_l_phys(uint64_t p
)
1270 return env
->lock_value
= ldl_phys(p
);
1273 void helper_stl_phys(uint64_t p
, uint64_t v
)
1278 void helper_stq_phys(uint64_t p
, uint64_t v
)
1283 uint64_t helper_stl_c_phys(uint64_t p
, uint64_t v
)
1287 if (p
== env
->lock_addr
) {
1288 int32_t old
= ldl_phys(p
);
1289 if (old
== (int32_t)env
->lock_value
) {
1294 env
->lock_addr
= -1;
1299 uint64_t helper_stq_c_phys(uint64_t p
, uint64_t v
)
1303 if (p
== env
->lock_addr
) {
1304 uint64_t old
= ldq_phys(p
);
1305 if (old
== env
->lock_value
) {
1310 env
->lock_addr
= -1;
1315 static void QEMU_NORETURN
do_unaligned_access(target_ulong addr
, int is_write
,
1316 int is_user
, void *retaddr
)
1321 do_restore_state(retaddr
);
1324 insn
= ldl_code(pc
);
1326 env
->trap_arg0
= addr
;
1327 env
->trap_arg1
= insn
>> 26; /* opcode */
1328 env
->trap_arg2
= (insn
>> 21) & 31; /* dest regno */
1329 helper_excp(EXCP_UNALIGN
, 0);
1332 void QEMU_NORETURN
cpu_unassigned_access(CPUAlphaState
*env1
,
1333 target_phys_addr_t addr
, int is_write
,
1334 int is_exec
, int unused
, int size
)
1337 env
->trap_arg0
= addr
;
1338 env
->trap_arg1
= is_write
;
1339 dynamic_excp(EXCP_MCHK
, 0);
1342 #include "softmmu_exec.h"
1344 #define MMUSUFFIX _mmu
1345 #define ALIGNED_ONLY
1348 #include "softmmu_template.h"
1351 #include "softmmu_template.h"
1354 #include "softmmu_template.h"
1357 #include "softmmu_template.h"
1359 /* try to fill the TLB and return an exception if error. If retaddr is
1360 NULL, it means that the function was called in C code (i.e. not
1361 from generated code or from helper.c) */
1362 /* XXX: fix it to restore all registers */
1363 void tlb_fill(CPUAlphaState
*env1
, target_ulong addr
, int is_write
, int mmu_idx
,
1366 CPUAlphaState
*saved_env
;
1371 ret
= cpu_alpha_handle_mmu_fault(env
, addr
, is_write
, mmu_idx
);
1372 if (unlikely(ret
!= 0)) {
1373 do_restore_state(retaddr
);
1374 /* Exception index and error code are already set */