2 * Alpha emulation cpu micro-operations helpers for qemu.
4 * Copyright (c) 2007 Jocelyn Mayer
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
21 #include "host-utils.h"
22 #include "softfloat.h"
25 /*****************************************************************************/
26 /* Exceptions processing helpers */
27 void QEMU_NORETURN
helper_excp (int excp
, int error
)
29 env
->exception_index
= excp
;
30 env
->error_code
= error
;
34 uint64_t helper_load_pcc (void)
40 uint64_t helper_load_fpcr (void)
42 return cpu_alpha_load_fpcr (env
);
45 void helper_store_fpcr (uint64_t val
)
47 cpu_alpha_store_fpcr (env
, val
);
50 static spinlock_t intr_cpu_lock
= SPIN_LOCK_UNLOCKED
;
52 uint64_t helper_rs(void)
56 spin_lock(&intr_cpu_lock
);
59 spin_unlock(&intr_cpu_lock
);
64 uint64_t helper_rc(void)
68 spin_lock(&intr_cpu_lock
);
71 spin_unlock(&intr_cpu_lock
);
76 uint64_t helper_addqv (uint64_t op1
, uint64_t op2
)
80 if (unlikely((tmp
^ op2
^ (-1ULL)) & (tmp
^ op1
) & (1ULL << 63))) {
81 helper_excp(EXCP_ARITH
, EXC_M_IOV
);
86 uint64_t helper_addlv (uint64_t op1
, uint64_t op2
)
89 op1
= (uint32_t)(op1
+ op2
);
90 if (unlikely((tmp
^ op2
^ (-1UL)) & (tmp
^ op1
) & (1UL << 31))) {
91 helper_excp(EXCP_ARITH
, EXC_M_IOV
);
96 uint64_t helper_subqv (uint64_t op1
, uint64_t op2
)
100 if (unlikely((op1
^ op2
) & (res
^ op1
) & (1ULL << 63))) {
101 helper_excp(EXCP_ARITH
, EXC_M_IOV
);
106 uint64_t helper_sublv (uint64_t op1
, uint64_t op2
)
110 if (unlikely((op1
^ op2
) & (res
^ op1
) & (1UL << 31))) {
111 helper_excp(EXCP_ARITH
, EXC_M_IOV
);
116 uint64_t helper_mullv (uint64_t op1
, uint64_t op2
)
118 int64_t res
= (int64_t)op1
* (int64_t)op2
;
120 if (unlikely((int32_t)res
!= res
)) {
121 helper_excp(EXCP_ARITH
, EXC_M_IOV
);
123 return (int64_t)((int32_t)res
);
126 uint64_t helper_mulqv (uint64_t op1
, uint64_t op2
)
130 muls64(&tl
, &th
, op1
, op2
);
131 /* If th != 0 && th != -1, then we had an overflow */
132 if (unlikely((th
+ 1) > 1)) {
133 helper_excp(EXCP_ARITH
, EXC_M_IOV
);
138 uint64_t helper_umulh (uint64_t op1
, uint64_t op2
)
142 mulu64(&tl
, &th
, op1
, op2
);
146 uint64_t helper_ctpop (uint64_t arg
)
151 uint64_t helper_ctlz (uint64_t arg
)
156 uint64_t helper_cttz (uint64_t arg
)
161 static inline uint64_t byte_zap(uint64_t op
, uint8_t mskb
)
166 mask
|= ((mskb
>> 0) & 1) * 0x00000000000000FFULL
;
167 mask
|= ((mskb
>> 1) & 1) * 0x000000000000FF00ULL
;
168 mask
|= ((mskb
>> 2) & 1) * 0x0000000000FF0000ULL
;
169 mask
|= ((mskb
>> 3) & 1) * 0x00000000FF000000ULL
;
170 mask
|= ((mskb
>> 4) & 1) * 0x000000FF00000000ULL
;
171 mask
|= ((mskb
>> 5) & 1) * 0x0000FF0000000000ULL
;
172 mask
|= ((mskb
>> 6) & 1) * 0x00FF000000000000ULL
;
173 mask
|= ((mskb
>> 7) & 1) * 0xFF00000000000000ULL
;
/* ZAP: clear each byte of VAL whose corresponding bit in the low
   8 bits of MASK is set.  */
uint64_t helper_zap(uint64_t val, uint64_t mask)
{
    return byte_zap(val, mask);
}
/* ZAPNOT: inverse sense of ZAP -- keep only the bytes of VAL selected
   by the low 8 bits of MASK, clearing the rest.  */
uint64_t helper_zapnot(uint64_t val, uint64_t mask)
{
    return byte_zap(val, ~mask);
}
188 uint64_t helper_cmpbge (uint64_t op1
, uint64_t op2
)
190 uint8_t opa
, opb
, res
;
194 for (i
= 0; i
< 8; i
++) {
195 opa
= op1
>> (i
* 8);
196 opb
= op2
>> (i
* 8);
203 uint64_t helper_minub8 (uint64_t op1
, uint64_t op2
)
206 uint8_t opa
, opb
, opr
;
209 for (i
= 0; i
< 8; ++i
) {
210 opa
= op1
>> (i
* 8);
211 opb
= op2
>> (i
* 8);
212 opr
= opa
< opb
? opa
: opb
;
213 res
|= (uint64_t)opr
<< (i
* 8);
218 uint64_t helper_minsb8 (uint64_t op1
, uint64_t op2
)
225 for (i
= 0; i
< 8; ++i
) {
226 opa
= op1
>> (i
* 8);
227 opb
= op2
>> (i
* 8);
228 opr
= opa
< opb
? opa
: opb
;
229 res
|= (uint64_t)opr
<< (i
* 8);
234 uint64_t helper_minuw4 (uint64_t op1
, uint64_t op2
)
237 uint16_t opa
, opb
, opr
;
240 for (i
= 0; i
< 4; ++i
) {
241 opa
= op1
>> (i
* 16);
242 opb
= op2
>> (i
* 16);
243 opr
= opa
< opb
? opa
: opb
;
244 res
|= (uint64_t)opr
<< (i
* 16);
249 uint64_t helper_minsw4 (uint64_t op1
, uint64_t op2
)
256 for (i
= 0; i
< 4; ++i
) {
257 opa
= op1
>> (i
* 16);
258 opb
= op2
>> (i
* 16);
259 opr
= opa
< opb
? opa
: opb
;
260 res
|= (uint64_t)opr
<< (i
* 16);
265 uint64_t helper_maxub8 (uint64_t op1
, uint64_t op2
)
268 uint8_t opa
, opb
, opr
;
271 for (i
= 0; i
< 8; ++i
) {
272 opa
= op1
>> (i
* 8);
273 opb
= op2
>> (i
* 8);
274 opr
= opa
> opb
? opa
: opb
;
275 res
|= (uint64_t)opr
<< (i
* 8);
280 uint64_t helper_maxsb8 (uint64_t op1
, uint64_t op2
)
287 for (i
= 0; i
< 8; ++i
) {
288 opa
= op1
>> (i
* 8);
289 opb
= op2
>> (i
* 8);
290 opr
= opa
> opb
? opa
: opb
;
291 res
|= (uint64_t)opr
<< (i
* 8);
296 uint64_t helper_maxuw4 (uint64_t op1
, uint64_t op2
)
299 uint16_t opa
, opb
, opr
;
302 for (i
= 0; i
< 4; ++i
) {
303 opa
= op1
>> (i
* 16);
304 opb
= op2
>> (i
* 16);
305 opr
= opa
> opb
? opa
: opb
;
306 res
|= (uint64_t)opr
<< (i
* 16);
311 uint64_t helper_maxsw4 (uint64_t op1
, uint64_t op2
)
318 for (i
= 0; i
< 4; ++i
) {
319 opa
= op1
>> (i
* 16);
320 opb
= op2
>> (i
* 16);
321 opr
= opa
> opb
? opa
: opb
;
322 res
|= (uint64_t)opr
<< (i
* 16);
327 uint64_t helper_perr (uint64_t op1
, uint64_t op2
)
330 uint8_t opa
, opb
, opr
;
333 for (i
= 0; i
< 8; ++i
) {
334 opa
= op1
>> (i
* 8);
335 opb
= op2
>> (i
* 8);
/* PKLB: pack the low byte of each of the two longwords in OP1 into
   the low two bytes of the result.  */
uint64_t helper_pklb (uint64_t op1)
{
    uint64_t byte0 = op1 & 0xff;          /* byte 0 of longword 0 */
    uint64_t byte1 = (op1 >> 24) & 0xff00; /* byte 0 of longword 1 -> result byte 1 */
    return byte0 | byte1;
}
350 uint64_t helper_pkwb (uint64_t op1
)
353 | ((op1
>> 8) & 0xff00)
354 | ((op1
>> 16) & 0xff0000)
355 | ((op1
>> 24) & 0xff000000));
/* UNPKBL: unpack the low two bytes of OP1 into the low byte of each
   of the two result longwords (inverse of PKLB).  */
uint64_t helper_unpkbl (uint64_t op1)
{
    uint64_t lw0 = op1 & 0xff;             /* byte 0 -> longword 0 */
    uint64_t lw1 = (op1 & 0xff00) << 24;   /* byte 1 -> longword 1 */
    return lw0 | lw1;
}
363 uint64_t helper_unpkbw (uint64_t op1
)
366 | ((op1
& 0xff00) << 8)
367 | ((op1
& 0xff0000) << 16)
368 | ((op1
& 0xff000000) << 24));
371 /* Floating point helpers */
373 void helper_setroundmode (uint32_t val
)
375 set_float_rounding_mode(val
, &FP_STATUS
);
378 void helper_setflushzero (uint32_t val
)
380 set_flush_to_zero(val
, &FP_STATUS
);
383 void helper_fp_exc_clear (void)
385 set_float_exception_flags(0, &FP_STATUS
);
388 uint32_t helper_fp_exc_get (void)
390 return get_float_exception_flags(&FP_STATUS
);
393 /* Raise exceptions for ieee fp insns without software completion.
394 In that case there are no exceptions that don't trap; the mask
396 void helper_fp_exc_raise(uint32_t exc
, uint32_t regno
)
401 env
->ipr
[IPR_EXC_MASK
] |= 1ull << regno
;
403 if (exc
& float_flag_invalid
) {
406 if (exc
& float_flag_divbyzero
) {
409 if (exc
& float_flag_overflow
) {
412 if (exc
& float_flag_underflow
) {
415 if (exc
& float_flag_inexact
) {
418 helper_excp(EXCP_ARITH
, hw_exc
);
422 /* Raise exceptions for ieee fp insns with software completion. */
423 void helper_fp_exc_raise_s(uint32_t exc
, uint32_t regno
)
426 env
->fpcr_exc_status
|= exc
;
428 exc
&= ~env
->fpcr_exc_mask
;
430 helper_fp_exc_raise(exc
, regno
);
435 /* Input remapping without software completion. Handle denormal-map-to-zero
436 and trap for all other non-finite numbers. */
437 uint64_t helper_ieee_input(uint64_t val
)
439 uint32_t exp
= (uint32_t)(val
>> 52) & 0x7ff;
440 uint64_t frac
= val
& 0xfffffffffffffull
;
444 /* If DNZ is set flush denormals to zero on input. */
448 helper_excp(EXCP_ARITH
, EXC_M_UNF
);
451 } else if (exp
== 0x7ff) {
452 /* Infinity or NaN. */
453 /* ??? I'm not sure these exception bit flags are correct. I do
454 know that the Linux kernel, at least, doesn't rely on them and
455 just emulates the insn to figure out what exception to use. */
456 helper_excp(EXCP_ARITH
, frac
? EXC_M_INV
: EXC_M_FOV
);
461 /* Similar, but does not trap for infinities. Used for comparisons. */
462 uint64_t helper_ieee_input_cmp(uint64_t val
)
464 uint32_t exp
= (uint32_t)(val
>> 52) & 0x7ff;
465 uint64_t frac
= val
& 0xfffffffffffffull
;
469 /* If DNZ is set flush denormals to zero on input. */
473 helper_excp(EXCP_ARITH
, EXC_M_UNF
);
476 } else if (exp
== 0x7ff && frac
) {
478 helper_excp(EXCP_ARITH
, EXC_M_INV
);
483 /* Input remapping with software completion enabled. All we have to do
484 is handle denormal-map-to-zero; all other inputs get exceptions as
485 needed from the actual operation. */
486 uint64_t helper_ieee_input_s(uint64_t val
)
489 uint32_t exp
= (uint32_t)(val
>> 52) & 0x7ff;
497 /* F floating (VAX) */
498 static inline uint64_t float32_to_f(float32 fa
)
500 uint64_t r
, exp
, mant
, sig
;
504 sig
= ((uint64_t)a
.l
& 0x80000000) << 32;
505 exp
= (a
.l
>> 23) & 0xff;
506 mant
= ((uint64_t)a
.l
& 0x007fffff) << 29;
509 /* NaN or infinity */
510 r
= 1; /* VAX dirty zero */
511 } else if (exp
== 0) {
517 r
= sig
| ((exp
+ 1) << 52) | mant
;
522 r
= 1; /* VAX dirty zero */
524 r
= sig
| ((exp
+ 2) << 52);
531 static inline float32
f_to_float32(uint64_t a
)
533 uint32_t exp
, mant_sig
;
536 exp
= ((a
>> 55) & 0x80) | ((a
>> 52) & 0x7f);
537 mant_sig
= ((a
>> 32) & 0x80000000) | ((a
>> 29) & 0x007fffff);
539 if (unlikely(!exp
&& mant_sig
)) {
540 /* Reserved operands / Dirty zero */
541 helper_excp(EXCP_OPCDEC
, 0);
548 r
.l
= ((exp
- 2) << 23) | mant_sig
;
/* Convert an F-float from its 64-bit register layout to the 32-bit VAX
   F memory layout by shuffling the fraction, exponent and sign fields
   back into place.  Each shifted field fits in 32 bits.  */
uint32_t helper_f_to_memory (uint64_t a)
{
    return ((a & 0x00001fffe0000000ull) >> 13)
         | ((a & 0x07ffe00000000000ull) >> 45)
         | ((a & 0xc000000000000000ull) >> 48);
}
563 uint64_t helper_memory_to_f (uint32_t a
)
566 r
= ((uint64_t)(a
& 0x0000c000)) << 48;
567 r
|= ((uint64_t)(a
& 0x003fffff)) << 45;
568 r
|= ((uint64_t)(a
& 0xffff0000)) << 13;
569 if (!(a
& 0x00004000))
574 /* ??? Emulating VAX arithmetic with IEEE arithmetic is wrong. We should
575 either implement VAX arithmetic properly or just signal invalid opcode. */
577 uint64_t helper_addf (uint64_t a
, uint64_t b
)
581 fa
= f_to_float32(a
);
582 fb
= f_to_float32(b
);
583 fr
= float32_add(fa
, fb
, &FP_STATUS
);
584 return float32_to_f(fr
);
587 uint64_t helper_subf (uint64_t a
, uint64_t b
)
591 fa
= f_to_float32(a
);
592 fb
= f_to_float32(b
);
593 fr
= float32_sub(fa
, fb
, &FP_STATUS
);
594 return float32_to_f(fr
);
597 uint64_t helper_mulf (uint64_t a
, uint64_t b
)
601 fa
= f_to_float32(a
);
602 fb
= f_to_float32(b
);
603 fr
= float32_mul(fa
, fb
, &FP_STATUS
);
604 return float32_to_f(fr
);
607 uint64_t helper_divf (uint64_t a
, uint64_t b
)
611 fa
= f_to_float32(a
);
612 fb
= f_to_float32(b
);
613 fr
= float32_div(fa
, fb
, &FP_STATUS
);
614 return float32_to_f(fr
);
617 uint64_t helper_sqrtf (uint64_t t
)
621 ft
= f_to_float32(t
);
622 fr
= float32_sqrt(ft
, &FP_STATUS
);
623 return float32_to_f(fr
);
627 /* G floating (VAX) */
628 static inline uint64_t float64_to_g(float64 fa
)
630 uint64_t r
, exp
, mant
, sig
;
634 sig
= a
.ll
& 0x8000000000000000ull
;
635 exp
= (a
.ll
>> 52) & 0x7ff;
636 mant
= a
.ll
& 0x000fffffffffffffull
;
639 /* NaN or infinity */
640 r
= 1; /* VAX dirty zero */
641 } else if (exp
== 0) {
647 r
= sig
| ((exp
+ 1) << 52) | mant
;
652 r
= 1; /* VAX dirty zero */
654 r
= sig
| ((exp
+ 2) << 52);
661 static inline float64
g_to_float64(uint64_t a
)
663 uint64_t exp
, mant_sig
;
666 exp
= (a
>> 52) & 0x7ff;
667 mant_sig
= a
& 0x800fffffffffffffull
;
669 if (!exp
&& mant_sig
) {
670 /* Reserved operands / Dirty zero */
671 helper_excp(EXCP_OPCDEC
, 0);
678 r
.ll
= ((exp
- 2) << 52) | mant_sig
;
/* Convert a G-float from register layout to memory layout, which is
   simply the four 16-bit words swapped end-for-end.  */
uint64_t helper_g_to_memory (uint64_t a)
{
    return ((a & 0x000000000000ffffull) << 48)
         | ((a & 0x00000000ffff0000ull) << 16)
         | ((a & 0x0000ffff00000000ull) >> 16)
         | ((a & 0xffff000000000000ull) >> 48);
}
/* Convert a G-float from memory layout back to register layout.  The
   16-bit word swap is its own inverse, so the permutation matches
   helper_g_to_memory.  */
uint64_t helper_memory_to_g (uint64_t a)
{
    return ((a & 0x000000000000ffffull) << 48)
         | ((a & 0x00000000ffff0000ull) << 16)
         | ((a & 0x0000ffff00000000ull) >> 16)
         | ((a & 0xffff000000000000ull) >> 48);
}
704 uint64_t helper_addg (uint64_t a
, uint64_t b
)
708 fa
= g_to_float64(a
);
709 fb
= g_to_float64(b
);
710 fr
= float64_add(fa
, fb
, &FP_STATUS
);
711 return float64_to_g(fr
);
714 uint64_t helper_subg (uint64_t a
, uint64_t b
)
718 fa
= g_to_float64(a
);
719 fb
= g_to_float64(b
);
720 fr
= float64_sub(fa
, fb
, &FP_STATUS
);
721 return float64_to_g(fr
);
724 uint64_t helper_mulg (uint64_t a
, uint64_t b
)
728 fa
= g_to_float64(a
);
729 fb
= g_to_float64(b
);
730 fr
= float64_mul(fa
, fb
, &FP_STATUS
);
731 return float64_to_g(fr
);
734 uint64_t helper_divg (uint64_t a
, uint64_t b
)
738 fa
= g_to_float64(a
);
739 fb
= g_to_float64(b
);
740 fr
= float64_div(fa
, fb
, &FP_STATUS
);
741 return float64_to_g(fr
);
744 uint64_t helper_sqrtg (uint64_t a
)
748 fa
= g_to_float64(a
);
749 fr
= float64_sqrt(fa
, &FP_STATUS
);
750 return float64_to_g(fr
);
754 /* S floating (single) */
756 /* Taken from linux/arch/alpha/kernel/traps.c, s_mem_to_reg. */
757 static inline uint64_t float32_to_s_int(uint32_t fi
)
759 uint32_t frac
= fi
& 0x7fffff;
760 uint32_t sign
= fi
>> 31;
761 uint32_t exp_msb
= (fi
>> 30) & 1;
762 uint32_t exp_low
= (fi
>> 23) & 0x7f;
765 exp
= (exp_msb
<< 10) | exp_low
;
774 return (((uint64_t)sign
<< 63)
775 | ((uint64_t)exp
<< 52)
776 | ((uint64_t)frac
<< 29));
779 static inline uint64_t float32_to_s(float32 fa
)
783 return float32_to_s_int(a
.l
);
/* Reduce the 64-bit register image of an S-float to its 32-bit memory
   image: sign + exponent MSBs come from the top of the word, the
   remaining exponent/fraction bits from bit 29 upward.  */
static inline uint32_t s_to_float32_int(uint64_t a)
{
    uint32_t top  = (a >> 32) & 0xc0000000;
    uint32_t rest = (a >> 29) & 0x3fffffff;
    return top | rest;
}
791 static inline float32
s_to_float32(uint64_t a
)
794 r
.l
= s_to_float32_int(a
);
/* Convert an S-float register image to its 32-bit memory image.  The
   field extraction is written out inline: sign + exponent MSBs from
   the top of the word, the rest from bit 29 upward.  */
uint32_t helper_s_to_memory (uint64_t a)
{
    return ((a >> 32) & 0xc0000000) | ((a >> 29) & 0x3fffffff);
}
803 uint64_t helper_memory_to_s (uint32_t a
)
805 return float32_to_s_int(a
);
808 uint64_t helper_adds (uint64_t a
, uint64_t b
)
812 fa
= s_to_float32(a
);
813 fb
= s_to_float32(b
);
814 fr
= float32_add(fa
, fb
, &FP_STATUS
);
815 return float32_to_s(fr
);
818 uint64_t helper_subs (uint64_t a
, uint64_t b
)
822 fa
= s_to_float32(a
);
823 fb
= s_to_float32(b
);
824 fr
= float32_sub(fa
, fb
, &FP_STATUS
);
825 return float32_to_s(fr
);
828 uint64_t helper_muls (uint64_t a
, uint64_t b
)
832 fa
= s_to_float32(a
);
833 fb
= s_to_float32(b
);
834 fr
= float32_mul(fa
, fb
, &FP_STATUS
);
835 return float32_to_s(fr
);
838 uint64_t helper_divs (uint64_t a
, uint64_t b
)
842 fa
= s_to_float32(a
);
843 fb
= s_to_float32(b
);
844 fr
= float32_div(fa
, fb
, &FP_STATUS
);
845 return float32_to_s(fr
);
848 uint64_t helper_sqrts (uint64_t a
)
852 fa
= s_to_float32(a
);
853 fr
= float32_sqrt(fa
, &FP_STATUS
);
854 return float32_to_s(fr
);
858 /* T floating (double) */
859 static inline float64
t_to_float64(uint64_t a
)
861 /* Memory format is the same as float64 */
867 static inline uint64_t float64_to_t(float64 fa
)
869 /* Memory format is the same as float64 */
875 uint64_t helper_addt (uint64_t a
, uint64_t b
)
879 fa
= t_to_float64(a
);
880 fb
= t_to_float64(b
);
881 fr
= float64_add(fa
, fb
, &FP_STATUS
);
882 return float64_to_t(fr
);
885 uint64_t helper_subt (uint64_t a
, uint64_t b
)
889 fa
= t_to_float64(a
);
890 fb
= t_to_float64(b
);
891 fr
= float64_sub(fa
, fb
, &FP_STATUS
);
892 return float64_to_t(fr
);
895 uint64_t helper_mult (uint64_t a
, uint64_t b
)
899 fa
= t_to_float64(a
);
900 fb
= t_to_float64(b
);
901 fr
= float64_mul(fa
, fb
, &FP_STATUS
);
902 return float64_to_t(fr
);
905 uint64_t helper_divt (uint64_t a
, uint64_t b
)
909 fa
= t_to_float64(a
);
910 fb
= t_to_float64(b
);
911 fr
= float64_div(fa
, fb
, &FP_STATUS
);
912 return float64_to_t(fr
);
915 uint64_t helper_sqrtt (uint64_t a
)
919 fa
= t_to_float64(a
);
920 fr
= float64_sqrt(fa
, &FP_STATUS
);
921 return float64_to_t(fr
);
/* CPYS: copy sign -- the sign bit of A joined with the exponent and
   fraction of B.  */
uint64_t helper_cpys(uint64_t a, uint64_t b)
{
    uint64_t sign = a & 0x8000000000000000ULL;
    uint64_t rest = b & ~0x8000000000000000ULL;
    return sign | rest;
}
/* CPYSN: copy sign negated -- the complemented sign bit of A joined
   with the exponent and fraction of B.  */
uint64_t helper_cpysn(uint64_t a, uint64_t b)
{
    uint64_t sign = (~a) & 0x8000000000000000ULL;
    uint64_t rest = b & ~0x8000000000000000ULL;
    return sign | rest;
}
/* CPYSE: copy sign and exponent -- the sign + 11-bit exponent field of
   A joined with the fraction of B.  */
uint64_t helper_cpyse(uint64_t a, uint64_t b)
{
    uint64_t sign_exp = a & 0xFFF0000000000000ULL;
    uint64_t frac     = b & ~0xFFF0000000000000ULL;
    return sign_exp | frac;
}
943 uint64_t helper_cmptun (uint64_t a
, uint64_t b
)
947 fa
= t_to_float64(a
);
948 fb
= t_to_float64(b
);
950 if (float64_is_nan(fa
) || float64_is_nan(fb
))
951 return 0x4000000000000000ULL
;
956 uint64_t helper_cmpteq(uint64_t a
, uint64_t b
)
960 fa
= t_to_float64(a
);
961 fb
= t_to_float64(b
);
963 if (float64_eq(fa
, fb
, &FP_STATUS
))
964 return 0x4000000000000000ULL
;
969 uint64_t helper_cmptle(uint64_t a
, uint64_t b
)
973 fa
= t_to_float64(a
);
974 fb
= t_to_float64(b
);
976 if (float64_le(fa
, fb
, &FP_STATUS
))
977 return 0x4000000000000000ULL
;
982 uint64_t helper_cmptlt(uint64_t a
, uint64_t b
)
986 fa
= t_to_float64(a
);
987 fb
= t_to_float64(b
);
989 if (float64_lt(fa
, fb
, &FP_STATUS
))
990 return 0x4000000000000000ULL
;
995 uint64_t helper_cmpgeq(uint64_t a
, uint64_t b
)
999 fa
= g_to_float64(a
);
1000 fb
= g_to_float64(b
);
1002 if (float64_eq(fa
, fb
, &FP_STATUS
))
1003 return 0x4000000000000000ULL
;
1008 uint64_t helper_cmpgle(uint64_t a
, uint64_t b
)
1012 fa
= g_to_float64(a
);
1013 fb
= g_to_float64(b
);
1015 if (float64_le(fa
, fb
, &FP_STATUS
))
1016 return 0x4000000000000000ULL
;
1021 uint64_t helper_cmpglt(uint64_t a
, uint64_t b
)
1025 fa
= g_to_float64(a
);
1026 fb
= g_to_float64(b
);
1028 if (float64_lt(fa
, fb
, &FP_STATUS
))
1029 return 0x4000000000000000ULL
;
1034 /* Floating point format conversion */
1035 uint64_t helper_cvtts (uint64_t a
)
1040 fa
= t_to_float64(a
);
1041 fr
= float64_to_float32(fa
, &FP_STATUS
);
1042 return float32_to_s(fr
);
1045 uint64_t helper_cvtst (uint64_t a
)
1050 fa
= s_to_float32(a
);
1051 fr
= float32_to_float64(fa
, &FP_STATUS
);
1052 return float64_to_t(fr
);
1055 uint64_t helper_cvtqs (uint64_t a
)
1057 float32 fr
= int64_to_float32(a
, &FP_STATUS
);
1058 return float32_to_s(fr
);
1061 /* Implement float64 to uint64 conversion without saturation -- we must
1062 supply the truncated result. This behaviour is used by the compiler
1063 to get unsigned conversion for free with the same instruction.
1065 The VI flag is set when overflow or inexact exceptions should be raised. */
1067 static inline uint64_t helper_cvttq_internal(uint64_t a
, int roundmode
, int VI
)
1069 uint64_t frac
, ret
= 0;
1070 uint32_t exp
, sign
, exc
= 0;
1074 exp
= (uint32_t)(a
>> 52) & 0x7ff;
1075 frac
= a
& 0xfffffffffffffull
;
1078 if (unlikely(frac
!= 0)) {
1081 } else if (exp
== 0x7ff) {
1082 exc
= (frac
? float_flag_invalid
: VI
? float_flag_overflow
: 0);
1084 /* Restore implicit bit. */
1085 frac
|= 0x10000000000000ull
;
1087 shift
= exp
- 1023 - 52;
1089 /* In this case the number is so large that we must shift
1090 the fraction left. There is no rounding to do. */
1092 ret
= frac
<< shift
;
1093 if (VI
&& (ret
>> shift
) != frac
) {
1094 exc
= float_flag_overflow
;
1100 /* In this case the number is smaller than the fraction as
1101 represented by the 52 bit number. Here we must think
1102 about rounding the result. Handle this by shifting the
1103 fractional part of the number into the high bits of ROUND.
1104 This will let us efficiently handle round-to-nearest. */
1107 ret
= frac
>> shift
;
1108 round
= frac
<< (64 - shift
);
1110 /* The exponent is so small we shift out everything.
1111 Leave a sticky bit for proper rounding below. */
1117 exc
= (VI
? float_flag_inexact
: 0);
1118 switch (roundmode
) {
1119 case float_round_nearest_even
:
1120 if (round
== (1ull << 63)) {
1121 /* Fraction is exactly 0.5; round to even. */
1123 } else if (round
> (1ull << 63)) {
1127 case float_round_to_zero
:
1129 case float_round_up
:
1132 case float_round_down
:
1142 if (unlikely(exc
)) {
1143 float_raise(exc
, &FP_STATUS
);
1149 uint64_t helper_cvttq(uint64_t a
)
1151 return helper_cvttq_internal(a
, FP_STATUS
.float_rounding_mode
, 1);
1154 uint64_t helper_cvttq_c(uint64_t a
)
1156 return helper_cvttq_internal(a
, float_round_to_zero
, 0);
1159 uint64_t helper_cvttq_svic(uint64_t a
)
1161 return helper_cvttq_internal(a
, float_round_to_zero
, 1);
1164 uint64_t helper_cvtqt (uint64_t a
)
1166 float64 fr
= int64_to_float64(a
, &FP_STATUS
);
1167 return float64_to_t(fr
);
1170 uint64_t helper_cvtqf (uint64_t a
)
1172 float32 fr
= int64_to_float32(a
, &FP_STATUS
);
1173 return float32_to_f(fr
);
1176 uint64_t helper_cvtgf (uint64_t a
)
1181 fa
= g_to_float64(a
);
1182 fr
= float64_to_float32(fa
, &FP_STATUS
);
1183 return float32_to_f(fr
);
1186 uint64_t helper_cvtgq (uint64_t a
)
1188 float64 fa
= g_to_float64(a
);
1189 return float64_to_int64_round_to_zero(fa
, &FP_STATUS
);
1192 uint64_t helper_cvtqg (uint64_t a
)
1195 fr
= int64_to_float64(a
, &FP_STATUS
);
1196 return float64_to_g(fr
);
/* CVTLQ: reassemble the longword stored in FP-register L format.
   Bits 29..58 of A hold the low 30 bits of the longword; bits 62..63
   hold its top two bits.
   NOTE(review): the 32-bit value is zero-extended to 64 bits here,
   exactly as in the original code -- confirm against the architecture
   manual whether CVTLQ should sign-extend instead.  */
uint64_t helper_cvtlq (uint64_t a)
{
    uint32_t lo30 = (a >> 29) & 0x3FFFFFFF;
    uint32_t hi2  = (a >> 32) & 0xc0000000;
    return lo30 | hi2;
}
/* CVTQL: scatter the low 32 bits of A into FP-register L format --
   bits 0..30 move to bits 29..59, bits 30..31 to bits 62..63 (the
   inverse placement of CVTLQ).  */
uint64_t helper_cvtql (uint64_t a)
{
    uint64_t hi2  = (a & 0xC0000000) << 32;
    uint64_t lo31 = (a & 0x7FFFFFFF) << 29;
    return hi2 | lo31;
}
1211 uint64_t helper_cvtql_v (uint64_t a
)
1213 if ((int32_t)a
!= (int64_t)a
)
1214 helper_excp(EXCP_ARITH
, EXC_M_IOV
);
1215 return helper_cvtql(a
);
/* CVTQL/SV: delegate to the /V variant.
   ??? I'm pretty sure there's nothing that /sv needs to do that /v
   doesn't do.  The only thing I can think is that /sv is a valid
   instruction merely for completeness in the ISA.  */
uint64_t helper_cvtql_sv (uint64_t a)
{
    return helper_cvtql_v(a);
}
1226 /* PALcode support special instructions */
1227 #if !defined (CONFIG_USER_ONLY)
1228 void helper_hw_rei (void)
1230 env
->pc
= env
->ipr
[IPR_EXC_ADDR
] & ~3;
1231 env
->ipr
[IPR_EXC_ADDR
] = env
->ipr
[IPR_EXC_ADDR
] & 1;
1232 /* XXX: re-enable interrupts and memory mapping */
1235 void helper_hw_ret (uint64_t a
)
1238 env
->ipr
[IPR_EXC_ADDR
] = a
& 1;
1239 /* XXX: re-enable interrupts and memory mapping */
1242 uint64_t helper_mfpr (int iprn
, uint64_t val
)
1246 if (cpu_alpha_mfpr(env
, iprn
, &tmp
) == 0)
1252 void helper_mtpr (int iprn
, uint64_t val
)
1254 cpu_alpha_mtpr(env
, iprn
, val
, NULL
);
1257 void helper_set_alt_mode (void)
1259 env
->saved_mode
= env
->ps
& 0xC;
1260 env
->ps
= (env
->ps
& ~0xC) | (env
->ipr
[IPR_ALT_MODE
] & 0xC);
1263 void helper_restore_mode (void)
1265 env
->ps
= (env
->ps
& ~0xC) | env
->saved_mode
;
1270 /*****************************************************************************/
1271 /* Softmmu support */
1272 #if !defined (CONFIG_USER_ONLY)
1274 /* XXX: the two following helpers are pure hacks.
1275 * Hopefully, we emulate the PALcode, then we should never see
1276 * HW_LD / HW_ST instructions.
1278 uint64_t helper_ld_virt_to_phys (uint64_t virtaddr
)
1280 uint64_t tlb_addr
, physaddr
;
1284 mmu_idx
= cpu_mmu_index(env
);
1285 index
= (virtaddr
>> TARGET_PAGE_BITS
) & (CPU_TLB_SIZE
- 1);
1287 tlb_addr
= env
->tlb_table
[mmu_idx
][index
].addr_read
;
1288 if ((virtaddr
& TARGET_PAGE_MASK
) ==
1289 (tlb_addr
& (TARGET_PAGE_MASK
| TLB_INVALID_MASK
))) {
1290 physaddr
= virtaddr
+ env
->tlb_table
[mmu_idx
][index
].addend
;
1292 /* the page is not in the TLB : fill it */
1294 tlb_fill(virtaddr
, 0, mmu_idx
, retaddr
);
1300 uint64_t helper_st_virt_to_phys (uint64_t virtaddr
)
1302 uint64_t tlb_addr
, physaddr
;
1306 mmu_idx
= cpu_mmu_index(env
);
1307 index
= (virtaddr
>> TARGET_PAGE_BITS
) & (CPU_TLB_SIZE
- 1);
1309 tlb_addr
= env
->tlb_table
[mmu_idx
][index
].addr_write
;
1310 if ((virtaddr
& TARGET_PAGE_MASK
) ==
1311 (tlb_addr
& (TARGET_PAGE_MASK
| TLB_INVALID_MASK
))) {
1312 physaddr
= virtaddr
+ env
->tlb_table
[mmu_idx
][index
].addend
;
1314 /* the page is not in the TLB : fill it */
1316 tlb_fill(virtaddr
, 1, mmu_idx
, retaddr
);
1322 void helper_ldl_raw(uint64_t t0
, uint64_t t1
)
1327 void helper_ldq_raw(uint64_t t0
, uint64_t t1
)
1332 void helper_ldl_l_raw(uint64_t t0
, uint64_t t1
)
1338 void helper_ldq_l_raw(uint64_t t0
, uint64_t t1
)
1344 void helper_ldl_kernel(uint64_t t0
, uint64_t t1
)
1349 void helper_ldq_kernel(uint64_t t0
, uint64_t t1
)
1354 void helper_ldl_data(uint64_t t0
, uint64_t t1
)
1359 void helper_ldq_data(uint64_t t0
, uint64_t t1
)
1364 void helper_stl_raw(uint64_t t0
, uint64_t t1
)
1369 void helper_stq_raw(uint64_t t0
, uint64_t t1
)
1374 uint64_t helper_stl_c_raw(uint64_t t0
, uint64_t t1
)
1378 if (t1
== env
->lock
) {
1389 uint64_t helper_stq_c_raw(uint64_t t0
, uint64_t t1
)
1393 if (t1
== env
->lock
) {
1404 #define MMUSUFFIX _mmu
1407 #include "softmmu_template.h"
1410 #include "softmmu_template.h"
1413 #include "softmmu_template.h"
1416 #include "softmmu_template.h"
1418 /* try to fill the TLB and return an exception if error. If retaddr is
1419 NULL, it means that the function was called in C code (i.e. not
1420 from generated code or from helper.c) */
1421 /* XXX: fix it to restore all registers */
1422 void tlb_fill (target_ulong addr
, int is_write
, int mmu_idx
, void *retaddr
)
1424 TranslationBlock
*tb
;
1425 CPUState
*saved_env
;
1429 /* XXX: hack to restore env in all cases, even if not called from
1432 env
= cpu_single_env
;
1433 ret
= cpu_alpha_handle_mmu_fault(env
, addr
, is_write
, mmu_idx
, 1);
1434 if (!likely(ret
== 0)) {
1435 if (likely(retaddr
)) {
1436 /* now we have a real cpu fault */
1437 pc
= (unsigned long)retaddr
;
1438 tb
= tb_find_pc(pc
);
1440 /* the PC is inside the translated code. It means that we have
1441 a virtual CPU fault */
1442 cpu_restore_state(tb
, env
, pc
, NULL
);
1445 /* Exception index and error code are already set */