/*
 *  Alpha emulation cpu micro-operations helpers for qemu.
 *
 *  Copyright (c) 2007 Jocelyn Mayer
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
21 #include "host-utils.h"
22 #include "softfloat.h"
24 #include "qemu-timer.h"
26 /*****************************************************************************/
27 /* Exceptions processing helpers */
28 void QEMU_NORETURN
helper_excp (int excp
, int error
)
30 env
->exception_index
= excp
;
31 env
->error_code
= error
;
/* Read the process cycle counter (PCC register, low 32 bits).  */
uint64_t helper_load_pcc (void)
{
    /* ??? This isn't a timer for which we have any rate info.  */
    return (uint32_t)cpu_get_real_ticks();
}
41 uint64_t helper_load_fpcr (void)
43 return cpu_alpha_load_fpcr (env
);
46 void helper_store_fpcr (uint64_t val
)
48 cpu_alpha_store_fpcr (env
, val
);
51 uint64_t helper_addqv (uint64_t op1
, uint64_t op2
)
55 if (unlikely((tmp
^ op2
^ (-1ULL)) & (tmp
^ op1
) & (1ULL << 63))) {
56 helper_excp(EXCP_ARITH
, EXC_M_IOV
);
61 uint64_t helper_addlv (uint64_t op1
, uint64_t op2
)
64 op1
= (uint32_t)(op1
+ op2
);
65 if (unlikely((tmp
^ op2
^ (-1UL)) & (tmp
^ op1
) & (1UL << 31))) {
66 helper_excp(EXCP_ARITH
, EXC_M_IOV
);
71 uint64_t helper_subqv (uint64_t op1
, uint64_t op2
)
75 if (unlikely((op1
^ op2
) & (res
^ op1
) & (1ULL << 63))) {
76 helper_excp(EXCP_ARITH
, EXC_M_IOV
);
81 uint64_t helper_sublv (uint64_t op1
, uint64_t op2
)
85 if (unlikely((op1
^ op2
) & (res
^ op1
) & (1UL << 31))) {
86 helper_excp(EXCP_ARITH
, EXC_M_IOV
);
91 uint64_t helper_mullv (uint64_t op1
, uint64_t op2
)
93 int64_t res
= (int64_t)op1
* (int64_t)op2
;
95 if (unlikely((int32_t)res
!= res
)) {
96 helper_excp(EXCP_ARITH
, EXC_M_IOV
);
98 return (int64_t)((int32_t)res
);
101 uint64_t helper_mulqv (uint64_t op1
, uint64_t op2
)
105 muls64(&tl
, &th
, op1
, op2
);
106 /* If th != 0 && th != -1, then we had an overflow */
107 if (unlikely((th
+ 1) > 1)) {
108 helper_excp(EXCP_ARITH
, EXC_M_IOV
);
/* UMULH: return the high 64 bits of the unsigned 128-bit product
   op1 * op2.  Computed portably from 32-bit halves so the helper does
   not depend on host 128-bit support.  */
uint64_t helper_umulh (uint64_t op1, uint64_t op2)
{
    uint64_t ahi = op1 >> 32, alo = (uint32_t)op1;
    uint64_t bhi = op2 >> 32, blo = (uint32_t)op2;

    /* Sum the three partial products that contribute to bits 32..127;
       mid1/mid2 accumulate the carries out of the low 64 bits.  */
    uint64_t mid1 = ahi * blo + ((alo * blo) >> 32);
    uint64_t mid2 = alo * bhi + (uint32_t)mid1;

    return ahi * bhi + (mid1 >> 32) + (mid2 >> 32);
}
/* CTPOP: population count of ARG.  */
uint64_t helper_ctpop (uint64_t arg)
{
    uint64_t n = 0;

    /* Kernighan's trick: clear the lowest set bit each iteration.  */
    while (arg) {
        arg &= arg - 1;
        n++;
    }
    return n;
}

/* CTLZ: count leading zeros of ARG; 64 when ARG is zero.  */
uint64_t helper_ctlz (uint64_t arg)
{
    uint64_t n = 0;

    if (arg == 0) {
        return 64;
    }
    while (!(arg & (1ULL << 63))) {
        arg <<= 1;
        n++;
    }
    return n;
}

/* CTTZ: count trailing zeros of ARG; 64 when ARG is zero.  */
uint64_t helper_cttz (uint64_t arg)
{
    uint64_t n = 0;

    if (arg == 0) {
        return 64;
    }
    while (!(arg & 1)) {
        arg >>= 1;
        n++;
    }
    return n;
}
/* Return OP with every byte selected by MSKB (one mask bit per byte,
   bit 0 = least significant byte) forced to zero.  */
static inline uint64_t byte_zap(uint64_t op, uint8_t mskb)
{
    uint64_t mask;

    mask = 0;
    /* Multiplying a 0/1 bit by the byte mask selects or drops that lane.  */
    mask |= ((mskb >> 0) & 1) * 0x00000000000000FFULL;
    mask |= ((mskb >> 1) & 1) * 0x000000000000FF00ULL;
    mask |= ((mskb >> 2) & 1) * 0x0000000000FF0000ULL;
    mask |= ((mskb >> 3) & 1) * 0x00000000FF000000ULL;
    mask |= ((mskb >> 4) & 1) * 0x000000FF00000000ULL;
    mask |= ((mskb >> 5) & 1) * 0x0000FF0000000000ULL;
    mask |= ((mskb >> 6) & 1) * 0x00FF000000000000ULL;
    mask |= ((mskb >> 7) & 1) * 0xFF00000000000000ULL;
    return op & ~mask;
}

/* ZAP: clear the bytes of VAL whose bits are set in the low 8 bits of MASK.  */
uint64_t helper_zap(uint64_t val, uint64_t mask)
{
    return byte_zap(val, mask);
}

/* ZAPNOT: keep only the bytes of VAL whose bits are set in MASK.  */
uint64_t helper_zapnot(uint64_t val, uint64_t mask)
{
    return byte_zap(val, ~mask);
}
/* CMPBGE: compare the 8 byte lanes of OP1 and OP2 (unsigned); set bit I of
   the result when byte I of OP1 >= byte I of OP2.  */
uint64_t helper_cmpbge (uint64_t op1, uint64_t op2)
{
    uint8_t opa, opb, res;
    int i;

    res = 0;
    for (i = 0; i < 8; i++) {
        opa = op1 >> (i * 8);
        opb = op2 >> (i * 8);
        if (opa >= opb) {
            res |= 1 << i;
        }
    }
    return res;
}
/* MVI byte/word minimum and maximum helpers.  Each operates lane-wise on
   the 8 bytes (or 4 words) of the operands; the u/s infix selects
   unsigned or signed lane comparison.  */

uint64_t helper_minub8 (uint64_t op1, uint64_t op2)
{
    uint64_t res = 0;
    uint8_t opa, opb, opr;
    int i;

    for (i = 0; i < 8; ++i) {
        opa = op1 >> (i * 8);
        opb = op2 >> (i * 8);
        opr = opa < opb ? opa : opb;
        res |= (uint64_t)opr << (i * 8);
    }
    return res;
}

uint64_t helper_minsb8 (uint64_t op1, uint64_t op2)
{
    uint64_t res = 0;
    int8_t opa, opb;
    uint8_t opr;          /* unsigned so the OR below doesn't sign-extend */
    int i;

    for (i = 0; i < 8; ++i) {
        opa = op1 >> (i * 8);
        opb = op2 >> (i * 8);
        opr = opa < opb ? opa : opb;
        res |= (uint64_t)opr << (i * 8);
    }
    return res;
}

uint64_t helper_minuw4 (uint64_t op1, uint64_t op2)
{
    uint64_t res = 0;
    uint16_t opa, opb, opr;
    int i;

    for (i = 0; i < 4; ++i) {
        opa = op1 >> (i * 16);
        opb = op2 >> (i * 16);
        opr = opa < opb ? opa : opb;
        res |= (uint64_t)opr << (i * 16);
    }
    return res;
}

uint64_t helper_minsw4 (uint64_t op1, uint64_t op2)
{
    uint64_t res = 0;
    int16_t opa, opb;
    uint16_t opr;
    int i;

    for (i = 0; i < 4; ++i) {
        opa = op1 >> (i * 16);
        opb = op2 >> (i * 16);
        opr = opa < opb ? opa : opb;
        res |= (uint64_t)opr << (i * 16);
    }
    return res;
}

uint64_t helper_maxub8 (uint64_t op1, uint64_t op2)
{
    uint64_t res = 0;
    uint8_t opa, opb, opr;
    int i;

    for (i = 0; i < 8; ++i) {
        opa = op1 >> (i * 8);
        opb = op2 >> (i * 8);
        opr = opa > opb ? opa : opb;
        res |= (uint64_t)opr << (i * 8);
    }
    return res;
}

uint64_t helper_maxsb8 (uint64_t op1, uint64_t op2)
{
    uint64_t res = 0;
    int8_t opa, opb;
    uint8_t opr;
    int i;

    for (i = 0; i < 8; ++i) {
        opa = op1 >> (i * 8);
        opb = op2 >> (i * 8);
        opr = opa > opb ? opa : opb;
        res |= (uint64_t)opr << (i * 8);
    }
    return res;
}

uint64_t helper_maxuw4 (uint64_t op1, uint64_t op2)
{
    uint64_t res = 0;
    uint16_t opa, opb, opr;
    int i;

    for (i = 0; i < 4; ++i) {
        opa = op1 >> (i * 16);
        opb = op2 >> (i * 16);
        opr = opa > opb ? opa : opb;
        res |= (uint64_t)opr << (i * 16);
    }
    return res;
}

uint64_t helper_maxsw4 (uint64_t op1, uint64_t op2)
{
    uint64_t res = 0;
    int16_t opa, opb;
    uint16_t opr;
    int i;

    for (i = 0; i < 4; ++i) {
        opa = op1 >> (i * 16);
        opb = op2 >> (i * 16);
        opr = opa > opb ? opa : opb;
        res |= (uint64_t)opr << (i * 16);
    }
    return res;
}
/* PERR: sum of absolute differences of the 8 unsigned byte lanes of
   OP1 and OP2 (pixel error, MVI extension).  */
uint64_t helper_perr (uint64_t op1, uint64_t op2)
{
    uint64_t res = 0;
    uint8_t opa, opb, opr;
    int i;

    for (i = 0; i < 8; ++i) {
        opa = op1 >> (i * 8);
        opb = op2 >> (i * 8);
        if (opa >= opb) {
            opr = opa - opb;
        } else {
            opr = opb - opa;
        }
        res += opr;
    }
    return res;
}
320 uint64_t helper_pklb (uint64_t op1
)
322 return (op1
& 0xff) | ((op1
>> 24) & 0xff00);
325 uint64_t helper_pkwb (uint64_t op1
)
328 | ((op1
>> 8) & 0xff00)
329 | ((op1
>> 16) & 0xff0000)
330 | ((op1
>> 24) & 0xff000000));
333 uint64_t helper_unpkbl (uint64_t op1
)
335 return (op1
& 0xff) | ((op1
& 0xff00) << 24);
338 uint64_t helper_unpkbw (uint64_t op1
)
341 | ((op1
& 0xff00) << 8)
342 | ((op1
& 0xff0000) << 16)
343 | ((op1
& 0xff000000) << 24));
346 /* Floating point helpers */
348 void helper_setroundmode (uint32_t val
)
350 set_float_rounding_mode(val
, &FP_STATUS
);
353 void helper_setflushzero (uint32_t val
)
355 set_flush_to_zero(val
, &FP_STATUS
);
358 void helper_fp_exc_clear (void)
360 set_float_exception_flags(0, &FP_STATUS
);
363 uint32_t helper_fp_exc_get (void)
365 return get_float_exception_flags(&FP_STATUS
);
368 /* Raise exceptions for ieee fp insns without software completion.
369 In that case there are no exceptions that don't trap; the mask
371 void helper_fp_exc_raise(uint32_t exc
, uint32_t regno
)
376 env
->ipr
[IPR_EXC_MASK
] |= 1ull << regno
;
378 if (exc
& float_flag_invalid
) {
381 if (exc
& float_flag_divbyzero
) {
384 if (exc
& float_flag_overflow
) {
387 if (exc
& float_flag_underflow
) {
390 if (exc
& float_flag_inexact
) {
393 helper_excp(EXCP_ARITH
, hw_exc
);
397 /* Raise exceptions for ieee fp insns with software completion. */
398 void helper_fp_exc_raise_s(uint32_t exc
, uint32_t regno
)
401 env
->fpcr_exc_status
|= exc
;
403 exc
&= ~env
->fpcr_exc_mask
;
405 helper_fp_exc_raise(exc
, regno
);
410 /* Input remapping without software completion. Handle denormal-map-to-zero
411 and trap for all other non-finite numbers. */
412 uint64_t helper_ieee_input(uint64_t val
)
414 uint32_t exp
= (uint32_t)(val
>> 52) & 0x7ff;
415 uint64_t frac
= val
& 0xfffffffffffffull
;
419 /* If DNZ is set flush denormals to zero on input. */
423 helper_excp(EXCP_ARITH
, EXC_M_UNF
);
426 } else if (exp
== 0x7ff) {
427 /* Infinity or NaN. */
428 /* ??? I'm not sure these exception bit flags are correct. I do
429 know that the Linux kernel, at least, doesn't rely on them and
430 just emulates the insn to figure out what exception to use. */
431 helper_excp(EXCP_ARITH
, frac
? EXC_M_INV
: EXC_M_FOV
);
436 /* Similar, but does not trap for infinities. Used for comparisons. */
437 uint64_t helper_ieee_input_cmp(uint64_t val
)
439 uint32_t exp
= (uint32_t)(val
>> 52) & 0x7ff;
440 uint64_t frac
= val
& 0xfffffffffffffull
;
444 /* If DNZ is set flush denormals to zero on input. */
448 helper_excp(EXCP_ARITH
, EXC_M_UNF
);
451 } else if (exp
== 0x7ff && frac
) {
453 helper_excp(EXCP_ARITH
, EXC_M_INV
);
458 /* Input remapping with software completion enabled. All we have to do
459 is handle denormal-map-to-zero; all other inputs get exceptions as
460 needed from the actual operation. */
461 uint64_t helper_ieee_input_s(uint64_t val
)
464 uint32_t exp
= (uint32_t)(val
>> 52) & 0x7ff;
472 /* F floating (VAX) */
473 static inline uint64_t float32_to_f(float32 fa
)
475 uint64_t r
, exp
, mant
, sig
;
479 sig
= ((uint64_t)a
.l
& 0x80000000) << 32;
480 exp
= (a
.l
>> 23) & 0xff;
481 mant
= ((uint64_t)a
.l
& 0x007fffff) << 29;
484 /* NaN or infinity */
485 r
= 1; /* VAX dirty zero */
486 } else if (exp
== 0) {
492 r
= sig
| ((exp
+ 1) << 52) | mant
;
497 r
= 1; /* VAX dirty zero */
499 r
= sig
| ((exp
+ 2) << 52);
506 static inline float32
f_to_float32(uint64_t a
)
508 uint32_t exp
, mant_sig
;
511 exp
= ((a
>> 55) & 0x80) | ((a
>> 52) & 0x7f);
512 mant_sig
= ((a
>> 32) & 0x80000000) | ((a
>> 29) & 0x007fffff);
514 if (unlikely(!exp
&& mant_sig
)) {
515 /* Reserved operands / Dirty zero */
516 helper_excp(EXCP_OPCDEC
, 0);
523 r
.l
= ((exp
- 2) << 23) | mant_sig
;
/* Convert the 64-bit register form of a VAX F-float to its 32-bit
   memory (word-swapped) form.  */
uint32_t helper_f_to_memory (uint64_t a)
{
    uint32_t r;

    r =  (a & 0x00001fffe0000000ull) >> 13;
    r |= (a & 0x07ffe00000000000ull) >> 45;
    r |= (a & 0xc000000000000000ull) >> 48;
    return r;
}

/* Convert the 32-bit memory form of a VAX F-float to its 64-bit
   register form.  */
uint64_t helper_memory_to_f (uint32_t a)
{
    uint64_t r;

    r =  ((uint64_t)(a & 0x0000c000)) << 48;
    r |= ((uint64_t)(a & 0x003fffff)) << 45;
    r |= ((uint64_t)(a & 0xffff0000)) << 13;
    /* A clear exponent-MSB means the register exponent field must be
       filled with ones (bias adjustment).  */
    if (!(a & 0x00004000)) {
        r |= 0x7ll << 59;
    }
    return r;
}
549 /* ??? Emulating VAX arithmetic with IEEE arithmetic is wrong. We should
550 either implement VAX arithmetic properly or just signal invalid opcode. */
552 uint64_t helper_addf (uint64_t a
, uint64_t b
)
556 fa
= f_to_float32(a
);
557 fb
= f_to_float32(b
);
558 fr
= float32_add(fa
, fb
, &FP_STATUS
);
559 return float32_to_f(fr
);
562 uint64_t helper_subf (uint64_t a
, uint64_t b
)
566 fa
= f_to_float32(a
);
567 fb
= f_to_float32(b
);
568 fr
= float32_sub(fa
, fb
, &FP_STATUS
);
569 return float32_to_f(fr
);
572 uint64_t helper_mulf (uint64_t a
, uint64_t b
)
576 fa
= f_to_float32(a
);
577 fb
= f_to_float32(b
);
578 fr
= float32_mul(fa
, fb
, &FP_STATUS
);
579 return float32_to_f(fr
);
582 uint64_t helper_divf (uint64_t a
, uint64_t b
)
586 fa
= f_to_float32(a
);
587 fb
= f_to_float32(b
);
588 fr
= float32_div(fa
, fb
, &FP_STATUS
);
589 return float32_to_f(fr
);
592 uint64_t helper_sqrtf (uint64_t t
)
596 ft
= f_to_float32(t
);
597 fr
= float32_sqrt(ft
, &FP_STATUS
);
598 return float32_to_f(fr
);
602 /* G floating (VAX) */
603 static inline uint64_t float64_to_g(float64 fa
)
605 uint64_t r
, exp
, mant
, sig
;
609 sig
= a
.ll
& 0x8000000000000000ull
;
610 exp
= (a
.ll
>> 52) & 0x7ff;
611 mant
= a
.ll
& 0x000fffffffffffffull
;
614 /* NaN or infinity */
615 r
= 1; /* VAX dirty zero */
616 } else if (exp
== 0) {
622 r
= sig
| ((exp
+ 1) << 52) | mant
;
627 r
= 1; /* VAX dirty zero */
629 r
= sig
| ((exp
+ 2) << 52);
636 static inline float64
g_to_float64(uint64_t a
)
638 uint64_t exp
, mant_sig
;
641 exp
= (a
>> 52) & 0x7ff;
642 mant_sig
= a
& 0x800fffffffffffffull
;
644 if (!exp
&& mant_sig
) {
645 /* Reserved operands / Dirty zero */
646 helper_excp(EXCP_OPCDEC
, 0);
653 r
.ll
= ((exp
- 2) << 52) | mant_sig
;
/* Convert the register form of a VAX G-float to its memory form: the
   four 16-bit lanes are reversed.  The transform is an involution, so
   the same code implements both directions.  */
uint64_t helper_g_to_memory (uint64_t a)
{
    uint64_t r;

    r =  (a & 0x000000000000ffffull) << 48;
    r |= (a & 0x00000000ffff0000ull) << 16;
    r |= (a & 0x0000ffff00000000ull) >> 16;
    r |= (a & 0xffff000000000000ull) >> 48;
    return r;
}

/* Convert the memory form of a VAX G-float to its register form
   (same lane swap as helper_g_to_memory).  */
uint64_t helper_memory_to_g (uint64_t a)
{
    uint64_t r;

    r =  (a & 0x000000000000ffffull) << 48;
    r |= (a & 0x00000000ffff0000ull) << 16;
    r |= (a & 0x0000ffff00000000ull) >> 16;
    r |= (a & 0xffff000000000000ull) >> 48;
    return r;
}
679 uint64_t helper_addg (uint64_t a
, uint64_t b
)
683 fa
= g_to_float64(a
);
684 fb
= g_to_float64(b
);
685 fr
= float64_add(fa
, fb
, &FP_STATUS
);
686 return float64_to_g(fr
);
689 uint64_t helper_subg (uint64_t a
, uint64_t b
)
693 fa
= g_to_float64(a
);
694 fb
= g_to_float64(b
);
695 fr
= float64_sub(fa
, fb
, &FP_STATUS
);
696 return float64_to_g(fr
);
699 uint64_t helper_mulg (uint64_t a
, uint64_t b
)
703 fa
= g_to_float64(a
);
704 fb
= g_to_float64(b
);
705 fr
= float64_mul(fa
, fb
, &FP_STATUS
);
706 return float64_to_g(fr
);
709 uint64_t helper_divg (uint64_t a
, uint64_t b
)
713 fa
= g_to_float64(a
);
714 fb
= g_to_float64(b
);
715 fr
= float64_div(fa
, fb
, &FP_STATUS
);
716 return float64_to_g(fr
);
719 uint64_t helper_sqrtg (uint64_t a
)
723 fa
= g_to_float64(a
);
724 fr
= float64_sqrt(fa
, &FP_STATUS
);
725 return float64_to_g(fr
);
/* S floating (single) */

/* Taken from linux/arch/alpha/kernel/traps.c, s_mem_to_reg.  Expand the
   32-bit IEEE single bit pattern FI into the canonical 64-bit register
   form (an IEEE double with the exponent rebiased).  */
static inline uint64_t float32_to_s_int(uint32_t fi)
{
    uint32_t frac = fi & 0x7fffff;
    uint32_t sign = fi >> 31;
    uint32_t exp_msb = (fi >> 30) & 1;
    uint32_t exp_low = (fi >> 23) & 0x7f;
    uint32_t exp;

    exp = (exp_msb << 10) | exp_low;
    if (exp_msb) {
        if (exp_low == 0x7f) {
            exp = 0x7ff;        /* Inf/NaN maps to the double Inf/NaN exp */
        }
    } else {
        if (exp_low == 0x00) {
            exp = 0x00;         /* zero / denormal */
        } else {
            exp |= 0x380;       /* rebias from single to double */
        }
    }

    return (((uint64_t)sign << 63)
            | ((uint64_t)exp << 52)
            | ((uint64_t)frac << 29));
}
754 static inline uint64_t float32_to_s(float32 fa
)
758 return float32_to_s_int(a
.l
);
/* Collapse the 64-bit S-float register form A back to the 32-bit IEEE
   single bit pattern (inverse of float32_to_s_int for canonical values).  */
static inline uint32_t s_to_float32_int(uint64_t a)
{
    return ((a >> 32) & 0xc0000000) | ((a >> 29) & 0x3fffffff);
}
766 static inline float32
s_to_float32(uint64_t a
)
769 r
.l
= s_to_float32_int(a
);
/* S-float register form -> 32-bit memory form.  */
uint32_t helper_s_to_memory (uint64_t a)
{
    return s_to_float32_int(a);
}

/* 32-bit S-float memory form -> register form.  */
uint64_t helper_memory_to_s (uint32_t a)
{
    return float32_to_s_int(a);
}
783 uint64_t helper_adds (uint64_t a
, uint64_t b
)
787 fa
= s_to_float32(a
);
788 fb
= s_to_float32(b
);
789 fr
= float32_add(fa
, fb
, &FP_STATUS
);
790 return float32_to_s(fr
);
793 uint64_t helper_subs (uint64_t a
, uint64_t b
)
797 fa
= s_to_float32(a
);
798 fb
= s_to_float32(b
);
799 fr
= float32_sub(fa
, fb
, &FP_STATUS
);
800 return float32_to_s(fr
);
803 uint64_t helper_muls (uint64_t a
, uint64_t b
)
807 fa
= s_to_float32(a
);
808 fb
= s_to_float32(b
);
809 fr
= float32_mul(fa
, fb
, &FP_STATUS
);
810 return float32_to_s(fr
);
813 uint64_t helper_divs (uint64_t a
, uint64_t b
)
817 fa
= s_to_float32(a
);
818 fb
= s_to_float32(b
);
819 fr
= float32_div(fa
, fb
, &FP_STATUS
);
820 return float32_to_s(fr
);
823 uint64_t helper_sqrts (uint64_t a
)
827 fa
= s_to_float32(a
);
828 fr
= float32_sqrt(fa
, &FP_STATUS
);
829 return float32_to_s(fr
);
833 /* T floating (double) */
834 static inline float64
t_to_float64(uint64_t a
)
836 /* Memory format is the same as float64 */
842 static inline uint64_t float64_to_t(float64 fa
)
844 /* Memory format is the same as float64 */
850 uint64_t helper_addt (uint64_t a
, uint64_t b
)
854 fa
= t_to_float64(a
);
855 fb
= t_to_float64(b
);
856 fr
= float64_add(fa
, fb
, &FP_STATUS
);
857 return float64_to_t(fr
);
860 uint64_t helper_subt (uint64_t a
, uint64_t b
)
864 fa
= t_to_float64(a
);
865 fb
= t_to_float64(b
);
866 fr
= float64_sub(fa
, fb
, &FP_STATUS
);
867 return float64_to_t(fr
);
870 uint64_t helper_mult (uint64_t a
, uint64_t b
)
874 fa
= t_to_float64(a
);
875 fb
= t_to_float64(b
);
876 fr
= float64_mul(fa
, fb
, &FP_STATUS
);
877 return float64_to_t(fr
);
880 uint64_t helper_divt (uint64_t a
, uint64_t b
)
884 fa
= t_to_float64(a
);
885 fb
= t_to_float64(b
);
886 fr
= float64_div(fa
, fb
, &FP_STATUS
);
887 return float64_to_t(fr
);
890 uint64_t helper_sqrtt (uint64_t a
)
894 fa
= t_to_float64(a
);
895 fr
= float64_sqrt(fa
, &FP_STATUS
);
896 return float64_to_t(fr
);
900 uint64_t helper_cmptun (uint64_t a
, uint64_t b
)
904 fa
= t_to_float64(a
);
905 fb
= t_to_float64(b
);
907 if (float64_unordered_quiet(fa
, fb
, &FP_STATUS
)) {
908 return 0x4000000000000000ULL
;
914 uint64_t helper_cmpteq(uint64_t a
, uint64_t b
)
918 fa
= t_to_float64(a
);
919 fb
= t_to_float64(b
);
921 if (float64_eq_quiet(fa
, fb
, &FP_STATUS
))
922 return 0x4000000000000000ULL
;
927 uint64_t helper_cmptle(uint64_t a
, uint64_t b
)
931 fa
= t_to_float64(a
);
932 fb
= t_to_float64(b
);
934 if (float64_le(fa
, fb
, &FP_STATUS
))
935 return 0x4000000000000000ULL
;
940 uint64_t helper_cmptlt(uint64_t a
, uint64_t b
)
944 fa
= t_to_float64(a
);
945 fb
= t_to_float64(b
);
947 if (float64_lt(fa
, fb
, &FP_STATUS
))
948 return 0x4000000000000000ULL
;
953 uint64_t helper_cmpgeq(uint64_t a
, uint64_t b
)
957 fa
= g_to_float64(a
);
958 fb
= g_to_float64(b
);
960 if (float64_eq_quiet(fa
, fb
, &FP_STATUS
))
961 return 0x4000000000000000ULL
;
966 uint64_t helper_cmpgle(uint64_t a
, uint64_t b
)
970 fa
= g_to_float64(a
);
971 fb
= g_to_float64(b
);
973 if (float64_le(fa
, fb
, &FP_STATUS
))
974 return 0x4000000000000000ULL
;
979 uint64_t helper_cmpglt(uint64_t a
, uint64_t b
)
983 fa
= g_to_float64(a
);
984 fb
= g_to_float64(b
);
986 if (float64_lt(fa
, fb
, &FP_STATUS
))
987 return 0x4000000000000000ULL
;
992 /* Floating point format conversion */
993 uint64_t helper_cvtts (uint64_t a
)
998 fa
= t_to_float64(a
);
999 fr
= float64_to_float32(fa
, &FP_STATUS
);
1000 return float32_to_s(fr
);
1003 uint64_t helper_cvtst (uint64_t a
)
1008 fa
= s_to_float32(a
);
1009 fr
= float32_to_float64(fa
, &FP_STATUS
);
1010 return float64_to_t(fr
);
1013 uint64_t helper_cvtqs (uint64_t a
)
1015 float32 fr
= int64_to_float32(a
, &FP_STATUS
);
1016 return float32_to_s(fr
);
1019 /* Implement float64 to uint64 conversion without saturation -- we must
1020 supply the truncated result. This behaviour is used by the compiler
1021 to get unsigned conversion for free with the same instruction.
1023 The VI flag is set when overflow or inexact exceptions should be raised. */
1025 static inline uint64_t helper_cvttq_internal(uint64_t a
, int roundmode
, int VI
)
1027 uint64_t frac
, ret
= 0;
1028 uint32_t exp
, sign
, exc
= 0;
1032 exp
= (uint32_t)(a
>> 52) & 0x7ff;
1033 frac
= a
& 0xfffffffffffffull
;
1036 if (unlikely(frac
!= 0)) {
1039 } else if (exp
== 0x7ff) {
1040 exc
= (frac
? float_flag_invalid
: VI
? float_flag_overflow
: 0);
1042 /* Restore implicit bit. */
1043 frac
|= 0x10000000000000ull
;
1045 shift
= exp
- 1023 - 52;
1047 /* In this case the number is so large that we must shift
1048 the fraction left. There is no rounding to do. */
1050 ret
= frac
<< shift
;
1051 if (VI
&& (ret
>> shift
) != frac
) {
1052 exc
= float_flag_overflow
;
1058 /* In this case the number is smaller than the fraction as
1059 represented by the 52 bit number. Here we must think
1060 about rounding the result. Handle this by shifting the
1061 fractional part of the number into the high bits of ROUND.
1062 This will let us efficiently handle round-to-nearest. */
1065 ret
= frac
>> shift
;
1066 round
= frac
<< (64 - shift
);
1068 /* The exponent is so small we shift out everything.
1069 Leave a sticky bit for proper rounding below. */
1075 exc
= (VI
? float_flag_inexact
: 0);
1076 switch (roundmode
) {
1077 case float_round_nearest_even
:
1078 if (round
== (1ull << 63)) {
1079 /* Fraction is exactly 0.5; round to even. */
1081 } else if (round
> (1ull << 63)) {
1085 case float_round_to_zero
:
1087 case float_round_up
:
1090 case float_round_down
:
1100 if (unlikely(exc
)) {
1101 float_raise(exc
, &FP_STATUS
);
1107 uint64_t helper_cvttq(uint64_t a
)
1109 return helper_cvttq_internal(a
, FP_STATUS
.float_rounding_mode
, 1);
1112 uint64_t helper_cvttq_c(uint64_t a
)
1114 return helper_cvttq_internal(a
, float_round_to_zero
, 0);
1117 uint64_t helper_cvttq_svic(uint64_t a
)
1119 return helper_cvttq_internal(a
, float_round_to_zero
, 1);
1122 uint64_t helper_cvtqt (uint64_t a
)
1124 float64 fr
= int64_to_float64(a
, &FP_STATUS
);
1125 return float64_to_t(fr
);
1128 uint64_t helper_cvtqf (uint64_t a
)
1130 float32 fr
= int64_to_float32(a
, &FP_STATUS
);
1131 return float32_to_f(fr
);
1134 uint64_t helper_cvtgf (uint64_t a
)
1139 fa
= g_to_float64(a
);
1140 fr
= float64_to_float32(fa
, &FP_STATUS
);
1141 return float32_to_f(fr
);
1144 uint64_t helper_cvtgq (uint64_t a
)
1146 float64 fa
= g_to_float64(a
);
1147 return float64_to_int64_round_to_zero(fa
, &FP_STATUS
);
1150 uint64_t helper_cvtqg (uint64_t a
)
1153 fr
= int64_to_float64(a
, &FP_STATUS
);
1154 return float64_to_g(fr
);
1157 /* PALcode support special instructions */
1158 #if !defined (CONFIG_USER_ONLY)
1159 void helper_hw_rei (void)
1161 env
->pc
= env
->ipr
[IPR_EXC_ADDR
] & ~3;
1162 env
->ipr
[IPR_EXC_ADDR
] = env
->ipr
[IPR_EXC_ADDR
] & 1;
1164 env
->lock_addr
= -1;
1165 /* XXX: re-enable interrupts and memory mapping */
1168 void helper_hw_ret (uint64_t a
)
1171 env
->ipr
[IPR_EXC_ADDR
] = a
& 1;
1173 env
->lock_addr
= -1;
1174 /* XXX: re-enable interrupts and memory mapping */
1177 uint64_t helper_mfpr (int iprn
, uint64_t val
)
1181 if (cpu_alpha_mfpr(env
, iprn
, &tmp
) == 0)
1187 void helper_mtpr (int iprn
, uint64_t val
)
1189 cpu_alpha_mtpr(env
, iprn
, val
, NULL
);
1192 void helper_set_alt_mode (void)
1194 env
->saved_mode
= env
->ps
& 0xC;
1195 env
->ps
= (env
->ps
& ~0xC) | (env
->ipr
[IPR_ALT_MODE
] & 0xC);
1198 void helper_restore_mode (void)
1200 env
->ps
= (env
->ps
& ~0xC) | env
->saved_mode
;
1205 /*****************************************************************************/
1206 /* Softmmu support */
1207 #if !defined (CONFIG_USER_ONLY)
1209 /* XXX: the two following helpers are pure hacks.
1210 * Hopefully, we emulate the PALcode, then we should never see
1211 * HW_LD / HW_ST instructions.
1213 uint64_t helper_ld_virt_to_phys (uint64_t virtaddr
)
1215 uint64_t tlb_addr
, physaddr
;
1219 mmu_idx
= cpu_mmu_index(env
);
1220 index
= (virtaddr
>> TARGET_PAGE_BITS
) & (CPU_TLB_SIZE
- 1);
1222 tlb_addr
= env
->tlb_table
[mmu_idx
][index
].addr_read
;
1223 if ((virtaddr
& TARGET_PAGE_MASK
) ==
1224 (tlb_addr
& (TARGET_PAGE_MASK
| TLB_INVALID_MASK
))) {
1225 physaddr
= virtaddr
+ env
->tlb_table
[mmu_idx
][index
].addend
;
1227 /* the page is not in the TLB : fill it */
1229 tlb_fill(virtaddr
, 0, mmu_idx
, retaddr
);
1235 uint64_t helper_st_virt_to_phys (uint64_t virtaddr
)
1237 uint64_t tlb_addr
, physaddr
;
1241 mmu_idx
= cpu_mmu_index(env
);
1242 index
= (virtaddr
>> TARGET_PAGE_BITS
) & (CPU_TLB_SIZE
- 1);
1244 tlb_addr
= env
->tlb_table
[mmu_idx
][index
].addr_write
;
1245 if ((virtaddr
& TARGET_PAGE_MASK
) ==
1246 (tlb_addr
& (TARGET_PAGE_MASK
| TLB_INVALID_MASK
))) {
1247 physaddr
= virtaddr
+ env
->tlb_table
[mmu_idx
][index
].addend
;
1249 /* the page is not in the TLB : fill it */
1251 tlb_fill(virtaddr
, 1, mmu_idx
, retaddr
);
/* NOTE(review): the bodies of the raw/kernel/data load-store helpers below
   were lost when this file was extracted; only the signatures — and the
   lock comparison inside the store-conditional helpers — survive.
   Presumably each wraps the corresponding ld*_raw/st*_raw (or _kernel/_data)
   softmmu access, and the *_c_raw variants implement LDx_L/STx_C
   load-locked/store-conditional semantics against env->lock — recover the
   exact bodies from the QEMU history before editing.  */
1257 void helper_ldl_raw(uint64_t t0
, uint64_t t1
)
1262 void helper_ldq_raw(uint64_t t0
, uint64_t t1
)
1267 void helper_ldl_l_raw(uint64_t t0
, uint64_t t1
)
1273 void helper_ldq_l_raw(uint64_t t0
, uint64_t t1
)
1279 void helper_ldl_kernel(uint64_t t0
, uint64_t t1
)
1284 void helper_ldq_kernel(uint64_t t0
, uint64_t t1
)
1289 void helper_ldl_data(uint64_t t0
, uint64_t t1
)
1294 void helper_ldq_data(uint64_t t0
, uint64_t t1
)
1299 void helper_stl_raw(uint64_t t0
, uint64_t t1
)
1304 void helper_stq_raw(uint64_t t0
, uint64_t t1
)
1309 uint64_t helper_stl_c_raw(uint64_t t0
, uint64_t t1
)
/* store-conditional: succeeds only when t1 matches the recorded lock */
1313 if (t1
== env
->lock
) {
1324 uint64_t helper_stq_c_raw(uint64_t t0
, uint64_t t1
)
1328 if (t1
== env
->lock
) {
1339 #define MMUSUFFIX _mmu
1342 #include "softmmu_template.h"
1345 #include "softmmu_template.h"
1348 #include "softmmu_template.h"
1351 #include "softmmu_template.h"
1353 /* try to fill the TLB and return an exception if error. If retaddr is
1354 NULL, it means that the function was called in C code (i.e. not
1355 from generated code or from helper.c) */
1356 /* XXX: fix it to restore all registers */
1357 void tlb_fill (target_ulong addr
, int is_write
, int mmu_idx
, void *retaddr
)
1359 TranslationBlock
*tb
;
1360 CPUState
*saved_env
;
1364 /* XXX: hack to restore env in all cases, even if not called from
1367 env
= cpu_single_env
;
1368 ret
= cpu_alpha_handle_mmu_fault(env
, addr
, is_write
, mmu_idx
, 1);
1369 if (!likely(ret
== 0)) {
1370 if (likely(retaddr
)) {
1371 /* now we have a real cpu fault */
1372 pc
= (unsigned long)retaddr
;
1373 tb
= tb_find_pc(pc
);
1375 /* the PC is inside the translated code. It means that we have
1376 a virtual CPU fault */
1377 cpu_restore_state(tb
, env
, pc
);
1380 /* Exception index and error code are already set */