2 * Alpha emulation cpu micro-operations helpers for qemu.
4 * Copyright (c) 2007 Jocelyn Mayer
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
21 #include "host-utils.h"
22 #include "softfloat.h"
25 /*****************************************************************************/
26 /* Exceptions processing helpers */
27 void QEMU_NORETURN
helper_excp (int excp
, int error
)
29 env
->exception_index
= excp
;
30 env
->error_code
= error
;
34 uint64_t helper_load_pcc (void)
40 uint64_t helper_load_fpcr (void)
42 return cpu_alpha_load_fpcr (env
);
45 void helper_store_fpcr (uint64_t val
)
47 cpu_alpha_store_fpcr (env
, val
);
50 uint64_t helper_addqv (uint64_t op1
, uint64_t op2
)
54 if (unlikely((tmp
^ op2
^ (-1ULL)) & (tmp
^ op1
) & (1ULL << 63))) {
55 helper_excp(EXCP_ARITH
, EXC_M_IOV
);
60 uint64_t helper_addlv (uint64_t op1
, uint64_t op2
)
63 op1
= (uint32_t)(op1
+ op2
);
64 if (unlikely((tmp
^ op2
^ (-1UL)) & (tmp
^ op1
) & (1UL << 31))) {
65 helper_excp(EXCP_ARITH
, EXC_M_IOV
);
70 uint64_t helper_subqv (uint64_t op1
, uint64_t op2
)
74 if (unlikely((op1
^ op2
) & (res
^ op1
) & (1ULL << 63))) {
75 helper_excp(EXCP_ARITH
, EXC_M_IOV
);
80 uint64_t helper_sublv (uint64_t op1
, uint64_t op2
)
84 if (unlikely((op1
^ op2
) & (res
^ op1
) & (1UL << 31))) {
85 helper_excp(EXCP_ARITH
, EXC_M_IOV
);
90 uint64_t helper_mullv (uint64_t op1
, uint64_t op2
)
92 int64_t res
= (int64_t)op1
* (int64_t)op2
;
94 if (unlikely((int32_t)res
!= res
)) {
95 helper_excp(EXCP_ARITH
, EXC_M_IOV
);
97 return (int64_t)((int32_t)res
);
100 uint64_t helper_mulqv (uint64_t op1
, uint64_t op2
)
104 muls64(&tl
, &th
, op1
, op2
);
105 /* If th != 0 && th != -1, then we had an overflow */
106 if (unlikely((th
+ 1) > 1)) {
107 helper_excp(EXCP_ARITH
, EXC_M_IOV
);
112 uint64_t helper_umulh (uint64_t op1
, uint64_t op2
)
116 mulu64(&tl
, &th
, op1
, op2
);
120 uint64_t helper_ctpop (uint64_t arg
)
125 uint64_t helper_ctlz (uint64_t arg
)
130 uint64_t helper_cttz (uint64_t arg
)
/* Clear byte I of OP for every set bit I in MSKB.  */
static inline uint64_t byte_zap(uint64_t op, uint8_t mskb)
{
    uint64_t mask = 0;
    int i;

    for (i = 0; i < 8; i++) {
        if ((mskb >> i) & 1) {
            mask |= 0xffull << (i * 8);
        }
    }
    return op & ~mask;
}
152 uint64_t helper_zap(uint64_t val
, uint64_t mask
)
154 return byte_zap(val
, mask
);
157 uint64_t helper_zapnot(uint64_t val
, uint64_t mask
)
159 return byte_zap(val
, ~mask
);
/* CMPBGE: set result bit I when unsigned byte I of OP1 >= byte I of OP2.  */
uint64_t helper_cmpbge (uint64_t op1, uint64_t op2)
{
    uint8_t opa, opb, res;
    int i;

    res = 0;
    for (i = 0; i < 8; i++) {
        opa = op1 >> (i * 8);
        opb = op2 >> (i * 8);
        if (opa >= opb) {
            res |= 1 << i;
        }
    }
    return res;
}
/* MINUB8: per-byte unsigned minimum of the two operands.  */
uint64_t helper_minub8 (uint64_t op1, uint64_t op2)
{
    uint64_t res = 0;
    uint8_t opa, opb, opr;
    int i;

    for (i = 0; i < 8; ++i) {
        opa = op1 >> (i * 8);
        opb = op2 >> (i * 8);
        opr = opa < opb ? opa : opb;
        res |= (uint64_t)opr << (i * 8);
    }
    return res;
}
/* MINSB8: per-byte signed minimum of the two operands.  */
uint64_t helper_minsb8 (uint64_t op1, uint64_t op2)
{
    uint64_t res = 0;
    int8_t opa, opb;      /* signed comparison per byte */
    uint8_t opr;
    int i;

    for (i = 0; i < 8; ++i) {
        opa = op1 >> (i * 8);
        opb = op2 >> (i * 8);
        opr = opa < opb ? opa : opb;
        res |= (uint64_t)opr << (i * 8);
    }
    return res;
}
/* MINUW4: per-16-bit-word unsigned minimum of the two operands.  */
uint64_t helper_minuw4 (uint64_t op1, uint64_t op2)
{
    uint64_t res = 0;
    uint16_t opa, opb, opr;
    int i;

    for (i = 0; i < 4; ++i) {
        opa = op1 >> (i * 16);
        opb = op2 >> (i * 16);
        opr = opa < opb ? opa : opb;
        res |= (uint64_t)opr << (i * 16);
    }
    return res;
}
/* MINSW4: per-16-bit-word signed minimum of the two operands.  */
uint64_t helper_minsw4 (uint64_t op1, uint64_t op2)
{
    uint64_t res = 0;
    int16_t opa, opb;     /* signed comparison per word */
    uint16_t opr;
    int i;

    for (i = 0; i < 4; ++i) {
        opa = op1 >> (i * 16);
        opb = op2 >> (i * 16);
        opr = opa < opb ? opa : opb;
        res |= (uint64_t)opr << (i * 16);
    }
    return res;
}
/* MAXUB8: per-byte unsigned maximum of the two operands.  */
uint64_t helper_maxub8 (uint64_t op1, uint64_t op2)
{
    uint64_t res = 0;
    uint8_t opa, opb, opr;
    int i;

    for (i = 0; i < 8; ++i) {
        opa = op1 >> (i * 8);
        opb = op2 >> (i * 8);
        opr = opa > opb ? opa : opb;
        res |= (uint64_t)opr << (i * 8);
    }
    return res;
}
/* MAXSB8: per-byte signed maximum of the two operands.  */
uint64_t helper_maxsb8 (uint64_t op1, uint64_t op2)
{
    uint64_t res = 0;
    int8_t opa, opb;      /* signed comparison per byte */
    uint8_t opr;
    int i;

    for (i = 0; i < 8; ++i) {
        opa = op1 >> (i * 8);
        opb = op2 >> (i * 8);
        opr = opa > opb ? opa : opb;
        res |= (uint64_t)opr << (i * 8);
    }
    return res;
}
/* MAXUW4: per-16-bit-word unsigned maximum of the two operands.  */
uint64_t helper_maxuw4 (uint64_t op1, uint64_t op2)
{
    uint64_t res = 0;
    uint16_t opa, opb, opr;
    int i;

    for (i = 0; i < 4; ++i) {
        opa = op1 >> (i * 16);
        opb = op2 >> (i * 16);
        opr = opa > opb ? opa : opb;
        res |= (uint64_t)opr << (i * 16);
    }
    return res;
}
/* MAXSW4: per-16-bit-word signed maximum of the two operands.  */
uint64_t helper_maxsw4 (uint64_t op1, uint64_t op2)
{
    uint64_t res = 0;
    int16_t opa, opb;     /* signed comparison per word */
    uint16_t opr;
    int i;

    for (i = 0; i < 4; ++i) {
        opa = op1 >> (i * 16);
        opb = op2 >> (i * 16);
        opr = opa > opb ? opa : opb;
        res |= (uint64_t)opr << (i * 16);
    }
    return res;
}
/* PERR: sum of absolute differences of the eight byte pairs.  */
uint64_t helper_perr (uint64_t op1, uint64_t op2)
{
    uint64_t res = 0;
    uint8_t opa, opb, opr;
    int i;

    for (i = 0; i < 8; ++i) {
        opa = op1 >> (i * 8);
        opb = op2 >> (i * 8);
        opr = opa >= opb ? opa - opb : opb - opa;
        res += opr;
    }
    return res;
}
/* PKLB: pack the low byte of each longword into the low two bytes.  */
uint64_t helper_pklb (uint64_t op1)
{
    return (op1 & 0xff) | ((op1 >> 24) & 0xff00);
}
/* PKWB: pack the low byte of each word into the low four bytes.  */
uint64_t helper_pkwb (uint64_t op1)
{
    return ((op1 & 0xff)
            | ((op1 >> 8) & 0xff00)
            | ((op1 >> 16) & 0xff0000)
            | ((op1 >> 24) & 0xff000000));
}
/* UNPKBL: spread the low two bytes into the low byte of each longword.  */
uint64_t helper_unpkbl (uint64_t op1)
{
    return (op1 & 0xff) | ((op1 & 0xff00) << 24);
}
/* UNPKBW: spread the low four bytes into the low byte of each word.  */
uint64_t helper_unpkbw (uint64_t op1)
{
    return ((op1 & 0xff)
            | ((op1 & 0xff00) << 8)
            | ((op1 & 0xff0000) << 16)
            | ((op1 & 0xff000000) << 24));
}
345 /* Floating point helpers */
347 void helper_setroundmode (uint32_t val
)
349 set_float_rounding_mode(val
, &FP_STATUS
);
352 void helper_setflushzero (uint32_t val
)
354 set_flush_to_zero(val
, &FP_STATUS
);
357 void helper_fp_exc_clear (void)
359 set_float_exception_flags(0, &FP_STATUS
);
362 uint32_t helper_fp_exc_get (void)
364 return get_float_exception_flags(&FP_STATUS
);
367 /* Raise exceptions for ieee fp insns without software completion.
368 In that case there are no exceptions that don't trap; the mask
370 void helper_fp_exc_raise(uint32_t exc
, uint32_t regno
)
375 env
->ipr
[IPR_EXC_MASK
] |= 1ull << regno
;
377 if (exc
& float_flag_invalid
) {
380 if (exc
& float_flag_divbyzero
) {
383 if (exc
& float_flag_overflow
) {
386 if (exc
& float_flag_underflow
) {
389 if (exc
& float_flag_inexact
) {
392 helper_excp(EXCP_ARITH
, hw_exc
);
396 /* Raise exceptions for ieee fp insns with software completion. */
397 void helper_fp_exc_raise_s(uint32_t exc
, uint32_t regno
)
400 env
->fpcr_exc_status
|= exc
;
402 exc
&= ~env
->fpcr_exc_mask
;
404 helper_fp_exc_raise(exc
, regno
);
409 /* Input remapping without software completion. Handle denormal-map-to-zero
410 and trap for all other non-finite numbers. */
411 uint64_t helper_ieee_input(uint64_t val
)
413 uint32_t exp
= (uint32_t)(val
>> 52) & 0x7ff;
414 uint64_t frac
= val
& 0xfffffffffffffull
;
418 /* If DNZ is set flush denormals to zero on input. */
422 helper_excp(EXCP_ARITH
, EXC_M_UNF
);
425 } else if (exp
== 0x7ff) {
426 /* Infinity or NaN. */
427 /* ??? I'm not sure these exception bit flags are correct. I do
428 know that the Linux kernel, at least, doesn't rely on them and
429 just emulates the insn to figure out what exception to use. */
430 helper_excp(EXCP_ARITH
, frac
? EXC_M_INV
: EXC_M_FOV
);
435 /* Similar, but does not trap for infinities. Used for comparisons. */
436 uint64_t helper_ieee_input_cmp(uint64_t val
)
438 uint32_t exp
= (uint32_t)(val
>> 52) & 0x7ff;
439 uint64_t frac
= val
& 0xfffffffffffffull
;
443 /* If DNZ is set flush denormals to zero on input. */
447 helper_excp(EXCP_ARITH
, EXC_M_UNF
);
450 } else if (exp
== 0x7ff && frac
) {
452 helper_excp(EXCP_ARITH
, EXC_M_INV
);
457 /* Input remapping with software completion enabled. All we have to do
458 is handle denormal-map-to-zero; all other inputs get exceptions as
459 needed from the actual operation. */
460 uint64_t helper_ieee_input_s(uint64_t val
)
463 uint32_t exp
= (uint32_t)(val
>> 52) & 0x7ff;
471 /* F floating (VAX) */
472 static inline uint64_t float32_to_f(float32 fa
)
474 uint64_t r
, exp
, mant
, sig
;
478 sig
= ((uint64_t)a
.l
& 0x80000000) << 32;
479 exp
= (a
.l
>> 23) & 0xff;
480 mant
= ((uint64_t)a
.l
& 0x007fffff) << 29;
483 /* NaN or infinity */
484 r
= 1; /* VAX dirty zero */
485 } else if (exp
== 0) {
491 r
= sig
| ((exp
+ 1) << 52) | mant
;
496 r
= 1; /* VAX dirty zero */
498 r
= sig
| ((exp
+ 2) << 52);
505 static inline float32
f_to_float32(uint64_t a
)
507 uint32_t exp
, mant_sig
;
510 exp
= ((a
>> 55) & 0x80) | ((a
>> 52) & 0x7f);
511 mant_sig
= ((a
>> 32) & 0x80000000) | ((a
>> 29) & 0x007fffff);
513 if (unlikely(!exp
&& mant_sig
)) {
514 /* Reserved operands / Dirty zero */
515 helper_excp(EXCP_OPCDEC
, 0);
522 r
.l
= ((exp
- 2) << 23) | mant_sig
;
/* Shuffle a VAX F register value into its 32-bit memory layout.  */
uint32_t helper_f_to_memory (uint64_t a)
{
    uint32_t r;
    r  = (a & 0x00001fffe0000000ull) >> 13;
    r |= (a & 0x07ffe00000000000ull) >> 45;
    r |= (a & 0xc000000000000000ull) >> 48;
    return r;
}
/* Shuffle a VAX F memory word into its register layout.  */
uint64_t helper_memory_to_f (uint32_t a)
{
    uint64_t r;
    r  = ((uint64_t)(a & 0x0000c000)) << 48;
    r |= ((uint64_t)(a & 0x003fffff)) << 45;
    r |= ((uint64_t)(a & 0xffff0000)) << 13;
    if (!(a & 0x00004000)) {
        /* Exponent MSB clear: sign-extend the exponent field.  */
        r |= 0x7ll << 59;
    }
    return r;
}
548 /* ??? Emulating VAX arithmetic with IEEE arithmetic is wrong. We should
549 either implement VAX arithmetic properly or just signal invalid opcode. */
551 uint64_t helper_addf (uint64_t a
, uint64_t b
)
555 fa
= f_to_float32(a
);
556 fb
= f_to_float32(b
);
557 fr
= float32_add(fa
, fb
, &FP_STATUS
);
558 return float32_to_f(fr
);
561 uint64_t helper_subf (uint64_t a
, uint64_t b
)
565 fa
= f_to_float32(a
);
566 fb
= f_to_float32(b
);
567 fr
= float32_sub(fa
, fb
, &FP_STATUS
);
568 return float32_to_f(fr
);
571 uint64_t helper_mulf (uint64_t a
, uint64_t b
)
575 fa
= f_to_float32(a
);
576 fb
= f_to_float32(b
);
577 fr
= float32_mul(fa
, fb
, &FP_STATUS
);
578 return float32_to_f(fr
);
581 uint64_t helper_divf (uint64_t a
, uint64_t b
)
585 fa
= f_to_float32(a
);
586 fb
= f_to_float32(b
);
587 fr
= float32_div(fa
, fb
, &FP_STATUS
);
588 return float32_to_f(fr
);
591 uint64_t helper_sqrtf (uint64_t t
)
595 ft
= f_to_float32(t
);
596 fr
= float32_sqrt(ft
, &FP_STATUS
);
597 return float32_to_f(fr
);
601 /* G floating (VAX) */
602 static inline uint64_t float64_to_g(float64 fa
)
604 uint64_t r
, exp
, mant
, sig
;
608 sig
= a
.ll
& 0x8000000000000000ull
;
609 exp
= (a
.ll
>> 52) & 0x7ff;
610 mant
= a
.ll
& 0x000fffffffffffffull
;
613 /* NaN or infinity */
614 r
= 1; /* VAX dirty zero */
615 } else if (exp
== 0) {
621 r
= sig
| ((exp
+ 1) << 52) | mant
;
626 r
= 1; /* VAX dirty zero */
628 r
= sig
| ((exp
+ 2) << 52);
635 static inline float64
g_to_float64(uint64_t a
)
637 uint64_t exp
, mant_sig
;
640 exp
= (a
>> 52) & 0x7ff;
641 mant_sig
= a
& 0x800fffffffffffffull
;
643 if (!exp
&& mant_sig
) {
644 /* Reserved operands / Dirty zero */
645 helper_excp(EXCP_OPCDEC
, 0);
652 r
.ll
= ((exp
- 2) << 52) | mant_sig
;
/* Swap the four 16-bit words: VAX G register layout -> memory layout.  */
uint64_t helper_g_to_memory (uint64_t a)
{
    uint64_t r;
    r  = (a & 0x000000000000ffffull) << 48;
    r |= (a & 0x00000000ffff0000ull) << 16;
    r |= (a & 0x0000ffff00000000ull) >> 16;
    r |= (a & 0xffff000000000000ull) >> 48;
    return r;
}
/* Swap the four 16-bit words: VAX G memory layout -> register layout
   (the transformation is its own inverse).  */
uint64_t helper_memory_to_g (uint64_t a)
{
    uint64_t r;
    r  = (a & 0x000000000000ffffull) << 48;
    r |= (a & 0x00000000ffff0000ull) << 16;
    r |= (a & 0x0000ffff00000000ull) >> 16;
    r |= (a & 0xffff000000000000ull) >> 48;
    return r;
}
678 uint64_t helper_addg (uint64_t a
, uint64_t b
)
682 fa
= g_to_float64(a
);
683 fb
= g_to_float64(b
);
684 fr
= float64_add(fa
, fb
, &FP_STATUS
);
685 return float64_to_g(fr
);
688 uint64_t helper_subg (uint64_t a
, uint64_t b
)
692 fa
= g_to_float64(a
);
693 fb
= g_to_float64(b
);
694 fr
= float64_sub(fa
, fb
, &FP_STATUS
);
695 return float64_to_g(fr
);
698 uint64_t helper_mulg (uint64_t a
, uint64_t b
)
702 fa
= g_to_float64(a
);
703 fb
= g_to_float64(b
);
704 fr
= float64_mul(fa
, fb
, &FP_STATUS
);
705 return float64_to_g(fr
);
708 uint64_t helper_divg (uint64_t a
, uint64_t b
)
712 fa
= g_to_float64(a
);
713 fb
= g_to_float64(b
);
714 fr
= float64_div(fa
, fb
, &FP_STATUS
);
715 return float64_to_g(fr
);
718 uint64_t helper_sqrtg (uint64_t a
)
722 fa
= g_to_float64(a
);
723 fr
= float64_sqrt(fa
, &FP_STATUS
);
724 return float64_to_g(fr
);
728 /* S floating (single) */
730 /* Taken from linux/arch/alpha/kernel/traps.c, s_mem_to_reg. */
/* Taken from linux/arch/alpha/kernel/traps.c, s_mem_to_reg: widen an
   IEEE single bit pattern into the S-format register (double) layout.  */
static inline uint64_t float32_to_s_int(uint32_t fi)
{
    uint32_t frac = fi & 0x7fffff;
    uint32_t sign = fi >> 31;
    uint32_t exp_msb = (fi >> 30) & 1;
    uint32_t exp_low = (fi >> 23) & 0x7f;
    uint32_t exp;

    exp = (exp_msb << 10) | exp_low;
    if (exp_msb) {
        if (exp_low == 0x7f) {
            exp = 0x7ff;            /* Inf/NaN maps to all-ones exponent */
        }
    } else {
        if (exp_low != 0x00) {
            exp |= 0x380;           /* re-bias normal exponents */
        }
    }

    return (((uint64_t)sign << 63)
            | ((uint64_t)exp << 52)
            | ((uint64_t)frac << 29));
}
753 static inline uint64_t float32_to_s(float32 fa
)
757 return float32_to_s_int(a
.l
);
/* Narrow an S-format register (double layout) back to a single's bits.  */
static inline uint32_t s_to_float32_int(uint64_t a)
{
    return ((a >> 32) & 0xc0000000) | ((a >> 29) & 0x3fffffff);
}
765 static inline float32
s_to_float32(uint64_t a
)
768 r
.l
= s_to_float32_int(a
);
772 uint32_t helper_s_to_memory (uint64_t a
)
774 return s_to_float32_int(a
);
777 uint64_t helper_memory_to_s (uint32_t a
)
779 return float32_to_s_int(a
);
782 uint64_t helper_adds (uint64_t a
, uint64_t b
)
786 fa
= s_to_float32(a
);
787 fb
= s_to_float32(b
);
788 fr
= float32_add(fa
, fb
, &FP_STATUS
);
789 return float32_to_s(fr
);
792 uint64_t helper_subs (uint64_t a
, uint64_t b
)
796 fa
= s_to_float32(a
);
797 fb
= s_to_float32(b
);
798 fr
= float32_sub(fa
, fb
, &FP_STATUS
);
799 return float32_to_s(fr
);
802 uint64_t helper_muls (uint64_t a
, uint64_t b
)
806 fa
= s_to_float32(a
);
807 fb
= s_to_float32(b
);
808 fr
= float32_mul(fa
, fb
, &FP_STATUS
);
809 return float32_to_s(fr
);
812 uint64_t helper_divs (uint64_t a
, uint64_t b
)
816 fa
= s_to_float32(a
);
817 fb
= s_to_float32(b
);
818 fr
= float32_div(fa
, fb
, &FP_STATUS
);
819 return float32_to_s(fr
);
822 uint64_t helper_sqrts (uint64_t a
)
826 fa
= s_to_float32(a
);
827 fr
= float32_sqrt(fa
, &FP_STATUS
);
828 return float32_to_s(fr
);
832 /* T floating (double) */
833 static inline float64
t_to_float64(uint64_t a
)
835 /* Memory format is the same as float64 */
841 static inline uint64_t float64_to_t(float64 fa
)
843 /* Memory format is the same as float64 */
849 uint64_t helper_addt (uint64_t a
, uint64_t b
)
853 fa
= t_to_float64(a
);
854 fb
= t_to_float64(b
);
855 fr
= float64_add(fa
, fb
, &FP_STATUS
);
856 return float64_to_t(fr
);
859 uint64_t helper_subt (uint64_t a
, uint64_t b
)
863 fa
= t_to_float64(a
);
864 fb
= t_to_float64(b
);
865 fr
= float64_sub(fa
, fb
, &FP_STATUS
);
866 return float64_to_t(fr
);
869 uint64_t helper_mult (uint64_t a
, uint64_t b
)
873 fa
= t_to_float64(a
);
874 fb
= t_to_float64(b
);
875 fr
= float64_mul(fa
, fb
, &FP_STATUS
);
876 return float64_to_t(fr
);
879 uint64_t helper_divt (uint64_t a
, uint64_t b
)
883 fa
= t_to_float64(a
);
884 fb
= t_to_float64(b
);
885 fr
= float64_div(fa
, fb
, &FP_STATUS
);
886 return float64_to_t(fr
);
889 uint64_t helper_sqrtt (uint64_t a
)
893 fa
= t_to_float64(a
);
894 fr
= float64_sqrt(fa
, &FP_STATUS
);
895 return float64_to_t(fr
);
899 uint64_t helper_cmptun (uint64_t a
, uint64_t b
)
903 fa
= t_to_float64(a
);
904 fb
= t_to_float64(b
);
906 if (float64_is_nan(fa
) || float64_is_nan(fb
))
907 return 0x4000000000000000ULL
;
912 uint64_t helper_cmpteq(uint64_t a
, uint64_t b
)
916 fa
= t_to_float64(a
);
917 fb
= t_to_float64(b
);
919 if (float64_eq(fa
, fb
, &FP_STATUS
))
920 return 0x4000000000000000ULL
;
925 uint64_t helper_cmptle(uint64_t a
, uint64_t b
)
929 fa
= t_to_float64(a
);
930 fb
= t_to_float64(b
);
932 if (float64_le(fa
, fb
, &FP_STATUS
))
933 return 0x4000000000000000ULL
;
938 uint64_t helper_cmptlt(uint64_t a
, uint64_t b
)
942 fa
= t_to_float64(a
);
943 fb
= t_to_float64(b
);
945 if (float64_lt(fa
, fb
, &FP_STATUS
))
946 return 0x4000000000000000ULL
;
951 uint64_t helper_cmpgeq(uint64_t a
, uint64_t b
)
955 fa
= g_to_float64(a
);
956 fb
= g_to_float64(b
);
958 if (float64_eq(fa
, fb
, &FP_STATUS
))
959 return 0x4000000000000000ULL
;
964 uint64_t helper_cmpgle(uint64_t a
, uint64_t b
)
968 fa
= g_to_float64(a
);
969 fb
= g_to_float64(b
);
971 if (float64_le(fa
, fb
, &FP_STATUS
))
972 return 0x4000000000000000ULL
;
977 uint64_t helper_cmpglt(uint64_t a
, uint64_t b
)
981 fa
= g_to_float64(a
);
982 fb
= g_to_float64(b
);
984 if (float64_lt(fa
, fb
, &FP_STATUS
))
985 return 0x4000000000000000ULL
;
990 /* Floating point format conversion */
991 uint64_t helper_cvtts (uint64_t a
)
996 fa
= t_to_float64(a
);
997 fr
= float64_to_float32(fa
, &FP_STATUS
);
998 return float32_to_s(fr
);
1001 uint64_t helper_cvtst (uint64_t a
)
1006 fa
= s_to_float32(a
);
1007 fr
= float32_to_float64(fa
, &FP_STATUS
);
1008 return float64_to_t(fr
);
1011 uint64_t helper_cvtqs (uint64_t a
)
1013 float32 fr
= int64_to_float32(a
, &FP_STATUS
);
1014 return float32_to_s(fr
);
1017 /* Implement float64 to uint64 conversion without saturation -- we must
1018 supply the truncated result. This behaviour is used by the compiler
1019 to get unsigned conversion for free with the same instruction.
1021 The VI flag is set when overflow or inexact exceptions should be raised. */
1023 static inline uint64_t helper_cvttq_internal(uint64_t a
, int roundmode
, int VI
)
1025 uint64_t frac
, ret
= 0;
1026 uint32_t exp
, sign
, exc
= 0;
1030 exp
= (uint32_t)(a
>> 52) & 0x7ff;
1031 frac
= a
& 0xfffffffffffffull
;
1034 if (unlikely(frac
!= 0)) {
1037 } else if (exp
== 0x7ff) {
1038 exc
= (frac
? float_flag_invalid
: VI
? float_flag_overflow
: 0);
1040 /* Restore implicit bit. */
1041 frac
|= 0x10000000000000ull
;
1043 shift
= exp
- 1023 - 52;
1045 /* In this case the number is so large that we must shift
1046 the fraction left. There is no rounding to do. */
1048 ret
= frac
<< shift
;
1049 if (VI
&& (ret
>> shift
) != frac
) {
1050 exc
= float_flag_overflow
;
1056 /* In this case the number is smaller than the fraction as
1057 represented by the 52 bit number. Here we must think
1058 about rounding the result. Handle this by shifting the
1059 fractional part of the number into the high bits of ROUND.
1060 This will let us efficiently handle round-to-nearest. */
1063 ret
= frac
>> shift
;
1064 round
= frac
<< (64 - shift
);
1066 /* The exponent is so small we shift out everything.
1067 Leave a sticky bit for proper rounding below. */
1073 exc
= (VI
? float_flag_inexact
: 0);
1074 switch (roundmode
) {
1075 case float_round_nearest_even
:
1076 if (round
== (1ull << 63)) {
1077 /* Fraction is exactly 0.5; round to even. */
1079 } else if (round
> (1ull << 63)) {
1083 case float_round_to_zero
:
1085 case float_round_up
:
1088 case float_round_down
:
1098 if (unlikely(exc
)) {
1099 float_raise(exc
, &FP_STATUS
);
1105 uint64_t helper_cvttq(uint64_t a
)
1107 return helper_cvttq_internal(a
, FP_STATUS
.float_rounding_mode
, 1);
1110 uint64_t helper_cvttq_c(uint64_t a
)
1112 return helper_cvttq_internal(a
, float_round_to_zero
, 0);
1115 uint64_t helper_cvttq_svic(uint64_t a
)
1117 return helper_cvttq_internal(a
, float_round_to_zero
, 1);
1120 uint64_t helper_cvtqt (uint64_t a
)
1122 float64 fr
= int64_to_float64(a
, &FP_STATUS
);
1123 return float64_to_t(fr
);
1126 uint64_t helper_cvtqf (uint64_t a
)
1128 float32 fr
= int64_to_float32(a
, &FP_STATUS
);
1129 return float32_to_f(fr
);
1132 uint64_t helper_cvtgf (uint64_t a
)
1137 fa
= g_to_float64(a
);
1138 fr
= float64_to_float32(fa
, &FP_STATUS
);
1139 return float32_to_f(fr
);
1142 uint64_t helper_cvtgq (uint64_t a
)
1144 float64 fa
= g_to_float64(a
);
1145 return float64_to_int64_round_to_zero(fa
, &FP_STATUS
);
1148 uint64_t helper_cvtqg (uint64_t a
)
1151 fr
= int64_to_float64(a
, &FP_STATUS
);
1152 return float64_to_g(fr
);
1155 /* PALcode support special instructions */
1156 #if !defined (CONFIG_USER_ONLY)
1157 void helper_hw_rei (void)
1159 env
->pc
= env
->ipr
[IPR_EXC_ADDR
] & ~3;
1160 env
->ipr
[IPR_EXC_ADDR
] = env
->ipr
[IPR_EXC_ADDR
] & 1;
1162 /* XXX: re-enable interrupts and memory mapping */
1165 void helper_hw_ret (uint64_t a
)
1168 env
->ipr
[IPR_EXC_ADDR
] = a
& 1;
1170 /* XXX: re-enable interrupts and memory mapping */
1173 uint64_t helper_mfpr (int iprn
, uint64_t val
)
1177 if (cpu_alpha_mfpr(env
, iprn
, &tmp
) == 0)
1183 void helper_mtpr (int iprn
, uint64_t val
)
1185 cpu_alpha_mtpr(env
, iprn
, val
, NULL
);
1188 void helper_set_alt_mode (void)
1190 env
->saved_mode
= env
->ps
& 0xC;
1191 env
->ps
= (env
->ps
& ~0xC) | (env
->ipr
[IPR_ALT_MODE
] & 0xC);
1194 void helper_restore_mode (void)
1196 env
->ps
= (env
->ps
& ~0xC) | env
->saved_mode
;
1201 /*****************************************************************************/
1202 /* Softmmu support */
1203 #if !defined (CONFIG_USER_ONLY)
1205 /* XXX: the two following helpers are pure hacks.
1206 * Hopefully, we emulate the PALcode, then we should never see
1207 * HW_LD / HW_ST instructions.
1209 uint64_t helper_ld_virt_to_phys (uint64_t virtaddr
)
1211 uint64_t tlb_addr
, physaddr
;
1215 mmu_idx
= cpu_mmu_index(env
);
1216 index
= (virtaddr
>> TARGET_PAGE_BITS
) & (CPU_TLB_SIZE
- 1);
1218 tlb_addr
= env
->tlb_table
[mmu_idx
][index
].addr_read
;
1219 if ((virtaddr
& TARGET_PAGE_MASK
) ==
1220 (tlb_addr
& (TARGET_PAGE_MASK
| TLB_INVALID_MASK
))) {
1221 physaddr
= virtaddr
+ env
->tlb_table
[mmu_idx
][index
].addend
;
1223 /* the page is not in the TLB : fill it */
1225 tlb_fill(virtaddr
, 0, mmu_idx
, retaddr
);
1231 uint64_t helper_st_virt_to_phys (uint64_t virtaddr
)
1233 uint64_t tlb_addr
, physaddr
;
1237 mmu_idx
= cpu_mmu_index(env
);
1238 index
= (virtaddr
>> TARGET_PAGE_BITS
) & (CPU_TLB_SIZE
- 1);
1240 tlb_addr
= env
->tlb_table
[mmu_idx
][index
].addr_write
;
1241 if ((virtaddr
& TARGET_PAGE_MASK
) ==
1242 (tlb_addr
& (TARGET_PAGE_MASK
| TLB_INVALID_MASK
))) {
1243 physaddr
= virtaddr
+ env
->tlb_table
[mmu_idx
][index
].addend
;
1245 /* the page is not in the TLB : fill it */
1247 tlb_fill(virtaddr
, 1, mmu_idx
, retaddr
);
1253 void helper_ldl_raw(uint64_t t0
, uint64_t t1
)
1258 void helper_ldq_raw(uint64_t t0
, uint64_t t1
)
1263 void helper_ldl_l_raw(uint64_t t0
, uint64_t t1
)
1269 void helper_ldq_l_raw(uint64_t t0
, uint64_t t1
)
1275 void helper_ldl_kernel(uint64_t t0
, uint64_t t1
)
1280 void helper_ldq_kernel(uint64_t t0
, uint64_t t1
)
1285 void helper_ldl_data(uint64_t t0
, uint64_t t1
)
1290 void helper_ldq_data(uint64_t t0
, uint64_t t1
)
1295 void helper_stl_raw(uint64_t t0
, uint64_t t1
)
1300 void helper_stq_raw(uint64_t t0
, uint64_t t1
)
1305 uint64_t helper_stl_c_raw(uint64_t t0
, uint64_t t1
)
1309 if (t1
== env
->lock
) {
1320 uint64_t helper_stq_c_raw(uint64_t t0
, uint64_t t1
)
1324 if (t1
== env
->lock
) {
1335 #define MMUSUFFIX _mmu
1338 #include "softmmu_template.h"
1341 #include "softmmu_template.h"
1344 #include "softmmu_template.h"
1347 #include "softmmu_template.h"
1349 /* try to fill the TLB and return an exception if error. If retaddr is
1350 NULL, it means that the function was called in C code (i.e. not
1351 from generated code or from helper.c) */
1352 /* XXX: fix it to restore all registers */
1353 void tlb_fill (target_ulong addr
, int is_write
, int mmu_idx
, void *retaddr
)
1355 TranslationBlock
*tb
;
1356 CPUState
*saved_env
;
1360 /* XXX: hack to restore env in all cases, even if not called from
1363 env
= cpu_single_env
;
1364 ret
= cpu_alpha_handle_mmu_fault(env
, addr
, is_write
, mmu_idx
, 1);
1365 if (!likely(ret
== 0)) {
1366 if (likely(retaddr
)) {
1367 /* now we have a real cpu fault */
1368 pc
= (unsigned long)retaddr
;
1369 tb
= tb_find_pc(pc
);
1371 /* the PC is inside the translated code. It means that we have
1372 a virtual CPU fault */
1373 cpu_restore_state(tb
, env
, pc
, NULL
);
1376 /* Exception index and error code are already set */