/*
 *  Alpha emulation cpu micro-operations helpers for qemu.
 *
 *  Copyright (c) 2007 Jocelyn Mayer
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */
#include "exec.h"
#include "host-utils.h"
#include "softfloat.h"
void helper_tb_flush (void)

/*****************************************************************************/
/* Exceptions processing helpers */
void helper_excp (int excp, int error)
{
    env->exception_index = excp;
    env->error_code = error;
    cpu_loop_exit();
}
uint64_t helper_amask (uint64_t arg)
{
    switch (env->implver) {
        /* EV4, EV45, LCA, LCA45 & EV5 */
        /* ... */
    }
    return arg;
}
uint64_t helper_load_pcc (void)

uint64_t helper_load_implver (void)
{
    return env->implver;
}

uint64_t helper_load_fpcr (void)
{
    uint64_t ret = 0;
#ifdef CONFIG_SOFTFLOAT
    ret |= env->fp_status.float_exception_flags << 52;
    if (env->fp_status.float_exception_flags)
        ret |= 1ULL << 63;
    env->ipr[IPR_EXC_SUM] &= ~0x3E;
    env->ipr[IPR_EXC_SUM] |= env->fp_status.float_exception_flags << 1;
#endif
    switch (env->fp_status.float_rounding_mode) {
    case float_round_nearest_even:
        ret |= 2ULL << 58;
        break;
    case float_round_down:
        ret |= 1ULL << 58;
        break;
    case float_round_up:
        ret |= 3ULL << 58;
        break;
    case float_round_to_zero:
        break;
    }
    return ret;
}
void helper_store_fpcr (uint64_t val)
{
#ifdef CONFIG_SOFTFLOAT
    set_float_exception_flags((val >> 52) & 0x3F, &FP_STATUS);
#endif
    switch ((val >> 58) & 3) {
    case 0:
        set_float_rounding_mode(float_round_to_zero, &FP_STATUS);
        break;
    case 1:
        set_float_rounding_mode(float_round_down, &FP_STATUS);
        break;
    case 2:
        set_float_rounding_mode(float_round_nearest_even, &FP_STATUS);
        break;
    case 3:
        set_float_rounding_mode(float_round_up, &FP_STATUS);
        break;
    }
}
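/*
 * The two switches above assume the architected FPCR layout: the dynamic
 * rounding mode field DYN occupies bits <59:58> (0 = chopped, 1 = minus
 * infinity, 2 = normal/nearest even, 3 = plus infinity), the exception
 * status flags sit in bits <57:52> (hence the 0x3F mask), and bit 63 is
 * the exception summary bit.  Only the fields softfloat actually models
 * are converted here.
 */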
spinlock_t intr_cpu_lock = SPIN_LOCK_UNLOCKED;

uint64_t helper_rs(void)
{
    uint64_t tmp;

    spin_lock(&intr_cpu_lock);
    tmp = env->intr_flag;
    env->intr_flag = 1;
    spin_unlock(&intr_cpu_lock);

    return tmp;
}

uint64_t helper_rc(void)
{
    uint64_t tmp;

    spin_lock(&intr_cpu_lock);
    tmp = env->intr_flag;
    env->intr_flag = 0;
    spin_unlock(&intr_cpu_lock);

    return tmp;
}
uint64_t helper_addqv (uint64_t op1, uint64_t op2)
{
    uint64_t tmp = op1;
    op1 += op2;
    if (unlikely((tmp ^ op2 ^ (-1ULL)) & (tmp ^ op1) & (1ULL << 63))) {
        helper_excp(EXCP_ARITH, EXCP_ARITH_OVERFLOW);
    }
    return op1;
}

uint64_t helper_addlv (uint64_t op1, uint64_t op2)
{
    uint64_t tmp = op1;
    op1 = (uint32_t)(op1 + op2);
    if (unlikely((tmp ^ op2 ^ (-1UL)) & (tmp ^ op1) & (1UL << 31))) {
        helper_excp(EXCP_ARITH, EXCP_ARITH_OVERFLOW);
    }
    return op1;
}

uint64_t helper_subqv (uint64_t op1, uint64_t op2)
{
    uint64_t tmp = op1;
    op1 -= op2;
    if (unlikely(((~tmp) ^ op1 ^ (-1ULL)) & ((~tmp) ^ op2) & (1ULL << 63))) {
        helper_excp(EXCP_ARITH, EXCP_ARITH_OVERFLOW);
    }
    return op1;
}

uint64_t helper_sublv (uint64_t op1, uint64_t op2)
{
    uint64_t tmp = op1;
    op1 = (uint32_t)(op1 - op2);
    if (unlikely(((~tmp) ^ op1 ^ (-1UL)) & ((~tmp) ^ op2) & (1UL << 31))) {
        helper_excp(EXCP_ARITH, EXCP_ARITH_OVERFLOW);
    }
    return op1;
}

uint64_t helper_mullv (uint64_t op1, uint64_t op2)
{
    int64_t res = (int64_t)op1 * (int64_t)op2;

    if (unlikely((int32_t)res != res)) {
        helper_excp(EXCP_ARITH, EXCP_ARITH_OVERFLOW);
    }
    return (int64_t)((int32_t)res);
}

uint64_t helper_mulqv (uint64_t op1, uint64_t op2)
{
    uint64_t tl, th;

    muls64(&tl, &th, op1, op2);
    /* If th != 0 && th != -1, then we had an overflow */
    if (unlikely((th + 1) > 1)) {
        helper_excp(EXCP_ARITH, EXCP_ARITH_OVERFLOW);
    }
    return tl;
}
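/*
 * Note on the "(th + 1) > 1" test above: adding 1 in unsigned arithmetic
 * maps th == 0 to 1 and th == ~0ULL to 0, so the comparison is false
 * exactly when th is 0 or -1 -- the two values the high half takes when
 * the signed 128-bit product fits in 64 bits (th is then just the sign
 * extension of tl).  Any other high half means the product cannot be
 * represented, and EXCP_ARITH is raised.
 */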
uint64_t helper_umulh (uint64_t op1, uint64_t op2)
{
    uint64_t tl, th;

    mulu64(&tl, &th, op1, op2);
    return th;
}
uint64_t helper_ctpop (uint64_t arg)
{
    return ctpop64(arg);
}

uint64_t helper_ctlz (uint64_t arg)
{
    return clz64(arg);
}

uint64_t helper_cttz (uint64_t arg)
{
    return ctz64(arg);
}
static always_inline uint64_t byte_zap (uint64_t op, uint8_t mskb)
{
    uint64_t mask;

    mask = 0;
    mask |= ((mskb >> 0) & 1) * 0x00000000000000FFULL;
    mask |= ((mskb >> 1) & 1) * 0x000000000000FF00ULL;
    mask |= ((mskb >> 2) & 1) * 0x0000000000FF0000ULL;
    mask |= ((mskb >> 3) & 1) * 0x00000000FF000000ULL;
    mask |= ((mskb >> 4) & 1) * 0x000000FF00000000ULL;
    mask |= ((mskb >> 5) & 1) * 0x0000FF0000000000ULL;
    mask |= ((mskb >> 6) & 1) * 0x00FF000000000000ULL;
    mask |= ((mskb >> 7) & 1) * 0xFF00000000000000ULL;

    return op & ~mask;
}
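/*
 * byte_zap() clears every byte of op whose bit is set in mskb (bit i
 * selects byte i, least significant first).  A small worked example,
 * illustrative values only:
 *
 *     byte_zap(0x1122334455667788ULL, 0x01) == 0x1122334455667700ULL
 *     byte_zap(0x1122334455667788ULL, 0x81) == 0x0022334455667700ULL
 *
 * The MSKxx helpers below pass the positive byte mask (bytes to clear),
 * while the INSxx helpers pass the complemented mask so that only the
 * shifted-in bytes survive.
 */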
uint64_t helper_mskbl(uint64_t val, uint64_t mask)
{
    return byte_zap(val, 0x01 << (mask & 7));
}

uint64_t helper_insbl(uint64_t val, uint64_t mask)
{
    val <<= (mask & 7) * 8;
    return byte_zap(val, ~(0x01 << (mask & 7)));
}

uint64_t helper_mskwl(uint64_t val, uint64_t mask)
{
    return byte_zap(val, 0x03 << (mask & 7));
}

uint64_t helper_inswl(uint64_t val, uint64_t mask)
{
    val <<= (mask & 7) * 8;
    return byte_zap(val, ~(0x03 << (mask & 7)));
}

uint64_t helper_mskll(uint64_t val, uint64_t mask)
{
    return byte_zap(val, 0x0F << (mask & 7));
}

uint64_t helper_insll(uint64_t val, uint64_t mask)
{
    val <<= (mask & 7) * 8;
    return byte_zap(val, ~(0x0F << (mask & 7)));
}

uint64_t helper_zap(uint64_t val, uint64_t mask)
{
    return byte_zap(val, mask);
}

uint64_t helper_zapnot(uint64_t val, uint64_t mask)
{
    return byte_zap(val, ~mask);
}

uint64_t helper_mskql(uint64_t val, uint64_t mask)
{
    return byte_zap(val, 0xFF << (mask & 7));
}

uint64_t helper_insql(uint64_t val, uint64_t mask)
{
    val <<= (mask & 7) * 8;
    return byte_zap(val, ~(0xFF << (mask & 7)));
}

uint64_t helper_mskwh(uint64_t val, uint64_t mask)
{
    return byte_zap(val, (0x03 << (mask & 7)) >> 8);
}

uint64_t helper_inswh(uint64_t val, uint64_t mask)
{
    val >>= 64 - ((mask & 7) * 8);
    return byte_zap(val, ~((0x03 << (mask & 7)) >> 8));
}

uint64_t helper_msklh(uint64_t val, uint64_t mask)
{
    return byte_zap(val, (0x0F << (mask & 7)) >> 8);
}

uint64_t helper_inslh(uint64_t val, uint64_t mask)
{
    val >>= 64 - ((mask & 7) * 8);
    return byte_zap(val, ~((0x0F << (mask & 7)) >> 8));
}

uint64_t helper_mskqh(uint64_t val, uint64_t mask)
{
    return byte_zap(val, (0xFF << (mask & 7)) >> 8);
}

uint64_t helper_insqh(uint64_t val, uint64_t mask)
{
    val >>= 64 - ((mask & 7) * 8);
    return byte_zap(val, ~((0xFF << (mask & 7)) >> 8));
}
uint64_t helper_cmpbge (uint64_t op1, uint64_t op2)
{
    uint8_t opa, opb, res;
    int i;

    res = 0;
    for (i = 0; i < 8; i++) {
        opa = op1 >> (i * 8);
        opb = op2 >> (i * 8);
        if (opa >= opb)
            res |= 1 << i;
    }
    return res;
}
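/*
 * CMPBGE is the building block of the classic Alpha string idioms: result
 * bit i is set when byte i of op1 is >= byte i of op2 (unsigned).  Two
 * simple consequences worth keeping in mind when reading translated code:
 *
 *     helper_cmpbge(x, 0) == 0xff                  for any x
 *     helper_cmpbge(0, x) sets bit i only where byte i of x is zero
 *
 * which is exactly how a zero-byte scan (e.g. strlen) is implemented.
 */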
/*****************************************************************************/
/* Floating point helpers */

/* F floating (VAX) */
static always_inline uint64_t float32_to_f (float32 fa)
{
    uint64_t r, exp, mant, sig;
    CPU_FloatU a;

    a.f = fa;
    sig = ((uint64_t)a.l & 0x80000000) << 32;
    exp = (a.l >> 23) & 0xff;
    mant = ((uint64_t)a.l & 0x007fffff) << 29;

    if (exp == 255) {
        /* NaN or infinity */
        r = 1; /* VAX dirty zero */
    } else if (exp == 0) {
        /* ... */
        r = sig | ((exp + 1) << 52) | mant;
        /* ... */
    } else {
        /* ... */
        r = 1; /* VAX dirty zero */
        /* ... */
        r = sig | ((exp + 2) << 52);
    }

    return r;
}
static always_inline float32 f_to_float32 (uint64_t a)
{
    uint32_t exp, mant_sig;
    CPU_FloatU r;

    exp = ((a >> 55) & 0x80) | ((a >> 52) & 0x7f);
    mant_sig = ((a >> 32) & 0x80000000) | ((a >> 29) & 0x007fffff);

    if (unlikely(!exp && mant_sig)) {
        /* Reserved operands / Dirty zero */
        helper_excp(EXCP_OPCDEC, 0);
    }

    /* ... */
        r.l = ((exp - 2) << 23) | mant_sig;
    /* ... */

    return r.f;
}
uint32_t helper_f_to_memory (uint64_t a)
{
    uint32_t r;

    r = (a & 0x00001fffe0000000ull) >> 13;
    r |= (a & 0x07ffe00000000000ull) >> 45;
    r |= (a & 0xc000000000000000ull) >> 48;
    return r;
}

uint64_t helper_memory_to_f (uint32_t a)
{
    uint64_t r;

    r = ((uint64_t)(a & 0x0000c000)) << 48;
    r |= ((uint64_t)(a & 0x003fffff)) << 45;
    r |= ((uint64_t)(a & 0xffff0000)) << 13;
    if (!(a & 0x00004000)) {
        /* ... */
    }

    return r;
}
uint64_t helper_addf (uint64_t a, uint64_t b)
{
    float32 fa, fb, fr;

    fa = f_to_float32(a);
    fb = f_to_float32(b);
    fr = float32_add(fa, fb, &FP_STATUS);
    return float32_to_f(fr);
}

uint64_t helper_subf (uint64_t a, uint64_t b)
{
    float32 fa, fb, fr;

    fa = f_to_float32(a);
    fb = f_to_float32(b);
    fr = float32_sub(fa, fb, &FP_STATUS);
    return float32_to_f(fr);
}

uint64_t helper_mulf (uint64_t a, uint64_t b)
{
    float32 fa, fb, fr;

    fa = f_to_float32(a);
    fb = f_to_float32(b);
    fr = float32_mul(fa, fb, &FP_STATUS);
    return float32_to_f(fr);
}

uint64_t helper_divf (uint64_t a, uint64_t b)
{
    float32 fa, fb, fr;

    fa = f_to_float32(a);
    fb = f_to_float32(b);
    fr = float32_div(fa, fb, &FP_STATUS);
    return float32_to_f(fr);
}

uint64_t helper_sqrtf (uint64_t t)
{
    float32 ft, fr;

    ft = f_to_float32(t);
    fr = float32_sqrt(ft, &FP_STATUS);
    return float32_to_f(fr);
}
/* G floating (VAX) */
static always_inline uint64_t float64_to_g (float64 fa)
{
    uint64_t r, exp, mant, sig;
    CPU_DoubleU a;

    a.d = fa;
    sig = a.ll & 0x8000000000000000ull;
    exp = (a.ll >> 52) & 0x7ff;
    mant = a.ll & 0x000fffffffffffffull;

    if (exp == 2047) {
        /* NaN or infinity */
        r = 1; /* VAX dirty zero */
    } else if (exp == 0) {
        /* ... */
        r = sig | ((exp + 1) << 52) | mant;
        /* ... */
    } else {
        /* ... */
        r = 1; /* VAX dirty zero */
        /* ... */
        r = sig | ((exp + 2) << 52);
    }

    return r;
}
static always_inline float64 g_to_float64 (uint64_t a)
{
    uint64_t exp, mant_sig;
    CPU_DoubleU r;

    exp = (a >> 52) & 0x7ff;
    mant_sig = a & 0x800fffffffffffffull;

    if (!exp && mant_sig) {
        /* Reserved operands / Dirty zero */
        helper_excp(EXCP_OPCDEC, 0);
    }

    /* ... */
        r.ll = ((exp - 2) << 52) | mant_sig;
    /* ... */

    return r.d;
}
uint64_t helper_g_to_memory (uint64_t a)
{
    uint64_t r;

    r = (a & 0x000000000000ffffull) << 48;
    r |= (a & 0x00000000ffff0000ull) << 16;
    r |= (a & 0x0000ffff00000000ull) >> 16;
    r |= (a & 0xffff000000000000ull) >> 48;
    return r;
}

uint64_t helper_memory_to_g (uint64_t a)
{
    uint64_t r;

    r = (a & 0x000000000000ffffull) << 48;
    r |= (a & 0x00000000ffff0000ull) << 16;
    r |= (a & 0x0000ffff00000000ull) >> 16;
    r |= (a & 0xffff000000000000ull) >> 48;
    return r;
}
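/*
 * Both helpers above apply the same 16-bit word swap: G-format values are
 * stored in memory with their four words reversed relative to the register
 * layout, and reversing the words twice restores the original order, so a
 * single shuffle serves for both directions.  The F-format pair above is
 * not symmetric in this way, which is why helper_f_to_memory and
 * helper_memory_to_f use different shift patterns.
 */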
uint64_t helper_addg (uint64_t a, uint64_t b)
{
    float64 fa, fb, fr;

    fa = g_to_float64(a);
    fb = g_to_float64(b);
    fr = float64_add(fa, fb, &FP_STATUS);
    return float64_to_g(fr);
}

uint64_t helper_subg (uint64_t a, uint64_t b)
{
    float64 fa, fb, fr;

    fa = g_to_float64(a);
    fb = g_to_float64(b);
    fr = float64_sub(fa, fb, &FP_STATUS);
    return float64_to_g(fr);
}

uint64_t helper_mulg (uint64_t a, uint64_t b)
{
    float64 fa, fb, fr;

    fa = g_to_float64(a);
    fb = g_to_float64(b);
    fr = float64_mul(fa, fb, &FP_STATUS);
    return float64_to_g(fr);
}

uint64_t helper_divg (uint64_t a, uint64_t b)
{
    float64 fa, fb, fr;

    fa = g_to_float64(a);
    fb = g_to_float64(b);
    fr = float64_div(fa, fb, &FP_STATUS);
    return float64_to_g(fr);
}

uint64_t helper_sqrtg (uint64_t a)
{
    float64 fa, fr;

    fa = g_to_float64(a);
    fr = float64_sqrt(fa, &FP_STATUS);
    return float64_to_g(fr);
}
/* S floating (single) */
static always_inline uint64_t float32_to_s (float32 fa)
{
    CPU_FloatU a;
    uint64_t r;

    a.f = fa;
    r = (((uint64_t)(a.l & 0xc0000000)) << 32) | (((uint64_t)(a.l & 0x3fffffff)) << 29);
    if (((a.l & 0x7f800000) != 0x7f800000) && (!(a.l & 0x40000000))) {
        /* ... */
    }
    return r;
}

static always_inline float32 s_to_float32 (uint64_t a)
{
    CPU_FloatU r;
    r.l = ((a >> 32) & 0xc0000000) | ((a >> 29) & 0x3fffffff);
    return r.f;
}
uint32_t helper_s_to_memory (uint64_t a)
{
    /* Memory format is the same as float32 */
    float32 fa = s_to_float32(a);
    return *(uint32_t*)(&fa);
}

uint64_t helper_memory_to_s (uint32_t a)
{
    /* Memory format is the same as float32 */
    return float32_to_s(*(float32 *)(&a));
}
uint64_t helper_adds (uint64_t a, uint64_t b)
{
    float32 fa, fb, fr;

    fa = s_to_float32(a);
    fb = s_to_float32(b);
    fr = float32_add(fa, fb, &FP_STATUS);
    return float32_to_s(fr);
}

uint64_t helper_subs (uint64_t a, uint64_t b)
{
    float32 fa, fb, fr;

    fa = s_to_float32(a);
    fb = s_to_float32(b);
    fr = float32_sub(fa, fb, &FP_STATUS);
    return float32_to_s(fr);
}

uint64_t helper_muls (uint64_t a, uint64_t b)
{
    float32 fa, fb, fr;

    fa = s_to_float32(a);
    fb = s_to_float32(b);
    fr = float32_mul(fa, fb, &FP_STATUS);
    return float32_to_s(fr);
}

uint64_t helper_divs (uint64_t a, uint64_t b)
{
    float32 fa, fb, fr;

    fa = s_to_float32(a);
    fb = s_to_float32(b);
    fr = float32_div(fa, fb, &FP_STATUS);
    return float32_to_s(fr);
}

uint64_t helper_sqrts (uint64_t a)
{
    float32 fa, fr;

    fa = s_to_float32(a);
    fr = float32_sqrt(fa, &FP_STATUS);
    return float32_to_s(fr);
}
/* T floating (double) */
static always_inline float64 t_to_float64 (uint64_t a)
{
    /* Memory format is the same as float64 */
    CPU_DoubleU r;
    r.ll = a;
    return r.d;
}

static always_inline uint64_t float64_to_t (float64 fa)
{
    /* Memory format is the same as float64 */
    CPU_DoubleU r;
    r.d = fa;
    return r.ll;
}
uint64_t helper_addt (uint64_t a, uint64_t b)
{
    float64 fa, fb, fr;

    fa = t_to_float64(a);
    fb = t_to_float64(b);
    fr = float64_add(fa, fb, &FP_STATUS);
    return float64_to_t(fr);
}

uint64_t helper_subt (uint64_t a, uint64_t b)
{
    float64 fa, fb, fr;

    fa = t_to_float64(a);
    fb = t_to_float64(b);
    fr = float64_sub(fa, fb, &FP_STATUS);
    return float64_to_t(fr);
}

uint64_t helper_mult (uint64_t a, uint64_t b)
{
    float64 fa, fb, fr;

    fa = t_to_float64(a);
    fb = t_to_float64(b);
    fr = float64_mul(fa, fb, &FP_STATUS);
    return float64_to_t(fr);
}

uint64_t helper_divt (uint64_t a, uint64_t b)
{
    float64 fa, fb, fr;

    fa = t_to_float64(a);
    fb = t_to_float64(b);
    fr = float64_div(fa, fb, &FP_STATUS);
    return float64_to_t(fr);
}

uint64_t helper_sqrtt (uint64_t a)
{
    float64 fa, fr;

    fa = t_to_float64(a);
    fr = float64_sqrt(fa, &FP_STATUS);
    return float64_to_t(fr);
}
uint64_t helper_cpys(uint64_t a, uint64_t b)
{
    return (a & 0x8000000000000000ULL) | (b & ~0x8000000000000000ULL);
}

uint64_t helper_cpysn(uint64_t a, uint64_t b)
{
    return ((~a) & 0x8000000000000000ULL) | (b & ~0x8000000000000000ULL);
}

uint64_t helper_cpyse(uint64_t a, uint64_t b)
{
    return (a & 0xFFF0000000000000ULL) | (b & ~0xFFF0000000000000ULL);
}
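/*
 * The CPYS family operates purely on bit patterns, so no softfloat state
 * is touched: CPYS copies the sign bit of a onto b, CPYSN copies the
 * inverted sign (giving negation and absolute-value idioms), and CPYSE
 * copies sign plus the exponent field (bits <63:52>).  For example,
 * helper_cpysn(x, x) negates x and helper_cpys(0, x) computes fabs(x).
 */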
uint64_t helper_cmptun (uint64_t a, uint64_t b)
{
    float64 fa, fb;

    fa = t_to_float64(a);
    fb = t_to_float64(b);

    if (float64_is_nan(fa) || float64_is_nan(fb))
        return 0x4000000000000000ULL;
    else
        return 0;
}

uint64_t helper_cmpteq(uint64_t a, uint64_t b)
{
    float64 fa, fb;

    fa = t_to_float64(a);
    fb = t_to_float64(b);

    if (float64_eq(fa, fb, &FP_STATUS))
        return 0x4000000000000000ULL;
    else
        return 0;
}

uint64_t helper_cmptle(uint64_t a, uint64_t b)
{
    float64 fa, fb;

    fa = t_to_float64(a);
    fb = t_to_float64(b);

    if (float64_le(fa, fb, &FP_STATUS))
        return 0x4000000000000000ULL;
    else
        return 0;
}

uint64_t helper_cmptlt(uint64_t a, uint64_t b)
{
    float64 fa, fb;

    fa = t_to_float64(a);
    fb = t_to_float64(b);

    if (float64_lt(fa, fb, &FP_STATUS))
        return 0x4000000000000000ULL;
    else
        return 0;
}

uint64_t helper_cmpgeq(uint64_t a, uint64_t b)
{
    float64 fa, fb;

    fa = g_to_float64(a);
    fb = g_to_float64(b);

    if (float64_eq(fa, fb, &FP_STATUS))
        return 0x4000000000000000ULL;
    else
        return 0;
}

uint64_t helper_cmpgle(uint64_t a, uint64_t b)
{
    float64 fa, fb;

    fa = g_to_float64(a);
    fb = g_to_float64(b);

    if (float64_le(fa, fb, &FP_STATUS))
        return 0x4000000000000000ULL;
    else
        return 0;
}

uint64_t helper_cmpglt(uint64_t a, uint64_t b)
{
    float64 fa, fb;

    fa = g_to_float64(a);
    fb = g_to_float64(b);

    if (float64_lt(fa, fb, &FP_STATUS))
        return 0x4000000000000000ULL;
    else
        return 0;
}
uint64_t helper_cmpfeq (uint64_t a)
{
    return !(a & 0x7FFFFFFFFFFFFFFFULL);
}

uint64_t helper_cmpfne (uint64_t a)
{
    return (a & 0x7FFFFFFFFFFFFFFFULL);
}

uint64_t helper_cmpflt (uint64_t a)
{
    return (a & 0x8000000000000000ULL) && (a & 0x7FFFFFFFFFFFFFFFULL);
}

uint64_t helper_cmpfle (uint64_t a)
{
    return (a & 0x8000000000000000ULL) || !(a & 0x7FFFFFFFFFFFFFFFULL);
}

uint64_t helper_cmpfgt (uint64_t a)
{
    return !(a & 0x8000000000000000ULL) && (a & 0x7FFFFFFFFFFFFFFFULL);
}

uint64_t helper_cmpfge (uint64_t a)
{
    return !(a & 0x8000000000000000ULL) || !(a & 0x7FFFFFFFFFFFFFFFULL);
}
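/*
 * These comparisons treat the operand as a sign-magnitude encoding and
 * only ever compare against zero, which is why plain bit tests suffice:
 * bit 63 is the sign, and the remaining 63 bits being all zero means the
 * value is (plus or minus) zero.  Masking with 0x7FFFFFFFFFFFFFFF makes
 * -0 and +0 compare equal; for instance, helper_cmpfeq() returns true for
 * both 0x0000000000000000 and 0x8000000000000000.
 */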
/* Floating point format conversion */
uint64_t helper_cvtts (uint64_t a)
{
    float64 fa;
    float32 fr;

    fa = t_to_float64(a);
    fr = float64_to_float32(fa, &FP_STATUS);
    return float32_to_s(fr);
}

uint64_t helper_cvtst (uint64_t a)
{
    float32 fa;
    float64 fr;

    fa = s_to_float32(a);
    fr = float32_to_float64(fa, &FP_STATUS);
    return float64_to_t(fr);
}

uint64_t helper_cvtqs (uint64_t a)
{
    float32 fr = int64_to_float32(a, &FP_STATUS);
    return float32_to_s(fr);
}

uint64_t helper_cvttq (uint64_t a)
{
    float64 fa = t_to_float64(a);
    return float64_to_int64_round_to_zero(fa, &FP_STATUS);
}

uint64_t helper_cvtqt (uint64_t a)
{
    float64 fr = int64_to_float64(a, &FP_STATUS);
    return float64_to_t(fr);
}

uint64_t helper_cvtqf (uint64_t a)
{
    float32 fr = int64_to_float32(a, &FP_STATUS);
    return float32_to_f(fr);
}

uint64_t helper_cvtgf (uint64_t a)
{
    float64 fa;
    float32 fr;

    fa = g_to_float64(a);
    fr = float64_to_float32(fa, &FP_STATUS);
    return float32_to_f(fr);
}

uint64_t helper_cvtgq (uint64_t a)
{
    float64 fa = g_to_float64(a);
    return float64_to_int64_round_to_zero(fa, &FP_STATUS);
}

uint64_t helper_cvtqg (uint64_t a)
{
    float64 fr;

    fr = int64_to_float64(a, &FP_STATUS);
    return float64_to_g(fr);
}
uint64_t helper_cvtlq (uint64_t a)
{
    return (int64_t)((int32_t)((a >> 32) | ((a >> 29) & 0x3FFFFFFF)));
}
static always_inline uint64_t __helper_cvtql (uint64_t a, int s, int v)
{
    uint64_t r;

    r = ((uint64_t)(a & 0xC0000000)) << 32;
    r |= ((uint64_t)(a & 0x7FFFFFFF)) << 29;

    if (v && (int64_t)((int32_t)r) != (int64_t)r) {
        helper_excp(EXCP_ARITH, EXCP_ARITH_OVERFLOW);
    }
    /* ... */
    return r;
}
uint64_t helper_cvtql (uint64_t a)
{
    return __helper_cvtql(a, 0, 0);
}

uint64_t helper_cvtqlv (uint64_t a)
{
    return __helper_cvtql(a, 0, 1);
}

uint64_t helper_cvtqlsv (uint64_t a)
{
    return __helper_cvtql(a, 1, 1);
}
/* PALcode support special instructions */
#if !defined (CONFIG_USER_ONLY)
void helper_hw_rei (void)
{
    env->pc = env->ipr[IPR_EXC_ADDR] & ~3;
    env->ipr[IPR_EXC_ADDR] = env->ipr[IPR_EXC_ADDR] & 1;
    /* XXX: re-enable interrupts and memory mapping */
}

void helper_hw_ret (uint64_t a)
{
    env->pc = a & ~3;
    env->ipr[IPR_EXC_ADDR] = a & 1;
    /* XXX: re-enable interrupts and memory mapping */
}

uint64_t helper_mfpr (int iprn, uint64_t val)
{
    uint64_t tmp;

    if (cpu_alpha_mfpr(env, iprn, &tmp) == 0)
        val = tmp;

    return val;
}

void helper_mtpr (int iprn, uint64_t val)
{
    cpu_alpha_mtpr(env, iprn, val, NULL);
}

void helper_set_alt_mode (void)
{
    env->saved_mode = env->ps & 0xC;
    env->ps = (env->ps & ~0xC) | (env->ipr[IPR_ALT_MODE] & 0xC);
}

void helper_restore_mode (void)
{
    env->ps = (env->ps & ~0xC) | env->saved_mode;
}

#endif
/*****************************************************************************/
/* Softmmu support */
#if !defined (CONFIG_USER_ONLY)

/* XXX: the two following helpers are pure hacks.
 *      Hopefully, once we emulate the PALcode we should never see
 *      HW_LD / HW_ST instructions.
 */
uint64_t helper_ld_virt_to_phys (uint64_t virtaddr)
{
    uint64_t tlb_addr, physaddr;
    int index, mmu_idx;
    void *retaddr;

    mmu_idx = cpu_mmu_index(env);
    index = (virtaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    /* ... */
    tlb_addr = env->tlb_table[mmu_idx][index].addr_read;
    if ((virtaddr & TARGET_PAGE_MASK) ==
        (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
        physaddr = virtaddr + env->tlb_table[mmu_idx][index].addend;
    } else {
        /* the page is not in the TLB : fill it */
        /* ... */
        tlb_fill(virtaddr, 0, mmu_idx, retaddr);
        /* ... */
    }
    return physaddr;
}
uint64_t helper_st_virt_to_phys (uint64_t virtaddr)
{
    uint64_t tlb_addr, physaddr;
    int index, mmu_idx;
    void *retaddr;

    mmu_idx = cpu_mmu_index(env);
    index = (virtaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    /* ... */
    tlb_addr = env->tlb_table[mmu_idx][index].addr_write;
    if ((virtaddr & TARGET_PAGE_MASK) ==
        (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
        physaddr = virtaddr + env->tlb_table[mmu_idx][index].addend;
    } else {
        /* the page is not in the TLB : fill it */
        /* ... */
        tlb_fill(virtaddr, 1, mmu_idx, retaddr);
        /* ... */
    }
    return physaddr;
}
void helper_ldl_raw(uint64_t t0, uint64_t t1)

void helper_ldq_raw(uint64_t t0, uint64_t t1)

void helper_ldl_l_raw(uint64_t t0, uint64_t t1)

void helper_ldq_l_raw(uint64_t t0, uint64_t t1)

void helper_ldl_kernel(uint64_t t0, uint64_t t1)

void helper_ldq_kernel(uint64_t t0, uint64_t t1)

void helper_ldl_data(uint64_t t0, uint64_t t1)

void helper_ldq_data(uint64_t t0, uint64_t t1)

void helper_stl_raw(uint64_t t0, uint64_t t1)

void helper_stq_raw(uint64_t t0, uint64_t t1)

uint64_t helper_stl_c_raw(uint64_t t0, uint64_t t1)
{
    if (t1 == env->lock) {
        /* ... */
    }
    /* ... */
}

uint64_t helper_stq_c_raw(uint64_t t0, uint64_t t1)
{
    if (t1 == env->lock) {
        /* ... */
    }
    /* ... */
}
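/*
 * The sequences above back the LDx_L / STx_C pairs: env->lock remembers
 * the address of the last load-locked, and a store-conditional only
 * performs the store when it targets that same address.  This is a
 * single-CPU approximation of the architected lock flag; it does not
 * model another processor stealing the lock.
 */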
#define MMUSUFFIX _mmu

#define SHIFT 0
#include "softmmu_template.h"

#define SHIFT 1
#include "softmmu_template.h"

#define SHIFT 2
#include "softmmu_template.h"

#define SHIFT 3
#include "softmmu_template.h"
/* Try to fill the TLB and return an exception on error. If retaddr is
   NULL, it means that the function was called in C code (i.e. not
   from generated code or from helper.c) */
/* XXX: fix it to restore all registers */
void tlb_fill (target_ulong addr, int is_write, int mmu_idx, void *retaddr)
{
    TranslationBlock *tb;
    CPUState *saved_env;
    unsigned long pc;
    int ret;

    /* XXX: hack to restore env in all cases, even if not called from
       generated code */
    saved_env = env;
    env = cpu_single_env;
    ret = cpu_alpha_handle_mmu_fault(env, addr, is_write, mmu_idx, 1);
    if (!likely(ret == 0)) {
        if (likely(retaddr)) {
            /* now we have a real cpu fault */
            pc = (unsigned long)retaddr;
            tb = tb_find_pc(pc);
            if (likely(tb)) {
                /* the PC is inside the translated code. It means that we have
                   a virtual CPU fault */
                cpu_restore_state(tb, env, pc, NULL);
            }
        }
        /* Exception index and error code are already set */
        cpu_loop_exit();
    }
    env = saved_env;
}

#endif