2 * Alpha emulation cpu micro-operations helpers for qemu.
4 * Copyright (c) 2007 Jocelyn Mayer
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
22 #include "softfloat.h"
24 #include "op_helper.h"
26 #define MEMSUFFIX _raw
27 #include "op_helper_mem.h"
29 #if !defined(CONFIG_USER_ONLY)
30 #define MEMSUFFIX _user
31 #include "op_helper_mem.h"
33 #define MEMSUFFIX _kernel
34 #include "op_helper_mem.h"
36 /* Those are used for supervisor and executive modes */
37 #define MEMSUFFIX _data
38 #include "op_helper_mem.h"
/* NOTE(review): extraction-garbled fragment — the original file's line
 * numbers are fused into the text and the function bodies are missing.
 * helper_tb_flush presumably flushes the translation-block cache and the
 * two EA helpers are debug printers — TODO confirm against upstream QEMU
 * before compiling.
 */
41 void helper_tb_flush (void)
46 void cpu_dump_EA (target_ulong EA
);
47 void helper_print_mem_EA (target_ulong EA
)
52 /*****************************************************************************/
53 /* Exceptions processing helpers */
54 void helper_excp (uint32_t excp
, uint32_t error
)
56 env
->exception_index
= excp
;
57 env
->error_code
= error
;
/* NOTE(review): garbled fragment — only the switch head survives.  The
 * helper presumably masks T0 by the CPU's architecture-mask based on
 * env->implver (the case bodies are missing) — TODO restore from upstream.
 */
61 void helper_amask (void)
63 switch (env
->implver
) {
65 /* EV4, EV45, LCA, LCA45 & EV5 */
/* NOTE(review): bodies missing in this extraction.  load_pcc presumably
 * reads the processor cycle counter into T0; load_implver presumably loads
 * env->implver into T0 — TODO restore from upstream QEMU. */
75 void helper_load_pcc (void)
81 void helper_load_implver (void)
86 void helper_load_fpcr (void)
89 #ifdef CONFIG_SOFTFLOAT
90 T0
|= env
->fp_status
.float_exception_flags
<< 52;
91 if (env
->fp_status
.float_exception_flags
)
93 env
->ipr
[IPR_EXC_SUM
] &= ~0x3E:
94 env
->ipr
[IPR_EXC_SUM
] |= env
->fp_status
.float_exception_flags
<< 1;
96 switch (env
->fp_status
.float_rounding_mode
) {
97 case float_round_nearest_even
:
100 case float_round_down
:
106 case float_round_to_zero
:
111 void helper_store_fpcr (void)
113 #ifdef CONFIG_SOFTFLOAT
114 set_float_exception_flags((T0
>> 52) & 0x3F, &FP_STATUS
);
116 switch ((T0
>> 58) & 3) {
118 set_float_rounding_mode(float_round_to_zero
, &FP_STATUS
);
121 set_float_rounding_mode(float_round_down
, &FP_STATUS
);
124 set_float_rounding_mode(float_round_nearest_even
, &FP_STATUS
);
127 set_float_rounding_mode(float_round_up
, &FP_STATUS
);
/* NOTE(review): bodies missing in this extraction — presumably interrupt
 * request flag accessors (likely "XXX: TODO" stubs in the original);
 * restore from upstream QEMU. */
132 void helper_load_irf (void)
138 void helper_set_irf (void)
143 void helper_clear_irf (void)
148 void helper_addqv (void)
152 if (unlikely((T2
^ T1
^ (-1ULL)) & (T2
^ T0
) & (1ULL << 63))) {
153 helper_excp(EXCP_ARITH
, EXCP_ARITH_OVERFLOW
);
157 void helper_addlv (void)
160 T0
= (uint32_t)(T0
+ T1
);
161 if (unlikely((T2
^ T1
^ (-1UL)) & (T2
^ T0
) & (1UL << 31))) {
162 helper_excp(EXCP_ARITH
, EXCP_ARITH_OVERFLOW
);
166 void helper_subqv (void)
170 if (unlikely(((~T2
) ^ T0
^ (-1ULL)) & ((~T2
) ^ T1
) & (1ULL << 63))) {
171 helper_excp(EXCP_ARITH
, EXCP_ARITH_OVERFLOW
);
175 void helper_sublv (void)
178 T0
= (uint32_t)(T0
- T1
);
179 if (unlikely(((~T2
) ^ T0
^ (-1UL)) & ((~T2
) ^ T1
) & (1UL << 31))) {
180 helper_excp(EXCP_ARITH
, EXCP_ARITH_OVERFLOW
);
184 void helper_mullv (void)
186 int64_t res
= (int64_t)T0
* (int64_t)T1
;
188 if (unlikely((int32_t)res
!= res
)) {
189 helper_excp(EXCP_ARITH
, EXCP_ARITH_OVERFLOW
);
191 T0
= (int64_t)((int32_t)res
);
196 uint64_t res
, tmp0
, tmp1
;
198 res
= (T0
>> 32) * (T1
>> 32);
199 tmp0
= ((T0
& 0xFFFFFFFF) * (T1
>> 32)) +
200 ((T0
>> 32) * (T1
& 0xFFFFFFFF));
201 tmp1
= (T0
& 0xFFFFFFFF) * (T1
& 0xFFFFFFFF);
205 if (unlikely(res
!= 0)) {
206 helper_excp(EXCP_ARITH
, EXCP_ARITH_OVERFLOW
);
210 void helper_umulh (void)
214 tmp0
= ((T0
& 0xFFFFFFFF) * (T1
>> 32)) +
215 ((T0
>> 32) * (T1
& 0xFFFFFFFF));
216 tmp1
= (T0
& 0xFFFFFFFF) * (T1
& 0xFFFFFFFF);
218 T0
= (T0
>> 32) * (T0
>> 32);
222 void helper_ctpop (void)
226 for (n
= 0; T0
!= 0; n
++)
231 void helper_ctlz (void)
237 if (!(T0
& 0xFFFFFFFF00000000ULL
)) {
241 /* Make it easier for 32 bits hosts */
243 if (!(op32
& 0xFFFF0000UL
)) {
247 if (!(op32
& 0xFF000000UL
)) {
251 if (!(op32
& 0xF0000000UL
)) {
255 if (!(op32
& 0xC0000000UL
)) {
259 if (!(op32
& 0x80000000UL
)) {
263 if (!(op32
& 0x80000000UL
)) {
269 void helper_cttz (void)
275 if (!(T0
& 0x00000000FFFFFFFFULL
)) {
279 /* Make it easier for 32 bits hosts */
281 if (!(op32
& 0x0000FFFFUL
)) {
285 if (!(op32
& 0x000000FFUL
)) {
289 if (!(op32
& 0x0000000FUL
)) {
293 if (!(op32
& 0x00000003UL
)) {
297 if (!(op32
& 0x00000001UL
)) {
301 if (!(op32
& 0x00000001UL
)) {
/* Zero each byte of op whose corresponding bit is set in mskb: bit i of
 * mskb selects byte i (little-endian byte numbering).  Shared by all the
 * MSK/EXT/INS/ZAP byte-lane helpers below.
 */
static inline uint64_t byte_zap (uint64_t op, uint8_t mskb)
{
    uint64_t mask;

    mask = 0;
    mask |= ((mskb >> 0) & 1) * 0x00000000000000FFULL;
    mask |= ((mskb >> 1) & 1) * 0x000000000000FF00ULL;
    mask |= ((mskb >> 2) & 1) * 0x0000000000FF0000ULL;
    mask |= ((mskb >> 3) & 1) * 0x00000000FF000000ULL;
    mask |= ((mskb >> 4) & 1) * 0x000000FF00000000ULL;
    mask |= ((mskb >> 5) & 1) * 0x0000FF0000000000ULL;
    mask |= ((mskb >> 6) & 1) * 0x00FF000000000000ULL;
    mask |= ((mskb >> 7) & 1) * 0xFF00000000000000ULL;

    return op & ~mask;
}
324 void helper_mskbl (void)
326 T0
= byte_zap(T0
, 0x01 << (T1
& 7));
329 void helper_extbl (void)
332 T0
= byte_zap(T0
, 0xFE);
335 void helper_insbl (void)
338 T0
= byte_zap(T0
, ~(0x01 << (T1
& 7)));
341 void helper_mskwl (void)
343 T0
= byte_zap(T0
, 0x03 << (T1
& 7));
346 void helper_extwl (void)
349 T0
= byte_zap(T0
, 0xFC);
352 void helper_inswl (void)
355 T0
= byte_zap(T0
, ~(0x03 << (T1
& 7)));
358 void helper_mskll (void)
360 T0
= byte_zap(T0
, 0x0F << (T1
& 7));
363 void helper_extll (void)
366 T0
= byte_zap(T0
, 0xF0);
369 void helper_insll (void)
372 T0
= byte_zap(T0
, ~(0x0F << (T1
& 7)));
375 void helper_zap (void)
377 T0
= byte_zap(T0
, T1
);
380 void helper_zapnot (void)
382 T0
= byte_zap(T0
, ~T1
);
385 void helper_mskql (void)
387 T0
= byte_zap(T0
, 0xFF << (T1
& 7));
390 void helper_extql (void)
393 T0
= byte_zap(T0
, 0x00);
396 void helper_insql (void)
399 T0
= byte_zap(T0
, ~(0xFF << (T1
& 7)));
402 void helper_mskwh (void)
404 T0
= byte_zap(T0
, (0x03 << (T1
& 7)) >> 8);
407 void helper_inswh (void)
409 T0
>>= 64 - ((T1
& 7) * 8);
410 T0
= byte_zap(T0
, ~((0x03 << (T1
& 7)) >> 8));
413 void helper_extwh (void)
415 T0
<<= 64 - ((T1
& 7) * 8);
416 T0
= byte_zap(T0
, ~0x07);
419 void helper_msklh (void)
421 T0
= byte_zap(T0
, (0x0F << (T1
& 7)) >> 8);
424 void helper_inslh (void)
426 T0
>>= 64 - ((T1
& 7) * 8);
427 T0
= byte_zap(T0
, ~((0x0F << (T1
& 7)) >> 8));
430 void helper_extlh (void)
432 T0
<<= 64 - ((T1
& 7) * 8);
433 T0
= byte_zap(T0
, ~0x0F);
436 void helper_mskqh (void)
438 T0
= byte_zap(T0
, (0xFF << (T1
& 7)) >> 8);
441 void helper_insqh (void)
443 T0
>>= 64 - ((T1
& 7) * 8);
444 T0
= byte_zap(T0
, ~((0xFF << (T1
& 7)) >> 8));
447 void helper_extqh (void)
449 T0
<<= 64 - ((T1
& 7) * 8);
450 T0
= byte_zap(T0
, 0x00);
453 void helper_cmpbge (void)
455 uint8_t opa
, opb
, res
;
459 for (i
= 0; i
< 7; i
++) {
468 void helper_cmov_fir (int freg
)
471 env
->fir
[freg
] = FT1
;
474 void helper_sqrts (void)
476 FT0
= float32_sqrt(FT0
, &FP_STATUS
);
479 void helper_cpys (void)
488 r
.i
= p
.i
& 0x8000000000000000ULL
;
489 r
.i
|= q
.i
& ~0x8000000000000000ULL
;
493 void helper_cpysn (void)
502 r
.i
= (~p
.i
) & 0x8000000000000000ULL
;
503 r
.i
|= q
.i
& ~0x8000000000000000ULL
;
507 void helper_cpyse (void)
516 r
.i
= p
.i
& 0xFFF0000000000000ULL
;
517 r
.i
|= q
.i
& ~0xFFF0000000000000ULL
;
521 void helper_itofs (void)
529 FT0
= int64_to_float32(p
.i
, &FP_STATUS
);
532 void helper_ftois (void)
539 p
.i
= float32_to_int64(FT0
, &FP_STATUS
);
543 void helper_sqrtt (void)
545 FT0
= float64_sqrt(FT0
, &FP_STATUS
);
548 void helper_cmptun (void)
556 if (float64_is_nan(FT0
) || float64_is_nan(FT1
))
557 p
.i
= 0x4000000000000000ULL
;
561 void helper_cmpteq (void)
569 if (float64_eq(FT0
, FT1
, &FP_STATUS
))
570 p
.i
= 0x4000000000000000ULL
;
574 void helper_cmptle (void)
582 if (float64_le(FT0
, FT1
, &FP_STATUS
))
583 p
.i
= 0x4000000000000000ULL
;
587 void helper_cmptlt (void)
595 if (float64_lt(FT0
, FT1
, &FP_STATUS
))
596 p
.i
= 0x4000000000000000ULL
;
600 void helper_itoft (void)
608 FT0
= int64_to_float64(p
.i
, &FP_STATUS
);
611 void helper_ftoit (void)
618 p
.i
= float64_to_int64(FT0
, &FP_STATUS
);
/* Check that the IEEE bit pattern held in ff is a representable VAX
 * F-format operand: a zero exponent is only legal as true +0 (sign clear,
 * mantissa zero); any other exp==0 pattern is a VAX reserved operand or
 * dirty zero.  Returns 1 if valid, 0 otherwise.
 * NOTE(review): union declaration and return statements reconstructed
 * around the visible field extraction — verify against upstream QEMU.
 */
static int vaxf_is_valid (float ff)
{
    union {
        float f;
        uint32_t i;
    } p;
    uint32_t exp, mant;

    p.f = ff;
    exp = (p.i >> 23) & 0xFF;
    mant = p.i & 0x007FFFFF;
    if (exp == 0 && ((p.i & 0x80000000) || mant != 0)) {
        /* Reserved operands / Dirty zero */
        return 0;
    }

    return 1;
}
/* NOTE(review): garbled fragments of the VAX F <-> IEEE single conversion
 * pair.  Only the exponent/mantissa field extractions and two branch
 * heads survive; the scaling / special-case logic is missing.  Restore
 * both function bodies from upstream QEMU before compiling.
 */
641 static float vaxf_to_ieee32 (float ff
)
650 exp
= (p
.i
>> 23) & 0xFF;
661 static float ieee32_to_vaxf (float fi
)
670 exp
= (p
.i
>> 23) & 0xFF;
671 mant
= p
.i
& 0x007FFFFF;
673 /* NaN or infinity */
675 } else if (exp
== 0) {
695 void helper_addf (void)
699 if (!vaxf_is_valid(FT0
) || !vaxf_is_valid(FT1
)) {
702 ft0
= vaxf_to_ieee32(FT0
);
703 ft1
= vaxf_to_ieee32(FT1
);
704 ft2
= float32_add(ft0
, ft1
, &FP_STATUS
);
705 FT0
= ieee32_to_vaxf(ft2
);
708 void helper_subf (void)
712 if (!vaxf_is_valid(FT0
) || !vaxf_is_valid(FT1
)) {
715 ft0
= vaxf_to_ieee32(FT0
);
716 ft1
= vaxf_to_ieee32(FT1
);
717 ft2
= float32_sub(ft0
, ft1
, &FP_STATUS
);
718 FT0
= ieee32_to_vaxf(ft2
);
721 void helper_mulf (void)
725 if (!vaxf_is_valid(FT0
) || !vaxf_is_valid(FT1
)) {
728 ft0
= vaxf_to_ieee32(FT0
);
729 ft1
= vaxf_to_ieee32(FT1
);
730 ft2
= float32_mul(ft0
, ft1
, &FP_STATUS
);
731 FT0
= ieee32_to_vaxf(ft2
);
734 void helper_divf (void)
738 if (!vaxf_is_valid(FT0
) || !vaxf_is_valid(FT1
)) {
741 ft0
= vaxf_to_ieee32(FT0
);
742 ft1
= vaxf_to_ieee32(FT1
);
743 ft2
= float32_div(ft0
, ft1
, &FP_STATUS
);
744 FT0
= ieee32_to_vaxf(ft2
);
747 void helper_sqrtf (void)
751 if (!vaxf_is_valid(FT0
) || !vaxf_is_valid(FT1
)) {
754 ft0
= vaxf_to_ieee32(FT0
);
755 ft1
= float32_sqrt(ft0
, &FP_STATUS
);
756 FT0
= ieee32_to_vaxf(ft1
);
759 void helper_itoff (void)
/* Check that the IEEE bit pattern held in ff is a representable VAX
 * G-format operand: a zero exponent is only legal as true +0 (sign clear,
 * mantissa zero); any other exp==0 pattern is a VAX reserved operand or
 * dirty zero.  Returns 1 if valid, 0 otherwise.
 * NOTE(review): union declaration and return statements reconstructed
 * around the visible field extraction — verify against upstream QEMU.
 */
static int vaxg_is_valid (double ff)
{
    union {
        double d;
        uint64_t i;
    } p;
    uint64_t exp, mant;

    p.d = ff;
    exp = (p.i >> 52) & 0x7FF;
    mant = p.i & 0x000FFFFFFFFFFFFFULL;
    if (exp == 0 && ((p.i & 0x8000000000000000ULL) || mant != 0)) {
        /* Reserved operands / Dirty zero */
        return 0;
    }

    return 1;
}
/* NOTE(review): garbled fragments of the VAX G <-> IEEE double conversion
 * pair.  Only the field extractions, two "VAX dirty zero" stores and a
 * branch head survive; the scaling / special-case logic is missing.
 * Restore both function bodies from upstream QEMU before compiling.
 */
783 static double vaxg_to_ieee64 (double fg
)
792 exp
= (p
.i
>> 52) & 0x7FF;
803 static double ieee64_to_vaxg (double fi
)
813 exp
= (p
.i
>> 52) & 0x7FF;
814 mant
= p
.i
& 0x000FFFFFFFFFFFFFULL
;
816 /* NaN or infinity */
817 p
.i
= 1; /* VAX dirty zero */
818 } else if (exp
== 0) {
829 p
.i
= 1; /* VAX dirty zero */
838 void helper_addg (void)
840 double ft0
, ft1
, ft2
;
842 if (!vaxg_is_valid(FT0
) || !vaxg_is_valid(FT1
)) {
845 ft0
= vaxg_to_ieee64(FT0
);
846 ft1
= vaxg_to_ieee64(FT1
);
847 ft2
= float64_add(ft0
, ft1
, &FP_STATUS
);
848 FT0
= ieee64_to_vaxg(ft2
);
851 void helper_subg (void)
853 double ft0
, ft1
, ft2
;
855 if (!vaxg_is_valid(FT0
) || !vaxg_is_valid(FT1
)) {
858 ft0
= vaxg_to_ieee64(FT0
);
859 ft1
= vaxg_to_ieee64(FT1
);
860 ft2
= float64_sub(ft0
, ft1
, &FP_STATUS
);
861 FT0
= ieee64_to_vaxg(ft2
);
864 void helper_mulg (void)
866 double ft0
, ft1
, ft2
;
868 if (!vaxg_is_valid(FT0
) || !vaxg_is_valid(FT1
)) {
871 ft0
= vaxg_to_ieee64(FT0
);
872 ft1
= vaxg_to_ieee64(FT1
);
873 ft2
= float64_mul(ft0
, ft1
, &FP_STATUS
);
874 FT0
= ieee64_to_vaxg(ft2
);
877 void helper_divg (void)
879 double ft0
, ft1
, ft2
;
881 if (!vaxg_is_valid(FT0
) || !vaxg_is_valid(FT1
)) {
884 ft0
= vaxg_to_ieee64(FT0
);
885 ft1
= vaxg_to_ieee64(FT1
);
886 ft2
= float64_div(ft0
, ft1
, &FP_STATUS
);
887 FT0
= ieee64_to_vaxg(ft2
);
890 void helper_sqrtg (void)
894 if (!vaxg_is_valid(FT0
) || !vaxg_is_valid(FT1
)) {
897 ft0
= vaxg_to_ieee64(FT0
);
898 ft1
= float64_sqrt(ft0
, &FP_STATUS
);
899 FT0
= ieee64_to_vaxg(ft1
);
902 void helper_cmpgeq (void)
910 if (!vaxg_is_valid(FT0
) || !vaxg_is_valid(FT1
)) {
913 ft0
= vaxg_to_ieee64(FT0
);
914 ft1
= vaxg_to_ieee64(FT1
);
916 if (float64_eq(ft0
, ft1
, &FP_STATUS
))
917 p
.u
= 0x4000000000000000ULL
;
921 void helper_cmpglt (void)
929 if (!vaxg_is_valid(FT0
) || !vaxg_is_valid(FT1
)) {
932 ft0
= vaxg_to_ieee64(FT0
);
933 ft1
= vaxg_to_ieee64(FT1
);
935 if (float64_lt(ft0
, ft1
, &FP_STATUS
))
936 p
.u
= 0x4000000000000000ULL
;
940 void helper_cmpgle (void)
948 if (!vaxg_is_valid(FT0
) || !vaxg_is_valid(FT1
)) {
951 ft0
= vaxg_to_ieee64(FT0
);
952 ft1
= vaxg_to_ieee64(FT1
);
954 if (float64_le(ft0
, ft1
, &FP_STATUS
))
955 p
.u
= 0x4000000000000000ULL
;
/* NOTE(review): garbled fragments of the conversion helpers (quad <->
 * S/T/F/G/D formats and longword <-> quadword).  Most bodies are missing;
 * the surviving lines show the ieee32_to_vaxf / vaxg_to_ieee64 /
 * ieee64_to_vaxg plumbing and cvtlq's bit rearrangement.  Restore from
 * upstream QEMU before compiling.
 */
959 void helper_cvtqs (void)
970 void helper_cvttq (void)
981 void helper_cvtqt (void)
992 void helper_cvtqf (void)
1000 FT0
= ieee32_to_vaxf(p
.u
);
1003 void helper_cvtgf (void)
1007 ft0
= vaxg_to_ieee64(FT0
);
1008 FT0
= ieee32_to_vaxf(ft0
);
1011 void helper_cvtgd (void)
1016 void helper_cvtgq (void)
1023 p
.u
= vaxg_to_ieee64(FT0
);
1027 void helper_cvtqg (void)
1035 FT0
= ieee64_to_vaxg(p
.u
);
1038 void helper_cvtdg (void)
1043 void helper_cvtlq (void)
1051 q
.u
= (p
.u
>> 29) & 0x3FFFFFFF;
1053 q
.u
= (int64_t)((int32_t)q
.u
);
1057 static inline void __helper_cvtql (int s
, int v
)
1065 q
.u
= ((uint64_t)(p
.u
& 0xC0000000)) << 32;
1066 q
.u
|= ((uint64_t)(p
.u
& 0x7FFFFFFF)) << 29;
1068 if (v
&& (int64_t)((int32_t)p
.u
) != (int64_t)p
.u
) {
1069 helper_excp(EXCP_ARITH
, EXCP_ARITH_OVERFLOW
);
/* CVTQL variants: plain, with overflow trap (/V), and software-completion
 * with overflow trap (/SV); all delegate to __helper_cvtql(s, v). */
void helper_cvtql (void)
{
    __helper_cvtql(0, 0);
}

void helper_cvtqlv (void)
{
    __helper_cvtql(0, 1);
}

void helper_cvtqlsv (void)
{
    __helper_cvtql(1, 1);
}
1091 void helper_cmpfeq (void)
1093 if (float64_eq(FT0
, FT1
, &FP_STATUS
))
1099 void helper_cmpfne (void)
1101 if (float64_eq(FT0
, FT1
, &FP_STATUS
))
1107 void helper_cmpflt (void)
1109 if (float64_lt(FT0
, FT1
, &FP_STATUS
))
1115 void helper_cmpfle (void)
1117 if (float64_lt(FT0
, FT1
, &FP_STATUS
))
1123 void helper_cmpfgt (void)
1125 if (float64_le(FT0
, FT1
, &FP_STATUS
))
1131 void helper_cmpfge (void)
1133 if (float64_lt(FT0
, FT1
, &FP_STATUS
))
1139 #if !defined (CONFIG_USER_ONLY)
1140 void helper_mfpr (int iprn
)
1144 if (cpu_alpha_mfpr(env
, iprn
, &val
) == 0)
1148 void helper_mtpr (int iprn
)
1150 cpu_alpha_mtpr(env
, iprn
, T0
, NULL
);
1154 /*****************************************************************************/
1155 /* Softmmu support */
1156 #if !defined (CONFIG_USER_ONLY)
1158 #define GETPC() (__builtin_return_address(0))
1160 /* XXX: the two following helpers are pure hacks.
1161 * Hopefully, we emulate the PALcode, then we should never see
1162 * HW_LD / HW_ST instructions.
1164 void helper_ld_phys_to_virt (void)
1166 uint64_t tlb_addr
, physaddr
;
1170 is_user
= (env
->ps
>> 3) & 3;
1171 index
= (T0
>> TARGET_PAGE_BITS
) & (CPU_TLB_SIZE
- 1);
1173 tlb_addr
= env
->tlb_table
[is_user
][index
].addr_read
;
1174 if ((T0
& TARGET_PAGE_MASK
) ==
1175 (tlb_addr
& (TARGET_PAGE_MASK
| TLB_INVALID_MASK
))) {
1176 physaddr
= T0
+ env
->tlb_table
[is_user
][index
].addend
;
1178 /* the page is not in the TLB : fill it */
1180 tlb_fill(T0
, 0, is_user
, retaddr
);
1186 void helper_st_phys_to_virt (void)
1188 uint64_t tlb_addr
, physaddr
;
1192 is_user
= (env
->ps
>> 3) & 3;
1193 index
= (T0
>> TARGET_PAGE_BITS
) & (CPU_TLB_SIZE
- 1);
1195 tlb_addr
= env
->tlb_table
[is_user
][index
].addr_write
;
1196 if ((T0
& TARGET_PAGE_MASK
) ==
1197 (tlb_addr
& (TARGET_PAGE_MASK
| TLB_INVALID_MASK
))) {
1198 physaddr
= T0
+ env
->tlb_table
[is_user
][index
].addend
;
1200 /* the page is not in the TLB : fill it */
1202 tlb_fill(T0
, 1, is_user
, retaddr
);
1208 #define MMUSUFFIX _mmu
1211 #include "softmmu_template.h"
1214 #include "softmmu_template.h"
1217 #include "softmmu_template.h"
1220 #include "softmmu_template.h"
1222 /* try to fill the TLB and return an exception if error. If retaddr is
1223 NULL, it means that the function was called in C code (i.e. not
1224 from generated code or from helper.c) */
1225 /* XXX: fix it to restore all registers */
1226 void tlb_fill (target_ulong addr
, int is_write
, int is_user
, void *retaddr
)
1228 TranslationBlock
*tb
;
1229 CPUState
*saved_env
;
1230 target_phys_addr_t pc
;
1233 /* XXX: hack to restore env in all cases, even if not called from
1236 env
= cpu_single_env
;
1237 ret
= cpu_alpha_handle_mmu_fault(env
, addr
, is_write
, is_user
, 1);
1238 if (!likely(ret
== 0)) {
1239 if (likely(retaddr
)) {
1240 /* now we have a real cpu fault */
1241 pc
= (target_phys_addr_t
)retaddr
;
1242 tb
= tb_find_pc(pc
);
1244 /* the PC is inside the translated code. It means that we have
1245 a virtual CPU fault */
1246 cpu_restore_state(tb
, env
, pc
, NULL
);
1249 /* Exception index and error code are already set */