/*
 * Alpha emulation cpu translation for qemu.
 *
 * Copyright (c) 2007 Jocelyn Mayer
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "disas/disas.h"
#include "qemu/host-utils.h"

#undef ALPHA_DEBUG_DISAS
#define CONFIG_SOFTFLOAT_INLINE

#ifdef ALPHA_DEBUG_DISAS
#  define LOG_DISAS(...) qemu_log_mask(CPU_LOG_TB_IN_ASM, ## __VA_ARGS__)
#else
#  define LOG_DISAS(...) do { } while (0)
#endif
typedef struct DisasContext DisasContext;
struct DisasContext {
    struct TranslationBlock *tb;

    /* Current rounding mode for this TB.  */
    int tb_rm;
    /* Current flush-to-zero setting for this TB.  */
    int tb_ftz;

    /* implver value for this CPU.  */
    int implver;

    bool singlestep_enabled;
};
/* Return values from translate_one, indicating the state of the TB.
   Note that zero indicates that we are not exiting the TB.  */

typedef enum {
    NO_EXIT,

    /* We have emitted one or more goto_tb.  No fixup required.  */
    EXIT_GOTO_TB,

    /* We are not using a goto_tb (for whatever reason), but have updated
       the PC (for whatever reason), so there's no need to do it again on
       exiting the TB.  */
    EXIT_PC_UPDATED,

    /* We are exiting the TB, but have neither emitted a goto_tb, nor
       updated the PC for the next instruction to be executed.  */
    EXIT_PC_STALE,

    /* We are ending the TB with a noreturn function call, e.g. longjmp.
       No following code will be executed.  */
    EXIT_NORETURN,
} ExitStatus;
/* global register indexes */
static TCGv_ptr cpu_env;
static TCGv cpu_ir[31];
static TCGv cpu_fir[31];
static TCGv cpu_pc;
static TCGv cpu_lock_addr;
static TCGv cpu_lock_st_addr;
static TCGv cpu_lock_value;
static TCGv cpu_unique;
#ifndef CONFIG_USER_ONLY
static TCGv cpu_sysval;
static TCGv cpu_usp;
#endif

/* register names */
static char cpu_reg_names[10*4 + 21*5 + 10*5 + 21*6];
#include "exec/gen-icount.h"

void alpha_translate_init(void)
{
    int i;
    char *p;
    static int done_init = 0;

    if (done_init) {
        return;
    }

    cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");

    p = cpu_reg_names;
    for (i = 0; i < 31; i++) {
        sprintf(p, "ir%d", i);
        cpu_ir[i] = tcg_global_mem_new_i64(TCG_AREG0,
                                           offsetof(CPUAlphaState, ir[i]), p);
        p += (i < 10) ? 4 : 5;

        sprintf(p, "fir%d", i);
        cpu_fir[i] = tcg_global_mem_new_i64(TCG_AREG0,
                                            offsetof(CPUAlphaState, fir[i]), p);
        p += (i < 10) ? 5 : 6;
    }

    cpu_pc = tcg_global_mem_new_i64(TCG_AREG0,
                                    offsetof(CPUAlphaState, pc), "pc");

    cpu_lock_addr = tcg_global_mem_new_i64(TCG_AREG0,
                                           offsetof(CPUAlphaState, lock_addr),
                                           "lock_addr");
    cpu_lock_st_addr = tcg_global_mem_new_i64(TCG_AREG0,
                                              offsetof(CPUAlphaState, lock_st_addr),
                                              "lock_st_addr");
    cpu_lock_value = tcg_global_mem_new_i64(TCG_AREG0,
                                            offsetof(CPUAlphaState, lock_value),
                                            "lock_value");

    cpu_unique = tcg_global_mem_new_i64(TCG_AREG0,
                                        offsetof(CPUAlphaState, unique), "unique");
#ifndef CONFIG_USER_ONLY
    cpu_sysval = tcg_global_mem_new_i64(TCG_AREG0,
                                        offsetof(CPUAlphaState, sysval), "sysval");
    cpu_usp = tcg_global_mem_new_i64(TCG_AREG0,
                                     offsetof(CPUAlphaState, usp), "usp");
#endif

    /* register helpers */

    done_init = 1;
}
static void gen_excp_1(int exception, int error_code)
{
    TCGv_i32 tmp1, tmp2;

    tmp1 = tcg_const_i32(exception);
    tmp2 = tcg_const_i32(error_code);
    gen_helper_excp(cpu_env, tmp1, tmp2);
    tcg_temp_free_i32(tmp2);
    tcg_temp_free_i32(tmp1);
}
static ExitStatus gen_excp(DisasContext *ctx, int exception, int error_code)
{
    tcg_gen_movi_i64(cpu_pc, ctx->pc);
    gen_excp_1(exception, error_code);
    return EXIT_NORETURN;
}

static inline ExitStatus gen_invalid(DisasContext *ctx)
{
    return gen_excp(ctx, EXCP_OPCDEC, 0);
}
static inline void gen_qemu_ldf(TCGv t0, TCGv t1, int flags)
{
    TCGv tmp = tcg_temp_new();
    TCGv_i32 tmp32 = tcg_temp_new_i32();
    tcg_gen_qemu_ld32u(tmp, t1, flags);
    tcg_gen_trunc_i64_i32(tmp32, tmp);
    gen_helper_memory_to_f(t0, tmp32);
    tcg_temp_free_i32(tmp32);
    tcg_temp_free(tmp);
}

static inline void gen_qemu_ldg(TCGv t0, TCGv t1, int flags)
{
    TCGv tmp = tcg_temp_new();
    tcg_gen_qemu_ld64(tmp, t1, flags);
    gen_helper_memory_to_g(t0, tmp);
    tcg_temp_free(tmp);
}

static inline void gen_qemu_lds(TCGv t0, TCGv t1, int flags)
{
    TCGv tmp = tcg_temp_new();
    TCGv_i32 tmp32 = tcg_temp_new_i32();
    tcg_gen_qemu_ld32u(tmp, t1, flags);
    tcg_gen_trunc_i64_i32(tmp32, tmp);
    gen_helper_memory_to_s(t0, tmp32);
    tcg_temp_free_i32(tmp32);
    tcg_temp_free(tmp);
}
static inline void gen_qemu_ldl_l(TCGv t0, TCGv t1, int flags)
{
    tcg_gen_qemu_ld32s(t0, t1, flags);
    tcg_gen_mov_i64(cpu_lock_addr, t1);
    tcg_gen_mov_i64(cpu_lock_value, t0);
}

static inline void gen_qemu_ldq_l(TCGv t0, TCGv t1, int flags)
{
    tcg_gen_qemu_ld64(t0, t1, flags);
    tcg_gen_mov_i64(cpu_lock_addr, t1);
    tcg_gen_mov_i64(cpu_lock_value, t0);
}
static inline void gen_load_mem(DisasContext *ctx,
                                void (*tcg_gen_qemu_load)(TCGv t0, TCGv t1,
                                                          int flags),
                                int ra, int rb, int32_t disp16, int fp,
                                int clear)
{
    TCGv addr, va;

    /* LDQ_U with ra $31 is UNOP.  Other various loads are forms of
       prefetches, which we can treat as nops.  No worries about
       missed exceptions here.  */
    if (unlikely(ra == 31)) {
        return;
    }

    addr = tcg_temp_new();
    if (rb != 31) {
        tcg_gen_addi_i64(addr, cpu_ir[rb], disp16);
        if (clear) {
            tcg_gen_andi_i64(addr, addr, ~0x7);
        }
    } else {
        tcg_gen_movi_i64(addr, disp16);
    }

    va = (fp ? cpu_fir[ra] : cpu_ir[ra]);
    tcg_gen_qemu_load(va, addr, ctx->mem_idx);

    tcg_temp_free(addr);
}
static inline void gen_qemu_stf(TCGv t0, TCGv t1, int flags)
{
    TCGv_i32 tmp32 = tcg_temp_new_i32();
    TCGv tmp = tcg_temp_new();
    gen_helper_f_to_memory(tmp32, t0);
    tcg_gen_extu_i32_i64(tmp, tmp32);
    tcg_gen_qemu_st32(tmp, t1, flags);
    tcg_temp_free(tmp);
    tcg_temp_free_i32(tmp32);
}

static inline void gen_qemu_stg(TCGv t0, TCGv t1, int flags)
{
    TCGv tmp = tcg_temp_new();
    gen_helper_g_to_memory(tmp, t0);
    tcg_gen_qemu_st64(tmp, t1, flags);
    tcg_temp_free(tmp);
}

static inline void gen_qemu_sts(TCGv t0, TCGv t1, int flags)
{
    TCGv_i32 tmp32 = tcg_temp_new_i32();
    TCGv tmp = tcg_temp_new();
    gen_helper_s_to_memory(tmp32, t0);
    tcg_gen_extu_i32_i64(tmp, tmp32);
    tcg_gen_qemu_st32(tmp, t1, flags);
    tcg_temp_free(tmp);
    tcg_temp_free_i32(tmp32);
}
static inline void gen_store_mem(DisasContext *ctx,
                                 void (*tcg_gen_qemu_store)(TCGv t0, TCGv t1,
                                                            int flags),
                                 int ra, int rb, int32_t disp16, int fp,
                                 int clear)
{
    TCGv addr, va;

    addr = tcg_temp_new();
    if (rb != 31) {
        tcg_gen_addi_i64(addr, cpu_ir[rb], disp16);
        if (clear) {
            tcg_gen_andi_i64(addr, addr, ~0x7);
        }
    } else {
        tcg_gen_movi_i64(addr, disp16);
    }

    if (ra == 31) {
        va = tcg_const_i64(0);
    } else {
        va = (fp ? cpu_fir[ra] : cpu_ir[ra]);
    }
    tcg_gen_qemu_store(va, addr, ctx->mem_idx);

    tcg_temp_free(addr);
    if (ra == 31) {
        tcg_temp_free(va);
    }
}
static ExitStatus gen_store_conditional(DisasContext *ctx, int ra, int rb,
                                        int32_t disp16, int quad)
{
    TCGv addr;

    if (ra == 31) {
        /* ??? Don't bother storing anything.  The user can't tell
           the difference, since the zero register always reads zero.  */
        return NO_EXIT;
    }

#if defined(CONFIG_USER_ONLY)
    addr = cpu_lock_st_addr;
#else
    addr = tcg_temp_local_new();
#endif

    if (rb != 31) {
        tcg_gen_addi_i64(addr, cpu_ir[rb], disp16);
    } else {
        tcg_gen_movi_i64(addr, disp16);
    }

#if defined(CONFIG_USER_ONLY)
    /* ??? This is handled via a complicated version of compare-and-swap
       in the cpu_loop.  Hopefully one day we'll have a real CAS opcode
       in TCG so that this isn't necessary.  */
    return gen_excp(ctx, quad ? EXCP_STQ_C : EXCP_STL_C, ra);
#else
    /* ??? In system mode we are never multi-threaded, so CAS can be
       implemented via a non-atomic load-compare-store sequence.  */
    {
        int lab_fail, lab_done;
        TCGv val;

        lab_fail = gen_new_label();
        lab_done = gen_new_label();
        tcg_gen_brcond_i64(TCG_COND_NE, addr, cpu_lock_addr, lab_fail);

        val = tcg_temp_new();
        if (quad) {
            tcg_gen_qemu_ld64(val, addr, ctx->mem_idx);
        } else {
            tcg_gen_qemu_ld32s(val, addr, ctx->mem_idx);
        }
        tcg_gen_brcond_i64(TCG_COND_NE, val, cpu_lock_value, lab_fail);

        if (quad) {
            tcg_gen_qemu_st64(cpu_ir[ra], addr, ctx->mem_idx);
        } else {
            tcg_gen_qemu_st32(cpu_ir[ra], addr, ctx->mem_idx);
        }
        tcg_gen_movi_i64(cpu_ir[ra], 1);
        tcg_gen_br(lab_done);

        gen_set_label(lab_fail);
        tcg_gen_movi_i64(cpu_ir[ra], 0);

        gen_set_label(lab_done);
        tcg_gen_movi_i64(cpu_lock_addr, -1);

        tcg_temp_free(addr);
        return NO_EXIT;
    }
#endif
}
static bool in_superpage(DisasContext *ctx, int64_t addr)
{
    return ((ctx->tb->flags & TB_FLAGS_USER_MODE) == 0
            && ((addr >> 41) & 3) == 2
            && addr >> TARGET_VIRT_ADDR_SPACE_BITS == addr >> 63);
}

static bool use_goto_tb(DisasContext *ctx, uint64_t dest)
{
    /* Suppress goto_tb in the case of single-stepping and IO.  */
    if (ctx->singlestep_enabled || (ctx->tb->cflags & CF_LAST_IO)) {
        return false;
    }
    /* If the destination is in the superpage, the page perms can't change.  */
    if (in_superpage(ctx, dest)) {
        return true;
    }
    /* Check for the dest on the same page as the start of the TB.  */
    return ((ctx->tb->pc ^ dest) & TARGET_PAGE_MASK) == 0;
}
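/* The superpage test accepts kernel-mode addresses that are correctly
   sign-extended from TARGET_VIRT_ADDR_SPACE_BITS and whose bits <42:41>
   are binary 10 -- the KSEG region.  KSEG is direct-mapped, so a TB
   chained into it can never see its page permissions change.  */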
static ExitStatus gen_bdirect(DisasContext *ctx, int ra, int32_t disp)
{
    uint64_t dest = ctx->pc + (disp << 2);

    if (ra != 31) {
        tcg_gen_movi_i64(cpu_ir[ra], ctx->pc);
    }

    if (disp == 0) {
        /* Notice branch-to-next; used to initialize RA with the PC.  */
        return 0;
    } else if (use_goto_tb(ctx, dest)) {
        tcg_gen_goto_tb(0);
        tcg_gen_movi_i64(cpu_pc, dest);
        tcg_gen_exit_tb((tcg_target_long)ctx->tb);
        return EXIT_GOTO_TB;
    } else {
        tcg_gen_movi_i64(cpu_pc, dest);
        return EXIT_PC_UPDATED;
    }
}
static ExitStatus gen_bcond_internal(DisasContext *ctx, TCGCond cond,
                                     TCGv cmp, int32_t disp)
{
    uint64_t dest = ctx->pc + (disp << 2);
    int lab_true = gen_new_label();

    if (use_goto_tb(ctx, dest)) {
        tcg_gen_brcondi_i64(cond, cmp, 0, lab_true);

        tcg_gen_goto_tb(0);
        tcg_gen_movi_i64(cpu_pc, ctx->pc);
        tcg_gen_exit_tb((tcg_target_long)ctx->tb);

        gen_set_label(lab_true);
        tcg_gen_goto_tb(1);
        tcg_gen_movi_i64(cpu_pc, dest);
        tcg_gen_exit_tb((tcg_target_long)ctx->tb + 1);

        return EXIT_GOTO_TB;
    } else {
        TCGv_i64 z = tcg_const_i64(0);
        TCGv_i64 d = tcg_const_i64(dest);
        TCGv_i64 p = tcg_const_i64(ctx->pc);

        tcg_gen_movcond_i64(cond, cpu_pc, cmp, z, d, p);

        tcg_temp_free_i64(z);
        tcg_temp_free_i64(d);
        tcg_temp_free_i64(p);
        return EXIT_PC_UPDATED;
    }
}
static ExitStatus gen_bcond(DisasContext *ctx, TCGCond cond, int ra,
                            int32_t disp, int mask)
{
    TCGv cmp_tmp;

    if (unlikely(ra == 31)) {
        cmp_tmp = tcg_const_i64(0);
    } else {
        cmp_tmp = tcg_temp_new();
        if (mask) {
            tcg_gen_andi_i64(cmp_tmp, cpu_ir[ra], 1);
        } else {
            tcg_gen_mov_i64(cmp_tmp, cpu_ir[ra]);
        }
    }

    return gen_bcond_internal(ctx, cond, cmp_tmp, disp);
}
/* Fold -0.0 for comparison with COND.  */

static void gen_fold_mzero(TCGCond cond, TCGv dest, TCGv src)
{
    uint64_t mzero = 1ull << 63;

    switch (cond) {
    case TCG_COND_LE:
    case TCG_COND_GT:
        /* For <= or >, the -0.0 value directly compares the way we want.  */
        tcg_gen_mov_i64(dest, src);
        break;

    case TCG_COND_EQ:
    case TCG_COND_NE:
        /* For == or !=, we can simply mask off the sign bit and compare.  */
        tcg_gen_andi_i64(dest, src, mzero - 1);
        break;

    case TCG_COND_GE:
    case TCG_COND_LT:
        /* For >= or <, map -0.0 to +0.0 via comparison and mask.  */
        tcg_gen_setcondi_i64(TCG_COND_NE, dest, src, mzero);
        tcg_gen_neg_i64(dest, dest);
        tcg_gen_and_i64(dest, dest, src);
        break;

    default:
        abort();
    }
}
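/* As an illustration of the >= / < case above: when SRC is -0.0 (only
   the sign bit set), the setcond produces 0, the neg leaves 0, and the
   and yields +0.0; for any other SRC the setcond produces 1, the neg
   expands it to all-ones, and the and passes SRC through unchanged.  */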
static ExitStatus gen_fbcond(DisasContext *ctx, TCGCond cond, int ra,
                             int32_t disp)
{
    TCGv cmp_tmp;

    if (unlikely(ra == 31)) {
        /* Very uncommon case, but easier to optimize it to an integer
           comparison than continuing with the floating point comparison.  */
        return gen_bcond(ctx, cond, ra, disp, 0);
    }

    cmp_tmp = tcg_temp_new();
    gen_fold_mzero(cond, cmp_tmp, cpu_fir[ra]);
    return gen_bcond_internal(ctx, cond, cmp_tmp, disp);
}
static void gen_cmov(TCGCond cond, int ra, int rb, int rc,
                     int islit, uint8_t lit, int mask)
{
    TCGv c1, z, v1;

    if (unlikely(rc == 31)) {
        return;
    }

    if (ra == 31) {
        /* Very uncommon case - Do not bother to optimize.  */
        c1 = tcg_const_i64(0);
    } else if (mask) {
        c1 = tcg_const_i64(1);
        tcg_gen_and_i64(c1, c1, cpu_ir[ra]);
    } else {
        c1 = cpu_ir[ra];
    }
    if (islit) {
        v1 = tcg_const_i64(lit);
    } else {
        v1 = cpu_ir[rb];
    }
    z = tcg_const_i64(0);

    tcg_gen_movcond_i64(cond, cpu_ir[rc], c1, z, v1, cpu_ir[rc]);

    tcg_temp_free_i64(z);
    if (ra == 31 || mask) {
        tcg_temp_free_i64(c1);
    }
    if (islit) {
        tcg_temp_free_i64(v1);
    }
}
static void gen_fcmov(TCGCond cond, int ra, int rb, int rc)
{
    TCGv c1, z, v1;

    if (unlikely(rc == 31)) {
        return;
    }

    c1 = tcg_temp_new_i64();
    if (unlikely(ra == 31)) {
        tcg_gen_movi_i64(c1, 0);
    } else {
        gen_fold_mzero(cond, c1, cpu_fir[ra]);
    }
    if (rb == 31) {
        v1 = tcg_const_i64(0);
    } else {
        v1 = cpu_fir[rb];
    }
    z = tcg_const_i64(0);

    tcg_gen_movcond_i64(cond, cpu_fir[rc], c1, z, v1, cpu_fir[rc]);

    tcg_temp_free_i64(z);
    tcg_temp_free_i64(c1);
    if (rb == 31) {
        tcg_temp_free_i64(v1);
    }
}
#define QUAL_RM_N       0x080   /* Round mode nearest even */
#define QUAL_RM_C       0x000   /* Round mode chopped */
#define QUAL_RM_M       0x040   /* Round mode minus infinity */
#define QUAL_RM_D       0x0c0   /* Round mode dynamic */
#define QUAL_RM_MASK    0x0c0

#define QUAL_U          0x100   /* Underflow enable (fp output) */
#define QUAL_V          0x100   /* Overflow enable (int output) */
#define QUAL_S          0x400   /* Software completion enable */
#define QUAL_I          0x200   /* Inexact detection enable */
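/* In other words, within the 11-bit function field: /S is fn11<10>,
   /I is fn11<9>, /U (or /V) is fn11<8>, and the rounding mode
   qualifier occupies fn11<7:6>.  */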
static void gen_qual_roundmode(DisasContext *ctx, int fn11)
{
    TCGv_i32 tmp;

    fn11 &= QUAL_RM_MASK;
    if (fn11 == ctx->tb_rm) {
        return;
    }
    ctx->tb_rm = fn11;

    tmp = tcg_temp_new_i32();
    switch (fn11) {
    case QUAL_RM_N:
        tcg_gen_movi_i32(tmp, float_round_nearest_even);
        break;
    case QUAL_RM_C:
        tcg_gen_movi_i32(tmp, float_round_to_zero);
        break;
    case QUAL_RM_M:
        tcg_gen_movi_i32(tmp, float_round_down);
        break;
    case QUAL_RM_D:
        tcg_gen_ld8u_i32(tmp, cpu_env,
                         offsetof(CPUAlphaState, fpcr_dyn_round));
        break;
    }

#if defined(CONFIG_SOFTFLOAT_INLINE)
    /* ??? The "fpu/softfloat.h" interface is to call set_float_rounding_mode.
       With CONFIG_SOFTFLOAT that expands to an out-of-line call that just
       sets the one field.  */
    tcg_gen_st8_i32(tmp, cpu_env,
                    offsetof(CPUAlphaState, fp_status.float_rounding_mode));
#else
    gen_helper_setroundmode(tmp);
#endif

    tcg_temp_free_i32(tmp);
}
static void gen_qual_flushzero(DisasContext *ctx, int fn11)
{
    TCGv_i32 tmp;

    fn11 &= QUAL_U;
    if (fn11 == ctx->tb_ftz) {
        return;
    }
    ctx->tb_ftz = fn11;

    tmp = tcg_temp_new_i32();
    if (fn11) {
        /* Underflow is enabled, use the FPCR setting.  */
        tcg_gen_ld8u_i32(tmp, cpu_env,
                         offsetof(CPUAlphaState, fpcr_flush_to_zero));
    } else {
        /* Underflow is disabled, force flush-to-zero.  */
        tcg_gen_movi_i32(tmp, 1);
    }

#if defined(CONFIG_SOFTFLOAT_INLINE)
    tcg_gen_st8_i32(tmp, cpu_env,
                    offsetof(CPUAlphaState, fp_status.flush_to_zero));
#else
    gen_helper_setflushzero(tmp);
#endif

    tcg_temp_free_i32(tmp);
}
static TCGv gen_ieee_input(int reg, int fn11, int is_cmp)
{
    TCGv val;

    if (reg == 31) {
        val = tcg_const_i64(0);
    } else {
        if ((fn11 & QUAL_S) == 0) {
            if (is_cmp) {
                gen_helper_ieee_input_cmp(cpu_env, cpu_fir[reg]);
            } else {
                gen_helper_ieee_input(cpu_env, cpu_fir[reg]);
            }
        }
        val = tcg_temp_new();
        tcg_gen_mov_i64(val, cpu_fir[reg]);
    }
    return val;
}
static void gen_fp_exc_clear(void)
{
#if defined(CONFIG_SOFTFLOAT_INLINE)
    TCGv_i32 zero = tcg_const_i32(0);
    tcg_gen_st8_i32(zero, cpu_env,
                    offsetof(CPUAlphaState, fp_status.float_exception_flags));
    tcg_temp_free_i32(zero);
#else
    gen_helper_fp_exc_clear(cpu_env);
#endif
}
static void gen_fp_exc_raise_ignore(int rc, int fn11, int ignore)
{
    /* ??? We ought to be able to do something with imprecise exceptions.
       E.g. notice we're still in the trap shadow of something within the
       TB and do not generate the code to signal the exception; end the TB
       when an exception is forced to arrive, either by consumption of a
       register value or TRAPB or EXCB.  */
    TCGv_i32 exc = tcg_temp_new_i32();
    TCGv_i32 reg;

#if defined(CONFIG_SOFTFLOAT_INLINE)
    tcg_gen_ld8u_i32(exc, cpu_env,
                     offsetof(CPUAlphaState, fp_status.float_exception_flags));
#else
    gen_helper_fp_exc_get(exc, cpu_env);
#endif

    if (ignore) {
        tcg_gen_andi_i32(exc, exc, ~ignore);
    }

    /* ??? Pass in the regno of the destination so that the helper can
       set EXC_MASK, which contains a bitmask of destination registers
       that have caused arithmetic traps.  A simple userspace emulation
       does not require this.  We do need it for a guest kernel's entArith,
       or if we were to do something clever with imprecise exceptions.  */
    reg = tcg_const_i32(rc + 32);

    if (fn11 & QUAL_S) {
        gen_helper_fp_exc_raise_s(cpu_env, exc, reg);
    } else {
        gen_helper_fp_exc_raise(cpu_env, exc, reg);
    }

    tcg_temp_free_i32(reg);
    tcg_temp_free_i32(exc);
}

static inline void gen_fp_exc_raise(int rc, int fn11)
{
    gen_fp_exc_raise_ignore(rc, fn11, fn11 & QUAL_I ? 0 : float_flag_inexact);
}
static void gen_fcvtlq(int rb, int rc)
{
    if (unlikely(rc == 31)) {
        return;
    }
    if (unlikely(rb == 31)) {
        tcg_gen_movi_i64(cpu_fir[rc], 0);
    } else {
        TCGv tmp = tcg_temp_new();

        /* The arithmetic right shift here, plus the sign-extended mask below
           yields a sign-extended result without an explicit ext32s_i64.  */
        tcg_gen_sari_i64(tmp, cpu_fir[rb], 32);
        tcg_gen_shri_i64(cpu_fir[rc], cpu_fir[rb], 29);
        tcg_gen_andi_i64(tmp, tmp, (int32_t)0xc0000000);
        tcg_gen_andi_i64(cpu_fir[rc], cpu_fir[rc], 0x3fffffff);
        tcg_gen_or_i64(cpu_fir[rc], cpu_fir[rc], tmp);

        tcg_temp_free(tmp);
    }
}
static void gen_fcvtql(int rb, int rc)
{
    if (unlikely(rc == 31)) {
        return;
    }
    if (unlikely(rb == 31)) {
        tcg_gen_movi_i64(cpu_fir[rc], 0);
    } else {
        TCGv tmp = tcg_temp_new();

        tcg_gen_andi_i64(tmp, cpu_fir[rb], 0xC0000000);
        tcg_gen_andi_i64(cpu_fir[rc], cpu_fir[rb], 0x3FFFFFFF);
        tcg_gen_shli_i64(tmp, tmp, 32);
        tcg_gen_shli_i64(cpu_fir[rc], cpu_fir[rc], 29);
        tcg_gen_or_i64(cpu_fir[rc], cpu_fir[rc], tmp);

        tcg_temp_free(tmp);
    }
}
static void gen_fcvtql_v(DisasContext *ctx, int rb, int rc)
{
    if (rb != 31) {
        int lab = gen_new_label();
        TCGv tmp = tcg_temp_new();

        tcg_gen_ext32s_i64(tmp, cpu_fir[rb]);
        tcg_gen_brcond_i64(TCG_COND_EQ, tmp, cpu_fir[rb], lab);
        gen_excp(ctx, EXCP_ARITH, EXC_M_IOV);

        gen_set_label(lab);
    }
    gen_fcvtql(rb, rc);
}
#define FARITH2(name)                                                   \
    static inline void glue(gen_f, name)(int rb, int rc)                \
    {                                                                   \
        if (unlikely(rc == 31)) {                                       \
            return;                                                     \
        }                                                               \
        if (rb != 31) {                                                 \
            gen_helper_ ## name(cpu_fir[rc], cpu_env, cpu_fir[rb]);     \
        } else {                                                        \
            TCGv tmp = tcg_const_i64(0);                                \
            gen_helper_ ## name(cpu_fir[rc], cpu_env, tmp);             \
            tcg_temp_free(tmp);                                         \
        }                                                               \
    }

/* ??? VAX instruction qualifiers ignored.  */
FARITH2(sqrtf)
FARITH2(sqrtg)
static void gen_ieee_arith2(DisasContext *ctx,
                            void (*helper)(TCGv, TCGv_ptr, TCGv),
                            int rb, int rc, int fn11)
{
    TCGv vb;

    /* ??? This is wrong: the instruction is not a nop, it still may
       raise exceptions.  */
    if (unlikely(rc == 31)) {
        return;
    }

    gen_qual_roundmode(ctx, fn11);
    gen_qual_flushzero(ctx, fn11);
    gen_fp_exc_clear();

    vb = gen_ieee_input(rb, fn11, 0);
    helper(cpu_fir[rc], cpu_env, vb);
    tcg_temp_free(vb);

    gen_fp_exc_raise(rc, fn11);
}
#define IEEE_ARITH2(name)                                               \
    static inline void glue(gen_f, name)(DisasContext *ctx,             \
                                         int rb, int rc, int fn11)      \
    {                                                                   \
        gen_ieee_arith2(ctx, gen_helper_##name, rb, rc, fn11);          \
    }
IEEE_ARITH2(sqrts)
IEEE_ARITH2(sqrtt)
IEEE_ARITH2(cvtst)
IEEE_ARITH2(cvtts)
static void gen_fcvttq(DisasContext *ctx, int rb, int rc, int fn11)
{
    TCGv vb;
    int ignore = 0;

    /* ??? This is wrong: the instruction is not a nop, it still may
       raise exceptions.  */
    if (unlikely(rc == 31)) {
        return;
    }

    /* No need to set flushzero, since we have an integer output.  */
    gen_fp_exc_clear();
    vb = gen_ieee_input(rb, fn11, 0);

    /* Almost all integer conversions use cropped rounding, and most
       also do not have integer overflow enabled.  Special case that.  */
    switch (fn11) {
    case QUAL_RM_C:
        gen_helper_cvttq_c(cpu_fir[rc], cpu_env, vb);
        break;
    case QUAL_V | QUAL_RM_C:
    case QUAL_S | QUAL_V | QUAL_RM_C:
        ignore = float_flag_inexact;
        /* FALLTHRU */
    case QUAL_S | QUAL_V | QUAL_I | QUAL_RM_C:
        gen_helper_cvttq_svic(cpu_fir[rc], cpu_env, vb);
        break;
    default:
        gen_qual_roundmode(ctx, fn11);
        gen_helper_cvttq(cpu_fir[rc], cpu_env, vb);
        ignore |= (fn11 & QUAL_V ? 0 : float_flag_overflow);
        ignore |= (fn11 & QUAL_I ? 0 : float_flag_inexact);
        break;
    }
    tcg_temp_free(vb);

    gen_fp_exc_raise_ignore(rc, fn11, ignore);
}
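/* Given the qualifier encodings above, the common cropped case is
   fn11 == QUAL_RM_C == 0x000, and the fully qualified /SVIC form is
   fn11 == (QUAL_S | QUAL_V | QUAL_I | QUAL_RM_C) == 0x700.  */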
static void gen_ieee_intcvt(DisasContext *ctx,
                            void (*helper)(TCGv, TCGv_ptr, TCGv),
                            int rb, int rc, int fn11)
{
    TCGv vb;

    /* ??? This is wrong: the instruction is not a nop, it still may
       raise exceptions.  */
    if (unlikely(rc == 31)) {
        return;
    }

    gen_qual_roundmode(ctx, fn11);

    if (rb == 31) {
        vb = tcg_const_i64(0);
    } else {
        vb = cpu_fir[rb];
    }

    /* The only exception that can be raised by integer conversion
       is inexact.  Thus we only need to worry about exceptions when
       inexact handling is requested.  */
    if (fn11 & QUAL_I) {
        gen_fp_exc_clear();
        helper(cpu_fir[rc], cpu_env, vb);
        gen_fp_exc_raise(rc, fn11);
    } else {
        helper(cpu_fir[rc], cpu_env, vb);
    }

    if (rb == 31) {
        tcg_temp_free(vb);
    }
}
#define IEEE_INTCVT(name)                                               \
    static inline void glue(gen_f, name)(DisasContext *ctx,             \
                                         int rb, int rc, int fn11)      \
    {                                                                   \
        gen_ieee_intcvt(ctx, gen_helper_##name, rb, rc, fn11);          \
    }
IEEE_INTCVT(cvtqs)
IEEE_INTCVT(cvtqt)
static void gen_cpys_internal(int ra, int rb, int rc, int inv_a, uint64_t mask)
{
    TCGv vmask, va, vb;
    int za = 0, zb = 0;

    if (unlikely(rc == 31)) {
        return;
    }

    vmask = tcg_const_i64(mask);

    if (ra == 31) {
        za = 1;
    } else {
        va = tcg_temp_new_i64();
        tcg_gen_mov_i64(va, cpu_fir[ra]);
        if (inv_a) {
            tcg_gen_andc_i64(va, vmask, va);
        } else {
            tcg_gen_and_i64(va, va, vmask);
        }
    }

    if (rb == 31) {
        zb = 1;
    } else {
        vb = tcg_temp_new_i64();
        tcg_gen_andc_i64(vb, cpu_fir[rb], vmask);
    }

    switch (za << 1 | zb) {
    case 0:
        tcg_gen_or_i64(cpu_fir[rc], va, vb);
        break;
    case 1:
        tcg_gen_mov_i64(cpu_fir[rc], va);
        break;
    case 2:
        tcg_gen_mov_i64(cpu_fir[rc], vb);
        break;
    case 3:
        tcg_gen_movi_i64(cpu_fir[rc], 0);
        break;
    }

    tcg_temp_free(vmask);
    if (ra != 31) {
        tcg_temp_free(va);
    }
    if (rb != 31) {
        tcg_temp_free(vb);
    }
}
static inline void gen_fcpys(int ra, int rb, int rc)
{
    gen_cpys_internal(ra, rb, rc, 0, 0x8000000000000000ULL);
}

static inline void gen_fcpysn(int ra, int rb, int rc)
{
    gen_cpys_internal(ra, rb, rc, 1, 0x8000000000000000ULL);
}

static inline void gen_fcpyse(int ra, int rb, int rc)
{
    gen_cpys_internal(ra, rb, rc, 0, 0xFFF0000000000000ULL);
}
#define FARITH3(name)                                                   \
    static inline void glue(gen_f, name)(int ra, int rb, int rc)        \
    {                                                                   \
        TCGv va, vb;                                                    \
                                                                        \
        if (unlikely(rc == 31)) {                                       \
            return;                                                     \
        }                                                               \
        if (ra == 31) {                                                 \
            va = tcg_const_i64(0);                                      \
        } else {                                                        \
            va = cpu_fir[ra];                                           \
        }                                                               \
        if (rb == 31) {                                                 \
            vb = tcg_const_i64(0);                                      \
        } else {                                                        \
            vb = cpu_fir[rb];                                           \
        }                                                               \
                                                                        \
        gen_helper_ ## name(cpu_fir[rc], cpu_env, va, vb);              \
                                                                        \
        if (ra == 31) {                                                 \
            tcg_temp_free(va);                                          \
        }                                                               \
        if (rb == 31) {                                                 \
            tcg_temp_free(vb);                                          \
        }                                                               \
    }

/* ??? VAX instruction qualifiers ignored.  */
FARITH3(addf)
FARITH3(subf)
FARITH3(mulf)
FARITH3(divf)
FARITH3(addg)
FARITH3(subg)
FARITH3(mulg)
FARITH3(divg)
FARITH3(cmpgeq)
FARITH3(cmpglt)
FARITH3(cmpgle)
static void gen_ieee_arith3(DisasContext *ctx,
                            void (*helper)(TCGv, TCGv_ptr, TCGv, TCGv),
                            int ra, int rb, int rc, int fn11)
{
    TCGv va, vb;

    /* ??? This is wrong: the instruction is not a nop, it still may
       raise exceptions.  */
    if (unlikely(rc == 31)) {
        return;
    }

    gen_qual_roundmode(ctx, fn11);
    gen_qual_flushzero(ctx, fn11);
    gen_fp_exc_clear();

    va = gen_ieee_input(ra, fn11, 0);
    vb = gen_ieee_input(rb, fn11, 0);
    helper(cpu_fir[rc], cpu_env, va, vb);
    tcg_temp_free(va);
    tcg_temp_free(vb);

    gen_fp_exc_raise(rc, fn11);
}
#define IEEE_ARITH3(name)                                               \
    static inline void glue(gen_f, name)(DisasContext *ctx,             \
                                         int ra, int rb, int rc, int fn11) \
    {                                                                   \
        gen_ieee_arith3(ctx, gen_helper_##name, ra, rb, rc, fn11);      \
    }
IEEE_ARITH3(adds)
IEEE_ARITH3(subs)
IEEE_ARITH3(muls)
IEEE_ARITH3(divs)
IEEE_ARITH3(addt)
IEEE_ARITH3(subt)
IEEE_ARITH3(mult)
IEEE_ARITH3(divt)
static void gen_ieee_compare(DisasContext *ctx,
                             void (*helper)(TCGv, TCGv_ptr, TCGv, TCGv),
                             int ra, int rb, int rc, int fn11)
{
    TCGv va, vb;

    /* ??? This is wrong: the instruction is not a nop, it still may
       raise exceptions.  */
    if (unlikely(rc == 31)) {
        return;
    }

    gen_fp_exc_clear();

    va = gen_ieee_input(ra, fn11, 1);
    vb = gen_ieee_input(rb, fn11, 1);
    helper(cpu_fir[rc], cpu_env, va, vb);
    tcg_temp_free(va);
    tcg_temp_free(vb);

    gen_fp_exc_raise(rc, fn11);
}
#define IEEE_CMP3(name)                                                 \
    static inline void glue(gen_f, name)(DisasContext *ctx,             \
                                         int ra, int rb, int rc, int fn11) \
    {                                                                   \
        gen_ieee_compare(ctx, gen_helper_##name, ra, rb, rc, fn11);     \
    }
IEEE_CMP3(cmptun)
IEEE_CMP3(cmpteq)
IEEE_CMP3(cmptlt)
IEEE_CMP3(cmptle)
static inline uint64_t zapnot_mask(uint8_t lit)
{
    uint64_t mask = 0;
    int i;

    for (i = 0; i < 8; ++i) {
        if ((lit >> i) & 1) {
            mask |= 0xffull << (i * 8);
        }
    }
    return mask;
}
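/* For example, zapnot_mask(0x01) == 0x00000000000000ffull,
   zapnot_mask(0x0f) == 0x00000000ffffffffull, and
   zapnot_mask(0xff) == 0xffffffffffffffffull.  */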
/* Implement zapnot with an immediate operand, which expands to some
   form of immediate AND.  This is a basic building block in the
   definition of many of the other byte manipulation instructions.  */
static void gen_zapnoti(TCGv dest, TCGv src, uint8_t lit)
{
    switch (lit) {
    case 0x00:
        tcg_gen_movi_i64(dest, 0);
        break;
    case 0x01:
        tcg_gen_ext8u_i64(dest, src);
        break;
    case 0x03:
        tcg_gen_ext16u_i64(dest, src);
        break;
    case 0x0f:
        tcg_gen_ext32u_i64(dest, src);
        break;
    case 0xff:
        tcg_gen_mov_i64(dest, src);
        break;
    default:
        tcg_gen_andi_i64(dest, src, zapnot_mask(lit));
        break;
    }
}
static inline void gen_zapnot(int ra, int rb, int rc, int islit, uint8_t lit)
{
    if (unlikely(rc == 31))
        return;
    else if (unlikely(ra == 31))
        tcg_gen_movi_i64(cpu_ir[rc], 0);
    else if (islit)
        gen_zapnoti(cpu_ir[rc], cpu_ir[ra], lit);
    else
        gen_helper_zapnot(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
}

static inline void gen_zap(int ra, int rb, int rc, int islit, uint8_t lit)
{
    if (unlikely(rc == 31))
        return;
    else if (unlikely(ra == 31))
        tcg_gen_movi_i64(cpu_ir[rc], 0);
    else if (islit)
        gen_zapnoti(cpu_ir[rc], cpu_ir[ra], ~lit);
    else
        gen_helper_zap(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
}
/* EXTWH, EXTLH, EXTQH */
static void gen_ext_h(int ra, int rb, int rc, int islit,
                      uint8_t lit, uint8_t byte_mask)
{
    if (unlikely(rc == 31))
        return;
    else if (unlikely(ra == 31))
        tcg_gen_movi_i64(cpu_ir[rc], 0);
    else {
        if (islit) {
            lit = (64 - (lit & 7) * 8) & 0x3f;
            tcg_gen_shli_i64(cpu_ir[rc], cpu_ir[ra], lit);
        } else {
            TCGv tmp1 = tcg_temp_new();
            tcg_gen_andi_i64(tmp1, cpu_ir[rb], 7);
            tcg_gen_shli_i64(tmp1, tmp1, 3);
            tcg_gen_neg_i64(tmp1, tmp1);
            tcg_gen_andi_i64(tmp1, tmp1, 0x3f);
            tcg_gen_shl_i64(cpu_ir[rc], cpu_ir[ra], tmp1);
            tcg_temp_free(tmp1);
        }
        gen_zapnoti(cpu_ir[rc], cpu_ir[rc], byte_mask);
    }
}
/* EXTBL, EXTWL, EXTLL, EXTQL */
static void gen_ext_l(int ra, int rb, int rc, int islit,
                      uint8_t lit, uint8_t byte_mask)
{
    if (unlikely(rc == 31))
        return;
    else if (unlikely(ra == 31))
        tcg_gen_movi_i64(cpu_ir[rc], 0);
    else {
        if (islit) {
            tcg_gen_shri_i64(cpu_ir[rc], cpu_ir[ra], (lit & 7) * 8);
        } else {
            TCGv tmp = tcg_temp_new();
            tcg_gen_andi_i64(tmp, cpu_ir[rb], 7);
            tcg_gen_shli_i64(tmp, tmp, 3);
            tcg_gen_shr_i64(cpu_ir[rc], cpu_ir[ra], tmp);
            tcg_temp_free(tmp);
        }
        gen_zapnoti(cpu_ir[rc], cpu_ir[rc], byte_mask);
    }
}
/* INSWH, INSLH, INSQH */
static void gen_ins_h(int ra, int rb, int rc, int islit,
                      uint8_t lit, uint8_t byte_mask)
{
    if (unlikely(rc == 31))
        return;
    else if (unlikely(ra == 31) || (islit && (lit & 7) == 0))
        tcg_gen_movi_i64(cpu_ir[rc], 0);
    else {
        TCGv tmp = tcg_temp_new();

        /* The instruction description has us left-shift the byte mask
           and extract bits <15:8> and apply that zap at the end.  This
           is equivalent to simply performing the zap first and shifting
           afterward.  */
        gen_zapnoti(tmp, cpu_ir[ra], byte_mask);

        if (islit) {
            /* Note that we have handled the lit==0 case above.  */
            tcg_gen_shri_i64(cpu_ir[rc], tmp, 64 - (lit & 7) * 8);
        } else {
            TCGv shift = tcg_temp_new();

            /* If (B & 7) == 0, we need to shift by 64 and leave a zero.
               Do this portably by splitting the shift into two parts:
               shift_count-1 and 1.  Arrange for the -1 by using
               ones-complement instead of twos-complement in the negation:
               ~((B & 7) * 8) & 63.  */

            tcg_gen_andi_i64(shift, cpu_ir[rb], 7);
            tcg_gen_shli_i64(shift, shift, 3);
            tcg_gen_not_i64(shift, shift);
            tcg_gen_andi_i64(shift, shift, 0x3f);

            tcg_gen_shr_i64(cpu_ir[rc], tmp, shift);
            tcg_gen_shri_i64(cpu_ir[rc], cpu_ir[rc], 1);
            tcg_temp_free(shift);
        }
        tcg_temp_free(tmp);
    }
}
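/* Worked example for the variable-shift path above: for (B & 7) == 2
   the required right shift is 64 - 16 = 48, and ~(2 * 8) & 63 == 47
   plus the constant shift of 1 gives exactly 48.  For (B & 7) == 0 it
   gives 63 + 1 == 64, producing the required zero without an
   out-of-range shift count.  */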
/* INSBL, INSWL, INSLL, INSQL */
static void gen_ins_l(int ra, int rb, int rc, int islit,
                      uint8_t lit, uint8_t byte_mask)
{
    if (unlikely(rc == 31))
        return;
    else if (unlikely(ra == 31))
        tcg_gen_movi_i64(cpu_ir[rc], 0);
    else {
        TCGv tmp = tcg_temp_new();

        /* The instruction description has us left-shift the byte mask
           the same number of byte slots as the data and apply the zap
           at the end.  This is equivalent to simply performing the zap
           first and shifting afterward.  */
        gen_zapnoti(tmp, cpu_ir[ra], byte_mask);

        if (islit) {
            tcg_gen_shli_i64(cpu_ir[rc], tmp, (lit & 7) * 8);
        } else {
            TCGv shift = tcg_temp_new();
            tcg_gen_andi_i64(shift, cpu_ir[rb], 7);
            tcg_gen_shli_i64(shift, shift, 3);
            tcg_gen_shl_i64(cpu_ir[rc], tmp, shift);
            tcg_temp_free(shift);
        }
        tcg_temp_free(tmp);
    }
}
/* MSKWH, MSKLH, MSKQH */
static void gen_msk_h(int ra, int rb, int rc, int islit,
                      uint8_t lit, uint8_t byte_mask)
{
    if (unlikely(rc == 31))
        return;
    else if (unlikely(ra == 31))
        tcg_gen_movi_i64(cpu_ir[rc], 0);
    else if (islit)
        gen_zapnoti(cpu_ir[rc], cpu_ir[ra], ~((byte_mask << (lit & 7)) >> 8));
    else {
        TCGv shift = tcg_temp_new();
        TCGv mask = tcg_temp_new();

        /* The instruction description is as above, where the byte_mask
           is shifted left, and then we extract bits <15:8>.  This can be
           emulated with a right-shift on the expanded byte mask.  This
           requires extra care because for an input <2:0> == 0 we need a
           shift of 64 bits in order to generate a zero.  This is done by
           splitting the shift into two parts, the variable shift - 1
           followed by a constant 1 shift.  The code we expand below is
           equivalent to ~((B & 7) * 8) & 63.  */

        tcg_gen_andi_i64(shift, cpu_ir[rb], 7);
        tcg_gen_shli_i64(shift, shift, 3);
        tcg_gen_not_i64(shift, shift);
        tcg_gen_andi_i64(shift, shift, 0x3f);
        tcg_gen_movi_i64(mask, zapnot_mask(byte_mask));
        tcg_gen_shr_i64(mask, mask, shift);
        tcg_gen_shri_i64(mask, mask, 1);

        tcg_gen_andc_i64(cpu_ir[rc], cpu_ir[ra], mask);

        tcg_temp_free(mask);
        tcg_temp_free(shift);
    }
}
/* MSKBL, MSKWL, MSKLL, MSKQL */
static void gen_msk_l(int ra, int rb, int rc, int islit,
                      uint8_t lit, uint8_t byte_mask)
{
    if (unlikely(rc == 31))
        return;
    else if (unlikely(ra == 31))
        tcg_gen_movi_i64(cpu_ir[rc], 0);
    else if (islit)
        gen_zapnoti(cpu_ir[rc], cpu_ir[ra], ~(byte_mask << (lit & 7)));
    else {
        TCGv shift = tcg_temp_new();
        TCGv mask = tcg_temp_new();

        tcg_gen_andi_i64(shift, cpu_ir[rb], 7);
        tcg_gen_shli_i64(shift, shift, 3);
        tcg_gen_movi_i64(mask, zapnot_mask(byte_mask));
        tcg_gen_shl_i64(mask, mask, shift);

        tcg_gen_andc_i64(cpu_ir[rc], cpu_ir[ra], mask);

        tcg_temp_free(mask);
        tcg_temp_free(shift);
    }
}
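/* E.g. MSKBL (byte_mask 0x01) with a literal byte offset of 2 computes
   gen_zapnoti(..., ~(0x01 << 2)), which clears exactly byte 2 of RA.  */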
/* Code to call arith3 helpers */
#define ARITH3(name)                                                    \
    static inline void glue(gen_, name)(int ra, int rb, int rc, int islit, \
                                        uint8_t lit)                    \
    {                                                                   \
        if (unlikely(rc == 31))                                         \
            return;                                                     \
                                                                        \
        if (ra != 31) {                                                 \
            if (islit) {                                                \
                TCGv tmp = tcg_const_i64(lit);                          \
                gen_helper_ ## name(cpu_ir[rc], cpu_ir[ra], tmp);       \
                tcg_temp_free(tmp);                                     \
            } else                                                      \
                gen_helper_ ## name(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]); \
        } else {                                                        \
            TCGv tmp1 = tcg_const_i64(0);                               \
            if (islit) {                                                \
                TCGv tmp2 = tcg_const_i64(lit);                         \
                gen_helper_ ## name(cpu_ir[rc], tmp1, tmp2);            \
                tcg_temp_free(tmp2);                                    \
            } else                                                      \
                gen_helper_ ## name(cpu_ir[rc], tmp1, cpu_ir[rb]);      \
            tcg_temp_free(tmp1);                                        \
        }                                                               \
    }
ARITH3(cmpbge)
ARITH3(minub8)
ARITH3(minsb8)
ARITH3(minuw4)
ARITH3(minsw4)
ARITH3(maxub8)
ARITH3(maxsb8)
ARITH3(maxuw4)
ARITH3(maxsw4)
ARITH3(perr)
/* Code to call arith3 helpers */
#define ARITH3_EX(name)                                                 \
    static inline void glue(gen_, name)(int ra, int rb, int rc,         \
                                        int islit, uint8_t lit)         \
    {                                                                   \
        if (unlikely(rc == 31)) {                                       \
            return;                                                     \
        }                                                               \
        if (ra != 31) {                                                 \
            if (islit) {                                                \
                TCGv tmp = tcg_const_i64(lit);                          \
                gen_helper_ ## name(cpu_ir[rc], cpu_env,                \
                                    cpu_ir[ra], tmp);                   \
                tcg_temp_free(tmp);                                     \
            } else {                                                    \
                gen_helper_ ## name(cpu_ir[rc], cpu_env,                \
                                    cpu_ir[ra], cpu_ir[rb]);            \
            }                                                           \
        } else {                                                        \
            TCGv tmp1 = tcg_const_i64(0);                               \
            if (islit) {                                                \
                TCGv tmp2 = tcg_const_i64(lit);                         \
                gen_helper_ ## name(cpu_ir[rc], cpu_env, tmp1, tmp2);   \
                tcg_temp_free(tmp2);                                    \
            } else {                                                    \
                gen_helper_ ## name(cpu_ir[rc], cpu_env, tmp1, cpu_ir[rb]); \
            }                                                           \
            tcg_temp_free(tmp1);                                        \
        }                                                               \
    }
ARITH3_EX(addlv)
ARITH3_EX(sublv)
ARITH3_EX(addqv)
ARITH3_EX(subqv)
ARITH3_EX(mullv)
ARITH3_EX(mulqv)
#define MVIOP2(name)                                            \
    static inline void glue(gen_, name)(int rb, int rc)         \
    {                                                           \
        if (unlikely(rc == 31))                                 \
            return;                                             \
        if (unlikely(rb == 31))                                 \
            tcg_gen_movi_i64(cpu_ir[rc], 0);                    \
        else                                                    \
            gen_helper_ ## name(cpu_ir[rc], cpu_ir[rb]);        \
    }
MVIOP2(pklb)
MVIOP2(pkwb)
MVIOP2(unpkbl)
MVIOP2(unpkbw)
static void gen_cmp(TCGCond cond, int ra, int rb, int rc,
                    int islit, uint8_t lit)
{
    TCGv va, vb;

    if (unlikely(rc == 31)) {
        return;
    }

    if (ra == 31) {
        va = tcg_const_i64(0);
    } else {
        va = cpu_ir[ra];
    }
    if (islit) {
        vb = tcg_const_i64(lit);
    } else {
        vb = cpu_ir[rb];
    }

    tcg_gen_setcond_i64(cond, cpu_ir[rc], va, vb);

    if (ra == 31) {
        tcg_temp_free(va);
    }
    if (islit) {
        tcg_temp_free(vb);
    }
}
static void gen_rx(int ra, int set)
{
    TCGv_i32 tmp;

    if (ra != 31) {
        tcg_gen_ld8u_i64(cpu_ir[ra], cpu_env, offsetof(CPUAlphaState, intr_flag));
    }

    tmp = tcg_const_i32(set);
    tcg_gen_st8_i32(tmp, cpu_env, offsetof(CPUAlphaState, intr_flag));
    tcg_temp_free_i32(tmp);
}
static ExitStatus gen_call_pal(DisasContext *ctx, int palcode)
{
    /* We're emulating OSF/1 PALcode.  Many of these are trivial access
       to internal cpu registers.  */

    /* Unprivileged PAL call */
    if (palcode >= 0x80 && palcode < 0xC0) {
        switch (palcode) {
        case 0x86:
            /* IMB */
            /* No-op inside QEMU.  */
            break;
        case 0x9E:
            /* RDUNIQUE */
            tcg_gen_mov_i64(cpu_ir[IR_V0], cpu_unique);
            break;
        case 0x9F:
            /* WRUNIQUE */
            tcg_gen_mov_i64(cpu_unique, cpu_ir[IR_A0]);
            break;
        default:
            palcode &= 0xbf;
            goto do_call_pal;
        }
        return NO_EXIT;
    }

#ifndef CONFIG_USER_ONLY
    /* Privileged PAL code */
    if (palcode < 0x40 && (ctx->tb->flags & TB_FLAGS_USER_MODE) == 0) {
        switch (palcode) {
        case 0x01:
            /* CFLUSH */
            /* No-op inside QEMU.  */
            break;
        case 0x02:
            /* DRAINA */
            /* No-op inside QEMU.  */
            break;
        case 0x2D:
            /* WRVPTPTR */
            tcg_gen_st_i64(cpu_ir[IR_A0], cpu_env, offsetof(CPUAlphaState, vptptr));
            break;
        case 0x31:
            /* WRVAL */
            tcg_gen_mov_i64(cpu_sysval, cpu_ir[IR_A0]);
            break;
        case 0x32:
            /* RDVAL */
            tcg_gen_mov_i64(cpu_ir[IR_V0], cpu_sysval);
            break;

        case 0x35: {
            /* SWPIPL */
            TCGv tmp;

            /* Note that we already know we're in kernel mode, so we know
               that PS only contains the 3 IPL bits.  */
            tcg_gen_ld8u_i64(cpu_ir[IR_V0], cpu_env, offsetof(CPUAlphaState, ps));

            /* But make sure and store only the 3 IPL bits from the user.  */
            tmp = tcg_temp_new();
            tcg_gen_andi_i64(tmp, cpu_ir[IR_A0], PS_INT_MASK);
            tcg_gen_st8_i64(tmp, cpu_env, offsetof(CPUAlphaState, ps));
            tcg_temp_free(tmp);
            break;
        }

        case 0x36:
            /* RDPS */
            tcg_gen_ld8u_i64(cpu_ir[IR_V0], cpu_env, offsetof(CPUAlphaState, ps));
            break;
        case 0x38:
            /* WRUSP */
            tcg_gen_mov_i64(cpu_usp, cpu_ir[IR_A0]);
            break;
        case 0x3A:
            /* RDUSP */
            tcg_gen_mov_i64(cpu_ir[IR_V0], cpu_usp);
            break;
        case 0x3C:
            /* WHAMI */
            tcg_gen_ld32s_i64(cpu_ir[IR_V0], cpu_env,
                -offsetof(AlphaCPU, env) + offsetof(CPUState, cpu_index));
            break;

        default:
            palcode &= 0x3f;
            goto do_call_pal;
        }
        return NO_EXIT;
    }
#endif
    return gen_invalid(ctx);

 do_call_pal:
#ifdef CONFIG_USER_ONLY
    return gen_excp(ctx, EXCP_CALL_PAL, palcode);
#else
    {
        TCGv pc = tcg_const_i64(ctx->pc);
        TCGv entry = tcg_const_i64(palcode & 0x80
                                   ? 0x2000 + (palcode - 0x80) * 64
                                   : 0x1000 + palcode * 64);

        gen_helper_call_pal(cpu_env, pc, entry);

        tcg_temp_free(entry);
        tcg_temp_free(pc);

        /* Since the destination is running in PALmode, we don't really
           need the page permissions check.  We'll see the existence of
           the page when we create the TB, and we'll flush all TBs if
           we change the PAL base register.  */
        if (!ctx->singlestep_enabled && !(ctx->tb->cflags & CF_LAST_IO)) {
            tcg_gen_goto_tb(0);
            tcg_gen_exit_tb((tcg_target_long)ctx->tb);
            return EXIT_GOTO_TB;
        }

        return EXIT_PC_UPDATED;
    }
#endif
}
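/* E.g. the unprivileged CALL_PAL 0x83 (callsys under OSF/1) enters the
   emulated PALcode at offset 0x2000 + (0x83 - 0x80) * 64 == 0x20c0.  */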
#ifndef CONFIG_USER_ONLY

#define PR_BYTE         0x100000
#define PR_LONG         0x200000

static int cpu_pr_data(int pr)
{
    switch (pr) {
    case  0: return offsetof(CPUAlphaState, ps) | PR_BYTE;
    case  1: return offsetof(CPUAlphaState, fen) | PR_BYTE;
    case  2: return offsetof(CPUAlphaState, pcc_ofs) | PR_LONG;
    case  3: return offsetof(CPUAlphaState, trap_arg0);
    case  4: return offsetof(CPUAlphaState, trap_arg1);
    case  5: return offsetof(CPUAlphaState, trap_arg2);
    case  6: return offsetof(CPUAlphaState, exc_addr);
    case  7: return offsetof(CPUAlphaState, palbr);
    case  8: return offsetof(CPUAlphaState, ptbr);
    case  9: return offsetof(CPUAlphaState, vptptr);
    case 10: return offsetof(CPUAlphaState, unique);
    case 11: return offsetof(CPUAlphaState, sysval);
    case 12: return offsetof(CPUAlphaState, usp);

    case 32 ... 39:
        return offsetof(CPUAlphaState, shadow[pr - 32]);
    case 40 ... 63:
        return offsetof(CPUAlphaState, scratch[pr - 40]);

    case 251:
        return offsetof(CPUAlphaState, alarm_expire);
    }
    return 0;
}
static ExitStatus gen_mfpr(int ra, int regno)
{
    int data = cpu_pr_data(regno);

    /* In our emulated PALcode, these processor registers have no
       side effects from reading.  */
    if (ra == 31) {
        return NO_EXIT;
    }

    /* Special help for VMTIME and WALLTIME.  */
    if (regno == 250 || regno == 249) {
        void (*helper)(TCGv) = gen_helper_get_walltime;
        if (regno == 249) {
            helper = gen_helper_get_vmtime;
        }
        if (use_icount) {
            gen_io_start();
            helper(cpu_ir[ra]);
            gen_io_end();
            return EXIT_PC_STALE;
        } else {
            helper(cpu_ir[ra]);
            return NO_EXIT;
        }
    }

    /* The basic registers are data only, and unknown registers
       are read-zero, write-ignore.  */
    if (data == 0) {
        tcg_gen_movi_i64(cpu_ir[ra], 0);
    } else if (data & PR_BYTE) {
        tcg_gen_ld8u_i64(cpu_ir[ra], cpu_env, data & ~PR_BYTE);
    } else if (data & PR_LONG) {
        tcg_gen_ld32s_i64(cpu_ir[ra], cpu_env, data & ~PR_LONG);
    } else {
        tcg_gen_ld_i64(cpu_ir[ra], cpu_env, data);
    }
    return NO_EXIT;
}
static ExitStatus gen_mtpr(DisasContext *ctx, int rb, int regno)
{
    TCGv tmp;
    int data;

    if (rb == 31) {
        tmp = tcg_const_i64(0);
    } else {
        tmp = cpu_ir[rb];
    }

    switch (regno) {
    case 255:
        /* TBIA */
        gen_helper_tbia(cpu_env);
        break;

    case 254:
        /* TBIS */
        gen_helper_tbis(cpu_env, tmp);
        break;

    case 253:
        /* WAIT */
        tmp = tcg_const_i64(1);
        tcg_gen_st32_i64(tmp, cpu_env, -offsetof(AlphaCPU, env) +
                                       offsetof(CPUState, halted));
        return gen_excp(ctx, EXCP_HLT, 0);

    case 252:
        /* HALT */
        gen_helper_halt(tmp);
        return EXIT_PC_STALE;

    case 251:
        /* ALARM */
        gen_helper_set_alarm(cpu_env, tmp);
        break;

    case 7:
        /* PALBR */
        tcg_gen_st_i64(tmp, cpu_env, offsetof(CPUAlphaState, palbr));
        /* Changing the PAL base register implies un-chaining all of the TBs
           that ended with a CALL_PAL.  Since the base register usually only
           changes during boot, flushing everything works well.  */
        gen_helper_tb_flush(cpu_env);
        return EXIT_PC_STALE;

    default:
        /* The basic registers are data only, and unknown registers
           are read-zero, write-ignore.  */
        data = cpu_pr_data(regno);
        if (data != 0) {
            if (data & PR_BYTE) {
                tcg_gen_st8_i64(tmp, cpu_env, data & ~PR_BYTE);
            } else if (data & PR_LONG) {
                tcg_gen_st32_i64(tmp, cpu_env, data & ~PR_LONG);
            } else {
                tcg_gen_st_i64(tmp, cpu_env, data);
            }
        }
        break;
    }

    if (rb == 31) {
        tcg_temp_free(tmp);
    }

    return NO_EXIT;
}
#endif /* !USER_ONLY*/
static ExitStatus translate_one(DisasContext *ctx, uint32_t insn)
{
    uint32_t palcode;
    int32_t disp21, disp16;
#ifndef CONFIG_USER_ONLY
    int32_t disp12;
#endif
    uint16_t fn11;
    uint8_t opc, ra, rb, rc, fpfn, fn7, islit, real_islit;
    uint8_t lit;
    ExitStatus ret;

    /* Decode all instruction fields */
    opc = insn >> 26;
    ra = (insn >> 21) & 0x1F;
    rb = (insn >> 16) & 0x1F;
    rc = insn & 0x1F;
    real_islit = islit = (insn >> 12) & 1;
    if (rb == 31 && !islit) {
        islit = 1;
        lit = 0;
    } else {
        lit = (insn >> 13) & 0xFF;
    }
    palcode = insn & 0x03FFFFFF;
    disp21 = ((int32_t)((insn & 0x001FFFFF) << 11)) >> 11;
    disp16 = (int16_t)(insn & 0x0000FFFF);
#ifndef CONFIG_USER_ONLY
    disp12 = (int32_t)((insn & 0x00000FFF) << 20) >> 20;
#endif
    fn11 = (insn >> 5) & 0x000007FF;
    fpfn = fn11 & 0x3F;
    fn7 = (insn >> 5) & 0x0000007F;
    LOG_DISAS("opc %02x ra %2d rb %2d rc %2d disp16 %6d\n",
              opc, ra, rb, rc, disp16);
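    /* E.g. a disp21 field of all ones, 0x1FFFFF, becomes
       ((0x1FFFFF << 11) as int32_t) >> 11 == -1: the shift pair
       sign-extends the 21-bit branch displacement.  */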
    ret = NO_EXIT;
    switch (opc) {
    case 0x00:
        /* CALL_PAL */
        ret = gen_call_pal(ctx, palcode);
        break;
    case 0x08:
        /* LDA */
        if (likely(ra != 31)) {
            if (rb != 31) {
                tcg_gen_addi_i64(cpu_ir[ra], cpu_ir[rb], disp16);
            } else {
                tcg_gen_movi_i64(cpu_ir[ra], disp16);
            }
        }
        break;
    case 0x09:
        /* LDAH */
        if (likely(ra != 31)) {
            if (rb != 31) {
                tcg_gen_addi_i64(cpu_ir[ra], cpu_ir[rb], disp16 << 16);
            } else {
                tcg_gen_movi_i64(cpu_ir[ra], disp16 << 16);
            }
        }
        break;
    case 0x0A:
        /* LDBU */
        if (ctx->tb->flags & TB_FLAGS_AMASK_BWX) {
            gen_load_mem(ctx, &tcg_gen_qemu_ld8u, ra, rb, disp16, 0, 0);
            break;
        }
        goto invalid_opc;
    case 0x0B:
        /* LDQ_U */
        gen_load_mem(ctx, &tcg_gen_qemu_ld64, ra, rb, disp16, 0, 1);
        break;
    case 0x0C:
        /* LDWU */
        if (ctx->tb->flags & TB_FLAGS_AMASK_BWX) {
            gen_load_mem(ctx, &tcg_gen_qemu_ld16u, ra, rb, disp16, 0, 0);
            break;
        }
        goto invalid_opc;
    case 0x0D:
        /* STW */
        gen_store_mem(ctx, &tcg_gen_qemu_st16, ra, rb, disp16, 0, 0);
        break;
    case 0x0E:
        /* STB */
        gen_store_mem(ctx, &tcg_gen_qemu_st8, ra, rb, disp16, 0, 0);
        break;
    case 0x0F:
        /* STQ_U */
        gen_store_mem(ctx, &tcg_gen_qemu_st64, ra, rb, disp16, 0, 1);
        break;
    case 0x10:
        switch (fn7) {
        case 0x00:
            /* ADDL */
            if (likely(rc != 31)) {
                if (ra != 31) {
                    if (islit) {
                        tcg_gen_addi_i64(cpu_ir[rc], cpu_ir[ra], lit);
                        tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rc]);
                    } else {
                        tcg_gen_add_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
                        tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rc]);
                    }
                } else {
                    if (islit) {
                        tcg_gen_movi_i64(cpu_ir[rc], lit);
                    } else {
                        tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rb]);
                    }
                }
            }
            break;
        case 0x02:
            /* S4ADDL */
            if (likely(rc != 31)) {
                if (ra != 31) {
                    TCGv tmp = tcg_temp_new();
                    tcg_gen_shli_i64(tmp, cpu_ir[ra], 2);
                    if (islit) {
                        tcg_gen_addi_i64(tmp, tmp, lit);
                    } else {
                        tcg_gen_add_i64(tmp, tmp, cpu_ir[rb]);
                    }
                    tcg_gen_ext32s_i64(cpu_ir[rc], tmp);
                    tcg_temp_free(tmp);
                } else {
                    if (islit) {
                        tcg_gen_movi_i64(cpu_ir[rc], lit);
                    } else {
                        tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rb]);
                    }
                }
            }
            break;
        case 0x09:
            /* SUBL */
            if (likely(rc != 31)) {
                if (ra != 31) {
                    if (islit) {
                        tcg_gen_subi_i64(cpu_ir[rc], cpu_ir[ra], lit);
                    } else {
                        tcg_gen_sub_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
                    }
                    tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rc]);
                } else {
                    if (islit) {
                        tcg_gen_movi_i64(cpu_ir[rc], -lit);
                    } else {
                        tcg_gen_neg_i64(cpu_ir[rc], cpu_ir[rb]);
                        tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rc]);
                    }
                }
            }
            break;
        case 0x0B:
            /* S4SUBL */
            if (likely(rc != 31)) {
                if (ra != 31) {
                    TCGv tmp = tcg_temp_new();
                    tcg_gen_shli_i64(tmp, cpu_ir[ra], 2);
                    if (islit) {
                        tcg_gen_subi_i64(tmp, tmp, lit);
                    } else {
                        tcg_gen_sub_i64(tmp, tmp, cpu_ir[rb]);
                    }
                    tcg_gen_ext32s_i64(cpu_ir[rc], tmp);
                    tcg_temp_free(tmp);
                } else {
                    if (islit) {
                        tcg_gen_movi_i64(cpu_ir[rc], -lit);
                    } else {
                        tcg_gen_neg_i64(cpu_ir[rc], cpu_ir[rb]);
                        tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rc]);
                    }
                }
            }
            break;
        case 0x0F:
            /* CMPBGE */
            gen_cmpbge(ra, rb, rc, islit, lit);
            break;
        case 0x12:
            /* S8ADDL */
            if (likely(rc != 31)) {
                if (ra != 31) {
                    TCGv tmp = tcg_temp_new();
                    tcg_gen_shli_i64(tmp, cpu_ir[ra], 3);
                    if (islit) {
                        tcg_gen_addi_i64(tmp, tmp, lit);
                    } else {
                        tcg_gen_add_i64(tmp, tmp, cpu_ir[rb]);
                    }
                    tcg_gen_ext32s_i64(cpu_ir[rc], tmp);
                    tcg_temp_free(tmp);
                } else {
                    if (islit) {
                        tcg_gen_movi_i64(cpu_ir[rc], lit);
                    } else {
                        tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rb]);
                    }
                }
            }
            break;
        case 0x1B:
            /* S8SUBL */
            if (likely(rc != 31)) {
                if (ra != 31) {
                    TCGv tmp = tcg_temp_new();
                    tcg_gen_shli_i64(tmp, cpu_ir[ra], 3);
                    if (islit) {
                        tcg_gen_subi_i64(tmp, tmp, lit);
                    } else {
                        tcg_gen_sub_i64(tmp, tmp, cpu_ir[rb]);
                    }
                    tcg_gen_ext32s_i64(cpu_ir[rc], tmp);
                    tcg_temp_free(tmp);
                } else {
                    if (islit) {
                        tcg_gen_movi_i64(cpu_ir[rc], -lit);
                    } else {
                        tcg_gen_neg_i64(cpu_ir[rc], cpu_ir[rb]);
                        tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rc]);
                    }
                }
            }
            break;
        case 0x1D:
            /* CMPULT */
            gen_cmp(TCG_COND_LTU, ra, rb, rc, islit, lit);
            break;
        case 0x20:
            /* ADDQ */
            if (likely(rc != 31)) {
                if (ra != 31) {
                    if (islit) {
                        tcg_gen_addi_i64(cpu_ir[rc], cpu_ir[ra], lit);
                    } else {
                        tcg_gen_add_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
                    }
                } else {
                    if (islit) {
                        tcg_gen_movi_i64(cpu_ir[rc], lit);
                    } else {
                        tcg_gen_mov_i64(cpu_ir[rc], cpu_ir[rb]);
                    }
                }
            }
            break;
        case 0x22:
            /* S4ADDQ */
            if (likely(rc != 31)) {
                if (ra != 31) {
                    TCGv tmp = tcg_temp_new();
                    tcg_gen_shli_i64(tmp, cpu_ir[ra], 2);
                    if (islit) {
                        tcg_gen_addi_i64(cpu_ir[rc], tmp, lit);
                    } else {
                        tcg_gen_add_i64(cpu_ir[rc], tmp, cpu_ir[rb]);
                    }
                    tcg_temp_free(tmp);
                } else {
                    if (islit) {
                        tcg_gen_movi_i64(cpu_ir[rc], lit);
                    } else {
                        tcg_gen_mov_i64(cpu_ir[rc], cpu_ir[rb]);
                    }
                }
            }
            break;
        case 0x29:
            /* SUBQ */
            if (likely(rc != 31)) {
                if (ra != 31) {
                    if (islit) {
                        tcg_gen_subi_i64(cpu_ir[rc], cpu_ir[ra], lit);
                    } else {
                        tcg_gen_sub_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
                    }
                } else {
                    if (islit) {
                        tcg_gen_movi_i64(cpu_ir[rc], -lit);
                    } else {
                        tcg_gen_neg_i64(cpu_ir[rc], cpu_ir[rb]);
                    }
                }
            }
            break;
        case 0x2B:
            /* S4SUBQ */
            if (likely(rc != 31)) {
                if (ra != 31) {
                    TCGv tmp = tcg_temp_new();
                    tcg_gen_shli_i64(tmp, cpu_ir[ra], 2);
                    if (islit) {
                        tcg_gen_subi_i64(cpu_ir[rc], tmp, lit);
                    } else {
                        tcg_gen_sub_i64(cpu_ir[rc], tmp, cpu_ir[rb]);
                    }
                    tcg_temp_free(tmp);
                } else {
                    if (islit) {
                        tcg_gen_movi_i64(cpu_ir[rc], -lit);
                    } else {
                        tcg_gen_neg_i64(cpu_ir[rc], cpu_ir[rb]);
                    }
                }
            }
            break;
        case 0x2D:
            /* CMPEQ */
            gen_cmp(TCG_COND_EQ, ra, rb, rc, islit, lit);
            break;
        case 0x32:
            /* S8ADDQ */
            if (likely(rc != 31)) {
                if (ra != 31) {
                    TCGv tmp = tcg_temp_new();
                    tcg_gen_shli_i64(tmp, cpu_ir[ra], 3);
                    if (islit) {
                        tcg_gen_addi_i64(cpu_ir[rc], tmp, lit);
                    } else {
                        tcg_gen_add_i64(cpu_ir[rc], tmp, cpu_ir[rb]);
                    }
                    tcg_temp_free(tmp);
                } else {
                    if (islit) {
                        tcg_gen_movi_i64(cpu_ir[rc], lit);
                    } else {
                        tcg_gen_mov_i64(cpu_ir[rc], cpu_ir[rb]);
                    }
                }
            }
            break;
        case 0x3B:
            /* S8SUBQ */
            if (likely(rc != 31)) {
                if (ra != 31) {
                    TCGv tmp = tcg_temp_new();
                    tcg_gen_shli_i64(tmp, cpu_ir[ra], 3);
                    if (islit) {
                        tcg_gen_subi_i64(cpu_ir[rc], tmp, lit);
                    } else {
                        tcg_gen_sub_i64(cpu_ir[rc], tmp, cpu_ir[rb]);
                    }
                    tcg_temp_free(tmp);
                } else {
                    if (islit) {
                        tcg_gen_movi_i64(cpu_ir[rc], -lit);
                    } else {
                        tcg_gen_neg_i64(cpu_ir[rc], cpu_ir[rb]);
                    }
                }
            }
            break;
        case 0x3D:
            /* CMPULE */
            gen_cmp(TCG_COND_LEU, ra, rb, rc, islit, lit);
            break;
        case 0x40:
            /* ADDL/V */
            gen_addlv(ra, rb, rc, islit, lit);
            break;
        case 0x49:
            /* SUBL/V */
            gen_sublv(ra, rb, rc, islit, lit);
            break;
        case 0x4D:
            /* CMPLT */
            gen_cmp(TCG_COND_LT, ra, rb, rc, islit, lit);
            break;
        case 0x60:
            /* ADDQ/V */
            gen_addqv(ra, rb, rc, islit, lit);
            break;
        case 0x69:
            /* SUBQ/V */
            gen_subqv(ra, rb, rc, islit, lit);
            break;
        case 0x6D:
            /* CMPLE */
            gen_cmp(TCG_COND_LE, ra, rb, rc, islit, lit);
            break;
        default:
            goto invalid_opc;
        }
        break;
    case 0x11:
        switch (fn7) {
        case 0x00:
            /* AND */
            if (likely(rc != 31)) {
                if (ra == 31) {
                    tcg_gen_movi_i64(cpu_ir[rc], 0);
                } else if (islit) {
                    tcg_gen_andi_i64(cpu_ir[rc], cpu_ir[ra], lit);
                } else {
                    tcg_gen_and_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
                }
            }
            break;
        case 0x08:
            /* BIC */
            if (likely(rc != 31)) {
                if (ra != 31) {
                    if (islit) {
                        tcg_gen_andi_i64(cpu_ir[rc], cpu_ir[ra], ~lit);
                    } else {
                        tcg_gen_andc_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
                    }
                } else {
                    tcg_gen_movi_i64(cpu_ir[rc], 0);
                }
            }
            break;
        case 0x14:
            /* CMOVLBS */
            gen_cmov(TCG_COND_NE, ra, rb, rc, islit, lit, 1);
            break;
        case 0x16:
            /* CMOVLBC */
            gen_cmov(TCG_COND_EQ, ra, rb, rc, islit, lit, 1);
            break;
        case 0x20:
            /* BIS */
            if (likely(rc != 31)) {
                if (ra != 31) {
                    if (islit) {
                        tcg_gen_ori_i64(cpu_ir[rc], cpu_ir[ra], lit);
                    } else {
                        tcg_gen_or_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
                    }
                } else {
                    if (islit) {
                        tcg_gen_movi_i64(cpu_ir[rc], lit);
                    } else {
                        tcg_gen_mov_i64(cpu_ir[rc], cpu_ir[rb]);
                    }
                }
            }
            break;
        case 0x24:
            /* CMOVEQ */
            gen_cmov(TCG_COND_EQ, ra, rb, rc, islit, lit, 0);
            break;
        case 0x26:
            /* CMOVNE */
            gen_cmov(TCG_COND_NE, ra, rb, rc, islit, lit, 0);
            break;
        case 0x28:
            /* ORNOT */
            if (likely(rc != 31)) {
                if (ra != 31) {
                    if (islit) {
                        tcg_gen_ori_i64(cpu_ir[rc], cpu_ir[ra], ~lit);
                    } else {
                        tcg_gen_orc_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
                    }
                } else {
                    if (islit) {
                        tcg_gen_movi_i64(cpu_ir[rc], ~lit);
                    } else {
                        tcg_gen_not_i64(cpu_ir[rc], cpu_ir[rb]);
                    }
                }
            }
            break;
        case 0x40:
            /* XOR */
            if (likely(rc != 31)) {
                if (ra != 31) {
                    if (islit) {
                        tcg_gen_xori_i64(cpu_ir[rc], cpu_ir[ra], lit);
                    } else {
                        tcg_gen_xor_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
                    }
                } else {
                    if (islit) {
                        tcg_gen_movi_i64(cpu_ir[rc], lit);
                    } else {
                        tcg_gen_mov_i64(cpu_ir[rc], cpu_ir[rb]);
                    }
                }
            }
            break;
        case 0x44:
            /* CMOVLT */
            gen_cmov(TCG_COND_LT, ra, rb, rc, islit, lit, 0);
            break;
        case 0x46:
            /* CMOVGE */
            gen_cmov(TCG_COND_GE, ra, rb, rc, islit, lit, 0);
            break;
        case 0x48:
            /* EQV */
            if (likely(rc != 31)) {
                if (ra != 31) {
                    if (islit) {
                        tcg_gen_xori_i64(cpu_ir[rc], cpu_ir[ra], ~lit);
                    } else {
                        tcg_gen_eqv_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
                    }
                } else {
                    if (islit) {
                        tcg_gen_movi_i64(cpu_ir[rc], ~lit);
                    } else {
                        tcg_gen_not_i64(cpu_ir[rc], cpu_ir[rb]);
                    }
                }
            }
            break;
        case 0x61:
            /* AMASK */
            if (likely(rc != 31)) {
                uint64_t amask = ctx->tb->flags >> TB_FLAGS_AMASK_SHIFT;

                if (islit) {
                    tcg_gen_movi_i64(cpu_ir[rc], lit & ~amask);
                } else {
                    tcg_gen_andi_i64(cpu_ir[rc], cpu_ir[rb], ~amask);
                }
            }
            break;
        case 0x64:
            /* CMOVLE */
            gen_cmov(TCG_COND_LE, ra, rb, rc, islit, lit, 0);
            break;
        case 0x66:
            /* CMOVGT */
            gen_cmov(TCG_COND_GT, ra, rb, rc, islit, lit, 0);
            break;
        case 0x6C:
            /* IMPLVER */
            if (rc != 31) {
                tcg_gen_movi_i64(cpu_ir[rc], ctx->implver);
            }
            break;
        default:
            goto invalid_opc;
        }
        break;
    case 0x12:
        switch (fn7) {
        case 0x02:
            /* MSKBL */
            gen_msk_l(ra, rb, rc, islit, lit, 0x01);
            break;
        case 0x06:
            /* EXTBL */
            gen_ext_l(ra, rb, rc, islit, lit, 0x01);
            break;
        case 0x0B:
            /* INSBL */
            gen_ins_l(ra, rb, rc, islit, lit, 0x01);
            break;
        case 0x12:
            /* MSKWL */
            gen_msk_l(ra, rb, rc, islit, lit, 0x03);
            break;
        case 0x16:
            /* EXTWL */
            gen_ext_l(ra, rb, rc, islit, lit, 0x03);
            break;
        case 0x1B:
            /* INSWL */
            gen_ins_l(ra, rb, rc, islit, lit, 0x03);
            break;
        case 0x22:
            /* MSKLL */
            gen_msk_l(ra, rb, rc, islit, lit, 0x0f);
            break;
        case 0x26:
            /* EXTLL */
            gen_ext_l(ra, rb, rc, islit, lit, 0x0f);
            break;
        case 0x2B:
            /* INSLL */
            gen_ins_l(ra, rb, rc, islit, lit, 0x0f);
            break;
        case 0x30:
            /* ZAP */
            gen_zap(ra, rb, rc, islit, lit);
            break;
        case 0x31:
            /* ZAPNOT */
            gen_zapnot(ra, rb, rc, islit, lit);
            break;
        case 0x32:
            /* MSKQL */
            gen_msk_l(ra, rb, rc, islit, lit, 0xff);
            break;
        case 0x34:
            /* SRL */
            if (likely(rc != 31)) {
                if (ra != 31) {
                    if (islit) {
                        tcg_gen_shri_i64(cpu_ir[rc], cpu_ir[ra], lit & 0x3f);
                    } else {
                        TCGv shift = tcg_temp_new();
                        tcg_gen_andi_i64(shift, cpu_ir[rb], 0x3f);
                        tcg_gen_shr_i64(cpu_ir[rc], cpu_ir[ra], shift);
                        tcg_temp_free(shift);
                    }
                } else {
                    tcg_gen_movi_i64(cpu_ir[rc], 0);
                }
            }
            break;
        case 0x36:
            /* EXTQL */
            gen_ext_l(ra, rb, rc, islit, lit, 0xff);
            break;
        case 0x39:
            /* SLL */
            if (likely(rc != 31)) {
                if (ra != 31) {
                    if (islit) {
                        tcg_gen_shli_i64(cpu_ir[rc], cpu_ir[ra], lit & 0x3f);
                    } else {
                        TCGv shift = tcg_temp_new();
                        tcg_gen_andi_i64(shift, cpu_ir[rb], 0x3f);
                        tcg_gen_shl_i64(cpu_ir[rc], cpu_ir[ra], shift);
                        tcg_temp_free(shift);
                    }
                } else {
                    tcg_gen_movi_i64(cpu_ir[rc], 0);
                }
            }
            break;
        case 0x3B:
            /* INSQL */
            gen_ins_l(ra, rb, rc, islit, lit, 0xff);
            break;
        case 0x3C:
            /* SRA */
            if (likely(rc != 31)) {
                if (ra != 31) {
                    if (islit) {
                        tcg_gen_sari_i64(cpu_ir[rc], cpu_ir[ra], lit & 0x3f);
                    } else {
                        TCGv shift = tcg_temp_new();
                        tcg_gen_andi_i64(shift, cpu_ir[rb], 0x3f);
                        tcg_gen_sar_i64(cpu_ir[rc], cpu_ir[ra], shift);
                        tcg_temp_free(shift);
                    }
                } else {
                    tcg_gen_movi_i64(cpu_ir[rc], 0);
                }
            }
            break;
        case 0x52:
            /* MSKWH */
            gen_msk_h(ra, rb, rc, islit, lit, 0x03);
            break;
        case 0x57:
            /* INSWH */
            gen_ins_h(ra, rb, rc, islit, lit, 0x03);
            break;
        case 0x5A:
            /* EXTWH */
            gen_ext_h(ra, rb, rc, islit, lit, 0x03);
            break;
        case 0x62:
            /* MSKLH */
            gen_msk_h(ra, rb, rc, islit, lit, 0x0f);
            break;
        case 0x67:
            /* INSLH */
            gen_ins_h(ra, rb, rc, islit, lit, 0x0f);
            break;
        case 0x6A:
            /* EXTLH */
            gen_ext_h(ra, rb, rc, islit, lit, 0x0f);
            break;
        case 0x72:
            /* MSKQH */
            gen_msk_h(ra, rb, rc, islit, lit, 0xff);
            break;
        case 0x77:
            /* INSQH */
            gen_ins_h(ra, rb, rc, islit, lit, 0xff);
            break;
        case 0x7A:
            /* EXTQH */
            gen_ext_h(ra, rb, rc, islit, lit, 0xff);
            break;
        default:
            goto invalid_opc;
        }
        break;
    case 0x13:
        switch (fn7) {
        case 0x00:
            /* MULL */
            if (likely(rc != 31)) {
                if (ra == 31) {
                    tcg_gen_movi_i64(cpu_ir[rc], 0);
                } else {
                    if (islit) {
                        tcg_gen_muli_i64(cpu_ir[rc], cpu_ir[ra], lit);
                    } else {
                        tcg_gen_mul_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
                    }
                    tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rc]);
                }
            }
            break;
        case 0x20:
            /* MULQ */
            if (likely(rc != 31)) {
                if (ra == 31) {
                    tcg_gen_movi_i64(cpu_ir[rc], 0);
                } else if (islit) {
                    tcg_gen_muli_i64(cpu_ir[rc], cpu_ir[ra], lit);
                } else {
                    tcg_gen_mul_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
                }
            }
            break;
        case 0x30:
            /* UMULH */
            {
                TCGv low;
                if (unlikely(rc == 31)) {
                    break;
                }
                if (ra == 31) {
                    tcg_gen_movi_i64(cpu_ir[rc], 0);
                    break;
                }
                low = tcg_temp_new();
                if (islit) {
                    tcg_gen_movi_tl(low, lit);
                    tcg_gen_mulu2_i64(low, cpu_ir[rc], cpu_ir[ra], low);
                } else {
                    tcg_gen_mulu2_i64(low, cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
                }
                tcg_temp_free(low);
            }
            break;
        case 0x40:
            /* MULL/V */
            gen_mullv(ra, rb, rc, islit, lit);
            break;
        case 0x60:
            /* MULQ/V */
            gen_mulqv(ra, rb, rc, islit, lit);
            break;
        default:
            goto invalid_opc;
        }
        break;
    case 0x14:
        switch (fpfn) { /* fn11 & 0x3F */
        case 0x04:
            /* ITOFS */
            if ((ctx->tb->flags & TB_FLAGS_AMASK_FIX) == 0) {
                goto invalid_opc;
            }
            if (likely(rc != 31)) {
                if (ra != 31) {
                    TCGv_i32 tmp = tcg_temp_new_i32();
                    tcg_gen_trunc_i64_i32(tmp, cpu_ir[ra]);
                    gen_helper_memory_to_s(cpu_fir[rc], tmp);
                    tcg_temp_free_i32(tmp);
                } else {
                    tcg_gen_movi_i64(cpu_fir[rc], 0);
                }
            }
            break;
        case 0x0A:
            /* SQRTF */
            if (ctx->tb->flags & TB_FLAGS_AMASK_FIX) {
                gen_fsqrtf(rb, rc);
                break;
            }
            goto invalid_opc;
        case 0x0B:
            /* SQRTS */
            if (ctx->tb->flags & TB_FLAGS_AMASK_FIX) {
                gen_fsqrts(ctx, rb, rc, fn11);
                break;
            }
            goto invalid_opc;
        case 0x14:
            /* ITOFF */
            if ((ctx->tb->flags & TB_FLAGS_AMASK_FIX) == 0) {
                goto invalid_opc;
            }
            if (likely(rc != 31)) {
                if (ra != 31) {
                    TCGv_i32 tmp = tcg_temp_new_i32();
                    tcg_gen_trunc_i64_i32(tmp, cpu_ir[ra]);
                    gen_helper_memory_to_f(cpu_fir[rc], tmp);
                    tcg_temp_free_i32(tmp);
                } else {
                    tcg_gen_movi_i64(cpu_fir[rc], 0);
                }
            }
            break;
        case 0x24:
            /* ITOFT */
            if ((ctx->tb->flags & TB_FLAGS_AMASK_FIX) == 0) {
                goto invalid_opc;
            }
            if (likely(rc != 31)) {
                if (ra != 31) {
                    tcg_gen_mov_i64(cpu_fir[rc], cpu_ir[ra]);
                } else {
                    tcg_gen_movi_i64(cpu_fir[rc], 0);
                }
            }
            break;
        case 0x2A:
            /* SQRTG */
            if (ctx->tb->flags & TB_FLAGS_AMASK_FIX) {
                gen_fsqrtg(rb, rc);
                break;
            }
            goto invalid_opc;
        case 0x2B:
            /* SQRTT */
            if (ctx->tb->flags & TB_FLAGS_AMASK_FIX) {
                gen_fsqrtt(ctx, rb, rc, fn11);
                break;
            }
            goto invalid_opc;
        default:
            goto invalid_opc;
        }
        break;
    case 0x15:
        /* VAX floating point */
        /* XXX: rounding mode and trap are ignored (!) */
        switch (fpfn) { /* fn11 & 0x3F */
        case 0x00:
            /* ADDF */
            gen_faddf(ra, rb, rc);
            break;
        case 0x01:
            /* SUBF */
            gen_fsubf(ra, rb, rc);
            break;
        case 0x02:
            /* MULF */
            gen_fmulf(ra, rb, rc);
            break;
        case 0x03:
            /* DIVF */
            gen_fdivf(ra, rb, rc);
            break;
        case 0x20:
            /* ADDG */
            gen_faddg(ra, rb, rc);
            break;
        case 0x21:
            /* SUBG */
            gen_fsubg(ra, rb, rc);
            break;
        case 0x22:
            /* MULG */
            gen_fmulg(ra, rb, rc);
            break;
        case 0x23:
            /* DIVG */
            gen_fdivg(ra, rb, rc);
            break;
        case 0x25:
            /* CMPGEQ */
            gen_fcmpgeq(ra, rb, rc);
            break;
        case 0x26:
            /* CMPGLT */
            gen_fcmpglt(ra, rb, rc);
            break;
        case 0x27:
            /* CMPGLE */
            gen_fcmpgle(ra, rb, rc);
            break;
        default:
            goto invalid_opc;
        }
        break;
    case 0x16:
        /* IEEE floating-point */
        switch (fpfn) { /* fn11 & 0x3F */
        case 0x00:
            /* ADDS */
            gen_fadds(ctx, ra, rb, rc, fn11);
            break;
        case 0x01:
            /* SUBS */
            gen_fsubs(ctx, ra, rb, rc, fn11);
            break;
        case 0x02:
            /* MULS */
            gen_fmuls(ctx, ra, rb, rc, fn11);
            break;
        case 0x03:
            /* DIVS */
            gen_fdivs(ctx, ra, rb, rc, fn11);
            break;
        case 0x20:
            /* ADDT */
            gen_faddt(ctx, ra, rb, rc, fn11);
            break;
        case 0x21:
            /* SUBT */
            gen_fsubt(ctx, ra, rb, rc, fn11);
            break;
        case 0x22:
            /* MULT */
            gen_fmult(ctx, ra, rb, rc, fn11);
            break;
        case 0x23:
            /* DIVT */
            gen_fdivt(ctx, ra, rb, rc, fn11);
            break;
        case 0x24:
            /* CMPTUN */
            gen_fcmptun(ctx, ra, rb, rc, fn11);
            break;
        case 0x25:
            /* CMPTEQ */
            gen_fcmpteq(ctx, ra, rb, rc, fn11);
            break;
        case 0x26:
            /* CMPTLT */
            gen_fcmptlt(ctx, ra, rb, rc, fn11);
            break;
        case 0x27:
            /* CMPTLE */
            gen_fcmptle(ctx, ra, rb, rc, fn11);
            break;
        case 0x2C:
            if (fn11 == 0x2AC || fn11 == 0x6AC) {
                /* CVTST */
                gen_fcvtst(ctx, rb, rc, fn11);
            } else {
                /* CVTTS */
                gen_fcvtts(ctx, rb, rc, fn11);
            }
            break;
        case 0x2F:
            /* CVTTQ */
            gen_fcvttq(ctx, rb, rc, fn11);
            break;
        case 0x3C:
            /* CVTQS */
            gen_fcvtqs(ctx, rb, rc, fn11);
            break;
        case 0x3E:
            /* CVTQT */
            gen_fcvtqt(ctx, rb, rc, fn11);
            break;
        default:
            goto invalid_opc;
        }
        break;
    case 0x17:
        switch (fn11) {
        case 0x010:
            /* CVTLQ */
            gen_fcvtlq(rb, rc);
            break;
        case 0x020:
            /* CPYS */
            if (likely(rc != 31)) {
                if (ra == rb) {
                    if (ra == 31) {
                        /* Special case CPYS as FCLR.  */
                        tcg_gen_movi_i64(cpu_fir[rc], 0);
                    } else {
                        /* Special case CPYS as FMOV.  */
                        tcg_gen_mov_i64(cpu_fir[rc], cpu_fir[ra]);
                    }
                } else {
                    gen_fcpys(ra, rb, rc);
                }
            }
            break;
        case 0x021:
            /* CPYSN */
            gen_fcpysn(ra, rb, rc);
            break;
        case 0x022:
            /* CPYSE */
            gen_fcpyse(ra, rb, rc);
            break;
        case 0x024:
            /* MT_FPCR */
            if (likely(ra != 31)) {
                gen_helper_store_fpcr(cpu_env, cpu_fir[ra]);
            } else {
                TCGv tmp = tcg_const_i64(0);
                gen_helper_store_fpcr(cpu_env, tmp);
                tcg_temp_free(tmp);
            }
            break;
        case 0x025:
            /* MF_FPCR */
            if (likely(ra != 31)) {
                gen_helper_load_fpcr(cpu_fir[ra], cpu_env);
            }
            break;
        case 0x02A:
            /* FCMOVEQ */
            gen_fcmov(TCG_COND_EQ, ra, rb, rc);
            break;
        case 0x02B:
            /* FCMOVNE */
            gen_fcmov(TCG_COND_NE, ra, rb, rc);
            break;
        case 0x02C:
            /* FCMOVLT */
            gen_fcmov(TCG_COND_LT, ra, rb, rc);
            break;
        case 0x02D:
            /* FCMOVGE */
            gen_fcmov(TCG_COND_GE, ra, rb, rc);
            break;
        case 0x02E:
            /* FCMOVLE */
            gen_fcmov(TCG_COND_LE, ra, rb, rc);
            break;
        case 0x02F:
            /* FCMOVGT */
            gen_fcmov(TCG_COND_GT, ra, rb, rc);
            break;
        case 0x030:
            /* CVTQL */
            gen_fcvtql(rb, rc);
            break;
        case 0x130:
            /* CVTQL/V */
        case 0x530:
            /* CVTQL/SV */
            /* ??? I'm pretty sure there's nothing that /sv needs to do that
               /v doesn't do.  The only thing I can think is that /sv is a
               valid instruction merely for completeness in the ISA.  */
            gen_fcvtql_v(ctx, rb, rc);
            break;
        default:
            goto invalid_opc;
        }
        break;
    case 0x18:
        switch ((uint16_t)disp16) {
        case 0xC000:
            /* RPCC */
            if (ra != 31) {
                if (use_icount) {
                    gen_io_start();
                    gen_helper_load_pcc(cpu_ir[ra], cpu_env);
                    gen_io_end();
                    ret = EXIT_PC_STALE;
                } else {
                    gen_helper_load_pcc(cpu_ir[ra], cpu_env);
                }
            }
            break;
        default:
            goto invalid_opc;
        }
        break;
    case 0x19:
        /* HW_MFPR (PALcode) */
#ifndef CONFIG_USER_ONLY
        if (ctx->tb->flags & TB_FLAGS_PAL_MODE) {
            return gen_mfpr(ra, insn & 0xffff);
        }
#endif
        goto invalid_opc;
    case 0x1A:
        /* JMP, JSR, RET, JSR_COROUTINE.  These only differ by the branch
           prediction stack action, which of course we don't implement.  */
        if (rb != 31) {
            tcg_gen_andi_i64(cpu_pc, cpu_ir[rb], ~3);
        } else {
            tcg_gen_movi_i64(cpu_pc, 0);
        }
        if (ra != 31) {
            tcg_gen_movi_i64(cpu_ir[ra], ctx->pc);
        }
        ret = EXIT_PC_UPDATED;
        break;
    case 0x1B:
        /* HW_LD (PALcode) */
#ifndef CONFIG_USER_ONLY
        if (ctx->tb->flags & TB_FLAGS_PAL_MODE) {
            TCGv addr;

            if (ra == 31) {
                break;
            }

            addr = tcg_temp_new();
            if (rb != 31) {
                tcg_gen_addi_i64(addr, cpu_ir[rb], disp12);
            } else {
                tcg_gen_movi_i64(addr, disp12);
            }
            switch ((insn >> 12) & 0xF) {
            case 0x0:
                /* Longword physical access (hw_ldl/p) */
                gen_helper_ldl_phys(cpu_ir[ra], addr);
                break;
            case 0x1:
                /* Quadword physical access (hw_ldq/p) */
                gen_helper_ldq_phys(cpu_ir[ra], addr);
                break;
            case 0x2:
                /* Longword physical access with lock (hw_ldl_l/p) */
                gen_helper_ldl_l_phys(cpu_ir[ra], cpu_env, addr);
                break;
            case 0x3:
                /* Quadword physical access with lock (hw_ldq_l/p) */
                gen_helper_ldq_l_phys(cpu_ir[ra], cpu_env, addr);
                break;
            case 0x4:
                /* Longword virtual PTE fetch (hw_ldl/v) */
                goto invalid_opc;
            case 0x5:
                /* Quadword virtual PTE fetch (hw_ldq/v) */
                goto invalid_opc;
            case 0x6:
                /* Invalid */
                goto invalid_opc;
            case 0x7:
                /* Invalid */
                goto invalid_opc;
            case 0x8:
                /* Longword virtual access (hw_ldl) */
                goto invalid_opc;
            case 0x9:
                /* Quadword virtual access (hw_ldq) */
                goto invalid_opc;
            case 0xA:
                /* Longword virtual access with protection check (hw_ldl/w) */
                tcg_gen_qemu_ld32s(cpu_ir[ra], addr, MMU_KERNEL_IDX);
                break;
            case 0xB:
                /* Quadword virtual access with protection check (hw_ldq/w) */
                tcg_gen_qemu_ld64(cpu_ir[ra], addr, MMU_KERNEL_IDX);
                break;
            case 0xC:
                /* Longword virtual access with alt access mode (hw_ldl/a) */
                goto invalid_opc;
            case 0xD:
                /* Quadword virtual access with alt access mode (hw_ldq/a) */
                goto invalid_opc;
            case 0xE:
                /* Longword virtual access with alternate access mode and
                   protection checks (hw_ldl/wa) */
                tcg_gen_qemu_ld32s(cpu_ir[ra], addr, MMU_USER_IDX);
                break;
            case 0xF:
                /* Quadword virtual access with alternate access mode and
                   protection checks (hw_ldq/wa) */
                tcg_gen_qemu_ld64(cpu_ir[ra], addr, MMU_USER_IDX);
                break;
            }
            tcg_temp_free(addr);
            break;
        }
#endif
        goto invalid_opc;
    case 0x1C:
        switch (fn7) {
        case 0x00:
            /* SEXTB */
            if ((ctx->tb->flags & TB_FLAGS_AMASK_BWX) == 0) {
                goto invalid_opc;
            }
            if (likely(rc != 31)) {
                if (islit) {
                    tcg_gen_movi_i64(cpu_ir[rc], (int64_t)((int8_t)lit));
                } else {
                    tcg_gen_ext8s_i64(cpu_ir[rc], cpu_ir[rb]);
                }
            }
            break;
        case 0x01:
            /* SEXTW */
            if (ctx->tb->flags & TB_FLAGS_AMASK_BWX) {
                if (likely(rc != 31)) {
                    if (islit) {
                        tcg_gen_movi_i64(cpu_ir[rc], (int64_t)((int16_t)lit));
                    } else {
                        tcg_gen_ext16s_i64(cpu_ir[rc], cpu_ir[rb]);
                    }
                }
                break;
            }
            goto invalid_opc;
        case 0x30:
            /* CTPOP */
            if (ctx->tb->flags & TB_FLAGS_AMASK_CIX) {
                if (likely(rc != 31)) {
                    if (islit) {
                        tcg_gen_movi_i64(cpu_ir[rc], ctpop64(lit));
                    } else {
                        gen_helper_ctpop(cpu_ir[rc], cpu_ir[rb]);
                    }
                }
                break;
            }
            goto invalid_opc;
3044 gen_perr(ra
, rb
, rc
, islit
, lit
);
3050 if (ctx
->tb
->flags
& TB_FLAGS_AMASK_CIX
) {
3051 if (likely(rc
!= 31)) {
3053 tcg_gen_movi_i64(cpu_ir
[rc
], clz64(lit
));
3055 gen_helper_ctlz(cpu_ir
[rc
], cpu_ir
[rb
]);
3063 if (ctx
->tb
->flags
& TB_FLAGS_AMASK_CIX
) {
3064 if (likely(rc
!= 31)) {
3066 tcg_gen_movi_i64(cpu_ir
[rc
], ctz64(lit
));
3068 gen_helper_cttz(cpu_ir
[rc
], cpu_ir
[rb
]);
3076 if (ctx
->tb
->flags
& TB_FLAGS_AMASK_MVI
) {
3077 if (real_islit
|| ra
!= 31) {
3086 if (ctx
->tb
->flags
& TB_FLAGS_AMASK_MVI
) {
3087 if (real_islit
|| ra
!= 31) {
3096 if (ctx
->tb
->flags
& TB_FLAGS_AMASK_MVI
) {
3097 if (real_islit
|| ra
!= 31) {
3106 if (ctx
->tb
->flags
& TB_FLAGS_AMASK_MVI
) {
3107 if (real_islit
|| ra
!= 31) {
3116 if (ctx
->tb
->flags
& TB_FLAGS_AMASK_MVI
) {
3117 gen_minsb8(ra
, rb
, rc
, islit
, lit
);
3123 if (ctx
->tb
->flags
& TB_FLAGS_AMASK_MVI
) {
3124 gen_minsw4(ra
, rb
, rc
, islit
, lit
);
3130 if (ctx
->tb
->flags
& TB_FLAGS_AMASK_MVI
) {
3131 gen_minub8(ra
, rb
, rc
, islit
, lit
);
3137 if (ctx
->tb
->flags
& TB_FLAGS_AMASK_MVI
) {
3138 gen_minuw4(ra
, rb
, rc
, islit
, lit
);
3144 if (ctx
->tb
->flags
& TB_FLAGS_AMASK_MVI
) {
3145 gen_maxub8(ra
, rb
, rc
, islit
, lit
);
3151 if (ctx
->tb
->flags
& TB_FLAGS_AMASK_MVI
) {
3152 gen_maxuw4(ra
, rb
, rc
, islit
, lit
);
3158 if (ctx
->tb
->flags
& TB_FLAGS_AMASK_MVI
) {
3159 gen_maxsb8(ra
, rb
, rc
, islit
, lit
);
3165 if (ctx
->tb
->flags
& TB_FLAGS_AMASK_MVI
) {
3166 gen_maxsw4(ra
, rb
, rc
, islit
, lit
);
        case 0x70:
            /* FTOIT */
            if ((ctx->tb->flags & TB_FLAGS_AMASK_FIX) == 0) {
                goto invalid_opc;
            }
            if (likely(rc != 31)) {
                if (ra != 31) {
                    tcg_gen_mov_i64(cpu_ir[rc], cpu_fir[ra]);
                } else {
                    tcg_gen_movi_i64(cpu_ir[rc], 0);
                }
            }
            break;
        case 0x78:
            /* FTOIS */
            if ((ctx->tb->flags & TB_FLAGS_AMASK_FIX) == 0) {
                goto invalid_opc;
            }
            if (rc != 31) {
                TCGv_i32 tmp1 = tcg_temp_new_i32();
                if (ra != 31) {
                    gen_helper_s_to_memory(tmp1, cpu_fir[ra]);
                } else {
                    TCGv tmp2 = tcg_const_i64(0);
                    gen_helper_s_to_memory(tmp1, tmp2);
                    tcg_temp_free(tmp2);
                }
                tcg_gen_ext_i32_i64(cpu_ir[rc], tmp1);
                tcg_temp_free_i32(tmp1);
            }
            break;
        default:
            goto invalid_opc;
        }
        break;
    case 0x1D:
        /* HW_MTPR (PALcode) */
#ifndef CONFIG_USER_ONLY
        if (ctx->tb->flags & TB_FLAGS_PAL_MODE) {
            return gen_mtpr(ctx, rb, insn & 0xffff);
        }
#endif
        goto invalid_opc;
    case 0x1E:
        /* HW_RET (PALcode) */
#ifndef CONFIG_USER_ONLY
        if (ctx->tb->flags & TB_FLAGS_PAL_MODE) {
            if (rb == 31) {
                /* Pre-EV6 CPUs interpreted this as HW_REI, loading the return
                   address from EXC_ADDR.  This turns out to be useful for our
                   emulation PALcode, so continue to accept it.  */
                TCGv tmp = tcg_temp_new();
                tcg_gen_ld_i64(tmp, cpu_env, offsetof(CPUAlphaState, exc_addr));
                gen_helper_hw_ret(cpu_env, tmp);
                tcg_temp_free(tmp);
            } else {
                gen_helper_hw_ret(cpu_env, cpu_ir[rb]);
            }
            ret = EXIT_PC_UPDATED;
            break;
        }
#endif
        goto invalid_opc;
    case 0x1F:
        /* HW_ST (PALcode) */
#ifndef CONFIG_USER_ONLY
        if (ctx->tb->flags & TB_FLAGS_PAL_MODE) {
            TCGv addr, val;

            addr = tcg_temp_new();
            if (rb != 31) {
                tcg_gen_addi_i64(addr, cpu_ir[rb], disp12);
            } else {
                tcg_gen_movi_i64(addr, disp12);
            }

            if (ra != 31) {
                val = cpu_ir[ra];
            } else {
                val = tcg_temp_new();
                tcg_gen_movi_i64(val, 0);
            }

            switch ((insn >> 12) & 0xF) {
            case 0x0:
                /* Longword physical access */
                gen_helper_stl_phys(addr, val);
                break;
            case 0x1:
                /* Quadword physical access */
                gen_helper_stq_phys(addr, val);
                break;
            case 0x2:
                /* Longword physical access with lock */
                gen_helper_stl_c_phys(val, cpu_env, addr, val);
                break;
            case 0x3:
                /* Quadword physical access with lock */
                gen_helper_stq_c_phys(val, cpu_env, addr, val);
                break;
            case 0x4:
                /* Longword virtual access */
                goto invalid_opc;
            case 0x5:
                /* Quadword virtual access */
                goto invalid_opc;
            case 0xC:
                /* Longword virtual access with alternate access mode */
                goto invalid_opc;
            case 0xD:
                /* Quadword virtual access with alternate access mode */
                goto invalid_opc;
            default:
                goto invalid_opc;
            }

            if (ra == 31) {
                tcg_temp_free(val);
            }
            tcg_temp_free(addr);
            break;
        }
#endif
        goto invalid_opc;
    case 0x20:
        /* LDF */
        gen_load_mem(ctx, &gen_qemu_ldf, ra, rb, disp16, 1, 0);
        break;
    case 0x21:
        /* LDG */
        gen_load_mem(ctx, &gen_qemu_ldg, ra, rb, disp16, 1, 0);
        break;
    case 0x22:
        /* LDS */
        gen_load_mem(ctx, &gen_qemu_lds, ra, rb, disp16, 1, 0);
        break;
    case 0x23:
        /* LDT */
        gen_load_mem(ctx, &tcg_gen_qemu_ld64, ra, rb, disp16, 1, 0);
        break;
    case 0x24:
        /* STF */
        gen_store_mem(ctx, &gen_qemu_stf, ra, rb, disp16, 1, 0);
        break;
    case 0x25:
        /* STG */
        gen_store_mem(ctx, &gen_qemu_stg, ra, rb, disp16, 1, 0);
        break;
    case 0x26:
        /* STS */
        gen_store_mem(ctx, &gen_qemu_sts, ra, rb, disp16, 1, 0);
        break;
    case 0x27:
        /* STT */
        gen_store_mem(ctx, &tcg_gen_qemu_st64, ra, rb, disp16, 1, 0);
        break;
    case 0x28:
        /* LDL */
        gen_load_mem(ctx, &tcg_gen_qemu_ld32s, ra, rb, disp16, 0, 0);
        break;
    case 0x29:
        /* LDQ */
        gen_load_mem(ctx, &tcg_gen_qemu_ld64, ra, rb, disp16, 0, 0);
        break;
    case 0x2A:
        /* LDL_L */
        gen_load_mem(ctx, &gen_qemu_ldl_l, ra, rb, disp16, 0, 0);
        break;
    case 0x2B:
        /* LDQ_L */
        gen_load_mem(ctx, &gen_qemu_ldq_l, ra, rb, disp16, 0, 0);
        break;
    case 0x2C:
        /* STL */
        gen_store_mem(ctx, &tcg_gen_qemu_st32, ra, rb, disp16, 0, 0);
        break;
    case 0x2D:
        /* STQ */
        gen_store_mem(ctx, &tcg_gen_qemu_st64, ra, rb, disp16, 0, 0);
        break;
    case 0x2E:
        /* STL_C */
        ret = gen_store_conditional(ctx, ra, rb, disp16, 0);
        break;
    case 0x2F:
        /* STQ_C */
        ret = gen_store_conditional(ctx, ra, rb, disp16, 1);
        break;
    case 0x30:
        /* BR */
        ret = gen_bdirect(ctx, ra, disp21);
        break;
    case 0x31: /* FBEQ */
        ret = gen_fbcond(ctx, TCG_COND_EQ, ra, disp21);
        break;
    case 0x32: /* FBLT */
        ret = gen_fbcond(ctx, TCG_COND_LT, ra, disp21);
        break;
    case 0x33: /* FBLE */
        ret = gen_fbcond(ctx, TCG_COND_LE, ra, disp21);
        break;
    case 0x34:
        /* BSR */
        ret = gen_bdirect(ctx, ra, disp21);
        break;
    case 0x35: /* FBNE */
        ret = gen_fbcond(ctx, TCG_COND_NE, ra, disp21);
        break;
    case 0x36: /* FBGE */
        ret = gen_fbcond(ctx, TCG_COND_GE, ra, disp21);
        break;
    case 0x37: /* FBGT */
        ret = gen_fbcond(ctx, TCG_COND_GT, ra, disp21);
        break;
    case 0x38:
        /* BLBC */
        ret = gen_bcond(ctx, TCG_COND_EQ, ra, disp21, 1);
        break;
    case 0x39:
        /* BEQ */
        ret = gen_bcond(ctx, TCG_COND_EQ, ra, disp21, 0);
        break;
    case 0x3A:
        /* BLT */
        ret = gen_bcond(ctx, TCG_COND_LT, ra, disp21, 0);
        break;
    case 0x3B:
        /* BLE */
        ret = gen_bcond(ctx, TCG_COND_LE, ra, disp21, 0);
        break;
    case 0x3C:
        /* BLBS */
        ret = gen_bcond(ctx, TCG_COND_NE, ra, disp21, 1);
        break;
    case 0x3D:
        /* BNE */
        ret = gen_bcond(ctx, TCG_COND_NE, ra, disp21, 0);
        break;
    case 0x3E:
        /* BGE */
        ret = gen_bcond(ctx, TCG_COND_GE, ra, disp21, 0);
        break;
    case 0x3F:
        /* BGT */
        ret = gen_bcond(ctx, TCG_COND_GT, ra, disp21, 0);
        break;
    invalid_opc:
        ret = gen_invalid(ctx);
        break;
    }

    return ret;
}
static inline void gen_intermediate_code_internal(AlphaCPU *cpu,
                                                  TranslationBlock *tb,
                                                  bool search_pc)
{
    CPUState *cs = CPU(cpu);
    CPUAlphaState *env = &cpu->env;
    DisasContext ctx, *ctxp = &ctx;
    target_ulong pc_start;
    target_ulong pc_mask;
    uint32_t insn;
    uint16_t *gen_opc_end;
    CPUBreakpoint *bp;
    int j, lj = -1;
    ExitStatus ret;
    int num_insns;
    int max_insns;

    pc_start = tb->pc;
    gen_opc_end = tcg_ctx.gen_opc_buf + OPC_MAX_SIZE;

    ctx.tb = tb;
    ctx.pc = pc_start;
    ctx.mem_idx = cpu_mmu_index(env);
    ctx.implver = env->implver;
    ctx.singlestep_enabled = cs->singlestep_enabled;

    /* ??? Every TB begins with unset rounding mode, to be initialized on
       the first fp insn of the TB.  Alternately we could define a proper
       default for every TB (e.g. QUAL_RM_N or QUAL_RM_D) and make sure
       to reset the FP_STATUS to that default at the end of any TB that
       changes the default.  We could even (gasp) dynamically figure out
       what default would be most efficient given the running program.  */
    ctx.tb_rm = -1;
    /* Similarly for flush-to-zero.  */
    ctx.tb_ftz = -1;

    num_insns = 0;
    max_insns = tb->cflags & CF_COUNT_MASK;
    if (max_insns == 0) {
        max_insns = CF_COUNT_MASK;
    }

    if (in_superpage(&ctx, pc_start)) {
        pc_mask = (1ULL << 41) - 1;
    } else {
        pc_mask = ~TARGET_PAGE_MASK;
    }

    gen_tb_start();
    do {
        if (unlikely(!QTAILQ_EMPTY(&env->breakpoints))) {
            QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
                if (bp->pc == ctx.pc) {
                    gen_excp(&ctx, EXCP_DEBUG, 0);
                    break;
                }
            }
        }
        if (search_pc) {
            j = tcg_ctx.gen_opc_ptr - tcg_ctx.gen_opc_buf;
            if (lj < j) {
                lj++;
                while (lj < j) {
                    tcg_ctx.gen_opc_instr_start[lj++] = 0;
                }
            }
            tcg_ctx.gen_opc_pc[lj] = ctx.pc;
            tcg_ctx.gen_opc_instr_start[lj] = 1;
            tcg_ctx.gen_opc_icount[lj] = num_insns;
        }
        if (num_insns + 1 == max_insns && (tb->cflags & CF_LAST_IO)) {
            gen_io_start();
        }
        insn = cpu_ldl_code(env, ctx.pc);
        num_insns++;

        if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP | CPU_LOG_TB_OP_OPT))) {
            tcg_gen_debug_insn_start(ctx.pc);
        }

        ctx.pc += 4;
        ret = translate_one(ctxp, insn);

        /* If we reach a page boundary, are single stepping,
           or exhaust instruction count, stop generation.  */
        if (ret == NO_EXIT
            && ((ctx.pc & pc_mask) == 0
                || tcg_ctx.gen_opc_ptr >= gen_opc_end
                || num_insns >= max_insns
                || singlestep
                || ctx.singlestep_enabled)) {
            ret = EXIT_PC_STALE;
        }
    } while (ret == NO_EXIT);

    if (tb->cflags & CF_LAST_IO) {
        gen_io_end();
    }

    switch (ret) {
    case EXIT_GOTO_TB:
    case EXIT_NORETURN:
        break;
    case EXIT_PC_STALE:
        tcg_gen_movi_i64(cpu_pc, ctx.pc);
        /* FALLTHRU */
    case EXIT_PC_UPDATED:
        if (ctx.singlestep_enabled) {
            gen_excp_1(EXCP_DEBUG, 0);
        } else {
            tcg_gen_exit_tb(0);
        }
        break;
    default:
        abort();
    }

    gen_tb_end(tb, num_insns);
    *tcg_ctx.gen_opc_ptr = INDEX_op_end;
    if (search_pc) {
        j = tcg_ctx.gen_opc_ptr - tcg_ctx.gen_opc_buf;
        lj++;
        while (lj <= j) {
            tcg_ctx.gen_opc_instr_start[lj++] = 0;
        }
    } else {
        tb->size = ctx.pc - pc_start;
        tb->icount = num_insns;
    }

#ifdef DEBUG_DISAS
    if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
        qemu_log("IN: %s\n", lookup_symbol(pc_start));
        log_target_disas(env, pc_start, ctx.pc - pc_start, 1);
        qemu_log("\n");
    }
#endif
}
void gen_intermediate_code(CPUAlphaState *env, struct TranslationBlock *tb)
{
    gen_intermediate_code_internal(alpha_env_get_cpu(env), tb, false);
}

void gen_intermediate_code_pc(CPUAlphaState *env, struct TranslationBlock *tb)
{
    gen_intermediate_code_internal(alpha_env_get_cpu(env), tb, true);
}

void restore_state_to_opc(CPUAlphaState *env, TranslationBlock *tb, int pc_pos)
{
    env->pc = tcg_ctx.gen_opc_pc[pc_pos];
}