/*
 *  Alpha emulation cpu translation for qemu.
 *
 *  Copyright (c) 2007 Jocelyn Mayer
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "sysemu/cpus.h"
#include "disas/disas.h"
#include "qemu/host-utils.h"
#include "exec/exec-all.h"
#include "exec/cpu_ldst.h"
#include "exec/helper-proto.h"
#include "exec/helper-gen.h"
#include "trace-tcg.h"
#include "exec/translator.h"

#undef ALPHA_DEBUG_DISAS
#define CONFIG_SOFTFLOAT_INLINE

#ifdef ALPHA_DEBUG_DISAS
#  define LOG_DISAS(...) qemu_log_mask(CPU_LOG_TB_IN_ASM, ## __VA_ARGS__)
#else
#  define LOG_DISAS(...) do { } while (0)
#endif
typedef struct DisasContext DisasContext;
struct DisasContext {
    DisasContextBase base;

#ifndef CONFIG_USER_ONLY
    uint64_t palbr;
#endif
    uint32_t tbflags;
    int mem_idx;

    /* implver and amask values for this CPU.  */
    int implver;
    int amask;

    /* Current rounding mode for this TB.  */
    int tb_rm;
    /* Current flush-to-zero setting for this TB.  */
    int tb_ftz;

    /* The set of registers active in the current context.  */
    TCGv *ir;

    /* Temporaries for $31 and $f31 as source and destination.  */
    TCGv zero;
    TCGv sink;
    /* Temporary for immediate constants.  */
    TCGv lit;
};

/* Target-specific return values from translate_one, indicating the
   state of the TB.  Note that DISAS_NEXT indicates that we are not
   exiting the TB.  */
#define DISAS_PC_UPDATED_NOCHAIN  DISAS_TARGET_0
#define DISAS_PC_UPDATED          DISAS_TARGET_1
#define DISAS_PC_STALE            DISAS_TARGET_2
/* global register indexes */
static TCGv_env cpu_env;
static TCGv cpu_std_ir[31];
static TCGv cpu_fir[31];
static TCGv cpu_pc;
static TCGv cpu_lock_addr;
static TCGv cpu_lock_value;

#ifndef CONFIG_USER_ONLY
static TCGv cpu_pal_ir[31];
#endif

#include "exec/gen-icount.h"
void alpha_translate_init(void)
{
#define DEF_VAR(V)  { &cpu_##V, #V, offsetof(CPUAlphaState, V) }

    typedef struct { TCGv *var; const char *name; int ofs; } GlobalVar;
    static const GlobalVar vars[] = {
        DEF_VAR(pc),
        DEF_VAR(lock_addr),
        DEF_VAR(lock_value),
    };

#undef DEF_VAR

    /* Use the symbolic register names that match the disassembler.  */
    static const char greg_names[31][4] = {
        "v0", "t0", "t1", "t2", "t3", "t4", "t5", "t6",
        "t7", "s0", "s1", "s2", "s3", "s4", "s5", "fp",
        "a0", "a1", "a2", "a3", "a4", "a5", "t8", "t9",
        "t10", "t11", "ra", "t12", "at", "gp", "sp"
    };
    static const char freg_names[31][4] = {
        "f0", "f1", "f2", "f3", "f4", "f5", "f6", "f7",
        "f8", "f9", "f10", "f11", "f12", "f13", "f14", "f15",
        "f16", "f17", "f18", "f19", "f20", "f21", "f22", "f23",
        "f24", "f25", "f26", "f27", "f28", "f29", "f30"
    };
#ifndef CONFIG_USER_ONLY
    static const char shadow_names[8][8] = {
        "pal_t7", "pal_s0", "pal_s1", "pal_s2",
        "pal_s3", "pal_s4", "pal_s5", "pal_t11"
    };
#endif

    int i;

    cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");
    tcg_ctx.tcg_env = cpu_env;

    for (i = 0; i < 31; i++) {
        cpu_std_ir[i] = tcg_global_mem_new_i64(cpu_env,
                                               offsetof(CPUAlphaState, ir[i]),
                                               greg_names[i]);
    }

    for (i = 0; i < 31; i++) {
        cpu_fir[i] = tcg_global_mem_new_i64(cpu_env,
                                            offsetof(CPUAlphaState, fir[i]),
                                            freg_names[i]);
    }

#ifndef CONFIG_USER_ONLY
    memcpy(cpu_pal_ir, cpu_std_ir, sizeof(cpu_pal_ir));
    for (i = 0; i < 8; i++) {
        int r = (i == 7 ? 25 : i + 8);
        cpu_pal_ir[r] = tcg_global_mem_new_i64(cpu_env,
                                               offsetof(CPUAlphaState,
                                                        shadow[i]),
                                               shadow_names[i]);
    }
#endif

    for (i = 0; i < ARRAY_SIZE(vars); ++i) {
        const GlobalVar *v = &vars[i];
        *v->var = tcg_global_mem_new_i64(cpu_env, v->ofs, v->name);
    }
}
static TCGv load_zero(DisasContext *ctx)
{
    if (TCGV_IS_UNUSED_I64(ctx->zero)) {
        ctx->zero = tcg_const_i64(0);
    }
    return ctx->zero;
}

static TCGv dest_sink(DisasContext *ctx)
{
    if (TCGV_IS_UNUSED_I64(ctx->sink)) {
        ctx->sink = tcg_temp_new();
    }
    return ctx->sink;
}

static void free_context_temps(DisasContext *ctx)
{
    if (!TCGV_IS_UNUSED_I64(ctx->sink)) {
        tcg_gen_discard_i64(ctx->sink);
        tcg_temp_free(ctx->sink);
        TCGV_UNUSED_I64(ctx->sink);
    }
    if (!TCGV_IS_UNUSED_I64(ctx->zero)) {
        tcg_temp_free(ctx->zero);
        TCGV_UNUSED_I64(ctx->zero);
    }
    if (!TCGV_IS_UNUSED_I64(ctx->lit)) {
        tcg_temp_free(ctx->lit);
        TCGV_UNUSED_I64(ctx->lit);
    }
}
static TCGv load_gpr(DisasContext *ctx, unsigned reg)
{
    if (likely(reg < 31)) {
        return ctx->ir[reg];
    } else {
        return load_zero(ctx);
    }
}

static TCGv load_gpr_lit(DisasContext *ctx, unsigned reg,
                         uint8_t lit, bool islit)
{
    if (islit) {
        ctx->lit = tcg_const_i64(lit);
        return ctx->lit;
    } else if (likely(reg < 31)) {
        return ctx->ir[reg];
    } else {
        return load_zero(ctx);
    }
}

static TCGv dest_gpr(DisasContext *ctx, unsigned reg)
{
    if (likely(reg < 31)) {
        return ctx->ir[reg];
    } else {
        return dest_sink(ctx);
    }
}

static TCGv load_fpr(DisasContext *ctx, unsigned reg)
{
    if (likely(reg < 31)) {
        return cpu_fir[reg];
    } else {
        return load_zero(ctx);
    }
}

static TCGv dest_fpr(DisasContext *ctx, unsigned reg)
{
    if (likely(reg < 31)) {
        return cpu_fir[reg];
    } else {
        return dest_sink(ctx);
    }
}
static int get_flag_ofs(unsigned shift)
{
    int ofs = offsetof(CPUAlphaState, flags);
#ifdef HOST_WORDS_BIGENDIAN
    ofs += 3 - (shift / 8);
#else
    ofs += shift / 8;
#endif
    return ofs;
}

static void ld_flag_byte(TCGv val, unsigned shift)
{
    tcg_gen_ld8u_i64(val, cpu_env, get_flag_ofs(shift));
}

static void st_flag_byte(TCGv val, unsigned shift)
{
    tcg_gen_st8_i64(val, cpu_env, get_flag_ofs(shift));
}
static void gen_excp_1(int exception, int error_code)
{
    TCGv_i32 tmp1, tmp2;

    tmp1 = tcg_const_i32(exception);
    tmp2 = tcg_const_i32(error_code);
    gen_helper_excp(cpu_env, tmp1, tmp2);
    tcg_temp_free_i32(tmp2);
    tcg_temp_free_i32(tmp1);
}

static DisasJumpType gen_excp(DisasContext *ctx, int exception, int error_code)
{
    tcg_gen_movi_i64(cpu_pc, ctx->base.pc_next);
    gen_excp_1(exception, error_code);
    return DISAS_NORETURN;
}

static inline DisasJumpType gen_invalid(DisasContext *ctx)
{
    return gen_excp(ctx, EXCP_OPCDEC, 0);
}
static inline void gen_qemu_ldf(TCGv t0, TCGv t1, int flags)
{
    TCGv_i32 tmp32 = tcg_temp_new_i32();
    tcg_gen_qemu_ld_i32(tmp32, t1, flags, MO_LEUL);
    gen_helper_memory_to_f(t0, tmp32);
    tcg_temp_free_i32(tmp32);
}

static inline void gen_qemu_ldg(TCGv t0, TCGv t1, int flags)
{
    TCGv tmp = tcg_temp_new();
    tcg_gen_qemu_ld_i64(tmp, t1, flags, MO_LEQ);
    gen_helper_memory_to_g(t0, tmp);
    tcg_temp_free(tmp);
}

static inline void gen_qemu_lds(TCGv t0, TCGv t1, int flags)
{
    TCGv_i32 tmp32 = tcg_temp_new_i32();
    tcg_gen_qemu_ld_i32(tmp32, t1, flags, MO_LEUL);
    gen_helper_memory_to_s(t0, tmp32);
    tcg_temp_free_i32(tmp32);
}

static inline void gen_qemu_ldl_l(TCGv t0, TCGv t1, int flags)
{
    tcg_gen_qemu_ld_i64(t0, t1, flags, MO_LESL);
    tcg_gen_mov_i64(cpu_lock_addr, t1);
    tcg_gen_mov_i64(cpu_lock_value, t0);
}

static inline void gen_qemu_ldq_l(TCGv t0, TCGv t1, int flags)
{
    tcg_gen_qemu_ld_i64(t0, t1, flags, MO_LEQ);
    tcg_gen_mov_i64(cpu_lock_addr, t1);
    tcg_gen_mov_i64(cpu_lock_value, t0);
}
static inline void gen_load_mem(DisasContext *ctx,
                                void (*tcg_gen_qemu_load)(TCGv t0, TCGv t1,
                                                          int flags),
                                int ra, int rb, int32_t disp16, bool fp,
                                bool clear)
{
    TCGv tmp, addr, va;

    /* LDQ_U with ra $31 is UNOP.  Other various loads are forms of
       prefetches, which we can treat as nops.  No worries about
       missed exceptions here.  */
    if (unlikely(ra == 31)) {
        return;
    }

    tmp = tcg_temp_new();
    addr = load_gpr(ctx, rb);

    if (disp16) {
        tcg_gen_addi_i64(tmp, addr, disp16);
        addr = tmp;
    }
    if (clear) {
        tcg_gen_andi_i64(tmp, addr, ~0x7);
        addr = tmp;
    }

    va = (fp ? cpu_fir[ra] : ctx->ir[ra]);
    tcg_gen_qemu_load(va, addr, ctx->mem_idx);

    tcg_temp_free(tmp);
}
static inline void gen_qemu_stf(TCGv t0, TCGv t1, int flags)
{
    TCGv_i32 tmp32 = tcg_temp_new_i32();
    gen_helper_f_to_memory(tmp32, t0);
    tcg_gen_qemu_st_i32(tmp32, t1, flags, MO_LEUL);
    tcg_temp_free_i32(tmp32);
}

static inline void gen_qemu_stg(TCGv t0, TCGv t1, int flags)
{
    TCGv tmp = tcg_temp_new();
    gen_helper_g_to_memory(tmp, t0);
    tcg_gen_qemu_st_i64(tmp, t1, flags, MO_LEQ);
    tcg_temp_free(tmp);
}

static inline void gen_qemu_sts(TCGv t0, TCGv t1, int flags)
{
    TCGv_i32 tmp32 = tcg_temp_new_i32();
    gen_helper_s_to_memory(tmp32, t0);
    tcg_gen_qemu_st_i32(tmp32, t1, flags, MO_LEUL);
    tcg_temp_free_i32(tmp32);
}
static inline void gen_store_mem(DisasContext *ctx,
                                 void (*tcg_gen_qemu_store)(TCGv t0, TCGv t1,
                                                            int flags),
                                 int ra, int rb, int32_t disp16, bool fp,
                                 bool clear)
{
    TCGv tmp, addr, va;

    tmp = tcg_temp_new();
    addr = load_gpr(ctx, rb);

    if (disp16) {
        tcg_gen_addi_i64(tmp, addr, disp16);
        addr = tmp;
    }
    if (clear) {
        tcg_gen_andi_i64(tmp, addr, ~0x7);
        addr = tmp;
    }

    va = (fp ? load_fpr(ctx, ra) : load_gpr(ctx, ra));
    tcg_gen_qemu_store(va, addr, ctx->mem_idx);

    tcg_temp_free(tmp);
}
static DisasJumpType gen_store_conditional(DisasContext *ctx, int ra, int rb,
                                           int32_t disp16, int mem_idx,
                                           TCGMemOp op)
{
    TCGLabel *lab_fail, *lab_done;
    TCGv addr, val;

    addr = tcg_temp_new_i64();
    tcg_gen_addi_i64(addr, load_gpr(ctx, rb), disp16);
    free_context_temps(ctx);

    lab_fail = gen_new_label();
    lab_done = gen_new_label();
    tcg_gen_brcond_i64(TCG_COND_NE, addr, cpu_lock_addr, lab_fail);
    tcg_temp_free_i64(addr);

    val = tcg_temp_new_i64();
    tcg_gen_atomic_cmpxchg_i64(val, cpu_lock_addr, cpu_lock_value,
                               load_gpr(ctx, ra), mem_idx, op);
    free_context_temps(ctx);

    if (ra != 31) {
        tcg_gen_setcond_i64(TCG_COND_EQ, ctx->ir[ra], val, cpu_lock_value);
    }
    tcg_temp_free_i64(val);
    tcg_gen_br(lab_done);

    gen_set_label(lab_fail);
    if (ra != 31) {
        tcg_gen_movi_i64(ctx->ir[ra], 0);
    }

    gen_set_label(lab_done);
    tcg_gen_movi_i64(cpu_lock_addr, -1);
    return DISAS_NEXT;
}
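
/* Note on the sketch above: STx_C is modelled as a compare-and-swap against
   the value observed by the earlier LDx_L, so the conditional store succeeds
   whenever the location still holds that value.  This can report success in
   cases where real hardware would have lost the lock flag (the usual ABA
   limitation of emulating load-locked/store-conditional with cmpxchg), which
   is generally harmless for typical guest code.  */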
static bool in_superpage(DisasContext *ctx, int64_t addr)
{
#ifndef CONFIG_USER_ONLY
    return ((ctx->tbflags & ENV_FLAG_PS_USER) == 0
            && addr >> TARGET_VIRT_ADDR_SPACE_BITS == -1
            && ((addr >> 41) & 3) == 2);
#else
    return false;
#endif
}

static bool use_exit_tb(DisasContext *ctx)
{
    return ((ctx->base.tb->cflags & CF_LAST_IO)
            || ctx->base.singlestep_enabled
            || singlestep);
}

static bool use_goto_tb(DisasContext *ctx, uint64_t dest)
{
    /* Suppress goto_tb in the case of single-stepping and IO.  */
    if (unlikely(use_exit_tb(ctx))) {
        return false;
    }
#ifndef CONFIG_USER_ONLY
    /* If the destination is in the superpage, the page perms can't change.  */
    if (in_superpage(ctx, dest)) {
        return true;
    }
#endif
    /* Check for the dest on the same page as the start of the TB.  */
    return ((ctx->base.tb->pc ^ dest) & TARGET_PAGE_MASK) == 0;
}
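
/* For illustration, assuming this target's 43-bit virtual address space:
   the in_superpage() test accepts kernel-mode addresses that sign-extend
   from bit 42 and have bits <42:41> == 2, i.e. the direct-mapped 2TB
   superpage 0xfffffc0000000000 .. 0xfffffdffffffffff, whose page
   permissions can never change.  */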
static DisasJumpType gen_bdirect(DisasContext *ctx, int ra, int32_t disp)
{
    uint64_t dest = ctx->base.pc_next + (disp << 2);

    if (ra != 31) {
        tcg_gen_movi_i64(ctx->ir[ra], ctx->base.pc_next);
    }

    /* Notice branch-to-next; used to initialize RA with the PC.  */
    if (disp == 0) {
        return DISAS_NEXT;
    } else if (use_goto_tb(ctx, dest)) {
        tcg_gen_goto_tb(0);
        tcg_gen_movi_i64(cpu_pc, dest);
        tcg_gen_exit_tb((uintptr_t)ctx->base.tb);
        return DISAS_NORETURN;
    } else {
        tcg_gen_movi_i64(cpu_pc, dest);
        return DISAS_PC_UPDATED;
    }
}
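
/* Example of the target computation above: a BR at 0x1000 with disp = +3
   resolves to dest = 0x1004 + (3 << 2) = 0x1010, since pc_next already
   points at the instruction after the branch and the displacement counts
   longwords.  */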
static DisasJumpType gen_bcond_internal(DisasContext *ctx, TCGCond cond,
                                        TCGv cmp, int32_t disp)
{
    uint64_t dest = ctx->base.pc_next + (disp << 2);
    TCGLabel *lab_true = gen_new_label();

    if (use_goto_tb(ctx, dest)) {
        tcg_gen_brcondi_i64(cond, cmp, 0, lab_true);

        tcg_gen_goto_tb(0);
        tcg_gen_movi_i64(cpu_pc, ctx->base.pc_next);
        tcg_gen_exit_tb((uintptr_t)ctx->base.tb);

        gen_set_label(lab_true);
        tcg_gen_goto_tb(1);
        tcg_gen_movi_i64(cpu_pc, dest);
        tcg_gen_exit_tb((uintptr_t)ctx->base.tb + 1);

        return DISAS_NORETURN;
    } else {
        TCGv_i64 z = tcg_const_i64(0);
        TCGv_i64 d = tcg_const_i64(dest);
        TCGv_i64 p = tcg_const_i64(ctx->base.pc_next);

        tcg_gen_movcond_i64(cond, cpu_pc, cmp, z, d, p);

        tcg_temp_free_i64(z);
        tcg_temp_free_i64(d);
        tcg_temp_free_i64(p);
        return DISAS_PC_UPDATED;
    }
}
static DisasJumpType gen_bcond(DisasContext *ctx, TCGCond cond, int ra,
                               int32_t disp, int mask)
{
    if (mask) {
        DisasJumpType ret;
        TCGv tmp = tcg_temp_new();

        tcg_gen_andi_i64(tmp, load_gpr(ctx, ra), 1);
        ret = gen_bcond_internal(ctx, cond, tmp, disp);
        tcg_temp_free(tmp);
        return ret;
    }
    return gen_bcond_internal(ctx, cond, load_gpr(ctx, ra), disp);
}
/* Fold -0.0 for comparison with COND.  */

static void gen_fold_mzero(TCGCond cond, TCGv dest, TCGv src)
{
    uint64_t mzero = 1ull << 63;

    switch (cond) {
    case TCG_COND_LE:
    case TCG_COND_GT:
        /* For <= or >, the -0.0 value directly compares the way we want.  */
        tcg_gen_mov_i64(dest, src);
        break;

    case TCG_COND_EQ:
    case TCG_COND_NE:
        /* For == or !=, we can simply mask off the sign bit and compare.  */
        tcg_gen_andi_i64(dest, src, mzero - 1);
        break;

    case TCG_COND_GE:
    case TCG_COND_LT:
        /* For >= or <, map -0.0 to +0.0 via comparison and mask.  */
        tcg_gen_setcondi_i64(TCG_COND_NE, dest, src, mzero);
        tcg_gen_neg_i64(dest, dest);
        tcg_gen_and_i64(dest, dest, src);
        break;

    default:
        abort();
    }
}
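
/* Worked example of the >= / < case above: for src == -0.0
   (0x8000000000000000) the setcond produces 0, the negation keeps it 0, and
   the AND therefore yields +0.0; for any other value the generated mask is
   all-ones and src passes through unchanged.  The FP branches and cmoves
   below can then compare the result as a signed 64-bit integer.  */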
static DisasJumpType gen_fbcond(DisasContext *ctx, TCGCond cond, int ra,
                                int32_t disp)
{
    DisasJumpType ret;
    TCGv cmp_tmp = tcg_temp_new();

    gen_fold_mzero(cond, cmp_tmp, load_fpr(ctx, ra));
    ret = gen_bcond_internal(ctx, cond, cmp_tmp, disp);
    tcg_temp_free(cmp_tmp);
    return ret;
}

static void gen_fcmov(DisasContext *ctx, TCGCond cond, int ra, int rb, int rc)
{
    TCGv va, vb, z;

    z = load_zero(ctx);
    vb = load_fpr(ctx, rb);
    va = tcg_temp_new();
    gen_fold_mzero(cond, va, load_fpr(ctx, ra));

    tcg_gen_movcond_i64(cond, dest_fpr(ctx, rc), va, z, vb, load_fpr(ctx, rc));

    tcg_temp_free(va);
}
#define QUAL_RM_N       0x080   /* Round mode nearest even */
#define QUAL_RM_C       0x000   /* Round mode chopped */
#define QUAL_RM_M       0x040   /* Round mode minus infinity */
#define QUAL_RM_D       0x0c0   /* Round mode dynamic */
#define QUAL_RM_MASK    0x0c0

#define QUAL_U          0x100   /* Underflow enable (fp output) */
#define QUAL_V          0x100   /* Overflow enable (int output) */
#define QUAL_S          0x400   /* Software completion enable */
#define QUAL_I          0x200   /* Inexact detection enable */
static void gen_qual_roundmode(DisasContext *ctx, int fn11)
{
    TCGv_i32 tmp;

    fn11 &= QUAL_RM_MASK;
    if (fn11 == ctx->tb_rm) {
        return;
    }
    ctx->tb_rm = fn11;

    tmp = tcg_temp_new_i32();
    switch (fn11) {
    case QUAL_RM_N:
        tcg_gen_movi_i32(tmp, float_round_nearest_even);
        break;
    case QUAL_RM_C:
        tcg_gen_movi_i32(tmp, float_round_to_zero);
        break;
    case QUAL_RM_M:
        tcg_gen_movi_i32(tmp, float_round_down);
        break;
    case QUAL_RM_D:
        tcg_gen_ld8u_i32(tmp, cpu_env,
                         offsetof(CPUAlphaState, fpcr_dyn_round));
        break;
    }

#if defined(CONFIG_SOFTFLOAT_INLINE)
    /* ??? The "fpu/softfloat.h" interface is to call set_float_rounding_mode.
       With CONFIG_SOFTFLOAT that expands to an out-of-line call that just
       sets the one field.  */
    tcg_gen_st8_i32(tmp, cpu_env,
                    offsetof(CPUAlphaState, fp_status.float_rounding_mode));
#else
    gen_helper_setroundmode(tmp);
#endif

    tcg_temp_free_i32(tmp);
}
static void gen_qual_flushzero(DisasContext *ctx, int fn11)
{
    TCGv_i32 tmp;

    fn11 &= QUAL_U;
    if (fn11 == ctx->tb_ftz) {
        return;
    }
    ctx->tb_ftz = fn11;

    tmp = tcg_temp_new_i32();
    if (fn11) {
        /* Underflow is enabled, use the FPCR setting.  */
        tcg_gen_ld8u_i32(tmp, cpu_env,
                         offsetof(CPUAlphaState, fpcr_flush_to_zero));
    } else {
        /* Underflow is disabled, force flush-to-zero.  */
        tcg_gen_movi_i32(tmp, 1);
    }

#if defined(CONFIG_SOFTFLOAT_INLINE)
    tcg_gen_st8_i32(tmp, cpu_env,
                    offsetof(CPUAlphaState, fp_status.flush_to_zero));
#else
    gen_helper_setflushzero(tmp);
#endif

    tcg_temp_free_i32(tmp);
}
static TCGv gen_ieee_input(DisasContext *ctx, int reg, int fn11, int is_cmp)
{
    TCGv val;

    if (unlikely(reg == 31)) {
        val = load_zero(ctx);
    } else {
        val = cpu_fir[reg];
        if ((fn11 & QUAL_S) == 0) {
            if (is_cmp) {
                gen_helper_ieee_input_cmp(cpu_env, val);
            } else {
                gen_helper_ieee_input(cpu_env, val);
            }
        } else {
#ifndef CONFIG_USER_ONLY
            /* In system mode, raise exceptions for denormals like real
               hardware.  In user mode, proceed as if the OS completion
               handler is handling the denormal as per spec.  */
            gen_helper_ieee_input_s(cpu_env, val);
#endif
        }
    }
    return val;
}
static void gen_fp_exc_raise(int rc, int fn11)
{
    /* ??? We ought to be able to do something with imprecise exceptions.
       E.g. notice we're still in the trap shadow of something within the
       TB and do not generate the code to signal the exception; end the TB
       when an exception is forced to arrive, either by consumption of a
       register value or TRAPB or EXCB.  */

    TCGv_i32 reg, ign;
    uint32_t ignore = 0;

    if (!(fn11 & QUAL_U)) {
        /* Note that QUAL_U == QUAL_V, so ignore either.  */
        ignore |= FPCR_UNF | FPCR_IOV;
    }
    if (!(fn11 & QUAL_I)) {
        ignore |= FPCR_INE;
    }
    ign = tcg_const_i32(ignore);

    /* ??? Pass in the regno of the destination so that the helper can
       set EXC_MASK, which contains a bitmask of destination registers
       that have caused arithmetic traps.  A simple userspace emulation
       does not require this.  We do need it for a guest kernel's entArith,
       or if we were to do something clever with imprecise exceptions.  */
    reg = tcg_const_i32(rc + 32);
    if (fn11 & QUAL_S) {
        gen_helper_fp_exc_raise_s(cpu_env, ign, reg);
    } else {
        gen_helper_fp_exc_raise(cpu_env, ign, reg);
    }

    tcg_temp_free_i32(reg);
    tcg_temp_free_i32(ign);
}
static void gen_cvtlq(TCGv vc, TCGv vb)
{
    TCGv tmp = tcg_temp_new();

    /* The arithmetic right shift here, plus the sign-extended mask below
       yields a sign-extended result without an explicit ext32s_i64.  */
    tcg_gen_shri_i64(tmp, vb, 29);
    tcg_gen_sari_i64(vc, vb, 32);
    tcg_gen_deposit_i64(vc, vc, tmp, 0, 30);

    tcg_temp_free(tmp);
}
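
/* Layout being unpacked above, for reference: a longword held in an FP
   register keeps bits <29:0> of the value in register bits <58:29> and
   bits <31:30> in register bits <63:62> (the format produced by CVTQL).
   The shift/deposit pair reassembles the 32-bit value and sign-extends it
   from bit 31.  */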
static void gen_ieee_arith2(DisasContext *ctx,
                            void (*helper)(TCGv, TCGv_ptr, TCGv),
                            int rb, int rc, int fn11)
{
    TCGv vb;

    gen_qual_roundmode(ctx, fn11);
    gen_qual_flushzero(ctx, fn11);

    vb = gen_ieee_input(ctx, rb, fn11, 0);
    helper(dest_fpr(ctx, rc), cpu_env, vb);

    gen_fp_exc_raise(rc, fn11);
}

#define IEEE_ARITH2(name)                                       \
static inline void glue(gen_, name)(DisasContext *ctx,          \
                                    int rb, int rc, int fn11)   \
{                                                               \
    gen_ieee_arith2(ctx, gen_helper_##name, rb, rc, fn11);      \
}
IEEE_ARITH2(sqrts)
IEEE_ARITH2(sqrtt)
IEEE_ARITH2(cvtst)
IEEE_ARITH2(cvtts)
static void gen_cvttq(DisasContext *ctx, int rb, int rc, int fn11)
{
    TCGv vb, vc;

    /* No need to set flushzero, since we have an integer output.  */
    vb = gen_ieee_input(ctx, rb, fn11, 0);
    vc = dest_fpr(ctx, rc);

    /* Almost all integer conversions use cropped rounding;
       special case that.  */
    if ((fn11 & QUAL_RM_MASK) == QUAL_RM_C) {
        gen_helper_cvttq_c(vc, cpu_env, vb);
    } else {
        gen_qual_roundmode(ctx, fn11);
        gen_helper_cvttq(vc, cpu_env, vb);
    }
    gen_fp_exc_raise(rc, fn11);
}
static void gen_ieee_intcvt(DisasContext *ctx,
                            void (*helper)(TCGv, TCGv_ptr, TCGv),
                            int rb, int rc, int fn11)
{
    TCGv vb, vc;

    gen_qual_roundmode(ctx, fn11);
    vb = load_fpr(ctx, rb);
    vc = dest_fpr(ctx, rc);

    /* The only exception that can be raised by integer conversion
       is inexact.  Thus we only need to worry about exceptions when
       inexact handling is requested.  */
    if (fn11 & QUAL_I) {
        helper(vc, cpu_env, vb);
        gen_fp_exc_raise(rc, fn11);
    } else {
        helper(vc, cpu_env, vb);
    }
}

#define IEEE_INTCVT(name)                                       \
static inline void glue(gen_, name)(DisasContext *ctx,          \
                                    int rb, int rc, int fn11)   \
{                                                               \
    gen_ieee_intcvt(ctx, gen_helper_##name, rb, rc, fn11);      \
}
IEEE_INTCVT(cvtqs)
IEEE_INTCVT(cvtqt)
static void gen_cpy_mask(TCGv vc, TCGv va, TCGv vb, bool inv_a, uint64_t mask)
{
    TCGv vmask = tcg_const_i64(mask);
    TCGv tmp = tcg_temp_new_i64();

    if (inv_a) {
        tcg_gen_andc_i64(tmp, vmask, va);
    } else {
        tcg_gen_and_i64(tmp, va, vmask);
    }

    tcg_gen_andc_i64(vc, vb, vmask);
    tcg_gen_or_i64(vc, vc, tmp);

    tcg_temp_free(vmask);
    tcg_temp_free(tmp);
}
static void gen_ieee_arith3(DisasContext *ctx,
                            void (*helper)(TCGv, TCGv_ptr, TCGv, TCGv),
                            int ra, int rb, int rc, int fn11)
{
    TCGv va, vb, vc;

    gen_qual_roundmode(ctx, fn11);
    gen_qual_flushzero(ctx, fn11);

    va = gen_ieee_input(ctx, ra, fn11, 0);
    vb = gen_ieee_input(ctx, rb, fn11, 0);
    vc = dest_fpr(ctx, rc);
    helper(vc, cpu_env, va, vb);

    gen_fp_exc_raise(rc, fn11);
}

#define IEEE_ARITH3(name)                                               \
static inline void glue(gen_, name)(DisasContext *ctx,                  \
                                    int ra, int rb, int rc, int fn11)   \
{                                                                       \
    gen_ieee_arith3(ctx, gen_helper_##name, ra, rb, rc, fn11);          \
}
IEEE_ARITH3(adds)
IEEE_ARITH3(subs)
IEEE_ARITH3(muls)
IEEE_ARITH3(divs)
IEEE_ARITH3(addt)
IEEE_ARITH3(subt)
IEEE_ARITH3(mult)
IEEE_ARITH3(divt)
static void gen_ieee_compare(DisasContext *ctx,
                             void (*helper)(TCGv, TCGv_ptr, TCGv, TCGv),
                             int ra, int rb, int rc, int fn11)
{
    TCGv va, vb, vc;

    va = gen_ieee_input(ctx, ra, fn11, 1);
    vb = gen_ieee_input(ctx, rb, fn11, 1);
    vc = dest_fpr(ctx, rc);
    helper(vc, cpu_env, va, vb);

    gen_fp_exc_raise(rc, fn11);
}

#define IEEE_CMP3(name)                                                 \
static inline void glue(gen_, name)(DisasContext *ctx,                  \
                                    int ra, int rb, int rc, int fn11)   \
{                                                                       \
    gen_ieee_compare(ctx, gen_helper_##name, ra, rb, rc, fn11);         \
}
IEEE_CMP3(cmptun)
IEEE_CMP3(cmpteq)
IEEE_CMP3(cmptlt)
IEEE_CMP3(cmptle)
static inline uint64_t zapnot_mask(uint8_t lit)
{
    uint64_t mask = 0;
    int i;

    for (i = 0; i < 8; ++i) {
        if ((lit >> i) & 1) {
            mask |= 0xffull << (i * 8);
        }
    }
    return mask;
}
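
/* Example of the expansion above: lit == 0x0f selects the low four bytes
   and yields the mask 0x00000000ffffffff; lit == 0x05 keeps bytes 0 and 2
   and yields 0x0000000000ff00ff.  */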
/* Implement zapnot with an immediate operand, which expands to some
   form of immediate AND.  This is a basic building block in the
   definition of many of the other byte manipulation instructions.  */
static void gen_zapnoti(TCGv dest, TCGv src, uint8_t lit)
{
    switch (lit) {
    case 0x00:
        tcg_gen_movi_i64(dest, 0);
        break;
    case 0x01:
        tcg_gen_ext8u_i64(dest, src);
        break;
    case 0x03:
        tcg_gen_ext16u_i64(dest, src);
        break;
    case 0x0f:
        tcg_gen_ext32u_i64(dest, src);
        break;
    case 0xff:
        tcg_gen_mov_i64(dest, src);
        break;
    default:
        tcg_gen_andi_i64(dest, src, zapnot_mask(lit));
        break;
    }
}
/* EXTWH, EXTLH, EXTQH */
static void gen_ext_h(DisasContext *ctx, TCGv vc, TCGv va, int rb, bool islit,
                      uint8_t lit, uint8_t byte_mask)
{
    if (islit) {
        int pos = (64 - lit * 8) & 0x3f;
        int len = cto32(byte_mask) * 8;
        if (pos < len) {
            tcg_gen_deposit_z_i64(vc, va, pos, len - pos);
        } else {
            tcg_gen_movi_i64(vc, 0);
        }
    } else {
        TCGv tmp = tcg_temp_new();
        tcg_gen_shli_i64(tmp, load_gpr(ctx, rb), 3);
        tcg_gen_neg_i64(tmp, tmp);
        tcg_gen_andi_i64(tmp, tmp, 0x3f);
        tcg_gen_shl_i64(vc, va, tmp);
        tcg_temp_free(tmp);
    }
    gen_zapnoti(vc, vc, byte_mask);
}
/* EXTBL, EXTWL, EXTLL, EXTQL */
static void gen_ext_l(DisasContext *ctx, TCGv vc, TCGv va, int rb, bool islit,
                      uint8_t lit, uint8_t byte_mask)
{
    if (islit) {
        int pos = (lit & 7) * 8;
        int len = cto32(byte_mask) * 8;
        if (pos + len >= 64) {
            len = 64 - pos;
        }
        tcg_gen_extract_i64(vc, va, pos, len);
    } else {
        TCGv tmp = tcg_temp_new();
        tcg_gen_andi_i64(tmp, load_gpr(ctx, rb), 7);
        tcg_gen_shli_i64(tmp, tmp, 3);
        tcg_gen_shr_i64(vc, va, tmp);
        tcg_temp_free(tmp);
        gen_zapnoti(vc, vc, byte_mask);
    }
}
/* INSWH, INSLH, INSQH */
static void gen_ins_h(DisasContext *ctx, TCGv vc, TCGv va, int rb, bool islit,
                      uint8_t lit, uint8_t byte_mask)
{
    if (islit) {
        int pos = 64 - (lit & 7) * 8;
        int len = cto32(byte_mask) * 8;
        if (pos < len) {
            tcg_gen_extract_i64(vc, va, pos, len - pos);
        } else {
            tcg_gen_movi_i64(vc, 0);
        }
    } else {
        TCGv tmp = tcg_temp_new();
        TCGv shift = tcg_temp_new();

        /* The instruction description has us left-shift the byte mask
           and extract bits <15:8> and apply that zap at the end.  This
           is equivalent to simply performing the zap first and shifting
           afterward.  */
        gen_zapnoti(tmp, va, byte_mask);

        /* If (B & 7) == 0, we need to shift by 64 and leave a zero.  Do this
           portably by splitting the shift into two parts: shift_count-1 and 1.
           Arrange for the -1 by using ones-complement instead of
           twos-complement in the negation: ~(B * 8) & 63.  */

        tcg_gen_shli_i64(shift, load_gpr(ctx, rb), 3);
        tcg_gen_not_i64(shift, shift);
        tcg_gen_andi_i64(shift, shift, 0x3f);

        tcg_gen_shr_i64(vc, tmp, shift);
        tcg_gen_shri_i64(vc, vc, 1);
        tcg_temp_free(shift);
        tcg_temp_free(tmp);
    }
}
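
/* Worked example of the split shift above: for rb & 7 == 2 the desired
   right shift is 64 - 2 * 8 = 48; ~(2 * 8) & 63 = 47, and 47 plus the
   final constant shift of 1 gives 48.  For rb & 7 == 0 the desired shift
   is 64, which a single variable shift cannot express portably; here
   ~(0 * 8) & 63 = 63, and 63 + 1 clears the register as required.  */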
/* INSBL, INSWL, INSLL, INSQL */
static void gen_ins_l(DisasContext *ctx, TCGv vc, TCGv va, int rb, bool islit,
                      uint8_t lit, uint8_t byte_mask)
{
    if (islit) {
        int pos = (lit & 7) * 8;
        int len = cto32(byte_mask) * 8;
        if (pos + len > 64) {
            len = 64 - pos;
        }
        tcg_gen_deposit_z_i64(vc, va, pos, len);
    } else {
        TCGv tmp = tcg_temp_new();
        TCGv shift = tcg_temp_new();

        /* The instruction description has us left-shift the byte mask
           and extract bits <15:8> and apply that zap at the end.  This
           is equivalent to simply performing the zap first and shifting
           afterward.  */
        gen_zapnoti(tmp, va, byte_mask);

        tcg_gen_andi_i64(shift, load_gpr(ctx, rb), 7);
        tcg_gen_shli_i64(shift, shift, 3);
        tcg_gen_shl_i64(vc, tmp, shift);
        tcg_temp_free(shift);
        tcg_temp_free(tmp);
    }
}
/* MSKWH, MSKLH, MSKQH */
static void gen_msk_h(DisasContext *ctx, TCGv vc, TCGv va, int rb, bool islit,
                      uint8_t lit, uint8_t byte_mask)
{
    if (islit) {
        gen_zapnoti(vc, va, ~((byte_mask << (lit & 7)) >> 8));
    } else {
        TCGv shift = tcg_temp_new();
        TCGv mask = tcg_temp_new();

        /* The instruction description is as above, where the byte_mask
           is shifted left, and then we extract bits <15:8>.  This can be
           emulated with a right-shift on the expanded byte mask.  This
           requires extra care because for an input <2:0> == 0 we need a
           shift of 64 bits in order to generate a zero.  This is done by
           splitting the shift into two parts, the variable shift - 1
           followed by a constant 1 shift.  The code we expand below is
           equivalent to ~(B * 8) & 63.  */
        tcg_gen_shli_i64(shift, load_gpr(ctx, rb), 3);
        tcg_gen_not_i64(shift, shift);
        tcg_gen_andi_i64(shift, shift, 0x3f);

        tcg_gen_movi_i64(mask, zapnot_mask(byte_mask));
        tcg_gen_shr_i64(mask, mask, shift);
        tcg_gen_shri_i64(mask, mask, 1);

        tcg_gen_andc_i64(vc, va, mask);

        tcg_temp_free(mask);
        tcg_temp_free(shift);
    }
}
/* MSKBL, MSKWL, MSKLL, MSKQL */
static void gen_msk_l(DisasContext *ctx, TCGv vc, TCGv va, int rb, bool islit,
                      uint8_t lit, uint8_t byte_mask)
{
    if (islit) {
        gen_zapnoti(vc, va, ~(byte_mask << (lit & 7)));
    } else {
        TCGv shift = tcg_temp_new();
        TCGv mask = tcg_temp_new();

        tcg_gen_andi_i64(shift, load_gpr(ctx, rb), 7);
        tcg_gen_shli_i64(shift, shift, 3);
        tcg_gen_movi_i64(mask, zapnot_mask(byte_mask));
        tcg_gen_shl_i64(mask, mask, shift);

        tcg_gen_andc_i64(vc, va, mask);

        tcg_temp_free(mask);
        tcg_temp_free(shift);
    }
}
static void gen_rx(DisasContext *ctx, int ra, int set)
{
    TCGv tmp;

    if (ra != 31) {
        ld_flag_byte(ctx->ir[ra], ENV_FLAG_RX_SHIFT);
    }

    tmp = tcg_const_i64(set);
    st_flag_byte(tmp, ENV_FLAG_RX_SHIFT);
    tcg_temp_free(tmp);
}
static DisasJumpType gen_call_pal(DisasContext *ctx, int palcode)
{
    /* We're emulating OSF/1 PALcode.  Many of these are trivial access
       to internal cpu registers.  */

    /* Unprivileged PAL call */
    if (palcode >= 0x80 && palcode < 0xC0) {
        switch (palcode) {
        case 0x86:
            /* IMB */
            /* No-op inside QEMU.  */
            break;
        case 0x9E:
            /* RDUNIQUE */
            tcg_gen_ld_i64(ctx->ir[IR_V0], cpu_env,
                           offsetof(CPUAlphaState, unique));
            break;
        case 0x9F:
            /* WRUNIQUE */
            tcg_gen_st_i64(ctx->ir[IR_A0], cpu_env,
                           offsetof(CPUAlphaState, unique));
            break;
        default:
            palcode &= 0xbf;
            goto do_call_pal;
        }
        return DISAS_NEXT;
    }

#ifndef CONFIG_USER_ONLY
    /* Privileged PAL code */
    if (palcode < 0x40 && (ctx->tbflags & ENV_FLAG_PS_USER) == 0) {
        switch (palcode) {
        case 0x01:
            /* CFLUSH */
            /* No-op inside QEMU.  */
            break;
        case 0x02:
            /* DRAINA */
            /* No-op inside QEMU.  */
            break;
        case 0x2D:
            /* WRVPTPTR */
            tcg_gen_st_i64(ctx->ir[IR_A0], cpu_env,
                           offsetof(CPUAlphaState, vptptr));
            break;
        case 0x31:
            /* WRVAL */
            tcg_gen_st_i64(ctx->ir[IR_A0], cpu_env,
                           offsetof(CPUAlphaState, sysval));
            break;
        case 0x32:
            /* RDVAL */
            tcg_gen_ld_i64(ctx->ir[IR_V0], cpu_env,
                           offsetof(CPUAlphaState, sysval));
            break;

        case 0x35:
            /* SWPIPL */
            /* Note that we already know we're in kernel mode, so we know
               that PS only contains the 3 IPL bits.  */
            ld_flag_byte(ctx->ir[IR_V0], ENV_FLAG_PS_SHIFT);

            /* But make sure and store only the 3 IPL bits from the user.  */
            {
                TCGv tmp = tcg_temp_new();
                tcg_gen_andi_i64(tmp, ctx->ir[IR_A0], PS_INT_MASK);
                st_flag_byte(tmp, ENV_FLAG_PS_SHIFT);
                tcg_temp_free(tmp);
            }

            /* Allow interrupts to be recognized right away.  */
            tcg_gen_movi_i64(cpu_pc, ctx->base.pc_next);
            return DISAS_PC_UPDATED_NOCHAIN;

        case 0x36:
            /* RDPS */
            ld_flag_byte(ctx->ir[IR_V0], ENV_FLAG_PS_SHIFT);
            break;

        case 0x38:
            /* WRUSP */
            tcg_gen_st_i64(ctx->ir[IR_A0], cpu_env,
                           offsetof(CPUAlphaState, usp));
            break;
        case 0x3A:
            /* RDUSP */
            tcg_gen_ld_i64(ctx->ir[IR_V0], cpu_env,
                           offsetof(CPUAlphaState, usp));
            break;
        case 0x3C:
            /* WHAMI */
            tcg_gen_ld32s_i64(ctx->ir[IR_V0], cpu_env,
                -offsetof(AlphaCPU, env) + offsetof(CPUState, cpu_index));
            break;

        case 0x3E:
            /* WTINT */
            {
                TCGv_i32 tmp = tcg_const_i32(1);
                tcg_gen_st_i32(tmp, cpu_env, -offsetof(AlphaCPU, env) +
                                             offsetof(CPUState, halted));
                tcg_temp_free_i32(tmp);
            }
            tcg_gen_movi_i64(ctx->ir[IR_V0], 0);
            return gen_excp(ctx, EXCP_HALTED, 0);

        default:
            palcode &= 0x3f;
            goto do_call_pal;
        }
        return DISAS_NEXT;
    }
#endif
    return gen_invalid(ctx);

 do_call_pal:
#ifdef CONFIG_USER_ONLY
    return gen_excp(ctx, EXCP_CALL_PAL, palcode);
#else
    {
        TCGv tmp = tcg_temp_new();
        uint64_t exc_addr = ctx->base.pc_next;
        uint64_t entry = ctx->palbr;

        if (ctx->tbflags & ENV_FLAG_PAL_MODE) {
            exc_addr |= 1;
        } else {
            tcg_gen_movi_i64(tmp, 1);
            st_flag_byte(tmp, ENV_FLAG_PAL_SHIFT);
        }

        tcg_gen_movi_i64(tmp, exc_addr);
        tcg_gen_st_i64(tmp, cpu_env, offsetof(CPUAlphaState, exc_addr));
        tcg_temp_free(tmp);

        entry += (palcode & 0x80
                  ? 0x2000 + (palcode - 0x80) * 64
                  : 0x1000 + palcode * 64);

        /* Since the destination is running in PALmode, we don't really
           need the page permissions check.  We'll see the existence of
           the page when we create the TB, and we'll flush all TBs if
           we change the PAL base register.  */
        if (!use_exit_tb(ctx)) {
            tcg_gen_goto_tb(0);
            tcg_gen_movi_i64(cpu_pc, entry);
            tcg_gen_exit_tb((uintptr_t)ctx->base.tb);
            return DISAS_NORETURN;
        } else {
            tcg_gen_movi_i64(cpu_pc, entry);
            return DISAS_PC_UPDATED;
        }
    }
#endif
}
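
/* Example of the entry-point computation above, using the OSF/1 PALcode
   layout assumed by this file: CALL_PAL 0x83 (callsys) vectors to
   palbr + 0x2000 + (0x83 - 0x80) * 64 = palbr + 0x20c0, while an unhandled
   unprivileged code such as 0xAA (gentrap) vectors to
   palbr + 0x2000 + (0xAA - 0x80) * 64 = palbr + 0x2a80.  */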
#ifndef CONFIG_USER_ONLY

#define PR_LONG         0x200000

static int cpu_pr_data(int pr)
{
    switch (pr) {
    case  2: return offsetof(CPUAlphaState, pcc_ofs) | PR_LONG;
    case  3: return offsetof(CPUAlphaState, trap_arg0);
    case  4: return offsetof(CPUAlphaState, trap_arg1);
    case  5: return offsetof(CPUAlphaState, trap_arg2);
    case  6: return offsetof(CPUAlphaState, exc_addr);
    case  7: return offsetof(CPUAlphaState, palbr);
    case  8: return offsetof(CPUAlphaState, ptbr);
    case  9: return offsetof(CPUAlphaState, vptptr);
    case 10: return offsetof(CPUAlphaState, unique);
    case 11: return offsetof(CPUAlphaState, sysval);
    case 12: return offsetof(CPUAlphaState, usp);

    case 40 ... 63:
        return offsetof(CPUAlphaState, scratch[pr - 40]);

    case 251:
        return offsetof(CPUAlphaState, alarm_expire);
    }
    return 0;
}
static DisasJumpType gen_mfpr(DisasContext *ctx, TCGv va, int regno)
{
    void (*helper)(TCGv);
    int data;

    switch (regno) {
    case 32 ... 39:
        /* Accessing the "non-shadow" general registers.  */
        regno = regno == 39 ? 25 : regno - 32 + 8;
        tcg_gen_mov_i64(va, cpu_std_ir[regno]);
        break;

    case 250: /* WALLTIME */
        helper = gen_helper_get_walltime;
        goto do_helper;
    case 249: /* VMTIME */
        helper = gen_helper_get_vmtime;
    do_helper:
        if (ctx->base.tb->cflags & CF_USE_ICOUNT) {
            gen_io_start();
            helper(va);
            gen_io_end();
            return DISAS_PC_STALE;
        } else {
            helper(va);
        }
        break;

    case 0: /* PS */
        ld_flag_byte(va, ENV_FLAG_PS_SHIFT);
        break;
    case 1: /* FEN */
        ld_flag_byte(va, ENV_FLAG_FEN_SHIFT);
        break;

    default:
        /* The basic registers are data only, and unknown registers
           are read-zero, write-ignore.  */
        data = cpu_pr_data(regno);
        if (data == 0) {
            tcg_gen_movi_i64(va, 0);
        } else if (data & PR_LONG) {
            tcg_gen_ld32s_i64(va, cpu_env, data & ~PR_LONG);
        } else {
            tcg_gen_ld_i64(va, cpu_env, data);
        }
        break;
    }

    return DISAS_NEXT;
}
static DisasJumpType gen_mtpr(DisasContext *ctx, TCGv vb, int regno)
{
    int data;

    switch (regno) {
    case 255:
        /* TBIA */
        gen_helper_tbia(cpu_env);
        break;

    case 254:
        /* TBIS */
        gen_helper_tbis(cpu_env, vb);
        break;

    case 253:
        /* WAIT */
        {
            TCGv_i32 tmp = tcg_const_i32(1);
            tcg_gen_st_i32(tmp, cpu_env, -offsetof(AlphaCPU, env) +
                                         offsetof(CPUState, halted));
            tcg_temp_free_i32(tmp);
        }
        return gen_excp(ctx, EXCP_HALTED, 0);

    case 252:
        /* HALT */
        gen_helper_halt(vb);
        return DISAS_PC_STALE;

    case 251:
        /* ALARM */
        gen_helper_set_alarm(cpu_env, vb);
        break;

    case 7:
        /* PALBR */
        tcg_gen_st_i64(vb, cpu_env, offsetof(CPUAlphaState, palbr));
        /* Changing the PAL base register implies un-chaining all of the TBs
           that ended with a CALL_PAL.  Since the base register usually only
           changes during boot, flushing everything works well.  */
        gen_helper_tb_flush(cpu_env);
        return DISAS_PC_STALE;

    case 32 ... 39:
        /* Accessing the "non-shadow" general registers.  */
        regno = regno == 39 ? 25 : regno - 32 + 8;
        tcg_gen_mov_i64(cpu_std_ir[regno], vb);
        break;

    case 0: /* PS */
        st_flag_byte(vb, ENV_FLAG_PS_SHIFT);
        break;
    case 1: /* FEN */
        st_flag_byte(vb, ENV_FLAG_FEN_SHIFT);
        break;

    default:
        /* The basic registers are data only, and unknown registers
           are read-zero, write-ignore.  */
        data = cpu_pr_data(regno);
        if (data != 0) {
            if (data & PR_LONG) {
                tcg_gen_st32_i64(vb, cpu_env, data & ~PR_LONG);
            } else {
                tcg_gen_st_i64(vb, cpu_env, data);
            }
        }
        break;
    }

    return DISAS_NEXT;
}
#endif /* !USER_ONLY */

#define REQUIRE_NO_LIT                          \
    do {                                        \
        if (real_islit) {                       \
            goto invalid_opc;                   \
        }                                       \
    } while (0)

#define REQUIRE_AMASK(FLAG)                     \
    do {                                        \
        if ((ctx->amask & AMASK_##FLAG) == 0) { \
            goto invalid_opc;                   \
        }                                       \
    } while (0)

#define REQUIRE_TB_FLAG(FLAG)                   \
    do {                                        \
        if ((ctx->tbflags & (FLAG)) == 0) {     \
            goto invalid_opc;                   \
        }                                       \
    } while (0)

#define REQUIRE_REG_31(WHICH)                   \
    do {                                        \
        if (WHICH != 31) {                      \
            goto invalid_opc;                   \
        }                                       \
    } while (0)
static DisasJumpType translate_one(DisasContext *ctx, uint32_t insn)
{
    int32_t disp21, disp16, disp12 __attribute__((unused));
    uint16_t fn11;
    uint8_t opc, ra, rb, rc, fpfn, fn7, lit;
    bool islit, real_islit;
    TCGv va, vb, vc, tmp, tmp2;
    TCGv_i32 t32;
    DisasJumpType ret;

    /* Decode all instruction fields */
    opc = extract32(insn, 26, 6);
    ra = extract32(insn, 21, 5);
    rb = extract32(insn, 16, 5);
    rc = extract32(insn, 0, 5);
    real_islit = islit = extract32(insn, 12, 1);
    lit = extract32(insn, 13, 8);

    disp21 = sextract32(insn, 0, 21);
    disp16 = sextract32(insn, 0, 16);
    disp12 = sextract32(insn, 0, 12);

    fn11 = extract32(insn, 5, 11);
    fpfn = extract32(insn, 5, 6);
    fn7 = extract32(insn, 5, 7);
== 31 && !islit
) {
1508 ret
= gen_call_pal(ctx
, insn
& 0x03ffffff);
1534 disp16
= (uint32_t)disp16
<< 16;
1538 va
= dest_gpr(ctx
, ra
);
1539 /* It's worth special-casing immediate loads. */
1541 tcg_gen_movi_i64(va
, disp16
);
1543 tcg_gen_addi_i64(va
, load_gpr(ctx
, rb
), disp16
);
1550 gen_load_mem(ctx
, &tcg_gen_qemu_ld8u
, ra
, rb
, disp16
, 0, 0);
1554 gen_load_mem(ctx
, &tcg_gen_qemu_ld64
, ra
, rb
, disp16
, 0, 1);
1559 gen_load_mem(ctx
, &tcg_gen_qemu_ld16u
, ra
, rb
, disp16
, 0, 0);
1564 gen_store_mem(ctx
, &tcg_gen_qemu_st16
, ra
, rb
, disp16
, 0, 0);
1569 gen_store_mem(ctx
, &tcg_gen_qemu_st8
, ra
, rb
, disp16
, 0, 0);
1573 gen_store_mem(ctx
, &tcg_gen_qemu_st64
, ra
, rb
, disp16
, 0, 1);
1577 vc
= dest_gpr(ctx
, rc
);
1578 vb
= load_gpr_lit(ctx
, rb
, lit
, islit
);
1582 /* Special case ADDL as SEXTL. */
1583 tcg_gen_ext32s_i64(vc
, vb
);
1587 /* Special case SUBQ as NEGQ. */
1588 tcg_gen_neg_i64(vc
, vb
);
1593 va
= load_gpr(ctx
, ra
);
1597 tcg_gen_add_i64(vc
, va
, vb
);
1598 tcg_gen_ext32s_i64(vc
, vc
);
1602 tmp
= tcg_temp_new();
1603 tcg_gen_shli_i64(tmp
, va
, 2);
1604 tcg_gen_add_i64(tmp
, tmp
, vb
);
1605 tcg_gen_ext32s_i64(vc
, tmp
);
1610 tcg_gen_sub_i64(vc
, va
, vb
);
1611 tcg_gen_ext32s_i64(vc
, vc
);
1615 tmp
= tcg_temp_new();
1616 tcg_gen_shli_i64(tmp
, va
, 2);
1617 tcg_gen_sub_i64(tmp
, tmp
, vb
);
1618 tcg_gen_ext32s_i64(vc
, tmp
);
1624 /* Special case 0 >= X as X == 0. */
1625 gen_helper_cmpbe0(vc
, vb
);
1627 gen_helper_cmpbge(vc
, va
, vb
);
1632 tmp
= tcg_temp_new();
1633 tcg_gen_shli_i64(tmp
, va
, 3);
1634 tcg_gen_add_i64(tmp
, tmp
, vb
);
1635 tcg_gen_ext32s_i64(vc
, tmp
);
1640 tmp
= tcg_temp_new();
1641 tcg_gen_shli_i64(tmp
, va
, 3);
1642 tcg_gen_sub_i64(tmp
, tmp
, vb
);
1643 tcg_gen_ext32s_i64(vc
, tmp
);
1648 tcg_gen_setcond_i64(TCG_COND_LTU
, vc
, va
, vb
);
1652 tcg_gen_add_i64(vc
, va
, vb
);
1656 tmp
= tcg_temp_new();
1657 tcg_gen_shli_i64(tmp
, va
, 2);
1658 tcg_gen_add_i64(vc
, tmp
, vb
);
1663 tcg_gen_sub_i64(vc
, va
, vb
);
1667 tmp
= tcg_temp_new();
1668 tcg_gen_shli_i64(tmp
, va
, 2);
1669 tcg_gen_sub_i64(vc
, tmp
, vb
);
1674 tcg_gen_setcond_i64(TCG_COND_EQ
, vc
, va
, vb
);
1678 tmp
= tcg_temp_new();
1679 tcg_gen_shli_i64(tmp
, va
, 3);
1680 tcg_gen_add_i64(vc
, tmp
, vb
);
1685 tmp
= tcg_temp_new();
1686 tcg_gen_shli_i64(tmp
, va
, 3);
1687 tcg_gen_sub_i64(vc
, tmp
, vb
);
1692 tcg_gen_setcond_i64(TCG_COND_LEU
, vc
, va
, vb
);
1696 tmp
= tcg_temp_new();
1697 tcg_gen_ext32s_i64(tmp
, va
);
1698 tcg_gen_ext32s_i64(vc
, vb
);
1699 tcg_gen_add_i64(tmp
, tmp
, vc
);
1700 tcg_gen_ext32s_i64(vc
, tmp
);
1701 gen_helper_check_overflow(cpu_env
, vc
, tmp
);
1706 tmp
= tcg_temp_new();
1707 tcg_gen_ext32s_i64(tmp
, va
);
1708 tcg_gen_ext32s_i64(vc
, vb
);
1709 tcg_gen_sub_i64(tmp
, tmp
, vc
);
1710 tcg_gen_ext32s_i64(vc
, tmp
);
1711 gen_helper_check_overflow(cpu_env
, vc
, tmp
);
1716 tcg_gen_setcond_i64(TCG_COND_LT
, vc
, va
, vb
);
1720 tmp
= tcg_temp_new();
1721 tmp2
= tcg_temp_new();
1722 tcg_gen_eqv_i64(tmp
, va
, vb
);
1723 tcg_gen_mov_i64(tmp2
, va
);
1724 tcg_gen_add_i64(vc
, va
, vb
);
1725 tcg_gen_xor_i64(tmp2
, tmp2
, vc
);
1726 tcg_gen_and_i64(tmp
, tmp
, tmp2
);
1727 tcg_gen_shri_i64(tmp
, tmp
, 63);
1728 tcg_gen_movi_i64(tmp2
, 0);
1729 gen_helper_check_overflow(cpu_env
, tmp
, tmp2
);
1731 tcg_temp_free(tmp2
);
1735 tmp
= tcg_temp_new();
1736 tmp2
= tcg_temp_new();
1737 tcg_gen_xor_i64(tmp
, va
, vb
);
1738 tcg_gen_mov_i64(tmp2
, va
);
1739 tcg_gen_sub_i64(vc
, va
, vb
);
1740 tcg_gen_xor_i64(tmp2
, tmp2
, vc
);
1741 tcg_gen_and_i64(tmp
, tmp
, tmp2
);
1742 tcg_gen_shri_i64(tmp
, tmp
, 63);
1743 tcg_gen_movi_i64(tmp2
, 0);
1744 gen_helper_check_overflow(cpu_env
, tmp
, tmp2
);
1746 tcg_temp_free(tmp2
);
1750 tcg_gen_setcond_i64(TCG_COND_LE
, vc
, va
, vb
);
1760 /* Special case BIS as NOP. */
1764 /* Special case BIS as MOV. */
1765 vc
= dest_gpr(ctx
, rc
);
1767 tcg_gen_movi_i64(vc
, lit
);
1769 tcg_gen_mov_i64(vc
, load_gpr(ctx
, rb
));
1775 vc
= dest_gpr(ctx
, rc
);
1776 vb
= load_gpr_lit(ctx
, rb
, lit
, islit
);
1778 if (fn7
== 0x28 && ra
== 31) {
1779 /* Special case ORNOT as NOT. */
1780 tcg_gen_not_i64(vc
, vb
);
1784 va
= load_gpr(ctx
, ra
);
1788 tcg_gen_and_i64(vc
, va
, vb
);
1792 tcg_gen_andc_i64(vc
, va
, vb
);
1796 tmp
= tcg_temp_new();
1797 tcg_gen_andi_i64(tmp
, va
, 1);
1798 tcg_gen_movcond_i64(TCG_COND_NE
, vc
, tmp
, load_zero(ctx
),
1799 vb
, load_gpr(ctx
, rc
));
1804 tmp
= tcg_temp_new();
1805 tcg_gen_andi_i64(tmp
, va
, 1);
1806 tcg_gen_movcond_i64(TCG_COND_EQ
, vc
, tmp
, load_zero(ctx
),
1807 vb
, load_gpr(ctx
, rc
));
1812 tcg_gen_or_i64(vc
, va
, vb
);
1816 tcg_gen_movcond_i64(TCG_COND_EQ
, vc
, va
, load_zero(ctx
),
1817 vb
, load_gpr(ctx
, rc
));
1821 tcg_gen_movcond_i64(TCG_COND_NE
, vc
, va
, load_zero(ctx
),
1822 vb
, load_gpr(ctx
, rc
));
1826 tcg_gen_orc_i64(vc
, va
, vb
);
1830 tcg_gen_xor_i64(vc
, va
, vb
);
1834 tcg_gen_movcond_i64(TCG_COND_LT
, vc
, va
, load_zero(ctx
),
1835 vb
, load_gpr(ctx
, rc
));
1839 tcg_gen_movcond_i64(TCG_COND_GE
, vc
, va
, load_zero(ctx
),
1840 vb
, load_gpr(ctx
, rc
));
1844 tcg_gen_eqv_i64(vc
, va
, vb
);
1849 tcg_gen_andi_i64(vc
, vb
, ~ctx
->amask
);
1853 tcg_gen_movcond_i64(TCG_COND_LE
, vc
, va
, load_zero(ctx
),
1854 vb
, load_gpr(ctx
, rc
));
1858 tcg_gen_movcond_i64(TCG_COND_GT
, vc
, va
, load_zero(ctx
),
1859 vb
, load_gpr(ctx
, rc
));
1864 tcg_gen_movi_i64(vc
, ctx
->implver
);
1872 vc
= dest_gpr(ctx
, rc
);
1873 va
= load_gpr(ctx
, ra
);
1877 gen_msk_l(ctx
, vc
, va
, rb
, islit
, lit
, 0x01);
1881 gen_ext_l(ctx
, vc
, va
, rb
, islit
, lit
, 0x01);
1885 gen_ins_l(ctx
, vc
, va
, rb
, islit
, lit
, 0x01);
1889 gen_msk_l(ctx
, vc
, va
, rb
, islit
, lit
, 0x03);
1893 gen_ext_l(ctx
, vc
, va
, rb
, islit
, lit
, 0x03);
1897 gen_ins_l(ctx
, vc
, va
, rb
, islit
, lit
, 0x03);
1901 gen_msk_l(ctx
, vc
, va
, rb
, islit
, lit
, 0x0f);
1905 gen_ext_l(ctx
, vc
, va
, rb
, islit
, lit
, 0x0f);
1909 gen_ins_l(ctx
, vc
, va
, rb
, islit
, lit
, 0x0f);
1914 gen_zapnoti(vc
, va
, ~lit
);
1916 gen_helper_zap(vc
, va
, load_gpr(ctx
, rb
));
1922 gen_zapnoti(vc
, va
, lit
);
1924 gen_helper_zapnot(vc
, va
, load_gpr(ctx
, rb
));
1929 gen_msk_l(ctx
, vc
, va
, rb
, islit
, lit
, 0xff);
1934 tcg_gen_shri_i64(vc
, va
, lit
& 0x3f);
1936 tmp
= tcg_temp_new();
1937 vb
= load_gpr(ctx
, rb
);
1938 tcg_gen_andi_i64(tmp
, vb
, 0x3f);
1939 tcg_gen_shr_i64(vc
, va
, tmp
);
1945 gen_ext_l(ctx
, vc
, va
, rb
, islit
, lit
, 0xff);
1950 tcg_gen_shli_i64(vc
, va
, lit
& 0x3f);
1952 tmp
= tcg_temp_new();
1953 vb
= load_gpr(ctx
, rb
);
1954 tcg_gen_andi_i64(tmp
, vb
, 0x3f);
1955 tcg_gen_shl_i64(vc
, va
, tmp
);
1961 gen_ins_l(ctx
, vc
, va
, rb
, islit
, lit
, 0xff);
1966 tcg_gen_sari_i64(vc
, va
, lit
& 0x3f);
1968 tmp
= tcg_temp_new();
1969 vb
= load_gpr(ctx
, rb
);
1970 tcg_gen_andi_i64(tmp
, vb
, 0x3f);
1971 tcg_gen_sar_i64(vc
, va
, tmp
);
1977 gen_msk_h(ctx
, vc
, va
, rb
, islit
, lit
, 0x03);
1981 gen_ins_h(ctx
, vc
, va
, rb
, islit
, lit
, 0x03);
1985 gen_ext_h(ctx
, vc
, va
, rb
, islit
, lit
, 0x03);
1989 gen_msk_h(ctx
, vc
, va
, rb
, islit
, lit
, 0x0f);
1993 gen_ins_h(ctx
, vc
, va
, rb
, islit
, lit
, 0x0f);
1997 gen_ext_h(ctx
, vc
, va
, rb
, islit
, lit
, 0x0f);
2001 gen_msk_h(ctx
, vc
, va
, rb
, islit
, lit
, 0xff);
2005 gen_ins_h(ctx
, vc
, va
, rb
, islit
, lit
, 0xff);
2009 gen_ext_h(ctx
, vc
, va
, rb
, islit
, lit
, 0xff);
2017 vc
= dest_gpr(ctx
, rc
);
2018 vb
= load_gpr_lit(ctx
, rb
, lit
, islit
);
2019 va
= load_gpr(ctx
, ra
);
2023 tcg_gen_mul_i64(vc
, va
, vb
);
2024 tcg_gen_ext32s_i64(vc
, vc
);
2028 tcg_gen_mul_i64(vc
, va
, vb
);
2032 tmp
= tcg_temp_new();
2033 tcg_gen_mulu2_i64(tmp
, vc
, va
, vb
);
2038 tmp
= tcg_temp_new();
2039 tcg_gen_ext32s_i64(tmp
, va
);
2040 tcg_gen_ext32s_i64(vc
, vb
);
2041 tcg_gen_mul_i64(tmp
, tmp
, vc
);
2042 tcg_gen_ext32s_i64(vc
, tmp
);
2043 gen_helper_check_overflow(cpu_env
, vc
, tmp
);
2048 tmp
= tcg_temp_new();
2049 tmp2
= tcg_temp_new();
2050 tcg_gen_muls2_i64(vc
, tmp
, va
, vb
);
2051 tcg_gen_sari_i64(tmp2
, vc
, 63);
2052 gen_helper_check_overflow(cpu_env
, tmp
, tmp2
);
2054 tcg_temp_free(tmp2
);
2063 vc
= dest_fpr(ctx
, rc
);
2064 switch (fpfn
) { /* fn11 & 0x3F */
2068 t32
= tcg_temp_new_i32();
2069 va
= load_gpr(ctx
, ra
);
2070 tcg_gen_extrl_i64_i32(t32
, va
);
2071 gen_helper_memory_to_s(vc
, t32
);
2072 tcg_temp_free_i32(t32
);
2077 vb
= load_fpr(ctx
, rb
);
2078 gen_helper_sqrtf(vc
, cpu_env
, vb
);
2083 gen_sqrts(ctx
, rb
, rc
, fn11
);
2088 t32
= tcg_temp_new_i32();
2089 va
= load_gpr(ctx
, ra
);
2090 tcg_gen_extrl_i64_i32(t32
, va
);
2091 gen_helper_memory_to_f(vc
, t32
);
2092 tcg_temp_free_i32(t32
);
2097 va
= load_gpr(ctx
, ra
);
2098 tcg_gen_mov_i64(vc
, va
);
2103 vb
= load_fpr(ctx
, rb
);
2104 gen_helper_sqrtg(vc
, cpu_env
, vb
);
2109 gen_sqrtt(ctx
, rb
, rc
, fn11
);
2117 /* VAX floating point */
2118 /* XXX: rounding mode and trap are ignored (!) */
2119 vc
= dest_fpr(ctx
, rc
);
2120 vb
= load_fpr(ctx
, rb
);
2121 va
= load_fpr(ctx
, ra
);
2122 switch (fpfn
) { /* fn11 & 0x3F */
2125 gen_helper_addf(vc
, cpu_env
, va
, vb
);
2129 gen_helper_subf(vc
, cpu_env
, va
, vb
);
2133 gen_helper_mulf(vc
, cpu_env
, va
, vb
);
2137 gen_helper_divf(vc
, cpu_env
, va
, vb
);
2145 gen_helper_addg(vc
, cpu_env
, va
, vb
);
2149 gen_helper_subg(vc
, cpu_env
, va
, vb
);
2153 gen_helper_mulg(vc
, cpu_env
, va
, vb
);
2157 gen_helper_divg(vc
, cpu_env
, va
, vb
);
2161 gen_helper_cmpgeq(vc
, cpu_env
, va
, vb
);
2165 gen_helper_cmpglt(vc
, cpu_env
, va
, vb
);
2169 gen_helper_cmpgle(vc
, cpu_env
, va
, vb
);
2174 gen_helper_cvtgf(vc
, cpu_env
, vb
);
2183 gen_helper_cvtgq(vc
, cpu_env
, vb
);
2188 gen_helper_cvtqf(vc
, cpu_env
, vb
);
2193 gen_helper_cvtqg(vc
, cpu_env
, vb
);
2201 /* IEEE floating-point */
2202 switch (fpfn
) { /* fn11 & 0x3F */
2205 gen_adds(ctx
, ra
, rb
, rc
, fn11
);
2209 gen_subs(ctx
, ra
, rb
, rc
, fn11
);
2213 gen_muls(ctx
, ra
, rb
, rc
, fn11
);
2217 gen_divs(ctx
, ra
, rb
, rc
, fn11
);
2221 gen_addt(ctx
, ra
, rb
, rc
, fn11
);
2225 gen_subt(ctx
, ra
, rb
, rc
, fn11
);
2229 gen_mult(ctx
, ra
, rb
, rc
, fn11
);
2233 gen_divt(ctx
, ra
, rb
, rc
, fn11
);
2237 gen_cmptun(ctx
, ra
, rb
, rc
, fn11
);
2241 gen_cmpteq(ctx
, ra
, rb
, rc
, fn11
);
2245 gen_cmptlt(ctx
, ra
, rb
, rc
, fn11
);
2249 gen_cmptle(ctx
, ra
, rb
, rc
, fn11
);
2253 if (fn11
== 0x2AC || fn11
== 0x6AC) {
2255 gen_cvtst(ctx
, rb
, rc
, fn11
);
2258 gen_cvtts(ctx
, rb
, rc
, fn11
);
2264 gen_cvttq(ctx
, rb
, rc
, fn11
);
2269 gen_cvtqs(ctx
, rb
, rc
, fn11
);
2274 gen_cvtqt(ctx
, rb
, rc
, fn11
);
2286 vc
= dest_fpr(ctx
, rc
);
2287 vb
= load_fpr(ctx
, rb
);
2293 /* Special case CPYS as FNOP. */
2295 vc
= dest_fpr(ctx
, rc
);
2296 va
= load_fpr(ctx
, ra
);
2298 /* Special case CPYS as FMOV. */
2299 tcg_gen_mov_i64(vc
, va
);
2301 vb
= load_fpr(ctx
, rb
);
2302 gen_cpy_mask(vc
, va
, vb
, 0, 0x8000000000000000ULL
);
2308 vc
= dest_fpr(ctx
, rc
);
2309 vb
= load_fpr(ctx
, rb
);
2310 va
= load_fpr(ctx
, ra
);
2311 gen_cpy_mask(vc
, va
, vb
, 1, 0x8000000000000000ULL
);
2315 vc
= dest_fpr(ctx
, rc
);
2316 vb
= load_fpr(ctx
, rb
);
2317 va
= load_fpr(ctx
, ra
);
2318 gen_cpy_mask(vc
, va
, vb
, 0, 0xFFF0000000000000ULL
);
2322 va
= load_fpr(ctx
, ra
);
2323 gen_helper_store_fpcr(cpu_env
, va
);
2324 if (ctx
->tb_rm
== QUAL_RM_D
) {
2325 /* Re-do the copy of the rounding mode to fp_status
2326 the next time we use dynamic rounding. */
2332 va
= dest_fpr(ctx
, ra
);
2333 gen_helper_load_fpcr(va
, cpu_env
);
2337 gen_fcmov(ctx
, TCG_COND_EQ
, ra
, rb
, rc
);
2341 gen_fcmov(ctx
, TCG_COND_NE
, ra
, rb
, rc
);
2345 gen_fcmov(ctx
, TCG_COND_LT
, ra
, rb
, rc
);
2349 gen_fcmov(ctx
, TCG_COND_GE
, ra
, rb
, rc
);
2353 gen_fcmov(ctx
, TCG_COND_LE
, ra
, rb
, rc
);
2357 gen_fcmov(ctx
, TCG_COND_GT
, ra
, rb
, rc
);
2359 case 0x030: /* CVTQL */
2360 case 0x130: /* CVTQL/V */
2361 case 0x530: /* CVTQL/SV */
2363 vc
= dest_fpr(ctx
, rc
);
2364 vb
= load_fpr(ctx
, rb
);
2365 gen_helper_cvtql(vc
, cpu_env
, vb
);
2366 gen_fp_exc_raise(rc
, fn11
);
2374 switch ((uint16_t)disp16
) {
2385 tcg_gen_mb(TCG_MO_ALL
| TCG_BAR_SC
);
2389 tcg_gen_mb(TCG_MO_ST_ST
| TCG_BAR_SC
);
2401 va
= dest_gpr(ctx
, ra
);
2402 if (ctx
->base
.tb
->cflags
& CF_USE_ICOUNT
) {
2404 gen_helper_load_pcc(va
, cpu_env
);
2406 ret
= DISAS_PC_STALE
;
2408 gen_helper_load_pcc(va
, cpu_env
);
2436 /* HW_MFPR (PALcode) */
2437 #ifndef CONFIG_USER_ONLY
2438 REQUIRE_TB_FLAG(ENV_FLAG_PAL_MODE
);
2439 va
= dest_gpr(ctx
, ra
);
2440 ret
= gen_mfpr(ctx
, va
, insn
& 0xffff);
2447 /* JMP, JSR, RET, JSR_COROUTINE. These only differ by the branch
2448 prediction stack action, which of course we don't implement. */
2449 vb
= load_gpr(ctx
, rb
);
2450 tcg_gen_andi_i64(cpu_pc
, vb
, ~3);
2452 tcg_gen_movi_i64(ctx
->ir
[ra
], ctx
->base
.pc_next
);
2454 ret
= DISAS_PC_UPDATED
;
2458 /* HW_LD (PALcode) */
2459 #ifndef CONFIG_USER_ONLY
2460 REQUIRE_TB_FLAG(ENV_FLAG_PAL_MODE
);
2462 TCGv addr
= tcg_temp_new();
2463 vb
= load_gpr(ctx
, rb
);
2464 va
= dest_gpr(ctx
, ra
);
2466 tcg_gen_addi_i64(addr
, vb
, disp12
);
2467 switch ((insn
>> 12) & 0xF) {
2469 /* Longword physical access (hw_ldl/p) */
2470 tcg_gen_qemu_ld_i64(va
, addr
, MMU_PHYS_IDX
, MO_LESL
);
2473 /* Quadword physical access (hw_ldq/p) */
2474 tcg_gen_qemu_ld_i64(va
, addr
, MMU_PHYS_IDX
, MO_LEQ
);
2477 /* Longword physical access with lock (hw_ldl_l/p) */
2478 gen_qemu_ldl_l(va
, addr
, MMU_PHYS_IDX
);
2481 /* Quadword physical access with lock (hw_ldq_l/p) */
2482 gen_qemu_ldq_l(va
, addr
, MMU_PHYS_IDX
);
2485 /* Longword virtual PTE fetch (hw_ldl/v) */
2488 /* Quadword virtual PTE fetch (hw_ldq/v) */
2498 /* Longword virtual access (hw_ldl) */
2501 /* Quadword virtual access (hw_ldq) */
2504 /* Longword virtual access with protection check (hw_ldl/w) */
2505 tcg_gen_qemu_ld_i64(va
, addr
, MMU_KERNEL_IDX
, MO_LESL
);
2508 /* Quadword virtual access with protection check (hw_ldq/w) */
2509 tcg_gen_qemu_ld_i64(va
, addr
, MMU_KERNEL_IDX
, MO_LEQ
);
2512 /* Longword virtual access with alt access mode (hw_ldl/a)*/
2515 /* Quadword virtual access with alt access mode (hw_ldq/a) */
2518 /* Longword virtual access with alternate access mode and
2519 protection checks (hw_ldl/wa) */
2520 tcg_gen_qemu_ld_i64(va
, addr
, MMU_USER_IDX
, MO_LESL
);
2523 /* Quadword virtual access with alternate access mode and
2524 protection checks (hw_ldq/wa) */
2525 tcg_gen_qemu_ld_i64(va
, addr
, MMU_USER_IDX
, MO_LEQ
);
2528 tcg_temp_free(addr
);
2536 vc
= dest_gpr(ctx
, rc
);
2541 va
= load_fpr(ctx
, ra
);
2542 tcg_gen_mov_i64(vc
, va
);
2544 } else if (fn7
== 0x78) {
2548 t32
= tcg_temp_new_i32();
2549 va
= load_fpr(ctx
, ra
);
2550 gen_helper_s_to_memory(t32
, va
);
2551 tcg_gen_ext_i32_i64(vc
, t32
);
2552 tcg_temp_free_i32(t32
);
2556 vb
= load_gpr_lit(ctx
, rb
, lit
, islit
);
2562 tcg_gen_ext8s_i64(vc
, vb
);
2568 tcg_gen_ext16s_i64(vc
, vb
);
2575 tcg_gen_ctpop_i64(vc
, vb
);
2581 va
= load_gpr(ctx
, ra
);
2582 gen_helper_perr(vc
, va
, vb
);
2589 tcg_gen_clzi_i64(vc
, vb
, 64);
2596 tcg_gen_ctzi_i64(vc
, vb
, 64);
2603 gen_helper_unpkbw(vc
, vb
);
2610 gen_helper_unpkbl(vc
, vb
);
2617 gen_helper_pkwb(vc
, vb
);
2624 gen_helper_pklb(vc
, vb
);
2629 va
= load_gpr(ctx
, ra
);
2630 gen_helper_minsb8(vc
, va
, vb
);
2635 va
= load_gpr(ctx
, ra
);
2636 gen_helper_minsw4(vc
, va
, vb
);
2641 va
= load_gpr(ctx
, ra
);
2642 gen_helper_minub8(vc
, va
, vb
);
2647 va
= load_gpr(ctx
, ra
);
2648 gen_helper_minuw4(vc
, va
, vb
);
2653 va
= load_gpr(ctx
, ra
);
2654 gen_helper_maxub8(vc
, va
, vb
);
2659 va
= load_gpr(ctx
, ra
);
2660 gen_helper_maxuw4(vc
, va
, vb
);
2665 va
= load_gpr(ctx
, ra
);
2666 gen_helper_maxsb8(vc
, va
, vb
);
2671 va
= load_gpr(ctx
, ra
);
2672 gen_helper_maxsw4(vc
, va
, vb
);
2680 /* HW_MTPR (PALcode) */
2681 #ifndef CONFIG_USER_ONLY
2682 REQUIRE_TB_FLAG(ENV_FLAG_PAL_MODE
);
2683 vb
= load_gpr(ctx
, rb
);
2684 ret
= gen_mtpr(ctx
, vb
, insn
& 0xffff);
2691 /* HW_RET (PALcode) */
2692 #ifndef CONFIG_USER_ONLY
2693 REQUIRE_TB_FLAG(ENV_FLAG_PAL_MODE
);
2695 /* Pre-EV6 CPUs interpreted this as HW_REI, loading the return
2696 address from EXC_ADDR. This turns out to be useful for our
2697 emulation PALcode, so continue to accept it. */
2698 ctx
->lit
= vb
= tcg_temp_new();
2699 tcg_gen_ld_i64(vb
, cpu_env
, offsetof(CPUAlphaState
, exc_addr
));
2701 vb
= load_gpr(ctx
, rb
);
2703 tcg_gen_movi_i64(cpu_lock_addr
, -1);
2704 tmp
= tcg_temp_new();
2705 tcg_gen_movi_i64(tmp
, 0);
2706 st_flag_byte(tmp
, ENV_FLAG_RX_SHIFT
);
2707 tcg_gen_andi_i64(tmp
, vb
, 1);
2708 st_flag_byte(tmp
, ENV_FLAG_PAL_SHIFT
);
2710 tcg_gen_andi_i64(cpu_pc
, vb
, ~3);
2711 /* Allow interrupts to be recognized right away. */
2712 ret
= DISAS_PC_UPDATED_NOCHAIN
;
2719 /* HW_ST (PALcode) */
2720 #ifndef CONFIG_USER_ONLY
2721 REQUIRE_TB_FLAG(ENV_FLAG_PAL_MODE
);
2723 switch ((insn
>> 12) & 0xF) {
2725 /* Longword physical access */
2726 va
= load_gpr(ctx
, ra
);
2727 vb
= load_gpr(ctx
, rb
);
2728 tmp
= tcg_temp_new();
2729 tcg_gen_addi_i64(tmp
, vb
, disp12
);
2730 tcg_gen_qemu_st_i64(va
, tmp
, MMU_PHYS_IDX
, MO_LESL
);
2734 /* Quadword physical access */
2735 va
= load_gpr(ctx
, ra
);
2736 vb
= load_gpr(ctx
, rb
);
2737 tmp
= tcg_temp_new();
2738 tcg_gen_addi_i64(tmp
, vb
, disp12
);
2739 tcg_gen_qemu_st_i64(va
, tmp
, MMU_PHYS_IDX
, MO_LEQ
);
2743 /* Longword physical access with lock */
2744 ret
= gen_store_conditional(ctx
, ra
, rb
, disp12
,
2745 MMU_PHYS_IDX
, MO_LESL
);
2748 /* Quadword physical access with lock */
2749 ret
= gen_store_conditional(ctx
, ra
, rb
, disp12
,
2750 MMU_PHYS_IDX
, MO_LEQ
);
2753 /* Longword virtual access */
2756 /* Quadword virtual access */
2777 /* Longword virtual access with alternate access mode */
2780 /* Quadword virtual access with alternate access mode */
2796 gen_load_mem(ctx
, &gen_qemu_ldf
, ra
, rb
, disp16
, 1, 0);
2800 gen_load_mem(ctx
, &gen_qemu_ldg
, ra
, rb
, disp16
, 1, 0);
2804 gen_load_mem(ctx
, &gen_qemu_lds
, ra
, rb
, disp16
, 1, 0);
2808 gen_load_mem(ctx
, &tcg_gen_qemu_ld64
, ra
, rb
, disp16
, 1, 0);
2812 gen_store_mem(ctx
, &gen_qemu_stf
, ra
, rb
, disp16
, 1, 0);
2816 gen_store_mem(ctx
, &gen_qemu_stg
, ra
, rb
, disp16
, 1, 0);
2820 gen_store_mem(ctx
, &gen_qemu_sts
, ra
, rb
, disp16
, 1, 0);
2824 gen_store_mem(ctx
, &tcg_gen_qemu_st64
, ra
, rb
, disp16
, 1, 0);
2828 gen_load_mem(ctx
, &tcg_gen_qemu_ld32s
, ra
, rb
, disp16
, 0, 0);
2832 gen_load_mem(ctx
, &tcg_gen_qemu_ld64
, ra
, rb
, disp16
, 0, 0);
2836 gen_load_mem(ctx
, &gen_qemu_ldl_l
, ra
, rb
, disp16
, 0, 0);
2840 gen_load_mem(ctx
, &gen_qemu_ldq_l
, ra
, rb
, disp16
, 0, 0);
2844 gen_store_mem(ctx
, &tcg_gen_qemu_st32
, ra
, rb
, disp16
, 0, 0);
2848 gen_store_mem(ctx
, &tcg_gen_qemu_st64
, ra
, rb
, disp16
, 0, 0);
2852 ret
= gen_store_conditional(ctx
, ra
, rb
, disp16
,
2853 ctx
->mem_idx
, MO_LESL
);
2857 ret
= gen_store_conditional(ctx
, ra
, rb
, disp16
,
2858 ctx
->mem_idx
, MO_LEQ
);
2862 ret
= gen_bdirect(ctx
, ra
, disp21
);
2864 case 0x31: /* FBEQ */
2865 ret
= gen_fbcond(ctx
, TCG_COND_EQ
, ra
, disp21
);
2867 case 0x32: /* FBLT */
2868 ret
= gen_fbcond(ctx
, TCG_COND_LT
, ra
, disp21
);
2870 case 0x33: /* FBLE */
2871 ret
= gen_fbcond(ctx
, TCG_COND_LE
, ra
, disp21
);
2875 ret
= gen_bdirect(ctx
, ra
, disp21
);
2877 case 0x35: /* FBNE */
2878 ret
= gen_fbcond(ctx
, TCG_COND_NE
, ra
, disp21
);
2880 case 0x36: /* FBGE */
2881 ret
= gen_fbcond(ctx
, TCG_COND_GE
, ra
, disp21
);
2883 case 0x37: /* FBGT */
2884 ret
= gen_fbcond(ctx
, TCG_COND_GT
, ra
, disp21
);
2888 ret
= gen_bcond(ctx
, TCG_COND_EQ
, ra
, disp21
, 1);
2892 ret
= gen_bcond(ctx
, TCG_COND_EQ
, ra
, disp21
, 0);
2896 ret
= gen_bcond(ctx
, TCG_COND_LT
, ra
, disp21
, 0);
2900 ret
= gen_bcond(ctx
, TCG_COND_LE
, ra
, disp21
, 0);
2904 ret
= gen_bcond(ctx
, TCG_COND_NE
, ra
, disp21
, 1);
2908 ret
= gen_bcond(ctx
, TCG_COND_NE
, ra
, disp21
, 0);
2912 ret
= gen_bcond(ctx
, TCG_COND_GE
, ra
, disp21
, 0);
2916 ret
= gen_bcond(ctx
, TCG_COND_GT
, ra
, disp21
, 0);
2919 ret
= gen_invalid(ctx
);
static int alpha_tr_init_disas_context(DisasContextBase *dcbase,
                                       CPUState *cpu, int max_insns)
{
    DisasContext *ctx = container_of(dcbase, DisasContext, base);
    CPUAlphaState *env = cpu->env_ptr;
    int64_t bound, mask;

    ctx->tbflags = ctx->base.tb->flags;
    ctx->mem_idx = cpu_mmu_index(env, false);
    ctx->implver = env->implver;
    ctx->amask = env->amask;

#ifdef CONFIG_USER_ONLY
    ctx->ir = cpu_std_ir;
#else
    ctx->palbr = env->palbr;
    ctx->ir = (ctx->tbflags & ENV_FLAG_PAL_MODE ? cpu_pal_ir : cpu_std_ir);
#endif

    /* ??? Every TB begins with unset rounding mode, to be initialized on
       the first fp insn of the TB.  Alternately we could define a proper
       default for every TB (e.g. QUAL_RM_N or QUAL_RM_D) and make sure
       to reset the FP_STATUS to that default at the end of any TB that
       changes the default.  We could even (gasp) dynamically figure out
       what default would be most efficient given the running program.  */
    ctx->tb_rm = -1;
    /* Similarly for flush-to-zero.  */
    ctx->tb_ftz = -1;

    TCGV_UNUSED_I64(ctx->zero);
    TCGV_UNUSED_I64(ctx->sink);
    TCGV_UNUSED_I64(ctx->lit);

    /* Bound the number of insns to execute to those left on the page.  */
    if (in_superpage(ctx, ctx->base.pc_first)) {
        mask = -1ULL << 41;
    } else {
        mask = TARGET_PAGE_MASK;
    }
    bound = -(ctx->base.pc_first | mask) / 4;

    return MIN(max_insns, bound);
}
*db
, CPUState
*cpu
)
2974 static void alpha_tr_insn_start(DisasContextBase
*dcbase
, CPUState
*cpu
)
2976 tcg_gen_insn_start(dcbase
->pc_next
);
2979 static bool alpha_tr_breakpoint_check(DisasContextBase
*dcbase
, CPUState
*cpu
,
2980 const CPUBreakpoint
*bp
)
2982 DisasContext
*ctx
= container_of(dcbase
, DisasContext
, base
);
2984 ctx
->base
.is_jmp
= gen_excp(ctx
, EXCP_DEBUG
, 0);
2986 /* The address covered by the breakpoint must be included in
2987 [tb->pc, tb->pc + tb->size) in order to for it to be
2988 properly cleared -- thus we increment the PC here so that
2989 the logic setting tb->size below does the right thing. */
2990 ctx
->base
.pc_next
+= 4;
static void alpha_tr_translate_insn(DisasContextBase *dcbase, CPUState *cpu)
{
    DisasContext *ctx = container_of(dcbase, DisasContext, base);
    CPUAlphaState *env = cpu->env_ptr;
    uint32_t insn = cpu_ldl_code(env, ctx->base.pc_next);

    ctx->base.pc_next += 4;
    ctx->base.is_jmp = translate_one(ctx, insn);

    free_context_temps(ctx);
    translator_loop_temp_check(&ctx->base);
}
static void alpha_tr_tb_stop(DisasContextBase *dcbase, CPUState *cpu)
{
    DisasContext *ctx = container_of(dcbase, DisasContext, base);

    switch (ctx->base.is_jmp) {
    case DISAS_NORETURN:
        break;
    case DISAS_TOO_MANY:
        if (use_goto_tb(ctx, ctx->base.pc_next)) {
            tcg_gen_goto_tb(0);
            tcg_gen_movi_i64(cpu_pc, ctx->base.pc_next);
            tcg_gen_exit_tb((uintptr_t)ctx->base.tb);
        }
        /* FALLTHRU */
    case DISAS_PC_STALE:
        tcg_gen_movi_i64(cpu_pc, ctx->base.pc_next);
        /* FALLTHRU */
    case DISAS_PC_UPDATED:
        if (!use_exit_tb(ctx)) {
            tcg_gen_lookup_and_goto_ptr();
            break;
        }
        /* FALLTHRU */
    case DISAS_PC_UPDATED_NOCHAIN:
        if (ctx->base.singlestep_enabled) {
            gen_excp_1(EXCP_DEBUG, 0);
        } else {
            tcg_gen_exit_tb(0);
        }
        break;
    default:
        g_assert_not_reached();
    }
}
static void alpha_tr_disas_log(const DisasContextBase *dcbase, CPUState *cpu)
{
    qemu_log("IN: %s\n", lookup_symbol(dcbase->pc_first));
    log_target_disas(cpu, dcbase->pc_first, dcbase->tb->size, 1);
}
static const TranslatorOps alpha_tr_ops = {
    .init_disas_context = alpha_tr_init_disas_context,
    .tb_start           = alpha_tr_tb_start,
    .insn_start         = alpha_tr_insn_start,
    .breakpoint_check   = alpha_tr_breakpoint_check,
    .translate_insn     = alpha_tr_translate_insn,
    .tb_stop            = alpha_tr_tb_stop,
    .disas_log          = alpha_tr_disas_log,
};

void gen_intermediate_code(CPUState *cpu, TranslationBlock *tb)
{
    DisasContext dc;
    translator_loop(&alpha_tr_ops, &dc.base, cpu, tb);
}

void restore_state_to_opc(CPUAlphaState *env, TranslationBlock *tb,
                          target_ulong *data)
{
    env->pc = data[0];
}