/*
 * Alpha emulation cpu translation for qemu.
 *
 * Copyright (c) 2007 Jocelyn Mayer
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"
#include "cpu.h"
#include "sysemu/cpus.h"
#include "disas/disas.h"
#include "qemu/host-utils.h"
#include "exec/exec-all.h"
#include "tcg-op.h"
#include "exec/cpu_ldst.h"
#include "exec/helper-proto.h"
#include "exec/helper-gen.h"
#include "trace-tcg.h"
#include "exec/translator.h"
#undef ALPHA_DEBUG_DISAS
#define CONFIG_SOFTFLOAT_INLINE

#ifdef ALPHA_DEBUG_DISAS
#  define LOG_DISAS(...) qemu_log_mask(CPU_LOG_TB_IN_ASM, ## __VA_ARGS__)
#else
#  define LOG_DISAS(...) do { } while (0)
#endif
typedef struct DisasContext DisasContext;
struct DisasContext {
    DisasContextBase base;

#ifndef CONFIG_USER_ONLY
    uint64_t palbr;
#endif
    uint32_t tbflags;
    int mem_idx;

    /* implver and amask values for this CPU. */
    int implver;
    int amask;

    /* Current rounding mode for this TB. */
    int tb_rm;
    /* Current flush-to-zero setting for this TB. */
    int tb_ftz;

    /* The set of registers active in the current context. */
    TCGv *ir;

    /* Temporaries for $31 and $f31 as source and destination. */
    TCGv zero;
    TCGv sink;
    /* Temporary for immediate constants. */
    TCGv lit;
};
/* Target-specific return values from translate_one, indicating the
   state of the TB.  Note that DISAS_NEXT indicates that we are not
   exiting the TB. */
#define DISAS_PC_UPDATED_NOCHAIN  DISAS_TARGET_0
#define DISAS_PC_UPDATED          DISAS_TARGET_1
#define DISAS_PC_STALE            DISAS_TARGET_2
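
/* Summary of how these are consumed (inferred from alpha_tr_tb_stop below):
   DISAS_PC_UPDATED_NOCHAIN means cpu_pc already holds the next PC but the TB
   must not be chained, e.g. so pending interrupts are recognized;
   DISAS_PC_UPDATED means cpu_pc holds the next PC and normal chaining applies;
   DISAS_PC_STALE means cpu_pc must still be reloaded from pc_next on exit. */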
/* global register indexes */
static TCGv_env cpu_env;
static TCGv cpu_std_ir[31];
static TCGv cpu_fir[31];
static TCGv cpu_pc;
static TCGv cpu_lock_addr;
static TCGv cpu_lock_value;

#ifndef CONFIG_USER_ONLY
static TCGv cpu_pal_ir[31];
#endif

#include "exec/gen-icount.h"
void alpha_translate_init(void)
{
#define DEF_VAR(V)  { &cpu_##V, #V, offsetof(CPUAlphaState, V) }

    typedef struct { TCGv *var; const char *name; int ofs; } GlobalVar;
    static const GlobalVar vars[] = {
        DEF_VAR(pc),
        DEF_VAR(lock_addr),
        DEF_VAR(lock_value),
    };

#undef DEF_VAR

    /* Use the symbolic register names that match the disassembler. */
    static const char greg_names[31][4] = {
        "v0", "t0", "t1", "t2", "t3", "t4", "t5", "t6",
        "t7", "s0", "s1", "s2", "s3", "s4", "s5", "fp",
        "a0", "a1", "a2", "a3", "a4", "a5", "t8", "t9",
        "t10", "t11", "ra", "t12", "at", "gp", "sp"
    };
    static const char freg_names[31][4] = {
        "f0", "f1", "f2", "f3", "f4", "f5", "f6", "f7",
        "f8", "f9", "f10", "f11", "f12", "f13", "f14", "f15",
        "f16", "f17", "f18", "f19", "f20", "f21", "f22", "f23",
        "f24", "f25", "f26", "f27", "f28", "f29", "f30"
    };
#ifndef CONFIG_USER_ONLY
    static const char shadow_names[8][8] = {
        "pal_t7", "pal_s0", "pal_s1", "pal_s2",
        "pal_s3", "pal_s4", "pal_s5", "pal_t11"
    };
#endif

    static bool done_init = 0;
    int i;

    if (done_init) {
        return;
    }
    done_init = 1;

    cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");
    tcg_ctx.tcg_env = cpu_env;

    for (i = 0; i < 31; i++) {
        cpu_std_ir[i] = tcg_global_mem_new_i64(cpu_env,
                                               offsetof(CPUAlphaState, ir[i]),
                                               greg_names[i]);
    }

    for (i = 0; i < 31; i++) {
        cpu_fir[i] = tcg_global_mem_new_i64(cpu_env,
                                            offsetof(CPUAlphaState, fir[i]),
                                            freg_names[i]);
    }

#ifndef CONFIG_USER_ONLY
    memcpy(cpu_pal_ir, cpu_std_ir, sizeof(cpu_pal_ir));
    for (i = 0; i < 8; i++) {
        int r = (i == 7 ? 25 : i + 8);
        cpu_pal_ir[r] = tcg_global_mem_new_i64(cpu_env,
                                               offsetof(CPUAlphaState, shadow[i]),
                                               shadow_names[i]);
    }
#endif

    for (i = 0; i < ARRAY_SIZE(vars); ++i) {
        const GlobalVar *v = &vars[i];
        *v->var = tcg_global_mem_new_i64(cpu_env, v->ofs, v->name);
    }
}
static TCGv load_zero(DisasContext *ctx)
{
    if (TCGV_IS_UNUSED_I64(ctx->zero)) {
        ctx->zero = tcg_const_i64(0);
    }
    return ctx->zero;
}

static TCGv dest_sink(DisasContext *ctx)
{
    if (TCGV_IS_UNUSED_I64(ctx->sink)) {
        ctx->sink = tcg_temp_new();
    }
    return ctx->sink;
}
static void free_context_temps(DisasContext *ctx)
{
    if (!TCGV_IS_UNUSED_I64(ctx->sink)) {
        tcg_gen_discard_i64(ctx->sink);
        tcg_temp_free(ctx->sink);
        TCGV_UNUSED_I64(ctx->sink);
    }
    if (!TCGV_IS_UNUSED_I64(ctx->zero)) {
        tcg_temp_free(ctx->zero);
        TCGV_UNUSED_I64(ctx->zero);
    }
    if (!TCGV_IS_UNUSED_I64(ctx->lit)) {
        tcg_temp_free(ctx->lit);
        TCGV_UNUSED_I64(ctx->lit);
    }
}
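
/* Register accessors: $31 and $f31 read as zero via the shared "zero"
   temporary, and writes to them are routed to the discardable "sink"
   temporary, so the emitters below never need to special-case them. */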
static TCGv load_gpr(DisasContext *ctx, unsigned reg)
{
    if (likely(reg < 31)) {
        return ctx->ir[reg];
    } else {
        return load_zero(ctx);
    }
}

static TCGv load_gpr_lit(DisasContext *ctx, unsigned reg,
                         uint8_t lit, bool islit)
{
    if (islit) {
        ctx->lit = tcg_const_i64(lit);
        return ctx->lit;
    } else if (likely(reg < 31)) {
        return ctx->ir[reg];
    } else {
        return load_zero(ctx);
    }
}

static TCGv dest_gpr(DisasContext *ctx, unsigned reg)
{
    if (likely(reg < 31)) {
        return ctx->ir[reg];
    } else {
        return dest_sink(ctx);
    }
}

static TCGv load_fpr(DisasContext *ctx, unsigned reg)
{
    if (likely(reg < 31)) {
        return cpu_fir[reg];
    } else {
        return load_zero(ctx);
    }
}

static TCGv dest_fpr(DisasContext *ctx, unsigned reg)
{
    if (likely(reg < 31)) {
        return cpu_fir[reg];
    } else {
        return dest_sink(ctx);
    }
}
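
/* env->flags is accessed one byte at a time: compute the offset of the byte
   holding the flag selected by SHIFT, accounting for host endianness. */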
static int get_flag_ofs(unsigned shift)
{
    int ofs = offsetof(CPUAlphaState, flags);
#ifdef HOST_WORDS_BIGENDIAN
    ofs += 3 - (shift / 8);
#else
    ofs += shift / 8;
#endif
    return ofs;
}

static void ld_flag_byte(TCGv val, unsigned shift)
{
    tcg_gen_ld8u_i64(val, cpu_env, get_flag_ofs(shift));
}

static void st_flag_byte(TCGv val, unsigned shift)
{
    tcg_gen_st8_i64(val, cpu_env, get_flag_ofs(shift));
}
static void gen_excp_1(int exception, int error_code)
{
    TCGv_i32 tmp1, tmp2;

    tmp1 = tcg_const_i32(exception);
    tmp2 = tcg_const_i32(error_code);
    gen_helper_excp(cpu_env, tmp1, tmp2);
    tcg_temp_free_i32(tmp2);
    tcg_temp_free_i32(tmp1);
}
static DisasJumpType gen_excp(DisasContext *ctx, int exception, int error_code)
{
    tcg_gen_movi_i64(cpu_pc, ctx->base.pc_next);
    gen_excp_1(exception, error_code);
    return DISAS_NORETURN;
}

static inline DisasJumpType gen_invalid(DisasContext *ctx)
{
    return gen_excp(ctx, EXCP_OPCDEC, 0);
}
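
/* Memory-format conversions: the VAX F/G and IEEE S floating-point loads and
   stores below go through helpers that translate between the in-memory format
   and the common register format held in the FPRs. */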
293 static inline void gen_qemu_ldf(TCGv t0
, TCGv t1
, int flags
)
295 TCGv_i32 tmp32
= tcg_temp_new_i32();
296 tcg_gen_qemu_ld_i32(tmp32
, t1
, flags
, MO_LEUL
);
297 gen_helper_memory_to_f(t0
, tmp32
);
298 tcg_temp_free_i32(tmp32
);
301 static inline void gen_qemu_ldg(TCGv t0
, TCGv t1
, int flags
)
303 TCGv tmp
= tcg_temp_new();
304 tcg_gen_qemu_ld_i64(tmp
, t1
, flags
, MO_LEQ
);
305 gen_helper_memory_to_g(t0
, tmp
);
309 static inline void gen_qemu_lds(TCGv t0
, TCGv t1
, int flags
)
311 TCGv_i32 tmp32
= tcg_temp_new_i32();
312 tcg_gen_qemu_ld_i32(tmp32
, t1
, flags
, MO_LEUL
);
313 gen_helper_memory_to_s(t0
, tmp32
);
314 tcg_temp_free_i32(tmp32
);
317 static inline void gen_qemu_ldl_l(TCGv t0
, TCGv t1
, int flags
)
319 tcg_gen_qemu_ld_i64(t0
, t1
, flags
, MO_LESL
);
320 tcg_gen_mov_i64(cpu_lock_addr
, t1
);
321 tcg_gen_mov_i64(cpu_lock_value
, t0
);
324 static inline void gen_qemu_ldq_l(TCGv t0
, TCGv t1
, int flags
)
326 tcg_gen_qemu_ld_i64(t0
, t1
, flags
, MO_LEQ
);
327 tcg_gen_mov_i64(cpu_lock_addr
, t1
);
328 tcg_gen_mov_i64(cpu_lock_value
, t0
);
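
/* The *_l variants above also record the locked address and value for a later
   store-conditional.  gen_load_mem below is the common path for the ordinary
   integer and FP loads. */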
331 static inline void gen_load_mem(DisasContext
*ctx
,
332 void (*tcg_gen_qemu_load
)(TCGv t0
, TCGv t1
,
334 int ra
, int rb
, int32_t disp16
, bool fp
,
/* LDQ_U with ra $31 is UNOP.  Various other loads are forms of
   prefetches, which we can treat as nops.  No worries about
   missed exceptions here. */
342 if (unlikely(ra
== 31)) {
346 tmp
= tcg_temp_new();
347 addr
= load_gpr(ctx
, rb
);
350 tcg_gen_addi_i64(tmp
, addr
, disp16
);
354 tcg_gen_andi_i64(tmp
, addr
, ~0x7);
358 va
= (fp
? cpu_fir
[ra
] : ctx
->ir
[ra
]);
359 tcg_gen_qemu_load(va
, addr
, ctx
->mem_idx
);
364 static inline void gen_qemu_stf(TCGv t0
, TCGv t1
, int flags
)
366 TCGv_i32 tmp32
= tcg_temp_new_i32();
367 gen_helper_f_to_memory(tmp32
, t0
);
368 tcg_gen_qemu_st_i32(tmp32
, t1
, flags
, MO_LEUL
);
369 tcg_temp_free_i32(tmp32
);
372 static inline void gen_qemu_stg(TCGv t0
, TCGv t1
, int flags
)
374 TCGv tmp
= tcg_temp_new();
375 gen_helper_g_to_memory(tmp
, t0
);
376 tcg_gen_qemu_st_i64(tmp
, t1
, flags
, MO_LEQ
);
380 static inline void gen_qemu_sts(TCGv t0
, TCGv t1
, int flags
)
382 TCGv_i32 tmp32
= tcg_temp_new_i32();
383 gen_helper_s_to_memory(tmp32
, t0
);
384 tcg_gen_qemu_st_i32(tmp32
, t1
, flags
, MO_LEUL
);
385 tcg_temp_free_i32(tmp32
);
388 static inline void gen_store_mem(DisasContext
*ctx
,
389 void (*tcg_gen_qemu_store
)(TCGv t0
, TCGv t1
,
391 int ra
, int rb
, int32_t disp16
, bool fp
,
396 tmp
= tcg_temp_new();
397 addr
= load_gpr(ctx
, rb
);
400 tcg_gen_addi_i64(tmp
, addr
, disp16
);
404 tcg_gen_andi_i64(tmp
, addr
, ~0x7);
408 va
= (fp
? load_fpr(ctx
, ra
) : load_gpr(ctx
, ra
));
409 tcg_gen_qemu_store(va
, addr
, ctx
->mem_idx
);
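
/* Store-conditional: succeed only if the address still matches cpu_lock_addr
   and an atomic cmpxchg against cpu_lock_value finds memory unchanged; ra
   receives the success flag, and the lock is cleared in either case. */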
414 static DisasJumpType
gen_store_conditional(DisasContext
*ctx
, int ra
, int rb
,
415 int32_t disp16
, int mem_idx
,
418 TCGLabel
*lab_fail
, *lab_done
;
421 addr
= tcg_temp_new_i64();
422 tcg_gen_addi_i64(addr
, load_gpr(ctx
, rb
), disp16
);
423 free_context_temps(ctx
);
425 lab_fail
= gen_new_label();
426 lab_done
= gen_new_label();
427 tcg_gen_brcond_i64(TCG_COND_NE
, addr
, cpu_lock_addr
, lab_fail
);
428 tcg_temp_free_i64(addr
);
430 val
= tcg_temp_new_i64();
431 tcg_gen_atomic_cmpxchg_i64(val
, cpu_lock_addr
, cpu_lock_value
,
432 load_gpr(ctx
, ra
), mem_idx
, op
);
433 free_context_temps(ctx
);
436 tcg_gen_setcond_i64(TCG_COND_EQ
, ctx
->ir
[ra
], val
, cpu_lock_value
);
438 tcg_temp_free_i64(val
);
439 tcg_gen_br(lab_done
);
441 gen_set_label(lab_fail
);
443 tcg_gen_movi_i64(ctx
->ir
[ra
], 0);
446 gen_set_label(lab_done
);
447 tcg_gen_movi_i64(cpu_lock_addr
, -1);
451 static bool in_superpage(DisasContext
*ctx
, int64_t addr
)
453 #ifndef CONFIG_USER_ONLY
454 return ((ctx
->tbflags
& ENV_FLAG_PS_USER
) == 0
455 && addr
>> TARGET_VIRT_ADDR_SPACE_BITS
== -1
456 && ((addr
>> 41) & 3) == 2);
462 static bool use_exit_tb(DisasContext
*ctx
)
464 return ((ctx
->base
.tb
->cflags
& CF_LAST_IO
)
465 || ctx
->base
.singlestep_enabled
469 static bool use_goto_tb(DisasContext
*ctx
, uint64_t dest
)
/* Suppress goto_tb in the case of single-stepping and IO. */
472 if (unlikely(use_exit_tb(ctx
))) {
475 #ifndef CONFIG_USER_ONLY
476 /* If the destination is in the superpage, the page perms can't change. */
477 if (in_superpage(ctx
, dest
)) {
480 /* Check for the dest on the same page as the start of the TB. */
481 return ((ctx
->base
.tb
->pc
^ dest
) & TARGET_PAGE_MASK
) == 0;
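
/* Direct branches (BR/BSR): write the return address if requested, then
   either chain to the destination TB with goto_tb or fall back to updating
   cpu_pc and leaving the TB. */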
487 static DisasJumpType
gen_bdirect(DisasContext
*ctx
, int ra
, int32_t disp
)
489 uint64_t dest
= ctx
->base
.pc_next
+ (disp
<< 2);
492 tcg_gen_movi_i64(ctx
->ir
[ra
], ctx
->base
.pc_next
);
495 /* Notice branch-to-next; used to initialize RA with the PC. */
498 } else if (use_goto_tb(ctx
, dest
)) {
500 tcg_gen_movi_i64(cpu_pc
, dest
);
501 tcg_gen_exit_tb((uintptr_t)ctx
->base
.tb
);
502 return DISAS_NORETURN
;
504 tcg_gen_movi_i64(cpu_pc
, dest
);
505 return DISAS_PC_UPDATED
;
509 static DisasJumpType
gen_bcond_internal(DisasContext
*ctx
, TCGCond cond
,
510 TCGv cmp
, int32_t disp
)
512 uint64_t dest
= ctx
->base
.pc_next
+ (disp
<< 2);
513 TCGLabel
*lab_true
= gen_new_label();
515 if (use_goto_tb(ctx
, dest
)) {
516 tcg_gen_brcondi_i64(cond
, cmp
, 0, lab_true
);
519 tcg_gen_movi_i64(cpu_pc
, ctx
->base
.pc_next
);
520 tcg_gen_exit_tb((uintptr_t)ctx
->base
.tb
);
522 gen_set_label(lab_true
);
524 tcg_gen_movi_i64(cpu_pc
, dest
);
525 tcg_gen_exit_tb((uintptr_t)ctx
->base
.tb
+ 1);
527 return DISAS_NORETURN
;
529 TCGv_i64 z
= tcg_const_i64(0);
530 TCGv_i64 d
= tcg_const_i64(dest
);
531 TCGv_i64 p
= tcg_const_i64(ctx
->base
.pc_next
);
533 tcg_gen_movcond_i64(cond
, cpu_pc
, cmp
, z
, d
, p
);
535 tcg_temp_free_i64(z
);
536 tcg_temp_free_i64(d
);
537 tcg_temp_free_i64(p
);
538 return DISAS_PC_UPDATED
;
542 static DisasJumpType
gen_bcond(DisasContext
*ctx
, TCGCond cond
, int ra
,
543 int32_t disp
, int mask
)
546 TCGv tmp
= tcg_temp_new();
549 tcg_gen_andi_i64(tmp
, load_gpr(ctx
, ra
), 1);
550 ret
= gen_bcond_internal(ctx
, cond
, tmp
, disp
);
554 return gen_bcond_internal(ctx
, cond
, load_gpr(ctx
, ra
), disp
);
557 /* Fold -0.0 for comparison with COND. */
559 static void gen_fold_mzero(TCGCond cond
, TCGv dest
, TCGv src
)
561 uint64_t mzero
= 1ull << 63;
566 /* For <= or >, the -0.0 value directly compares the way we want. */
567 tcg_gen_mov_i64(dest
, src
);
572 /* For == or !=, we can simply mask off the sign bit and compare. */
573 tcg_gen_andi_i64(dest
, src
, mzero
- 1);
578 /* For >= or <, map -0.0 to +0.0 via comparison and mask. */
579 tcg_gen_setcondi_i64(TCG_COND_NE
, dest
, src
, mzero
);
580 tcg_gen_neg_i64(dest
, dest
);
581 tcg_gen_and_i64(dest
, dest
, src
);
589 static DisasJumpType
gen_fbcond(DisasContext
*ctx
, TCGCond cond
, int ra
,
592 TCGv cmp_tmp
= tcg_temp_new();
595 gen_fold_mzero(cond
, cmp_tmp
, load_fpr(ctx
, ra
));
596 ret
= gen_bcond_internal(ctx
, cond
, cmp_tmp
, disp
);
597 tcg_temp_free(cmp_tmp
);
601 static void gen_fcmov(DisasContext
*ctx
, TCGCond cond
, int ra
, int rb
, int rc
)
606 vb
= load_fpr(ctx
, rb
);
608 gen_fold_mzero(cond
, va
, load_fpr(ctx
, ra
));
610 tcg_gen_movcond_i64(cond
, dest_fpr(ctx
, rc
), va
, z
, vb
, load_fpr(ctx
, rc
));
615 #define QUAL_RM_N 0x080 /* Round mode nearest even */
616 #define QUAL_RM_C 0x000 /* Round mode chopped */
617 #define QUAL_RM_M 0x040 /* Round mode minus infinity */
618 #define QUAL_RM_D 0x0c0 /* Round mode dynamic */
619 #define QUAL_RM_MASK 0x0c0
621 #define QUAL_U 0x100 /* Underflow enable (fp output) */
622 #define QUAL_V 0x100 /* Overflow enable (int output) */
623 #define QUAL_S 0x400 /* Software completion enable */
624 #define QUAL_I 0x200 /* Inexact detection enable */
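
/* The QUAL_* bits above decode the fn11 qualifier field of the floating-point
   operate instructions: rounding mode, underflow/overflow trap enables,
   software completion, and inexact detection. */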
626 static void gen_qual_roundmode(DisasContext
*ctx
, int fn11
)
630 fn11
&= QUAL_RM_MASK
;
631 if (fn11
== ctx
->tb_rm
) {
636 tmp
= tcg_temp_new_i32();
639 tcg_gen_movi_i32(tmp
, float_round_nearest_even
);
642 tcg_gen_movi_i32(tmp
, float_round_to_zero
);
645 tcg_gen_movi_i32(tmp
, float_round_down
);
648 tcg_gen_ld8u_i32(tmp
, cpu_env
,
649 offsetof(CPUAlphaState
, fpcr_dyn_round
));
653 #if defined(CONFIG_SOFTFLOAT_INLINE)
654 /* ??? The "fpu/softfloat.h" interface is to call set_float_rounding_mode.
655 With CONFIG_SOFTFLOAT that expands to an out-of-line call that just
656 sets the one field. */
657 tcg_gen_st8_i32(tmp
, cpu_env
,
658 offsetof(CPUAlphaState
, fp_status
.float_rounding_mode
));
660 gen_helper_setroundmode(tmp
);
663 tcg_temp_free_i32(tmp
);
666 static void gen_qual_flushzero(DisasContext
*ctx
, int fn11
)
671 if (fn11
== ctx
->tb_ftz
) {
676 tmp
= tcg_temp_new_i32();
678 /* Underflow is enabled, use the FPCR setting. */
679 tcg_gen_ld8u_i32(tmp
, cpu_env
,
680 offsetof(CPUAlphaState
, fpcr_flush_to_zero
));
682 /* Underflow is disabled, force flush-to-zero. */
683 tcg_gen_movi_i32(tmp
, 1);
686 #if defined(CONFIG_SOFTFLOAT_INLINE)
687 tcg_gen_st8_i32(tmp
, cpu_env
,
688 offsetof(CPUAlphaState
, fp_status
.flush_to_zero
));
690 gen_helper_setflushzero(tmp
);
693 tcg_temp_free_i32(tmp
);
696 static TCGv
gen_ieee_input(DisasContext
*ctx
, int reg
, int fn11
, int is_cmp
)
700 if (unlikely(reg
== 31)) {
701 val
= load_zero(ctx
);
704 if ((fn11
& QUAL_S
) == 0) {
706 gen_helper_ieee_input_cmp(cpu_env
, val
);
708 gen_helper_ieee_input(cpu_env
, val
);
711 #ifndef CONFIG_USER_ONLY
712 /* In system mode, raise exceptions for denormals like real
713 hardware. In user mode, proceed as if the OS completion
714 handler is handling the denormal as per spec. */
715 gen_helper_ieee_input_s(cpu_env
, val
);
722 static void gen_fp_exc_raise(int rc
, int fn11
)
724 /* ??? We ought to be able to do something with imprecise exceptions.
725 E.g. notice we're still in the trap shadow of something within the
726 TB and do not generate the code to signal the exception; end the TB
727 when an exception is forced to arrive, either by consumption of a
728 register value or TRAPB or EXCB. */
732 if (!(fn11
& QUAL_U
)) {
733 /* Note that QUAL_U == QUAL_V, so ignore either. */
734 ignore
|= FPCR_UNF
| FPCR_IOV
;
736 if (!(fn11
& QUAL_I
)) {
739 ign
= tcg_const_i32(ignore
);
741 /* ??? Pass in the regno of the destination so that the helper can
742 set EXC_MASK, which contains a bitmask of destination registers
743 that have caused arithmetic traps. A simple userspace emulation
744 does not require this. We do need it for a guest kernel's entArith,
745 or if we were to do something clever with imprecise exceptions. */
746 reg
= tcg_const_i32(rc
+ 32);
748 gen_helper_fp_exc_raise_s(cpu_env
, ign
, reg
);
750 gen_helper_fp_exc_raise(cpu_env
, ign
, reg
);
753 tcg_temp_free_i32(reg
);
754 tcg_temp_free_i32(ign
);
757 static void gen_cvtlq(TCGv vc
, TCGv vb
)
759 TCGv tmp
= tcg_temp_new();
761 /* The arithmetic right shift here, plus the sign-extended mask below
762 yields a sign-extended result without an explicit ext32s_i64. */
763 tcg_gen_shri_i64(tmp
, vb
, 29);
764 tcg_gen_sari_i64(vc
, vb
, 32);
765 tcg_gen_deposit_i64(vc
, vc
, tmp
, 0, 30);
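
/* Common pattern for the IEEE arithmetic emitters below: apply the rounding
   and flush-to-zero qualifiers, load the inputs through gen_ieee_input, call
   the per-operation helper, then raise any enabled FP exceptions. */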
770 static void gen_ieee_arith2(DisasContext
*ctx
,
771 void (*helper
)(TCGv
, TCGv_ptr
, TCGv
),
772 int rb
, int rc
, int fn11
)
776 gen_qual_roundmode(ctx
, fn11
);
777 gen_qual_flushzero(ctx
, fn11
);
779 vb
= gen_ieee_input(ctx
, rb
, fn11
, 0);
780 helper(dest_fpr(ctx
, rc
), cpu_env
, vb
);
782 gen_fp_exc_raise(rc
, fn11
);
785 #define IEEE_ARITH2(name) \
786 static inline void glue(gen_, name)(DisasContext *ctx, \
787 int rb, int rc, int fn11) \
789 gen_ieee_arith2(ctx, gen_helper_##name, rb, rc, fn11); \
796 static void gen_cvttq(DisasContext
*ctx
, int rb
, int rc
, int fn11
)
800 /* No need to set flushzero, since we have an integer output. */
801 vb
= gen_ieee_input(ctx
, rb
, fn11
, 0);
802 vc
= dest_fpr(ctx
, rc
);
804 /* Almost all integer conversions use cropped rounding;
805 special case that. */
806 if ((fn11
& QUAL_RM_MASK
) == QUAL_RM_C
) {
807 gen_helper_cvttq_c(vc
, cpu_env
, vb
);
809 gen_qual_roundmode(ctx
, fn11
);
810 gen_helper_cvttq(vc
, cpu_env
, vb
);
812 gen_fp_exc_raise(rc
, fn11
);
815 static void gen_ieee_intcvt(DisasContext
*ctx
,
816 void (*helper
)(TCGv
, TCGv_ptr
, TCGv
),
817 int rb
, int rc
, int fn11
)
821 gen_qual_roundmode(ctx
, fn11
);
822 vb
= load_fpr(ctx
, rb
);
823 vc
= dest_fpr(ctx
, rc
);
825 /* The only exception that can be raised by integer conversion
826 is inexact. Thus we only need to worry about exceptions when
827 inexact handling is requested. */
829 helper(vc
, cpu_env
, vb
);
830 gen_fp_exc_raise(rc
, fn11
);
832 helper(vc
, cpu_env
, vb
);
836 #define IEEE_INTCVT(name) \
837 static inline void glue(gen_, name)(DisasContext *ctx, \
838 int rb, int rc, int fn11) \
840 gen_ieee_intcvt(ctx, gen_helper_##name, rb, rc, fn11); \
845 static void gen_cpy_mask(TCGv vc
, TCGv va
, TCGv vb
, bool inv_a
, uint64_t mask
)
847 TCGv vmask
= tcg_const_i64(mask
);
848 TCGv tmp
= tcg_temp_new_i64();
851 tcg_gen_andc_i64(tmp
, vmask
, va
);
853 tcg_gen_and_i64(tmp
, va
, vmask
);
856 tcg_gen_andc_i64(vc
, vb
, vmask
);
857 tcg_gen_or_i64(vc
, vc
, tmp
);
859 tcg_temp_free(vmask
);
863 static void gen_ieee_arith3(DisasContext
*ctx
,
864 void (*helper
)(TCGv
, TCGv_ptr
, TCGv
, TCGv
),
865 int ra
, int rb
, int rc
, int fn11
)
869 gen_qual_roundmode(ctx
, fn11
);
870 gen_qual_flushzero(ctx
, fn11
);
872 va
= gen_ieee_input(ctx
, ra
, fn11
, 0);
873 vb
= gen_ieee_input(ctx
, rb
, fn11
, 0);
874 vc
= dest_fpr(ctx
, rc
);
875 helper(vc
, cpu_env
, va
, vb
);
877 gen_fp_exc_raise(rc
, fn11
);
880 #define IEEE_ARITH3(name) \
881 static inline void glue(gen_, name)(DisasContext *ctx, \
882 int ra, int rb, int rc, int fn11) \
884 gen_ieee_arith3(ctx, gen_helper_##name, ra, rb, rc, fn11); \
895 static void gen_ieee_compare(DisasContext
*ctx
,
896 void (*helper
)(TCGv
, TCGv_ptr
, TCGv
, TCGv
),
897 int ra
, int rb
, int rc
, int fn11
)
901 va
= gen_ieee_input(ctx
, ra
, fn11
, 1);
902 vb
= gen_ieee_input(ctx
, rb
, fn11
, 1);
903 vc
= dest_fpr(ctx
, rc
);
904 helper(vc
, cpu_env
, va
, vb
);
906 gen_fp_exc_raise(rc
, fn11
);
909 #define IEEE_CMP3(name) \
910 static inline void glue(gen_, name)(DisasContext *ctx, \
911 int ra, int rb, int rc, int fn11) \
913 gen_ieee_compare(ctx, gen_helper_##name, ra, rb, rc, fn11); \
920 static inline uint64_t zapnot_mask(uint8_t lit
)
925 for (i
= 0; i
< 8; ++i
) {
926 if ((lit
>> i
) & 1) {
927 mask
|= 0xffull
<< (i
* 8);
933 /* Implement zapnot with an immediate operand, which expands to some
934 form of immediate AND. This is a basic building block in the
935 definition of many of the other byte manipulation instructions. */
936 static void gen_zapnoti(TCGv dest
, TCGv src
, uint8_t lit
)
940 tcg_gen_movi_i64(dest
, 0);
943 tcg_gen_ext8u_i64(dest
, src
);
946 tcg_gen_ext16u_i64(dest
, src
);
949 tcg_gen_ext32u_i64(dest
, src
);
952 tcg_gen_mov_i64(dest
, src
);
955 tcg_gen_andi_i64(dest
, src
, zapnot_mask(lit
));
960 /* EXTWH, EXTLH, EXTQH */
961 static void gen_ext_h(DisasContext
*ctx
, TCGv vc
, TCGv va
, int rb
, bool islit
,
962 uint8_t lit
, uint8_t byte_mask
)
965 int pos
= (64 - lit
* 8) & 0x3f;
966 int len
= cto32(byte_mask
) * 8;
968 tcg_gen_deposit_z_i64(vc
, va
, pos
, len
- pos
);
970 tcg_gen_movi_i64(vc
, 0);
973 TCGv tmp
= tcg_temp_new();
974 tcg_gen_shli_i64(tmp
, load_gpr(ctx
, rb
), 3);
975 tcg_gen_neg_i64(tmp
, tmp
);
976 tcg_gen_andi_i64(tmp
, tmp
, 0x3f);
977 tcg_gen_shl_i64(vc
, va
, tmp
);
980 gen_zapnoti(vc
, vc
, byte_mask
);
983 /* EXTBL, EXTWL, EXTLL, EXTQL */
984 static void gen_ext_l(DisasContext
*ctx
, TCGv vc
, TCGv va
, int rb
, bool islit
,
985 uint8_t lit
, uint8_t byte_mask
)
988 int pos
= (lit
& 7) * 8;
989 int len
= cto32(byte_mask
) * 8;
990 if (pos
+ len
>= 64) {
993 tcg_gen_extract_i64(vc
, va
, pos
, len
);
995 TCGv tmp
= tcg_temp_new();
996 tcg_gen_andi_i64(tmp
, load_gpr(ctx
, rb
), 7);
997 tcg_gen_shli_i64(tmp
, tmp
, 3);
998 tcg_gen_shr_i64(vc
, va
, tmp
);
1000 gen_zapnoti(vc
, vc
, byte_mask
);
1004 /* INSWH, INSLH, INSQH */
1005 static void gen_ins_h(DisasContext
*ctx
, TCGv vc
, TCGv va
, int rb
, bool islit
,
1006 uint8_t lit
, uint8_t byte_mask
)
1009 int pos
= 64 - (lit
& 7) * 8;
1010 int len
= cto32(byte_mask
) * 8;
1012 tcg_gen_extract_i64(vc
, va
, pos
, len
- pos
);
1014 tcg_gen_movi_i64(vc
, 0);
1017 TCGv tmp
= tcg_temp_new();
1018 TCGv shift
= tcg_temp_new();
/* The instruction description has us left-shift the byte mask
   and extract bits <15:8> and apply that zap at the end.  This
   is equivalent to simply performing the zap first and shifting
   afterward. */
1024 gen_zapnoti(tmp
, va
, byte_mask
);
1026 /* If (B & 7) == 0, we need to shift by 64 and leave a zero. Do this
1027 portably by splitting the shift into two parts: shift_count-1 and 1.
1028 Arrange for the -1 by using ones-complement instead of
1029 twos-complement in the negation: ~(B * 8) & 63. */
1031 tcg_gen_shli_i64(shift
, load_gpr(ctx
, rb
), 3);
1032 tcg_gen_not_i64(shift
, shift
);
1033 tcg_gen_andi_i64(shift
, shift
, 0x3f);
1035 tcg_gen_shr_i64(vc
, tmp
, shift
);
1036 tcg_gen_shri_i64(vc
, vc
, 1);
1037 tcg_temp_free(shift
);
1042 /* INSBL, INSWL, INSLL, INSQL */
1043 static void gen_ins_l(DisasContext
*ctx
, TCGv vc
, TCGv va
, int rb
, bool islit
,
1044 uint8_t lit
, uint8_t byte_mask
)
1047 int pos
= (lit
& 7) * 8;
1048 int len
= cto32(byte_mask
) * 8;
1049 if (pos
+ len
> 64) {
1052 tcg_gen_deposit_z_i64(vc
, va
, pos
, len
);
1054 TCGv tmp
= tcg_temp_new();
1055 TCGv shift
= tcg_temp_new();
/* The instruction description has us left-shift the byte mask
   and extract bits <15:8> and apply that zap at the end.  This
   is equivalent to simply performing the zap first and shifting
   afterward. */
1061 gen_zapnoti(tmp
, va
, byte_mask
);
1063 tcg_gen_andi_i64(shift
, load_gpr(ctx
, rb
), 7);
1064 tcg_gen_shli_i64(shift
, shift
, 3);
1065 tcg_gen_shl_i64(vc
, tmp
, shift
);
1066 tcg_temp_free(shift
);
1071 /* MSKWH, MSKLH, MSKQH */
1072 static void gen_msk_h(DisasContext
*ctx
, TCGv vc
, TCGv va
, int rb
, bool islit
,
1073 uint8_t lit
, uint8_t byte_mask
)
1076 gen_zapnoti(vc
, va
, ~((byte_mask
<< (lit
& 7)) >> 8));
1078 TCGv shift
= tcg_temp_new();
1079 TCGv mask
= tcg_temp_new();
1081 /* The instruction description is as above, where the byte_mask
1082 is shifted left, and then we extract bits <15:8>. This can be
1083 emulated with a right-shift on the expanded byte mask. This
1084 requires extra care because for an input <2:0> == 0 we need a
1085 shift of 64 bits in order to generate a zero. This is done by
1086 splitting the shift into two parts, the variable shift - 1
1087 followed by a constant 1 shift. The code we expand below is
1088 equivalent to ~(B * 8) & 63. */
1090 tcg_gen_shli_i64(shift
, load_gpr(ctx
, rb
), 3);
1091 tcg_gen_not_i64(shift
, shift
);
1092 tcg_gen_andi_i64(shift
, shift
, 0x3f);
1093 tcg_gen_movi_i64(mask
, zapnot_mask (byte_mask
));
1094 tcg_gen_shr_i64(mask
, mask
, shift
);
1095 tcg_gen_shri_i64(mask
, mask
, 1);
1097 tcg_gen_andc_i64(vc
, va
, mask
);
1099 tcg_temp_free(mask
);
1100 tcg_temp_free(shift
);
1104 /* MSKBL, MSKWL, MSKLL, MSKQL */
1105 static void gen_msk_l(DisasContext
*ctx
, TCGv vc
, TCGv va
, int rb
, bool islit
,
1106 uint8_t lit
, uint8_t byte_mask
)
1109 gen_zapnoti(vc
, va
, ~(byte_mask
<< (lit
& 7)));
1111 TCGv shift
= tcg_temp_new();
1112 TCGv mask
= tcg_temp_new();
1114 tcg_gen_andi_i64(shift
, load_gpr(ctx
, rb
), 7);
1115 tcg_gen_shli_i64(shift
, shift
, 3);
1116 tcg_gen_movi_i64(mask
, zapnot_mask(byte_mask
));
1117 tcg_gen_shl_i64(mask
, mask
, shift
);
1119 tcg_gen_andc_i64(vc
, va
, mask
);
1121 tcg_temp_free(mask
);
1122 tcg_temp_free(shift
);
1126 static void gen_rx(DisasContext
*ctx
, int ra
, int set
)
1131 ld_flag_byte(ctx
->ir
[ra
], ENV_FLAG_RX_SHIFT
);
1134 tmp
= tcg_const_i64(set
);
1135 st_flag_byte(ctx
->ir
[ra
], ENV_FLAG_RX_SHIFT
);
1139 static DisasJumpType
gen_call_pal(DisasContext
*ctx
, int palcode
)
/* We're emulating OSF/1 PALcode.  Many of these are trivial accesses
   to internal cpu registers. */
1144 /* Unprivileged PAL call */
1145 if (palcode
>= 0x80 && palcode
< 0xC0) {
1149 /* No-op inside QEMU. */
1153 tcg_gen_ld_i64(ctx
->ir
[IR_V0
], cpu_env
,
1154 offsetof(CPUAlphaState
, unique
));
1158 tcg_gen_st_i64(ctx
->ir
[IR_A0
], cpu_env
,
1159 offsetof(CPUAlphaState
, unique
));
1168 #ifndef CONFIG_USER_ONLY
1169 /* Privileged PAL code */
1170 if (palcode
< 0x40 && (ctx
->tbflags
& ENV_FLAG_PS_USER
) == 0) {
1174 /* No-op inside QEMU. */
1178 /* No-op inside QEMU. */
1182 tcg_gen_st_i64(ctx
->ir
[IR_A0
], cpu_env
,
1183 offsetof(CPUAlphaState
, vptptr
));
1187 tcg_gen_st_i64(ctx
->ir
[IR_A0
], cpu_env
,
1188 offsetof(CPUAlphaState
, sysval
));
1192 tcg_gen_ld_i64(ctx
->ir
[IR_V0
], cpu_env
,
1193 offsetof(CPUAlphaState
, sysval
));
1198 /* Note that we already know we're in kernel mode, so we know
1199 that PS only contains the 3 IPL bits. */
1200 ld_flag_byte(ctx
->ir
[IR_V0
], ENV_FLAG_PS_SHIFT
);
/* But make sure to store only the 3 IPL bits from the user. */
1204 TCGv tmp
= tcg_temp_new();
1205 tcg_gen_andi_i64(tmp
, ctx
->ir
[IR_A0
], PS_INT_MASK
);
1206 st_flag_byte(tmp
, ENV_FLAG_PS_SHIFT
);
1210 /* Allow interrupts to be recognized right away. */
1211 tcg_gen_movi_i64(cpu_pc
, ctx
->base
.pc_next
);
1212 return DISAS_PC_UPDATED_NOCHAIN
;
1216 ld_flag_byte(ctx
->ir
[IR_V0
], ENV_FLAG_PS_SHIFT
);
1221 tcg_gen_st_i64(ctx
->ir
[IR_A0
], cpu_env
,
1222 offsetof(CPUAlphaState
, usp
));
1226 tcg_gen_ld_i64(ctx
->ir
[IR_V0
], cpu_env
,
1227 offsetof(CPUAlphaState
, usp
));
1231 tcg_gen_ld32s_i64(ctx
->ir
[IR_V0
], cpu_env
,
1232 -offsetof(AlphaCPU
, env
) + offsetof(CPUState
, cpu_index
));
1238 TCGv_i32 tmp
= tcg_const_i32(1);
1239 tcg_gen_st_i32(tmp
, cpu_env
, -offsetof(AlphaCPU
, env
) +
1240 offsetof(CPUState
, halted
));
1241 tcg_temp_free_i32(tmp
);
1243 tcg_gen_movi_i64(ctx
->ir
[IR_V0
], 0);
1244 return gen_excp(ctx
, EXCP_HALTED
, 0);
1253 return gen_invalid(ctx
);
1256 #ifdef CONFIG_USER_ONLY
1257 return gen_excp(ctx
, EXCP_CALL_PAL
, palcode
);
1260 TCGv tmp
= tcg_temp_new();
1261 uint64_t exc_addr
= ctx
->base
.pc_next
;
1262 uint64_t entry
= ctx
->palbr
;
1264 if (ctx
->tbflags
& ENV_FLAG_PAL_MODE
) {
1267 tcg_gen_movi_i64(tmp
, 1);
1268 st_flag_byte(tmp
, ENV_FLAG_PAL_SHIFT
);
1271 tcg_gen_movi_i64(tmp
, exc_addr
);
1272 tcg_gen_st_i64(tmp
, cpu_env
, offsetof(CPUAlphaState
, exc_addr
));
1275 entry
+= (palcode
& 0x80
1276 ? 0x2000 + (palcode
- 0x80) * 64
1277 : 0x1000 + palcode
* 64);
1279 /* Since the destination is running in PALmode, we don't really
1280 need the page permissions check. We'll see the existence of
1281 the page when we create the TB, and we'll flush all TBs if
1282 we change the PAL base register. */
1283 if (!use_exit_tb(ctx
)) {
1285 tcg_gen_movi_i64(cpu_pc
, entry
);
1286 tcg_gen_exit_tb((uintptr_t)ctx
->base
.tb
);
1287 return DISAS_NORETURN
;
1289 tcg_gen_movi_i64(cpu_pc
, entry
);
1290 return DISAS_PC_UPDATED
;
1296 #ifndef CONFIG_USER_ONLY
1298 #define PR_LONG 0x200000
1300 static int cpu_pr_data(int pr
)
1303 case 2: return offsetof(CPUAlphaState
, pcc_ofs
) | PR_LONG
;
1304 case 3: return offsetof(CPUAlphaState
, trap_arg0
);
1305 case 4: return offsetof(CPUAlphaState
, trap_arg1
);
1306 case 5: return offsetof(CPUAlphaState
, trap_arg2
);
1307 case 6: return offsetof(CPUAlphaState
, exc_addr
);
1308 case 7: return offsetof(CPUAlphaState
, palbr
);
1309 case 8: return offsetof(CPUAlphaState
, ptbr
);
1310 case 9: return offsetof(CPUAlphaState
, vptptr
);
1311 case 10: return offsetof(CPUAlphaState
, unique
);
1312 case 11: return offsetof(CPUAlphaState
, sysval
);
1313 case 12: return offsetof(CPUAlphaState
, usp
);
1316 return offsetof(CPUAlphaState
, scratch
[pr
- 40]);
1319 return offsetof(CPUAlphaState
, alarm_expire
);
1324 static DisasJumpType
gen_mfpr(DisasContext
*ctx
, TCGv va
, int regno
)
1326 void (*helper
)(TCGv
);
1331 /* Accessing the "non-shadow" general registers. */
1332 regno
= regno
== 39 ? 25 : regno
- 32 + 8;
1333 tcg_gen_mov_i64(va
, cpu_std_ir
[regno
]);
1336 case 250: /* WALLTIME */
1337 helper
= gen_helper_get_walltime
;
1339 case 249: /* VMTIME */
1340 helper
= gen_helper_get_vmtime
;
1346 return DISAS_PC_STALE
;
1353 ld_flag_byte(va
, ENV_FLAG_PS_SHIFT
);
1356 ld_flag_byte(va
, ENV_FLAG_FEN_SHIFT
);
1360 /* The basic registers are data only, and unknown registers
1361 are read-zero, write-ignore. */
1362 data
= cpu_pr_data(regno
);
1364 tcg_gen_movi_i64(va
, 0);
1365 } else if (data
& PR_LONG
) {
1366 tcg_gen_ld32s_i64(va
, cpu_env
, data
& ~PR_LONG
);
1368 tcg_gen_ld_i64(va
, cpu_env
, data
);
1376 static DisasJumpType
gen_mtpr(DisasContext
*ctx
, TCGv vb
, int regno
)
1383 gen_helper_tbia(cpu_env
);
1388 gen_helper_tbis(cpu_env
, vb
);
1394 TCGv_i32 tmp
= tcg_const_i32(1);
1395 tcg_gen_st_i32(tmp
, cpu_env
, -offsetof(AlphaCPU
, env
) +
1396 offsetof(CPUState
, halted
));
1397 tcg_temp_free_i32(tmp
);
1399 return gen_excp(ctx
, EXCP_HALTED
, 0);
1403 gen_helper_halt(vb
);
1404 return DISAS_PC_STALE
;
1408 gen_helper_set_alarm(cpu_env
, vb
);
1413 tcg_gen_st_i64(vb
, cpu_env
, offsetof(CPUAlphaState
, palbr
));
1414 /* Changing the PAL base register implies un-chaining all of the TBs
1415 that ended with a CALL_PAL. Since the base register usually only
1416 changes during boot, flushing everything works well. */
1417 gen_helper_tb_flush(cpu_env
);
1418 return DISAS_PC_STALE
;
1421 /* Accessing the "non-shadow" general registers. */
1422 regno
= regno
== 39 ? 25 : regno
- 32 + 8;
1423 tcg_gen_mov_i64(cpu_std_ir
[regno
], vb
);
1427 st_flag_byte(vb
, ENV_FLAG_PS_SHIFT
);
1430 st_flag_byte(vb
, ENV_FLAG_FEN_SHIFT
);
1434 /* The basic registers are data only, and unknown registers
1435 are read-zero, write-ignore. */
1436 data
= cpu_pr_data(regno
);
1438 if (data
& PR_LONG
) {
1439 tcg_gen_st32_i64(vb
, cpu_env
, data
& ~PR_LONG
);
1441 tcg_gen_st_i64(vb
, cpu_env
, data
);
1449 #endif /* !USER_ONLY*/
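
/* Guards used by translate_one: each REQUIRE_* macro checks a precondition
   (no literal operand, an AMASK feature bit, a TB flag, or a register field
   that must be 31), and when the check fails the instruction is treated as
   invalid.  (The exact bail-out path is assumed from how the macros are
   used below.) */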
1451 #define REQUIRE_NO_LIT \
1458 #define REQUIRE_AMASK(FLAG) \
1460 if ((ctx->amask & AMASK_##FLAG) == 0) { \
1465 #define REQUIRE_TB_FLAG(FLAG) \
1467 if ((ctx->tbflags & (FLAG)) == 0) { \
1472 #define REQUIRE_REG_31(WHICH) \
1474 if (WHICH != 31) { \
1479 static DisasJumpType
translate_one(DisasContext
*ctx
, uint32_t insn
)
1481 int32_t disp21
, disp16
, disp12
__attribute__((unused
));
1483 uint8_t opc
, ra
, rb
, rc
, fpfn
, fn7
, lit
;
1484 bool islit
, real_islit
;
1485 TCGv va
, vb
, vc
, tmp
, tmp2
;
1489 /* Decode all instruction fields */
1490 opc
= extract32(insn
, 26, 6);
1491 ra
= extract32(insn
, 21, 5);
1492 rb
= extract32(insn
, 16, 5);
1493 rc
= extract32(insn
, 0, 5);
1494 real_islit
= islit
= extract32(insn
, 12, 1);
1495 lit
= extract32(insn
, 13, 8);
1497 disp21
= sextract32(insn
, 0, 21);
1498 disp16
= sextract32(insn
, 0, 16);
1499 disp12
= sextract32(insn
, 0, 12);
1501 fn11
= extract32(insn
, 5, 11);
1502 fpfn
= extract32(insn
, 5, 6);
1503 fn7
= extract32(insn
, 5, 7);
1505 if (rb
== 31 && !islit
) {
1514 ret
= gen_call_pal(ctx
, insn
& 0x03ffffff);
1540 disp16
= (uint32_t)disp16
<< 16;
1544 va
= dest_gpr(ctx
, ra
);
1545 /* It's worth special-casing immediate loads. */
1547 tcg_gen_movi_i64(va
, disp16
);
1549 tcg_gen_addi_i64(va
, load_gpr(ctx
, rb
), disp16
);
1556 gen_load_mem(ctx
, &tcg_gen_qemu_ld8u
, ra
, rb
, disp16
, 0, 0);
1560 gen_load_mem(ctx
, &tcg_gen_qemu_ld64
, ra
, rb
, disp16
, 0, 1);
1565 gen_load_mem(ctx
, &tcg_gen_qemu_ld16u
, ra
, rb
, disp16
, 0, 0);
1570 gen_store_mem(ctx
, &tcg_gen_qemu_st16
, ra
, rb
, disp16
, 0, 0);
1575 gen_store_mem(ctx
, &tcg_gen_qemu_st8
, ra
, rb
, disp16
, 0, 0);
1579 gen_store_mem(ctx
, &tcg_gen_qemu_st64
, ra
, rb
, disp16
, 0, 1);
1583 vc
= dest_gpr(ctx
, rc
);
1584 vb
= load_gpr_lit(ctx
, rb
, lit
, islit
);
1588 /* Special case ADDL as SEXTL. */
1589 tcg_gen_ext32s_i64(vc
, vb
);
1593 /* Special case SUBQ as NEGQ. */
1594 tcg_gen_neg_i64(vc
, vb
);
1599 va
= load_gpr(ctx
, ra
);
1603 tcg_gen_add_i64(vc
, va
, vb
);
1604 tcg_gen_ext32s_i64(vc
, vc
);
1608 tmp
= tcg_temp_new();
1609 tcg_gen_shli_i64(tmp
, va
, 2);
1610 tcg_gen_add_i64(tmp
, tmp
, vb
);
1611 tcg_gen_ext32s_i64(vc
, tmp
);
1616 tcg_gen_sub_i64(vc
, va
, vb
);
1617 tcg_gen_ext32s_i64(vc
, vc
);
1621 tmp
= tcg_temp_new();
1622 tcg_gen_shli_i64(tmp
, va
, 2);
1623 tcg_gen_sub_i64(tmp
, tmp
, vb
);
1624 tcg_gen_ext32s_i64(vc
, tmp
);
1630 /* Special case 0 >= X as X == 0. */
1631 gen_helper_cmpbe0(vc
, vb
);
1633 gen_helper_cmpbge(vc
, va
, vb
);
1638 tmp
= tcg_temp_new();
1639 tcg_gen_shli_i64(tmp
, va
, 3);
1640 tcg_gen_add_i64(tmp
, tmp
, vb
);
1641 tcg_gen_ext32s_i64(vc
, tmp
);
1646 tmp
= tcg_temp_new();
1647 tcg_gen_shli_i64(tmp
, va
, 3);
1648 tcg_gen_sub_i64(tmp
, tmp
, vb
);
1649 tcg_gen_ext32s_i64(vc
, tmp
);
1654 tcg_gen_setcond_i64(TCG_COND_LTU
, vc
, va
, vb
);
1658 tcg_gen_add_i64(vc
, va
, vb
);
1662 tmp
= tcg_temp_new();
1663 tcg_gen_shli_i64(tmp
, va
, 2);
1664 tcg_gen_add_i64(vc
, tmp
, vb
);
1669 tcg_gen_sub_i64(vc
, va
, vb
);
1673 tmp
= tcg_temp_new();
1674 tcg_gen_shli_i64(tmp
, va
, 2);
1675 tcg_gen_sub_i64(vc
, tmp
, vb
);
1680 tcg_gen_setcond_i64(TCG_COND_EQ
, vc
, va
, vb
);
1684 tmp
= tcg_temp_new();
1685 tcg_gen_shli_i64(tmp
, va
, 3);
1686 tcg_gen_add_i64(vc
, tmp
, vb
);
1691 tmp
= tcg_temp_new();
1692 tcg_gen_shli_i64(tmp
, va
, 3);
1693 tcg_gen_sub_i64(vc
, tmp
, vb
);
1698 tcg_gen_setcond_i64(TCG_COND_LEU
, vc
, va
, vb
);
1702 tmp
= tcg_temp_new();
1703 tcg_gen_ext32s_i64(tmp
, va
);
1704 tcg_gen_ext32s_i64(vc
, vb
);
1705 tcg_gen_add_i64(tmp
, tmp
, vc
);
1706 tcg_gen_ext32s_i64(vc
, tmp
);
1707 gen_helper_check_overflow(cpu_env
, vc
, tmp
);
1712 tmp
= tcg_temp_new();
1713 tcg_gen_ext32s_i64(tmp
, va
);
1714 tcg_gen_ext32s_i64(vc
, vb
);
1715 tcg_gen_sub_i64(tmp
, tmp
, vc
);
1716 tcg_gen_ext32s_i64(vc
, tmp
);
1717 gen_helper_check_overflow(cpu_env
, vc
, tmp
);
1722 tcg_gen_setcond_i64(TCG_COND_LT
, vc
, va
, vb
);
1726 tmp
= tcg_temp_new();
1727 tmp2
= tcg_temp_new();
1728 tcg_gen_eqv_i64(tmp
, va
, vb
);
1729 tcg_gen_mov_i64(tmp2
, va
);
1730 tcg_gen_add_i64(vc
, va
, vb
);
1731 tcg_gen_xor_i64(tmp2
, tmp2
, vc
);
1732 tcg_gen_and_i64(tmp
, tmp
, tmp2
);
1733 tcg_gen_shri_i64(tmp
, tmp
, 63);
1734 tcg_gen_movi_i64(tmp2
, 0);
1735 gen_helper_check_overflow(cpu_env
, tmp
, tmp2
);
1737 tcg_temp_free(tmp2
);
1741 tmp
= tcg_temp_new();
1742 tmp2
= tcg_temp_new();
1743 tcg_gen_xor_i64(tmp
, va
, vb
);
1744 tcg_gen_mov_i64(tmp2
, va
);
1745 tcg_gen_sub_i64(vc
, va
, vb
);
1746 tcg_gen_xor_i64(tmp2
, tmp2
, vc
);
1747 tcg_gen_and_i64(tmp
, tmp
, tmp2
);
1748 tcg_gen_shri_i64(tmp
, tmp
, 63);
1749 tcg_gen_movi_i64(tmp2
, 0);
1750 gen_helper_check_overflow(cpu_env
, tmp
, tmp2
);
1752 tcg_temp_free(tmp2
);
1756 tcg_gen_setcond_i64(TCG_COND_LE
, vc
, va
, vb
);
1766 /* Special case BIS as NOP. */
1770 /* Special case BIS as MOV. */
1771 vc
= dest_gpr(ctx
, rc
);
1773 tcg_gen_movi_i64(vc
, lit
);
1775 tcg_gen_mov_i64(vc
, load_gpr(ctx
, rb
));
1781 vc
= dest_gpr(ctx
, rc
);
1782 vb
= load_gpr_lit(ctx
, rb
, lit
, islit
);
1784 if (fn7
== 0x28 && ra
== 31) {
1785 /* Special case ORNOT as NOT. */
1786 tcg_gen_not_i64(vc
, vb
);
1790 va
= load_gpr(ctx
, ra
);
1794 tcg_gen_and_i64(vc
, va
, vb
);
1798 tcg_gen_andc_i64(vc
, va
, vb
);
1802 tmp
= tcg_temp_new();
1803 tcg_gen_andi_i64(tmp
, va
, 1);
1804 tcg_gen_movcond_i64(TCG_COND_NE
, vc
, tmp
, load_zero(ctx
),
1805 vb
, load_gpr(ctx
, rc
));
1810 tmp
= tcg_temp_new();
1811 tcg_gen_andi_i64(tmp
, va
, 1);
1812 tcg_gen_movcond_i64(TCG_COND_EQ
, vc
, tmp
, load_zero(ctx
),
1813 vb
, load_gpr(ctx
, rc
));
1818 tcg_gen_or_i64(vc
, va
, vb
);
1822 tcg_gen_movcond_i64(TCG_COND_EQ
, vc
, va
, load_zero(ctx
),
1823 vb
, load_gpr(ctx
, rc
));
1827 tcg_gen_movcond_i64(TCG_COND_NE
, vc
, va
, load_zero(ctx
),
1828 vb
, load_gpr(ctx
, rc
));
1832 tcg_gen_orc_i64(vc
, va
, vb
);
1836 tcg_gen_xor_i64(vc
, va
, vb
);
1840 tcg_gen_movcond_i64(TCG_COND_LT
, vc
, va
, load_zero(ctx
),
1841 vb
, load_gpr(ctx
, rc
));
1845 tcg_gen_movcond_i64(TCG_COND_GE
, vc
, va
, load_zero(ctx
),
1846 vb
, load_gpr(ctx
, rc
));
1850 tcg_gen_eqv_i64(vc
, va
, vb
);
1855 tcg_gen_andi_i64(vc
, vb
, ~ctx
->amask
);
1859 tcg_gen_movcond_i64(TCG_COND_LE
, vc
, va
, load_zero(ctx
),
1860 vb
, load_gpr(ctx
, rc
));
1864 tcg_gen_movcond_i64(TCG_COND_GT
, vc
, va
, load_zero(ctx
),
1865 vb
, load_gpr(ctx
, rc
));
1870 tcg_gen_movi_i64(vc
, ctx
->implver
);
1878 vc
= dest_gpr(ctx
, rc
);
1879 va
= load_gpr(ctx
, ra
);
1883 gen_msk_l(ctx
, vc
, va
, rb
, islit
, lit
, 0x01);
1887 gen_ext_l(ctx
, vc
, va
, rb
, islit
, lit
, 0x01);
1891 gen_ins_l(ctx
, vc
, va
, rb
, islit
, lit
, 0x01);
1895 gen_msk_l(ctx
, vc
, va
, rb
, islit
, lit
, 0x03);
1899 gen_ext_l(ctx
, vc
, va
, rb
, islit
, lit
, 0x03);
1903 gen_ins_l(ctx
, vc
, va
, rb
, islit
, lit
, 0x03);
1907 gen_msk_l(ctx
, vc
, va
, rb
, islit
, lit
, 0x0f);
1911 gen_ext_l(ctx
, vc
, va
, rb
, islit
, lit
, 0x0f);
1915 gen_ins_l(ctx
, vc
, va
, rb
, islit
, lit
, 0x0f);
1920 gen_zapnoti(vc
, va
, ~lit
);
1922 gen_helper_zap(vc
, va
, load_gpr(ctx
, rb
));
1928 gen_zapnoti(vc
, va
, lit
);
1930 gen_helper_zapnot(vc
, va
, load_gpr(ctx
, rb
));
1935 gen_msk_l(ctx
, vc
, va
, rb
, islit
, lit
, 0xff);
1940 tcg_gen_shri_i64(vc
, va
, lit
& 0x3f);
1942 tmp
= tcg_temp_new();
1943 vb
= load_gpr(ctx
, rb
);
1944 tcg_gen_andi_i64(tmp
, vb
, 0x3f);
1945 tcg_gen_shr_i64(vc
, va
, tmp
);
1951 gen_ext_l(ctx
, vc
, va
, rb
, islit
, lit
, 0xff);
1956 tcg_gen_shli_i64(vc
, va
, lit
& 0x3f);
1958 tmp
= tcg_temp_new();
1959 vb
= load_gpr(ctx
, rb
);
1960 tcg_gen_andi_i64(tmp
, vb
, 0x3f);
1961 tcg_gen_shl_i64(vc
, va
, tmp
);
1967 gen_ins_l(ctx
, vc
, va
, rb
, islit
, lit
, 0xff);
1972 tcg_gen_sari_i64(vc
, va
, lit
& 0x3f);
1974 tmp
= tcg_temp_new();
1975 vb
= load_gpr(ctx
, rb
);
1976 tcg_gen_andi_i64(tmp
, vb
, 0x3f);
1977 tcg_gen_sar_i64(vc
, va
, tmp
);
1983 gen_msk_h(ctx
, vc
, va
, rb
, islit
, lit
, 0x03);
1987 gen_ins_h(ctx
, vc
, va
, rb
, islit
, lit
, 0x03);
1991 gen_ext_h(ctx
, vc
, va
, rb
, islit
, lit
, 0x03);
1995 gen_msk_h(ctx
, vc
, va
, rb
, islit
, lit
, 0x0f);
1999 gen_ins_h(ctx
, vc
, va
, rb
, islit
, lit
, 0x0f);
2003 gen_ext_h(ctx
, vc
, va
, rb
, islit
, lit
, 0x0f);
2007 gen_msk_h(ctx
, vc
, va
, rb
, islit
, lit
, 0xff);
2011 gen_ins_h(ctx
, vc
, va
, rb
, islit
, lit
, 0xff);
2015 gen_ext_h(ctx
, vc
, va
, rb
, islit
, lit
, 0xff);
2023 vc
= dest_gpr(ctx
, rc
);
2024 vb
= load_gpr_lit(ctx
, rb
, lit
, islit
);
2025 va
= load_gpr(ctx
, ra
);
2029 tcg_gen_mul_i64(vc
, va
, vb
);
2030 tcg_gen_ext32s_i64(vc
, vc
);
2034 tcg_gen_mul_i64(vc
, va
, vb
);
2038 tmp
= tcg_temp_new();
2039 tcg_gen_mulu2_i64(tmp
, vc
, va
, vb
);
2044 tmp
= tcg_temp_new();
2045 tcg_gen_ext32s_i64(tmp
, va
);
2046 tcg_gen_ext32s_i64(vc
, vb
);
2047 tcg_gen_mul_i64(tmp
, tmp
, vc
);
2048 tcg_gen_ext32s_i64(vc
, tmp
);
2049 gen_helper_check_overflow(cpu_env
, vc
, tmp
);
2054 tmp
= tcg_temp_new();
2055 tmp2
= tcg_temp_new();
2056 tcg_gen_muls2_i64(vc
, tmp
, va
, vb
);
2057 tcg_gen_sari_i64(tmp2
, vc
, 63);
2058 gen_helper_check_overflow(cpu_env
, tmp
, tmp2
);
2060 tcg_temp_free(tmp2
);
2069 vc
= dest_fpr(ctx
, rc
);
2070 switch (fpfn
) { /* fn11 & 0x3F */
2074 t32
= tcg_temp_new_i32();
2075 va
= load_gpr(ctx
, ra
);
2076 tcg_gen_extrl_i64_i32(t32
, va
);
2077 gen_helper_memory_to_s(vc
, t32
);
2078 tcg_temp_free_i32(t32
);
2083 vb
= load_fpr(ctx
, rb
);
2084 gen_helper_sqrtf(vc
, cpu_env
, vb
);
2089 gen_sqrts(ctx
, rb
, rc
, fn11
);
2094 t32
= tcg_temp_new_i32();
2095 va
= load_gpr(ctx
, ra
);
2096 tcg_gen_extrl_i64_i32(t32
, va
);
2097 gen_helper_memory_to_f(vc
, t32
);
2098 tcg_temp_free_i32(t32
);
2103 va
= load_gpr(ctx
, ra
);
2104 tcg_gen_mov_i64(vc
, va
);
2109 vb
= load_fpr(ctx
, rb
);
2110 gen_helper_sqrtg(vc
, cpu_env
, vb
);
2115 gen_sqrtt(ctx
, rb
, rc
, fn11
);
2123 /* VAX floating point */
2124 /* XXX: rounding mode and trap are ignored (!) */
2125 vc
= dest_fpr(ctx
, rc
);
2126 vb
= load_fpr(ctx
, rb
);
2127 va
= load_fpr(ctx
, ra
);
2128 switch (fpfn
) { /* fn11 & 0x3F */
2131 gen_helper_addf(vc
, cpu_env
, va
, vb
);
2135 gen_helper_subf(vc
, cpu_env
, va
, vb
);
2139 gen_helper_mulf(vc
, cpu_env
, va
, vb
);
2143 gen_helper_divf(vc
, cpu_env
, va
, vb
);
2151 gen_helper_addg(vc
, cpu_env
, va
, vb
);
2155 gen_helper_subg(vc
, cpu_env
, va
, vb
);
2159 gen_helper_mulg(vc
, cpu_env
, va
, vb
);
2163 gen_helper_divg(vc
, cpu_env
, va
, vb
);
2167 gen_helper_cmpgeq(vc
, cpu_env
, va
, vb
);
2171 gen_helper_cmpglt(vc
, cpu_env
, va
, vb
);
2175 gen_helper_cmpgle(vc
, cpu_env
, va
, vb
);
2180 gen_helper_cvtgf(vc
, cpu_env
, vb
);
2189 gen_helper_cvtgq(vc
, cpu_env
, vb
);
2194 gen_helper_cvtqf(vc
, cpu_env
, vb
);
2199 gen_helper_cvtqg(vc
, cpu_env
, vb
);
2207 /* IEEE floating-point */
2208 switch (fpfn
) { /* fn11 & 0x3F */
2211 gen_adds(ctx
, ra
, rb
, rc
, fn11
);
2215 gen_subs(ctx
, ra
, rb
, rc
, fn11
);
2219 gen_muls(ctx
, ra
, rb
, rc
, fn11
);
2223 gen_divs(ctx
, ra
, rb
, rc
, fn11
);
2227 gen_addt(ctx
, ra
, rb
, rc
, fn11
);
2231 gen_subt(ctx
, ra
, rb
, rc
, fn11
);
2235 gen_mult(ctx
, ra
, rb
, rc
, fn11
);
2239 gen_divt(ctx
, ra
, rb
, rc
, fn11
);
2243 gen_cmptun(ctx
, ra
, rb
, rc
, fn11
);
2247 gen_cmpteq(ctx
, ra
, rb
, rc
, fn11
);
2251 gen_cmptlt(ctx
, ra
, rb
, rc
, fn11
);
2255 gen_cmptle(ctx
, ra
, rb
, rc
, fn11
);
2259 if (fn11
== 0x2AC || fn11
== 0x6AC) {
2261 gen_cvtst(ctx
, rb
, rc
, fn11
);
2264 gen_cvtts(ctx
, rb
, rc
, fn11
);
2270 gen_cvttq(ctx
, rb
, rc
, fn11
);
2275 gen_cvtqs(ctx
, rb
, rc
, fn11
);
2280 gen_cvtqt(ctx
, rb
, rc
, fn11
);
2292 vc
= dest_fpr(ctx
, rc
);
2293 vb
= load_fpr(ctx
, rb
);
2299 /* Special case CPYS as FNOP. */
2301 vc
= dest_fpr(ctx
, rc
);
2302 va
= load_fpr(ctx
, ra
);
2304 /* Special case CPYS as FMOV. */
2305 tcg_gen_mov_i64(vc
, va
);
2307 vb
= load_fpr(ctx
, rb
);
2308 gen_cpy_mask(vc
, va
, vb
, 0, 0x8000000000000000ULL
);
2314 vc
= dest_fpr(ctx
, rc
);
2315 vb
= load_fpr(ctx
, rb
);
2316 va
= load_fpr(ctx
, ra
);
2317 gen_cpy_mask(vc
, va
, vb
, 1, 0x8000000000000000ULL
);
2321 vc
= dest_fpr(ctx
, rc
);
2322 vb
= load_fpr(ctx
, rb
);
2323 va
= load_fpr(ctx
, ra
);
2324 gen_cpy_mask(vc
, va
, vb
, 0, 0xFFF0000000000000ULL
);
2328 va
= load_fpr(ctx
, ra
);
2329 gen_helper_store_fpcr(cpu_env
, va
);
2330 if (ctx
->tb_rm
== QUAL_RM_D
) {
2331 /* Re-do the copy of the rounding mode to fp_status
2332 the next time we use dynamic rounding. */
2338 va
= dest_fpr(ctx
, ra
);
2339 gen_helper_load_fpcr(va
, cpu_env
);
2343 gen_fcmov(ctx
, TCG_COND_EQ
, ra
, rb
, rc
);
2347 gen_fcmov(ctx
, TCG_COND_NE
, ra
, rb
, rc
);
2351 gen_fcmov(ctx
, TCG_COND_LT
, ra
, rb
, rc
);
2355 gen_fcmov(ctx
, TCG_COND_GE
, ra
, rb
, rc
);
2359 gen_fcmov(ctx
, TCG_COND_LE
, ra
, rb
, rc
);
2363 gen_fcmov(ctx
, TCG_COND_GT
, ra
, rb
, rc
);
2365 case 0x030: /* CVTQL */
2366 case 0x130: /* CVTQL/V */
2367 case 0x530: /* CVTQL/SV */
2369 vc
= dest_fpr(ctx
, rc
);
2370 vb
= load_fpr(ctx
, rb
);
2371 gen_helper_cvtql(vc
, cpu_env
, vb
);
2372 gen_fp_exc_raise(rc
, fn11
);
2380 switch ((uint16_t)disp16
) {
2391 tcg_gen_mb(TCG_MO_ALL
| TCG_BAR_SC
);
2395 tcg_gen_mb(TCG_MO_ST_ST
| TCG_BAR_SC
);
2407 va
= dest_gpr(ctx
, ra
);
2408 if (ctx
->base
.tb
->cflags
& CF_USE_ICOUNT
) {
2410 gen_helper_load_pcc(va
, cpu_env
);
2412 ret
= DISAS_PC_STALE
;
2414 gen_helper_load_pcc(va
, cpu_env
);
2442 /* HW_MFPR (PALcode) */
2443 #ifndef CONFIG_USER_ONLY
2444 REQUIRE_TB_FLAG(ENV_FLAG_PAL_MODE
);
2445 va
= dest_gpr(ctx
, ra
);
2446 ret
= gen_mfpr(ctx
, va
, insn
& 0xffff);
2453 /* JMP, JSR, RET, JSR_COROUTINE. These only differ by the branch
2454 prediction stack action, which of course we don't implement. */
2455 vb
= load_gpr(ctx
, rb
);
2456 tcg_gen_andi_i64(cpu_pc
, vb
, ~3);
2458 tcg_gen_movi_i64(ctx
->ir
[ra
], ctx
->base
.pc_next
);
2460 ret
= DISAS_PC_UPDATED
;
2464 /* HW_LD (PALcode) */
2465 #ifndef CONFIG_USER_ONLY
2466 REQUIRE_TB_FLAG(ENV_FLAG_PAL_MODE
);
2468 TCGv addr
= tcg_temp_new();
2469 vb
= load_gpr(ctx
, rb
);
2470 va
= dest_gpr(ctx
, ra
);
2472 tcg_gen_addi_i64(addr
, vb
, disp12
);
2473 switch ((insn
>> 12) & 0xF) {
2475 /* Longword physical access (hw_ldl/p) */
2476 tcg_gen_qemu_ld_i64(va
, addr
, MMU_PHYS_IDX
, MO_LESL
);
2479 /* Quadword physical access (hw_ldq/p) */
2480 tcg_gen_qemu_ld_i64(va
, addr
, MMU_PHYS_IDX
, MO_LEQ
);
2483 /* Longword physical access with lock (hw_ldl_l/p) */
2484 gen_qemu_ldl_l(va
, addr
, MMU_PHYS_IDX
);
2487 /* Quadword physical access with lock (hw_ldq_l/p) */
2488 gen_qemu_ldq_l(va
, addr
, MMU_PHYS_IDX
);
2491 /* Longword virtual PTE fetch (hw_ldl/v) */
2494 /* Quadword virtual PTE fetch (hw_ldq/v) */
2504 /* Longword virtual access (hw_ldl) */
2507 /* Quadword virtual access (hw_ldq) */
2510 /* Longword virtual access with protection check (hw_ldl/w) */
2511 tcg_gen_qemu_ld_i64(va
, addr
, MMU_KERNEL_IDX
, MO_LESL
);
2514 /* Quadword virtual access with protection check (hw_ldq/w) */
2515 tcg_gen_qemu_ld_i64(va
, addr
, MMU_KERNEL_IDX
, MO_LEQ
);
2518 /* Longword virtual access with alt access mode (hw_ldl/a)*/
2521 /* Quadword virtual access with alt access mode (hw_ldq/a) */
2524 /* Longword virtual access with alternate access mode and
2525 protection checks (hw_ldl/wa) */
2526 tcg_gen_qemu_ld_i64(va
, addr
, MMU_USER_IDX
, MO_LESL
);
2529 /* Quadword virtual access with alternate access mode and
2530 protection checks (hw_ldq/wa) */
2531 tcg_gen_qemu_ld_i64(va
, addr
, MMU_USER_IDX
, MO_LEQ
);
2534 tcg_temp_free(addr
);
2542 vc
= dest_gpr(ctx
, rc
);
2547 va
= load_fpr(ctx
, ra
);
2548 tcg_gen_mov_i64(vc
, va
);
2550 } else if (fn7
== 0x78) {
2554 t32
= tcg_temp_new_i32();
2555 va
= load_fpr(ctx
, ra
);
2556 gen_helper_s_to_memory(t32
, va
);
2557 tcg_gen_ext_i32_i64(vc
, t32
);
2558 tcg_temp_free_i32(t32
);
2562 vb
= load_gpr_lit(ctx
, rb
, lit
, islit
);
2568 tcg_gen_ext8s_i64(vc
, vb
);
2574 tcg_gen_ext16s_i64(vc
, vb
);
2581 tcg_gen_ctpop_i64(vc
, vb
);
2587 va
= load_gpr(ctx
, ra
);
2588 gen_helper_perr(vc
, va
, vb
);
2595 tcg_gen_clzi_i64(vc
, vb
, 64);
2602 tcg_gen_ctzi_i64(vc
, vb
, 64);
2609 gen_helper_unpkbw(vc
, vb
);
2616 gen_helper_unpkbl(vc
, vb
);
2623 gen_helper_pkwb(vc
, vb
);
2630 gen_helper_pklb(vc
, vb
);
2635 va
= load_gpr(ctx
, ra
);
2636 gen_helper_minsb8(vc
, va
, vb
);
2641 va
= load_gpr(ctx
, ra
);
2642 gen_helper_minsw4(vc
, va
, vb
);
2647 va
= load_gpr(ctx
, ra
);
2648 gen_helper_minub8(vc
, va
, vb
);
2653 va
= load_gpr(ctx
, ra
);
2654 gen_helper_minuw4(vc
, va
, vb
);
2659 va
= load_gpr(ctx
, ra
);
2660 gen_helper_maxub8(vc
, va
, vb
);
2665 va
= load_gpr(ctx
, ra
);
2666 gen_helper_maxuw4(vc
, va
, vb
);
2671 va
= load_gpr(ctx
, ra
);
2672 gen_helper_maxsb8(vc
, va
, vb
);
2677 va
= load_gpr(ctx
, ra
);
2678 gen_helper_maxsw4(vc
, va
, vb
);
2686 /* HW_MTPR (PALcode) */
2687 #ifndef CONFIG_USER_ONLY
2688 REQUIRE_TB_FLAG(ENV_FLAG_PAL_MODE
);
2689 vb
= load_gpr(ctx
, rb
);
2690 ret
= gen_mtpr(ctx
, vb
, insn
& 0xffff);
2697 /* HW_RET (PALcode) */
2698 #ifndef CONFIG_USER_ONLY
2699 REQUIRE_TB_FLAG(ENV_FLAG_PAL_MODE
);
2701 /* Pre-EV6 CPUs interpreted this as HW_REI, loading the return
2702 address from EXC_ADDR. This turns out to be useful for our
2703 emulation PALcode, so continue to accept it. */
2704 ctx
->lit
= vb
= tcg_temp_new();
2705 tcg_gen_ld_i64(vb
, cpu_env
, offsetof(CPUAlphaState
, exc_addr
));
2707 vb
= load_gpr(ctx
, rb
);
2709 tcg_gen_movi_i64(cpu_lock_addr
, -1);
2710 tmp
= tcg_temp_new();
2711 tcg_gen_movi_i64(tmp
, 0);
2712 st_flag_byte(tmp
, ENV_FLAG_RX_SHIFT
);
2713 tcg_gen_andi_i64(tmp
, vb
, 1);
2714 st_flag_byte(tmp
, ENV_FLAG_PAL_SHIFT
);
2716 tcg_gen_andi_i64(cpu_pc
, vb
, ~3);
2717 /* Allow interrupts to be recognized right away. */
2718 ret
= DISAS_PC_UPDATED_NOCHAIN
;
2725 /* HW_ST (PALcode) */
2726 #ifndef CONFIG_USER_ONLY
2727 REQUIRE_TB_FLAG(ENV_FLAG_PAL_MODE
);
2729 switch ((insn
>> 12) & 0xF) {
2731 /* Longword physical access */
2732 va
= load_gpr(ctx
, ra
);
2733 vb
= load_gpr(ctx
, rb
);
2734 tmp
= tcg_temp_new();
2735 tcg_gen_addi_i64(tmp
, vb
, disp12
);
2736 tcg_gen_qemu_st_i64(va
, tmp
, MMU_PHYS_IDX
, MO_LESL
);
2740 /* Quadword physical access */
2741 va
= load_gpr(ctx
, ra
);
2742 vb
= load_gpr(ctx
, rb
);
2743 tmp
= tcg_temp_new();
2744 tcg_gen_addi_i64(tmp
, vb
, disp12
);
2745 tcg_gen_qemu_st_i64(va
, tmp
, MMU_PHYS_IDX
, MO_LEQ
);
2749 /* Longword physical access with lock */
2750 ret
= gen_store_conditional(ctx
, ra
, rb
, disp12
,
2751 MMU_PHYS_IDX
, MO_LESL
);
2754 /* Quadword physical access with lock */
2755 ret
= gen_store_conditional(ctx
, ra
, rb
, disp12
,
2756 MMU_PHYS_IDX
, MO_LEQ
);
2759 /* Longword virtual access */
2762 /* Quadword virtual access */
2783 /* Longword virtual access with alternate access mode */
2786 /* Quadword virtual access with alternate access mode */
2802 gen_load_mem(ctx
, &gen_qemu_ldf
, ra
, rb
, disp16
, 1, 0);
2806 gen_load_mem(ctx
, &gen_qemu_ldg
, ra
, rb
, disp16
, 1, 0);
2810 gen_load_mem(ctx
, &gen_qemu_lds
, ra
, rb
, disp16
, 1, 0);
2814 gen_load_mem(ctx
, &tcg_gen_qemu_ld64
, ra
, rb
, disp16
, 1, 0);
2818 gen_store_mem(ctx
, &gen_qemu_stf
, ra
, rb
, disp16
, 1, 0);
2822 gen_store_mem(ctx
, &gen_qemu_stg
, ra
, rb
, disp16
, 1, 0);
2826 gen_store_mem(ctx
, &gen_qemu_sts
, ra
, rb
, disp16
, 1, 0);
2830 gen_store_mem(ctx
, &tcg_gen_qemu_st64
, ra
, rb
, disp16
, 1, 0);
2834 gen_load_mem(ctx
, &tcg_gen_qemu_ld32s
, ra
, rb
, disp16
, 0, 0);
2838 gen_load_mem(ctx
, &tcg_gen_qemu_ld64
, ra
, rb
, disp16
, 0, 0);
2842 gen_load_mem(ctx
, &gen_qemu_ldl_l
, ra
, rb
, disp16
, 0, 0);
2846 gen_load_mem(ctx
, &gen_qemu_ldq_l
, ra
, rb
, disp16
, 0, 0);
2850 gen_store_mem(ctx
, &tcg_gen_qemu_st32
, ra
, rb
, disp16
, 0, 0);
2854 gen_store_mem(ctx
, &tcg_gen_qemu_st64
, ra
, rb
, disp16
, 0, 0);
2858 ret
= gen_store_conditional(ctx
, ra
, rb
, disp16
,
2859 ctx
->mem_idx
, MO_LESL
);
2863 ret
= gen_store_conditional(ctx
, ra
, rb
, disp16
,
2864 ctx
->mem_idx
, MO_LEQ
);
2868 ret
= gen_bdirect(ctx
, ra
, disp21
);
2870 case 0x31: /* FBEQ */
2871 ret
= gen_fbcond(ctx
, TCG_COND_EQ
, ra
, disp21
);
2873 case 0x32: /* FBLT */
2874 ret
= gen_fbcond(ctx
, TCG_COND_LT
, ra
, disp21
);
2876 case 0x33: /* FBLE */
2877 ret
= gen_fbcond(ctx
, TCG_COND_LE
, ra
, disp21
);
2881 ret
= gen_bdirect(ctx
, ra
, disp21
);
2883 case 0x35: /* FBNE */
2884 ret
= gen_fbcond(ctx
, TCG_COND_NE
, ra
, disp21
);
2886 case 0x36: /* FBGE */
2887 ret
= gen_fbcond(ctx
, TCG_COND_GE
, ra
, disp21
);
2889 case 0x37: /* FBGT */
2890 ret
= gen_fbcond(ctx
, TCG_COND_GT
, ra
, disp21
);
2894 ret
= gen_bcond(ctx
, TCG_COND_EQ
, ra
, disp21
, 1);
2898 ret
= gen_bcond(ctx
, TCG_COND_EQ
, ra
, disp21
, 0);
2902 ret
= gen_bcond(ctx
, TCG_COND_LT
, ra
, disp21
, 0);
2906 ret
= gen_bcond(ctx
, TCG_COND_LE
, ra
, disp21
, 0);
2910 ret
= gen_bcond(ctx
, TCG_COND_NE
, ra
, disp21
, 1);
2914 ret
= gen_bcond(ctx
, TCG_COND_NE
, ra
, disp21
, 0);
2918 ret
= gen_bcond(ctx
, TCG_COND_GE
, ra
, disp21
, 0);
2922 ret
= gen_bcond(ctx
, TCG_COND_GT
, ra
, disp21
, 0);
2925 ret
= gen_invalid(ctx
);
2932 static int alpha_tr_init_disas_context(DisasContextBase
*dcbase
,
2933 CPUState
*cpu
, int max_insns
)
2935 DisasContext
*ctx
= container_of(dcbase
, DisasContext
, base
);
2936 CPUAlphaState
*env
= cpu
->env_ptr
;
2937 int64_t bound
, mask
;
2939 ctx
->tbflags
= ctx
->base
.tb
->flags
;
2940 ctx
->mem_idx
= cpu_mmu_index(env
, false);
2941 ctx
->implver
= env
->implver
;
2942 ctx
->amask
= env
->amask
;
2944 #ifdef CONFIG_USER_ONLY
2945 ctx
->ir
= cpu_std_ir
;
2947 ctx
->palbr
= env
->palbr
;
2948 ctx
->ir
= (ctx
->tbflags
& ENV_FLAG_PAL_MODE
? cpu_pal_ir
: cpu_std_ir
);
/* ??? Every TB begins with an unset rounding mode, to be initialized on
   the first fp insn of the TB.  Alternately we could define a proper
   default for every TB (e.g. QUAL_RM_N or QUAL_RM_D) and make sure
   to reset the FP_STATUS to that default at the end of any TB that
   changes the default.  We could even (gasp) dynamically figure out
   what default would be most efficient given the running program. */
2958 /* Similarly for flush-to-zero. */
2961 TCGV_UNUSED_I64(ctx
->zero
);
2962 TCGV_UNUSED_I64(ctx
->sink
);
2963 TCGV_UNUSED_I64(ctx
->lit
);
2965 /* Bound the number of insns to execute to those left on the page. */
2966 if (in_superpage(ctx
, ctx
->base
.pc_first
)) {
2969 mask
= TARGET_PAGE_MASK
;
2971 bound
= -(ctx
->base
.pc_first
| mask
) / 4;
2973 return MIN(max_insns
, bound
);
2976 static void alpha_tr_tb_start(DisasContextBase
*db
, CPUState
*cpu
)
2980 static void alpha_tr_insn_start(DisasContextBase
*dcbase
, CPUState
*cpu
)
2982 tcg_gen_insn_start(dcbase
->pc_next
);
2985 static bool alpha_tr_breakpoint_check(DisasContextBase
*dcbase
, CPUState
*cpu
,
2986 const CPUBreakpoint
*bp
)
2988 DisasContext
*ctx
= container_of(dcbase
, DisasContext
, base
);
2990 ctx
->base
.is_jmp
= gen_excp(ctx
, EXCP_DEBUG
, 0);
/* The address covered by the breakpoint must be included in
   [tb->pc, tb->pc + tb->size) in order for it to be
   properly cleared -- thus we increment the PC here so that
   the logic setting tb->size below does the right thing. */
2996 ctx
->base
.pc_next
+= 4;
3000 static void alpha_tr_translate_insn(DisasContextBase
*dcbase
, CPUState
*cpu
)
3002 DisasContext
*ctx
= container_of(dcbase
, DisasContext
, base
);
3003 CPUAlphaState
*env
= cpu
->env_ptr
;
3004 uint32_t insn
= cpu_ldl_code(env
, ctx
->base
.pc_next
);
3006 ctx
->base
.pc_next
+= 4;
3007 ctx
->base
.is_jmp
= translate_one(ctx
, insn
);
3009 free_context_temps(ctx
);
3010 translator_loop_temp_check(&ctx
->base
);
3013 static void alpha_tr_tb_stop(DisasContextBase
*dcbase
, CPUState
*cpu
)
3015 DisasContext
*ctx
= container_of(dcbase
, DisasContext
, base
);
3017 switch (ctx
->base
.is_jmp
) {
3018 case DISAS_NORETURN
:
3020 case DISAS_TOO_MANY
:
3021 if (use_goto_tb(ctx
, ctx
->base
.pc_next
)) {
3023 tcg_gen_movi_i64(cpu_pc
, ctx
->base
.pc_next
);
3024 tcg_gen_exit_tb((uintptr_t)ctx
->base
.tb
);
3027 case DISAS_PC_STALE
:
3028 tcg_gen_movi_i64(cpu_pc
, ctx
->base
.pc_next
);
3030 case DISAS_PC_UPDATED
:
3031 if (!use_exit_tb(ctx
)) {
3032 tcg_gen_lookup_and_goto_ptr(cpu_pc
);
3036 case DISAS_PC_UPDATED_NOCHAIN
:
3037 if (ctx
->base
.singlestep_enabled
) {
3038 gen_excp_1(EXCP_DEBUG
, 0);
3044 g_assert_not_reached();
3048 static void alpha_tr_disas_log(const DisasContextBase
*dcbase
, CPUState
*cpu
)
3050 qemu_log("IN: %s\n", lookup_symbol(dcbase
->pc_first
));
3051 log_target_disas(cpu
, dcbase
->pc_first
, dcbase
->tb
->size
, 1);
3054 static const TranslatorOps alpha_tr_ops
= {
3055 .init_disas_context
= alpha_tr_init_disas_context
,
3056 .tb_start
= alpha_tr_tb_start
,
3057 .insn_start
= alpha_tr_insn_start
,
3058 .breakpoint_check
= alpha_tr_breakpoint_check
,
3059 .translate_insn
= alpha_tr_translate_insn
,
3060 .tb_stop
= alpha_tr_tb_stop
,
3061 .disas_log
= alpha_tr_disas_log
,
3064 void gen_intermediate_code(CPUState
*cpu
, TranslationBlock
*tb
)
3067 translator_loop(&alpha_tr_ops
, &dc
.base
, cpu
, tb
);
3070 void restore_state_to_opc(CPUAlphaState
*env
, TranslationBlock
*tb
,