/*
 * Alpha emulation cpu translation for qemu.
 *
 * Copyright (c) 2007 Jocelyn Mayer
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"
#include "cpu.h"
#include "sysemu/cpus.h"
#include "disas/disas.h"
#include "qemu/host-utils.h"
#include "exec/exec-all.h"
#include "tcg-op.h"
#include "exec/cpu_ldst.h"
#include "exec/helper-proto.h"
#include "exec/helper-gen.h"
#include "trace-tcg.h"
#include "exec/log.h"
#undef ALPHA_DEBUG_DISAS
#define CONFIG_SOFTFLOAT_INLINE

#ifdef ALPHA_DEBUG_DISAS
#  define LOG_DISAS(...) qemu_log_mask(CPU_LOG_TB_IN_ASM, ## __VA_ARGS__)
#else
#  define LOG_DISAS(...) do { } while (0)
#endif
typedef struct DisasContext DisasContext;
struct DisasContext {
    struct TranslationBlock *tb;
    uint64_t pc;
#ifndef CONFIG_USER_ONLY
    uint64_t palbr;
#endif
    int mem_idx;

    /* Current rounding mode for this TB.  */
    int tb_rm;
    /* Current flush-to-zero setting for this TB.  */
    int tb_ftz;

    /* implver value for this CPU.  */
    int implver;

    /* The set of registers active in the current context.  */
    TCGv *ir;

    /* Temporaries for $31 and $f31 as source and destination.  */
    TCGv zero;
    TCGv sink;
    /* Temporary for immediate constants.  */
    TCGv lit;

    bool singlestep_enabled;
};
/* Return values from translate_one, indicating the state of the TB.
   Note that zero indicates that we are not exiting the TB.  */
typedef enum {
    NO_EXIT,

    /* We have emitted one or more goto_tb.  No fixup required.  */
    EXIT_GOTO_TB,

    /* We are not using a goto_tb (for whatever reason), but have updated
       the PC (for whatever reason), so there's no need to do it again on
       exiting the TB.  */
    EXIT_PC_UPDATED,

    /* We are exiting the TB, but have neither emitted a goto_tb, nor
       updated the PC for the next instruction to be executed.  */
    EXIT_PC_STALE,

    /* We are ending the TB with a noreturn function call, e.g. longjmp.
       No following code will be executed.  */
    EXIT_NORETURN,
} ExitStatus;
/* global register indexes */
static TCGv_env cpu_env;
static TCGv cpu_std_ir[31];
static TCGv cpu_fir[31];
static TCGv cpu_pc;
static TCGv cpu_lock_addr;
static TCGv cpu_lock_value;

#ifndef CONFIG_USER_ONLY
static TCGv cpu_pal_ir[31];
#endif

#include "exec/gen-icount.h"
void alpha_translate_init(void)
{
#define DEF_VAR(V)  { &cpu_##V, #V, offsetof(CPUAlphaState, V) }

    typedef struct { TCGv *var; const char *name; int ofs; } GlobalVar;
    static const GlobalVar vars[] = {
        DEF_VAR(pc),
        DEF_VAR(lock_addr),
        DEF_VAR(lock_value),
    };

#undef DEF_VAR
    /* Use the symbolic register names that match the disassembler.  */
    static const char greg_names[31][4] = {
        "v0", "t0", "t1", "t2", "t3", "t4", "t5", "t6",
        "t7", "s0", "s1", "s2", "s3", "s4", "s5", "fp",
        "a0", "a1", "a2", "a3", "a4", "a5", "t8", "t9",
        "t10", "t11", "ra", "t12", "at", "gp", "sp"
    };
    static const char freg_names[31][4] = {
        "f0", "f1", "f2", "f3", "f4", "f5", "f6", "f7",
        "f8", "f9", "f10", "f11", "f12", "f13", "f14", "f15",
        "f16", "f17", "f18", "f19", "f20", "f21", "f22", "f23",
        "f24", "f25", "f26", "f27", "f28", "f29", "f30"
    };
#ifndef CONFIG_USER_ONLY
    static const char shadow_names[8][8] = {
        "pal_t7", "pal_s0", "pal_s1", "pal_s2",
        "pal_s3", "pal_s4", "pal_s5", "pal_t11"
    };
#endif
    static bool done_init = 0;
    int i;

    if (done_init) {
        return;
    }
    done_init = 1;

    cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");
    tcg_ctx.tcg_env = cpu_env;

    for (i = 0; i < 31; i++) {
        cpu_std_ir[i] = tcg_global_mem_new_i64(cpu_env,
                                               offsetof(CPUAlphaState, ir[i]),
                                               greg_names[i]);
    }

    for (i = 0; i < 31; i++) {
        cpu_fir[i] = tcg_global_mem_new_i64(cpu_env,
                                            offsetof(CPUAlphaState, fir[i]),
                                            freg_names[i]);
    }

#ifndef CONFIG_USER_ONLY
    memcpy(cpu_pal_ir, cpu_std_ir, sizeof(cpu_pal_ir));
    for (i = 0; i < 8; i++) {
        int r = (i == 7 ? 25 : i + 8);
        cpu_pal_ir[r] = tcg_global_mem_new_i64(cpu_env,
                                               offsetof(CPUAlphaState,
                                                        shadow[i]),
                                               shadow_names[i]);
    }
#endif

    for (i = 0; i < ARRAY_SIZE(vars); ++i) {
        const GlobalVar *v = &vars[i];
        *v->var = tcg_global_mem_new_i64(cpu_env, v->ofs, v->name);
    }
}
static TCGv load_zero(DisasContext *ctx)
{
    if (TCGV_IS_UNUSED_I64(ctx->zero)) {
        ctx->zero = tcg_const_i64(0);
    }
    return ctx->zero;
}

static TCGv dest_sink(DisasContext *ctx)
{
    if (TCGV_IS_UNUSED_I64(ctx->sink)) {
        ctx->sink = tcg_temp_new();
    }
    return ctx->sink;
}
static void free_context_temps(DisasContext *ctx)
{
    if (!TCGV_IS_UNUSED_I64(ctx->sink)) {
        tcg_gen_discard_i64(ctx->sink);
        tcg_temp_free(ctx->sink);
        TCGV_UNUSED_I64(ctx->sink);
    }
    if (!TCGV_IS_UNUSED_I64(ctx->zero)) {
        tcg_temp_free(ctx->zero);
        TCGV_UNUSED_I64(ctx->zero);
    }
    if (!TCGV_IS_UNUSED_I64(ctx->lit)) {
        tcg_temp_free(ctx->lit);
        TCGV_UNUSED_I64(ctx->lit);
    }
}
static TCGv load_gpr(DisasContext *ctx, unsigned reg)
{
    if (likely(reg < 31)) {
        return ctx->ir[reg];
    } else {
        return load_zero(ctx);
    }
}

static TCGv load_gpr_lit(DisasContext *ctx, unsigned reg,
                         uint8_t lit, bool islit)
{
    if (islit) {
        ctx->lit = tcg_const_i64(lit);
        return ctx->lit;
    } else if (likely(reg < 31)) {
        return ctx->ir[reg];
    } else {
        return load_zero(ctx);
    }
}

static TCGv dest_gpr(DisasContext *ctx, unsigned reg)
{
    if (likely(reg < 31)) {
        return ctx->ir[reg];
    } else {
        return dest_sink(ctx);
    }
}

static TCGv load_fpr(DisasContext *ctx, unsigned reg)
{
    if (likely(reg < 31)) {
        return cpu_fir[reg];
    } else {
        return load_zero(ctx);
    }
}

static TCGv dest_fpr(DisasContext *ctx, unsigned reg)
{
    if (likely(reg < 31)) {
        return cpu_fir[reg];
    } else {
        return dest_sink(ctx);
    }
}
static void gen_excp_1(int exception, int error_code)
{
    TCGv_i32 tmp1, tmp2;

    tmp1 = tcg_const_i32(exception);
    tmp2 = tcg_const_i32(error_code);
    gen_helper_excp(cpu_env, tmp1, tmp2);
    tcg_temp_free_i32(tmp2);
    tcg_temp_free_i32(tmp1);
}
static ExitStatus gen_excp(DisasContext *ctx, int exception, int error_code)
{
    tcg_gen_movi_i64(cpu_pc, ctx->pc);
    gen_excp_1(exception, error_code);
    return EXIT_NORETURN;
}

static inline ExitStatus gen_invalid(DisasContext *ctx)
{
    return gen_excp(ctx, EXCP_OPCDEC, 0);
}
static inline void gen_qemu_ldf(TCGv t0, TCGv t1, int flags)
{
    TCGv_i32 tmp32 = tcg_temp_new_i32();
    tcg_gen_qemu_ld_i32(tmp32, t1, flags, MO_LEUL);
    gen_helper_memory_to_f(t0, tmp32);
    tcg_temp_free_i32(tmp32);
}

static inline void gen_qemu_ldg(TCGv t0, TCGv t1, int flags)
{
    TCGv tmp = tcg_temp_new();
    tcg_gen_qemu_ld_i64(tmp, t1, flags, MO_LEQ);
    gen_helper_memory_to_g(t0, tmp);
    tcg_temp_free(tmp);
}

static inline void gen_qemu_lds(TCGv t0, TCGv t1, int flags)
{
    TCGv_i32 tmp32 = tcg_temp_new_i32();
    tcg_gen_qemu_ld_i32(tmp32, t1, flags, MO_LEUL);
    gen_helper_memory_to_s(t0, tmp32);
    tcg_temp_free_i32(tmp32);
}

static inline void gen_qemu_ldl_l(TCGv t0, TCGv t1, int flags)
{
    tcg_gen_qemu_ld_i64(t0, t1, flags, MO_LESL);
    tcg_gen_mov_i64(cpu_lock_addr, t1);
    tcg_gen_mov_i64(cpu_lock_value, t0);
}

static inline void gen_qemu_ldq_l(TCGv t0, TCGv t1, int flags)
{
    tcg_gen_qemu_ld_i64(t0, t1, flags, MO_LEQ);
    tcg_gen_mov_i64(cpu_lock_addr, t1);
    tcg_gen_mov_i64(cpu_lock_value, t0);
}
static inline void gen_load_mem(DisasContext *ctx,
                                void (*tcg_gen_qemu_load)(TCGv t0, TCGv t1,
                                                          int flags),
                                int ra, int rb, int32_t disp16, bool fp,
                                bool clear)
{
    TCGv tmp, addr, va;

    /* LDQ_U with ra $31 is UNOP.  Other various loads are forms of
       prefetches, which we can treat as nops.  No worries about
       missed exceptions here.  */
    if (unlikely(ra == 31)) {
        return;
    }

    tmp = tcg_temp_new();
    addr = load_gpr(ctx, rb);

    if (disp16) {
        tcg_gen_addi_i64(tmp, addr, disp16);
        addr = tmp;
    }
    if (clear) {
        tcg_gen_andi_i64(tmp, addr, ~0x7);
        addr = tmp;
    }

    va = (fp ? cpu_fir[ra] : ctx->ir[ra]);
    tcg_gen_qemu_load(va, addr, ctx->mem_idx);

    tcg_temp_free(tmp);
}
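/* Note that the "clear" form above (used by LDQ_U) masks the low three
   bits of the effective address, e.g. turning an unaligned address such
   as 0x1003 into the aligned quadword address 0x1000.  */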
static inline void gen_qemu_stf(TCGv t0, TCGv t1, int flags)
{
    TCGv_i32 tmp32 = tcg_temp_new_i32();
    gen_helper_f_to_memory(tmp32, t0);
    tcg_gen_qemu_st_i32(tmp32, t1, flags, MO_LEUL);
    tcg_temp_free_i32(tmp32);
}

static inline void gen_qemu_stg(TCGv t0, TCGv t1, int flags)
{
    TCGv tmp = tcg_temp_new();
    gen_helper_g_to_memory(tmp, t0);
    tcg_gen_qemu_st_i64(tmp, t1, flags, MO_LEQ);
    tcg_temp_free(tmp);
}

static inline void gen_qemu_sts(TCGv t0, TCGv t1, int flags)
{
    TCGv_i32 tmp32 = tcg_temp_new_i32();
    gen_helper_s_to_memory(tmp32, t0);
    tcg_gen_qemu_st_i32(tmp32, t1, flags, MO_LEUL);
    tcg_temp_free_i32(tmp32);
}
static inline void gen_store_mem(DisasContext *ctx,
                                 void (*tcg_gen_qemu_store)(TCGv t0, TCGv t1,
                                                            int flags),
                                 int ra, int rb, int32_t disp16, bool fp,
                                 bool clear)
{
    TCGv tmp, addr, va;

    tmp = tcg_temp_new();
    addr = load_gpr(ctx, rb);

    if (disp16) {
        tcg_gen_addi_i64(tmp, addr, disp16);
        addr = tmp;
    }
    if (clear) {
        tcg_gen_andi_i64(tmp, addr, ~0x7);
        addr = tmp;
    }

    va = (fp ? load_fpr(ctx, ra) : load_gpr(ctx, ra));
    tcg_gen_qemu_store(va, addr, ctx->mem_idx);

    tcg_temp_free(tmp);
}
static ExitStatus gen_store_conditional(DisasContext *ctx, int ra, int rb,
                                        int32_t disp16, int mem_idx,
                                        TCGMemOp op)
{
    TCGLabel *lab_fail, *lab_done;
    TCGv addr, val;

    addr = tcg_temp_new_i64();
    tcg_gen_addi_i64(addr, load_gpr(ctx, rb), disp16);
    free_context_temps(ctx);

    lab_fail = gen_new_label();
    lab_done = gen_new_label();
    tcg_gen_brcond_i64(TCG_COND_NE, addr, cpu_lock_addr, lab_fail);
    tcg_temp_free_i64(addr);

    val = tcg_temp_new_i64();
    tcg_gen_atomic_cmpxchg_i64(val, cpu_lock_addr, cpu_lock_value,
                               load_gpr(ctx, ra), mem_idx, op);
    free_context_temps(ctx);

    if (ra != 31) {
        tcg_gen_setcond_i64(TCG_COND_EQ, ctx->ir[ra], val, cpu_lock_value);
    }
    tcg_temp_free_i64(val);
    tcg_gen_br(lab_done);

    gen_set_label(lab_fail);
    if (ra != 31) {
        tcg_gen_movi_i64(ctx->ir[ra], 0);
    }

    gen_set_label(lab_done);
    tcg_gen_movi_i64(cpu_lock_addr, -1);
    return NO_EXIT;
}
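/* The sequence above emulates Alpha's LDx_L/STx_C pair with a
   compare-and-swap: the conditional store succeeds only if the computed
   address still matches cpu_lock_addr and memory still holds the value
   observed by the earlier load-locked, in which case the cmpxchg commits
   the new value atomically.  */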
static bool in_superpage(DisasContext *ctx, int64_t addr)
{
#ifndef CONFIG_USER_ONLY
    return ((ctx->tb->flags & TB_FLAGS_USER_MODE) == 0
            && addr >> TARGET_VIRT_ADDR_SPACE_BITS == -1
            && ((addr >> 41) & 3) == 2);
#else
    return false;
#endif
}
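/* Worked example (assuming the usual 43-bit virtual address space): a
   kseg address such as 0xfffffc0000000000 sign-extends to -1 above
   TARGET_VIRT_ADDR_SPACE_BITS and has ((addr >> 41) & 3) == 2, so it is
   recognized as residing in the superpage.  */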
static bool use_goto_tb(DisasContext *ctx, uint64_t dest)
{
    /* Suppress goto_tb in the case of single-stepping and IO.  */
    if ((ctx->tb->cflags & CF_LAST_IO)
        || ctx->singlestep_enabled || singlestep) {
        return false;
    }
#ifndef CONFIG_USER_ONLY
    /* If the destination is in the superpage, the page perms can't change.  */
    if (in_superpage(ctx, dest)) {
        return true;
    }
#endif
    /* Check for the dest on the same page as the start of the TB.  */
    return ((ctx->tb->pc ^ dest) & TARGET_PAGE_MASK) == 0;
}
static ExitStatus gen_bdirect(DisasContext *ctx, int ra, int32_t disp)
{
    uint64_t dest = ctx->pc + (disp << 2);

    if (ra != 31) {
        tcg_gen_movi_i64(ctx->ir[ra], ctx->pc);
    }

    /* Notice branch-to-next; used to initialize RA with the PC.  */
    if (disp == 0) {
        return 0;
    } else if (use_goto_tb(ctx, dest)) {
        tcg_gen_goto_tb(0);
        tcg_gen_movi_i64(cpu_pc, dest);
        tcg_gen_exit_tb((uintptr_t)ctx->tb);
        return EXIT_GOTO_TB;
    } else {
        tcg_gen_movi_i64(cpu_pc, dest);
        return EXIT_PC_UPDATED;
    }
}
static ExitStatus gen_bcond_internal(DisasContext *ctx, TCGCond cond,
                                     TCGv cmp, int32_t disp)
{
    uint64_t dest = ctx->pc + (disp << 2);
    TCGLabel *lab_true = gen_new_label();

    if (use_goto_tb(ctx, dest)) {
        tcg_gen_brcondi_i64(cond, cmp, 0, lab_true);

        tcg_gen_goto_tb(0);
        tcg_gen_movi_i64(cpu_pc, ctx->pc);
        tcg_gen_exit_tb((uintptr_t)ctx->tb);

        gen_set_label(lab_true);
        tcg_gen_goto_tb(1);
        tcg_gen_movi_i64(cpu_pc, dest);
        tcg_gen_exit_tb((uintptr_t)ctx->tb + 1);

        return EXIT_GOTO_TB;
    } else {
        TCGv_i64 z = tcg_const_i64(0);
        TCGv_i64 d = tcg_const_i64(dest);
        TCGv_i64 p = tcg_const_i64(ctx->pc);

        tcg_gen_movcond_i64(cond, cpu_pc, cmp, z, d, p);

        tcg_temp_free_i64(z);
        tcg_temp_free_i64(d);
        tcg_temp_free_i64(p);
        return EXIT_PC_UPDATED;
    }
}
static ExitStatus gen_bcond(DisasContext *ctx, TCGCond cond, int ra,
                            int32_t disp, int mask)
{
    TCGv cmp_tmp;

    if (mask) {
        cmp_tmp = tcg_temp_new();
        tcg_gen_andi_i64(cmp_tmp, load_gpr(ctx, ra), 1);
    } else {
        cmp_tmp = load_gpr(ctx, ra);
    }

    return gen_bcond_internal(ctx, cond, cmp_tmp, disp);
}
/* Fold -0.0 for comparison with COND.  */

static void gen_fold_mzero(TCGCond cond, TCGv dest, TCGv src)
{
    uint64_t mzero = 1ull << 63;

    switch (cond) {
    case TCG_COND_LE:
    case TCG_COND_GT:
        /* For <= or >, the -0.0 value directly compares the way we want.  */
        tcg_gen_mov_i64(dest, src);
        break;

    case TCG_COND_EQ:
    case TCG_COND_NE:
        /* For == or !=, we can simply mask off the sign bit and compare.  */
        tcg_gen_andi_i64(dest, src, mzero - 1);
        break;

    case TCG_COND_GE:
    case TCG_COND_LT:
        /* For >= or <, map -0.0 to +0.0 via comparison and mask.  */
        tcg_gen_setcondi_i64(TCG_COND_NE, dest, src, mzero);
        tcg_gen_neg_i64(dest, dest);
        tcg_gen_and_i64(dest, dest, src);
        break;

    default:
        abort();
    }
}
static ExitStatus gen_fbcond(DisasContext *ctx, TCGCond cond, int ra,
                             int32_t disp)
{
    TCGv cmp_tmp = tcg_temp_new();
    gen_fold_mzero(cond, cmp_tmp, load_fpr(ctx, ra));
    return gen_bcond_internal(ctx, cond, cmp_tmp, disp);
}
static void gen_fcmov(DisasContext *ctx, TCGCond cond, int ra, int rb, int rc)
{
    TCGv va, vb, z;

    z = load_zero(ctx);
    vb = load_fpr(ctx, rb);
    va = tcg_temp_new();
    gen_fold_mzero(cond, va, load_fpr(ctx, ra));

    tcg_gen_movcond_i64(cond, dest_fpr(ctx, rc), va, z, vb, load_fpr(ctx, rc));

    tcg_temp_free(va);
}
#define QUAL_RM_N       0x080   /* Round mode nearest even */
#define QUAL_RM_C       0x000   /* Round mode chopped */
#define QUAL_RM_M       0x040   /* Round mode minus infinity */
#define QUAL_RM_D       0x0c0   /* Round mode dynamic */
#define QUAL_RM_MASK    0x0c0

#define QUAL_U          0x100   /* Underflow enable (fp output) */
#define QUAL_V          0x100   /* Overflow enable (int output) */
#define QUAL_S          0x400   /* Software completion enable */
#define QUAL_I          0x200   /* Inexact detection enable */
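/* For example, an insn qualified /SUI with dynamic rounding would carry
   QUAL_S | QUAL_U | QUAL_I | QUAL_RM_D = 0x400 | 0x100 | 0x200 | 0x0c0
   = 0x7c0 in the fn11 bits above the 6-bit function code.  */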
static void gen_qual_roundmode(DisasContext *ctx, int fn11)
{
    TCGv_i32 tmp;

    fn11 &= QUAL_RM_MASK;
    if (fn11 == ctx->tb_rm) {
        return;
    }
    ctx->tb_rm = fn11;

    tmp = tcg_temp_new_i32();
    switch (fn11) {
    case QUAL_RM_N:
        tcg_gen_movi_i32(tmp, float_round_nearest_even);
        break;
    case QUAL_RM_C:
        tcg_gen_movi_i32(tmp, float_round_to_zero);
        break;
    case QUAL_RM_M:
        tcg_gen_movi_i32(tmp, float_round_down);
        break;
    case QUAL_RM_D:
        tcg_gen_ld8u_i32(tmp, cpu_env,
                         offsetof(CPUAlphaState, fpcr_dyn_round));
        break;
    }

#if defined(CONFIG_SOFTFLOAT_INLINE)
    /* ??? The "fpu/softfloat.h" interface is to call set_float_rounding_mode.
       With CONFIG_SOFTFLOAT that expands to an out-of-line call that just
       sets the one field.  */
    tcg_gen_st8_i32(tmp, cpu_env,
                    offsetof(CPUAlphaState, fp_status.float_rounding_mode));
#else
    gen_helper_setroundmode(tmp);
#endif

    tcg_temp_free_i32(tmp);
}
static void gen_qual_flushzero(DisasContext *ctx, int fn11)
{
    TCGv_i32 tmp;

    fn11 &= QUAL_U;
    if (fn11 == ctx->tb_ftz) {
        return;
    }
    ctx->tb_ftz = fn11;

    tmp = tcg_temp_new_i32();
    if (fn11) {
        /* Underflow is enabled, use the FPCR setting.  */
        tcg_gen_ld8u_i32(tmp, cpu_env,
                         offsetof(CPUAlphaState, fpcr_flush_to_zero));
    } else {
        /* Underflow is disabled, force flush-to-zero.  */
        tcg_gen_movi_i32(tmp, 1);
    }

#if defined(CONFIG_SOFTFLOAT_INLINE)
    tcg_gen_st8_i32(tmp, cpu_env,
                    offsetof(CPUAlphaState, fp_status.flush_to_zero));
#else
    gen_helper_setflushzero(tmp);
#endif

    tcg_temp_free_i32(tmp);
}
static TCGv gen_ieee_input(DisasContext *ctx, int reg, int fn11, int is_cmp)
{
    TCGv val;

    if (unlikely(reg == 31)) {
        val = load_zero(ctx);
    } else {
        val = cpu_fir[reg];
        if ((fn11 & QUAL_S) == 0) {
            if (is_cmp) {
                gen_helper_ieee_input_cmp(cpu_env, val);
            } else {
                gen_helper_ieee_input(cpu_env, val);
            }
        } else {
#ifndef CONFIG_USER_ONLY
            /* In system mode, raise exceptions for denormals like real
               hardware.  In user mode, proceed as if the OS completion
               handler is handling the denormal as per spec.  */
            gen_helper_ieee_input_s(cpu_env, val);
#endif
        }
    }
    return val;
}
static void gen_fp_exc_raise(int rc, int fn11)
{
    /* ??? We ought to be able to do something with imprecise exceptions.
       E.g. notice we're still in the trap shadow of something within the
       TB and do not generate the code to signal the exception; end the TB
       when an exception is forced to arrive, either by consumption of a
       register value or TRAPB or EXCB.  */

    uint32_t ignore = 0;
    TCGv_i32 reg, ign;

    if (!(fn11 & QUAL_U)) {
        /* Note that QUAL_U == QUAL_V, so ignore either.  */
        ignore |= FPCR_UNF | FPCR_IOV;
    }
    if (!(fn11 & QUAL_I)) {
        ignore |= FPCR_INE;
    }
    ign = tcg_const_i32(ignore);

    /* ??? Pass in the regno of the destination so that the helper can
       set EXC_MASK, which contains a bitmask of destination registers
       that have caused arithmetic traps.  A simple userspace emulation
       does not require this.  We do need it for a guest kernel's entArith,
       or if we were to do something clever with imprecise exceptions.  */
    reg = tcg_const_i32(rc + 32);
    if (fn11 & QUAL_S) {
        gen_helper_fp_exc_raise_s(cpu_env, ign, reg);
    } else {
        gen_helper_fp_exc_raise(cpu_env, ign, reg);
    }

    tcg_temp_free_i32(reg);
    tcg_temp_free_i32(ign);
}
static void gen_cvtlq(TCGv vc, TCGv vb)
{
    TCGv tmp = tcg_temp_new();

    /* The arithmetic right shift here, plus the sign-extended mask below
       yields a sign-extended result without an explicit ext32s_i64.  */
    tcg_gen_sari_i64(tmp, vb, 32);
    tcg_gen_shri_i64(vc, vb, 29);
    tcg_gen_andi_i64(tmp, tmp, (int32_t)0xc0000000);
    tcg_gen_andi_i64(vc, vc, 0x3fffffff);
    tcg_gen_or_i64(vc, vc, tmp);

    tcg_temp_free(tmp);
}
static void gen_ieee_arith2(DisasContext *ctx,
                            void (*helper)(TCGv, TCGv_ptr, TCGv),
                            int rb, int rc, int fn11)
{
    TCGv vb;

    gen_qual_roundmode(ctx, fn11);
    gen_qual_flushzero(ctx, fn11);

    vb = gen_ieee_input(ctx, rb, fn11, 0);
    helper(dest_fpr(ctx, rc), cpu_env, vb);

    gen_fp_exc_raise(rc, fn11);
}

#define IEEE_ARITH2(name)                                       \
static inline void glue(gen_, name)(DisasContext *ctx,          \
                                    int rb, int rc, int fn11)   \
{                                                               \
    gen_ieee_arith2(ctx, gen_helper_##name, rb, rc, fn11);      \
}
IEEE_ARITH2(sqrts)
IEEE_ARITH2(sqrtt)
IEEE_ARITH2(cvtst)
IEEE_ARITH2(cvtts)
static void gen_cvttq(DisasContext *ctx, int rb, int rc, int fn11)
{
    TCGv vb, vc;

    /* No need to set flushzero, since we have an integer output.  */
    vb = gen_ieee_input(ctx, rb, fn11, 0);
    vc = dest_fpr(ctx, rc);

    /* Almost all integer conversions use cropped rounding;
       special case that.  */
    if ((fn11 & QUAL_RM_MASK) == QUAL_RM_C) {
        gen_helper_cvttq_c(vc, cpu_env, vb);
    } else {
        gen_qual_roundmode(ctx, fn11);
        gen_helper_cvttq(vc, cpu_env, vb);
    }
    gen_fp_exc_raise(rc, fn11);
}
static void gen_ieee_intcvt(DisasContext *ctx,
                            void (*helper)(TCGv, TCGv_ptr, TCGv),
                            int rb, int rc, int fn11)
{
    TCGv vb, vc;

    gen_qual_roundmode(ctx, fn11);
    vb = load_fpr(ctx, rb);
    vc = dest_fpr(ctx, rc);

    /* The only exception that can be raised by integer conversion
       is inexact.  Thus we only need to worry about exceptions when
       inexact handling is requested.  */
    if (fn11 & QUAL_I) {
        helper(vc, cpu_env, vb);
        gen_fp_exc_raise(rc, fn11);
    } else {
        helper(vc, cpu_env, vb);
    }
}

#define IEEE_INTCVT(name)                                       \
static inline void glue(gen_, name)(DisasContext *ctx,          \
                                    int rb, int rc, int fn11)   \
{                                                               \
    gen_ieee_intcvt(ctx, gen_helper_##name, rb, rc, fn11);      \
}
IEEE_INTCVT(cvtqs)
IEEE_INTCVT(cvtqt)
static void gen_cpy_mask(TCGv vc, TCGv va, TCGv vb, bool inv_a, uint64_t mask)
{
    TCGv vmask = tcg_const_i64(mask);
    TCGv tmp = tcg_temp_new_i64();

    if (inv_a) {
        tcg_gen_andc_i64(tmp, vmask, va);
    } else {
        tcg_gen_and_i64(tmp, va, vmask);
    }

    tcg_gen_andc_i64(vc, vb, vmask);
    tcg_gen_or_i64(vc, vc, tmp);

    tcg_temp_free(vmask);
    tcg_temp_free(tmp);
}
static void gen_ieee_arith3(DisasContext *ctx,
                            void (*helper)(TCGv, TCGv_ptr, TCGv, TCGv),
                            int ra, int rb, int rc, int fn11)
{
    TCGv va, vb, vc;

    gen_qual_roundmode(ctx, fn11);
    gen_qual_flushzero(ctx, fn11);

    va = gen_ieee_input(ctx, ra, fn11, 0);
    vb = gen_ieee_input(ctx, rb, fn11, 0);
    vc = dest_fpr(ctx, rc);
    helper(vc, cpu_env, va, vb);

    gen_fp_exc_raise(rc, fn11);
}

#define IEEE_ARITH3(name)                                               \
static inline void glue(gen_, name)(DisasContext *ctx,                  \
                                    int ra, int rb, int rc, int fn11)   \
{                                                                       \
    gen_ieee_arith3(ctx, gen_helper_##name, ra, rb, rc, fn11);          \
}
IEEE_ARITH3(adds)
IEEE_ARITH3(subs)
IEEE_ARITH3(muls)
IEEE_ARITH3(divs)
IEEE_ARITH3(addt)
IEEE_ARITH3(subt)
IEEE_ARITH3(mult)
IEEE_ARITH3(divt)
static void gen_ieee_compare(DisasContext *ctx,
                             void (*helper)(TCGv, TCGv_ptr, TCGv, TCGv),
                             int ra, int rb, int rc, int fn11)
{
    TCGv va, vb, vc;

    va = gen_ieee_input(ctx, ra, fn11, 1);
    vb = gen_ieee_input(ctx, rb, fn11, 1);
    vc = dest_fpr(ctx, rc);
    helper(vc, cpu_env, va, vb);

    gen_fp_exc_raise(rc, fn11);
}

#define IEEE_CMP3(name)                                                 \
static inline void glue(gen_, name)(DisasContext *ctx,                  \
                                    int ra, int rb, int rc, int fn11)   \
{                                                                       \
    gen_ieee_compare(ctx, gen_helper_##name, ra, rb, rc, fn11);         \
}
IEEE_CMP3(cmptun)
IEEE_CMP3(cmpteq)
IEEE_CMP3(cmptlt)
IEEE_CMP3(cmptle)
static inline uint64_t zapnot_mask(uint8_t lit)
{
    uint64_t mask = 0;
    int i;

    for (i = 0; i < 8; ++i) {
        if ((lit >> i) & 1) {
            mask |= 0xffull << (i * 8);
        }
    }
    return mask;
}
/* Implement zapnot with an immediate operand, which expands to some
   form of immediate AND.  This is a basic building block in the
   definition of many of the other byte manipulation instructions.  */
static void gen_zapnoti(TCGv dest, TCGv src, uint8_t lit)
{
    switch (lit) {
    case 0x00:
        tcg_gen_movi_i64(dest, 0);
        break;
    case 0x01:
        tcg_gen_ext8u_i64(dest, src);
        break;
    case 0x03:
        tcg_gen_ext16u_i64(dest, src);
        break;
    case 0x0f:
        tcg_gen_ext32u_i64(dest, src);
        break;
    case 0xff:
        tcg_gen_mov_i64(dest, src);
        break;
    default:
        tcg_gen_andi_i64(dest, src, zapnot_mask(lit));
        break;
    }
}
/* EXTWH, EXTLH, EXTQH */
static void gen_ext_h(DisasContext *ctx, TCGv vc, TCGv va, int rb, bool islit,
                      uint8_t lit, uint8_t byte_mask)
{
    if (islit) {
        int pos = (64 - lit * 8) & 0x3f;
        int len = cto32(byte_mask) * 8;
        if (pos < len) {
            tcg_gen_deposit_z_i64(vc, va, pos, len - pos);
        } else {
            tcg_gen_movi_i64(vc, 0);
        }
    } else {
        TCGv tmp = tcg_temp_new();
        tcg_gen_shli_i64(tmp, load_gpr(ctx, rb), 3);
        tcg_gen_neg_i64(tmp, tmp);
        tcg_gen_andi_i64(tmp, tmp, 0x3f);
        tcg_gen_shl_i64(vc, va, tmp);
        tcg_temp_free(tmp);
    }
    gen_zapnoti(vc, vc, byte_mask);
}
/* EXTBL, EXTWL, EXTLL, EXTQL */
static void gen_ext_l(DisasContext *ctx, TCGv vc, TCGv va, int rb, bool islit,
                      uint8_t lit, uint8_t byte_mask)
{
    if (islit) {
        int pos = (lit & 7) * 8;
        int len = cto32(byte_mask) * 8;
        if (pos + len >= 64) {
            len = 64 - pos;
        }
        tcg_gen_extract_i64(vc, va, pos, len);
    } else {
        TCGv tmp = tcg_temp_new();
        tcg_gen_andi_i64(tmp, load_gpr(ctx, rb), 7);
        tcg_gen_shli_i64(tmp, tmp, 3);
        tcg_gen_shr_i64(vc, va, tmp);
        tcg_temp_free(tmp);
        gen_zapnoti(vc, vc, byte_mask);
    }
}
/* INSWH, INSLH, INSQH */
static void gen_ins_h(DisasContext *ctx, TCGv vc, TCGv va, int rb, bool islit,
                      uint8_t lit, uint8_t byte_mask)
{
    if (islit) {
        int pos = 64 - (lit & 7) * 8;
        int len = cto32(byte_mask) * 8;
        if (pos < len) {
            tcg_gen_extract_i64(vc, va, pos, len - pos);
        } else {
            tcg_gen_movi_i64(vc, 0);
        }
    } else {
        TCGv tmp = tcg_temp_new();
        TCGv shift = tcg_temp_new();

        /* The instruction description has us left-shift the byte mask
           and extract bits <15:8> and apply that zap at the end.  This
           is equivalent to simply performing the zap first and shifting
           afterward.  */
        gen_zapnoti(tmp, va, byte_mask);

        /* If (B & 7) == 0, we need to shift by 64 and leave a zero.  Do this
           portably by splitting the shift into two parts: shift_count-1 and 1.
           Arrange for the -1 by using ones-complement instead of
           twos-complement in the negation: ~(B * 8) & 63.  */
        tcg_gen_shli_i64(shift, load_gpr(ctx, rb), 3);
        tcg_gen_not_i64(shift, shift);
        tcg_gen_andi_i64(shift, shift, 0x3f);

        tcg_gen_shr_i64(vc, tmp, shift);
        tcg_gen_shri_i64(vc, vc, 1);
        tcg_temp_free(shift);
        tcg_temp_free(tmp);
    }
}
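/* Worked example for the variable-shift path above: when (B & 7) == 0
   the insn must produce zero, which would require a shift by 64.  With
   the ones-complement trick, ~(0 * 8) & 63 == 63, and the following
   constant shift by 1 completes the full 64-bit shift portably.  */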
/* INSBL, INSWL, INSLL, INSQL */
static void gen_ins_l(DisasContext *ctx, TCGv vc, TCGv va, int rb, bool islit,
                      uint8_t lit, uint8_t byte_mask)
{
    if (islit) {
        int pos = (lit & 7) * 8;
        int len = cto32(byte_mask) * 8;
        if (pos + len > 64) {
            len = 64 - pos;
        }
        tcg_gen_deposit_z_i64(vc, va, pos, len);
    } else {
        TCGv tmp = tcg_temp_new();
        TCGv shift = tcg_temp_new();

        /* The instruction description has us left-shift the byte mask
           and extract bits <15:8> and apply that zap at the end.  This
           is equivalent to simply performing the zap first and shifting
           afterward.  */
        gen_zapnoti(tmp, va, byte_mask);

        tcg_gen_andi_i64(shift, load_gpr(ctx, rb), 7);
        tcg_gen_shli_i64(shift, shift, 3);
        tcg_gen_shl_i64(vc, tmp, shift);
        tcg_temp_free(shift);
        tcg_temp_free(tmp);
    }
}
/* MSKWH, MSKLH, MSKQH */
static void gen_msk_h(DisasContext *ctx, TCGv vc, TCGv va, int rb, bool islit,
                      uint8_t lit, uint8_t byte_mask)
{
    if (islit) {
        gen_zapnoti(vc, va, ~((byte_mask << (lit & 7)) >> 8));
    } else {
        TCGv shift = tcg_temp_new();
        TCGv mask = tcg_temp_new();

        /* The instruction description is as above, where the byte_mask
           is shifted left, and then we extract bits <15:8>.  This can be
           emulated with a right-shift on the expanded byte mask.  This
           requires extra care because for an input <2:0> == 0 we need a
           shift of 64 bits in order to generate a zero.  This is done by
           splitting the shift into two parts, the variable shift - 1
           followed by a constant 1 shift.  The code we expand below is
           equivalent to ~(B * 8) & 63.  */
        tcg_gen_shli_i64(shift, load_gpr(ctx, rb), 3);
        tcg_gen_not_i64(shift, shift);
        tcg_gen_andi_i64(shift, shift, 0x3f);
        tcg_gen_movi_i64(mask, zapnot_mask(byte_mask));
        tcg_gen_shr_i64(mask, mask, shift);
        tcg_gen_shri_i64(mask, mask, 1);

        tcg_gen_andc_i64(vc, va, mask);

        tcg_temp_free(mask);
        tcg_temp_free(shift);
    }
}
/* MSKBL, MSKWL, MSKLL, MSKQL */
static void gen_msk_l(DisasContext *ctx, TCGv vc, TCGv va, int rb, bool islit,
                      uint8_t lit, uint8_t byte_mask)
{
    if (islit) {
        gen_zapnoti(vc, va, ~(byte_mask << (lit & 7)));
    } else {
        TCGv shift = tcg_temp_new();
        TCGv mask = tcg_temp_new();

        tcg_gen_andi_i64(shift, load_gpr(ctx, rb), 7);
        tcg_gen_shli_i64(shift, shift, 3);
        tcg_gen_movi_i64(mask, zapnot_mask(byte_mask));
        tcg_gen_shl_i64(mask, mask, shift);

        tcg_gen_andc_i64(vc, va, mask);

        tcg_temp_free(mask);
        tcg_temp_free(shift);
    }
}
static void gen_rx(DisasContext *ctx, int ra, int set)
{
    TCGv_i32 tmp;

    if (ra != 31) {
        tcg_gen_ld8u_i64(ctx->ir[ra], cpu_env,
                         offsetof(CPUAlphaState, intr_flag));
    }

    tmp = tcg_const_i32(set);
    tcg_gen_st8_i32(tmp, cpu_env, offsetof(CPUAlphaState, intr_flag));
    tcg_temp_free_i32(tmp);
}
static ExitStatus gen_call_pal(DisasContext *ctx, int palcode)
{
    /* We're emulating OSF/1 PALcode.  Many of these are trivial access
       to internal cpu registers.  */

    /* Unprivileged PAL call */
    if (palcode >= 0x80 && palcode < 0xC0) {
        switch (palcode) {
        case 0x86:
            /* IMB */
            /* No-op inside QEMU.  */
            break;
        case 0x9E:
            /* RDUNIQUE */
            tcg_gen_ld_i64(ctx->ir[IR_V0], cpu_env,
                           offsetof(CPUAlphaState, unique));
            break;
        case 0x9F:
            /* WRUNIQUE */
            tcg_gen_st_i64(ctx->ir[IR_A0], cpu_env,
                           offsetof(CPUAlphaState, unique));
            break;
        default:
            palcode &= 0xbf;
            goto do_call_pal;
        }
        return NO_EXIT;
    }

#ifndef CONFIG_USER_ONLY
    /* Privileged PAL code */
    if (palcode < 0x40 && (ctx->tb->flags & TB_FLAGS_USER_MODE) == 0) {
        switch (palcode) {
        case 0x01:
            /* CFLUSH */
            /* No-op inside QEMU.  */
            break;
        case 0x02:
            /* DRAINA */
            /* No-op inside QEMU.  */
            break;
        case 0x2D:
            /* WRVPTPTR */
            tcg_gen_st_i64(ctx->ir[IR_A0], cpu_env,
                           offsetof(CPUAlphaState, vptptr));
            break;
        case 0x31:
            /* WRVAL */
            tcg_gen_st_i64(ctx->ir[IR_A0], cpu_env,
                           offsetof(CPUAlphaState, sysval));
            break;
        case 0x32:
            /* RDVAL */
            tcg_gen_ld_i64(ctx->ir[IR_V0], cpu_env,
                           offsetof(CPUAlphaState, sysval));
            break;

        case 0x35: {
            /* SWPIPL */
            TCGv tmp;

            /* Note that we already know we're in kernel mode, so we know
               that PS only contains the 3 IPL bits.  */
            tcg_gen_ld8u_i64(ctx->ir[IR_V0], cpu_env,
                             offsetof(CPUAlphaState, ps));

            /* But make sure and store only the 3 IPL bits from the user.  */
            tmp = tcg_temp_new();
            tcg_gen_andi_i64(tmp, ctx->ir[IR_A0], PS_INT_MASK);
            tcg_gen_st8_i64(tmp, cpu_env, offsetof(CPUAlphaState, ps));
            tcg_temp_free(tmp);
            break;
        }

        case 0x36:
            /* RDPS */
            tcg_gen_ld8u_i64(ctx->ir[IR_V0], cpu_env,
                             offsetof(CPUAlphaState, ps));
            break;
        case 0x38:
            /* WRUSP */
            tcg_gen_st_i64(ctx->ir[IR_A0], cpu_env,
                           offsetof(CPUAlphaState, usp));
            break;
        case 0x3A:
            /* RDUSP */
            tcg_gen_ld_i64(ctx->ir[IR_V0], cpu_env,
                           offsetof(CPUAlphaState, usp));
            break;
        case 0x3C:
            /* WHAMI */
            tcg_gen_ld32s_i64(ctx->ir[IR_V0], cpu_env,
                -offsetof(AlphaCPU, env) + offsetof(CPUState, cpu_index));
            break;

        default:
            palcode &= 0x3f;
            goto do_call_pal;
        }
        return NO_EXIT;
    }
#endif
    return gen_invalid(ctx);

 do_call_pal:
#ifdef CONFIG_USER_ONLY
    return gen_excp(ctx, EXCP_CALL_PAL, palcode);
#else
    {
        TCGv tmp = tcg_temp_new();
        uint64_t exc_addr = ctx->pc;
        uint64_t entry = ctx->palbr;

        if (ctx->tb->flags & TB_FLAGS_PAL_MODE) {
            exc_addr |= 1;
        } else {
            tcg_gen_movi_i64(tmp, 1);
            tcg_gen_st8_i64(tmp, cpu_env, offsetof(CPUAlphaState, pal_mode));
        }

        tcg_gen_movi_i64(tmp, exc_addr);
        tcg_gen_st_i64(tmp, cpu_env, offsetof(CPUAlphaState, exc_addr));
        tcg_temp_free(tmp);

        entry += (palcode & 0x80
                  ? 0x2000 + (palcode - 0x80) * 64
                  : 0x1000 + palcode * 64);
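        /* E.g. with this emulated OSF/1 PALcode layout, an unprivileged
           palcode 0x83 enters at palbr + 0x2000 + (0x83 - 0x80) * 64,
           while a privileged palcode 0x35 enters at palbr + 0x1000
           + 0x35 * 64.  */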
        /* Since the destination is running in PALmode, we don't really
           need the page permissions check.  We'll see the existence of
           the page when we create the TB, and we'll flush all TBs if
           we change the PAL base register.  */
        if (!ctx->singlestep_enabled && !(ctx->tb->cflags & CF_LAST_IO)) {
            tcg_gen_goto_tb(0);
            tcg_gen_movi_i64(cpu_pc, entry);
            tcg_gen_exit_tb((uintptr_t)ctx->tb);
            return EXIT_GOTO_TB;
        } else {
            tcg_gen_movi_i64(cpu_pc, entry);
            return EXIT_PC_UPDATED;
        }
    }
#endif
}
#ifndef CONFIG_USER_ONLY

#define PR_BYTE         0x100000
#define PR_LONG         0x200000

static int cpu_pr_data(int pr)
{
    switch (pr) {
    case  0: return offsetof(CPUAlphaState, ps) | PR_BYTE;
    case  1: return offsetof(CPUAlphaState, fen) | PR_BYTE;
    case  2: return offsetof(CPUAlphaState, pcc_ofs) | PR_LONG;
    case  3: return offsetof(CPUAlphaState, trap_arg0);
    case  4: return offsetof(CPUAlphaState, trap_arg1);
    case  5: return offsetof(CPUAlphaState, trap_arg2);
    case  6: return offsetof(CPUAlphaState, exc_addr);
    case  7: return offsetof(CPUAlphaState, palbr);
    case  8: return offsetof(CPUAlphaState, ptbr);
    case  9: return offsetof(CPUAlphaState, vptptr);
    case 10: return offsetof(CPUAlphaState, unique);
    case 11: return offsetof(CPUAlphaState, sysval);
    case 12: return offsetof(CPUAlphaState, usp);

    case 40 ... 63:
        return offsetof(CPUAlphaState, scratch[pr - 40]);

    case 251:
        return offsetof(CPUAlphaState, alarm_expire);
    }
    return 0;
}
static ExitStatus gen_mfpr(DisasContext *ctx, TCGv va, int regno)
{
    void (*helper)(TCGv);
    int data;

    switch (regno) {
    case 32 ... 39:
        /* Accessing the "non-shadow" general registers.  */
        regno = regno == 39 ? 25 : regno - 32 + 8;
        tcg_gen_mov_i64(va, cpu_std_ir[regno]);
        break;

    case 250: /* WALLTIME */
        helper = gen_helper_get_walltime;
        goto do_helper;
    case 249: /* VMTIME */
        helper = gen_helper_get_vmtime;
    do_helper:
        if (use_icount) {
            gen_io_start();
            helper(va);
            gen_io_end();
            return EXIT_PC_STALE;
        } else {
            helper(va);
        }
        break;

    default:
        /* The basic registers are data only, and unknown registers
           are read-zero, write-ignore.  */
        data = cpu_pr_data(regno);
        if (data == 0) {
            tcg_gen_movi_i64(va, 0);
        } else if (data & PR_BYTE) {
            tcg_gen_ld8u_i64(va, cpu_env, data & ~PR_BYTE);
        } else if (data & PR_LONG) {
            tcg_gen_ld32s_i64(va, cpu_env, data & ~PR_LONG);
        } else {
            tcg_gen_ld_i64(va, cpu_env, data);
        }
        break;
    }

    return NO_EXIT;
}
static ExitStatus gen_mtpr(DisasContext *ctx, TCGv vb, int regno)
{
    TCGv tmp;
    int data;

    switch (regno) {
    case 255:
        /* TBIA */
        gen_helper_tbia(cpu_env);
        break;

    case 254:
        /* TBIS */
        gen_helper_tbis(cpu_env, vb);
        break;

    case 253:
        /* WAIT */
        tmp = tcg_const_i64(1);
        tcg_gen_st32_i64(tmp, cpu_env, -offsetof(AlphaCPU, env) +
                         offsetof(CPUState, halted));
        return gen_excp(ctx, EXCP_HLT, 0);

    case 252:
        /* HALT */
        gen_helper_halt(vb);
        return EXIT_PC_STALE;

    case 251:
        /* ALARM */
        gen_helper_set_alarm(cpu_env, vb);
        break;

    case 7:
        /* PALBR */
        tcg_gen_st_i64(vb, cpu_env, offsetof(CPUAlphaState, palbr));
        /* Changing the PAL base register implies un-chaining all of the TBs
           that ended with a CALL_PAL.  Since the base register usually only
           changes during boot, flushing everything works well.  */
        gen_helper_tb_flush(cpu_env);
        return EXIT_PC_STALE;

    case 32 ... 39:
        /* Accessing the "non-shadow" general registers.  */
        regno = regno == 39 ? 25 : regno - 32 + 8;
        tcg_gen_mov_i64(cpu_std_ir[regno], vb);
        break;

    default:
        /* The basic registers are data only, and unknown registers
           are read-zero, write-ignore.  */
        data = cpu_pr_data(regno);
        if (data != 0) {
            if (data & PR_BYTE) {
                tcg_gen_st8_i64(vb, cpu_env, data & ~PR_BYTE);
            } else if (data & PR_LONG) {
                tcg_gen_st32_i64(vb, cpu_env, data & ~PR_LONG);
            } else {
                tcg_gen_st_i64(vb, cpu_env, data);
            }
        }
        break;
    }

    return NO_EXIT;
}
#endif /* !CONFIG_USER_ONLY */
#define REQUIRE_NO_LIT                          \
    do {                                        \
        if (real_islit) {                       \
            goto invalid_opc;                   \
        }                                       \
    } while (0)

#define REQUIRE_TB_FLAG(FLAG)                   \
    do {                                        \
        if ((ctx->tb->flags & (FLAG)) == 0) {   \
            goto invalid_opc;                   \
        }                                       \
    } while (0)

#define REQUIRE_REG_31(WHICH)                   \
    do {                                        \
        if (WHICH != 31) {                      \
            goto invalid_opc;                   \
        }                                       \
    } while (0)
static ExitStatus translate_one(DisasContext *ctx, uint32_t insn)
{
    int32_t disp21, disp16, disp12 __attribute__((unused));
    uint16_t fn11;
    uint8_t opc, ra, rb, rc, fpfn, fn7, lit;
    bool islit, real_islit;
    TCGv va, vb, vc, tmp, tmp2;
    TCGv_i32 t32;
    ExitStatus ret;
    /* Decode all instruction fields */
    opc = extract32(insn, 26, 6);
    ra = extract32(insn, 21, 5);
    rb = extract32(insn, 16, 5);
    rc = extract32(insn, 0, 5);
    real_islit = islit = extract32(insn, 12, 1);
    lit = extract32(insn, 13, 8);

    disp21 = sextract32(insn, 0, 21);
    disp16 = sextract32(insn, 0, 16);
    disp12 = sextract32(insn, 0, 12);

    fn11 = extract32(insn, 5, 11);
    fpfn = extract32(insn, 5, 6);
    fn7 = extract32(insn, 5, 7);
    if (rb == 31 && !islit) {
        islit = true;
        lit = 0;
    }

    ret = NO_EXIT;
    switch (opc) {
    case 0x00:
        /* CALL_PAL */
        ret = gen_call_pal(ctx, insn & 0x03ffffff);
        break;
    case 0x09:
        /* LDAH */
        disp16 = (uint32_t)disp16 << 16;
        /* fall through */
    case 0x08:
        /* LDA */
        va = dest_gpr(ctx, ra);
        /* It's worth special-casing immediate loads.  */
        if (rb == 31) {
            tcg_gen_movi_i64(va, disp16);
        } else {
            tcg_gen_addi_i64(va, load_gpr(ctx, rb), disp16);
        }
        break;
    case 0x0A:
        /* LDBU */
        REQUIRE_TB_FLAG(TB_FLAGS_AMASK_BWX);
        gen_load_mem(ctx, &tcg_gen_qemu_ld8u, ra, rb, disp16, 0, 0);
        break;
    case 0x0B:
        /* LDQ_U */
        gen_load_mem(ctx, &tcg_gen_qemu_ld64, ra, rb, disp16, 0, 1);
        break;
    case 0x0C:
        /* LDWU */
        REQUIRE_TB_FLAG(TB_FLAGS_AMASK_BWX);
        gen_load_mem(ctx, &tcg_gen_qemu_ld16u, ra, rb, disp16, 0, 0);
        break;
    case 0x0D:
        /* STW */
        REQUIRE_TB_FLAG(TB_FLAGS_AMASK_BWX);
        gen_store_mem(ctx, &tcg_gen_qemu_st16, ra, rb, disp16, 0, 0);
        break;
    case 0x0E:
        /* STB */
        REQUIRE_TB_FLAG(TB_FLAGS_AMASK_BWX);
        gen_store_mem(ctx, &tcg_gen_qemu_st8, ra, rb, disp16, 0, 0);
        break;
    case 0x0F:
        /* STQ_U */
        gen_store_mem(ctx, &tcg_gen_qemu_st64, ra, rb, disp16, 0, 1);
        break;
    case 0x10:
        vc = dest_gpr(ctx, rc);
        vb = load_gpr_lit(ctx, rb, lit, islit);

        if (ra == 31) {
            if (fn7 == 0x00) {
                /* Special case ADDL as SEXTL.  */
                tcg_gen_ext32s_i64(vc, vb);
                break;
            }
            if (fn7 == 0x29) {
                /* Special case SUBQ as NEGQ.  */
                tcg_gen_neg_i64(vc, vb);
                break;
            }
        }

        va = load_gpr(ctx, ra);
        switch (fn7) {
        case 0x00:
            /* ADDL */
            tcg_gen_add_i64(vc, va, vb);
            tcg_gen_ext32s_i64(vc, vc);
            break;
        case 0x02:
            /* S4ADDL */
            tmp = tcg_temp_new();
            tcg_gen_shli_i64(tmp, va, 2);
            tcg_gen_add_i64(tmp, tmp, vb);
            tcg_gen_ext32s_i64(vc, tmp);
            tcg_temp_free(tmp);
            break;
        case 0x09:
            /* SUBL */
            tcg_gen_sub_i64(vc, va, vb);
            tcg_gen_ext32s_i64(vc, vc);
            break;
        case 0x0B:
            /* S4SUBL */
            tmp = tcg_temp_new();
            tcg_gen_shli_i64(tmp, va, 2);
            tcg_gen_sub_i64(tmp, tmp, vb);
            tcg_gen_ext32s_i64(vc, tmp);
            tcg_temp_free(tmp);
            break;
        case 0x0F:
            /* CMPBGE */
            if (ra == 31) {
                /* Special case 0 >= X as X == 0.  */
                gen_helper_cmpbe0(vc, vb);
            } else {
                gen_helper_cmpbge(vc, va, vb);
            }
            break;
        case 0x12:
            /* S8ADDL */
            tmp = tcg_temp_new();
            tcg_gen_shli_i64(tmp, va, 3);
            tcg_gen_add_i64(tmp, tmp, vb);
            tcg_gen_ext32s_i64(vc, tmp);
            tcg_temp_free(tmp);
            break;
        case 0x1B:
            /* S8SUBL */
            tmp = tcg_temp_new();
            tcg_gen_shli_i64(tmp, va, 3);
            tcg_gen_sub_i64(tmp, tmp, vb);
            tcg_gen_ext32s_i64(vc, tmp);
            tcg_temp_free(tmp);
            break;
        case 0x1D:
            /* CMPULT */
            tcg_gen_setcond_i64(TCG_COND_LTU, vc, va, vb);
            break;
        case 0x20:
            /* ADDQ */
            tcg_gen_add_i64(vc, va, vb);
            break;
        case 0x22:
            /* S4ADDQ */
            tmp = tcg_temp_new();
            tcg_gen_shli_i64(tmp, va, 2);
            tcg_gen_add_i64(vc, tmp, vb);
            tcg_temp_free(tmp);
            break;
        case 0x29:
            /* SUBQ */
            tcg_gen_sub_i64(vc, va, vb);
            break;
        case 0x2B:
            /* S4SUBQ */
            tmp = tcg_temp_new();
            tcg_gen_shli_i64(tmp, va, 2);
            tcg_gen_sub_i64(vc, tmp, vb);
            tcg_temp_free(tmp);
            break;
        case 0x2D:
            /* CMPEQ */
            tcg_gen_setcond_i64(TCG_COND_EQ, vc, va, vb);
            break;
        case 0x32:
            /* S8ADDQ */
            tmp = tcg_temp_new();
            tcg_gen_shli_i64(tmp, va, 3);
            tcg_gen_add_i64(vc, tmp, vb);
            tcg_temp_free(tmp);
            break;
        case 0x3B:
            /* S8SUBQ */
            tmp = tcg_temp_new();
            tcg_gen_shli_i64(tmp, va, 3);
            tcg_gen_sub_i64(vc, tmp, vb);
            tcg_temp_free(tmp);
            break;
        case 0x3D:
            /* CMPULE */
            tcg_gen_setcond_i64(TCG_COND_LEU, vc, va, vb);
            break;
        case 0x40:
            /* ADDL/V */
            tmp = tcg_temp_new();
            tcg_gen_ext32s_i64(tmp, va);
            tcg_gen_ext32s_i64(vc, vb);
            tcg_gen_add_i64(tmp, tmp, vc);
            tcg_gen_ext32s_i64(vc, tmp);
            gen_helper_check_overflow(cpu_env, vc, tmp);
            tcg_temp_free(tmp);
            break;
        case 0x49:
            /* SUBL/V */
            tmp = tcg_temp_new();
            tcg_gen_ext32s_i64(tmp, va);
            tcg_gen_ext32s_i64(vc, vb);
            tcg_gen_sub_i64(tmp, tmp, vc);
            tcg_gen_ext32s_i64(vc, tmp);
            gen_helper_check_overflow(cpu_env, vc, tmp);
            tcg_temp_free(tmp);
            break;
        case 0x4D:
            /* CMPLT */
            tcg_gen_setcond_i64(TCG_COND_LT, vc, va, vb);
            break;
        case 0x60:
            /* ADDQ/V */
            tmp = tcg_temp_new();
            tmp2 = tcg_temp_new();
            tcg_gen_eqv_i64(tmp, va, vb);
            tcg_gen_mov_i64(tmp2, va);
            tcg_gen_add_i64(vc, va, vb);
            tcg_gen_xor_i64(tmp2, tmp2, vc);
            tcg_gen_and_i64(tmp, tmp, tmp2);
            tcg_gen_shri_i64(tmp, tmp, 63);
            tcg_gen_movi_i64(tmp2, 0);
            gen_helper_check_overflow(cpu_env, tmp, tmp2);
            tcg_temp_free(tmp);
            tcg_temp_free(tmp2);
            break;
        case 0x69:
            /* SUBQ/V */
            tmp = tcg_temp_new();
            tmp2 = tcg_temp_new();
            tcg_gen_xor_i64(tmp, va, vb);
            tcg_gen_mov_i64(tmp2, va);
            tcg_gen_sub_i64(vc, va, vb);
            tcg_gen_xor_i64(tmp2, tmp2, vc);
            tcg_gen_and_i64(tmp, tmp, tmp2);
            tcg_gen_shri_i64(tmp, tmp, 63);
            tcg_gen_movi_i64(tmp2, 0);
            gen_helper_check_overflow(cpu_env, tmp, tmp2);
            tcg_temp_free(tmp);
            tcg_temp_free(tmp2);
            break;
        case 0x6D:
            /* CMPLE */
            tcg_gen_setcond_i64(TCG_COND_LE, vc, va, vb);
            break;
        default:
            goto invalid_opc;
        }
        break;
    case 0x11:
        if (fn7 == 0x20) {
            if (rc == 31) {
                /* Special case BIS as NOP.  */
                break;
            }
            if (ra == 31) {
                /* Special case BIS as MOV.  */
                vc = dest_gpr(ctx, rc);
                if (islit) {
                    tcg_gen_movi_i64(vc, lit);
                } else {
                    tcg_gen_mov_i64(vc, load_gpr(ctx, rb));
                }
                break;
            }
        }

        vc = dest_gpr(ctx, rc);
        vb = load_gpr_lit(ctx, rb, lit, islit);

        if (fn7 == 0x28 && ra == 31) {
            /* Special case ORNOT as NOT.  */
            tcg_gen_not_i64(vc, vb);
            break;
        }

        va = load_gpr(ctx, ra);
        switch (fn7) {
        case 0x00:
            /* AND */
            tcg_gen_and_i64(vc, va, vb);
            break;
        case 0x08:
            /* BIC */
            tcg_gen_andc_i64(vc, va, vb);
            break;
        case 0x14:
            /* CMOVLBS */
            tmp = tcg_temp_new();
            tcg_gen_andi_i64(tmp, va, 1);
            tcg_gen_movcond_i64(TCG_COND_NE, vc, tmp, load_zero(ctx),
                                vb, load_gpr(ctx, rc));
            tcg_temp_free(tmp);
            break;
        case 0x16:
            /* CMOVLBC */
            tmp = tcg_temp_new();
            tcg_gen_andi_i64(tmp, va, 1);
            tcg_gen_movcond_i64(TCG_COND_EQ, vc, tmp, load_zero(ctx),
                                vb, load_gpr(ctx, rc));
            tcg_temp_free(tmp);
            break;
        case 0x20:
            /* BIS */
            tcg_gen_or_i64(vc, va, vb);
            break;
        case 0x24:
            /* CMOVEQ */
            tcg_gen_movcond_i64(TCG_COND_EQ, vc, va, load_zero(ctx),
                                vb, load_gpr(ctx, rc));
            break;
        case 0x26:
            /* CMOVNE */
            tcg_gen_movcond_i64(TCG_COND_NE, vc, va, load_zero(ctx),
                                vb, load_gpr(ctx, rc));
            break;
        case 0x28:
            /* ORNOT */
            tcg_gen_orc_i64(vc, va, vb);
            break;
        case 0x40:
            /* XOR */
            tcg_gen_xor_i64(vc, va, vb);
            break;
        case 0x44:
            /* CMOVLT */
            tcg_gen_movcond_i64(TCG_COND_LT, vc, va, load_zero(ctx),
                                vb, load_gpr(ctx, rc));
            break;
        case 0x46:
            /* CMOVGE */
            tcg_gen_movcond_i64(TCG_COND_GE, vc, va, load_zero(ctx),
                                vb, load_gpr(ctx, rc));
            break;
        case 0x48:
            /* EQV */
            tcg_gen_eqv_i64(vc, va, vb);
            break;
        case 0x61:
            /* AMASK */
            REQUIRE_REG_31(ra);
            {
                uint64_t amask = ctx->tb->flags >> TB_FLAGS_AMASK_SHIFT;
                tcg_gen_andi_i64(vc, vb, ~amask);
            }
            break;
        case 0x64:
            /* CMOVLE */
            tcg_gen_movcond_i64(TCG_COND_LE, vc, va, load_zero(ctx),
                                vb, load_gpr(ctx, rc));
            break;
        case 0x66:
            /* CMOVGT */
            tcg_gen_movcond_i64(TCG_COND_GT, vc, va, load_zero(ctx),
                                vb, load_gpr(ctx, rc));
            break;
        case 0x6C:
            /* IMPLVER */
            REQUIRE_REG_31(ra);
            tcg_gen_movi_i64(vc, ctx->implver);
            break;
        default:
            goto invalid_opc;
        }
        break;
    case 0x12:
        vc = dest_gpr(ctx, rc);
        va = load_gpr(ctx, ra);
        switch (fn7) {
        case 0x02:
            /* MSKBL */
            gen_msk_l(ctx, vc, va, rb, islit, lit, 0x01);
            break;
        case 0x06:
            /* EXTBL */
            gen_ext_l(ctx, vc, va, rb, islit, lit, 0x01);
            break;
        case 0x0B:
            /* INSBL */
            gen_ins_l(ctx, vc, va, rb, islit, lit, 0x01);
            break;
        case 0x12:
            /* MSKWL */
            gen_msk_l(ctx, vc, va, rb, islit, lit, 0x03);
            break;
        case 0x16:
            /* EXTWL */
            gen_ext_l(ctx, vc, va, rb, islit, lit, 0x03);
            break;
        case 0x1B:
            /* INSWL */
            gen_ins_l(ctx, vc, va, rb, islit, lit, 0x03);
            break;
        case 0x22:
            /* MSKLL */
            gen_msk_l(ctx, vc, va, rb, islit, lit, 0x0f);
            break;
        case 0x26:
            /* EXTLL */
            gen_ext_l(ctx, vc, va, rb, islit, lit, 0x0f);
            break;
        case 0x2B:
            /* INSLL */
            gen_ins_l(ctx, vc, va, rb, islit, lit, 0x0f);
            break;
        case 0x30:
            /* ZAP */
            if (islit) {
                gen_zapnoti(vc, va, ~lit);
            } else {
                gen_helper_zap(vc, va, load_gpr(ctx, rb));
            }
            break;
        case 0x31:
            /* ZAPNOT */
            if (islit) {
                gen_zapnoti(vc, va, lit);
            } else {
                gen_helper_zapnot(vc, va, load_gpr(ctx, rb));
            }
            break;
        case 0x32:
            /* MSKQL */
            gen_msk_l(ctx, vc, va, rb, islit, lit, 0xff);
            break;
        case 0x34:
            /* SRL */
            if (islit) {
                tcg_gen_shri_i64(vc, va, lit & 0x3f);
            } else {
                tmp = tcg_temp_new();
                vb = load_gpr(ctx, rb);
                tcg_gen_andi_i64(tmp, vb, 0x3f);
                tcg_gen_shr_i64(vc, va, tmp);
                tcg_temp_free(tmp);
            }
            break;
        case 0x36:
            /* EXTQL */
            gen_ext_l(ctx, vc, va, rb, islit, lit, 0xff);
            break;
        case 0x39:
            /* SLL */
            if (islit) {
                tcg_gen_shli_i64(vc, va, lit & 0x3f);
            } else {
                tmp = tcg_temp_new();
                vb = load_gpr(ctx, rb);
                tcg_gen_andi_i64(tmp, vb, 0x3f);
                tcg_gen_shl_i64(vc, va, tmp);
                tcg_temp_free(tmp);
            }
            break;
        case 0x3B:
            /* INSQL */
            gen_ins_l(ctx, vc, va, rb, islit, lit, 0xff);
            break;
        case 0x3C:
            /* SRA */
            if (islit) {
                tcg_gen_sari_i64(vc, va, lit & 0x3f);
            } else {
                tmp = tcg_temp_new();
                vb = load_gpr(ctx, rb);
                tcg_gen_andi_i64(tmp, vb, 0x3f);
                tcg_gen_sar_i64(vc, va, tmp);
                tcg_temp_free(tmp);
            }
            break;
        case 0x52:
            /* MSKWH */
            gen_msk_h(ctx, vc, va, rb, islit, lit, 0x03);
            break;
        case 0x57:
            /* INSWH */
            gen_ins_h(ctx, vc, va, rb, islit, lit, 0x03);
            break;
        case 0x5A:
            /* EXTWH */
            gen_ext_h(ctx, vc, va, rb, islit, lit, 0x03);
            break;
        case 0x62:
            /* MSKLH */
            gen_msk_h(ctx, vc, va, rb, islit, lit, 0x0f);
            break;
        case 0x67:
            /* INSLH */
            gen_ins_h(ctx, vc, va, rb, islit, lit, 0x0f);
            break;
        case 0x6A:
            /* EXTLH */
            gen_ext_h(ctx, vc, va, rb, islit, lit, 0x0f);
            break;
        case 0x72:
            /* MSKQH */
            gen_msk_h(ctx, vc, va, rb, islit, lit, 0xff);
            break;
        case 0x77:
            /* INSQH */
            gen_ins_h(ctx, vc, va, rb, islit, lit, 0xff);
            break;
        case 0x7A:
            /* EXTQH */
            gen_ext_h(ctx, vc, va, rb, islit, lit, 0xff);
            break;
        default:
            goto invalid_opc;
        }
        break;
= dest_gpr(ctx
, rc
);
1988 vb
= load_gpr_lit(ctx
, rb
, lit
, islit
);
1989 va
= load_gpr(ctx
, ra
);
1993 tcg_gen_mul_i64(vc
, va
, vb
);
1994 tcg_gen_ext32s_i64(vc
, vc
);
1998 tcg_gen_mul_i64(vc
, va
, vb
);
2002 tmp
= tcg_temp_new();
2003 tcg_gen_mulu2_i64(tmp
, vc
, va
, vb
);
2008 tmp
= tcg_temp_new();
2009 tcg_gen_ext32s_i64(tmp
, va
);
2010 tcg_gen_ext32s_i64(vc
, vb
);
2011 tcg_gen_mul_i64(tmp
, tmp
, vc
);
2012 tcg_gen_ext32s_i64(vc
, tmp
);
2013 gen_helper_check_overflow(cpu_env
, vc
, tmp
);
2018 tmp
= tcg_temp_new();
2019 tmp2
= tcg_temp_new();
2020 tcg_gen_muls2_i64(vc
, tmp
, va
, vb
);
2021 tcg_gen_sari_i64(tmp2
, vc
, 63);
2022 gen_helper_check_overflow(cpu_env
, tmp
, tmp2
);
2024 tcg_temp_free(tmp2
);
    case 0x14:
        REQUIRE_TB_FLAG(TB_FLAGS_AMASK_FIX);
        vc = dest_fpr(ctx, rc);
        switch (fpfn) { /* fn11 & 0x3F */
        case 0x04:
            /* ITOFS */
            REQUIRE_REG_31(rb);
            t32 = tcg_temp_new_i32();
            va = load_gpr(ctx, ra);
            tcg_gen_extrl_i64_i32(t32, va);
            gen_helper_memory_to_s(vc, t32);
            tcg_temp_free_i32(t32);
            break;
        case 0x0A:
            /* SQRTF */
            REQUIRE_REG_31(ra);
            vb = load_fpr(ctx, rb);
            gen_helper_sqrtf(vc, cpu_env, vb);
            break;
        case 0x0B:
            /* SQRTS */
            REQUIRE_REG_31(ra);
            gen_sqrts(ctx, rb, rc, fn11);
            break;
        case 0x14:
            /* ITOFF */
            REQUIRE_REG_31(rb);
            t32 = tcg_temp_new_i32();
            va = load_gpr(ctx, ra);
            tcg_gen_extrl_i64_i32(t32, va);
            gen_helper_memory_to_f(vc, t32);
            tcg_temp_free_i32(t32);
            break;
        case 0x24:
            /* ITOFT */
            REQUIRE_REG_31(rb);
            va = load_gpr(ctx, ra);
            tcg_gen_mov_i64(vc, va);
            break;
        case 0x2A:
            /* SQRTG */
            REQUIRE_REG_31(ra);
            vb = load_fpr(ctx, rb);
            gen_helper_sqrtg(vc, cpu_env, vb);
            break;
        case 0x2B:
            /* SQRTT */
            REQUIRE_REG_31(ra);
            gen_sqrtt(ctx, rb, rc, fn11);
            break;
        default:
            goto invalid_opc;
        }
        break;
    case 0x15:
        /* VAX floating point */
        /* XXX: rounding mode and trap are ignored (!) */
        vc = dest_fpr(ctx, rc);
        vb = load_fpr(ctx, rb);
        va = load_fpr(ctx, ra);
        switch (fpfn) { /* fn11 & 0x3F */
        case 0x00:
            /* ADDF */
            gen_helper_addf(vc, cpu_env, va, vb);
            break;
        case 0x01:
            /* SUBF */
            gen_helper_subf(vc, cpu_env, va, vb);
            break;
        case 0x02:
            /* MULF */
            gen_helper_mulf(vc, cpu_env, va, vb);
            break;
        case 0x03:
            /* DIVF */
            gen_helper_divf(vc, cpu_env, va, vb);
            break;
        case 0x20:
            /* ADDG */
            gen_helper_addg(vc, cpu_env, va, vb);
            break;
        case 0x21:
            /* SUBG */
            gen_helper_subg(vc, cpu_env, va, vb);
            break;
        case 0x22:
            /* MULG */
            gen_helper_mulg(vc, cpu_env, va, vb);
            break;
        case 0x23:
            /* DIVG */
            gen_helper_divg(vc, cpu_env, va, vb);
            break;
        case 0x25:
            /* CMPGEQ */
            gen_helper_cmpgeq(vc, cpu_env, va, vb);
            break;
        case 0x26:
            /* CMPGLT */
            gen_helper_cmpglt(vc, cpu_env, va, vb);
            break;
        case 0x27:
            /* CMPGLE */
            gen_helper_cmpgle(vc, cpu_env, va, vb);
            break;
        case 0x2C:
            /* CVTGF */
            REQUIRE_REG_31(ra);
            gen_helper_cvtgf(vc, cpu_env, vb);
            break;
        case 0x2F:
            /* CVTGQ */
            REQUIRE_REG_31(ra);
            gen_helper_cvtgq(vc, cpu_env, vb);
            break;
        case 0x3C:
            /* CVTQF */
            REQUIRE_REG_31(ra);
            gen_helper_cvtqf(vc, cpu_env, vb);
            break;
        case 0x3E:
            /* CVTQG */
            REQUIRE_REG_31(ra);
            gen_helper_cvtqg(vc, cpu_env, vb);
            break;
        default:
            goto invalid_opc;
        }
        break;
    case 0x16:
        /* IEEE floating-point */
        switch (fpfn) { /* fn11 & 0x3F */
        case 0x00:
            /* ADDS */
            gen_adds(ctx, ra, rb, rc, fn11);
            break;
        case 0x01:
            /* SUBS */
            gen_subs(ctx, ra, rb, rc, fn11);
            break;
        case 0x02:
            /* MULS */
            gen_muls(ctx, ra, rb, rc, fn11);
            break;
        case 0x03:
            /* DIVS */
            gen_divs(ctx, ra, rb, rc, fn11);
            break;
        case 0x20:
            /* ADDT */
            gen_addt(ctx, ra, rb, rc, fn11);
            break;
        case 0x21:
            /* SUBT */
            gen_subt(ctx, ra, rb, rc, fn11);
            break;
        case 0x22:
            /* MULT */
            gen_mult(ctx, ra, rb, rc, fn11);
            break;
        case 0x23:
            /* DIVT */
            gen_divt(ctx, ra, rb, rc, fn11);
            break;
        case 0x24:
            /* CMPTUN */
            gen_cmptun(ctx, ra, rb, rc, fn11);
            break;
        case 0x25:
            /* CMPTEQ */
            gen_cmpteq(ctx, ra, rb, rc, fn11);
            break;
        case 0x26:
            /* CMPTLT */
            gen_cmptlt(ctx, ra, rb, rc, fn11);
            break;
        case 0x27:
            /* CMPTLE */
            gen_cmptle(ctx, ra, rb, rc, fn11);
            break;
        case 0x2C:
            REQUIRE_REG_31(ra);
            if (fn11 == 0x2AC || fn11 == 0x6AC) {
                /* CVTST */
                gen_cvtst(ctx, rb, rc, fn11);
            } else {
                /* CVTTS */
                gen_cvtts(ctx, rb, rc, fn11);
            }
            break;
        case 0x2F:
            /* CVTTQ */
            REQUIRE_REG_31(ra);
            gen_cvttq(ctx, rb, rc, fn11);
            break;
        case 0x3C:
            /* CVTQS */
            REQUIRE_REG_31(ra);
            gen_cvtqs(ctx, rb, rc, fn11);
            break;
        case 0x3E:
            /* CVTQT */
            REQUIRE_REG_31(ra);
            gen_cvtqt(ctx, rb, rc, fn11);
            break;
        default:
            goto invalid_opc;
        }
        break;
    case 0x17:
        switch (fn11) {
        case 0x010:
            /* CVTLQ */
            REQUIRE_REG_31(ra);
            vc = dest_fpr(ctx, rc);
            vb = load_fpr(ctx, rb);
            gen_cvtlq(vc, vb);
            break;
        case 0x020:
            /* CPYS */
            if (rc == 31) {
                /* Special case CPYS as FNOP.  */
            } else {
                vc = dest_fpr(ctx, rc);
                va = load_fpr(ctx, ra);
                if (ra == rb) {
                    /* Special case CPYS as FMOV.  */
                    tcg_gen_mov_i64(vc, va);
                } else {
                    vb = load_fpr(ctx, rb);
                    gen_cpy_mask(vc, va, vb, 0, 0x8000000000000000ULL);
                }
            }
            break;
        case 0x021:
            /* CPYSN */
            vc = dest_fpr(ctx, rc);
            vb = load_fpr(ctx, rb);
            va = load_fpr(ctx, ra);
            gen_cpy_mask(vc, va, vb, 1, 0x8000000000000000ULL);
            break;
        case 0x022:
            /* CPYSE */
            vc = dest_fpr(ctx, rc);
            vb = load_fpr(ctx, rb);
            va = load_fpr(ctx, ra);
            gen_cpy_mask(vc, va, vb, 0, 0xFFF0000000000000ULL);
            break;
        case 0x024:
            /* MT_FPCR */
            va = load_fpr(ctx, ra);
            gen_helper_store_fpcr(cpu_env, va);
            if (ctx->tb_rm == QUAL_RM_D) {
                /* Re-do the copy of the rounding mode to fp_status
                   the next time we use dynamic rounding.  */
                ctx->tb_rm = -1;
            }
            break;
        case 0x025:
            /* MF_FPCR */
            va = dest_fpr(ctx, ra);
            gen_helper_load_fpcr(va, cpu_env);
            break;
        case 0x02A:
            /* FCMOVEQ */
            gen_fcmov(ctx, TCG_COND_EQ, ra, rb, rc);
            break;
        case 0x02B:
            /* FCMOVNE */
            gen_fcmov(ctx, TCG_COND_NE, ra, rb, rc);
            break;
        case 0x02C:
            /* FCMOVLT */
            gen_fcmov(ctx, TCG_COND_LT, ra, rb, rc);
            break;
        case 0x02D:
            /* FCMOVGE */
            gen_fcmov(ctx, TCG_COND_GE, ra, rb, rc);
            break;
        case 0x02E:
            /* FCMOVLE */
            gen_fcmov(ctx, TCG_COND_LE, ra, rb, rc);
            break;
        case 0x02F:
            /* FCMOVGT */
            gen_fcmov(ctx, TCG_COND_GT, ra, rb, rc);
            break;
        case 0x030: /* CVTQL */
        case 0x130: /* CVTQL/V */
        case 0x530: /* CVTQL/SV */
            REQUIRE_REG_31(ra);
            vc = dest_fpr(ctx, rc);
            vb = load_fpr(ctx, rb);
            gen_helper_cvtql(vc, cpu_env, vb);
            gen_fp_exc_raise(rc, fn11);
            break;
        default:
            goto invalid_opc;
        }
        break;
    case 0x18:
        switch ((uint16_t)disp16) {
        case 0x0000:
            /* TRAPB */
            /* No-op.  */
            break;
        case 0x0400:
            /* EXCB */
            /* No-op.  */
            break;
        case 0x4000:
            /* MB */
            tcg_gen_mb(TCG_MO_ALL | TCG_BAR_SC);
            break;
        case 0x4400:
            /* WMB */
            tcg_gen_mb(TCG_MO_ST_ST | TCG_BAR_SC);
            break;
        case 0x8000:
            /* FETCH */
            /* No-op.  */
            break;
        case 0xA000:
            /* FETCH_M */
            /* No-op.  */
            break;
        case 0xC000:
            /* RPCC */
            va = dest_gpr(ctx, ra);
            if (ctx->tb->cflags & CF_USE_ICOUNT) {
                gen_io_start();
                gen_helper_load_pcc(va, cpu_env);
                gen_io_end();
                ret = EXIT_PC_STALE;
            } else {
                gen_helper_load_pcc(va, cpu_env);
            }
            break;
        case 0xE000:
            /* RC */
            gen_rx(ctx, ra, 0);
            break;
        case 0xF000:
            /* RS */
            gen_rx(ctx, ra, 1);
            break;
        default:
            goto invalid_opc;
        }
        break;
    case 0x19:
        /* HW_MFPR (PALcode) */
#ifndef CONFIG_USER_ONLY
        REQUIRE_TB_FLAG(TB_FLAGS_PAL_MODE);
        va = dest_gpr(ctx, ra);
        ret = gen_mfpr(ctx, va, insn & 0xffff);
        break;
#else
        goto invalid_opc;
#endif
    case 0x1A:
        /* JMP, JSR, RET, JSR_COROUTINE.  These only differ by the branch
           prediction stack action, which of course we don't implement.  */
        vb = load_gpr(ctx, rb);
        tcg_gen_andi_i64(cpu_pc, vb, ~3);
        if (ra != 31) {
            tcg_gen_movi_i64(ctx->ir[ra], ctx->pc);
        }
        ret = EXIT_PC_UPDATED;
        break;
    case 0x1B:
        /* HW_LD (PALcode) */
#ifndef CONFIG_USER_ONLY
        REQUIRE_TB_FLAG(TB_FLAGS_PAL_MODE);
        {
            TCGv addr = tcg_temp_new();
            vb = load_gpr(ctx, rb);
            va = dest_gpr(ctx, ra);

            tcg_gen_addi_i64(addr, vb, disp12);
            switch ((insn >> 12) & 0xF) {
            case 0x0:
                /* Longword physical access (hw_ldl/p) */
                tcg_gen_qemu_ld_i64(va, addr, MMU_PHYS_IDX, MO_LESL);
                break;
            case 0x1:
                /* Quadword physical access (hw_ldq/p) */
                tcg_gen_qemu_ld_i64(va, addr, MMU_PHYS_IDX, MO_LEQ);
                break;
            case 0x2:
                /* Longword physical access with lock (hw_ldl_l/p) */
                gen_qemu_ldl_l(va, addr, MMU_PHYS_IDX);
                break;
            case 0x3:
                /* Quadword physical access with lock (hw_ldq_l/p) */
                gen_qemu_ldq_l(va, addr, MMU_PHYS_IDX);
                break;
            case 0x4:
                /* Longword virtual PTE fetch (hw_ldl/v) */
                goto invalid_opc;
            case 0x5:
                /* Quadword virtual PTE fetch (hw_ldq/v) */
                goto invalid_opc;
            case 0x6:
            case 0x7:
                /* Invalid */
                goto invalid_opc;
            case 0x8:
                /* Longword virtual access (hw_ldl) */
                goto invalid_opc;
            case 0x9:
                /* Quadword virtual access (hw_ldq) */
                goto invalid_opc;
            case 0xA:
                /* Longword virtual access with protection check (hw_ldl/w) */
                tcg_gen_qemu_ld_i64(va, addr, MMU_KERNEL_IDX, MO_LESL);
                break;
            case 0xB:
                /* Quadword virtual access with protection check (hw_ldq/w) */
                tcg_gen_qemu_ld_i64(va, addr, MMU_KERNEL_IDX, MO_LEQ);
                break;
            case 0xC:
                /* Longword virtual access with alt access mode (hw_ldl/a) */
                goto invalid_opc;
            case 0xD:
                /* Quadword virtual access with alt access mode (hw_ldq/a) */
                goto invalid_opc;
            case 0xE:
                /* Longword virtual access with alternate access mode and
                   protection checks (hw_ldl/wa) */
                tcg_gen_qemu_ld_i64(va, addr, MMU_USER_IDX, MO_LESL);
                break;
            case 0xF:
                /* Quadword virtual access with alternate access mode and
                   protection checks (hw_ldq/wa) */
                tcg_gen_qemu_ld_i64(va, addr, MMU_USER_IDX, MO_LEQ);
                break;
            }
            tcg_temp_free(addr);
            break;
        }
#else
        goto invalid_opc;
#endif
    case 0x1C:
        vc = dest_gpr(ctx, rc);
        if (fn7 == 0x70) {
            /* FTOIT */
            REQUIRE_TB_FLAG(TB_FLAGS_AMASK_FIX);
            REQUIRE_REG_31(rb);
            va = load_fpr(ctx, ra);
            tcg_gen_mov_i64(vc, va);
            break;
        } else if (fn7 == 0x78) {
            /* FTOIS */
            REQUIRE_TB_FLAG(TB_FLAGS_AMASK_FIX);
            REQUIRE_REG_31(rb);
            t32 = tcg_temp_new_i32();
            va = load_fpr(ctx, ra);
            gen_helper_s_to_memory(t32, va);
            tcg_gen_ext_i32_i64(vc, t32);
            tcg_temp_free_i32(t32);
            break;
        }

        vb = load_gpr_lit(ctx, rb, lit, islit);
        switch (fn7) {
        case 0x00:
            /* SEXTB */
            REQUIRE_TB_FLAG(TB_FLAGS_AMASK_BWX);
            REQUIRE_REG_31(ra);
            tcg_gen_ext8s_i64(vc, vb);
            break;
        case 0x01:
            /* SEXTW */
            REQUIRE_TB_FLAG(TB_FLAGS_AMASK_BWX);
            REQUIRE_REG_31(ra);
            tcg_gen_ext16s_i64(vc, vb);
            break;
        case 0x30:
            /* CTPOP */
            REQUIRE_TB_FLAG(TB_FLAGS_AMASK_CIX);
            REQUIRE_REG_31(ra);
            REQUIRE_NO_LIT;
            tcg_gen_ctpop_i64(vc, vb);
            break;
        case 0x31:
            /* PERR */
            REQUIRE_TB_FLAG(TB_FLAGS_AMASK_MVI);
            REQUIRE_NO_LIT;
            va = load_gpr(ctx, ra);
            gen_helper_perr(vc, va, vb);
            break;
        case 0x32:
            /* CTLZ */
            REQUIRE_TB_FLAG(TB_FLAGS_AMASK_CIX);
            REQUIRE_REG_31(ra);
            REQUIRE_NO_LIT;
            tcg_gen_clzi_i64(vc, vb, 64);
            break;
        case 0x33:
            /* CTTZ */
            REQUIRE_TB_FLAG(TB_FLAGS_AMASK_CIX);
            REQUIRE_REG_31(ra);
            REQUIRE_NO_LIT;
            tcg_gen_ctzi_i64(vc, vb, 64);
            break;
        case 0x34:
            /* UNPKBW */
            REQUIRE_TB_FLAG(TB_FLAGS_AMASK_MVI);
            REQUIRE_REG_31(ra);
            REQUIRE_NO_LIT;
            gen_helper_unpkbw(vc, vb);
            break;
        case 0x35:
            /* UNPKBL */
            REQUIRE_TB_FLAG(TB_FLAGS_AMASK_MVI);
            REQUIRE_REG_31(ra);
            REQUIRE_NO_LIT;
            gen_helper_unpkbl(vc, vb);
            break;
        case 0x36:
            /* PKWB */
            REQUIRE_TB_FLAG(TB_FLAGS_AMASK_MVI);
            REQUIRE_REG_31(ra);
            REQUIRE_NO_LIT;
            gen_helper_pkwb(vc, vb);
            break;
        case 0x37:
            /* PKLB */
            REQUIRE_TB_FLAG(TB_FLAGS_AMASK_MVI);
            REQUIRE_REG_31(ra);
            REQUIRE_NO_LIT;
            gen_helper_pklb(vc, vb);
            break;
        case 0x38:
            /* MINSB8 */
            REQUIRE_TB_FLAG(TB_FLAGS_AMASK_MVI);
            va = load_gpr(ctx, ra);
            gen_helper_minsb8(vc, va, vb);
            break;
        case 0x39:
            /* MINSW4 */
            REQUIRE_TB_FLAG(TB_FLAGS_AMASK_MVI);
            va = load_gpr(ctx, ra);
            gen_helper_minsw4(vc, va, vb);
            break;
        case 0x3A:
            /* MINUB8 */
            REQUIRE_TB_FLAG(TB_FLAGS_AMASK_MVI);
            va = load_gpr(ctx, ra);
            gen_helper_minub8(vc, va, vb);
            break;
        case 0x3B:
            /* MINUW4 */
            REQUIRE_TB_FLAG(TB_FLAGS_AMASK_MVI);
            va = load_gpr(ctx, ra);
            gen_helper_minuw4(vc, va, vb);
            break;
        case 0x3C:
            /* MAXUB8 */
            REQUIRE_TB_FLAG(TB_FLAGS_AMASK_MVI);
            va = load_gpr(ctx, ra);
            gen_helper_maxub8(vc, va, vb);
            break;
        case 0x3D:
            /* MAXUW4 */
            REQUIRE_TB_FLAG(TB_FLAGS_AMASK_MVI);
            va = load_gpr(ctx, ra);
            gen_helper_maxuw4(vc, va, vb);
            break;
        case 0x3E:
            /* MAXSB8 */
            REQUIRE_TB_FLAG(TB_FLAGS_AMASK_MVI);
            va = load_gpr(ctx, ra);
            gen_helper_maxsb8(vc, va, vb);
            break;
        case 0x3F:
            /* MAXSW4 */
            REQUIRE_TB_FLAG(TB_FLAGS_AMASK_MVI);
            va = load_gpr(ctx, ra);
            gen_helper_maxsw4(vc, va, vb);
            break;
        default:
            goto invalid_opc;
        }
        break;
    case 0x1D:
        /* HW_MTPR (PALcode) */
#ifndef CONFIG_USER_ONLY
        REQUIRE_TB_FLAG(TB_FLAGS_PAL_MODE);
        vb = load_gpr(ctx, rb);
        ret = gen_mtpr(ctx, vb, insn & 0xffff);
        break;
#else
        goto invalid_opc;
#endif
    case 0x1E:
        /* HW_RET (PALcode) */
#ifndef CONFIG_USER_ONLY
        REQUIRE_TB_FLAG(TB_FLAGS_PAL_MODE);
        if (rb == 31) {
            /* Pre-EV6 CPUs interpreted this as HW_REI, loading the return
               address from EXC_ADDR.  This turns out to be useful for our
               emulation PALcode, so continue to accept it.  */
            ctx->lit = vb = tcg_temp_new();
            tcg_gen_ld_i64(vb, cpu_env, offsetof(CPUAlphaState, exc_addr));
        } else {
            vb = load_gpr(ctx, rb);
        }
        tmp = tcg_temp_new();
        tcg_gen_movi_i64(tmp, 0);
        tcg_gen_st8_i64(tmp, cpu_env, offsetof(CPUAlphaState, intr_flag));
        tcg_gen_movi_i64(cpu_lock_addr, -1);
        tcg_gen_andi_i64(tmp, vb, 1);
        tcg_gen_st8_i64(tmp, cpu_env, offsetof(CPUAlphaState, pal_mode));
        tcg_temp_free(tmp);
        tcg_gen_andi_i64(cpu_pc, vb, ~3);
        ret = EXIT_PC_UPDATED;
        break;
#else
        goto invalid_opc;
#endif
    case 0x1F:
        /* HW_ST (PALcode) */
#ifndef CONFIG_USER_ONLY
        REQUIRE_TB_FLAG(TB_FLAGS_PAL_MODE);
        switch ((insn >> 12) & 0xF) {
        case 0x0:
            /* Longword physical access */
            va = load_gpr(ctx, ra);
            vb = load_gpr(ctx, rb);
            tmp = tcg_temp_new();
            tcg_gen_addi_i64(tmp, vb, disp12);
            tcg_gen_qemu_st_i64(va, tmp, MMU_PHYS_IDX, MO_LESL);
            tcg_temp_free(tmp);
            break;
        case 0x1:
            /* Quadword physical access */
            va = load_gpr(ctx, ra);
            vb = load_gpr(ctx, rb);
            tmp = tcg_temp_new();
            tcg_gen_addi_i64(tmp, vb, disp12);
            tcg_gen_qemu_st_i64(va, tmp, MMU_PHYS_IDX, MO_LEQ);
            tcg_temp_free(tmp);
            break;
        case 0x2:
            /* Longword physical access with lock */
            ret = gen_store_conditional(ctx, ra, rb, disp12,
                                        MMU_PHYS_IDX, MO_LESL);
            break;
        case 0x3:
            /* Quadword physical access with lock */
            ret = gen_store_conditional(ctx, ra, rb, disp12,
                                        MMU_PHYS_IDX, MO_LEQ);
            break;
        case 0x4:
            /* Longword virtual access */
            goto invalid_opc;
        case 0x5:
            /* Quadword virtual access */
            goto invalid_opc;
        case 0x6 ... 0xB:
            /* Invalid */
            goto invalid_opc;
        case 0xC:
            /* Longword virtual access with alternate access mode */
            goto invalid_opc;
        case 0xD:
            /* Quadword virtual access with alternate access mode */
            goto invalid_opc;
        case 0xE:
        case 0xF:
            /* Invalid */
            goto invalid_opc;
        }
        break;
#else
        goto invalid_opc;
#endif
    case 0x20:
        /* LDF */
        gen_load_mem(ctx, &gen_qemu_ldf, ra, rb, disp16, 1, 0);
        break;
    case 0x21:
        /* LDG */
        gen_load_mem(ctx, &gen_qemu_ldg, ra, rb, disp16, 1, 0);
        break;
    case 0x22:
        /* LDS */
        gen_load_mem(ctx, &gen_qemu_lds, ra, rb, disp16, 1, 0);
        break;
    case 0x23:
        /* LDT */
        gen_load_mem(ctx, &tcg_gen_qemu_ld64, ra, rb, disp16, 1, 0);
        break;
    case 0x24:
        /* STF */
        gen_store_mem(ctx, &gen_qemu_stf, ra, rb, disp16, 1, 0);
        break;
    case 0x25:
        /* STG */
        gen_store_mem(ctx, &gen_qemu_stg, ra, rb, disp16, 1, 0);
        break;
    case 0x26:
        /* STS */
        gen_store_mem(ctx, &gen_qemu_sts, ra, rb, disp16, 1, 0);
        break;
    case 0x27:
        /* STT */
        gen_store_mem(ctx, &tcg_gen_qemu_st64, ra, rb, disp16, 1, 0);
        break;
    case 0x28:
        /* LDL */
        gen_load_mem(ctx, &tcg_gen_qemu_ld32s, ra, rb, disp16, 0, 0);
        break;
    case 0x29:
        /* LDQ */
        gen_load_mem(ctx, &tcg_gen_qemu_ld64, ra, rb, disp16, 0, 0);
        break;
    case 0x2A:
        /* LDL_L */
        gen_load_mem(ctx, &gen_qemu_ldl_l, ra, rb, disp16, 0, 0);
        break;
    case 0x2B:
        /* LDQ_L */
        gen_load_mem(ctx, &gen_qemu_ldq_l, ra, rb, disp16, 0, 0);
        break;
    case 0x2C:
        /* STL */
        gen_store_mem(ctx, &tcg_gen_qemu_st32, ra, rb, disp16, 0, 0);
        break;
    case 0x2D:
        /* STQ */
        gen_store_mem(ctx, &tcg_gen_qemu_st64, ra, rb, disp16, 0, 0);
        break;
    case 0x2E:
        /* STL_C */
        ret = gen_store_conditional(ctx, ra, rb, disp16,
                                    ctx->mem_idx, MO_LESL);
        break;
    case 0x2F:
        /* STQ_C */
        ret = gen_store_conditional(ctx, ra, rb, disp16,
                                    ctx->mem_idx, MO_LEQ);
        break;
    case 0x30:
        /* BR */
        ret = gen_bdirect(ctx, ra, disp21);
        break;
    case 0x31: /* FBEQ */
        ret = gen_fbcond(ctx, TCG_COND_EQ, ra, disp21);
        break;
    case 0x32: /* FBLT */
        ret = gen_fbcond(ctx, TCG_COND_LT, ra, disp21);
        break;
    case 0x33: /* FBLE */
        ret = gen_fbcond(ctx, TCG_COND_LE, ra, disp21);
        break;
    case 0x34:
        /* BSR */
        ret = gen_bdirect(ctx, ra, disp21);
        break;
    case 0x35: /* FBNE */
        ret = gen_fbcond(ctx, TCG_COND_NE, ra, disp21);
        break;
    case 0x36: /* FBGE */
        ret = gen_fbcond(ctx, TCG_COND_GE, ra, disp21);
        break;
    case 0x37: /* FBGT */
        ret = gen_fbcond(ctx, TCG_COND_GT, ra, disp21);
        break;
    case 0x38:
        /* BLBC */
        ret = gen_bcond(ctx, TCG_COND_EQ, ra, disp21, 1);
        break;
    case 0x39:
        /* BEQ */
        ret = gen_bcond(ctx, TCG_COND_EQ, ra, disp21, 0);
        break;
    case 0x3A:
        /* BLT */
        ret = gen_bcond(ctx, TCG_COND_LT, ra, disp21, 0);
        break;
    case 0x3B:
        /* BLE */
        ret = gen_bcond(ctx, TCG_COND_LE, ra, disp21, 0);
        break;
    case 0x3C:
        /* BLBS */
        ret = gen_bcond(ctx, TCG_COND_NE, ra, disp21, 1);
        break;
    case 0x3D:
        /* BNE */
        ret = gen_bcond(ctx, TCG_COND_NE, ra, disp21, 0);
        break;
    case 0x3E:
        /* BGE */
        ret = gen_bcond(ctx, TCG_COND_GE, ra, disp21, 0);
        break;
    case 0x3F:
        /* BGT */
        ret = gen_bcond(ctx, TCG_COND_GT, ra, disp21, 0);
        break;
    default:
    invalid_opc:
        ret = gen_invalid(ctx);
        break;
    }

    return ret;
}
void gen_intermediate_code(CPUAlphaState *env, struct TranslationBlock *tb)
{
    AlphaCPU *cpu = alpha_env_get_cpu(env);
    CPUState *cs = CPU(cpu);
    DisasContext ctx, *ctxp = &ctx;
    target_ulong pc_start;
    target_ulong pc_mask;
    uint32_t insn;
    ExitStatus ret;
    int num_insns;
    int max_insns;

    pc_start = tb->pc;

    ctx.tb = tb;
    ctx.pc = pc_start;
    ctx.mem_idx = cpu_mmu_index(env, false);
    ctx.implver = env->implver;
    ctx.singlestep_enabled = cs->singlestep_enabled;

#ifdef CONFIG_USER_ONLY
    ctx.ir = cpu_std_ir;
#else
    ctx.palbr = env->palbr;
    ctx.ir = (tb->flags & TB_FLAGS_PAL_MODE ? cpu_pal_ir : cpu_std_ir);
#endif

    /* ??? Every TB begins with unset rounding mode, to be initialized on
       the first fp insn of the TB.  Alternately we could define a proper
       default for every TB (e.g. QUAL_RM_N or QUAL_RM_D) and make sure
       to reset the FP_STATUS to that default at the end of any TB that
       changes the default.  We could even (gasp) dynamically figure out
       what default would be most efficient given the running program.  */
    ctx.tb_rm = -1;
    /* Similarly for flush-to-zero.  */
    ctx.tb_ftz = -1;

    TCGV_UNUSED_I64(ctx.zero);
    TCGV_UNUSED_I64(ctx.sink);
    TCGV_UNUSED_I64(ctx.lit);

    num_insns = 0;
    max_insns = tb->cflags & CF_COUNT_MASK;
    if (max_insns == 0) {
        max_insns = CF_COUNT_MASK;
    }
    if (max_insns > TCG_MAX_INSNS) {
        max_insns = TCG_MAX_INSNS;
    }

    if (in_superpage(&ctx, pc_start)) {
        pc_mask = (1ULL << 41) - 1;
    } else {
        pc_mask = ~TARGET_PAGE_MASK;
    }

    gen_tb_start(tb);
    do {
        tcg_gen_insn_start(ctx.pc);
        num_insns++;

        if (unlikely(cpu_breakpoint_test(cs, ctx.pc, BP_ANY))) {
            ret = gen_excp(&ctx, EXCP_DEBUG, 0);
            /* The address covered by the breakpoint must be included in
               [tb->pc, tb->pc + tb->size) in order for it to be
               properly cleared -- thus we increment the PC here so that
               the logic setting tb->size below does the right thing.  */
            ctx.pc += 4;
            break;
        }
        if (num_insns == max_insns && (tb->cflags & CF_LAST_IO)) {
            gen_io_start();
        }
        insn = cpu_ldl_code(env, ctx.pc);

        ctx.pc += 4;
        ret = translate_one(ctxp, insn);
        free_context_temps(ctxp);

        /* If we reach a page boundary, are single stepping,
           or exhaust instruction count, stop generation.  */
        if (ret == NO_EXIT
            && ((ctx.pc & pc_mask) == 0
                || tcg_op_buf_full()
                || num_insns >= max_insns
                || singlestep
                || ctx.singlestep_enabled)) {
            ret = EXIT_PC_STALE;
        }
    } while (ret == NO_EXIT);

    if (tb->cflags & CF_LAST_IO) {
        gen_io_end();
    }

    switch (ret) {
    case EXIT_GOTO_TB:
    case EXIT_NORETURN:
        break;
    case EXIT_PC_STALE:
        tcg_gen_movi_i64(cpu_pc, ctx.pc);
        /* FALLTHRU */
    case EXIT_PC_UPDATED:
        if (ctx.singlestep_enabled) {
            gen_excp_1(EXCP_DEBUG, 0);
        } else {
            tcg_gen_exit_tb(0);
        }
        break;
    default:
        abort();
    }

    gen_tb_end(tb, num_insns);

    tb->size = ctx.pc - pc_start;
    tb->icount = num_insns;

#ifdef DEBUG_DISAS
    if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)
        && qemu_log_in_addr_range(pc_start)) {
        qemu_log_lock();
        qemu_log("IN: %s\n", lookup_symbol(pc_start));
        log_target_disas(cs, pc_start, ctx.pc - pc_start, 1);
        qemu_log("\n");
        qemu_log_unlock();
    }
#endif
}
void restore_state_to_opc(CPUAlphaState *env, TranslationBlock *tb,
                          target_ulong *data)
{
    env->pc = data[0];
}