/*
 * Alpha emulation cpu translation for qemu.
 *
 * Copyright (c) 2007 Jocelyn Mayer
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"
#include "cpu.h"
#include "disas/disas.h"
#include "qemu/host-utils.h"
#include "exec/exec-all.h"
#include "tcg-op.h"
#include "exec/cpu_ldst.h"

#include "exec/helper-proto.h"
#include "exec/helper-gen.h"

#include "trace-tcg.h"
#include "exec/log.h"
#undef ALPHA_DEBUG_DISAS
#define CONFIG_SOFTFLOAT_INLINE

#ifdef ALPHA_DEBUG_DISAS
#  define LOG_DISAS(...) qemu_log_mask(CPU_LOG_TB_IN_ASM, ## __VA_ARGS__)
#else
#  define LOG_DISAS(...) do { } while (0)
#endif
typedef struct DisasContext DisasContext;
struct DisasContext {
    struct TranslationBlock *tb;
    uint64_t pc;
#ifndef CONFIG_USER_ONLY
    uint64_t palbr;
#endif
    int mem_idx;

    /* Current rounding mode for this TB.  */
    int tb_rm;
    /* Current flush-to-zero setting for this TB.  */
    int tb_ftz;

    /* implver value for this CPU.  */
    int implver;

    /* The set of registers active in the current context.  */
    TCGv *ir;

    /* Temporaries for $31 and $f31 as source and destination.  */
    TCGv zero;
    TCGv sink;
    /* Temporary for immediate constants.  */
    TCGv lit;

    bool singlestep_enabled;
};
/* Return values from translate_one, indicating the state of the TB.
   Note that zero indicates that we are not exiting the TB.  */
typedef enum {
    NO_EXIT,

    /* We have emitted one or more goto_tb.  No fixup required.  */
    EXIT_GOTO_TB,

    /* We are not using a goto_tb (for whatever reason), but have updated
       the PC (for whatever reason), so there's no need to do it again on
       exiting the TB.  */
    EXIT_PC_UPDATED,

    /* We are exiting the TB, but have neither emitted a goto_tb, nor
       updated the PC for the next instruction to be executed.  */
    EXIT_PC_STALE,

    /* We are ending the TB with a noreturn function call, e.g. longjmp.
       No following code will be executed.  */
    EXIT_NORETURN,
} ExitStatus;
/* global register indexes */
static TCGv_env cpu_env;
static TCGv cpu_std_ir[31];
static TCGv cpu_fir[31];
static TCGv cpu_pc;
static TCGv cpu_lock_addr;
static TCGv cpu_lock_value;

#ifndef CONFIG_USER_ONLY
static TCGv cpu_pal_ir[31];
#endif

#include "exec/gen-icount.h"
void alpha_translate_init(void)
{
#define DEF_VAR(V)  { &cpu_##V, #V, offsetof(CPUAlphaState, V) }

    typedef struct { TCGv *var; const char *name; int ofs; } GlobalVar;
    static const GlobalVar vars[] = {
        DEF_VAR(pc),
        DEF_VAR(lock_addr),
        DEF_VAR(lock_value),
    };

#undef DEF_VAR

    /* Use the symbolic register names that match the disassembler.  */
    static const char greg_names[31][4] = {
        "v0", "t0", "t1", "t2", "t3", "t4", "t5", "t6",
        "t7", "s0", "s1", "s2", "s3", "s4", "s5", "fp",
        "a0", "a1", "a2", "a3", "a4", "a5", "t8", "t9",
        "t10", "t11", "ra", "t12", "at", "gp", "sp"
    };
    static const char freg_names[31][4] = {
        "f0", "f1", "f2", "f3", "f4", "f5", "f6", "f7",
        "f8", "f9", "f10", "f11", "f12", "f13", "f14", "f15",
        "f16", "f17", "f18", "f19", "f20", "f21", "f22", "f23",
        "f24", "f25", "f26", "f27", "f28", "f29", "f30"
    };
#ifndef CONFIG_USER_ONLY
    static const char shadow_names[8][8] = {
        "pal_t7", "pal_s0", "pal_s1", "pal_s2",
        "pal_s3", "pal_s4", "pal_s5", "pal_t11"
    };
#endif

    static bool done_init = 0;
    int i;

    if (done_init) {
        return;
    }
    done_init = 1;

    cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");
    tcg_ctx.tcg_env = cpu_env;

    for (i = 0; i < 31; i++) {
        cpu_std_ir[i] = tcg_global_mem_new_i64(cpu_env,
                                               offsetof(CPUAlphaState, ir[i]),
                                               greg_names[i]);
    }

    for (i = 0; i < 31; i++) {
        cpu_fir[i] = tcg_global_mem_new_i64(cpu_env,
                                            offsetof(CPUAlphaState, fir[i]),
                                            freg_names[i]);
    }

#ifndef CONFIG_USER_ONLY
    memcpy(cpu_pal_ir, cpu_std_ir, sizeof(cpu_pal_ir));
    for (i = 0; i < 8; i++) {
        int r = (i == 7 ? 25 : i + 8);
        cpu_pal_ir[r] = tcg_global_mem_new_i64(cpu_env,
                                               offsetof(CPUAlphaState,
                                                        shadow[i]),
                                               shadow_names[i]);
    }
#endif

    for (i = 0; i < ARRAY_SIZE(vars); ++i) {
        const GlobalVar *v = &vars[i];
        *v->var = tcg_global_mem_new_i64(cpu_env, v->ofs, v->name);
    }
}
static TCGv load_zero(DisasContext *ctx)
{
    if (TCGV_IS_UNUSED_I64(ctx->zero)) {
        ctx->zero = tcg_const_i64(0);
    }
    return ctx->zero;
}

static TCGv dest_sink(DisasContext *ctx)
{
    if (TCGV_IS_UNUSED_I64(ctx->sink)) {
        ctx->sink = tcg_temp_new();
    }
    return ctx->sink;
}

static void free_context_temps(DisasContext *ctx)
{
    if (!TCGV_IS_UNUSED_I64(ctx->sink)) {
        tcg_gen_discard_i64(ctx->sink);
        tcg_temp_free(ctx->sink);
        TCGV_UNUSED_I64(ctx->sink);
    }
    if (!TCGV_IS_UNUSED_I64(ctx->zero)) {
        tcg_temp_free(ctx->zero);
        TCGV_UNUSED_I64(ctx->zero);
    }
    if (!TCGV_IS_UNUSED_I64(ctx->lit)) {
        tcg_temp_free(ctx->lit);
        TCGV_UNUSED_I64(ctx->lit);
    }
}
static TCGv load_gpr(DisasContext *ctx, unsigned reg)
{
    if (likely(reg < 31)) {
        return ctx->ir[reg];
    } else {
        return load_zero(ctx);
    }
}

static TCGv load_gpr_lit(DisasContext *ctx, unsigned reg,
                         uint8_t lit, bool islit)
{
    if (islit) {
        ctx->lit = tcg_const_i64(lit);
        return ctx->lit;
    } else if (likely(reg < 31)) {
        return ctx->ir[reg];
    } else {
        return load_zero(ctx);
    }
}

static TCGv dest_gpr(DisasContext *ctx, unsigned reg)
{
    if (likely(reg < 31)) {
        return ctx->ir[reg];
    } else {
        return dest_sink(ctx);
    }
}

static TCGv load_fpr(DisasContext *ctx, unsigned reg)
{
    if (likely(reg < 31)) {
        return cpu_fir[reg];
    } else {
        return load_zero(ctx);
    }
}

static TCGv dest_fpr(DisasContext *ctx, unsigned reg)
{
    if (likely(reg < 31)) {
        return cpu_fir[reg];
    } else {
        return dest_sink(ctx);
    }
}
static void gen_excp_1(int exception, int error_code)
{
    TCGv_i32 tmp1, tmp2;

    tmp1 = tcg_const_i32(exception);
    tmp2 = tcg_const_i32(error_code);
    gen_helper_excp(cpu_env, tmp1, tmp2);
    tcg_temp_free_i32(tmp2);
    tcg_temp_free_i32(tmp1);
}

static ExitStatus gen_excp(DisasContext *ctx, int exception, int error_code)
{
    tcg_gen_movi_i64(cpu_pc, ctx->pc);
    gen_excp_1(exception, error_code);
    return EXIT_NORETURN;
}

static inline ExitStatus gen_invalid(DisasContext *ctx)
{
    return gen_excp(ctx, EXCP_OPCDEC, 0);
}
static inline void gen_qemu_ldf(TCGv t0, TCGv t1, int flags)
{
    TCGv_i32 tmp32 = tcg_temp_new_i32();
    tcg_gen_qemu_ld_i32(tmp32, t1, flags, MO_LEUL);
    gen_helper_memory_to_f(t0, tmp32);
    tcg_temp_free_i32(tmp32);
}

static inline void gen_qemu_ldg(TCGv t0, TCGv t1, int flags)
{
    TCGv tmp = tcg_temp_new();
    tcg_gen_qemu_ld_i64(tmp, t1, flags, MO_LEQ);
    gen_helper_memory_to_g(t0, tmp);
    tcg_temp_free(tmp);
}

static inline void gen_qemu_lds(TCGv t0, TCGv t1, int flags)
{
    TCGv_i32 tmp32 = tcg_temp_new_i32();
    tcg_gen_qemu_ld_i32(tmp32, t1, flags, MO_LEUL);
    gen_helper_memory_to_s(t0, tmp32);
    tcg_temp_free_i32(tmp32);
}

static inline void gen_qemu_ldl_l(TCGv t0, TCGv t1, int flags)
{
    tcg_gen_qemu_ld_i64(t0, t1, flags, MO_LESL);
    tcg_gen_mov_i64(cpu_lock_addr, t1);
    tcg_gen_mov_i64(cpu_lock_value, t0);
}

static inline void gen_qemu_ldq_l(TCGv t0, TCGv t1, int flags)
{
    tcg_gen_qemu_ld_i64(t0, t1, flags, MO_LEQ);
    tcg_gen_mov_i64(cpu_lock_addr, t1);
    tcg_gen_mov_i64(cpu_lock_value, t0);
}
static inline void gen_load_mem(DisasContext *ctx,
                                void (*tcg_gen_qemu_load)(TCGv t0, TCGv t1,
                                                          int flags),
                                int ra, int rb, int32_t disp16, bool fp,
                                bool clear)
{
    TCGv tmp, addr, va;

    /* LDQ_U with ra $31 is UNOP.  Other various loads are forms of
       prefetches, which we can treat as nops.  No worries about
       missed exceptions here.  */
    if (unlikely(ra == 31)) {
        return;
    }

    tmp = tcg_temp_new();
    addr = load_gpr(ctx, rb);

    if (disp16) {
        tcg_gen_addi_i64(tmp, addr, disp16);
        addr = tmp;
    }
    if (clear) {
        tcg_gen_andi_i64(tmp, addr, ~0x7);
        addr = tmp;
    }

    va = (fp ? cpu_fir[ra] : ctx->ir[ra]);
    tcg_gen_qemu_load(va, addr, ctx->mem_idx);

    tcg_temp_free(tmp);
}
static inline void gen_qemu_stf(TCGv t0, TCGv t1, int flags)
{
    TCGv_i32 tmp32 = tcg_temp_new_i32();
    gen_helper_f_to_memory(tmp32, t0);
    tcg_gen_qemu_st_i32(tmp32, t1, flags, MO_LEUL);
    tcg_temp_free_i32(tmp32);
}

static inline void gen_qemu_stg(TCGv t0, TCGv t1, int flags)
{
    TCGv tmp = tcg_temp_new();
    gen_helper_g_to_memory(tmp, t0);
    tcg_gen_qemu_st_i64(tmp, t1, flags, MO_LEQ);
    tcg_temp_free(tmp);
}

static inline void gen_qemu_sts(TCGv t0, TCGv t1, int flags)
{
    TCGv_i32 tmp32 = tcg_temp_new_i32();
    gen_helper_s_to_memory(tmp32, t0);
    tcg_gen_qemu_st_i32(tmp32, t1, flags, MO_LEUL);
    tcg_temp_free_i32(tmp32);
}
static inline void gen_store_mem(DisasContext *ctx,
                                 void (*tcg_gen_qemu_store)(TCGv t0, TCGv t1,
                                                            int flags),
                                 int ra, int rb, int32_t disp16, bool fp,
                                 bool clear)
{
    TCGv tmp, addr, va;

    tmp = tcg_temp_new();
    addr = load_gpr(ctx, rb);

    if (disp16) {
        tcg_gen_addi_i64(tmp, addr, disp16);
        addr = tmp;
    }
    if (clear) {
        tcg_gen_andi_i64(tmp, addr, ~0x7);
        addr = tmp;
    }

    va = (fp ? load_fpr(ctx, ra) : load_gpr(ctx, ra));
    tcg_gen_qemu_store(va, addr, ctx->mem_idx);

    tcg_temp_free(tmp);
}
static ExitStatus gen_store_conditional(DisasContext *ctx, int ra, int rb,
                                        int32_t disp16, int mem_idx,
                                        TCGMemOp op)
{
    TCGLabel *lab_fail, *lab_done;
    TCGv addr, val;

    addr = tcg_temp_new_i64();
    tcg_gen_addi_i64(addr, load_gpr(ctx, rb), disp16);
    free_context_temps(ctx);

    lab_fail = gen_new_label();
    lab_done = gen_new_label();
    tcg_gen_brcond_i64(TCG_COND_NE, addr, cpu_lock_addr, lab_fail);
    tcg_temp_free_i64(addr);

    val = tcg_temp_new_i64();
    tcg_gen_atomic_cmpxchg_i64(val, cpu_lock_addr, cpu_lock_value,
                               load_gpr(ctx, ra), mem_idx, op);
    free_context_temps(ctx);

    if (ra != 31) {
        tcg_gen_setcond_i64(TCG_COND_EQ, ctx->ir[ra], val, cpu_lock_value);
    }
    tcg_temp_free_i64(val);
    tcg_gen_br(lab_done);

    gen_set_label(lab_fail);
    if (ra != 31) {
        tcg_gen_movi_i64(ctx->ir[ra], 0);
    }

    gen_set_label(lab_done);
    tcg_gen_movi_i64(cpu_lock_addr, -1);
    return NO_EXIT;
}
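
/* Annotation for clarity: the cmpxchg above is how LDx_L/STx_C pairs get
   LL/SC semantics.  The locked load records the address and value in
   cpu_lock_addr/cpu_lock_value; the store-conditional succeeds (writing 1
   into ra) only if the address still matches and memory still holds the
   recorded value, and either way the lock is invalidated afterward.  */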
static bool in_superpage(DisasContext *ctx, int64_t addr)
{
#ifndef CONFIG_USER_ONLY
    return ((ctx->tb->flags & TB_FLAGS_USER_MODE) == 0
            && addr >> TARGET_VIRT_ADDR_SPACE_BITS == -1
            && ((addr >> 41) & 3) == 2);
#else
    return false;
#endif
}
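
/* Added example (assuming the 43-bit virtual address space QEMU uses for
   Alpha): a KSEG address such as 0xfffffc0000000000 sign-extends to -1
   above TARGET_VIRT_ADDR_SPACE_BITS and has (addr >> 41) & 3 == 2, so it
   satisfies the superpage test above.  */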
static bool use_goto_tb(DisasContext *ctx, uint64_t dest)
{
    /* Suppress goto_tb in the case of single-stepping and IO.  */
    if ((ctx->tb->cflags & CF_LAST_IO)
        || ctx->singlestep_enabled || singlestep) {
        return false;
    }
#ifndef CONFIG_USER_ONLY
    /* If the destination is in the superpage, the page perms can't change.  */
    if (in_superpage(ctx, dest)) {
        return true;
    }
    /* Check for the dest on the same page as the start of the TB.  */
    return ((ctx->tb->pc ^ dest) & TARGET_PAGE_MASK) == 0;
#else
    return true;
#endif
}
static ExitStatus gen_bdirect(DisasContext *ctx, int ra, int32_t disp)
{
    uint64_t dest = ctx->pc + (disp << 2);

    if (ra != 31) {
        tcg_gen_movi_i64(ctx->ir[ra], ctx->pc);
    }

    /* Notice branch-to-next; used to initialize RA with the PC.  */
    if (disp == 0) {
        return 0;
    } else if (use_goto_tb(ctx, dest)) {
        tcg_gen_goto_tb(0);
        tcg_gen_movi_i64(cpu_pc, dest);
        tcg_gen_exit_tb((uintptr_t)ctx->tb);
        return EXIT_GOTO_TB;
    } else {
        tcg_gen_movi_i64(cpu_pc, dest);
        return EXIT_PC_UPDATED;
    }
}
static ExitStatus gen_bcond_internal(DisasContext *ctx, TCGCond cond,
                                     TCGv cmp, int32_t disp)
{
    uint64_t dest = ctx->pc + (disp << 2);
    TCGLabel *lab_true = gen_new_label();

    if (use_goto_tb(ctx, dest)) {
        tcg_gen_brcondi_i64(cond, cmp, 0, lab_true);

        tcg_gen_goto_tb(0);
        tcg_gen_movi_i64(cpu_pc, ctx->pc);
        tcg_gen_exit_tb((uintptr_t)ctx->tb);

        gen_set_label(lab_true);
        tcg_gen_goto_tb(1);
        tcg_gen_movi_i64(cpu_pc, dest);
        tcg_gen_exit_tb((uintptr_t)ctx->tb + 1);

        return EXIT_GOTO_TB;
    } else {
        TCGv_i64 z = tcg_const_i64(0);
        TCGv_i64 d = tcg_const_i64(dest);
        TCGv_i64 p = tcg_const_i64(ctx->pc);

        tcg_gen_movcond_i64(cond, cpu_pc, cmp, z, d, p);

        tcg_temp_free_i64(z);
        tcg_temp_free_i64(d);
        tcg_temp_free_i64(p);
        return EXIT_PC_UPDATED;
    }
}
static ExitStatus gen_bcond(DisasContext *ctx, TCGCond cond, int ra,
                            int32_t disp, int mask)
{
    TCGv cmp_tmp;

    if (mask) {
        cmp_tmp = tcg_temp_new();
        tcg_gen_andi_i64(cmp_tmp, load_gpr(ctx, ra), 1);
    } else {
        cmp_tmp = load_gpr(ctx, ra);
    }

    return gen_bcond_internal(ctx, cond, cmp_tmp, disp);
}
/* Fold -0.0 for comparison with COND.  */

static void gen_fold_mzero(TCGCond cond, TCGv dest, TCGv src)
{
    uint64_t mzero = 1ull << 63;

    switch (cond) {
    case TCG_COND_LE:
    case TCG_COND_GT:
        /* For <= or >, the -0.0 value directly compares the way we want.  */
        tcg_gen_mov_i64(dest, src);
        break;

    case TCG_COND_EQ:
    case TCG_COND_NE:
        /* For == or !=, we can simply mask off the sign bit and compare.  */
        tcg_gen_andi_i64(dest, src, mzero - 1);
        break;

    case TCG_COND_GE:
    case TCG_COND_LT:
        /* For >= or <, map -0.0 to +0.0 via comparison and mask.  */
        tcg_gen_setcondi_i64(TCG_COND_NE, dest, src, mzero);
        tcg_gen_neg_i64(dest, dest);
        tcg_gen_and_i64(dest, dest, src);
        break;

    default:
        abort();
    }
}
static ExitStatus gen_fbcond(DisasContext *ctx, TCGCond cond, int ra,
                             int32_t disp)
{
    TCGv cmp_tmp = tcg_temp_new();
    gen_fold_mzero(cond, cmp_tmp, load_fpr(ctx, ra));
    return gen_bcond_internal(ctx, cond, cmp_tmp, disp);
}
static void gen_fcmov(DisasContext *ctx, TCGCond cond, int ra, int rb, int rc)
{
    TCGv va, vb, z;

    z = load_zero(ctx);
    vb = load_fpr(ctx, rb);
    va = tcg_temp_new();
    gen_fold_mzero(cond, va, load_fpr(ctx, ra));

    tcg_gen_movcond_i64(cond, dest_fpr(ctx, rc), va, z, vb, load_fpr(ctx, rc));

    tcg_temp_free(va);
}
#define QUAL_RM_N       0x080   /* Round mode nearest even */
#define QUAL_RM_C       0x000   /* Round mode chopped */
#define QUAL_RM_M       0x040   /* Round mode minus infinity */
#define QUAL_RM_D       0x0c0   /* Round mode dynamic */
#define QUAL_RM_MASK    0x0c0

#define QUAL_U          0x100   /* Underflow enable (fp output) */
#define QUAL_V          0x100   /* Overflow enable (int output) */
#define QUAL_S          0x400   /* Software completion enable */
#define QUAL_I          0x200   /* Inexact detection enable */
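
/* Added worked example: fn11 = 0x780 sets QUAL_S | QUAL_I | QUAL_U with
   QUAL_RM_N rounding and a zero 6-bit function code, i.e. (for the IEEE
   opcode 0x16) ADDS/SUI -- software completion, underflow and inexact
   enabled, round to nearest even.  */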
static void gen_qual_roundmode(DisasContext *ctx, int fn11)
{
    TCGv_i32 tmp;

    fn11 &= QUAL_RM_MASK;
    if (fn11 == ctx->tb_rm) {
        return;
    }
    ctx->tb_rm = fn11;

    tmp = tcg_temp_new_i32();
    switch (fn11) {
    case QUAL_RM_N:
        tcg_gen_movi_i32(tmp, float_round_nearest_even);
        break;
    case QUAL_RM_C:
        tcg_gen_movi_i32(tmp, float_round_to_zero);
        break;
    case QUAL_RM_M:
        tcg_gen_movi_i32(tmp, float_round_down);
        break;
    case QUAL_RM_D:
        tcg_gen_ld8u_i32(tmp, cpu_env,
                         offsetof(CPUAlphaState, fpcr_dyn_round));
        break;
    }

#if defined(CONFIG_SOFTFLOAT_INLINE)
    /* ??? The "fpu/softfloat.h" interface is to call set_float_rounding_mode.
       With CONFIG_SOFTFLOAT that expands to an out-of-line call that just
       sets the one field.  */
    tcg_gen_st8_i32(tmp, cpu_env,
                    offsetof(CPUAlphaState, fp_status.float_rounding_mode));
#else
    gen_helper_setroundmode(tmp);
#endif

    tcg_temp_free_i32(tmp);
}
static void gen_qual_flushzero(DisasContext *ctx, int fn11)
{
    TCGv_i32 tmp;

    fn11 &= QUAL_U;
    if (fn11 == ctx->tb_ftz) {
        return;
    }
    ctx->tb_ftz = fn11;

    tmp = tcg_temp_new_i32();
    if (fn11) {
        /* Underflow is enabled, use the FPCR setting.  */
        tcg_gen_ld8u_i32(tmp, cpu_env,
                         offsetof(CPUAlphaState, fpcr_flush_to_zero));
    } else {
        /* Underflow is disabled, force flush-to-zero.  */
        tcg_gen_movi_i32(tmp, 1);
    }

#if defined(CONFIG_SOFTFLOAT_INLINE)
    tcg_gen_st8_i32(tmp, cpu_env,
                    offsetof(CPUAlphaState, fp_status.flush_to_zero));
#else
    gen_helper_setflushzero(tmp);
#endif

    tcg_temp_free_i32(tmp);
}
static TCGv gen_ieee_input(DisasContext *ctx, int reg, int fn11, int is_cmp)
{
    TCGv val;

    if (unlikely(reg == 31)) {
        val = load_zero(ctx);
    } else {
        val = cpu_fir[reg];
        if ((fn11 & QUAL_S) == 0) {
            if (is_cmp) {
                gen_helper_ieee_input_cmp(cpu_env, val);
            } else {
                gen_helper_ieee_input(cpu_env, val);
            }
        } else {
#ifndef CONFIG_USER_ONLY
            /* In system mode, raise exceptions for denormals like real
               hardware.  In user mode, proceed as if the OS completion
               handler is handling the denormal as per spec.  */
            gen_helper_ieee_input_s(cpu_env, val);
#endif
        }
    }
    return val;
}
static void gen_fp_exc_raise(int rc, int fn11)
{
    /* ??? We ought to be able to do something with imprecise exceptions.
       E.g. notice we're still in the trap shadow of something within the
       TB and do not generate the code to signal the exception; end the TB
       when an exception is forced to arrive, either by consumption of a
       register value or TRAPB or EXCB.  */

    TCGv_i32 reg, ign;
    uint32_t ignore = 0;

    if (!(fn11 & QUAL_U)) {
        /* Note that QUAL_U == QUAL_V, so ignore either.  */
        ignore |= FPCR_UNF | FPCR_IOV;
    }
    if (!(fn11 & QUAL_I)) {
        ignore |= FPCR_INE;
    }
    ign = tcg_const_i32(ignore);

    /* ??? Pass in the regno of the destination so that the helper can
       set EXC_MASK, which contains a bitmask of destination registers
       that have caused arithmetic traps.  A simple userspace emulation
       does not require this.  We do need it for a guest kernel's entArith,
       or if we were to do something clever with imprecise exceptions.  */
    reg = tcg_const_i32(rc + 32);
    if (fn11 & QUAL_S) {
        gen_helper_fp_exc_raise_s(cpu_env, ign, reg);
    } else {
        gen_helper_fp_exc_raise(cpu_env, ign, reg);
    }

    tcg_temp_free_i32(reg);
    tcg_temp_free_i32(ign);
}
static void gen_cvtlq(TCGv vc, TCGv vb)
{
    TCGv tmp = tcg_temp_new();

    /* The arithmetic right shift here, plus the sign-extended mask below
       yields a sign-extended result without an explicit ext32s_i64.  */
    tcg_gen_sari_i64(tmp, vb, 32);
    tcg_gen_shri_i64(vc, vb, 29);
    tcg_gen_andi_i64(tmp, tmp, (int32_t)0xc0000000);
    tcg_gen_andi_i64(vc, vc, 0x3fffffff);
    tcg_gen_or_i64(vc, vc, tmp);

    tcg_temp_free(tmp);
}
static void gen_ieee_arith2(DisasContext *ctx,
                            void (*helper)(TCGv, TCGv_ptr, TCGv),
                            int rb, int rc, int fn11)
{
    TCGv vb;

    gen_qual_roundmode(ctx, fn11);
    gen_qual_flushzero(ctx, fn11);

    vb = gen_ieee_input(ctx, rb, fn11, 0);
    helper(dest_fpr(ctx, rc), cpu_env, vb);

    gen_fp_exc_raise(rc, fn11);
}

#define IEEE_ARITH2(name)                                       \
static inline void glue(gen_, name)(DisasContext *ctx,          \
                                    int rb, int rc, int fn11)   \
{                                                               \
    gen_ieee_arith2(ctx, gen_helper_##name, rb, rc, fn11);      \
}
IEEE_ARITH2(sqrts)
IEEE_ARITH2(sqrtt)
IEEE_ARITH2(cvtst)
IEEE_ARITH2(cvtts)
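
/* Added usage note: each IEEE_ARITH2(name) instantiation expands to a
   gen_<name> wrapper, so e.g. IEEE_ARITH2(sqrts) defines
   gen_sqrts(ctx, rb, rc, fn11), which routes through gen_ieee_arith2
   with gen_helper_sqrts as the operation.  */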
static void gen_cvttq(DisasContext *ctx, int rb, int rc, int fn11)
{
    TCGv vb, vc;

    /* No need to set flushzero, since we have an integer output.  */
    vb = gen_ieee_input(ctx, rb, fn11, 0);
    vc = dest_fpr(ctx, rc);

    /* Almost all integer conversions use cropped rounding;
       special case that.  */
    if ((fn11 & QUAL_RM_MASK) == QUAL_RM_C) {
        gen_helper_cvttq_c(vc, cpu_env, vb);
    } else {
        gen_qual_roundmode(ctx, fn11);
        gen_helper_cvttq(vc, cpu_env, vb);
    }
    gen_fp_exc_raise(rc, fn11);
}
static void gen_ieee_intcvt(DisasContext *ctx,
                            void (*helper)(TCGv, TCGv_ptr, TCGv),
                            int rb, int rc, int fn11)
{
    TCGv vb, vc;

    gen_qual_roundmode(ctx, fn11);
    vb = load_fpr(ctx, rb);
    vc = dest_fpr(ctx, rc);

    /* The only exception that can be raised by integer conversion
       is inexact.  Thus we only need to worry about exceptions when
       inexact handling is requested.  */
    if (fn11 & QUAL_I) {
        helper(vc, cpu_env, vb);
        gen_fp_exc_raise(rc, fn11);
    } else {
        helper(vc, cpu_env, vb);
    }
}

#define IEEE_INTCVT(name)                                       \
static inline void glue(gen_, name)(DisasContext *ctx,          \
                                    int rb, int rc, int fn11)   \
{                                                               \
    gen_ieee_intcvt(ctx, gen_helper_##name, rb, rc, fn11);      \
}
IEEE_INTCVT(cvtqs)
IEEE_INTCVT(cvtqt)
static void gen_cpy_mask(TCGv vc, TCGv va, TCGv vb, bool inv_a, uint64_t mask)
{
    TCGv vmask = tcg_const_i64(mask);
    TCGv tmp = tcg_temp_new_i64();

    if (inv_a) {
        tcg_gen_andc_i64(tmp, vmask, va);
    } else {
        tcg_gen_and_i64(tmp, va, vmask);
    }

    tcg_gen_andc_i64(vc, vb, vmask);
    tcg_gen_or_i64(vc, vc, tmp);

    tcg_temp_free(vmask);
    tcg_temp_free(tmp);
}
static void gen_ieee_arith3(DisasContext *ctx,
                            void (*helper)(TCGv, TCGv_ptr, TCGv, TCGv),
                            int ra, int rb, int rc, int fn11)
{
    TCGv va, vb, vc;

    gen_qual_roundmode(ctx, fn11);
    gen_qual_flushzero(ctx, fn11);

    va = gen_ieee_input(ctx, ra, fn11, 0);
    vb = gen_ieee_input(ctx, rb, fn11, 0);
    vc = dest_fpr(ctx, rc);
    helper(vc, cpu_env, va, vb);

    gen_fp_exc_raise(rc, fn11);
}

#define IEEE_ARITH3(name)                                               \
static inline void glue(gen_, name)(DisasContext *ctx,                  \
                                    int ra, int rb, int rc, int fn11)   \
{                                                                       \
    gen_ieee_arith3(ctx, gen_helper_##name, ra, rb, rc, fn11);          \
}
IEEE_ARITH3(adds)
IEEE_ARITH3(subs)
IEEE_ARITH3(muls)
IEEE_ARITH3(divs)
IEEE_ARITH3(addt)
IEEE_ARITH3(subt)
IEEE_ARITH3(mult)
IEEE_ARITH3(divt)
static void gen_ieee_compare(DisasContext *ctx,
                             void (*helper)(TCGv, TCGv_ptr, TCGv, TCGv),
                             int ra, int rb, int rc, int fn11)
{
    TCGv va, vb, vc;

    va = gen_ieee_input(ctx, ra, fn11, 1);
    vb = gen_ieee_input(ctx, rb, fn11, 1);
    vc = dest_fpr(ctx, rc);
    helper(vc, cpu_env, va, vb);

    gen_fp_exc_raise(rc, fn11);
}

#define IEEE_CMP3(name)                                                 \
static inline void glue(gen_, name)(DisasContext *ctx,                  \
                                    int ra, int rb, int rc, int fn11)   \
{                                                                       \
    gen_ieee_compare(ctx, gen_helper_##name, ra, rb, rc, fn11);         \
}
IEEE_CMP3(cmptun)
IEEE_CMP3(cmpteq)
IEEE_CMP3(cmptlt)
IEEE_CMP3(cmptle)
static inline uint64_t zapnot_mask(uint8_t lit)
{
    uint64_t mask = 0;
    int i;

    for (i = 0; i < 8; ++i) {
        if ((lit >> i) & 1) {
            mask |= 0xffull << (i * 8);
        }
    }
    return mask;
}
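
/* Added worked example: zapnot_mask(0x01) == 0xff and zapnot_mask(0x0f)
   == 0x00000000ffffffffull -- each set bit in LIT keeps the
   corresponding byte of the 64-bit value.  */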
/* Implement zapnot with an immediate operand, which expands to some
   form of immediate AND.  This is a basic building block in the
   definition of many of the other byte manipulation instructions.  */
static void gen_zapnoti(TCGv dest, TCGv src, uint8_t lit)
{
    switch (lit) {
    case 0x00:
        tcg_gen_movi_i64(dest, 0);
        break;
    case 0x01:
        tcg_gen_ext8u_i64(dest, src);
        break;
    case 0x03:
        tcg_gen_ext16u_i64(dest, src);
        break;
    case 0x0f:
        tcg_gen_ext32u_i64(dest, src);
        break;
    case 0xff:
        tcg_gen_mov_i64(dest, src);
        break;
    default:
        tcg_gen_andi_i64(dest, src, zapnot_mask(lit));
        break;
    }
}
/* EXTWH, EXTLH, EXTQH */
static void gen_ext_h(DisasContext *ctx, TCGv vc, TCGv va, int rb, bool islit,
                      uint8_t lit, uint8_t byte_mask)
{
    if (islit) {
        int pos = (64 - lit * 8) & 0x3f;
        int len = cto32(byte_mask) * 8;
        if (pos < len) {
            tcg_gen_deposit_z_i64(vc, va, pos, len - pos);
        } else {
            tcg_gen_movi_i64(vc, 0);
        }
    } else {
        TCGv tmp = tcg_temp_new();
        tcg_gen_shli_i64(tmp, load_gpr(ctx, rb), 3);
        tcg_gen_neg_i64(tmp, tmp);
        tcg_gen_andi_i64(tmp, tmp, 0x3f);
        tcg_gen_shl_i64(vc, va, tmp);
        tcg_temp_free(tmp);
    }
    gen_zapnoti(vc, vc, byte_mask);
}
/* EXTBL, EXTWL, EXTLL, EXTQL */
static void gen_ext_l(DisasContext *ctx, TCGv vc, TCGv va, int rb, bool islit,
                      uint8_t lit, uint8_t byte_mask)
{
    if (islit) {
        int pos = (lit & 7) * 8;
        int len = cto32(byte_mask) * 8;
        if (pos + len >= 64) {
            len = 64 - pos;
        }
        tcg_gen_extract_i64(vc, va, pos, len);
    } else {
        TCGv tmp = tcg_temp_new();
        tcg_gen_andi_i64(tmp, load_gpr(ctx, rb), 7);
        tcg_gen_shli_i64(tmp, tmp, 3);
        tcg_gen_shr_i64(vc, va, tmp);
        tcg_temp_free(tmp);
        gen_zapnoti(vc, vc, byte_mask);
    }
}
/* INSWH, INSLH, INSQH */
static void gen_ins_h(DisasContext *ctx, TCGv vc, TCGv va, int rb, bool islit,
                      uint8_t lit, uint8_t byte_mask)
{
    if (islit) {
        int pos = 64 - (lit & 7) * 8;
        int len = cto32(byte_mask) * 8;
        if (pos < len) {
            tcg_gen_extract_i64(vc, va, pos, len - pos);
        } else {
            tcg_gen_movi_i64(vc, 0);
        }
    } else {
        TCGv tmp = tcg_temp_new();
        TCGv shift = tcg_temp_new();

        /* The instruction description has us left-shift the byte mask
           and extract bits <15:8> and apply that zap at the end.  This
           is equivalent to simply performing the zap first and shifting
           afterward.  */
        gen_zapnoti(tmp, va, byte_mask);

        /* If (B & 7) == 0, we need to shift by 64 and leave a zero.  Do this
           portably by splitting the shift into two parts: shift_count-1 and 1.
           Arrange for the -1 by using ones-complement instead of
           twos-complement in the negation: ~(B * 8) & 63.  */

        tcg_gen_shli_i64(shift, load_gpr(ctx, rb), 3);
        tcg_gen_not_i64(shift, shift);
        tcg_gen_andi_i64(shift, shift, 0x3f);

        tcg_gen_shr_i64(vc, tmp, shift);
        tcg_gen_shri_i64(vc, vc, 1);
        tcg_temp_free(shift);
        tcg_temp_free(tmp);
    }
}
/* INSBL, INSWL, INSLL, INSQL */
static void gen_ins_l(DisasContext *ctx, TCGv vc, TCGv va, int rb, bool islit,
                      uint8_t lit, uint8_t byte_mask)
{
    if (islit) {
        int pos = (lit & 7) * 8;
        int len = cto32(byte_mask) * 8;
        if (pos + len > 64) {
            len = 64 - pos;
        }
        tcg_gen_deposit_z_i64(vc, va, pos, len);
    } else {
        TCGv tmp = tcg_temp_new();
        TCGv shift = tcg_temp_new();

        /* The instruction description has us left-shift the byte mask
           and extract bits <15:8> and apply that zap at the end.  This
           is equivalent to simply performing the zap first and shifting
           afterward.  */
        gen_zapnoti(tmp, va, byte_mask);

        tcg_gen_andi_i64(shift, load_gpr(ctx, rb), 7);
        tcg_gen_shli_i64(shift, shift, 3);
        tcg_gen_shl_i64(vc, tmp, shift);
        tcg_temp_free(shift);
        tcg_temp_free(tmp);
    }
}
/* MSKWH, MSKLH, MSKQH */
static void gen_msk_h(DisasContext *ctx, TCGv vc, TCGv va, int rb, bool islit,
                      uint8_t lit, uint8_t byte_mask)
{
    if (islit) {
        gen_zapnoti(vc, va, ~((byte_mask << (lit & 7)) >> 8));
    } else {
        TCGv shift = tcg_temp_new();
        TCGv mask = tcg_temp_new();

        /* The instruction description is as above, where the byte_mask
           is shifted left, and then we extract bits <15:8>.  This can be
           emulated with a right-shift on the expanded byte mask.  This
           requires extra care because for an input <2:0> == 0 we need a
           shift of 64 bits in order to generate a zero.  This is done by
           splitting the shift into two parts, the variable shift - 1
           followed by a constant 1 shift.  The code we expand below is
           equivalent to ~(B * 8) & 63.  */

        tcg_gen_shli_i64(shift, load_gpr(ctx, rb), 3);
        tcg_gen_not_i64(shift, shift);
        tcg_gen_andi_i64(shift, shift, 0x3f);
        tcg_gen_movi_i64(mask, zapnot_mask(byte_mask));
        tcg_gen_shr_i64(mask, mask, shift);
        tcg_gen_shri_i64(mask, mask, 1);

        tcg_gen_andc_i64(vc, va, mask);

        tcg_temp_free(mask);
        tcg_temp_free(shift);
    }
}
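
/* Added worked example for the variable-shift path above: when rb & 7 == 0,
   ~(0 * 8) & 63 == 63, so the expanded mask is shifted right by 63 and then
   by 1 more -- a total of 64 bits, clearing the mask entirely, which is
   exactly the 64-bit shift a single TCG shift cannot express portably.  */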
/* MSKBL, MSKWL, MSKLL, MSKQL */
static void gen_msk_l(DisasContext *ctx, TCGv vc, TCGv va, int rb, bool islit,
                      uint8_t lit, uint8_t byte_mask)
{
    if (islit) {
        gen_zapnoti(vc, va, ~(byte_mask << (lit & 7)));
    } else {
        TCGv shift = tcg_temp_new();
        TCGv mask = tcg_temp_new();

        tcg_gen_andi_i64(shift, load_gpr(ctx, rb), 7);
        tcg_gen_shli_i64(shift, shift, 3);
        tcg_gen_movi_i64(mask, zapnot_mask(byte_mask));
        tcg_gen_shl_i64(mask, mask, shift);

        tcg_gen_andc_i64(vc, va, mask);

        tcg_temp_free(mask);
        tcg_temp_free(shift);
    }
}
static void gen_rx(DisasContext *ctx, int ra, int set)
{
    TCGv_i32 tmp;

    if (ra != 31) {
        tcg_gen_ld8u_i64(ctx->ir[ra], cpu_env,
                         offsetof(CPUAlphaState, intr_flag));
    }

    tmp = tcg_const_i32(set);
    tcg_gen_st8_i32(tmp, cpu_env, offsetof(CPUAlphaState, intr_flag));
    tcg_temp_free_i32(tmp);
}
static ExitStatus gen_call_pal(DisasContext *ctx, int palcode)
{
    /* We're emulating OSF/1 PALcode.  Many of these are trivial access
       to internal cpu registers.  */

    /* Unprivileged PAL call */
    if (palcode >= 0x80 && palcode < 0xC0) {
        switch (palcode) {
        case 0x86:
            /* IMB */
            /* No-op inside QEMU.  */
            break;
        case 0x9E:
            /* RDUNIQUE */
            tcg_gen_ld_i64(ctx->ir[IR_V0], cpu_env,
                           offsetof(CPUAlphaState, unique));
            break;
        case 0x9F:
            /* WRUNIQUE */
            tcg_gen_st_i64(ctx->ir[IR_A0], cpu_env,
                           offsetof(CPUAlphaState, unique));
            break;
        default:
            palcode &= 0xbf;
            goto do_call_pal;
        }
        return NO_EXIT;
    }

#ifndef CONFIG_USER_ONLY
    /* Privileged PAL code */
    if (palcode < 0x40 && (ctx->tb->flags & TB_FLAGS_USER_MODE) == 0) {
        switch (palcode) {
        case 0x01:
            /* CFLUSH */
            /* No-op inside QEMU.  */
            break;
        case 0x02:
            /* DRAINA */
            /* No-op inside QEMU.  */
            break;
        case 0x2D:
            /* WRVPTPTR */
            tcg_gen_st_i64(ctx->ir[IR_A0], cpu_env,
                           offsetof(CPUAlphaState, vptptr));
            break;
        case 0x31:
            /* WRVAL */
            tcg_gen_st_i64(ctx->ir[IR_A0], cpu_env,
                           offsetof(CPUAlphaState, sysval));
            break;
        case 0x32:
            /* RDVAL */
            tcg_gen_ld_i64(ctx->ir[IR_V0], cpu_env,
                           offsetof(CPUAlphaState, sysval));
            break;

        case 0x35: {
            /* SWPIPL */
            TCGv tmp;

            /* Note that we already know we're in kernel mode, so we know
               that PS only contains the 3 IPL bits.  */
            tcg_gen_ld8u_i64(ctx->ir[IR_V0], cpu_env,
                             offsetof(CPUAlphaState, ps));

            /* But make sure and store only the 3 IPL bits from the user.  */
            tmp = tcg_temp_new();
            tcg_gen_andi_i64(tmp, ctx->ir[IR_A0], PS_INT_MASK);
            tcg_gen_st8_i64(tmp, cpu_env, offsetof(CPUAlphaState, ps));
            tcg_temp_free(tmp);
            break;
        }

        case 0x36:
            /* RDPS */
            tcg_gen_ld8u_i64(ctx->ir[IR_V0], cpu_env,
                             offsetof(CPUAlphaState, ps));
            break;
        case 0x38:
            /* WRUSP */
            tcg_gen_st_i64(ctx->ir[IR_A0], cpu_env,
                           offsetof(CPUAlphaState, usp));
            break;
        case 0x3A:
            /* RDUSP */
            tcg_gen_ld_i64(ctx->ir[IR_V0], cpu_env,
                           offsetof(CPUAlphaState, usp));
            break;
        case 0x3C:
            /* WHAMI */
            tcg_gen_ld32s_i64(ctx->ir[IR_V0], cpu_env,
                -offsetof(AlphaCPU, env) + offsetof(CPUState, cpu_index));
            break;

        default:
            palcode &= 0x3f;
            goto do_call_pal;
        }
        return NO_EXIT;
    }
#endif
    return gen_invalid(ctx);

 do_call_pal:
#ifdef CONFIG_USER_ONLY
    return gen_excp(ctx, EXCP_CALL_PAL, palcode);
#else
    {
        TCGv tmp = tcg_temp_new();
        uint64_t exc_addr = ctx->pc;
        uint64_t entry = ctx->palbr;

        if (ctx->tb->flags & TB_FLAGS_PAL_MODE) {
            exc_addr |= 1;
        } else {
            tcg_gen_movi_i64(tmp, 1);
            tcg_gen_st8_i64(tmp, cpu_env, offsetof(CPUAlphaState, pal_mode));
        }

        tcg_gen_movi_i64(tmp, exc_addr);
        tcg_gen_st_i64(tmp, cpu_env, offsetof(CPUAlphaState, exc_addr));
        tcg_temp_free(tmp);

        entry += (palcode & 0x80
                  ? 0x2000 + (palcode - 0x80) * 64
                  : 0x1000 + palcode * 64);

        /* Since the destination is running in PALmode, we don't really
           need the page permissions check.  We'll see the existence of
           the page when we create the TB, and we'll flush all TBs if
           we change the PAL base register.  */
        if (!ctx->singlestep_enabled && !(ctx->tb->cflags & CF_LAST_IO)) {
            tcg_gen_goto_tb(0);
            tcg_gen_movi_i64(cpu_pc, entry);
            tcg_gen_exit_tb((uintptr_t)ctx->tb);
            return EXIT_GOTO_TB;
        } else {
            tcg_gen_movi_i64(cpu_pc, entry);
            return EXIT_PC_UPDATED;
        }
    }
#endif
}
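
/* Added note on the entry computation above: privileged calls vector to
   palbr + 0x1000 + palcode * 64 and unprivileged ones to
   palbr + 0x2000 + (palcode - 0x80) * 64.  For example, under the OSF/1
   convention CALL_PAL 0x83 (callsys) enters PALcode at palbr + 0x20c0.  */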
#ifndef CONFIG_USER_ONLY

#define PR_BYTE         0x100000
#define PR_LONG         0x200000

static int cpu_pr_data(int pr)
{
    switch (pr) {
    case  0: return offsetof(CPUAlphaState, ps) | PR_BYTE;
    case  1: return offsetof(CPUAlphaState, fen) | PR_BYTE;
    case  2: return offsetof(CPUAlphaState, pcc_ofs) | PR_LONG;
    case  3: return offsetof(CPUAlphaState, trap_arg0);
    case  4: return offsetof(CPUAlphaState, trap_arg1);
    case  5: return offsetof(CPUAlphaState, trap_arg2);
    case  6: return offsetof(CPUAlphaState, exc_addr);
    case  7: return offsetof(CPUAlphaState, palbr);
    case  8: return offsetof(CPUAlphaState, ptbr);
    case  9: return offsetof(CPUAlphaState, vptptr);
    case 10: return offsetof(CPUAlphaState, unique);
    case 11: return offsetof(CPUAlphaState, sysval);
    case 12: return offsetof(CPUAlphaState, usp);

    case 40 ... 63:
        return offsetof(CPUAlphaState, scratch[pr - 40]);

    case 251:
        return offsetof(CPUAlphaState, alarm_expire);
    }
    return 0;
}
static ExitStatus gen_mfpr(DisasContext *ctx, TCGv va, int regno)
{
    void (*helper)(TCGv);
    int data;

    switch (regno) {
    case 32 ... 39:
        /* Accessing the "non-shadow" general registers.  */
        regno = regno == 39 ? 25 : regno - 32 + 8;
        tcg_gen_mov_i64(va, cpu_std_ir[regno]);
        break;

    case 250: /* WALLTIME */
        helper = gen_helper_get_walltime;
        goto do_helper;
    case 249: /* VMTIME */
        helper = gen_helper_get_vmtime;
    do_helper:
        if (use_icount) {
            gen_io_start();
            helper(va);
            gen_io_end();
            return EXIT_PC_STALE;
        } else {
            helper(va);
        }
        break;

    default:
        /* The basic registers are data only, and unknown registers
           are read-zero, write-ignore.  */
        data = cpu_pr_data(regno);
        if (data == 0) {
            tcg_gen_movi_i64(va, 0);
        } else if (data & PR_BYTE) {
            tcg_gen_ld8u_i64(va, cpu_env, data & ~PR_BYTE);
        } else if (data & PR_LONG) {
            tcg_gen_ld32s_i64(va, cpu_env, data & ~PR_LONG);
        } else {
            tcg_gen_ld_i64(va, cpu_env, data);
        }
        break;
    }

    return NO_EXIT;
}
static ExitStatus gen_mtpr(DisasContext *ctx, TCGv vb, int regno)
{
    TCGv tmp;
    int data;

    switch (regno) {
    case 255:
        /* TBIA */
        gen_helper_tbia(cpu_env);
        break;

    case 254:
        /* TBIS */
        gen_helper_tbis(cpu_env, vb);
        break;

    case 253:
        /* WAIT */
        tmp = tcg_const_i64(1);
        tcg_gen_st32_i64(tmp, cpu_env, -offsetof(AlphaCPU, env) +
                                       offsetof(CPUState, halted));
        return gen_excp(ctx, EXCP_HLT, 0);

    case 252:
        /* HALT */
        gen_helper_halt(vb);
        return EXIT_PC_STALE;

    case 251:
        /* ALARM */
        gen_helper_set_alarm(cpu_env, vb);
        break;

    case 7:
        /* PALBR */
        tcg_gen_st_i64(vb, cpu_env, offsetof(CPUAlphaState, palbr));
        /* Changing the PAL base register implies un-chaining all of the TBs
           that ended with a CALL_PAL.  Since the base register usually only
           changes during boot, flushing everything works well.  */
        gen_helper_tb_flush(cpu_env);
        return EXIT_PC_STALE;

    case 32 ... 39:
        /* Accessing the "non-shadow" general registers.  */
        regno = regno == 39 ? 25 : regno - 32 + 8;
        tcg_gen_mov_i64(cpu_std_ir[regno], vb);
        break;

    default:
        /* The basic registers are data only, and unknown registers
           are read-zero, write-ignore.  */
        data = cpu_pr_data(regno);
        if (data != 0) {
            if (data & PR_BYTE) {
                tcg_gen_st8_i64(vb, cpu_env, data & ~PR_BYTE);
            } else if (data & PR_LONG) {
                tcg_gen_st32_i64(vb, cpu_env, data & ~PR_LONG);
            } else {
                tcg_gen_st_i64(vb, cpu_env, data);
            }
        }
        break;
    }

    return NO_EXIT;
}

#endif /* !USER_ONLY */
#define REQUIRE_NO_LIT                          \
    do {                                        \
        if (real_islit) {                       \
            goto invalid_opc;                   \
        }                                       \
    } while (0)

#define REQUIRE_TB_FLAG(FLAG)                   \
    do {                                        \
        if ((ctx->tb->flags & (FLAG)) == 0) {   \
            goto invalid_opc;                   \
        }                                       \
    } while (0)

#define REQUIRE_REG_31(WHICH)                   \
    do {                                        \
        if (WHICH != 31) {                      \
            goto invalid_opc;                   \
        }                                       \
    } while (0)
static ExitStatus translate_one(DisasContext *ctx, uint32_t insn)
{
    int32_t disp21, disp16, disp12 __attribute__((unused));
    uint16_t fn11;
    uint8_t opc, ra, rb, rc, fpfn, fn7, lit;
    bool islit, real_islit;
    TCGv va, vb, vc, tmp, tmp2;
    TCGv_i32 t32;
    ExitStatus ret;

    /* Decode all instruction fields */
    opc = extract32(insn, 26, 6);
    ra = extract32(insn, 21, 5);
    rb = extract32(insn, 16, 5);
    rc = extract32(insn, 0, 5);
    real_islit = islit = extract32(insn, 12, 1);
    lit = extract32(insn, 13, 8);

    disp21 = sextract32(insn, 0, 21);
    disp16 = sextract32(insn, 0, 16);
    disp12 = sextract32(insn, 0, 12);

    fn11 = extract32(insn, 5, 11);
    fpfn = extract32(insn, 5, 6);
    fn7 = extract32(insn, 5, 7);
== 31 && !islit
) {
1474 ret
= gen_call_pal(ctx
, insn
& 0x03ffffff);
1500 disp16
= (uint32_t)disp16
<< 16;
1504 va
= dest_gpr(ctx
, ra
);
1505 /* It's worth special-casing immediate loads. */
1507 tcg_gen_movi_i64(va
, disp16
);
1509 tcg_gen_addi_i64(va
, load_gpr(ctx
, rb
), disp16
);
1515 REQUIRE_TB_FLAG(TB_FLAGS_AMASK_BWX
);
1516 gen_load_mem(ctx
, &tcg_gen_qemu_ld8u
, ra
, rb
, disp16
, 0, 0);
1520 gen_load_mem(ctx
, &tcg_gen_qemu_ld64
, ra
, rb
, disp16
, 0, 1);
1524 REQUIRE_TB_FLAG(TB_FLAGS_AMASK_BWX
);
1525 gen_load_mem(ctx
, &tcg_gen_qemu_ld16u
, ra
, rb
, disp16
, 0, 0);
1529 REQUIRE_TB_FLAG(TB_FLAGS_AMASK_BWX
);
1530 gen_store_mem(ctx
, &tcg_gen_qemu_st16
, ra
, rb
, disp16
, 0, 0);
1534 REQUIRE_TB_FLAG(TB_FLAGS_AMASK_BWX
);
1535 gen_store_mem(ctx
, &tcg_gen_qemu_st8
, ra
, rb
, disp16
, 0, 0);
1539 gen_store_mem(ctx
, &tcg_gen_qemu_st64
, ra
, rb
, disp16
, 0, 1);
    case 0x10:
        vc = dest_gpr(ctx, rc);
        vb = load_gpr_lit(ctx, rb, lit, islit);

        if (ra == 31) {
            if (fn7 == 0x00) {
                /* Special case ADDL as SEXTL.  */
                tcg_gen_ext32s_i64(vc, vb);
                break;
            }
            if (fn7 == 0x29) {
                /* Special case SUBQ as NEGQ.  */
                tcg_gen_neg_i64(vc, vb);
                break;
            }
        }

        va = load_gpr(ctx, ra);
        switch (fn7) {
        case 0x00:
            /* ADDL */
            tcg_gen_add_i64(vc, va, vb);
            tcg_gen_ext32s_i64(vc, vc);
            break;
        case 0x02:
            /* S4ADDL */
            tmp = tcg_temp_new();
            tcg_gen_shli_i64(tmp, va, 2);
            tcg_gen_add_i64(tmp, tmp, vb);
            tcg_gen_ext32s_i64(vc, tmp);
            tcg_temp_free(tmp);
            break;
        case 0x09:
            /* SUBL */
            tcg_gen_sub_i64(vc, va, vb);
            tcg_gen_ext32s_i64(vc, vc);
            break;
        case 0x0B:
            /* S4SUBL */
            tmp = tcg_temp_new();
            tcg_gen_shli_i64(tmp, va, 2);
            tcg_gen_sub_i64(tmp, tmp, vb);
            tcg_gen_ext32s_i64(vc, tmp);
            tcg_temp_free(tmp);
            break;
        case 0x0F:
            /* CMPBGE */
            if (ra == 31) {
                /* Special case 0 >= X as X == 0.  */
                gen_helper_cmpbe0(vc, vb);
            } else {
                gen_helper_cmpbge(vc, va, vb);
            }
            break;
        case 0x12:
            /* S8ADDL */
            tmp = tcg_temp_new();
            tcg_gen_shli_i64(tmp, va, 3);
            tcg_gen_add_i64(tmp, tmp, vb);
            tcg_gen_ext32s_i64(vc, tmp);
            tcg_temp_free(tmp);
            break;
        case 0x1B:
            /* S8SUBL */
            tmp = tcg_temp_new();
            tcg_gen_shli_i64(tmp, va, 3);
            tcg_gen_sub_i64(tmp, tmp, vb);
            tcg_gen_ext32s_i64(vc, tmp);
            tcg_temp_free(tmp);
            break;
        case 0x1D:
            /* CMPULT */
            tcg_gen_setcond_i64(TCG_COND_LTU, vc, va, vb);
            break;
        case 0x20:
            /* ADDQ */
            tcg_gen_add_i64(vc, va, vb);
            break;
        case 0x22:
            /* S4ADDQ */
            tmp = tcg_temp_new();
            tcg_gen_shli_i64(tmp, va, 2);
            tcg_gen_add_i64(vc, tmp, vb);
            tcg_temp_free(tmp);
            break;
        case 0x29:
            /* SUBQ */
            tcg_gen_sub_i64(vc, va, vb);
            break;
        case 0x2B:
            /* S4SUBQ */
            tmp = tcg_temp_new();
            tcg_gen_shli_i64(tmp, va, 2);
            tcg_gen_sub_i64(vc, tmp, vb);
            tcg_temp_free(tmp);
            break;
        case 0x2D:
            /* CMPEQ */
            tcg_gen_setcond_i64(TCG_COND_EQ, vc, va, vb);
            break;
        case 0x32:
            /* S8ADDQ */
            tmp = tcg_temp_new();
            tcg_gen_shli_i64(tmp, va, 3);
            tcg_gen_add_i64(vc, tmp, vb);
            tcg_temp_free(tmp);
            break;
        case 0x3B:
            /* S8SUBQ */
            tmp = tcg_temp_new();
            tcg_gen_shli_i64(tmp, va, 3);
            tcg_gen_sub_i64(vc, tmp, vb);
            tcg_temp_free(tmp);
            break;
        case 0x3D:
            /* CMPULE */
            tcg_gen_setcond_i64(TCG_COND_LEU, vc, va, vb);
            break;
        case 0x40:
            /* ADDL/V */
            tmp = tcg_temp_new();
            tcg_gen_ext32s_i64(tmp, va);
            tcg_gen_ext32s_i64(vc, vb);
            tcg_gen_add_i64(tmp, tmp, vc);
            tcg_gen_ext32s_i64(vc, tmp);
            gen_helper_check_overflow(cpu_env, vc, tmp);
            tcg_temp_free(tmp);
            break;
        case 0x49:
            /* SUBL/V */
            tmp = tcg_temp_new();
            tcg_gen_ext32s_i64(tmp, va);
            tcg_gen_ext32s_i64(vc, vb);
            tcg_gen_sub_i64(tmp, tmp, vc);
            tcg_gen_ext32s_i64(vc, tmp);
            gen_helper_check_overflow(cpu_env, vc, tmp);
            tcg_temp_free(tmp);
            break;
        case 0x4D:
            /* CMPLT */
            tcg_gen_setcond_i64(TCG_COND_LT, vc, va, vb);
            break;
        case 0x60:
            /* ADDQ/V */
            tmp = tcg_temp_new();
            tmp2 = tcg_temp_new();
            tcg_gen_eqv_i64(tmp, va, vb);
            tcg_gen_mov_i64(tmp2, va);
            tcg_gen_add_i64(vc, va, vb);
            tcg_gen_xor_i64(tmp2, tmp2, vc);
            tcg_gen_and_i64(tmp, tmp, tmp2);
            tcg_gen_shri_i64(tmp, tmp, 63);
            tcg_gen_movi_i64(tmp2, 0);
            gen_helper_check_overflow(cpu_env, tmp, tmp2);
            tcg_temp_free(tmp);
            tcg_temp_free(tmp2);
            break;
        case 0x69:
            /* SUBQ/V */
            tmp = tcg_temp_new();
            tmp2 = tcg_temp_new();
            tcg_gen_xor_i64(tmp, va, vb);
            tcg_gen_mov_i64(tmp2, va);
            tcg_gen_sub_i64(vc, va, vb);
            tcg_gen_xor_i64(tmp2, tmp2, vc);
            tcg_gen_and_i64(tmp, tmp, tmp2);
            tcg_gen_shri_i64(tmp, tmp, 63);
            tcg_gen_movi_i64(tmp2, 0);
            gen_helper_check_overflow(cpu_env, tmp, tmp2);
            tcg_temp_free(tmp);
            tcg_temp_free(tmp2);
            break;
        case 0x6D:
            /* CMPLE */
            tcg_gen_setcond_i64(TCG_COND_LE, vc, va, vb);
            break;
        default:
            goto invalid_opc;
        }
        break;
    case 0x11:
        if (fn7 == 0x20) {
            if (rc == 31) {
                /* Special case BIS as NOP.  */
                break;
            }
            if (ra == 31) {
                /* Special case BIS as MOV.  */
                vc = dest_gpr(ctx, rc);
                if (islit) {
                    tcg_gen_movi_i64(vc, lit);
                } else {
                    tcg_gen_mov_i64(vc, load_gpr(ctx, rb));
                }
                break;
            }
        }

        vc = dest_gpr(ctx, rc);
        vb = load_gpr_lit(ctx, rb, lit, islit);

        if (fn7 == 0x28 && ra == 31) {
            /* Special case ORNOT as NOT.  */
            tcg_gen_not_i64(vc, vb);
            break;
        }

        va = load_gpr(ctx, ra);
        switch (fn7) {
        case 0x00:
            /* AND */
            tcg_gen_and_i64(vc, va, vb);
            break;
        case 0x08:
            /* BIC */
            tcg_gen_andc_i64(vc, va, vb);
            break;
        case 0x14:
            /* CMOVLBS */
            tmp = tcg_temp_new();
            tcg_gen_andi_i64(tmp, va, 1);
            tcg_gen_movcond_i64(TCG_COND_NE, vc, tmp, load_zero(ctx),
                                vb, load_gpr(ctx, rc));
            tcg_temp_free(tmp);
            break;
        case 0x16:
            /* CMOVLBC */
            tmp = tcg_temp_new();
            tcg_gen_andi_i64(tmp, va, 1);
            tcg_gen_movcond_i64(TCG_COND_EQ, vc, tmp, load_zero(ctx),
                                vb, load_gpr(ctx, rc));
            tcg_temp_free(tmp);
            break;
        case 0x20:
            /* BIS */
            tcg_gen_or_i64(vc, va, vb);
            break;
        case 0x24:
            /* CMOVEQ */
            tcg_gen_movcond_i64(TCG_COND_EQ, vc, va, load_zero(ctx),
                                vb, load_gpr(ctx, rc));
            break;
        case 0x26:
            /* CMOVNE */
            tcg_gen_movcond_i64(TCG_COND_NE, vc, va, load_zero(ctx),
                                vb, load_gpr(ctx, rc));
            break;
        case 0x28:
            /* ORNOT */
            tcg_gen_orc_i64(vc, va, vb);
            break;
        case 0x40:
            /* XOR */
            tcg_gen_xor_i64(vc, va, vb);
            break;
        case 0x44:
            /* CMOVLT */
            tcg_gen_movcond_i64(TCG_COND_LT, vc, va, load_zero(ctx),
                                vb, load_gpr(ctx, rc));
            break;
        case 0x46:
            /* CMOVGE */
            tcg_gen_movcond_i64(TCG_COND_GE, vc, va, load_zero(ctx),
                                vb, load_gpr(ctx, rc));
            break;
        case 0x48:
            /* EQV */
            tcg_gen_eqv_i64(vc, va, vb);
            break;
        case 0x61:
            /* AMASK */
            REQUIRE_REG_31(ra);
            {
                uint64_t amask = ctx->tb->flags >> TB_FLAGS_AMASK_SHIFT;
                tcg_gen_andi_i64(vc, vb, ~amask);
            }
            break;
        case 0x64:
            /* CMOVLE */
            tcg_gen_movcond_i64(TCG_COND_LE, vc, va, load_zero(ctx),
                                vb, load_gpr(ctx, rc));
            break;
        case 0x66:
            /* CMOVGT */
            tcg_gen_movcond_i64(TCG_COND_GT, vc, va, load_zero(ctx),
                                vb, load_gpr(ctx, rc));
            break;
        case 0x6C:
            /* IMPLVER */
            REQUIRE_REG_31(ra);
            tcg_gen_movi_i64(vc, ctx->implver);
            break;
        default:
            goto invalid_opc;
        }
        break;
    case 0x12:
        vc = dest_gpr(ctx, rc);
        va = load_gpr(ctx, ra);
        switch (fn7) {
        case 0x02:
            /* MSKBL */
            gen_msk_l(ctx, vc, va, rb, islit, lit, 0x01);
            break;
        case 0x06:
            /* EXTBL */
            gen_ext_l(ctx, vc, va, rb, islit, lit, 0x01);
            break;
        case 0x0B:
            /* INSBL */
            gen_ins_l(ctx, vc, va, rb, islit, lit, 0x01);
            break;
        case 0x12:
            /* MSKWL */
            gen_msk_l(ctx, vc, va, rb, islit, lit, 0x03);
            break;
        case 0x16:
            /* EXTWL */
            gen_ext_l(ctx, vc, va, rb, islit, lit, 0x03);
            break;
        case 0x1B:
            /* INSWL */
            gen_ins_l(ctx, vc, va, rb, islit, lit, 0x03);
            break;
        case 0x22:
            /* MSKLL */
            gen_msk_l(ctx, vc, va, rb, islit, lit, 0x0f);
            break;
        case 0x26:
            /* EXTLL */
            gen_ext_l(ctx, vc, va, rb, islit, lit, 0x0f);
            break;
        case 0x2B:
            /* INSLL */
            gen_ins_l(ctx, vc, va, rb, islit, lit, 0x0f);
            break;
        case 0x30:
            /* ZAP */
            if (islit) {
                gen_zapnoti(vc, va, ~lit);
            } else {
                gen_helper_zap(vc, va, load_gpr(ctx, rb));
            }
            break;
        case 0x31:
            /* ZAPNOT */
            if (islit) {
                gen_zapnoti(vc, va, lit);
            } else {
                gen_helper_zapnot(vc, va, load_gpr(ctx, rb));
            }
            break;
        case 0x32:
            /* MSKQL */
            gen_msk_l(ctx, vc, va, rb, islit, lit, 0xff);
            break;
        case 0x34:
            /* SRL */
            if (islit) {
                tcg_gen_shri_i64(vc, va, lit & 0x3f);
            } else {
                tmp = tcg_temp_new();
                vb = load_gpr(ctx, rb);
                tcg_gen_andi_i64(tmp, vb, 0x3f);
                tcg_gen_shr_i64(vc, va, tmp);
                tcg_temp_free(tmp);
            }
            break;
        case 0x36:
            /* EXTQL */
            gen_ext_l(ctx, vc, va, rb, islit, lit, 0xff);
            break;
        case 0x39:
            /* SLL */
            if (islit) {
                tcg_gen_shli_i64(vc, va, lit & 0x3f);
            } else {
                tmp = tcg_temp_new();
                vb = load_gpr(ctx, rb);
                tcg_gen_andi_i64(tmp, vb, 0x3f);
                tcg_gen_shl_i64(vc, va, tmp);
                tcg_temp_free(tmp);
            }
            break;
        case 0x3B:
            /* INSQL */
            gen_ins_l(ctx, vc, va, rb, islit, lit, 0xff);
            break;
        case 0x3C:
            /* SRA */
            if (islit) {
                tcg_gen_sari_i64(vc, va, lit & 0x3f);
            } else {
                tmp = tcg_temp_new();
                vb = load_gpr(ctx, rb);
                tcg_gen_andi_i64(tmp, vb, 0x3f);
                tcg_gen_sar_i64(vc, va, tmp);
                tcg_temp_free(tmp);
            }
            break;
        case 0x52:
            /* MSKWH */
            gen_msk_h(ctx, vc, va, rb, islit, lit, 0x03);
            break;
        case 0x57:
            /* INSWH */
            gen_ins_h(ctx, vc, va, rb, islit, lit, 0x03);
            break;
        case 0x5A:
            /* EXTWH */
            gen_ext_h(ctx, vc, va, rb, islit, lit, 0x03);
            break;
        case 0x62:
            /* MSKLH */
            gen_msk_h(ctx, vc, va, rb, islit, lit, 0x0f);
            break;
        case 0x67:
            /* INSLH */
            gen_ins_h(ctx, vc, va, rb, islit, lit, 0x0f);
            break;
        case 0x6A:
            /* EXTLH */
            gen_ext_h(ctx, vc, va, rb, islit, lit, 0x0f);
            break;
        case 0x72:
            /* MSKQH */
            gen_msk_h(ctx, vc, va, rb, islit, lit, 0xff);
            break;
        case 0x77:
            /* INSQH */
            gen_ins_h(ctx, vc, va, rb, islit, lit, 0xff);
            break;
        case 0x7A:
            /* EXTQH */
            gen_ext_h(ctx, vc, va, rb, islit, lit, 0xff);
            break;
        default:
            goto invalid_opc;
        }
        break;
    case 0x13:
        vc = dest_gpr(ctx, rc);
        vb = load_gpr_lit(ctx, rb, lit, islit);
        va = load_gpr(ctx, ra);
        switch (fn7) {
        case 0x00:
            /* MULL */
            tcg_gen_mul_i64(vc, va, vb);
            tcg_gen_ext32s_i64(vc, vc);
            break;
        case 0x20:
            /* MULQ */
            tcg_gen_mul_i64(vc, va, vb);
            break;
        case 0x30:
            /* UMULH */
            tmp = tcg_temp_new();
            tcg_gen_mulu2_i64(tmp, vc, va, vb);
            tcg_temp_free(tmp);
            break;
        case 0x40:
            /* MULL/V */
            tmp = tcg_temp_new();
            tcg_gen_ext32s_i64(tmp, va);
            tcg_gen_ext32s_i64(vc, vb);
            tcg_gen_mul_i64(tmp, tmp, vc);
            tcg_gen_ext32s_i64(vc, tmp);
            gen_helper_check_overflow(cpu_env, vc, tmp);
            tcg_temp_free(tmp);
            break;
        case 0x60:
            /* MULQ/V */
            tmp = tcg_temp_new();
            tmp2 = tcg_temp_new();
            tcg_gen_muls2_i64(vc, tmp, va, vb);
            tcg_gen_sari_i64(tmp2, vc, 63);
            gen_helper_check_overflow(cpu_env, tmp, tmp2);
            tcg_temp_free(tmp);
            tcg_temp_free(tmp2);
            break;
        default:
            goto invalid_opc;
        }
        break;
    case 0x14:
        REQUIRE_TB_FLAG(TB_FLAGS_AMASK_FIX);
        vc = dest_fpr(ctx, rc);
        switch (fpfn) { /* fn11 & 0x3F */
        case 0x04:
            /* ITOFS */
            REQUIRE_REG_31(rb);
            t32 = tcg_temp_new_i32();
            va = load_gpr(ctx, ra);
            tcg_gen_extrl_i64_i32(t32, va);
            gen_helper_memory_to_s(vc, t32);
            tcg_temp_free_i32(t32);
            break;
        case 0x0A:
            /* SQRTF */
            REQUIRE_REG_31(ra);
            vb = load_fpr(ctx, rb);
            gen_helper_sqrtf(vc, cpu_env, vb);
            break;
        case 0x0B:
            /* SQRTS */
            REQUIRE_REG_31(ra);
            gen_sqrts(ctx, rb, rc, fn11);
            break;
        case 0x14:
            /* ITOFF */
            REQUIRE_REG_31(rb);
            t32 = tcg_temp_new_i32();
            va = load_gpr(ctx, ra);
            tcg_gen_extrl_i64_i32(t32, va);
            gen_helper_memory_to_f(vc, t32);
            tcg_temp_free_i32(t32);
            break;
        case 0x24:
            /* ITOFT */
            REQUIRE_REG_31(rb);
            va = load_gpr(ctx, ra);
            tcg_gen_mov_i64(vc, va);
            break;
        case 0x2A:
            /* SQRTG */
            REQUIRE_REG_31(ra);
            vb = load_fpr(ctx, rb);
            gen_helper_sqrtg(vc, cpu_env, vb);
            break;
        case 0x2B:
            /* SQRTT */
            REQUIRE_REG_31(ra);
            gen_sqrtt(ctx, rb, rc, fn11);
            break;
        default:
            goto invalid_opc;
        }
        break;
    case 0x15:
        /* VAX floating point */
        /* XXX: rounding mode and trap are ignored (!) */
        vc = dest_fpr(ctx, rc);
        vb = load_fpr(ctx, rb);
        va = load_fpr(ctx, ra);
        switch (fpfn) { /* fn11 & 0x3F */
        case 0x00:
            /* ADDF */
            gen_helper_addf(vc, cpu_env, va, vb);
            break;
        case 0x01:
            /* SUBF */
            gen_helper_subf(vc, cpu_env, va, vb);
            break;
        case 0x02:
            /* MULF */
            gen_helper_mulf(vc, cpu_env, va, vb);
            break;
        case 0x03:
            /* DIVF */
            gen_helper_divf(vc, cpu_env, va, vb);
            break;
        case 0x20:
            /* ADDG */
            gen_helper_addg(vc, cpu_env, va, vb);
            break;
        case 0x21:
            /* SUBG */
            gen_helper_subg(vc, cpu_env, va, vb);
            break;
        case 0x22:
            /* MULG */
            gen_helper_mulg(vc, cpu_env, va, vb);
            break;
        case 0x23:
            /* DIVG */
            gen_helper_divg(vc, cpu_env, va, vb);
            break;
        case 0x25:
            /* CMPGEQ */
            gen_helper_cmpgeq(vc, cpu_env, va, vb);
            break;
        case 0x26:
            /* CMPGLT */
            gen_helper_cmpglt(vc, cpu_env, va, vb);
            break;
        case 0x27:
            /* CMPGLE */
            gen_helper_cmpgle(vc, cpu_env, va, vb);
            break;
        case 0x2C:
            /* CVTGF */
            REQUIRE_REG_31(ra);
            gen_helper_cvtgf(vc, cpu_env, vb);
            break;
        case 0x2F:
            /* CVTGQ */
            REQUIRE_REG_31(ra);
            gen_helper_cvtgq(vc, cpu_env, vb);
            break;
        case 0x3C:
            /* CVTQF */
            REQUIRE_REG_31(ra);
            gen_helper_cvtqf(vc, cpu_env, vb);
            break;
        case 0x3E:
            /* CVTQG */
            REQUIRE_REG_31(ra);
            gen_helper_cvtqg(vc, cpu_env, vb);
            break;
        default:
            goto invalid_opc;
        }
        break;
    case 0x16:
        /* IEEE floating-point */
        switch (fpfn) { /* fn11 & 0x3F */
        case 0x00:
            /* ADDS */
            gen_adds(ctx, ra, rb, rc, fn11);
            break;
        case 0x01:
            /* SUBS */
            gen_subs(ctx, ra, rb, rc, fn11);
            break;
        case 0x02:
            /* MULS */
            gen_muls(ctx, ra, rb, rc, fn11);
            break;
        case 0x03:
            /* DIVS */
            gen_divs(ctx, ra, rb, rc, fn11);
            break;
        case 0x20:
            /* ADDT */
            gen_addt(ctx, ra, rb, rc, fn11);
            break;
        case 0x21:
            /* SUBT */
            gen_subt(ctx, ra, rb, rc, fn11);
            break;
        case 0x22:
            /* MULT */
            gen_mult(ctx, ra, rb, rc, fn11);
            break;
        case 0x23:
            /* DIVT */
            gen_divt(ctx, ra, rb, rc, fn11);
            break;
        case 0x24:
            /* CMPTUN */
            gen_cmptun(ctx, ra, rb, rc, fn11);
            break;
        case 0x25:
            /* CMPTEQ */
            gen_cmpteq(ctx, ra, rb, rc, fn11);
            break;
        case 0x26:
            /* CMPTLT */
            gen_cmptlt(ctx, ra, rb, rc, fn11);
            break;
        case 0x27:
            /* CMPTLE */
            gen_cmptle(ctx, ra, rb, rc, fn11);
            break;
        case 0x2C:
            REQUIRE_REG_31(ra);
            if (fn11 == 0x2AC || fn11 == 0x6AC) {
                /* CVTST */
                gen_cvtst(ctx, rb, rc, fn11);
            } else {
                /* CVTTS */
                gen_cvtts(ctx, rb, rc, fn11);
            }
            break;
        case 0x2F:
            /* CVTTQ */
            REQUIRE_REG_31(ra);
            gen_cvttq(ctx, rb, rc, fn11);
            break;
        case 0x3C:
            /* CVTQS */
            REQUIRE_REG_31(ra);
            gen_cvtqs(ctx, rb, rc, fn11);
            break;
        case 0x3E:
            /* CVTQT */
            REQUIRE_REG_31(ra);
            gen_cvtqt(ctx, rb, rc, fn11);
            break;
        default:
            goto invalid_opc;
        }
        break;
    case 0x17:
        switch (fn11) {
        case 0x010:
            /* CVTLQ */
            vc = dest_fpr(ctx, rc);
            vb = load_fpr(ctx, rb);
            gen_cvtlq(vc, vb);
            break;
        case 0x020:
            /* CPYS */
            if (rc == 31) {
                /* Special case CPYS as FNOP.  */
            } else {
                vc = dest_fpr(ctx, rc);
                va = load_fpr(ctx, ra);
                if (ra == rb) {
                    /* Special case CPYS as FMOV.  */
                    tcg_gen_mov_i64(vc, va);
                } else {
                    vb = load_fpr(ctx, rb);
                    gen_cpy_mask(vc, va, vb, 0, 0x8000000000000000ULL);
                }
            }
            break;
        case 0x021:
            /* CPYSN */
            vc = dest_fpr(ctx, rc);
            vb = load_fpr(ctx, rb);
            va = load_fpr(ctx, ra);
            gen_cpy_mask(vc, va, vb, 1, 0x8000000000000000ULL);
            break;
        case 0x022:
            /* CPYSE */
            vc = dest_fpr(ctx, rc);
            vb = load_fpr(ctx, rb);
            va = load_fpr(ctx, ra);
            gen_cpy_mask(vc, va, vb, 0, 0xFFF0000000000000ULL);
            break;
        case 0x024:
            /* MT_FPCR */
            va = load_fpr(ctx, ra);
            gen_helper_store_fpcr(cpu_env, va);
            if (ctx->tb_rm == QUAL_RM_D) {
                /* Re-do the copy of the rounding mode to fp_status
                   the next time we use dynamic rounding.  */
                ctx->tb_rm = -1;
            }
            break;
        case 0x025:
            /* MF_FPCR */
            va = dest_fpr(ctx, ra);
            gen_helper_load_fpcr(va, cpu_env);
            break;
        case 0x02A:
            /* FCMOVEQ */
            gen_fcmov(ctx, TCG_COND_EQ, ra, rb, rc);
            break;
        case 0x02B:
            /* FCMOVNE */
            gen_fcmov(ctx, TCG_COND_NE, ra, rb, rc);
            break;
        case 0x02C:
            /* FCMOVLT */
            gen_fcmov(ctx, TCG_COND_LT, ra, rb, rc);
            break;
        case 0x02D:
            /* FCMOVGE */
            gen_fcmov(ctx, TCG_COND_GE, ra, rb, rc);
            break;
        case 0x02E:
            /* FCMOVLE */
            gen_fcmov(ctx, TCG_COND_LE, ra, rb, rc);
            break;
        case 0x02F:
            /* FCMOVGT */
            gen_fcmov(ctx, TCG_COND_GT, ra, rb, rc);
            break;
        case 0x030: /* CVTQL */
        case 0x130: /* CVTQL/V */
        case 0x530: /* CVTQL/SV */
            REQUIRE_REG_31(ra);
            vc = dest_fpr(ctx, rc);
            vb = load_fpr(ctx, rb);
            gen_helper_cvtql(vc, cpu_env, vb);
            gen_fp_exc_raise(rc, fn11);
            break;
        default:
            goto invalid_opc;
        }
        break;
    case 0x18:
        switch ((uint16_t)disp16) {
        case 0x4000:
            /* MB */
            tcg_gen_mb(TCG_MO_ALL | TCG_BAR_SC);
            break;
        case 0x4400:
            /* WMB */
            tcg_gen_mb(TCG_MO_ST_ST | TCG_BAR_SC);
            break;
        case 0xC000:
            /* RPCC */
            va = dest_gpr(ctx, ra);
            if (ctx->tb->cflags & CF_USE_ICOUNT) {
                gen_io_start();
                gen_helper_load_pcc(va, cpu_env);
                gen_io_end();
                ret = EXIT_PC_STALE;
            } else {
                gen_helper_load_pcc(va, cpu_env);
            }
            break;
        default:
            goto invalid_opc;
        }
        break;
    case 0x19:
        /* HW_MFPR (PALcode) */
#ifndef CONFIG_USER_ONLY
        REQUIRE_TB_FLAG(TB_FLAGS_PAL_MODE);
        va = dest_gpr(ctx, ra);
        ret = gen_mfpr(ctx, va, insn & 0xffff);
        break;
#else
        goto invalid_opc;
#endif

    case 0x1A:
        /* JMP, JSR, RET, JSR_COROUTINE.  These only differ by the branch
           prediction stack action, which of course we don't implement.  */
        vb = load_gpr(ctx, rb);
        tcg_gen_andi_i64(cpu_pc, vb, ~3);
        if (ra != 31) {
            tcg_gen_movi_i64(ctx->ir[ra], ctx->pc);
        }
        ret = EXIT_PC_UPDATED;
        break;
    case 0x1B:
        /* HW_LD (PALcode) */
#ifndef CONFIG_USER_ONLY
        REQUIRE_TB_FLAG(TB_FLAGS_PAL_MODE);
        {
            TCGv addr = tcg_temp_new();
            vb = load_gpr(ctx, rb);
            va = dest_gpr(ctx, ra);

            tcg_gen_addi_i64(addr, vb, disp12);
            switch ((insn >> 12) & 0xF) {
            case 0x0:
                /* Longword physical access (hw_ldl/p) */
                tcg_gen_qemu_ld_i64(va, addr, MMU_PHYS_IDX, MO_LESL);
                break;
            case 0x1:
                /* Quadword physical access (hw_ldq/p) */
                tcg_gen_qemu_ld_i64(va, addr, MMU_PHYS_IDX, MO_LEQ);
                break;
            case 0x2:
                /* Longword physical access with lock (hw_ldl_l/p) */
                gen_qemu_ldl_l(va, addr, MMU_PHYS_IDX);
                break;
            case 0x3:
                /* Quadword physical access with lock (hw_ldq_l/p) */
                gen_qemu_ldq_l(va, addr, MMU_PHYS_IDX);
                break;
            case 0x4:
                /* Longword virtual PTE fetch (hw_ldl/v) */
                goto invalid_opc;
            case 0x5:
                /* Quadword virtual PTE fetch (hw_ldq/v) */
                goto invalid_opc;
            case 0x8:
                /* Longword virtual access (hw_ldl) */
                goto invalid_opc;
            case 0x9:
                /* Quadword virtual access (hw_ldq) */
                goto invalid_opc;
            case 0xA:
                /* Longword virtual access with protection check (hw_ldl/w) */
                tcg_gen_qemu_ld_i64(va, addr, MMU_KERNEL_IDX, MO_LESL);
                break;
            case 0xB:
                /* Quadword virtual access with protection check (hw_ldq/w) */
                tcg_gen_qemu_ld_i64(va, addr, MMU_KERNEL_IDX, MO_LEQ);
                break;
            case 0xC:
                /* Longword virtual access with alt access mode (hw_ldl/a)*/
                goto invalid_opc;
            case 0xD:
                /* Quadword virtual access with alt access mode (hw_ldq/a) */
                goto invalid_opc;
            case 0xE:
                /* Longword virtual access with alternate access mode and
                   protection checks (hw_ldl/wa) */
                tcg_gen_qemu_ld_i64(va, addr, MMU_USER_IDX, MO_LESL);
                break;
            case 0xF:
                /* Quadword virtual access with alternate access mode and
                   protection checks (hw_ldq/wa) */
                tcg_gen_qemu_ld_i64(va, addr, MMU_USER_IDX, MO_LEQ);
                break;
            default:
                goto invalid_opc;
            }
            tcg_temp_free(addr);
            break;
        }
#else
        goto invalid_opc;
#endif
    case 0x1C:
        vc = dest_gpr(ctx, rc);
        if (fn7 == 0x70) {
            /* FTOIT */
            REQUIRE_TB_FLAG(TB_FLAGS_AMASK_FIX);
            REQUIRE_REG_31(rb);
            va = load_fpr(ctx, ra);
            tcg_gen_mov_i64(vc, va);
            break;
        } else if (fn7 == 0x78) {
            /* FTOIS */
            REQUIRE_TB_FLAG(TB_FLAGS_AMASK_FIX);
            REQUIRE_REG_31(rb);
            t32 = tcg_temp_new_i32();
            va = load_fpr(ctx, ra);
            gen_helper_s_to_memory(t32, va);
            tcg_gen_ext_i32_i64(vc, t32);
            tcg_temp_free_i32(t32);
            break;
        }

        vb = load_gpr_lit(ctx, rb, lit, islit);
        switch (fn7) {
        case 0x00:
            /* SEXTB */
            REQUIRE_TB_FLAG(TB_FLAGS_AMASK_BWX);
            REQUIRE_REG_31(ra);
            tcg_gen_ext8s_i64(vc, vb);
            break;
        case 0x01:
            /* SEXTW */
            REQUIRE_TB_FLAG(TB_FLAGS_AMASK_BWX);
            REQUIRE_REG_31(ra);
            tcg_gen_ext16s_i64(vc, vb);
            break;
        case 0x30:
            /* CTPOP */
            REQUIRE_TB_FLAG(TB_FLAGS_AMASK_CIX);
            REQUIRE_REG_31(ra);
            REQUIRE_NO_LIT;
            tcg_gen_ctpop_i64(vc, vb);
            break;
        case 0x31:
            /* PERR */
            REQUIRE_TB_FLAG(TB_FLAGS_AMASK_MVI);
            REQUIRE_NO_LIT;
            va = load_gpr(ctx, ra);
            gen_helper_perr(vc, va, vb);
            break;
        case 0x32:
            /* CTLZ */
            REQUIRE_TB_FLAG(TB_FLAGS_AMASK_CIX);
            REQUIRE_REG_31(ra);
            REQUIRE_NO_LIT;
            tcg_gen_clzi_i64(vc, vb, 64);
            break;
        case 0x33:
            /* CTTZ */
            REQUIRE_TB_FLAG(TB_FLAGS_AMASK_CIX);
            REQUIRE_REG_31(ra);
            REQUIRE_NO_LIT;
            tcg_gen_ctzi_i64(vc, vb, 64);
            break;
        case 0x34:
            /* UNPKBW */
            REQUIRE_TB_FLAG(TB_FLAGS_AMASK_MVI);
            REQUIRE_REG_31(ra);
            REQUIRE_NO_LIT;
            gen_helper_unpkbw(vc, vb);
            break;
        case 0x35:
            /* UNPKBL */
            REQUIRE_TB_FLAG(TB_FLAGS_AMASK_MVI);
            REQUIRE_REG_31(ra);
            REQUIRE_NO_LIT;
            gen_helper_unpkbl(vc, vb);
            break;
        case 0x36:
            /* PKWB */
            REQUIRE_TB_FLAG(TB_FLAGS_AMASK_MVI);
            REQUIRE_REG_31(ra);
            REQUIRE_NO_LIT;
            gen_helper_pkwb(vc, vb);
            break;
        case 0x37:
            /* PKLB */
            REQUIRE_TB_FLAG(TB_FLAGS_AMASK_MVI);
            REQUIRE_REG_31(ra);
            REQUIRE_NO_LIT;
            gen_helper_pklb(vc, vb);
            break;
        case 0x38:
            /* MINSB8 */
            REQUIRE_TB_FLAG(TB_FLAGS_AMASK_MVI);
            va = load_gpr(ctx, ra);
            gen_helper_minsb8(vc, va, vb);
            break;
        case 0x39:
            /* MINSW4 */
            REQUIRE_TB_FLAG(TB_FLAGS_AMASK_MVI);
            va = load_gpr(ctx, ra);
            gen_helper_minsw4(vc, va, vb);
            break;
        case 0x3A:
            /* MINUB8 */
            REQUIRE_TB_FLAG(TB_FLAGS_AMASK_MVI);
            va = load_gpr(ctx, ra);
            gen_helper_minub8(vc, va, vb);
            break;
        case 0x3B:
            /* MINUW4 */
            REQUIRE_TB_FLAG(TB_FLAGS_AMASK_MVI);
            va = load_gpr(ctx, ra);
            gen_helper_minuw4(vc, va, vb);
            break;
        case 0x3C:
            /* MAXUB8 */
            REQUIRE_TB_FLAG(TB_FLAGS_AMASK_MVI);
            va = load_gpr(ctx, ra);
            gen_helper_maxub8(vc, va, vb);
            break;
        case 0x3D:
            /* MAXUW4 */
            REQUIRE_TB_FLAG(TB_FLAGS_AMASK_MVI);
            va = load_gpr(ctx, ra);
            gen_helper_maxuw4(vc, va, vb);
            break;
        case 0x3E:
            /* MAXSB8 */
            REQUIRE_TB_FLAG(TB_FLAGS_AMASK_MVI);
            va = load_gpr(ctx, ra);
            gen_helper_maxsb8(vc, va, vb);
            break;
        case 0x3F:
            /* MAXSW4 */
            REQUIRE_TB_FLAG(TB_FLAGS_AMASK_MVI);
            va = load_gpr(ctx, ra);
            gen_helper_maxsw4(vc, va, vb);
            break;
        default:
            goto invalid_opc;
        }
        break;
    case 0x1D:
        /* HW_MTPR (PALcode) */
#ifndef CONFIG_USER_ONLY
        REQUIRE_TB_FLAG(TB_FLAGS_PAL_MODE);
        vb = load_gpr(ctx, rb);
        ret = gen_mtpr(ctx, vb, insn & 0xffff);
        break;
#else
        goto invalid_opc;
#endif

    case 0x1E:
        /* HW_RET (PALcode) */
#ifndef CONFIG_USER_ONLY
        REQUIRE_TB_FLAG(TB_FLAGS_PAL_MODE);
        if (rb == 31) {
            /* Pre-EV6 CPUs interpreted this as HW_REI, loading the return
               address from EXC_ADDR.  This turns out to be useful for our
               emulation PALcode, so continue to accept it.  */
            ctx->lit = vb = tcg_temp_new();
            tcg_gen_ld_i64(vb, cpu_env, offsetof(CPUAlphaState, exc_addr));
        } else {
            vb = load_gpr(ctx, rb);
        }
        tmp = tcg_temp_new();
        tcg_gen_movi_i64(tmp, 0);
        tcg_gen_st8_i64(tmp, cpu_env, offsetof(CPUAlphaState, intr_flag));
        tcg_gen_movi_i64(cpu_lock_addr, -1);
        tcg_gen_andi_i64(tmp, vb, 1);
        tcg_gen_st8_i64(tmp, cpu_env, offsetof(CPUAlphaState, pal_mode));
        tcg_gen_andi_i64(cpu_pc, vb, ~3);
        ret = EXIT_PC_UPDATED;
        break;
#else
        goto invalid_opc;
#endif
    case 0x1F:
        /* HW_ST (PALcode) */
#ifndef CONFIG_USER_ONLY
        REQUIRE_TB_FLAG(TB_FLAGS_PAL_MODE);
        {
            switch ((insn >> 12) & 0xF) {
            case 0x0:
                /* Longword physical access */
                va = load_gpr(ctx, ra);
                vb = load_gpr(ctx, rb);
                tmp = tcg_temp_new();
                tcg_gen_addi_i64(tmp, vb, disp12);
                tcg_gen_qemu_st_i64(va, tmp, MMU_PHYS_IDX, MO_LESL);
                tcg_temp_free(tmp);
                break;
            case 0x1:
                /* Quadword physical access */
                va = load_gpr(ctx, ra);
                vb = load_gpr(ctx, rb);
                tmp = tcg_temp_new();
                tcg_gen_addi_i64(tmp, vb, disp12);
                tcg_gen_qemu_st_i64(va, tmp, MMU_PHYS_IDX, MO_LEQ);
                tcg_temp_free(tmp);
                break;
            case 0x2:
                /* Longword physical access with lock */
                ret = gen_store_conditional(ctx, ra, rb, disp12,
                                            MMU_PHYS_IDX, MO_LESL);
                break;
            case 0x3:
                /* Quadword physical access with lock */
                ret = gen_store_conditional(ctx, ra, rb, disp12,
                                            MMU_PHYS_IDX, MO_LEQ);
                break;
            case 0x4:
                /* Longword virtual access */
                goto invalid_opc;
            case 0x5:
                /* Quadword virtual access */
                goto invalid_opc;
            case 0xC:
                /* Longword virtual access with alternate access mode */
                goto invalid_opc;
            case 0xD:
                /* Quadword virtual access with alternate access mode */
                goto invalid_opc;
            default:
                goto invalid_opc;
            }
            break;
        }
#else
        goto invalid_opc;
#endif
    case 0x20:
        /* LDF */
        gen_load_mem(ctx, &gen_qemu_ldf, ra, rb, disp16, 1, 0);
        break;
    case 0x21:
        /* LDG */
        gen_load_mem(ctx, &gen_qemu_ldg, ra, rb, disp16, 1, 0);
        break;
    case 0x22:
        /* LDS */
        gen_load_mem(ctx, &gen_qemu_lds, ra, rb, disp16, 1, 0);
        break;
    case 0x23:
        /* LDT */
        gen_load_mem(ctx, &tcg_gen_qemu_ld64, ra, rb, disp16, 1, 0);
        break;
    case 0x24:
        /* STF */
        gen_store_mem(ctx, &gen_qemu_stf, ra, rb, disp16, 1, 0);
        break;
    case 0x25:
        /* STG */
        gen_store_mem(ctx, &gen_qemu_stg, ra, rb, disp16, 1, 0);
        break;
    case 0x26:
        /* STS */
        gen_store_mem(ctx, &gen_qemu_sts, ra, rb, disp16, 1, 0);
        break;
    case 0x27:
        /* STT */
        gen_store_mem(ctx, &tcg_gen_qemu_st64, ra, rb, disp16, 1, 0);
        break;
    case 0x28:
        /* LDL */
        gen_load_mem(ctx, &tcg_gen_qemu_ld32s, ra, rb, disp16, 0, 0);
        break;
    case 0x29:
        /* LDQ */
        gen_load_mem(ctx, &tcg_gen_qemu_ld64, ra, rb, disp16, 0, 0);
        break;
    case 0x2A:
        /* LDL_L */
        gen_load_mem(ctx, &gen_qemu_ldl_l, ra, rb, disp16, 0, 0);
        break;
    case 0x2B:
        /* LDQ_L */
        gen_load_mem(ctx, &gen_qemu_ldq_l, ra, rb, disp16, 0, 0);
        break;
    case 0x2C:
        /* STL */
        gen_store_mem(ctx, &tcg_gen_qemu_st32, ra, rb, disp16, 0, 0);
        break;
    case 0x2D:
        /* STQ */
        gen_store_mem(ctx, &tcg_gen_qemu_st64, ra, rb, disp16, 0, 0);
        break;
    case 0x2E:
        /* STL_C */
        ret = gen_store_conditional(ctx, ra, rb, disp16,
                                    ctx->mem_idx, MO_LESL);
        break;
    case 0x2F:
        /* STQ_C */
        ret = gen_store_conditional(ctx, ra, rb, disp16,
                                    ctx->mem_idx, MO_LEQ);
        break;
    case 0x30:
        /* BR */
        ret = gen_bdirect(ctx, ra, disp21);
        break;
    case 0x31: /* FBEQ */
        ret = gen_fbcond(ctx, TCG_COND_EQ, ra, disp21);
        break;
    case 0x32: /* FBLT */
        ret = gen_fbcond(ctx, TCG_COND_LT, ra, disp21);
        break;
    case 0x33: /* FBLE */
        ret = gen_fbcond(ctx, TCG_COND_LE, ra, disp21);
        break;
    case 0x34:
        /* BSR */
        ret = gen_bdirect(ctx, ra, disp21);
        break;
    case 0x35: /* FBNE */
        ret = gen_fbcond(ctx, TCG_COND_NE, ra, disp21);
        break;
    case 0x36: /* FBGE */
        ret = gen_fbcond(ctx, TCG_COND_GE, ra, disp21);
        break;
    case 0x37: /* FBGT */
        ret = gen_fbcond(ctx, TCG_COND_GT, ra, disp21);
        break;
    case 0x38:
        /* BLBC */
        ret = gen_bcond(ctx, TCG_COND_EQ, ra, disp21, 1);
        break;
    case 0x39:
        /* BEQ */
        ret = gen_bcond(ctx, TCG_COND_EQ, ra, disp21, 0);
        break;
    case 0x3A:
        /* BLT */
        ret = gen_bcond(ctx, TCG_COND_LT, ra, disp21, 0);
        break;
    case 0x3B:
        /* BLE */
        ret = gen_bcond(ctx, TCG_COND_LE, ra, disp21, 0);
        break;
    case 0x3C:
        /* BLBS */
        ret = gen_bcond(ctx, TCG_COND_NE, ra, disp21, 1);
        break;
    case 0x3D:
        /* BNE */
        ret = gen_bcond(ctx, TCG_COND_NE, ra, disp21, 0);
        break;
    case 0x3E:
        /* BGE */
        ret = gen_bcond(ctx, TCG_COND_GE, ra, disp21, 0);
        break;
    case 0x3F:
        /* BGT */
        ret = gen_bcond(ctx, TCG_COND_GT, ra, disp21, 0);
        break;
    invalid_opc:
        ret = gen_invalid(ctx);
        break;
    }

    return ret;
}
void gen_intermediate_code(CPUAlphaState *env, struct TranslationBlock *tb)
{
    AlphaCPU *cpu = alpha_env_get_cpu(env);
    CPUState *cs = CPU(cpu);
    DisasContext ctx, *ctxp = &ctx;
    target_ulong pc_start;
    target_ulong pc_mask;
    uint32_t insn;
    ExitStatus ret;
    int num_insns;
    int max_insns;

    pc_start = tb->pc;

    ctx.tb = tb;
    ctx.pc = pc_start;
    ctx.mem_idx = cpu_mmu_index(env, false);
    ctx.implver = env->implver;
    ctx.singlestep_enabled = cs->singlestep_enabled;

#ifdef CONFIG_USER_ONLY
    ctx.ir = cpu_std_ir;
#else
    ctx.palbr = env->palbr;
    ctx.ir = (tb->flags & TB_FLAGS_PAL_MODE ? cpu_pal_ir : cpu_std_ir);
#endif

    /* ??? Every TB begins with unset rounding mode, to be initialized on
       the first fp insn of the TB.  Alternately we could define a proper
       default for every TB (e.g. QUAL_RM_N or QUAL_RM_D) and make sure
       to reset the FP_STATUS to that default at the end of any TB that
       changes the default.  We could even (gasp) dynamically figure out
       what default would be most efficient given the running program.  */
    ctx.tb_rm = -1;
    /* Similarly for flush-to-zero.  */
    ctx.tb_ftz = -1;

    TCGV_UNUSED_I64(ctx.zero);
    TCGV_UNUSED_I64(ctx.sink);
    TCGV_UNUSED_I64(ctx.lit);

    num_insns = 0;
    max_insns = tb->cflags & CF_COUNT_MASK;
    if (max_insns == 0) {
        max_insns = CF_COUNT_MASK;
    }
    if (max_insns > TCG_MAX_INSNS) {
        max_insns = TCG_MAX_INSNS;
    }

    if (in_superpage(&ctx, pc_start)) {
        pc_mask = (1ULL << 41) - 1;
    } else {
        pc_mask = ~TARGET_PAGE_MASK;
    }

    gen_tb_start(tb);
    do {
        tcg_gen_insn_start(ctx.pc);
        num_insns++;

        if (unlikely(cpu_breakpoint_test(cs, ctx.pc, BP_ANY))) {
            ret = gen_excp(&ctx, EXCP_DEBUG, 0);
            /* The address covered by the breakpoint must be included in
               [tb->pc, tb->pc + tb->size) in order for it to be
               properly cleared -- thus we increment the PC here so that
               the logic setting tb->size below does the right thing.  */
            ctx.pc += 4;
            break;
        }
        if (num_insns == max_insns && (tb->cflags & CF_LAST_IO)) {
            gen_io_start();
        }
        insn = cpu_ldl_code(env, ctx.pc);

        ctx.pc += 4;
        ret = translate_one(ctxp, insn);
        free_context_temps(ctxp);

        /* If we reach a page boundary, are single stepping,
           or exhaust instruction count, stop generation.  */
        if (ret == NO_EXIT
            && ((ctx.pc & pc_mask) == 0
                || tcg_op_buf_full()
                || num_insns >= max_insns
                || singlestep
                || ctx.singlestep_enabled)) {
            ret = EXIT_PC_STALE;
        }
    } while (ret == NO_EXIT);

    if (tb->cflags & CF_LAST_IO) {
        gen_io_end();
    }

    switch (ret) {
    case EXIT_GOTO_TB:
    case EXIT_NORETURN:
        break;
    case EXIT_PC_STALE:
        tcg_gen_movi_i64(cpu_pc, ctx.pc);
        /* FALLTHRU */
    case EXIT_PC_UPDATED:
        if (ctx.singlestep_enabled) {
            gen_excp_1(EXCP_DEBUG, 0);
        } else {
            tcg_gen_exit_tb(0);
        }
        break;
    default:
        abort();
    }

    gen_tb_end(tb, num_insns);

    tb->size = ctx.pc - pc_start;
    tb->icount = num_insns;

#ifdef DEBUG_DISAS
    if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)
        && qemu_log_in_addr_range(pc_start)) {
        qemu_log_lock();
        qemu_log("IN: %s\n", lookup_symbol(pc_start));
        log_target_disas(cs, pc_start, ctx.pc - pc_start, 1);
        qemu_log("\n");
        qemu_log_unlock();
    }
#endif
}
void restore_state_to_opc(CPUAlphaState *env, TranslationBlock *tb,
                          target_ulong *data)
{
    env->pc = data[0];
}