2 * Alpha emulation cpu translation for qemu.
4 * Copyright (c) 2007 Jocelyn Mayer
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
20 #include "qemu/osdep.h"
22 #include "disas/disas.h"
23 #include "qemu/host-utils.h"
24 #include "exec/exec-all.h"
26 #include "exec/cpu_ldst.h"
28 #include "exec/helper-proto.h"
29 #include "exec/helper-gen.h"
31 #include "trace-tcg.h"
35 #undef ALPHA_DEBUG_DISAS
36 #define CONFIG_SOFTFLOAT_INLINE
38 #ifdef ALPHA_DEBUG_DISAS
39 # define LOG_DISAS(...) qemu_log_mask(CPU_LOG_TB_IN_ASM, ## __VA_ARGS__)
41 # define LOG_DISAS(...) do { } while (0)
44 typedef struct DisasContext DisasContext
;
46 struct TranslationBlock
*tb
;
48 #ifndef CONFIG_USER_ONLY
53 /* Current rounding mode for this TB. */
55 /* Current flush-to-zero setting for this TB. */
58 /* implver value for this CPU. */
61 /* The set of registers active in the current context. */
64 /* Temporaries for $31 and $f31 as source and destination. */
67 /* Temporary for immediate constants. */
70 bool singlestep_enabled
;
73 /* Return values from translate_one, indicating the state of the TB.
74 Note that zero indicates that we are not exiting the TB. */
79 /* We have emitted one or more goto_tb. No fixup required. */
82 /* We are not using a goto_tb (for whatever reason), but have updated
83 the PC (for whatever reason), so there's no need to do it again on
87 /* We are exiting the TB, but have neither emitted a goto_tb, nor
88 updated the PC for the next instruction to be executed. */
91 /* We are ending the TB with a noreturn function call, e.g. longjmp.
92 No following code will be executed. */
96 /* global register indexes */
97 static TCGv_env cpu_env
;
98 static TCGv cpu_std_ir
[31];
99 static TCGv cpu_fir
[31];
101 static TCGv cpu_lock_addr
;
102 static TCGv cpu_lock_value
;
104 #ifndef CONFIG_USER_ONLY
105 static TCGv cpu_pal_ir
[31];
108 #include "exec/gen-icount.h"
110 void alpha_translate_init(void)
112 #define DEF_VAR(V) { &cpu_##V, #V, offsetof(CPUAlphaState, V) }
114 typedef struct { TCGv
*var
; const char *name
; int ofs
; } GlobalVar
;
115 static const GlobalVar vars
[] = {
123 /* Use the symbolic register names that match the disassembler. */
124 static const char greg_names
[31][4] = {
125 "v0", "t0", "t1", "t2", "t3", "t4", "t5", "t6",
126 "t7", "s0", "s1", "s2", "s3", "s4", "s5", "fp",
127 "a0", "a1", "a2", "a3", "a4", "a5", "t8", "t9",
128 "t10", "t11", "ra", "t12", "at", "gp", "sp"
130 static const char freg_names
[31][4] = {
131 "f0", "f1", "f2", "f3", "f4", "f5", "f6", "f7",
132 "f8", "f9", "f10", "f11", "f12", "f13", "f14", "f15",
133 "f16", "f17", "f18", "f19", "f20", "f21", "f22", "f23",
134 "f24", "f25", "f26", "f27", "f28", "f29", "f30"
136 #ifndef CONFIG_USER_ONLY
137 static const char shadow_names
[8][8] = {
138 "pal_t7", "pal_s0", "pal_s1", "pal_s2",
139 "pal_s3", "pal_s4", "pal_s5", "pal_t11"
143 static bool done_init
= 0;
151 cpu_env
= tcg_global_reg_new_ptr(TCG_AREG0
, "env");
152 tcg_ctx
.tcg_env
= cpu_env
;
154 for (i
= 0; i
< 31; i
++) {
155 cpu_std_ir
[i
] = tcg_global_mem_new_i64(cpu_env
,
156 offsetof(CPUAlphaState
, ir
[i
]),
160 for (i
= 0; i
< 31; i
++) {
161 cpu_fir
[i
] = tcg_global_mem_new_i64(cpu_env
,
162 offsetof(CPUAlphaState
, fir
[i
]),
166 #ifndef CONFIG_USER_ONLY
167 memcpy(cpu_pal_ir
, cpu_std_ir
, sizeof(cpu_pal_ir
));
168 for (i
= 0; i
< 8; i
++) {
169 int r
= (i
== 7 ? 25 : i
+ 8);
170 cpu_pal_ir
[r
] = tcg_global_mem_new_i64(cpu_env
,
171 offsetof(CPUAlphaState
,
177 for (i
= 0; i
< ARRAY_SIZE(vars
); ++i
) {
178 const GlobalVar
*v
= &vars
[i
];
179 *v
->var
= tcg_global_mem_new_i64(cpu_env
, v
->ofs
, v
->name
);
183 static TCGv
load_zero(DisasContext
*ctx
)
185 if (TCGV_IS_UNUSED_I64(ctx
->zero
)) {
186 ctx
->zero
= tcg_const_i64(0);
191 static TCGv
dest_sink(DisasContext
*ctx
)
193 if (TCGV_IS_UNUSED_I64(ctx
->sink
)) {
194 ctx
->sink
= tcg_temp_new();
199 static void free_context_temps(DisasContext
*ctx
)
201 if (!TCGV_IS_UNUSED_I64(ctx
->sink
)) {
202 tcg_gen_discard_i64(ctx
->sink
);
203 tcg_temp_free(ctx
->sink
);
204 TCGV_UNUSED_I64(ctx
->sink
);
206 if (!TCGV_IS_UNUSED_I64(ctx
->zero
)) {
207 tcg_temp_free(ctx
->zero
);
208 TCGV_UNUSED_I64(ctx
->zero
);
210 if (!TCGV_IS_UNUSED_I64(ctx
->lit
)) {
211 tcg_temp_free(ctx
->lit
);
212 TCGV_UNUSED_I64(ctx
->lit
);
216 static TCGv
load_gpr(DisasContext
*ctx
, unsigned reg
)
218 if (likely(reg
< 31)) {
221 return load_zero(ctx
);
225 static TCGv
load_gpr_lit(DisasContext
*ctx
, unsigned reg
,
226 uint8_t lit
, bool islit
)
229 ctx
->lit
= tcg_const_i64(lit
);
231 } else if (likely(reg
< 31)) {
234 return load_zero(ctx
);
238 static TCGv
dest_gpr(DisasContext
*ctx
, unsigned reg
)
240 if (likely(reg
< 31)) {
243 return dest_sink(ctx
);
247 static TCGv
load_fpr(DisasContext
*ctx
, unsigned reg
)
249 if (likely(reg
< 31)) {
252 return load_zero(ctx
);
256 static TCGv
dest_fpr(DisasContext
*ctx
, unsigned reg
)
258 if (likely(reg
< 31)) {
261 return dest_sink(ctx
);
265 static void gen_excp_1(int exception
, int error_code
)
269 tmp1
= tcg_const_i32(exception
);
270 tmp2
= tcg_const_i32(error_code
);
271 gen_helper_excp(cpu_env
, tmp1
, tmp2
);
272 tcg_temp_free_i32(tmp2
);
273 tcg_temp_free_i32(tmp1
);
276 static ExitStatus
gen_excp(DisasContext
*ctx
, int exception
, int error_code
)
278 tcg_gen_movi_i64(cpu_pc
, ctx
->pc
);
279 gen_excp_1(exception
, error_code
);
280 return EXIT_NORETURN
;
283 static inline ExitStatus
gen_invalid(DisasContext
*ctx
)
285 return gen_excp(ctx
, EXCP_OPCDEC
, 0);
288 static inline void gen_qemu_ldf(TCGv t0
, TCGv t1
, int flags
)
290 TCGv_i32 tmp32
= tcg_temp_new_i32();
291 tcg_gen_qemu_ld_i32(tmp32
, t1
, flags
, MO_LEUL
);
292 gen_helper_memory_to_f(t0
, tmp32
);
293 tcg_temp_free_i32(tmp32
);
296 static inline void gen_qemu_ldg(TCGv t0
, TCGv t1
, int flags
)
298 TCGv tmp
= tcg_temp_new();
299 tcg_gen_qemu_ld_i64(tmp
, t1
, flags
, MO_LEQ
);
300 gen_helper_memory_to_g(t0
, tmp
);
304 static inline void gen_qemu_lds(TCGv t0
, TCGv t1
, int flags
)
306 TCGv_i32 tmp32
= tcg_temp_new_i32();
307 tcg_gen_qemu_ld_i32(tmp32
, t1
, flags
, MO_LEUL
);
308 gen_helper_memory_to_s(t0
, tmp32
);
309 tcg_temp_free_i32(tmp32
);
312 static inline void gen_qemu_ldl_l(TCGv t0
, TCGv t1
, int flags
)
314 tcg_gen_qemu_ld_i64(t0
, t1
, flags
, MO_LESL
);
315 tcg_gen_mov_i64(cpu_lock_addr
, t1
);
316 tcg_gen_mov_i64(cpu_lock_value
, t0
);
319 static inline void gen_qemu_ldq_l(TCGv t0
, TCGv t1
, int flags
)
321 tcg_gen_qemu_ld_i64(t0
, t1
, flags
, MO_LEQ
);
322 tcg_gen_mov_i64(cpu_lock_addr
, t1
);
323 tcg_gen_mov_i64(cpu_lock_value
, t0
);
326 static inline void gen_load_mem(DisasContext
*ctx
,
327 void (*tcg_gen_qemu_load
)(TCGv t0
, TCGv t1
,
329 int ra
, int rb
, int32_t disp16
, bool fp
,
334 /* LDQ_U with ra $31 is UNOP. Other various loads are forms of
335 prefetches, which we can treat as nops. No worries about
336 missed exceptions here. */
337 if (unlikely(ra
== 31)) {
341 tmp
= tcg_temp_new();
342 addr
= load_gpr(ctx
, rb
);
345 tcg_gen_addi_i64(tmp
, addr
, disp16
);
349 tcg_gen_andi_i64(tmp
, addr
, ~0x7);
353 va
= (fp
? cpu_fir
[ra
] : ctx
->ir
[ra
]);
354 tcg_gen_qemu_load(va
, addr
, ctx
->mem_idx
);
359 static inline void gen_qemu_stf(TCGv t0
, TCGv t1
, int flags
)
361 TCGv_i32 tmp32
= tcg_temp_new_i32();
362 gen_helper_f_to_memory(tmp32
, t0
);
363 tcg_gen_qemu_st_i32(tmp32
, t1
, flags
, MO_LEUL
);
364 tcg_temp_free_i32(tmp32
);
367 static inline void gen_qemu_stg(TCGv t0
, TCGv t1
, int flags
)
369 TCGv tmp
= tcg_temp_new();
370 gen_helper_g_to_memory(tmp
, t0
);
371 tcg_gen_qemu_st_i64(tmp
, t1
, flags
, MO_LEQ
);
375 static inline void gen_qemu_sts(TCGv t0
, TCGv t1
, int flags
)
377 TCGv_i32 tmp32
= tcg_temp_new_i32();
378 gen_helper_s_to_memory(tmp32
, t0
);
379 tcg_gen_qemu_st_i32(tmp32
, t1
, flags
, MO_LEUL
);
380 tcg_temp_free_i32(tmp32
);
383 static inline void gen_store_mem(DisasContext
*ctx
,
384 void (*tcg_gen_qemu_store
)(TCGv t0
, TCGv t1
,
386 int ra
, int rb
, int32_t disp16
, bool fp
,
391 tmp
= tcg_temp_new();
392 addr
= load_gpr(ctx
, rb
);
395 tcg_gen_addi_i64(tmp
, addr
, disp16
);
399 tcg_gen_andi_i64(tmp
, addr
, ~0x7);
403 va
= (fp
? load_fpr(ctx
, ra
) : load_gpr(ctx
, ra
));
404 tcg_gen_qemu_store(va
, addr
, ctx
->mem_idx
);
409 static ExitStatus
gen_store_conditional(DisasContext
*ctx
, int ra
, int rb
,
410 int32_t disp16
, int mem_idx
,
413 TCGLabel
*lab_fail
, *lab_done
;
416 addr
= tcg_temp_new_i64();
417 tcg_gen_addi_i64(addr
, load_gpr(ctx
, rb
), disp16
);
418 free_context_temps(ctx
);
420 lab_fail
= gen_new_label();
421 lab_done
= gen_new_label();
422 tcg_gen_brcond_i64(TCG_COND_NE
, addr
, cpu_lock_addr
, lab_fail
);
423 tcg_temp_free_i64(addr
);
425 val
= tcg_temp_new_i64();
426 tcg_gen_atomic_cmpxchg_i64(val
, cpu_lock_addr
, cpu_lock_value
,
427 load_gpr(ctx
, ra
), mem_idx
, op
);
428 free_context_temps(ctx
);
431 tcg_gen_setcond_i64(TCG_COND_EQ
, ctx
->ir
[ra
], val
, cpu_lock_value
);
433 tcg_temp_free_i64(val
);
434 tcg_gen_br(lab_done
);
436 gen_set_label(lab_fail
);
438 tcg_gen_movi_i64(ctx
->ir
[ra
], 0);
441 gen_set_label(lab_done
);
442 tcg_gen_movi_i64(cpu_lock_addr
, -1);
446 static bool in_superpage(DisasContext
*ctx
, int64_t addr
)
448 #ifndef CONFIG_USER_ONLY
449 return ((ctx
->tb
->flags
& TB_FLAGS_USER_MODE
) == 0
450 && addr
>> TARGET_VIRT_ADDR_SPACE_BITS
== -1
451 && ((addr
>> 41) & 3) == 2);
457 static bool use_goto_tb(DisasContext
*ctx
, uint64_t dest
)
459 /* Suppress goto_tb in the case of single-steping and IO. */
460 if ((ctx
->tb
->cflags
& CF_LAST_IO
)
461 || ctx
->singlestep_enabled
|| singlestep
) {
464 #ifndef CONFIG_USER_ONLY
465 /* If the destination is in the superpage, the page perms can't change. */
466 if (in_superpage(ctx
, dest
)) {
469 /* Check for the dest on the same page as the start of the TB. */
470 return ((ctx
->tb
->pc
^ dest
) & TARGET_PAGE_MASK
) == 0;
476 static ExitStatus
gen_bdirect(DisasContext
*ctx
, int ra
, int32_t disp
)
478 uint64_t dest
= ctx
->pc
+ (disp
<< 2);
481 tcg_gen_movi_i64(ctx
->ir
[ra
], ctx
->pc
);
484 /* Notice branch-to-next; used to initialize RA with the PC. */
487 } else if (use_goto_tb(ctx
, dest
)) {
489 tcg_gen_movi_i64(cpu_pc
, dest
);
490 tcg_gen_exit_tb((uintptr_t)ctx
->tb
);
493 tcg_gen_movi_i64(cpu_pc
, dest
);
494 return EXIT_PC_UPDATED
;
498 static ExitStatus
gen_bcond_internal(DisasContext
*ctx
, TCGCond cond
,
499 TCGv cmp
, int32_t disp
)
501 uint64_t dest
= ctx
->pc
+ (disp
<< 2);
502 TCGLabel
*lab_true
= gen_new_label();
504 if (use_goto_tb(ctx
, dest
)) {
505 tcg_gen_brcondi_i64(cond
, cmp
, 0, lab_true
);
508 tcg_gen_movi_i64(cpu_pc
, ctx
->pc
);
509 tcg_gen_exit_tb((uintptr_t)ctx
->tb
);
511 gen_set_label(lab_true
);
513 tcg_gen_movi_i64(cpu_pc
, dest
);
514 tcg_gen_exit_tb((uintptr_t)ctx
->tb
+ 1);
518 TCGv_i64 z
= tcg_const_i64(0);
519 TCGv_i64 d
= tcg_const_i64(dest
);
520 TCGv_i64 p
= tcg_const_i64(ctx
->pc
);
522 tcg_gen_movcond_i64(cond
, cpu_pc
, cmp
, z
, d
, p
);
524 tcg_temp_free_i64(z
);
525 tcg_temp_free_i64(d
);
526 tcg_temp_free_i64(p
);
527 return EXIT_PC_UPDATED
;
531 static ExitStatus
gen_bcond(DisasContext
*ctx
, TCGCond cond
, int ra
,
532 int32_t disp
, int mask
)
537 cmp_tmp
= tcg_temp_new();
538 tcg_gen_andi_i64(cmp_tmp
, load_gpr(ctx
, ra
), 1);
540 cmp_tmp
= load_gpr(ctx
, ra
);
543 return gen_bcond_internal(ctx
, cond
, cmp_tmp
, disp
);
546 /* Fold -0.0 for comparison with COND. */
548 static void gen_fold_mzero(TCGCond cond
, TCGv dest
, TCGv src
)
550 uint64_t mzero
= 1ull << 63;
555 /* For <= or >, the -0.0 value directly compares the way we want. */
556 tcg_gen_mov_i64(dest
, src
);
561 /* For == or !=, we can simply mask off the sign bit and compare. */
562 tcg_gen_andi_i64(dest
, src
, mzero
- 1);
567 /* For >= or <, map -0.0 to +0.0 via comparison and mask. */
568 tcg_gen_setcondi_i64(TCG_COND_NE
, dest
, src
, mzero
);
569 tcg_gen_neg_i64(dest
, dest
);
570 tcg_gen_and_i64(dest
, dest
, src
);
578 static ExitStatus
gen_fbcond(DisasContext
*ctx
, TCGCond cond
, int ra
,
581 TCGv cmp_tmp
= tcg_temp_new();
582 gen_fold_mzero(cond
, cmp_tmp
, load_fpr(ctx
, ra
));
583 return gen_bcond_internal(ctx
, cond
, cmp_tmp
, disp
);
586 static void gen_fcmov(DisasContext
*ctx
, TCGCond cond
, int ra
, int rb
, int rc
)
591 vb
= load_fpr(ctx
, rb
);
593 gen_fold_mzero(cond
, va
, load_fpr(ctx
, ra
));
595 tcg_gen_movcond_i64(cond
, dest_fpr(ctx
, rc
), va
, z
, vb
, load_fpr(ctx
, rc
));
600 #define QUAL_RM_N 0x080 /* Round mode nearest even */
601 #define QUAL_RM_C 0x000 /* Round mode chopped */
602 #define QUAL_RM_M 0x040 /* Round mode minus infinity */
603 #define QUAL_RM_D 0x0c0 /* Round mode dynamic */
604 #define QUAL_RM_MASK 0x0c0
606 #define QUAL_U 0x100 /* Underflow enable (fp output) */
607 #define QUAL_V 0x100 /* Overflow enable (int output) */
608 #define QUAL_S 0x400 /* Software completion enable */
609 #define QUAL_I 0x200 /* Inexact detection enable */
611 static void gen_qual_roundmode(DisasContext
*ctx
, int fn11
)
615 fn11
&= QUAL_RM_MASK
;
616 if (fn11
== ctx
->tb_rm
) {
621 tmp
= tcg_temp_new_i32();
624 tcg_gen_movi_i32(tmp
, float_round_nearest_even
);
627 tcg_gen_movi_i32(tmp
, float_round_to_zero
);
630 tcg_gen_movi_i32(tmp
, float_round_down
);
633 tcg_gen_ld8u_i32(tmp
, cpu_env
,
634 offsetof(CPUAlphaState
, fpcr_dyn_round
));
638 #if defined(CONFIG_SOFTFLOAT_INLINE)
639 /* ??? The "fpu/softfloat.h" interface is to call set_float_rounding_mode.
640 With CONFIG_SOFTFLOAT that expands to an out-of-line call that just
641 sets the one field. */
642 tcg_gen_st8_i32(tmp
, cpu_env
,
643 offsetof(CPUAlphaState
, fp_status
.float_rounding_mode
));
645 gen_helper_setroundmode(tmp
);
648 tcg_temp_free_i32(tmp
);
651 static void gen_qual_flushzero(DisasContext
*ctx
, int fn11
)
656 if (fn11
== ctx
->tb_ftz
) {
661 tmp
= tcg_temp_new_i32();
663 /* Underflow is enabled, use the FPCR setting. */
664 tcg_gen_ld8u_i32(tmp
, cpu_env
,
665 offsetof(CPUAlphaState
, fpcr_flush_to_zero
));
667 /* Underflow is disabled, force flush-to-zero. */
668 tcg_gen_movi_i32(tmp
, 1);
671 #if defined(CONFIG_SOFTFLOAT_INLINE)
672 tcg_gen_st8_i32(tmp
, cpu_env
,
673 offsetof(CPUAlphaState
, fp_status
.flush_to_zero
));
675 gen_helper_setflushzero(tmp
);
678 tcg_temp_free_i32(tmp
);
681 static TCGv
gen_ieee_input(DisasContext
*ctx
, int reg
, int fn11
, int is_cmp
)
685 if (unlikely(reg
== 31)) {
686 val
= load_zero(ctx
);
689 if ((fn11
& QUAL_S
) == 0) {
691 gen_helper_ieee_input_cmp(cpu_env
, val
);
693 gen_helper_ieee_input(cpu_env
, val
);
696 #ifndef CONFIG_USER_ONLY
697 /* In system mode, raise exceptions for denormals like real
698 hardware. In user mode, proceed as if the OS completion
699 handler is handling the denormal as per spec. */
700 gen_helper_ieee_input_s(cpu_env
, val
);
707 static void gen_fp_exc_raise(int rc
, int fn11
)
709 /* ??? We ought to be able to do something with imprecise exceptions.
710 E.g. notice we're still in the trap shadow of something within the
711 TB and do not generate the code to signal the exception; end the TB
712 when an exception is forced to arrive, either by consumption of a
713 register value or TRAPB or EXCB. */
717 if (!(fn11
& QUAL_U
)) {
718 /* Note that QUAL_U == QUAL_V, so ignore either. */
719 ignore
|= FPCR_UNF
| FPCR_IOV
;
721 if (!(fn11
& QUAL_I
)) {
724 ign
= tcg_const_i32(ignore
);
726 /* ??? Pass in the regno of the destination so that the helper can
727 set EXC_MASK, which contains a bitmask of destination registers
728 that have caused arithmetic traps. A simple userspace emulation
729 does not require this. We do need it for a guest kernel's entArith,
730 or if we were to do something clever with imprecise exceptions. */
731 reg
= tcg_const_i32(rc
+ 32);
733 gen_helper_fp_exc_raise_s(cpu_env
, ign
, reg
);
735 gen_helper_fp_exc_raise(cpu_env
, ign
, reg
);
738 tcg_temp_free_i32(reg
);
739 tcg_temp_free_i32(ign
);
742 static void gen_cvtlq(TCGv vc
, TCGv vb
)
744 TCGv tmp
= tcg_temp_new();
746 /* The arithmetic right shift here, plus the sign-extended mask below
747 yields a sign-extended result without an explicit ext32s_i64. */
748 tcg_gen_sari_i64(tmp
, vb
, 32);
749 tcg_gen_shri_i64(vc
, vb
, 29);
750 tcg_gen_andi_i64(tmp
, tmp
, (int32_t)0xc0000000);
751 tcg_gen_andi_i64(vc
, vc
, 0x3fffffff);
752 tcg_gen_or_i64(vc
, vc
, tmp
);
757 static void gen_ieee_arith2(DisasContext
*ctx
,
758 void (*helper
)(TCGv
, TCGv_ptr
, TCGv
),
759 int rb
, int rc
, int fn11
)
763 gen_qual_roundmode(ctx
, fn11
);
764 gen_qual_flushzero(ctx
, fn11
);
766 vb
= gen_ieee_input(ctx
, rb
, fn11
, 0);
767 helper(dest_fpr(ctx
, rc
), cpu_env
, vb
);
769 gen_fp_exc_raise(rc
, fn11
);
772 #define IEEE_ARITH2(name) \
773 static inline void glue(gen_, name)(DisasContext *ctx, \
774 int rb, int rc, int fn11) \
776 gen_ieee_arith2(ctx, gen_helper_##name, rb, rc, fn11); \
783 static void gen_cvttq(DisasContext
*ctx
, int rb
, int rc
, int fn11
)
787 /* No need to set flushzero, since we have an integer output. */
788 vb
= gen_ieee_input(ctx
, rb
, fn11
, 0);
789 vc
= dest_fpr(ctx
, rc
);
791 /* Almost all integer conversions use cropped rounding;
792 special case that. */
793 if ((fn11
& QUAL_RM_MASK
) == QUAL_RM_C
) {
794 gen_helper_cvttq_c(vc
, cpu_env
, vb
);
796 gen_qual_roundmode(ctx
, fn11
);
797 gen_helper_cvttq(vc
, cpu_env
, vb
);
799 gen_fp_exc_raise(rc
, fn11
);
802 static void gen_ieee_intcvt(DisasContext
*ctx
,
803 void (*helper
)(TCGv
, TCGv_ptr
, TCGv
),
804 int rb
, int rc
, int fn11
)
808 gen_qual_roundmode(ctx
, fn11
);
809 vb
= load_fpr(ctx
, rb
);
810 vc
= dest_fpr(ctx
, rc
);
812 /* The only exception that can be raised by integer conversion
813 is inexact. Thus we only need to worry about exceptions when
814 inexact handling is requested. */
816 helper(vc
, cpu_env
, vb
);
817 gen_fp_exc_raise(rc
, fn11
);
819 helper(vc
, cpu_env
, vb
);
823 #define IEEE_INTCVT(name) \
824 static inline void glue(gen_, name)(DisasContext *ctx, \
825 int rb, int rc, int fn11) \
827 gen_ieee_intcvt(ctx, gen_helper_##name, rb, rc, fn11); \
832 static void gen_cpy_mask(TCGv vc
, TCGv va
, TCGv vb
, bool inv_a
, uint64_t mask
)
834 TCGv vmask
= tcg_const_i64(mask
);
835 TCGv tmp
= tcg_temp_new_i64();
838 tcg_gen_andc_i64(tmp
, vmask
, va
);
840 tcg_gen_and_i64(tmp
, va
, vmask
);
843 tcg_gen_andc_i64(vc
, vb
, vmask
);
844 tcg_gen_or_i64(vc
, vc
, tmp
);
846 tcg_temp_free(vmask
);
850 static void gen_ieee_arith3(DisasContext
*ctx
,
851 void (*helper
)(TCGv
, TCGv_ptr
, TCGv
, TCGv
),
852 int ra
, int rb
, int rc
, int fn11
)
856 gen_qual_roundmode(ctx
, fn11
);
857 gen_qual_flushzero(ctx
, fn11
);
859 va
= gen_ieee_input(ctx
, ra
, fn11
, 0);
860 vb
= gen_ieee_input(ctx
, rb
, fn11
, 0);
861 vc
= dest_fpr(ctx
, rc
);
862 helper(vc
, cpu_env
, va
, vb
);
864 gen_fp_exc_raise(rc
, fn11
);
867 #define IEEE_ARITH3(name) \
868 static inline void glue(gen_, name)(DisasContext *ctx, \
869 int ra, int rb, int rc, int fn11) \
871 gen_ieee_arith3(ctx, gen_helper_##name, ra, rb, rc, fn11); \
882 static void gen_ieee_compare(DisasContext
*ctx
,
883 void (*helper
)(TCGv
, TCGv_ptr
, TCGv
, TCGv
),
884 int ra
, int rb
, int rc
, int fn11
)
888 va
= gen_ieee_input(ctx
, ra
, fn11
, 1);
889 vb
= gen_ieee_input(ctx
, rb
, fn11
, 1);
890 vc
= dest_fpr(ctx
, rc
);
891 helper(vc
, cpu_env
, va
, vb
);
893 gen_fp_exc_raise(rc
, fn11
);
896 #define IEEE_CMP3(name) \
897 static inline void glue(gen_, name)(DisasContext *ctx, \
898 int ra, int rb, int rc, int fn11) \
900 gen_ieee_compare(ctx, gen_helper_##name, ra, rb, rc, fn11); \
/* Expand the 8-bit ZAPNOT byte-select literal LIT into a 64-bit mask:
   bit i of LIT set => byte i of the result is 0xff.  */
static inline uint64_t zapnot_mask(uint8_t lit)
{
    uint64_t mask = 0;
    int i;

    for (i = 0; i < 8; ++i) {
        if ((lit >> i) & 1) {
            mask |= 0xffull << (i * 8);
        }
    }
    return mask;
}
920 /* Implement zapnot with an immediate operand, which expands to some
921 form of immediate AND. This is a basic building block in the
922 definition of many of the other byte manipulation instructions. */
923 static void gen_zapnoti(TCGv dest
, TCGv src
, uint8_t lit
)
927 tcg_gen_movi_i64(dest
, 0);
930 tcg_gen_ext8u_i64(dest
, src
);
933 tcg_gen_ext16u_i64(dest
, src
);
936 tcg_gen_ext32u_i64(dest
, src
);
939 tcg_gen_mov_i64(dest
, src
);
942 tcg_gen_andi_i64(dest
, src
, zapnot_mask(lit
));
947 /* EXTWH, EXTLH, EXTQH */
948 static void gen_ext_h(DisasContext
*ctx
, TCGv vc
, TCGv va
, int rb
, bool islit
,
949 uint8_t lit
, uint8_t byte_mask
)
952 tcg_gen_shli_i64(vc
, va
, (64 - lit
* 8) & 0x3f);
954 TCGv tmp
= tcg_temp_new();
955 tcg_gen_shli_i64(tmp
, load_gpr(ctx
, rb
), 3);
956 tcg_gen_neg_i64(tmp
, tmp
);
957 tcg_gen_andi_i64(tmp
, tmp
, 0x3f);
958 tcg_gen_shl_i64(vc
, va
, tmp
);
961 gen_zapnoti(vc
, vc
, byte_mask
);
964 /* EXTBL, EXTWL, EXTLL, EXTQL */
965 static void gen_ext_l(DisasContext
*ctx
, TCGv vc
, TCGv va
, int rb
, bool islit
,
966 uint8_t lit
, uint8_t byte_mask
)
969 tcg_gen_shri_i64(vc
, va
, (lit
& 7) * 8);
971 TCGv tmp
= tcg_temp_new();
972 tcg_gen_andi_i64(tmp
, load_gpr(ctx
, rb
), 7);
973 tcg_gen_shli_i64(tmp
, tmp
, 3);
974 tcg_gen_shr_i64(vc
, va
, tmp
);
977 gen_zapnoti(vc
, vc
, byte_mask
);
980 /* INSWH, INSLH, INSQH */
981 static void gen_ins_h(DisasContext
*ctx
, TCGv vc
, TCGv va
, int rb
, bool islit
,
982 uint8_t lit
, uint8_t byte_mask
)
984 TCGv tmp
= tcg_temp_new();
986 /* The instruction description has us left-shift the byte mask and extract
987 bits <15:8> and apply that zap at the end. This is equivalent to simply
988 performing the zap first and shifting afterward. */
989 gen_zapnoti(tmp
, va
, byte_mask
);
993 if (unlikely(lit
== 0)) {
994 tcg_gen_movi_i64(vc
, 0);
996 tcg_gen_shri_i64(vc
, tmp
, 64 - lit
* 8);
999 TCGv shift
= tcg_temp_new();
1001 /* If (B & 7) == 0, we need to shift by 64 and leave a zero. Do this
1002 portably by splitting the shift into two parts: shift_count-1 and 1.
1003 Arrange for the -1 by using ones-complement instead of
1004 twos-complement in the negation: ~(B * 8) & 63. */
1006 tcg_gen_shli_i64(shift
, load_gpr(ctx
, rb
), 3);
1007 tcg_gen_not_i64(shift
, shift
);
1008 tcg_gen_andi_i64(shift
, shift
, 0x3f);
1010 tcg_gen_shr_i64(vc
, tmp
, shift
);
1011 tcg_gen_shri_i64(vc
, vc
, 1);
1012 tcg_temp_free(shift
);
1017 /* INSBL, INSWL, INSLL, INSQL */
1018 static void gen_ins_l(DisasContext
*ctx
, TCGv vc
, TCGv va
, int rb
, bool islit
,
1019 uint8_t lit
, uint8_t byte_mask
)
1021 TCGv tmp
= tcg_temp_new();
1023 /* The instruction description has us left-shift the byte mask
1024 the same number of byte slots as the data and apply the zap
1025 at the end. This is equivalent to simply performing the zap
1026 first and shifting afterward. */
1027 gen_zapnoti(tmp
, va
, byte_mask
);
1030 tcg_gen_shli_i64(vc
, tmp
, (lit
& 7) * 8);
1032 TCGv shift
= tcg_temp_new();
1033 tcg_gen_andi_i64(shift
, load_gpr(ctx
, rb
), 7);
1034 tcg_gen_shli_i64(shift
, shift
, 3);
1035 tcg_gen_shl_i64(vc
, tmp
, shift
);
1036 tcg_temp_free(shift
);
1041 /* MSKWH, MSKLH, MSKQH */
1042 static void gen_msk_h(DisasContext
*ctx
, TCGv vc
, TCGv va
, int rb
, bool islit
,
1043 uint8_t lit
, uint8_t byte_mask
)
1046 gen_zapnoti(vc
, va
, ~((byte_mask
<< (lit
& 7)) >> 8));
1048 TCGv shift
= tcg_temp_new();
1049 TCGv mask
= tcg_temp_new();
1051 /* The instruction description is as above, where the byte_mask
1052 is shifted left, and then we extract bits <15:8>. This can be
1053 emulated with a right-shift on the expanded byte mask. This
1054 requires extra care because for an input <2:0> == 0 we need a
1055 shift of 64 bits in order to generate a zero. This is done by
1056 splitting the shift into two parts, the variable shift - 1
1057 followed by a constant 1 shift. The code we expand below is
1058 equivalent to ~(B * 8) & 63. */
1060 tcg_gen_shli_i64(shift
, load_gpr(ctx
, rb
), 3);
1061 tcg_gen_not_i64(shift
, shift
);
1062 tcg_gen_andi_i64(shift
, shift
, 0x3f);
1063 tcg_gen_movi_i64(mask
, zapnot_mask (byte_mask
));
1064 tcg_gen_shr_i64(mask
, mask
, shift
);
1065 tcg_gen_shri_i64(mask
, mask
, 1);
1067 tcg_gen_andc_i64(vc
, va
, mask
);
1069 tcg_temp_free(mask
);
1070 tcg_temp_free(shift
);
1074 /* MSKBL, MSKWL, MSKLL, MSKQL */
1075 static void gen_msk_l(DisasContext
*ctx
, TCGv vc
, TCGv va
, int rb
, bool islit
,
1076 uint8_t lit
, uint8_t byte_mask
)
1079 gen_zapnoti(vc
, va
, ~(byte_mask
<< (lit
& 7)));
1081 TCGv shift
= tcg_temp_new();
1082 TCGv mask
= tcg_temp_new();
1084 tcg_gen_andi_i64(shift
, load_gpr(ctx
, rb
), 7);
1085 tcg_gen_shli_i64(shift
, shift
, 3);
1086 tcg_gen_movi_i64(mask
, zapnot_mask(byte_mask
));
1087 tcg_gen_shl_i64(mask
, mask
, shift
);
1089 tcg_gen_andc_i64(vc
, va
, mask
);
1091 tcg_temp_free(mask
);
1092 tcg_temp_free(shift
);
1096 static void gen_rx(DisasContext
*ctx
, int ra
, int set
)
1101 tcg_gen_ld8u_i64(ctx
->ir
[ra
], cpu_env
,
1102 offsetof(CPUAlphaState
, intr_flag
));
1105 tmp
= tcg_const_i32(set
);
1106 tcg_gen_st8_i32(tmp
, cpu_env
, offsetof(CPUAlphaState
, intr_flag
));
1107 tcg_temp_free_i32(tmp
);
1110 static ExitStatus
gen_call_pal(DisasContext
*ctx
, int palcode
)
1112 /* We're emulating OSF/1 PALcode. Many of these are trivial access
1113 to internal cpu registers. */
1115 /* Unprivileged PAL call */
1116 if (palcode
>= 0x80 && palcode
< 0xC0) {
1120 /* No-op inside QEMU. */
1124 tcg_gen_ld_i64(ctx
->ir
[IR_V0
], cpu_env
,
1125 offsetof(CPUAlphaState
, unique
));
1129 tcg_gen_st_i64(ctx
->ir
[IR_A0
], cpu_env
,
1130 offsetof(CPUAlphaState
, unique
));
1139 #ifndef CONFIG_USER_ONLY
1140 /* Privileged PAL code */
1141 if (palcode
< 0x40 && (ctx
->tb
->flags
& TB_FLAGS_USER_MODE
) == 0) {
1145 /* No-op inside QEMU. */
1149 /* No-op inside QEMU. */
1153 tcg_gen_st_i64(ctx
->ir
[IR_A0
], cpu_env
,
1154 offsetof(CPUAlphaState
, vptptr
));
1158 tcg_gen_st_i64(ctx
->ir
[IR_A0
], cpu_env
,
1159 offsetof(CPUAlphaState
, sysval
));
1163 tcg_gen_ld_i64(ctx
->ir
[IR_V0
], cpu_env
,
1164 offsetof(CPUAlphaState
, sysval
));
1171 /* Note that we already know we're in kernel mode, so we know
1172 that PS only contains the 3 IPL bits. */
1173 tcg_gen_ld8u_i64(ctx
->ir
[IR_V0
], cpu_env
,
1174 offsetof(CPUAlphaState
, ps
));
1176 /* But make sure and store only the 3 IPL bits from the user. */
1177 tmp
= tcg_temp_new();
1178 tcg_gen_andi_i64(tmp
, ctx
->ir
[IR_A0
], PS_INT_MASK
);
1179 tcg_gen_st8_i64(tmp
, cpu_env
, offsetof(CPUAlphaState
, ps
));
1186 tcg_gen_ld8u_i64(ctx
->ir
[IR_V0
], cpu_env
,
1187 offsetof(CPUAlphaState
, ps
));
1191 tcg_gen_st_i64(ctx
->ir
[IR_A0
], cpu_env
,
1192 offsetof(CPUAlphaState
, usp
));
1196 tcg_gen_ld_i64(ctx
->ir
[IR_V0
], cpu_env
,
1197 offsetof(CPUAlphaState
, usp
));
1201 tcg_gen_ld32s_i64(ctx
->ir
[IR_V0
], cpu_env
,
1202 -offsetof(AlphaCPU
, env
) + offsetof(CPUState
, cpu_index
));
1212 return gen_invalid(ctx
);
1215 #ifdef CONFIG_USER_ONLY
1216 return gen_excp(ctx
, EXCP_CALL_PAL
, palcode
);
1219 TCGv tmp
= tcg_temp_new();
1220 uint64_t exc_addr
= ctx
->pc
;
1221 uint64_t entry
= ctx
->palbr
;
1223 if (ctx
->tb
->flags
& TB_FLAGS_PAL_MODE
) {
1226 tcg_gen_movi_i64(tmp
, 1);
1227 tcg_gen_st8_i64(tmp
, cpu_env
, offsetof(CPUAlphaState
, pal_mode
));
1230 tcg_gen_movi_i64(tmp
, exc_addr
);
1231 tcg_gen_st_i64(tmp
, cpu_env
, offsetof(CPUAlphaState
, exc_addr
));
1234 entry
+= (palcode
& 0x80
1235 ? 0x2000 + (palcode
- 0x80) * 64
1236 : 0x1000 + palcode
* 64);
1238 /* Since the destination is running in PALmode, we don't really
1239 need the page permissions check. We'll see the existence of
1240 the page when we create the TB, and we'll flush all TBs if
1241 we change the PAL base register. */
1242 if (!ctx
->singlestep_enabled
&& !(ctx
->tb
->cflags
& CF_LAST_IO
)) {
1244 tcg_gen_movi_i64(cpu_pc
, entry
);
1245 tcg_gen_exit_tb((uintptr_t)ctx
->tb
);
1246 return EXIT_GOTO_TB
;
1248 tcg_gen_movi_i64(cpu_pc
, entry
);
1249 return EXIT_PC_UPDATED
;
1255 #ifndef CONFIG_USER_ONLY
1257 #define PR_BYTE 0x100000
1258 #define PR_LONG 0x200000
1260 static int cpu_pr_data(int pr
)
1263 case 0: return offsetof(CPUAlphaState
, ps
) | PR_BYTE
;
1264 case 1: return offsetof(CPUAlphaState
, fen
) | PR_BYTE
;
1265 case 2: return offsetof(CPUAlphaState
, pcc_ofs
) | PR_LONG
;
1266 case 3: return offsetof(CPUAlphaState
, trap_arg0
);
1267 case 4: return offsetof(CPUAlphaState
, trap_arg1
);
1268 case 5: return offsetof(CPUAlphaState
, trap_arg2
);
1269 case 6: return offsetof(CPUAlphaState
, exc_addr
);
1270 case 7: return offsetof(CPUAlphaState
, palbr
);
1271 case 8: return offsetof(CPUAlphaState
, ptbr
);
1272 case 9: return offsetof(CPUAlphaState
, vptptr
);
1273 case 10: return offsetof(CPUAlphaState
, unique
);
1274 case 11: return offsetof(CPUAlphaState
, sysval
);
1275 case 12: return offsetof(CPUAlphaState
, usp
);
1278 return offsetof(CPUAlphaState
, scratch
[pr
- 40]);
1281 return offsetof(CPUAlphaState
, alarm_expire
);
1286 static ExitStatus
gen_mfpr(DisasContext
*ctx
, TCGv va
, int regno
)
1288 void (*helper
)(TCGv
);
1293 /* Accessing the "non-shadow" general registers. */
1294 regno
= regno
== 39 ? 25 : regno
- 32 + 8;
1295 tcg_gen_mov_i64(va
, cpu_std_ir
[regno
]);
1298 case 250: /* WALLTIME */
1299 helper
= gen_helper_get_walltime
;
1301 case 249: /* VMTIME */
1302 helper
= gen_helper_get_vmtime
;
1308 return EXIT_PC_STALE
;
1315 /* The basic registers are data only, and unknown registers
1316 are read-zero, write-ignore. */
1317 data
= cpu_pr_data(regno
);
1319 tcg_gen_movi_i64(va
, 0);
1320 } else if (data
& PR_BYTE
) {
1321 tcg_gen_ld8u_i64(va
, cpu_env
, data
& ~PR_BYTE
);
1322 } else if (data
& PR_LONG
) {
1323 tcg_gen_ld32s_i64(va
, cpu_env
, data
& ~PR_LONG
);
1325 tcg_gen_ld_i64(va
, cpu_env
, data
);
1333 static ExitStatus
gen_mtpr(DisasContext
*ctx
, TCGv vb
, int regno
)
1341 gen_helper_tbia(cpu_env
);
1346 gen_helper_tbis(cpu_env
, vb
);
1351 tmp
= tcg_const_i64(1);
1352 tcg_gen_st32_i64(tmp
, cpu_env
, -offsetof(AlphaCPU
, env
) +
1353 offsetof(CPUState
, halted
));
1354 return gen_excp(ctx
, EXCP_HLT
, 0);
1358 gen_helper_halt(vb
);
1359 return EXIT_PC_STALE
;
1363 gen_helper_set_alarm(cpu_env
, vb
);
1368 tcg_gen_st_i64(vb
, cpu_env
, offsetof(CPUAlphaState
, palbr
));
1369 /* Changing the PAL base register implies un-chaining all of the TBs
1370 that ended with a CALL_PAL. Since the base register usually only
1371 changes during boot, flushing everything works well. */
1372 gen_helper_tb_flush(cpu_env
);
1373 return EXIT_PC_STALE
;
1376 /* Accessing the "non-shadow" general registers. */
1377 regno
= regno
== 39 ? 25 : regno
- 32 + 8;
1378 tcg_gen_mov_i64(cpu_std_ir
[regno
], vb
);
1382 /* The basic registers are data only, and unknown registers
1383 are read-zero, write-ignore. */
1384 data
= cpu_pr_data(regno
);
1386 if (data
& PR_BYTE
) {
1387 tcg_gen_st8_i64(vb
, cpu_env
, data
& ~PR_BYTE
);
1388 } else if (data
& PR_LONG
) {
1389 tcg_gen_st32_i64(vb
, cpu_env
, data
& ~PR_LONG
);
1391 tcg_gen_st_i64(vb
, cpu_env
, data
);
1399 #endif /* !USER_ONLY*/
1401 #define REQUIRE_NO_LIT \
1408 #define REQUIRE_TB_FLAG(FLAG) \
1410 if ((ctx->tb->flags & (FLAG)) == 0) { \
1415 #define REQUIRE_REG_31(WHICH) \
1417 if (WHICH != 31) { \
1422 static ExitStatus
translate_one(DisasContext
*ctx
, uint32_t insn
)
1424 int32_t disp21
, disp16
, disp12
__attribute__((unused
));
1426 uint8_t opc
, ra
, rb
, rc
, fpfn
, fn7
, lit
;
1427 bool islit
, real_islit
;
1428 TCGv va
, vb
, vc
, tmp
, tmp2
;
1432 /* Decode all instruction fields */
1433 opc
= extract32(insn
, 26, 6);
1434 ra
= extract32(insn
, 21, 5);
1435 rb
= extract32(insn
, 16, 5);
1436 rc
= extract32(insn
, 0, 5);
1437 real_islit
= islit
= extract32(insn
, 12, 1);
1438 lit
= extract32(insn
, 13, 8);
1440 disp21
= sextract32(insn
, 0, 21);
1441 disp16
= sextract32(insn
, 0, 16);
1442 disp12
= sextract32(insn
, 0, 12);
1444 fn11
= extract32(insn
, 5, 11);
1445 fpfn
= extract32(insn
, 5, 6);
1446 fn7
= extract32(insn
, 5, 7);
1448 if (rb
== 31 && !islit
) {
1457 ret
= gen_call_pal(ctx
, insn
& 0x03ffffff);
1483 disp16
= (uint32_t)disp16
<< 16;
1487 va
= dest_gpr(ctx
, ra
);
1488 /* It's worth special-casing immediate loads. */
1490 tcg_gen_movi_i64(va
, disp16
);
1492 tcg_gen_addi_i64(va
, load_gpr(ctx
, rb
), disp16
);
1498 REQUIRE_TB_FLAG(TB_FLAGS_AMASK_BWX
);
1499 gen_load_mem(ctx
, &tcg_gen_qemu_ld8u
, ra
, rb
, disp16
, 0, 0);
1503 gen_load_mem(ctx
, &tcg_gen_qemu_ld64
, ra
, rb
, disp16
, 0, 1);
1507 REQUIRE_TB_FLAG(TB_FLAGS_AMASK_BWX
);
1508 gen_load_mem(ctx
, &tcg_gen_qemu_ld16u
, ra
, rb
, disp16
, 0, 0);
1512 REQUIRE_TB_FLAG(TB_FLAGS_AMASK_BWX
);
1513 gen_store_mem(ctx
, &tcg_gen_qemu_st16
, ra
, rb
, disp16
, 0, 0);
1517 REQUIRE_TB_FLAG(TB_FLAGS_AMASK_BWX
);
1518 gen_store_mem(ctx
, &tcg_gen_qemu_st8
, ra
, rb
, disp16
, 0, 0);
1522 gen_store_mem(ctx
, &tcg_gen_qemu_st64
, ra
, rb
, disp16
, 0, 1);
1526 vc
= dest_gpr(ctx
, rc
);
1527 vb
= load_gpr_lit(ctx
, rb
, lit
, islit
);
1531 /* Special case ADDL as SEXTL. */
1532 tcg_gen_ext32s_i64(vc
, vb
);
1536 /* Special case SUBQ as NEGQ. */
1537 tcg_gen_neg_i64(vc
, vb
);
1542 va
= load_gpr(ctx
, ra
);
1546 tcg_gen_add_i64(vc
, va
, vb
);
1547 tcg_gen_ext32s_i64(vc
, vc
);
1551 tmp
= tcg_temp_new();
1552 tcg_gen_shli_i64(tmp
, va
, 2);
1553 tcg_gen_add_i64(tmp
, tmp
, vb
);
1554 tcg_gen_ext32s_i64(vc
, tmp
);
1559 tcg_gen_sub_i64(vc
, va
, vb
);
1560 tcg_gen_ext32s_i64(vc
, vc
);
1564 tmp
= tcg_temp_new();
1565 tcg_gen_shli_i64(tmp
, va
, 2);
1566 tcg_gen_sub_i64(tmp
, tmp
, vb
);
1567 tcg_gen_ext32s_i64(vc
, tmp
);
1573 /* Special case 0 >= X as X == 0. */
1574 gen_helper_cmpbe0(vc
, vb
);
1576 gen_helper_cmpbge(vc
, va
, vb
);
1581 tmp
= tcg_temp_new();
1582 tcg_gen_shli_i64(tmp
, va
, 3);
1583 tcg_gen_add_i64(tmp
, tmp
, vb
);
1584 tcg_gen_ext32s_i64(vc
, tmp
);
1589 tmp
= tcg_temp_new();
1590 tcg_gen_shli_i64(tmp
, va
, 3);
1591 tcg_gen_sub_i64(tmp
, tmp
, vb
);
1592 tcg_gen_ext32s_i64(vc
, tmp
);
1597 tcg_gen_setcond_i64(TCG_COND_LTU
, vc
, va
, vb
);
1601 tcg_gen_add_i64(vc
, va
, vb
);
1605 tmp
= tcg_temp_new();
1606 tcg_gen_shli_i64(tmp
, va
, 2);
1607 tcg_gen_add_i64(vc
, tmp
, vb
);
1612 tcg_gen_sub_i64(vc
, va
, vb
);
1616 tmp
= tcg_temp_new();
1617 tcg_gen_shli_i64(tmp
, va
, 2);
1618 tcg_gen_sub_i64(vc
, tmp
, vb
);
1623 tcg_gen_setcond_i64(TCG_COND_EQ
, vc
, va
, vb
);
1627 tmp
= tcg_temp_new();
1628 tcg_gen_shli_i64(tmp
, va
, 3);
1629 tcg_gen_add_i64(vc
, tmp
, vb
);
1634 tmp
= tcg_temp_new();
1635 tcg_gen_shli_i64(tmp
, va
, 3);
1636 tcg_gen_sub_i64(vc
, tmp
, vb
);
1641 tcg_gen_setcond_i64(TCG_COND_LEU
, vc
, va
, vb
);
1645 tmp
= tcg_temp_new();
1646 tcg_gen_ext32s_i64(tmp
, va
);
1647 tcg_gen_ext32s_i64(vc
, vb
);
1648 tcg_gen_add_i64(tmp
, tmp
, vc
);
1649 tcg_gen_ext32s_i64(vc
, tmp
);
1650 gen_helper_check_overflow(cpu_env
, vc
, tmp
);
1655 tmp
= tcg_temp_new();
1656 tcg_gen_ext32s_i64(tmp
, va
);
1657 tcg_gen_ext32s_i64(vc
, vb
);
1658 tcg_gen_sub_i64(tmp
, tmp
, vc
);
1659 tcg_gen_ext32s_i64(vc
, tmp
);
1660 gen_helper_check_overflow(cpu_env
, vc
, tmp
);
1665 tcg_gen_setcond_i64(TCG_COND_LT
, vc
, va
, vb
);
1669 tmp
= tcg_temp_new();
1670 tmp2
= tcg_temp_new();
1671 tcg_gen_eqv_i64(tmp
, va
, vb
);
1672 tcg_gen_mov_i64(tmp2
, va
);
1673 tcg_gen_add_i64(vc
, va
, vb
);
1674 tcg_gen_xor_i64(tmp2
, tmp2
, vc
);
1675 tcg_gen_and_i64(tmp
, tmp
, tmp2
);
1676 tcg_gen_shri_i64(tmp
, tmp
, 63);
1677 tcg_gen_movi_i64(tmp2
, 0);
1678 gen_helper_check_overflow(cpu_env
, tmp
, tmp2
);
1680 tcg_temp_free(tmp2
);
1684 tmp
= tcg_temp_new();
1685 tmp2
= tcg_temp_new();
1686 tcg_gen_xor_i64(tmp
, va
, vb
);
1687 tcg_gen_mov_i64(tmp2
, va
);
1688 tcg_gen_sub_i64(vc
, va
, vb
);
1689 tcg_gen_xor_i64(tmp2
, tmp2
, vc
);
1690 tcg_gen_and_i64(tmp
, tmp
, tmp2
);
1691 tcg_gen_shri_i64(tmp
, tmp
, 63);
1692 tcg_gen_movi_i64(tmp2
, 0);
1693 gen_helper_check_overflow(cpu_env
, tmp
, tmp2
);
1695 tcg_temp_free(tmp2
);
1699 tcg_gen_setcond_i64(TCG_COND_LE
, vc
, va
, vb
);
1709 /* Special case BIS as NOP. */
1713 /* Special case BIS as MOV. */
1714 vc
= dest_gpr(ctx
, rc
);
1716 tcg_gen_movi_i64(vc
, lit
);
1718 tcg_gen_mov_i64(vc
, load_gpr(ctx
, rb
));
1724 vc
= dest_gpr(ctx
, rc
);
1725 vb
= load_gpr_lit(ctx
, rb
, lit
, islit
);
1727 if (fn7
== 0x28 && ra
== 31) {
1728 /* Special case ORNOT as NOT. */
1729 tcg_gen_not_i64(vc
, vb
);
1733 va
= load_gpr(ctx
, ra
);
1737 tcg_gen_and_i64(vc
, va
, vb
);
1741 tcg_gen_andc_i64(vc
, va
, vb
);
1745 tmp
= tcg_temp_new();
1746 tcg_gen_andi_i64(tmp
, va
, 1);
1747 tcg_gen_movcond_i64(TCG_COND_NE
, vc
, tmp
, load_zero(ctx
),
1748 vb
, load_gpr(ctx
, rc
));
1753 tmp
= tcg_temp_new();
1754 tcg_gen_andi_i64(tmp
, va
, 1);
1755 tcg_gen_movcond_i64(TCG_COND_EQ
, vc
, tmp
, load_zero(ctx
),
1756 vb
, load_gpr(ctx
, rc
));
1761 tcg_gen_or_i64(vc
, va
, vb
);
1765 tcg_gen_movcond_i64(TCG_COND_EQ
, vc
, va
, load_zero(ctx
),
1766 vb
, load_gpr(ctx
, rc
));
1770 tcg_gen_movcond_i64(TCG_COND_NE
, vc
, va
, load_zero(ctx
),
1771 vb
, load_gpr(ctx
, rc
));
1775 tcg_gen_orc_i64(vc
, va
, vb
);
1779 tcg_gen_xor_i64(vc
, va
, vb
);
1783 tcg_gen_movcond_i64(TCG_COND_LT
, vc
, va
, load_zero(ctx
),
1784 vb
, load_gpr(ctx
, rc
));
1788 tcg_gen_movcond_i64(TCG_COND_GE
, vc
, va
, load_zero(ctx
),
1789 vb
, load_gpr(ctx
, rc
));
1793 tcg_gen_eqv_i64(vc
, va
, vb
);
1799 uint64_t amask
= ctx
->tb
->flags
>> TB_FLAGS_AMASK_SHIFT
;
1800 tcg_gen_andi_i64(vc
, vb
, ~amask
);
1805 tcg_gen_movcond_i64(TCG_COND_LE
, vc
, va
, load_zero(ctx
),
1806 vb
, load_gpr(ctx
, rc
));
1810 tcg_gen_movcond_i64(TCG_COND_GT
, vc
, va
, load_zero(ctx
),
1811 vb
, load_gpr(ctx
, rc
));
1816 tcg_gen_movi_i64(vc
, ctx
->implver
);
1824 vc
= dest_gpr(ctx
, rc
);
1825 va
= load_gpr(ctx
, ra
);
1829 gen_msk_l(ctx
, vc
, va
, rb
, islit
, lit
, 0x01);
1833 gen_ext_l(ctx
, vc
, va
, rb
, islit
, lit
, 0x01);
1837 gen_ins_l(ctx
, vc
, va
, rb
, islit
, lit
, 0x01);
1841 gen_msk_l(ctx
, vc
, va
, rb
, islit
, lit
, 0x03);
1845 gen_ext_l(ctx
, vc
, va
, rb
, islit
, lit
, 0x03);
1849 gen_ins_l(ctx
, vc
, va
, rb
, islit
, lit
, 0x03);
1853 gen_msk_l(ctx
, vc
, va
, rb
, islit
, lit
, 0x0f);
1857 gen_ext_l(ctx
, vc
, va
, rb
, islit
, lit
, 0x0f);
1861 gen_ins_l(ctx
, vc
, va
, rb
, islit
, lit
, 0x0f);
1866 gen_zapnoti(vc
, va
, ~lit
);
1868 gen_helper_zap(vc
, va
, load_gpr(ctx
, rb
));
1874 gen_zapnoti(vc
, va
, lit
);
1876 gen_helper_zapnot(vc
, va
, load_gpr(ctx
, rb
));
1881 gen_msk_l(ctx
, vc
, va
, rb
, islit
, lit
, 0xff);
1886 tcg_gen_shri_i64(vc
, va
, lit
& 0x3f);
1888 tmp
= tcg_temp_new();
1889 vb
= load_gpr(ctx
, rb
);
1890 tcg_gen_andi_i64(tmp
, vb
, 0x3f);
1891 tcg_gen_shr_i64(vc
, va
, tmp
);
1897 gen_ext_l(ctx
, vc
, va
, rb
, islit
, lit
, 0xff);
1902 tcg_gen_shli_i64(vc
, va
, lit
& 0x3f);
1904 tmp
= tcg_temp_new();
1905 vb
= load_gpr(ctx
, rb
);
1906 tcg_gen_andi_i64(tmp
, vb
, 0x3f);
1907 tcg_gen_shl_i64(vc
, va
, tmp
);
1913 gen_ins_l(ctx
, vc
, va
, rb
, islit
, lit
, 0xff);
1918 tcg_gen_sari_i64(vc
, va
, lit
& 0x3f);
1920 tmp
= tcg_temp_new();
1921 vb
= load_gpr(ctx
, rb
);
1922 tcg_gen_andi_i64(tmp
, vb
, 0x3f);
1923 tcg_gen_sar_i64(vc
, va
, tmp
);
1929 gen_msk_h(ctx
, vc
, va
, rb
, islit
, lit
, 0x03);
1933 gen_ins_h(ctx
, vc
, va
, rb
, islit
, lit
, 0x03);
1937 gen_ext_h(ctx
, vc
, va
, rb
, islit
, lit
, 0x03);
1941 gen_msk_h(ctx
, vc
, va
, rb
, islit
, lit
, 0x0f);
1945 gen_ins_h(ctx
, vc
, va
, rb
, islit
, lit
, 0x0f);
1949 gen_ext_h(ctx
, vc
, va
, rb
, islit
, lit
, 0x0f);
1953 gen_msk_h(ctx
, vc
, va
, rb
, islit
, lit
, 0xff);
1957 gen_ins_h(ctx
, vc
, va
, rb
, islit
, lit
, 0xff);
1961 gen_ext_h(ctx
, vc
, va
, rb
, islit
, lit
, 0xff);
1969 vc
= dest_gpr(ctx
, rc
);
1970 vb
= load_gpr_lit(ctx
, rb
, lit
, islit
);
1971 va
= load_gpr(ctx
, ra
);
1975 tcg_gen_mul_i64(vc
, va
, vb
);
1976 tcg_gen_ext32s_i64(vc
, vc
);
1980 tcg_gen_mul_i64(vc
, va
, vb
);
1984 tmp
= tcg_temp_new();
1985 tcg_gen_mulu2_i64(tmp
, vc
, va
, vb
);
1990 tmp
= tcg_temp_new();
1991 tcg_gen_ext32s_i64(tmp
, va
);
1992 tcg_gen_ext32s_i64(vc
, vb
);
1993 tcg_gen_mul_i64(tmp
, tmp
, vc
);
1994 tcg_gen_ext32s_i64(vc
, tmp
);
1995 gen_helper_check_overflow(cpu_env
, vc
, tmp
);
2000 tmp
= tcg_temp_new();
2001 tmp2
= tcg_temp_new();
2002 tcg_gen_muls2_i64(vc
, tmp
, va
, vb
);
2003 tcg_gen_sari_i64(tmp2
, vc
, 63);
2004 gen_helper_check_overflow(cpu_env
, tmp
, tmp2
);
2006 tcg_temp_free(tmp2
);
2014 REQUIRE_TB_FLAG(TB_FLAGS_AMASK_FIX
);
2015 vc
= dest_fpr(ctx
, rc
);
2016 switch (fpfn
) { /* fn11 & 0x3F */
2020 t32
= tcg_temp_new_i32();
2021 va
= load_gpr(ctx
, ra
);
2022 tcg_gen_extrl_i64_i32(t32
, va
);
2023 gen_helper_memory_to_s(vc
, t32
);
2024 tcg_temp_free_i32(t32
);
2029 vb
= load_fpr(ctx
, rb
);
2030 gen_helper_sqrtf(vc
, cpu_env
, vb
);
2035 gen_sqrts(ctx
, rb
, rc
, fn11
);
2040 t32
= tcg_temp_new_i32();
2041 va
= load_gpr(ctx
, ra
);
2042 tcg_gen_extrl_i64_i32(t32
, va
);
2043 gen_helper_memory_to_f(vc
, t32
);
2044 tcg_temp_free_i32(t32
);
2049 va
= load_gpr(ctx
, ra
);
2050 tcg_gen_mov_i64(vc
, va
);
2055 vb
= load_fpr(ctx
, rb
);
2056 gen_helper_sqrtg(vc
, cpu_env
, vb
);
2061 gen_sqrtt(ctx
, rb
, rc
, fn11
);
2069 /* VAX floating point */
2070 /* XXX: rounding mode and trap are ignored (!) */
2071 vc
= dest_fpr(ctx
, rc
);
2072 vb
= load_fpr(ctx
, rb
);
2073 va
= load_fpr(ctx
, ra
);
2074 switch (fpfn
) { /* fn11 & 0x3F */
2077 gen_helper_addf(vc
, cpu_env
, va
, vb
);
2081 gen_helper_subf(vc
, cpu_env
, va
, vb
);
2085 gen_helper_mulf(vc
, cpu_env
, va
, vb
);
2089 gen_helper_divf(vc
, cpu_env
, va
, vb
);
2097 gen_helper_addg(vc
, cpu_env
, va
, vb
);
2101 gen_helper_subg(vc
, cpu_env
, va
, vb
);
2105 gen_helper_mulg(vc
, cpu_env
, va
, vb
);
2109 gen_helper_divg(vc
, cpu_env
, va
, vb
);
2113 gen_helper_cmpgeq(vc
, cpu_env
, va
, vb
);
2117 gen_helper_cmpglt(vc
, cpu_env
, va
, vb
);
2121 gen_helper_cmpgle(vc
, cpu_env
, va
, vb
);
2126 gen_helper_cvtgf(vc
, cpu_env
, vb
);
2135 gen_helper_cvtgq(vc
, cpu_env
, vb
);
2140 gen_helper_cvtqf(vc
, cpu_env
, vb
);
2145 gen_helper_cvtqg(vc
, cpu_env
, vb
);
2153 /* IEEE floating-point */
2154 switch (fpfn
) { /* fn11 & 0x3F */
2157 gen_adds(ctx
, ra
, rb
, rc
, fn11
);
2161 gen_subs(ctx
, ra
, rb
, rc
, fn11
);
2165 gen_muls(ctx
, ra
, rb
, rc
, fn11
);
2169 gen_divs(ctx
, ra
, rb
, rc
, fn11
);
2173 gen_addt(ctx
, ra
, rb
, rc
, fn11
);
2177 gen_subt(ctx
, ra
, rb
, rc
, fn11
);
2181 gen_mult(ctx
, ra
, rb
, rc
, fn11
);
2185 gen_divt(ctx
, ra
, rb
, rc
, fn11
);
2189 gen_cmptun(ctx
, ra
, rb
, rc
, fn11
);
2193 gen_cmpteq(ctx
, ra
, rb
, rc
, fn11
);
2197 gen_cmptlt(ctx
, ra
, rb
, rc
, fn11
);
2201 gen_cmptle(ctx
, ra
, rb
, rc
, fn11
);
2205 if (fn11
== 0x2AC || fn11
== 0x6AC) {
2207 gen_cvtst(ctx
, rb
, rc
, fn11
);
2210 gen_cvtts(ctx
, rb
, rc
, fn11
);
2216 gen_cvttq(ctx
, rb
, rc
, fn11
);
2221 gen_cvtqs(ctx
, rb
, rc
, fn11
);
2226 gen_cvtqt(ctx
, rb
, rc
, fn11
);
2238 vc
= dest_fpr(ctx
, rc
);
2239 vb
= load_fpr(ctx
, rb
);
2245 /* Special case CPYS as FNOP. */
2247 vc
= dest_fpr(ctx
, rc
);
2248 va
= load_fpr(ctx
, ra
);
2250 /* Special case CPYS as FMOV. */
2251 tcg_gen_mov_i64(vc
, va
);
2253 vb
= load_fpr(ctx
, rb
);
2254 gen_cpy_mask(vc
, va
, vb
, 0, 0x8000000000000000ULL
);
2260 vc
= dest_fpr(ctx
, rc
);
2261 vb
= load_fpr(ctx
, rb
);
2262 va
= load_fpr(ctx
, ra
);
2263 gen_cpy_mask(vc
, va
, vb
, 1, 0x8000000000000000ULL
);
2267 vc
= dest_fpr(ctx
, rc
);
2268 vb
= load_fpr(ctx
, rb
);
2269 va
= load_fpr(ctx
, ra
);
2270 gen_cpy_mask(vc
, va
, vb
, 0, 0xFFF0000000000000ULL
);
2274 va
= load_fpr(ctx
, ra
);
2275 gen_helper_store_fpcr(cpu_env
, va
);
2276 if (ctx
->tb_rm
== QUAL_RM_D
) {
2277 /* Re-do the copy of the rounding mode to fp_status
2278 the next time we use dynamic rounding. */
2284 va
= dest_fpr(ctx
, ra
);
2285 gen_helper_load_fpcr(va
, cpu_env
);
2289 gen_fcmov(ctx
, TCG_COND_EQ
, ra
, rb
, rc
);
2293 gen_fcmov(ctx
, TCG_COND_NE
, ra
, rb
, rc
);
2297 gen_fcmov(ctx
, TCG_COND_LT
, ra
, rb
, rc
);
2301 gen_fcmov(ctx
, TCG_COND_GE
, ra
, rb
, rc
);
2305 gen_fcmov(ctx
, TCG_COND_LE
, ra
, rb
, rc
);
2309 gen_fcmov(ctx
, TCG_COND_GT
, ra
, rb
, rc
);
2311 case 0x030: /* CVTQL */
2312 case 0x130: /* CVTQL/V */
2313 case 0x530: /* CVTQL/SV */
2315 vc
= dest_fpr(ctx
, rc
);
2316 vb
= load_fpr(ctx
, rb
);
2317 gen_helper_cvtql(vc
, cpu_env
, vb
);
2318 gen_fp_exc_raise(rc
, fn11
);
2326 switch ((uint16_t)disp16
) {
2337 tcg_gen_mb(TCG_MO_ALL
| TCG_BAR_SC
);
2341 tcg_gen_mb(TCG_MO_ST_ST
| TCG_BAR_SC
);
2353 va
= dest_gpr(ctx
, ra
);
2354 if (ctx
->tb
->cflags
& CF_USE_ICOUNT
) {
2356 gen_helper_load_pcc(va
, cpu_env
);
2358 ret
= EXIT_PC_STALE
;
2360 gen_helper_load_pcc(va
, cpu_env
);
2388 /* HW_MFPR (PALcode) */
2389 #ifndef CONFIG_USER_ONLY
2390 REQUIRE_TB_FLAG(TB_FLAGS_PAL_MODE
);
2391 va
= dest_gpr(ctx
, ra
);
2392 ret
= gen_mfpr(ctx
, va
, insn
& 0xffff);
2399 /* JMP, JSR, RET, JSR_COROUTINE. These only differ by the branch
2400 prediction stack action, which of course we don't implement. */
2401 vb
= load_gpr(ctx
, rb
);
2402 tcg_gen_andi_i64(cpu_pc
, vb
, ~3);
2404 tcg_gen_movi_i64(ctx
->ir
[ra
], ctx
->pc
);
2406 ret
= EXIT_PC_UPDATED
;
2410 /* HW_LD (PALcode) */
2411 #ifndef CONFIG_USER_ONLY
2412 REQUIRE_TB_FLAG(TB_FLAGS_PAL_MODE
);
2414 TCGv addr
= tcg_temp_new();
2415 vb
= load_gpr(ctx
, rb
);
2416 va
= dest_gpr(ctx
, ra
);
2418 tcg_gen_addi_i64(addr
, vb
, disp12
);
2419 switch ((insn
>> 12) & 0xF) {
2421 /* Longword physical access (hw_ldl/p) */
2422 tcg_gen_qemu_ld_i64(va
, addr
, MMU_PHYS_IDX
, MO_LESL
);
2425 /* Quadword physical access (hw_ldq/p) */
2426 tcg_gen_qemu_ld_i64(va
, addr
, MMU_PHYS_IDX
, MO_LEQ
);
2429 /* Longword physical access with lock (hw_ldl_l/p) */
2430 gen_qemu_ldl_l(va
, addr
, MMU_PHYS_IDX
);
2433 /* Quadword physical access with lock (hw_ldq_l/p) */
2434 gen_qemu_ldq_l(va
, addr
, MMU_PHYS_IDX
);
2437 /* Longword virtual PTE fetch (hw_ldl/v) */
2440 /* Quadword virtual PTE fetch (hw_ldq/v) */
2450 /* Longword virtual access (hw_ldl) */
2453 /* Quadword virtual access (hw_ldq) */
2456 /* Longword virtual access with protection check (hw_ldl/w) */
2457 tcg_gen_qemu_ld_i64(va
, addr
, MMU_KERNEL_IDX
, MO_LESL
);
2460 /* Quadword virtual access with protection check (hw_ldq/w) */
2461 tcg_gen_qemu_ld_i64(va
, addr
, MMU_KERNEL_IDX
, MO_LEQ
);
2464 /* Longword virtual access with alt access mode (hw_ldl/a)*/
2467 /* Quadword virtual access with alt access mode (hw_ldq/a) */
2470 /* Longword virtual access with alternate access mode and
2471 protection checks (hw_ldl/wa) */
2472 tcg_gen_qemu_ld_i64(va
, addr
, MMU_USER_IDX
, MO_LESL
);
2475 /* Quadword virtual access with alternate access mode and
2476 protection checks (hw_ldq/wa) */
2477 tcg_gen_qemu_ld_i64(va
, addr
, MMU_USER_IDX
, MO_LEQ
);
2480 tcg_temp_free(addr
);
2488 vc
= dest_gpr(ctx
, rc
);
2491 REQUIRE_TB_FLAG(TB_FLAGS_AMASK_FIX
);
2493 va
= load_fpr(ctx
, ra
);
2494 tcg_gen_mov_i64(vc
, va
);
2496 } else if (fn7
== 0x78) {
2498 REQUIRE_TB_FLAG(TB_FLAGS_AMASK_FIX
);
2500 t32
= tcg_temp_new_i32();
2501 va
= load_fpr(ctx
, ra
);
2502 gen_helper_s_to_memory(t32
, va
);
2503 tcg_gen_ext_i32_i64(vc
, t32
);
2504 tcg_temp_free_i32(t32
);
2508 vb
= load_gpr_lit(ctx
, rb
, lit
, islit
);
2512 REQUIRE_TB_FLAG(TB_FLAGS_AMASK_BWX
);
2514 tcg_gen_ext8s_i64(vc
, vb
);
2518 REQUIRE_TB_FLAG(TB_FLAGS_AMASK_BWX
);
2520 tcg_gen_ext16s_i64(vc
, vb
);
2524 REQUIRE_TB_FLAG(TB_FLAGS_AMASK_CIX
);
2527 gen_helper_ctpop(vc
, vb
);
2531 REQUIRE_TB_FLAG(TB_FLAGS_AMASK_MVI
);
2533 va
= load_gpr(ctx
, ra
);
2534 gen_helper_perr(vc
, va
, vb
);
2538 REQUIRE_TB_FLAG(TB_FLAGS_AMASK_CIX
);
2541 gen_helper_ctlz(vc
, vb
);
2545 REQUIRE_TB_FLAG(TB_FLAGS_AMASK_CIX
);
2548 gen_helper_cttz(vc
, vb
);
2552 REQUIRE_TB_FLAG(TB_FLAGS_AMASK_MVI
);
2555 gen_helper_unpkbw(vc
, vb
);
2559 REQUIRE_TB_FLAG(TB_FLAGS_AMASK_MVI
);
2562 gen_helper_unpkbl(vc
, vb
);
2566 REQUIRE_TB_FLAG(TB_FLAGS_AMASK_MVI
);
2569 gen_helper_pkwb(vc
, vb
);
2573 REQUIRE_TB_FLAG(TB_FLAGS_AMASK_MVI
);
2576 gen_helper_pklb(vc
, vb
);
2580 REQUIRE_TB_FLAG(TB_FLAGS_AMASK_MVI
);
2581 va
= load_gpr(ctx
, ra
);
2582 gen_helper_minsb8(vc
, va
, vb
);
2586 REQUIRE_TB_FLAG(TB_FLAGS_AMASK_MVI
);
2587 va
= load_gpr(ctx
, ra
);
2588 gen_helper_minsw4(vc
, va
, vb
);
2592 REQUIRE_TB_FLAG(TB_FLAGS_AMASK_MVI
);
2593 va
= load_gpr(ctx
, ra
);
2594 gen_helper_minub8(vc
, va
, vb
);
2598 REQUIRE_TB_FLAG(TB_FLAGS_AMASK_MVI
);
2599 va
= load_gpr(ctx
, ra
);
2600 gen_helper_minuw4(vc
, va
, vb
);
2604 REQUIRE_TB_FLAG(TB_FLAGS_AMASK_MVI
);
2605 va
= load_gpr(ctx
, ra
);
2606 gen_helper_maxub8(vc
, va
, vb
);
2610 REQUIRE_TB_FLAG(TB_FLAGS_AMASK_MVI
);
2611 va
= load_gpr(ctx
, ra
);
2612 gen_helper_maxuw4(vc
, va
, vb
);
2616 REQUIRE_TB_FLAG(TB_FLAGS_AMASK_MVI
);
2617 va
= load_gpr(ctx
, ra
);
2618 gen_helper_maxsb8(vc
, va
, vb
);
2622 REQUIRE_TB_FLAG(TB_FLAGS_AMASK_MVI
);
2623 va
= load_gpr(ctx
, ra
);
2624 gen_helper_maxsw4(vc
, va
, vb
);
2632 /* HW_MTPR (PALcode) */
2633 #ifndef CONFIG_USER_ONLY
2634 REQUIRE_TB_FLAG(TB_FLAGS_PAL_MODE
);
2635 vb
= load_gpr(ctx
, rb
);
2636 ret
= gen_mtpr(ctx
, vb
, insn
& 0xffff);
2643 /* HW_RET (PALcode) */
2644 #ifndef CONFIG_USER_ONLY
2645 REQUIRE_TB_FLAG(TB_FLAGS_PAL_MODE
);
2647 /* Pre-EV6 CPUs interpreted this as HW_REI, loading the return
2648 address from EXC_ADDR. This turns out to be useful for our
2649 emulation PALcode, so continue to accept it. */
2650 ctx
->lit
= vb
= tcg_temp_new();
2651 tcg_gen_ld_i64(vb
, cpu_env
, offsetof(CPUAlphaState
, exc_addr
));
2653 vb
= load_gpr(ctx
, rb
);
2655 tmp
= tcg_temp_new();
2656 tcg_gen_movi_i64(tmp
, 0);
2657 tcg_gen_st8_i64(tmp
, cpu_env
, offsetof(CPUAlphaState
, intr_flag
));
2658 tcg_gen_movi_i64(cpu_lock_addr
, -1);
2659 tcg_gen_andi_i64(tmp
, vb
, 1);
2660 tcg_gen_st8_i64(tmp
, cpu_env
, offsetof(CPUAlphaState
, pal_mode
));
2661 tcg_gen_andi_i64(cpu_pc
, vb
, ~3);
2662 ret
= EXIT_PC_UPDATED
;
2669 /* HW_ST (PALcode) */
2670 #ifndef CONFIG_USER_ONLY
2671 REQUIRE_TB_FLAG(TB_FLAGS_PAL_MODE
);
2673 switch ((insn
>> 12) & 0xF) {
2675 /* Longword physical access */
2676 va
= load_gpr(ctx
, ra
);
2677 vb
= load_gpr(ctx
, rb
);
2678 tmp
= tcg_temp_new();
2679 tcg_gen_addi_i64(tmp
, vb
, disp12
);
2680 tcg_gen_qemu_st_i64(va
, tmp
, MMU_PHYS_IDX
, MO_LESL
);
2684 /* Quadword physical access */
2685 va
= load_gpr(ctx
, ra
);
2686 vb
= load_gpr(ctx
, rb
);
2687 tmp
= tcg_temp_new();
2688 tcg_gen_addi_i64(tmp
, vb
, disp12
);
2689 tcg_gen_qemu_st_i64(va
, tmp
, MMU_PHYS_IDX
, MO_LEQ
);
2693 /* Longword physical access with lock */
2694 ret
= gen_store_conditional(ctx
, ra
, rb
, disp12
,
2695 MMU_PHYS_IDX
, MO_LESL
);
2698 /* Quadword physical access with lock */
2699 ret
= gen_store_conditional(ctx
, ra
, rb
, disp12
,
2700 MMU_PHYS_IDX
, MO_LEQ
);
2703 /* Longword virtual access */
2706 /* Quadword virtual access */
2727 /* Longword virtual access with alternate access mode */
2730 /* Quadword virtual access with alternate access mode */
2746 gen_load_mem(ctx
, &gen_qemu_ldf
, ra
, rb
, disp16
, 1, 0);
2750 gen_load_mem(ctx
, &gen_qemu_ldg
, ra
, rb
, disp16
, 1, 0);
2754 gen_load_mem(ctx
, &gen_qemu_lds
, ra
, rb
, disp16
, 1, 0);
2758 gen_load_mem(ctx
, &tcg_gen_qemu_ld64
, ra
, rb
, disp16
, 1, 0);
2762 gen_store_mem(ctx
, &gen_qemu_stf
, ra
, rb
, disp16
, 1, 0);
2766 gen_store_mem(ctx
, &gen_qemu_stg
, ra
, rb
, disp16
, 1, 0);
2770 gen_store_mem(ctx
, &gen_qemu_sts
, ra
, rb
, disp16
, 1, 0);
2774 gen_store_mem(ctx
, &tcg_gen_qemu_st64
, ra
, rb
, disp16
, 1, 0);
2778 gen_load_mem(ctx
, &tcg_gen_qemu_ld32s
, ra
, rb
, disp16
, 0, 0);
2782 gen_load_mem(ctx
, &tcg_gen_qemu_ld64
, ra
, rb
, disp16
, 0, 0);
2786 gen_load_mem(ctx
, &gen_qemu_ldl_l
, ra
, rb
, disp16
, 0, 0);
2790 gen_load_mem(ctx
, &gen_qemu_ldq_l
, ra
, rb
, disp16
, 0, 0);
2794 gen_store_mem(ctx
, &tcg_gen_qemu_st32
, ra
, rb
, disp16
, 0, 0);
2798 gen_store_mem(ctx
, &tcg_gen_qemu_st64
, ra
, rb
, disp16
, 0, 0);
2802 ret
= gen_store_conditional(ctx
, ra
, rb
, disp16
,
2803 ctx
->mem_idx
, MO_LESL
);
2807 ret
= gen_store_conditional(ctx
, ra
, rb
, disp16
,
2808 ctx
->mem_idx
, MO_LEQ
);
2812 ret
= gen_bdirect(ctx
, ra
, disp21
);
2814 case 0x31: /* FBEQ */
2815 ret
= gen_fbcond(ctx
, TCG_COND_EQ
, ra
, disp21
);
2817 case 0x32: /* FBLT */
2818 ret
= gen_fbcond(ctx
, TCG_COND_LT
, ra
, disp21
);
2820 case 0x33: /* FBLE */
2821 ret
= gen_fbcond(ctx
, TCG_COND_LE
, ra
, disp21
);
2825 ret
= gen_bdirect(ctx
, ra
, disp21
);
2827 case 0x35: /* FBNE */
2828 ret
= gen_fbcond(ctx
, TCG_COND_NE
, ra
, disp21
);
2830 case 0x36: /* FBGE */
2831 ret
= gen_fbcond(ctx
, TCG_COND_GE
, ra
, disp21
);
2833 case 0x37: /* FBGT */
2834 ret
= gen_fbcond(ctx
, TCG_COND_GT
, ra
, disp21
);
2838 ret
= gen_bcond(ctx
, TCG_COND_EQ
, ra
, disp21
, 1);
2842 ret
= gen_bcond(ctx
, TCG_COND_EQ
, ra
, disp21
, 0);
2846 ret
= gen_bcond(ctx
, TCG_COND_LT
, ra
, disp21
, 0);
2850 ret
= gen_bcond(ctx
, TCG_COND_LE
, ra
, disp21
, 0);
2854 ret
= gen_bcond(ctx
, TCG_COND_NE
, ra
, disp21
, 1);
2858 ret
= gen_bcond(ctx
, TCG_COND_NE
, ra
, disp21
, 0);
2862 ret
= gen_bcond(ctx
, TCG_COND_GE
, ra
, disp21
, 0);
2866 ret
= gen_bcond(ctx
, TCG_COND_GT
, ra
, disp21
, 0);
2869 ret
= gen_invalid(ctx
);
2876 void gen_intermediate_code(CPUAlphaState
*env
, struct TranslationBlock
*tb
)
2878 AlphaCPU
*cpu
= alpha_env_get_cpu(env
);
2879 CPUState
*cs
= CPU(cpu
);
2880 DisasContext ctx
, *ctxp
= &ctx
;
2881 target_ulong pc_start
;
2882 target_ulong pc_mask
;
2892 ctx
.mem_idx
= cpu_mmu_index(env
, false);
2893 ctx
.implver
= env
->implver
;
2894 ctx
.singlestep_enabled
= cs
->singlestep_enabled
;
2896 #ifdef CONFIG_USER_ONLY
2897 ctx
.ir
= cpu_std_ir
;
2899 ctx
.palbr
= env
->palbr
;
2900 ctx
.ir
= (tb
->flags
& TB_FLAGS_PAL_MODE
? cpu_pal_ir
: cpu_std_ir
);
2903 /* ??? Every TB begins with unset rounding mode, to be initialized on
2904 the first fp insn of the TB. Alternately we could define a proper
2905 default for every TB (e.g. QUAL_RM_N or QUAL_RM_D) and make sure
2906 to reset the FP_STATUS to that default at the end of any TB that
2907 changes the default. We could even (gasp) dynamiclly figure out
2908 what default would be most efficient given the running program. */
2910 /* Similarly for flush-to-zero. */
2913 TCGV_UNUSED_I64(ctx
.zero
);
2914 TCGV_UNUSED_I64(ctx
.sink
);
2915 TCGV_UNUSED_I64(ctx
.lit
);
2918 max_insns
= tb
->cflags
& CF_COUNT_MASK
;
2919 if (max_insns
== 0) {
2920 max_insns
= CF_COUNT_MASK
;
2922 if (max_insns
> TCG_MAX_INSNS
) {
2923 max_insns
= TCG_MAX_INSNS
;
2926 if (in_superpage(&ctx
, pc_start
)) {
2927 pc_mask
= (1ULL << 41) - 1;
2929 pc_mask
= ~TARGET_PAGE_MASK
;
2934 tcg_gen_insn_start(ctx
.pc
);
2937 if (unlikely(cpu_breakpoint_test(cs
, ctx
.pc
, BP_ANY
))) {
2938 ret
= gen_excp(&ctx
, EXCP_DEBUG
, 0);
2939 /* The address covered by the breakpoint must be included in
2940 [tb->pc, tb->pc + tb->size) in order to for it to be
2941 properly cleared -- thus we increment the PC here so that
2942 the logic setting tb->size below does the right thing. */
2946 if (num_insns
== max_insns
&& (tb
->cflags
& CF_LAST_IO
)) {
2949 insn
= cpu_ldl_code(env
, ctx
.pc
);
2952 ret
= translate_one(ctxp
, insn
);
2953 free_context_temps(ctxp
);
2955 /* If we reach a page boundary, are single stepping,
2956 or exhaust instruction count, stop generation. */
2958 && ((ctx
.pc
& pc_mask
) == 0
2959 || tcg_op_buf_full()
2960 || num_insns
>= max_insns
2962 || ctx
.singlestep_enabled
)) {
2963 ret
= EXIT_PC_STALE
;
2965 } while (ret
== NO_EXIT
);
2967 if (tb
->cflags
& CF_LAST_IO
) {
2976 tcg_gen_movi_i64(cpu_pc
, ctx
.pc
);
2978 case EXIT_PC_UPDATED
:
2979 if (ctx
.singlestep_enabled
) {
2980 gen_excp_1(EXCP_DEBUG
, 0);
2989 gen_tb_end(tb
, num_insns
);
2991 tb
->size
= ctx
.pc
- pc_start
;
2992 tb
->icount
= num_insns
;
2995 if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM
)
2996 && qemu_log_in_addr_range(pc_start
)) {
2997 qemu_log("IN: %s\n", lookup_symbol(pc_start
));
2998 log_target_disas(cs
, pc_start
, ctx
.pc
- pc_start
, 1);
3004 void restore_state_to_opc(CPUAlphaState
*env
, TranslationBlock
*tb
,