/*
 *  Alpha emulation cpu translation for qemu.
 *
 *  Copyright (c) 2007 Jocelyn Mayer
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "cpu.h"
#include "disas/disas.h"
#include "qemu/host-utils.h"
#include "tcg-op.h"
#include "exec/cpu_ldst.h"

#include "exec/helper-proto.h"
#include "exec/helper-gen.h"

#include "trace-tcg.h"
#include "exec/log.h"

#undef ALPHA_DEBUG_DISAS
#define CONFIG_SOFTFLOAT_INLINE

#ifdef ALPHA_DEBUG_DISAS
#  define LOG_DISAS(...) qemu_log_mask(CPU_LOG_TB_IN_ASM, ## __VA_ARGS__)
#else
#  define LOG_DISAS(...) do { } while (0)
#endif

typedef struct DisasContext DisasContext;
struct DisasContext {
    struct TranslationBlock *tb;
    uint64_t pc;
#ifndef CONFIG_USER_ONLY
    uint64_t palbr;
#endif
    int mem_idx;

    /* Current rounding mode for this TB.  */
    int tb_rm;
    /* Current flush-to-zero setting for this TB.  */
    int tb_ftz;

    /* implver value for this CPU.  */
    int implver;

    /* The set of registers active in the current context.  */
    TCGv *ir;

    /* Temporaries for $31 and $f31 as source and destination.  */
    TCGv zero;
    TCGv sink;
    /* Temporary for immediate constants.  */
    TCGv lit;

    bool singlestep_enabled;
};

/* Return values from translate_one, indicating the state of the TB.
   Note that zero indicates that we are not exiting the TB.  */

typedef enum {
    NO_EXIT,

    /* We have emitted one or more goto_tb.  No fixup required.  */
    EXIT_GOTO_TB,

    /* We are not using a goto_tb (for whatever reason), but have updated
       the PC (for whatever reason), so there's no need to do it again on
       exiting the TB.  */
    EXIT_PC_UPDATED,

    /* We are exiting the TB, but have neither emitted a goto_tb, nor
       updated the PC for the next instruction to be executed.  */
    EXIT_PC_STALE,

    /* We are ending the TB with a noreturn function call, e.g. longjmp.
       No following code will be executed.  */
    EXIT_NORETURN,
} ExitStatus;

/* global register indexes */
static TCGv_env cpu_env;
static TCGv cpu_std_ir[31];
static TCGv cpu_fir[31];
static TCGv cpu_pc;
static TCGv cpu_lock_addr;
static TCGv cpu_lock_st_addr;
static TCGv cpu_lock_value;

#ifndef CONFIG_USER_ONLY
static TCGv cpu_pal_ir[31];
#endif

#include "exec/gen-icount.h"

void alpha_translate_init(void)
{
#define DEF_VAR(V)  { &cpu_##V, #V, offsetof(CPUAlphaState, V) }

    typedef struct { TCGv *var; const char *name; int ofs; } GlobalVar;
    static const GlobalVar vars[] = {
        DEF_VAR(pc),
        DEF_VAR(lock_addr),
        DEF_VAR(lock_st_addr),
        DEF_VAR(lock_value),
    };

#undef DEF_VAR

    /* Use the symbolic register names that match the disassembler.  */
    static const char greg_names[31][4] = {
        "v0", "t0", "t1", "t2", "t3", "t4", "t5", "t6",
        "t7", "s0", "s1", "s2", "s3", "s4", "s5", "fp",
        "a0", "a1", "a2", "a3", "a4", "a5", "t8", "t9",
        "t10", "t11", "ra", "t12", "at", "gp", "sp"
    };
    static const char freg_names[31][4] = {
        "f0", "f1", "f2", "f3", "f4", "f5", "f6", "f7",
        "f8", "f9", "f10", "f11", "f12", "f13", "f14", "f15",
        "f16", "f17", "f18", "f19", "f20", "f21", "f22", "f23",
        "f24", "f25", "f26", "f27", "f28", "f29", "f30"
    };
#ifndef CONFIG_USER_ONLY
    static const char shadow_names[8][8] = {
        "pal_t7", "pal_s0", "pal_s1", "pal_s2",
        "pal_s3", "pal_s4", "pal_s5", "pal_t11"
    };
#endif

    static bool done_init = 0;
    int i;

    if (done_init) {
        return;
    }
    done_init = 1;

    cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");

    for (i = 0; i < 31; i++) {
        cpu_std_ir[i] = tcg_global_mem_new_i64(cpu_env,
                                               offsetof(CPUAlphaState, ir[i]),
                                               greg_names[i]);
    }

    for (i = 0; i < 31; i++) {
        cpu_fir[i] = tcg_global_mem_new_i64(cpu_env,
                                            offsetof(CPUAlphaState, fir[i]),
                                            freg_names[i]);
    }

#ifndef CONFIG_USER_ONLY
    memcpy(cpu_pal_ir, cpu_std_ir, sizeof(cpu_pal_ir));
    for (i = 0; i < 8; i++) {
        int r = (i == 7 ? 25 : i + 8);
        cpu_pal_ir[r] = tcg_global_mem_new_i64(cpu_env,
                                               offsetof(CPUAlphaState,
                                                        shadow[i]),
                                               shadow_names[i]);
    }
#endif

    for (i = 0; i < ARRAY_SIZE(vars); ++i) {
        const GlobalVar *v = &vars[i];
        *v->var = tcg_global_mem_new_i64(cpu_env, v->ofs, v->name);
    }
}
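
/* Added note (not in the original source): the !CONFIG_USER_ONLY loop above
   gives the eight PALmode shadow registers their own TCG globals while every
   other entry of cpu_pal_ir stays aliased to the standard register file;
   with r = (i == 7 ? 25 : i + 8) the shadows cover $8-$14 and $25, matching
   the pal_t7, pal_s0..pal_s5 and pal_t11 names in shadow_names.  */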

static TCGv load_zero(DisasContext *ctx)
{
    if (TCGV_IS_UNUSED_I64(ctx->zero)) {
        ctx->zero = tcg_const_i64(0);
    }
    return ctx->zero;
}

static TCGv dest_sink(DisasContext *ctx)
{
    if (TCGV_IS_UNUSED_I64(ctx->sink)) {
        ctx->sink = tcg_temp_new();
    }
    return ctx->sink;
}

static TCGv load_gpr(DisasContext *ctx, unsigned reg)
{
    if (likely(reg < 31)) {
        return ctx->ir[reg];
    } else {
        return load_zero(ctx);
    }
}

static TCGv load_gpr_lit(DisasContext *ctx, unsigned reg,
                         uint8_t lit, bool islit)
{
    if (islit) {
        ctx->lit = tcg_const_i64(lit);
        return ctx->lit;
    } else if (likely(reg < 31)) {
        return ctx->ir[reg];
    } else {
        return load_zero(ctx);
    }
}

static TCGv dest_gpr(DisasContext *ctx, unsigned reg)
{
    if (likely(reg < 31)) {
        return ctx->ir[reg];
    } else {
        return dest_sink(ctx);
    }
}

static TCGv load_fpr(DisasContext *ctx, unsigned reg)
{
    if (likely(reg < 31)) {
        return cpu_fir[reg];
    } else {
        return load_zero(ctx);
    }
}

static TCGv dest_fpr(DisasContext *ctx, unsigned reg)
{
    if (likely(reg < 31)) {
        return cpu_fir[reg];
    } else {
        return dest_sink(ctx);
    }
}
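
/* Usage sketch added for clarity (illustrative only, mirroring the patterns
   used throughout translate_one below): a typical integer operation is
   emitted as
       vc = dest_gpr(ctx, rc);
       vb = load_gpr_lit(ctx, rb, lit, islit);
       va = load_gpr(ctx, ra);
       tcg_gen_add_i64(vc, va, vb);
   so reads of $31/$f31 transparently become the shared zero temporary and
   writes to them land in the discarded sink temporary.  */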

static void gen_excp_1(int exception, int error_code)
{
    TCGv_i32 tmp1, tmp2;

    tmp1 = tcg_const_i32(exception);
    tmp2 = tcg_const_i32(error_code);
    gen_helper_excp(cpu_env, tmp1, tmp2);
    tcg_temp_free_i32(tmp2);
    tcg_temp_free_i32(tmp1);
}

static ExitStatus gen_excp(DisasContext *ctx, int exception, int error_code)
{
    tcg_gen_movi_i64(cpu_pc, ctx->pc);
    gen_excp_1(exception, error_code);
    return EXIT_NORETURN;
}

static inline ExitStatus gen_invalid(DisasContext *ctx)
{
    return gen_excp(ctx, EXCP_OPCDEC, 0);
}

static inline void gen_qemu_ldf(TCGv t0, TCGv t1, int flags)
{
    TCGv_i32 tmp32 = tcg_temp_new_i32();
    tcg_gen_qemu_ld_i32(tmp32, t1, flags, MO_LEUL);
    gen_helper_memory_to_f(t0, tmp32);
    tcg_temp_free_i32(tmp32);
}

static inline void gen_qemu_ldg(TCGv t0, TCGv t1, int flags)
{
    TCGv tmp = tcg_temp_new();
    tcg_gen_qemu_ld_i64(tmp, t1, flags, MO_LEQ);
    gen_helper_memory_to_g(t0, tmp);
    tcg_temp_free(tmp);
}

static inline void gen_qemu_lds(TCGv t0, TCGv t1, int flags)
{
    TCGv_i32 tmp32 = tcg_temp_new_i32();
    tcg_gen_qemu_ld_i32(tmp32, t1, flags, MO_LEUL);
    gen_helper_memory_to_s(t0, tmp32);
    tcg_temp_free_i32(tmp32);
}

static inline void gen_qemu_ldl_l(TCGv t0, TCGv t1, int flags)
{
    tcg_gen_qemu_ld_i64(t0, t1, flags, MO_LESL);
    tcg_gen_mov_i64(cpu_lock_addr, t1);
    tcg_gen_mov_i64(cpu_lock_value, t0);
}

static inline void gen_qemu_ldq_l(TCGv t0, TCGv t1, int flags)
{
    tcg_gen_qemu_ld_i64(t0, t1, flags, MO_LEQ);
    tcg_gen_mov_i64(cpu_lock_addr, t1);
    tcg_gen_mov_i64(cpu_lock_value, t0);
}
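
/* Added note: the load-locked variants above also record the locked address
   and the value just loaded in cpu_lock_addr/cpu_lock_value;
   gen_store_conditional below compares against both before letting an
   STL_C/STQ_C store succeed.  */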

static inline void gen_load_mem(DisasContext *ctx,
                                void (*tcg_gen_qemu_load)(TCGv t0, TCGv t1,
                                                          int flags),
                                int ra, int rb, int32_t disp16, bool fp,
                                bool clear)
{
    TCGv tmp, addr, va;

    /* LDQ_U with ra $31 is UNOP.  Other various loads are forms of
       prefetches, which we can treat as nops.  No worries about
       missed exceptions here.  */
    if (unlikely(ra == 31)) {
        return;
    }

    tmp = tcg_temp_new();
    addr = load_gpr(ctx, rb);

    if (disp16) {
        tcg_gen_addi_i64(tmp, addr, disp16);
        addr = tmp;
    }
    if (clear) {
        tcg_gen_andi_i64(tmp, addr, ~0x7);
        addr = tmp;
    }

    va = (fp ? cpu_fir[ra] : ctx->ir[ra]);
    tcg_gen_qemu_load(va, addr, ctx->mem_idx);

    tcg_temp_free(tmp);
}

static inline void gen_qemu_stf(TCGv t0, TCGv t1, int flags)
{
    TCGv_i32 tmp32 = tcg_temp_new_i32();
    gen_helper_f_to_memory(tmp32, t0);
    tcg_gen_qemu_st_i32(tmp32, t1, flags, MO_LEUL);
    tcg_temp_free_i32(tmp32);
}

static inline void gen_qemu_stg(TCGv t0, TCGv t1, int flags)
{
    TCGv tmp = tcg_temp_new();
    gen_helper_g_to_memory(tmp, t0);
    tcg_gen_qemu_st_i64(tmp, t1, flags, MO_LEQ);
    tcg_temp_free(tmp);
}

static inline void gen_qemu_sts(TCGv t0, TCGv t1, int flags)
{
    TCGv_i32 tmp32 = tcg_temp_new_i32();
    gen_helper_s_to_memory(tmp32, t0);
    tcg_gen_qemu_st_i32(tmp32, t1, flags, MO_LEUL);
    tcg_temp_free_i32(tmp32);
}

static inline void gen_store_mem(DisasContext *ctx,
                                 void (*tcg_gen_qemu_store)(TCGv t0, TCGv t1,
                                                            int flags),
                                 int ra, int rb, int32_t disp16, bool fp,
                                 bool clear)
{
    TCGv tmp, addr, va;

    tmp = tcg_temp_new();
    addr = load_gpr(ctx, rb);

    if (disp16) {
        tcg_gen_addi_i64(tmp, addr, disp16);
        addr = tmp;
    }
    if (clear) {
        tcg_gen_andi_i64(tmp, addr, ~0x7);
        addr = tmp;
    }

    va = (fp ? load_fpr(ctx, ra) : load_gpr(ctx, ra));
    tcg_gen_qemu_store(va, addr, ctx->mem_idx);

    tcg_temp_free(tmp);
}

static ExitStatus gen_store_conditional(DisasContext *ctx, int ra, int rb,
                                        int32_t disp16, int quad)
{
    TCGv addr;

    if (ra == 31) {
        /* ??? Don't bother storing anything.  The user can't tell
           the difference, since the zero register always reads zero.  */
        return NO_EXIT;
    }

#if defined(CONFIG_USER_ONLY)
    addr = cpu_lock_st_addr;
#else
    addr = tcg_temp_local_new();
#endif

    tcg_gen_addi_i64(addr, load_gpr(ctx, rb), disp16);

#if defined(CONFIG_USER_ONLY)
    /* ??? This is handled via a complicated version of compare-and-swap
       in the cpu_loop.  Hopefully one day we'll have a real CAS opcode
       in TCG so that this isn't necessary.  */
    return gen_excp(ctx, quad ? EXCP_STQ_C : EXCP_STL_C, ra);
#else
    /* ??? In system mode we are never multi-threaded, so CAS can be
       implemented via a non-atomic load-compare-store sequence.  */
    {
        TCGLabel *lab_fail, *lab_done;
        TCGv val;

        lab_fail = gen_new_label();
        lab_done = gen_new_label();
        tcg_gen_brcond_i64(TCG_COND_NE, addr, cpu_lock_addr, lab_fail);

        val = tcg_temp_new();
        tcg_gen_qemu_ld_i64(val, addr, ctx->mem_idx, quad ? MO_LEQ : MO_LESL);
        tcg_gen_brcond_i64(TCG_COND_NE, val, cpu_lock_value, lab_fail);

        tcg_gen_qemu_st_i64(ctx->ir[ra], addr, ctx->mem_idx,
                            quad ? MO_LEQ : MO_LEUL);
        tcg_gen_movi_i64(ctx->ir[ra], 1);
        tcg_gen_br(lab_done);

        gen_set_label(lab_fail);
        tcg_gen_movi_i64(ctx->ir[ra], 0);

        gen_set_label(lab_done);
        tcg_gen_movi_i64(cpu_lock_addr, -1);

        tcg_temp_free(addr);
        return NO_EXIT;
    }
#endif
}

static bool in_superpage(DisasContext *ctx, int64_t addr)
{
    return ((ctx->tb->flags & TB_FLAGS_USER_MODE) == 0
            && addr < 0
            && ((addr >> 41) & 3) == 2
            && addr >> TARGET_VIRT_ADDR_SPACE_BITS == addr >> 63);
}
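
/* Added example (an assumption about the intended mapping, not original
   text): an OSF/1 KSEG address such as 0xfffffc0000000000 passes every test
   above -- it is negative, bits <42:41> equal 2, and everything above
   TARGET_VIRT_ADDR_SPACE_BITS is pure sign extension -- so translation-block
   chaining within the superpage is always safe.  */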

static bool use_goto_tb(DisasContext *ctx, uint64_t dest)
{
    /* Suppress goto_tb in the case of single-stepping and IO.  */
    if ((ctx->tb->cflags & CF_LAST_IO)
        || ctx->singlestep_enabled || singlestep) {
        return false;
    }
    /* If the destination is in the superpage, the page perms can't change.  */
    if (in_superpage(ctx, dest)) {
        return true;
    }
    /* Check for the dest on the same page as the start of the TB.  */
    return ((ctx->tb->pc ^ dest) & TARGET_PAGE_MASK) == 0;
}

static ExitStatus gen_bdirect(DisasContext *ctx, int ra, int32_t disp)
{
    uint64_t dest = ctx->pc + (disp << 2);

    if (ra != 31) {
        tcg_gen_movi_i64(ctx->ir[ra], ctx->pc);
    }

    /* Notice branch-to-next; used to initialize RA with the PC.  */
    if (disp == 0) {
        return 0;
    } else if (use_goto_tb(ctx, dest)) {
        tcg_gen_goto_tb(0);
        tcg_gen_movi_i64(cpu_pc, dest);
        tcg_gen_exit_tb((uintptr_t)ctx->tb);
        return EXIT_GOTO_TB;
    } else {
        tcg_gen_movi_i64(cpu_pc, dest);
        return EXIT_PC_UPDATED;
    }
}

static ExitStatus gen_bcond_internal(DisasContext *ctx, TCGCond cond,
                                     TCGv cmp, int32_t disp)
{
    uint64_t dest = ctx->pc + (disp << 2);
    TCGLabel *lab_true = gen_new_label();

    if (use_goto_tb(ctx, dest)) {
        tcg_gen_brcondi_i64(cond, cmp, 0, lab_true);

        tcg_gen_goto_tb(0);
        tcg_gen_movi_i64(cpu_pc, ctx->pc);
        tcg_gen_exit_tb((uintptr_t)ctx->tb);

        gen_set_label(lab_true);
        tcg_gen_goto_tb(1);
        tcg_gen_movi_i64(cpu_pc, dest);
        tcg_gen_exit_tb((uintptr_t)ctx->tb + 1);

        return EXIT_GOTO_TB;
    } else {
        TCGv_i64 z = tcg_const_i64(0);
        TCGv_i64 d = tcg_const_i64(dest);
        TCGv_i64 p = tcg_const_i64(ctx->pc);

        tcg_gen_movcond_i64(cond, cpu_pc, cmp, z, d, p);

        tcg_temp_free_i64(z);
        tcg_temp_free_i64(d);
        tcg_temp_free_i64(p);
        return EXIT_PC_UPDATED;
    }
}

static ExitStatus gen_bcond(DisasContext *ctx, TCGCond cond, int ra,
                            int32_t disp, int mask)
{
    TCGv cmp_tmp;

    if (mask) {
        cmp_tmp = tcg_temp_new();
        tcg_gen_andi_i64(cmp_tmp, load_gpr(ctx, ra), 1);
    } else {
        cmp_tmp = load_gpr(ctx, ra);
    }

    return gen_bcond_internal(ctx, cond, cmp_tmp, disp);
}
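
/* Added note: the mask argument distinguishes the low-bit branches BLBC and
   BLBS (which test only bit 0 of Ra) from the ordinary compare-against-zero
   branches such as BEQ, BNE, BLT and BGE.  */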

/* Fold -0.0 for comparison with COND.  */

static void gen_fold_mzero(TCGCond cond, TCGv dest, TCGv src)
{
    uint64_t mzero = 1ull << 63;

    switch (cond) {
    case TCG_COND_LE:
    case TCG_COND_GT:
        /* For <= or >, the -0.0 value directly compares the way we want.  */
        tcg_gen_mov_i64(dest, src);
        break;

    case TCG_COND_EQ:
    case TCG_COND_NE:
        /* For == or !=, we can simply mask off the sign bit and compare.  */
        tcg_gen_andi_i64(dest, src, mzero - 1);
        break;

    case TCG_COND_GE:
    case TCG_COND_LT:
        /* For >= or <, map -0.0 to +0.0 via comparison and mask.  */
        tcg_gen_setcondi_i64(TCG_COND_NE, dest, src, mzero);
        tcg_gen_neg_i64(dest, dest);
        tcg_gen_and_i64(dest, dest, src);
        break;

    default:
        abort();
    }
}
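
/* Added worked example for the GE/LT folding above: when src holds -0.0 the
   register value is 0x8000000000000000, so the setcond yields 0, the
   negation yields an all-zero mask and the AND produces +0.0; any other
   value compares not-equal to mzero, the mask becomes all ones and src
   passes through unchanged.  */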

static ExitStatus gen_fbcond(DisasContext *ctx, TCGCond cond, int ra,
                             int32_t disp)
{
    TCGv cmp_tmp = tcg_temp_new();
    gen_fold_mzero(cond, cmp_tmp, load_fpr(ctx, ra));
    return gen_bcond_internal(ctx, cond, cmp_tmp, disp);
}

static void gen_fcmov(DisasContext *ctx, TCGCond cond, int ra, int rb, int rc)
{
    TCGv va, vb, z;

    z = load_zero(ctx);
    vb = load_fpr(ctx, rb);
    va = tcg_temp_new();
    gen_fold_mzero(cond, va, load_fpr(ctx, ra));

    tcg_gen_movcond_i64(cond, dest_fpr(ctx, rc), va, z, vb, load_fpr(ctx, rc));

    tcg_temp_free(va);
}

#define QUAL_RM_N       0x080   /* Round mode nearest even */
#define QUAL_RM_C       0x000   /* Round mode chopped */
#define QUAL_RM_M       0x040   /* Round mode minus infinity */
#define QUAL_RM_D       0x0c0   /* Round mode dynamic */
#define QUAL_RM_MASK    0x0c0

#define QUAL_U          0x100   /* Underflow enable (fp output) */
#define QUAL_V          0x100   /* Overflow enable (int output) */
#define QUAL_S          0x400   /* Software completion enable */
#define QUAL_I          0x200   /* Inexact detection enable */
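
/* Added note: these masks pick apart the qualifier bits of the 11-bit FP
   function field (fn11).  gen_qual_roundmode and gen_qual_flushzero below
   remember the setting most recently written into fp_status (ctx->tb_rm,
   ctx->tb_ftz) so a TB does not repeatedly emit the same rounding-mode or
   flush-to-zero store.  */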
606 static void gen_qual_roundmode(DisasContext
*ctx
, int fn11
)
610 fn11
&= QUAL_RM_MASK
;
611 if (fn11
== ctx
->tb_rm
) {
616 tmp
= tcg_temp_new_i32();
619 tcg_gen_movi_i32(tmp
, float_round_nearest_even
);
622 tcg_gen_movi_i32(tmp
, float_round_to_zero
);
625 tcg_gen_movi_i32(tmp
, float_round_down
);
628 tcg_gen_ld8u_i32(tmp
, cpu_env
,
629 offsetof(CPUAlphaState
, fpcr_dyn_round
));
633 #if defined(CONFIG_SOFTFLOAT_INLINE)
634 /* ??? The "fpu/softfloat.h" interface is to call set_float_rounding_mode.
635 With CONFIG_SOFTFLOAT that expands to an out-of-line call that just
636 sets the one field. */
637 tcg_gen_st8_i32(tmp
, cpu_env
,
638 offsetof(CPUAlphaState
, fp_status
.float_rounding_mode
));
640 gen_helper_setroundmode(tmp
);
643 tcg_temp_free_i32(tmp
);
646 static void gen_qual_flushzero(DisasContext
*ctx
, int fn11
)
651 if (fn11
== ctx
->tb_ftz
) {
656 tmp
= tcg_temp_new_i32();
658 /* Underflow is enabled, use the FPCR setting. */
659 tcg_gen_ld8u_i32(tmp
, cpu_env
,
660 offsetof(CPUAlphaState
, fpcr_flush_to_zero
));
662 /* Underflow is disabled, force flush-to-zero. */
663 tcg_gen_movi_i32(tmp
, 1);
666 #if defined(CONFIG_SOFTFLOAT_INLINE)
667 tcg_gen_st8_i32(tmp
, cpu_env
,
668 offsetof(CPUAlphaState
, fp_status
.flush_to_zero
));
670 gen_helper_setflushzero(tmp
);
673 tcg_temp_free_i32(tmp
);
676 static TCGv
gen_ieee_input(DisasContext
*ctx
, int reg
, int fn11
, int is_cmp
)
680 if (unlikely(reg
== 31)) {
681 val
= load_zero(ctx
);
684 if ((fn11
& QUAL_S
) == 0) {
686 gen_helper_ieee_input_cmp(cpu_env
, val
);
688 gen_helper_ieee_input(cpu_env
, val
);
691 #ifndef CONFIG_USER_ONLY
692 /* In system mode, raise exceptions for denormals like real
693 hardware. In user mode, proceed as if the OS completion
694 handler is handling the denormal as per spec. */
695 gen_helper_ieee_input_s(cpu_env
, val
);
702 static void gen_fp_exc_raise(int rc
, int fn11
)
704 /* ??? We ought to be able to do something with imprecise exceptions.
705 E.g. notice we're still in the trap shadow of something within the
706 TB and do not generate the code to signal the exception; end the TB
707 when an exception is forced to arrive, either by consumption of a
708 register value or TRAPB or EXCB. */
712 if (!(fn11
& QUAL_U
)) {
713 /* Note that QUAL_U == QUAL_V, so ignore either. */
714 ignore
|= FPCR_UNF
| FPCR_IOV
;
716 if (!(fn11
& QUAL_I
)) {
719 ign
= tcg_const_i32(ignore
);
721 /* ??? Pass in the regno of the destination so that the helper can
722 set EXC_MASK, which contains a bitmask of destination registers
723 that have caused arithmetic traps. A simple userspace emulation
724 does not require this. We do need it for a guest kernel's entArith,
725 or if we were to do something clever with imprecise exceptions. */
726 reg
= tcg_const_i32(rc
+ 32);
728 gen_helper_fp_exc_raise_s(cpu_env
, ign
, reg
);
730 gen_helper_fp_exc_raise(cpu_env
, ign
, reg
);
733 tcg_temp_free_i32(reg
);
734 tcg_temp_free_i32(ign
);

static void gen_cvtlq(TCGv vc, TCGv vb)
{
    TCGv tmp = tcg_temp_new();

    /* The arithmetic right shift here, plus the sign-extended mask below
       yields a sign-extended result without an explicit ext32s_i64.  */
    tcg_gen_sari_i64(tmp, vb, 32);
    tcg_gen_shri_i64(vc, vb, 29);
    tcg_gen_andi_i64(tmp, tmp, (int32_t)0xc0000000);
    tcg_gen_andi_i64(vc, vc, 0x3fffffff);
    tcg_gen_or_i64(vc, vc, tmp);

    tcg_temp_free(tmp);
}
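
/* Added note (an assumption based on the Alpha register format for longword
   integers in FP registers): the input keeps bits <31:30> of the longword in
   bits <63:62> and bits <29:0> in bits <58:29>, so the two shifts above
   reassemble the 32-bit value and the masks drop the unused middle bits.  */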
752 static void gen_ieee_arith2(DisasContext
*ctx
,
753 void (*helper
)(TCGv
, TCGv_ptr
, TCGv
),
754 int rb
, int rc
, int fn11
)
758 gen_qual_roundmode(ctx
, fn11
);
759 gen_qual_flushzero(ctx
, fn11
);
761 vb
= gen_ieee_input(ctx
, rb
, fn11
, 0);
762 helper(dest_fpr(ctx
, rc
), cpu_env
, vb
);
764 gen_fp_exc_raise(rc
, fn11
);
767 #define IEEE_ARITH2(name) \
768 static inline void glue(gen_, name)(DisasContext *ctx, \
769 int rb, int rc, int fn11) \
771 gen_ieee_arith2(ctx, gen_helper_##name, rb, rc, fn11); \
778 static void gen_cvttq(DisasContext
*ctx
, int rb
, int rc
, int fn11
)
782 /* No need to set flushzero, since we have an integer output. */
783 vb
= gen_ieee_input(ctx
, rb
, fn11
, 0);
784 vc
= dest_fpr(ctx
, rc
);
786 /* Almost all integer conversions use cropped rounding;
787 special case that. */
788 if ((fn11
& QUAL_RM_MASK
) == QUAL_RM_C
) {
789 gen_helper_cvttq_c(vc
, cpu_env
, vb
);
791 gen_qual_roundmode(ctx
, fn11
);
792 gen_helper_cvttq(vc
, cpu_env
, vb
);
794 gen_fp_exc_raise(rc
, fn11
);
797 static void gen_ieee_intcvt(DisasContext
*ctx
,
798 void (*helper
)(TCGv
, TCGv_ptr
, TCGv
),
799 int rb
, int rc
, int fn11
)
803 gen_qual_roundmode(ctx
, fn11
);
804 vb
= load_fpr(ctx
, rb
);
805 vc
= dest_fpr(ctx
, rc
);
807 /* The only exception that can be raised by integer conversion
808 is inexact. Thus we only need to worry about exceptions when
809 inexact handling is requested. */
811 helper(vc
, cpu_env
, vb
);
812 gen_fp_exc_raise(rc
, fn11
);
814 helper(vc
, cpu_env
, vb
);
818 #define IEEE_INTCVT(name) \
819 static inline void glue(gen_, name)(DisasContext *ctx, \
820 int rb, int rc, int fn11) \
822 gen_ieee_intcvt(ctx, gen_helper_##name, rb, rc, fn11); \
827 static void gen_cpy_mask(TCGv vc
, TCGv va
, TCGv vb
, bool inv_a
, uint64_t mask
)
829 TCGv vmask
= tcg_const_i64(mask
);
830 TCGv tmp
= tcg_temp_new_i64();
833 tcg_gen_andc_i64(tmp
, vmask
, va
);
835 tcg_gen_and_i64(tmp
, va
, vmask
);
838 tcg_gen_andc_i64(vc
, vb
, vmask
);
839 tcg_gen_or_i64(vc
, vc
, tmp
);
841 tcg_temp_free(vmask
);
845 static void gen_ieee_arith3(DisasContext
*ctx
,
846 void (*helper
)(TCGv
, TCGv_ptr
, TCGv
, TCGv
),
847 int ra
, int rb
, int rc
, int fn11
)
851 gen_qual_roundmode(ctx
, fn11
);
852 gen_qual_flushzero(ctx
, fn11
);
854 va
= gen_ieee_input(ctx
, ra
, fn11
, 0);
855 vb
= gen_ieee_input(ctx
, rb
, fn11
, 0);
856 vc
= dest_fpr(ctx
, rc
);
857 helper(vc
, cpu_env
, va
, vb
);
859 gen_fp_exc_raise(rc
, fn11
);
862 #define IEEE_ARITH3(name) \
863 static inline void glue(gen_, name)(DisasContext *ctx, \
864 int ra, int rb, int rc, int fn11) \
866 gen_ieee_arith3(ctx, gen_helper_##name, ra, rb, rc, fn11); \
877 static void gen_ieee_compare(DisasContext
*ctx
,
878 void (*helper
)(TCGv
, TCGv_ptr
, TCGv
, TCGv
),
879 int ra
, int rb
, int rc
, int fn11
)
883 va
= gen_ieee_input(ctx
, ra
, fn11
, 1);
884 vb
= gen_ieee_input(ctx
, rb
, fn11
, 1);
885 vc
= dest_fpr(ctx
, rc
);
886 helper(vc
, cpu_env
, va
, vb
);
888 gen_fp_exc_raise(rc
, fn11
);
891 #define IEEE_CMP3(name) \
892 static inline void glue(gen_, name)(DisasContext *ctx, \
893 int ra, int rb, int rc, int fn11) \
895 gen_ieee_compare(ctx, gen_helper_##name, ra, rb, rc, fn11); \

static inline uint64_t zapnot_mask(uint8_t lit)
{
    uint64_t mask = 0;
    int i;

    for (i = 0; i < 8; ++i) {
        if ((lit >> i) & 1) {
            mask |= 0xffull << (i * 8);
        }
    }
    return mask;
}

/* Implement zapnot with an immediate operand, which expands to some
   form of immediate AND.  This is a basic building block in the
   definition of many of the other byte manipulation instructions.  */
static void gen_zapnoti(TCGv dest, TCGv src, uint8_t lit)
{
    switch (lit) {
    case 0x00:
        tcg_gen_movi_i64(dest, 0);
        break;
    case 0x01:
        tcg_gen_ext8u_i64(dest, src);
        break;
    case 0x03:
        tcg_gen_ext16u_i64(dest, src);
        break;
    case 0x0f:
        tcg_gen_ext32u_i64(dest, src);
        break;
    case 0xff:
        tcg_gen_mov_i64(dest, src);
        break;
    default:
        tcg_gen_andi_i64(dest, src, zapnot_mask(lit));
        break;
    }
}
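
/* Added worked example: a ZAPNOT literal of 0x0f keeps bytes 0-3, so
   zapnot_mask(0x0f) == 0x00000000ffffffffull and the andi in the default
   case would match the 32-bit zero-extension special-cased above.  */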
942 /* EXTWH, EXTLH, EXTQH */
943 static void gen_ext_h(DisasContext
*ctx
, TCGv vc
, TCGv va
, int rb
, bool islit
,
944 uint8_t lit
, uint8_t byte_mask
)
947 tcg_gen_shli_i64(vc
, va
, (64 - lit
* 8) & 0x3f);
949 TCGv tmp
= tcg_temp_new();
950 tcg_gen_shli_i64(tmp
, load_gpr(ctx
, rb
), 3);
951 tcg_gen_neg_i64(tmp
, tmp
);
952 tcg_gen_andi_i64(tmp
, tmp
, 0x3f);
953 tcg_gen_shl_i64(vc
, va
, tmp
);
956 gen_zapnoti(vc
, vc
, byte_mask
);
959 /* EXTBL, EXTWL, EXTLL, EXTQL */
960 static void gen_ext_l(DisasContext
*ctx
, TCGv vc
, TCGv va
, int rb
, bool islit
,
961 uint8_t lit
, uint8_t byte_mask
)
964 tcg_gen_shri_i64(vc
, va
, (lit
& 7) * 8);
966 TCGv tmp
= tcg_temp_new();
967 tcg_gen_andi_i64(tmp
, load_gpr(ctx
, rb
), 7);
968 tcg_gen_shli_i64(tmp
, tmp
, 3);
969 tcg_gen_shr_i64(vc
, va
, tmp
);
972 gen_zapnoti(vc
, vc
, byte_mask
);
975 /* INSWH, INSLH, INSQH */
976 static void gen_ins_h(DisasContext
*ctx
, TCGv vc
, TCGv va
, int rb
, bool islit
,
977 uint8_t lit
, uint8_t byte_mask
)
979 TCGv tmp
= tcg_temp_new();
981 /* The instruction description has us left-shift the byte mask and extract
982 bits <15:8> and apply that zap at the end. This is equivalent to simply
983 performing the zap first and shifting afterward. */
984 gen_zapnoti(tmp
, va
, byte_mask
);
988 if (unlikely(lit
== 0)) {
989 tcg_gen_movi_i64(vc
, 0);
991 tcg_gen_shri_i64(vc
, tmp
, 64 - lit
* 8);
994 TCGv shift
= tcg_temp_new();
996 /* If (B & 7) == 0, we need to shift by 64 and leave a zero. Do this
997 portably by splitting the shift into two parts: shift_count-1 and 1.
998 Arrange for the -1 by using ones-complement instead of
999 twos-complement in the negation: ~(B * 8) & 63. */
1001 tcg_gen_shli_i64(shift
, load_gpr(ctx
, rb
), 3);
1002 tcg_gen_not_i64(shift
, shift
);
1003 tcg_gen_andi_i64(shift
, shift
, 0x3f);
1005 tcg_gen_shr_i64(vc
, tmp
, shift
);
1006 tcg_gen_shri_i64(vc
, vc
, 1);
1007 tcg_temp_free(shift
);
1012 /* INSBL, INSWL, INSLL, INSQL */
1013 static void gen_ins_l(DisasContext
*ctx
, TCGv vc
, TCGv va
, int rb
, bool islit
,
1014 uint8_t lit
, uint8_t byte_mask
)
1016 TCGv tmp
= tcg_temp_new();
1018 /* The instruction description has us left-shift the byte mask
1019 the same number of byte slots as the data and apply the zap
1020 at the end. This is equivalent to simply performing the zap
1021 first and shifting afterward. */
1022 gen_zapnoti(tmp
, va
, byte_mask
);
1025 tcg_gen_shli_i64(vc
, tmp
, (lit
& 7) * 8);
1027 TCGv shift
= tcg_temp_new();
1028 tcg_gen_andi_i64(shift
, load_gpr(ctx
, rb
), 7);
1029 tcg_gen_shli_i64(shift
, shift
, 3);
1030 tcg_gen_shl_i64(vc
, tmp
, shift
);
1031 tcg_temp_free(shift
);
1036 /* MSKWH, MSKLH, MSKQH */
1037 static void gen_msk_h(DisasContext
*ctx
, TCGv vc
, TCGv va
, int rb
, bool islit
,
1038 uint8_t lit
, uint8_t byte_mask
)
1041 gen_zapnoti(vc
, va
, ~((byte_mask
<< (lit
& 7)) >> 8));
1043 TCGv shift
= tcg_temp_new();
1044 TCGv mask
= tcg_temp_new();
1046 /* The instruction description is as above, where the byte_mask
1047 is shifted left, and then we extract bits <15:8>. This can be
1048 emulated with a right-shift on the expanded byte mask. This
1049 requires extra care because for an input <2:0> == 0 we need a
1050 shift of 64 bits in order to generate a zero. This is done by
1051 splitting the shift into two parts, the variable shift - 1
1052 followed by a constant 1 shift. The code we expand below is
1053 equivalent to ~(B * 8) & 63. */
1055 tcg_gen_shli_i64(shift
, load_gpr(ctx
, rb
), 3);
1056 tcg_gen_not_i64(shift
, shift
);
1057 tcg_gen_andi_i64(shift
, shift
, 0x3f);
1058 tcg_gen_movi_i64(mask
, zapnot_mask (byte_mask
));
1059 tcg_gen_shr_i64(mask
, mask
, shift
);
1060 tcg_gen_shri_i64(mask
, mask
, 1);
1062 tcg_gen_andc_i64(vc
, va
, mask
);
1064 tcg_temp_free(mask
);
1065 tcg_temp_free(shift
);
1069 /* MSKBL, MSKWL, MSKLL, MSKQL */
1070 static void gen_msk_l(DisasContext
*ctx
, TCGv vc
, TCGv va
, int rb
, bool islit
,
1071 uint8_t lit
, uint8_t byte_mask
)
1074 gen_zapnoti(vc
, va
, ~(byte_mask
<< (lit
& 7)));
1076 TCGv shift
= tcg_temp_new();
1077 TCGv mask
= tcg_temp_new();
1079 tcg_gen_andi_i64(shift
, load_gpr(ctx
, rb
), 7);
1080 tcg_gen_shli_i64(shift
, shift
, 3);
1081 tcg_gen_movi_i64(mask
, zapnot_mask(byte_mask
));
1082 tcg_gen_shl_i64(mask
, mask
, shift
);
1084 tcg_gen_andc_i64(vc
, va
, mask
);
1086 tcg_temp_free(mask
);
1087 tcg_temp_free(shift
);
1091 static void gen_rx(DisasContext
*ctx
, int ra
, int set
)
1096 tcg_gen_ld8u_i64(ctx
->ir
[ra
], cpu_env
,
1097 offsetof(CPUAlphaState
, intr_flag
));
1100 tmp
= tcg_const_i32(set
);
1101 tcg_gen_st8_i32(tmp
, cpu_env
, offsetof(CPUAlphaState
, intr_flag
));
1102 tcg_temp_free_i32(tmp
);
1105 static ExitStatus
gen_call_pal(DisasContext
*ctx
, int palcode
)
1107 /* We're emulating OSF/1 PALcode. Many of these are trivial access
1108 to internal cpu registers. */
1110 /* Unprivileged PAL call */
1111 if (palcode
>= 0x80 && palcode
< 0xC0) {
1115 /* No-op inside QEMU. */
1119 tcg_gen_ld_i64(ctx
->ir
[IR_V0
], cpu_env
,
1120 offsetof(CPUAlphaState
, unique
));
1124 tcg_gen_st_i64(ctx
->ir
[IR_A0
], cpu_env
,
1125 offsetof(CPUAlphaState
, unique
));
1134 #ifndef CONFIG_USER_ONLY
1135 /* Privileged PAL code */
1136 if (palcode
< 0x40 && (ctx
->tb
->flags
& TB_FLAGS_USER_MODE
) == 0) {
1140 /* No-op inside QEMU. */
1144 /* No-op inside QEMU. */
1148 tcg_gen_st_i64(ctx
->ir
[IR_A0
], cpu_env
,
1149 offsetof(CPUAlphaState
, vptptr
));
1153 tcg_gen_st_i64(ctx
->ir
[IR_A0
], cpu_env
,
1154 offsetof(CPUAlphaState
, sysval
));
1158 tcg_gen_ld_i64(ctx
->ir
[IR_V0
], cpu_env
,
1159 offsetof(CPUAlphaState
, sysval
));
1166 /* Note that we already know we're in kernel mode, so we know
1167 that PS only contains the 3 IPL bits. */
1168 tcg_gen_ld8u_i64(ctx
->ir
[IR_V0
], cpu_env
,
1169 offsetof(CPUAlphaState
, ps
));
1171 /* But make sure and store only the 3 IPL bits from the user. */
1172 tmp
= tcg_temp_new();
1173 tcg_gen_andi_i64(tmp
, ctx
->ir
[IR_A0
], PS_INT_MASK
);
1174 tcg_gen_st8_i64(tmp
, cpu_env
, offsetof(CPUAlphaState
, ps
));
1181 tcg_gen_ld8u_i64(ctx
->ir
[IR_V0
], cpu_env
,
1182 offsetof(CPUAlphaState
, ps
));
1186 tcg_gen_st_i64(ctx
->ir
[IR_A0
], cpu_env
,
1187 offsetof(CPUAlphaState
, usp
));
1191 tcg_gen_ld_i64(ctx
->ir
[IR_V0
], cpu_env
,
1192 offsetof(CPUAlphaState
, usp
));
1196 tcg_gen_ld32s_i64(ctx
->ir
[IR_V0
], cpu_env
,
1197 -offsetof(AlphaCPU
, env
) + offsetof(CPUState
, cpu_index
));
1207 return gen_invalid(ctx
);
1210 #ifdef CONFIG_USER_ONLY
1211 return gen_excp(ctx
, EXCP_CALL_PAL
, palcode
);
1214 TCGv tmp
= tcg_temp_new();
1215 uint64_t exc_addr
= ctx
->pc
;
1216 uint64_t entry
= ctx
->palbr
;
1218 if (ctx
->tb
->flags
& TB_FLAGS_PAL_MODE
) {
1221 tcg_gen_movi_i64(tmp
, 1);
1222 tcg_gen_st8_i64(tmp
, cpu_env
, offsetof(CPUAlphaState
, pal_mode
));
1225 tcg_gen_movi_i64(tmp
, exc_addr
);
1226 tcg_gen_st_i64(tmp
, cpu_env
, offsetof(CPUAlphaState
, exc_addr
));
1229 entry
+= (palcode
& 0x80
1230 ? 0x2000 + (palcode
- 0x80) * 64
1231 : 0x1000 + palcode
* 64);
1233 /* Since the destination is running in PALmode, we don't really
1234 need the page permissions check. We'll see the existence of
1235 the page when we create the TB, and we'll flush all TBs if
1236 we change the PAL base register. */
1237 if (!ctx
->singlestep_enabled
&& !(ctx
->tb
->cflags
& CF_LAST_IO
)) {
1239 tcg_gen_movi_i64(cpu_pc
, entry
);
1240 tcg_gen_exit_tb((uintptr_t)ctx
->tb
);
1241 return EXIT_GOTO_TB
;
1243 tcg_gen_movi_i64(cpu_pc
, entry
);
1244 return EXIT_PC_UPDATED
;

#ifndef CONFIG_USER_ONLY

#define PR_BYTE         0x100000
#define PR_LONG         0x200000

static int cpu_pr_data(int pr)
{
    switch (pr) {
    case  0: return offsetof(CPUAlphaState, ps) | PR_BYTE;
    case  1: return offsetof(CPUAlphaState, fen) | PR_BYTE;
    case  2: return offsetof(CPUAlphaState, pcc_ofs) | PR_LONG;
    case  3: return offsetof(CPUAlphaState, trap_arg0);
    case  4: return offsetof(CPUAlphaState, trap_arg1);
    case  5: return offsetof(CPUAlphaState, trap_arg2);
    case  6: return offsetof(CPUAlphaState, exc_addr);
    case  7: return offsetof(CPUAlphaState, palbr);
    case  8: return offsetof(CPUAlphaState, ptbr);
    case  9: return offsetof(CPUAlphaState, vptptr);
    case 10: return offsetof(CPUAlphaState, unique);
    case 11: return offsetof(CPUAlphaState, sysval);
    case 12: return offsetof(CPUAlphaState, usp);

    case 40 ... 63:
        return offsetof(CPUAlphaState, scratch[pr - 40]);

    case 251:
        return offsetof(CPUAlphaState, alarm_expire);
    }
    return 0;
}
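
/* Added note: PR_BYTE and PR_LONG are flag bits OR-ed into the offsetof()
   value returned above; gen_mfpr and gen_mtpr below mask them off again and
   use them to select the 8-bit or 32-bit access width for the processor
   register.  */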
1281 static ExitStatus
gen_mfpr(DisasContext
*ctx
, TCGv va
, int regno
)
1283 void (*helper
)(TCGv
);
1288 /* Accessing the "non-shadow" general registers. */
1289 regno
= regno
== 39 ? 25 : regno
- 32 + 8;
1290 tcg_gen_mov_i64(va
, cpu_std_ir
[regno
]);
1293 case 250: /* WALLTIME */
1294 helper
= gen_helper_get_walltime
;
1296 case 249: /* VMTIME */
1297 helper
= gen_helper_get_vmtime
;
1303 return EXIT_PC_STALE
;
1310 /* The basic registers are data only, and unknown registers
1311 are read-zero, write-ignore. */
1312 data
= cpu_pr_data(regno
);
1314 tcg_gen_movi_i64(va
, 0);
1315 } else if (data
& PR_BYTE
) {
1316 tcg_gen_ld8u_i64(va
, cpu_env
, data
& ~PR_BYTE
);
1317 } else if (data
& PR_LONG
) {
1318 tcg_gen_ld32s_i64(va
, cpu_env
, data
& ~PR_LONG
);
1320 tcg_gen_ld_i64(va
, cpu_env
, data
);
1328 static ExitStatus
gen_mtpr(DisasContext
*ctx
, TCGv vb
, int regno
)
1336 gen_helper_tbia(cpu_env
);
1341 gen_helper_tbis(cpu_env
, vb
);
1346 tmp
= tcg_const_i64(1);
1347 tcg_gen_st32_i64(tmp
, cpu_env
, -offsetof(AlphaCPU
, env
) +
1348 offsetof(CPUState
, halted
));
1349 return gen_excp(ctx
, EXCP_HLT
, 0);
1353 gen_helper_halt(vb
);
1354 return EXIT_PC_STALE
;
1358 gen_helper_set_alarm(cpu_env
, vb
);
1363 tcg_gen_st_i64(vb
, cpu_env
, offsetof(CPUAlphaState
, palbr
));
1364 /* Changing the PAL base register implies un-chaining all of the TBs
1365 that ended with a CALL_PAL. Since the base register usually only
1366 changes during boot, flushing everything works well. */
1367 gen_helper_tb_flush(cpu_env
);
1368 return EXIT_PC_STALE
;
1371 /* Accessing the "non-shadow" general registers. */
1372 regno
= regno
== 39 ? 25 : regno
- 32 + 8;
1373 tcg_gen_mov_i64(cpu_std_ir
[regno
], vb
);
1377 /* The basic registers are data only, and unknown registers
1378 are read-zero, write-ignore. */
1379 data
= cpu_pr_data(regno
);
1381 if (data
& PR_BYTE
) {
1382 tcg_gen_st8_i64(vb
, cpu_env
, data
& ~PR_BYTE
);
1383 } else if (data
& PR_LONG
) {
1384 tcg_gen_st32_i64(vb
, cpu_env
, data
& ~PR_LONG
);
1386 tcg_gen_st_i64(vb
, cpu_env
, data
);
1394 #endif /* !USER_ONLY*/
1396 #define REQUIRE_NO_LIT \
1403 #define REQUIRE_TB_FLAG(FLAG) \
1405 if ((ctx->tb->flags & (FLAG)) == 0) { \
1410 #define REQUIRE_REG_31(WHICH) \
1412 if (WHICH != 31) { \
1417 static ExitStatus
translate_one(DisasContext
*ctx
, uint32_t insn
)
1419 int32_t disp21
, disp16
, disp12
__attribute__((unused
));
1421 uint8_t opc
, ra
, rb
, rc
, fpfn
, fn7
, lit
;
1422 bool islit
, real_islit
;
1423 TCGv va
, vb
, vc
, tmp
, tmp2
;
1427 /* Decode all instruction fields */
1428 opc
= extract32(insn
, 26, 6);
1429 ra
= extract32(insn
, 21, 5);
1430 rb
= extract32(insn
, 16, 5);
1431 rc
= extract32(insn
, 0, 5);
1432 real_islit
= islit
= extract32(insn
, 12, 1);
1433 lit
= extract32(insn
, 13, 8);
1435 disp21
= sextract32(insn
, 0, 21);
1436 disp16
= sextract32(insn
, 0, 16);
1437 disp12
= sextract32(insn
, 0, 12);
1439 fn11
= extract32(insn
, 5, 11);
1440 fpfn
= extract32(insn
, 5, 6);
1441 fn7
= extract32(insn
, 5, 7);
1443 if (rb
== 31 && !islit
) {
1452 ret
= gen_call_pal(ctx
, insn
& 0x03ffffff);
1478 disp16
= (uint32_t)disp16
<< 16;
1482 va
= dest_gpr(ctx
, ra
);
1483 /* It's worth special-casing immediate loads. */
1485 tcg_gen_movi_i64(va
, disp16
);
1487 tcg_gen_addi_i64(va
, load_gpr(ctx
, rb
), disp16
);
1493 REQUIRE_TB_FLAG(TB_FLAGS_AMASK_BWX
);
1494 gen_load_mem(ctx
, &tcg_gen_qemu_ld8u
, ra
, rb
, disp16
, 0, 0);
1498 gen_load_mem(ctx
, &tcg_gen_qemu_ld64
, ra
, rb
, disp16
, 0, 1);
1502 REQUIRE_TB_FLAG(TB_FLAGS_AMASK_BWX
);
1503 gen_load_mem(ctx
, &tcg_gen_qemu_ld16u
, ra
, rb
, disp16
, 0, 0);
1507 REQUIRE_TB_FLAG(TB_FLAGS_AMASK_BWX
);
1508 gen_store_mem(ctx
, &tcg_gen_qemu_st16
, ra
, rb
, disp16
, 0, 0);
1512 REQUIRE_TB_FLAG(TB_FLAGS_AMASK_BWX
);
1513 gen_store_mem(ctx
, &tcg_gen_qemu_st8
, ra
, rb
, disp16
, 0, 0);
1517 gen_store_mem(ctx
, &tcg_gen_qemu_st64
, ra
, rb
, disp16
, 0, 1);
1521 vc
= dest_gpr(ctx
, rc
);
1522 vb
= load_gpr_lit(ctx
, rb
, lit
, islit
);
1526 /* Special case ADDL as SEXTL. */
1527 tcg_gen_ext32s_i64(vc
, vb
);
1531 /* Special case SUBQ as NEGQ. */
1532 tcg_gen_neg_i64(vc
, vb
);
1537 va
= load_gpr(ctx
, ra
);
1541 tcg_gen_add_i64(vc
, va
, vb
);
1542 tcg_gen_ext32s_i64(vc
, vc
);
1546 tmp
= tcg_temp_new();
1547 tcg_gen_shli_i64(tmp
, va
, 2);
1548 tcg_gen_add_i64(tmp
, tmp
, vb
);
1549 tcg_gen_ext32s_i64(vc
, tmp
);
1554 tcg_gen_sub_i64(vc
, va
, vb
);
1555 tcg_gen_ext32s_i64(vc
, vc
);
1559 tmp
= tcg_temp_new();
1560 tcg_gen_shli_i64(tmp
, va
, 2);
1561 tcg_gen_sub_i64(tmp
, tmp
, vb
);
1562 tcg_gen_ext32s_i64(vc
, tmp
);
1568 /* Special case 0 >= X as X == 0. */
1569 gen_helper_cmpbe0(vc
, vb
);
1571 gen_helper_cmpbge(vc
, va
, vb
);
1576 tmp
= tcg_temp_new();
1577 tcg_gen_shli_i64(tmp
, va
, 3);
1578 tcg_gen_add_i64(tmp
, tmp
, vb
);
1579 tcg_gen_ext32s_i64(vc
, tmp
);
1584 tmp
= tcg_temp_new();
1585 tcg_gen_shli_i64(tmp
, va
, 3);
1586 tcg_gen_sub_i64(tmp
, tmp
, vb
);
1587 tcg_gen_ext32s_i64(vc
, tmp
);
1592 tcg_gen_setcond_i64(TCG_COND_LTU
, vc
, va
, vb
);
1596 tcg_gen_add_i64(vc
, va
, vb
);
1600 tmp
= tcg_temp_new();
1601 tcg_gen_shli_i64(tmp
, va
, 2);
1602 tcg_gen_add_i64(vc
, tmp
, vb
);
1607 tcg_gen_sub_i64(vc
, va
, vb
);
1611 tmp
= tcg_temp_new();
1612 tcg_gen_shli_i64(tmp
, va
, 2);
1613 tcg_gen_sub_i64(vc
, tmp
, vb
);
1618 tcg_gen_setcond_i64(TCG_COND_EQ
, vc
, va
, vb
);
1622 tmp
= tcg_temp_new();
1623 tcg_gen_shli_i64(tmp
, va
, 3);
1624 tcg_gen_add_i64(vc
, tmp
, vb
);
1629 tmp
= tcg_temp_new();
1630 tcg_gen_shli_i64(tmp
, va
, 3);
1631 tcg_gen_sub_i64(vc
, tmp
, vb
);
1636 tcg_gen_setcond_i64(TCG_COND_LEU
, vc
, va
, vb
);
1640 tmp
= tcg_temp_new();
1641 tcg_gen_ext32s_i64(tmp
, va
);
1642 tcg_gen_ext32s_i64(vc
, vb
);
1643 tcg_gen_add_i64(tmp
, tmp
, vc
);
1644 tcg_gen_ext32s_i64(vc
, tmp
);
1645 gen_helper_check_overflow(cpu_env
, vc
, tmp
);
1650 tmp
= tcg_temp_new();
1651 tcg_gen_ext32s_i64(tmp
, va
);
1652 tcg_gen_ext32s_i64(vc
, vb
);
1653 tcg_gen_sub_i64(tmp
, tmp
, vc
);
1654 tcg_gen_ext32s_i64(vc
, tmp
);
1655 gen_helper_check_overflow(cpu_env
, vc
, tmp
);
1660 tcg_gen_setcond_i64(TCG_COND_LT
, vc
, va
, vb
);
1664 tmp
= tcg_temp_new();
1665 tmp2
= tcg_temp_new();
1666 tcg_gen_eqv_i64(tmp
, va
, vb
);
1667 tcg_gen_mov_i64(tmp2
, va
);
1668 tcg_gen_add_i64(vc
, va
, vb
);
1669 tcg_gen_xor_i64(tmp2
, tmp2
, vc
);
1670 tcg_gen_and_i64(tmp
, tmp
, tmp2
);
1671 tcg_gen_shri_i64(tmp
, tmp
, 63);
1672 tcg_gen_movi_i64(tmp2
, 0);
1673 gen_helper_check_overflow(cpu_env
, tmp
, tmp2
);
1675 tcg_temp_free(tmp2
);
1679 tmp
= tcg_temp_new();
1680 tmp2
= tcg_temp_new();
1681 tcg_gen_xor_i64(tmp
, va
, vb
);
1682 tcg_gen_mov_i64(tmp2
, va
);
1683 tcg_gen_sub_i64(vc
, va
, vb
);
1684 tcg_gen_xor_i64(tmp2
, tmp2
, vc
);
1685 tcg_gen_and_i64(tmp
, tmp
, tmp2
);
1686 tcg_gen_shri_i64(tmp
, tmp
, 63);
1687 tcg_gen_movi_i64(tmp2
, 0);
1688 gen_helper_check_overflow(cpu_env
, tmp
, tmp2
);
1690 tcg_temp_free(tmp2
);
1694 tcg_gen_setcond_i64(TCG_COND_LE
, vc
, va
, vb
);
1704 /* Special case BIS as NOP. */
1708 /* Special case BIS as MOV. */
1709 vc
= dest_gpr(ctx
, rc
);
1711 tcg_gen_movi_i64(vc
, lit
);
1713 tcg_gen_mov_i64(vc
, load_gpr(ctx
, rb
));
1719 vc
= dest_gpr(ctx
, rc
);
1720 vb
= load_gpr_lit(ctx
, rb
, lit
, islit
);
1722 if (fn7
== 0x28 && ra
== 31) {
1723 /* Special case ORNOT as NOT. */
1724 tcg_gen_not_i64(vc
, vb
);
1728 va
= load_gpr(ctx
, ra
);
1732 tcg_gen_and_i64(vc
, va
, vb
);
1736 tcg_gen_andc_i64(vc
, va
, vb
);
1740 tmp
= tcg_temp_new();
1741 tcg_gen_andi_i64(tmp
, va
, 1);
1742 tcg_gen_movcond_i64(TCG_COND_NE
, vc
, tmp
, load_zero(ctx
),
1743 vb
, load_gpr(ctx
, rc
));
1748 tmp
= tcg_temp_new();
1749 tcg_gen_andi_i64(tmp
, va
, 1);
1750 tcg_gen_movcond_i64(TCG_COND_EQ
, vc
, tmp
, load_zero(ctx
),
1751 vb
, load_gpr(ctx
, rc
));
1756 tcg_gen_or_i64(vc
, va
, vb
);
1760 tcg_gen_movcond_i64(TCG_COND_EQ
, vc
, va
, load_zero(ctx
),
1761 vb
, load_gpr(ctx
, rc
));
1765 tcg_gen_movcond_i64(TCG_COND_NE
, vc
, va
, load_zero(ctx
),
1766 vb
, load_gpr(ctx
, rc
));
1770 tcg_gen_orc_i64(vc
, va
, vb
);
1774 tcg_gen_xor_i64(vc
, va
, vb
);
1778 tcg_gen_movcond_i64(TCG_COND_LT
, vc
, va
, load_zero(ctx
),
1779 vb
, load_gpr(ctx
, rc
));
1783 tcg_gen_movcond_i64(TCG_COND_GE
, vc
, va
, load_zero(ctx
),
1784 vb
, load_gpr(ctx
, rc
));
1788 tcg_gen_eqv_i64(vc
, va
, vb
);
1794 uint64_t amask
= ctx
->tb
->flags
>> TB_FLAGS_AMASK_SHIFT
;
1795 tcg_gen_andi_i64(vc
, vb
, ~amask
);
1800 tcg_gen_movcond_i64(TCG_COND_LE
, vc
, va
, load_zero(ctx
),
1801 vb
, load_gpr(ctx
, rc
));
1805 tcg_gen_movcond_i64(TCG_COND_GT
, vc
, va
, load_zero(ctx
),
1806 vb
, load_gpr(ctx
, rc
));
1811 tcg_gen_movi_i64(vc
, ctx
->implver
);
1819 vc
= dest_gpr(ctx
, rc
);
1820 va
= load_gpr(ctx
, ra
);
1824 gen_msk_l(ctx
, vc
, va
, rb
, islit
, lit
, 0x01);
1828 gen_ext_l(ctx
, vc
, va
, rb
, islit
, lit
, 0x01);
1832 gen_ins_l(ctx
, vc
, va
, rb
, islit
, lit
, 0x01);
1836 gen_msk_l(ctx
, vc
, va
, rb
, islit
, lit
, 0x03);
1840 gen_ext_l(ctx
, vc
, va
, rb
, islit
, lit
, 0x03);
1844 gen_ins_l(ctx
, vc
, va
, rb
, islit
, lit
, 0x03);
1848 gen_msk_l(ctx
, vc
, va
, rb
, islit
, lit
, 0x0f);
1852 gen_ext_l(ctx
, vc
, va
, rb
, islit
, lit
, 0x0f);
1856 gen_ins_l(ctx
, vc
, va
, rb
, islit
, lit
, 0x0f);
1861 gen_zapnoti(vc
, va
, ~lit
);
1863 gen_helper_zap(vc
, va
, load_gpr(ctx
, rb
));
1869 gen_zapnoti(vc
, va
, lit
);
1871 gen_helper_zapnot(vc
, va
, load_gpr(ctx
, rb
));
1876 gen_msk_l(ctx
, vc
, va
, rb
, islit
, lit
, 0xff);
1881 tcg_gen_shri_i64(vc
, va
, lit
& 0x3f);
1883 tmp
= tcg_temp_new();
1884 vb
= load_gpr(ctx
, rb
);
1885 tcg_gen_andi_i64(tmp
, vb
, 0x3f);
1886 tcg_gen_shr_i64(vc
, va
, tmp
);
1892 gen_ext_l(ctx
, vc
, va
, rb
, islit
, lit
, 0xff);
1897 tcg_gen_shli_i64(vc
, va
, lit
& 0x3f);
1899 tmp
= tcg_temp_new();
1900 vb
= load_gpr(ctx
, rb
);
1901 tcg_gen_andi_i64(tmp
, vb
, 0x3f);
1902 tcg_gen_shl_i64(vc
, va
, tmp
);
1908 gen_ins_l(ctx
, vc
, va
, rb
, islit
, lit
, 0xff);
1913 tcg_gen_sari_i64(vc
, va
, lit
& 0x3f);
1915 tmp
= tcg_temp_new();
1916 vb
= load_gpr(ctx
, rb
);
1917 tcg_gen_andi_i64(tmp
, vb
, 0x3f);
1918 tcg_gen_sar_i64(vc
, va
, tmp
);
1924 gen_msk_h(ctx
, vc
, va
, rb
, islit
, lit
, 0x03);
1928 gen_ins_h(ctx
, vc
, va
, rb
, islit
, lit
, 0x03);
1932 gen_ext_h(ctx
, vc
, va
, rb
, islit
, lit
, 0x03);
1936 gen_msk_h(ctx
, vc
, va
, rb
, islit
, lit
, 0x0f);
1940 gen_ins_h(ctx
, vc
, va
, rb
, islit
, lit
, 0x0f);
1944 gen_ext_h(ctx
, vc
, va
, rb
, islit
, lit
, 0x0f);
1948 gen_msk_h(ctx
, vc
, va
, rb
, islit
, lit
, 0xff);
1952 gen_ins_h(ctx
, vc
, va
, rb
, islit
, lit
, 0xff);
1956 gen_ext_h(ctx
, vc
, va
, rb
, islit
, lit
, 0xff);
1964 vc
= dest_gpr(ctx
, rc
);
1965 vb
= load_gpr_lit(ctx
, rb
, lit
, islit
);
1966 va
= load_gpr(ctx
, ra
);
1970 tcg_gen_mul_i64(vc
, va
, vb
);
1971 tcg_gen_ext32s_i64(vc
, vc
);
1975 tcg_gen_mul_i64(vc
, va
, vb
);
1979 tmp
= tcg_temp_new();
1980 tcg_gen_mulu2_i64(tmp
, vc
, va
, vb
);
1985 tmp
= tcg_temp_new();
1986 tcg_gen_ext32s_i64(tmp
, va
);
1987 tcg_gen_ext32s_i64(vc
, vb
);
1988 tcg_gen_mul_i64(tmp
, tmp
, vc
);
1989 tcg_gen_ext32s_i64(vc
, tmp
);
1990 gen_helper_check_overflow(cpu_env
, vc
, tmp
);
1995 tmp
= tcg_temp_new();
1996 tmp2
= tcg_temp_new();
1997 tcg_gen_muls2_i64(vc
, tmp
, va
, vb
);
1998 tcg_gen_sari_i64(tmp2
, vc
, 63);
1999 gen_helper_check_overflow(cpu_env
, tmp
, tmp2
);
2001 tcg_temp_free(tmp2
);
2009 REQUIRE_TB_FLAG(TB_FLAGS_AMASK_FIX
);
2010 vc
= dest_fpr(ctx
, rc
);
2011 switch (fpfn
) { /* fn11 & 0x3F */
2015 t32
= tcg_temp_new_i32();
2016 va
= load_gpr(ctx
, ra
);
2017 tcg_gen_extrl_i64_i32(t32
, va
);
2018 gen_helper_memory_to_s(vc
, t32
);
2019 tcg_temp_free_i32(t32
);
2024 vb
= load_fpr(ctx
, rb
);
2025 gen_helper_sqrtf(vc
, cpu_env
, vb
);
2030 gen_sqrts(ctx
, rb
, rc
, fn11
);
2035 t32
= tcg_temp_new_i32();
2036 va
= load_gpr(ctx
, ra
);
2037 tcg_gen_extrl_i64_i32(t32
, va
);
2038 gen_helper_memory_to_f(vc
, t32
);
2039 tcg_temp_free_i32(t32
);
2044 va
= load_gpr(ctx
, ra
);
2045 tcg_gen_mov_i64(vc
, va
);
2050 vb
= load_fpr(ctx
, rb
);
2051 gen_helper_sqrtg(vc
, cpu_env
, vb
);
2056 gen_sqrtt(ctx
, rb
, rc
, fn11
);
2064 /* VAX floating point */
2065 /* XXX: rounding mode and trap are ignored (!) */
2066 vc
= dest_fpr(ctx
, rc
);
2067 vb
= load_fpr(ctx
, rb
);
2068 va
= load_fpr(ctx
, ra
);
2069 switch (fpfn
) { /* fn11 & 0x3F */
2072 gen_helper_addf(vc
, cpu_env
, va
, vb
);
2076 gen_helper_subf(vc
, cpu_env
, va
, vb
);
2080 gen_helper_mulf(vc
, cpu_env
, va
, vb
);
2084 gen_helper_divf(vc
, cpu_env
, va
, vb
);
2092 gen_helper_addg(vc
, cpu_env
, va
, vb
);
2096 gen_helper_subg(vc
, cpu_env
, va
, vb
);
2100 gen_helper_mulg(vc
, cpu_env
, va
, vb
);
2104 gen_helper_divg(vc
, cpu_env
, va
, vb
);
2108 gen_helper_cmpgeq(vc
, cpu_env
, va
, vb
);
2112 gen_helper_cmpglt(vc
, cpu_env
, va
, vb
);
2116 gen_helper_cmpgle(vc
, cpu_env
, va
, vb
);
2121 gen_helper_cvtgf(vc
, cpu_env
, vb
);
2130 gen_helper_cvtgq(vc
, cpu_env
, vb
);
2135 gen_helper_cvtqf(vc
, cpu_env
, vb
);
2140 gen_helper_cvtqg(vc
, cpu_env
, vb
);
2148 /* IEEE floating-point */
2149 switch (fpfn
) { /* fn11 & 0x3F */
2152 gen_adds(ctx
, ra
, rb
, rc
, fn11
);
2156 gen_subs(ctx
, ra
, rb
, rc
, fn11
);
2160 gen_muls(ctx
, ra
, rb
, rc
, fn11
);
2164 gen_divs(ctx
, ra
, rb
, rc
, fn11
);
2168 gen_addt(ctx
, ra
, rb
, rc
, fn11
);
2172 gen_subt(ctx
, ra
, rb
, rc
, fn11
);
2176 gen_mult(ctx
, ra
, rb
, rc
, fn11
);
2180 gen_divt(ctx
, ra
, rb
, rc
, fn11
);
2184 gen_cmptun(ctx
, ra
, rb
, rc
, fn11
);
2188 gen_cmpteq(ctx
, ra
, rb
, rc
, fn11
);
2192 gen_cmptlt(ctx
, ra
, rb
, rc
, fn11
);
2196 gen_cmptle(ctx
, ra
, rb
, rc
, fn11
);
2200 if (fn11
== 0x2AC || fn11
== 0x6AC) {
2202 gen_cvtst(ctx
, rb
, rc
, fn11
);
2205 gen_cvtts(ctx
, rb
, rc
, fn11
);
2211 gen_cvttq(ctx
, rb
, rc
, fn11
);
2216 gen_cvtqs(ctx
, rb
, rc
, fn11
);
2221 gen_cvtqt(ctx
, rb
, rc
, fn11
);
2233 vc
= dest_fpr(ctx
, rc
);
2234 vb
= load_fpr(ctx
, rb
);
2240 /* Special case CPYS as FNOP. */
2242 vc
= dest_fpr(ctx
, rc
);
2243 va
= load_fpr(ctx
, ra
);
2245 /* Special case CPYS as FMOV. */
2246 tcg_gen_mov_i64(vc
, va
);
2248 vb
= load_fpr(ctx
, rb
);
2249 gen_cpy_mask(vc
, va
, vb
, 0, 0x8000000000000000ULL
);
2255 vc
= dest_fpr(ctx
, rc
);
2256 vb
= load_fpr(ctx
, rb
);
2257 va
= load_fpr(ctx
, ra
);
2258 gen_cpy_mask(vc
, va
, vb
, 1, 0x8000000000000000ULL
);
2262 vc
= dest_fpr(ctx
, rc
);
2263 vb
= load_fpr(ctx
, rb
);
2264 va
= load_fpr(ctx
, ra
);
2265 gen_cpy_mask(vc
, va
, vb
, 0, 0xFFF0000000000000ULL
);
2269 va
= load_fpr(ctx
, ra
);
2270 gen_helper_store_fpcr(cpu_env
, va
);
2271 if (ctx
->tb_rm
== QUAL_RM_D
) {
2272 /* Re-do the copy of the rounding mode to fp_status
2273 the next time we use dynamic rounding. */
2279 va
= dest_fpr(ctx
, ra
);
2280 gen_helper_load_fpcr(va
, cpu_env
);
2284 gen_fcmov(ctx
, TCG_COND_EQ
, ra
, rb
, rc
);
2288 gen_fcmov(ctx
, TCG_COND_NE
, ra
, rb
, rc
);
2292 gen_fcmov(ctx
, TCG_COND_LT
, ra
, rb
, rc
);
2296 gen_fcmov(ctx
, TCG_COND_GE
, ra
, rb
, rc
);
2300 gen_fcmov(ctx
, TCG_COND_LE
, ra
, rb
, rc
);
2304 gen_fcmov(ctx
, TCG_COND_GT
, ra
, rb
, rc
);
2306 case 0x030: /* CVTQL */
2307 case 0x130: /* CVTQL/V */
2308 case 0x530: /* CVTQL/SV */
2310 vc
= dest_fpr(ctx
, rc
);
2311 vb
= load_fpr(ctx
, rb
);
2312 gen_helper_cvtql(vc
, cpu_env
, vb
);
2313 gen_fp_exc_raise(rc
, fn11
);
2321 switch ((uint16_t)disp16
) {
2348 va
= dest_gpr(ctx
, ra
);
2349 if (ctx
->tb
->cflags
& CF_USE_ICOUNT
) {
2351 gen_helper_load_pcc(va
, cpu_env
);
2353 ret
= EXIT_PC_STALE
;
2355 gen_helper_load_pcc(va
, cpu_env
);
2383 /* HW_MFPR (PALcode) */
2384 #ifndef CONFIG_USER_ONLY
2385 REQUIRE_TB_FLAG(TB_FLAGS_PAL_MODE
);
2386 va
= dest_gpr(ctx
, ra
);
2387 ret
= gen_mfpr(ctx
, va
, insn
& 0xffff);
2394 /* JMP, JSR, RET, JSR_COROUTINE. These only differ by the branch
2395 prediction stack action, which of course we don't implement. */
2396 vb
= load_gpr(ctx
, rb
);
2397 tcg_gen_andi_i64(cpu_pc
, vb
, ~3);
2399 tcg_gen_movi_i64(ctx
->ir
[ra
], ctx
->pc
);
2401 ret
= EXIT_PC_UPDATED
;
2405 /* HW_LD (PALcode) */
2406 #ifndef CONFIG_USER_ONLY
2407 REQUIRE_TB_FLAG(TB_FLAGS_PAL_MODE
);
2409 TCGv addr
= tcg_temp_new();
2410 vb
= load_gpr(ctx
, rb
);
2411 va
= dest_gpr(ctx
, ra
);
2413 tcg_gen_addi_i64(addr
, vb
, disp12
);
2414 switch ((insn
>> 12) & 0xF) {
2416 /* Longword physical access (hw_ldl/p) */
2417 gen_helper_ldl_phys(va
, cpu_env
, addr
);
2420 /* Quadword physical access (hw_ldq/p) */
2421 gen_helper_ldq_phys(va
, cpu_env
, addr
);
2424 /* Longword physical access with lock (hw_ldl_l/p) */
2425 gen_helper_ldl_l_phys(va
, cpu_env
, addr
);
2428 /* Quadword physical access with lock (hw_ldq_l/p) */
2429 gen_helper_ldq_l_phys(va
, cpu_env
, addr
);
2432 /* Longword virtual PTE fetch (hw_ldl/v) */
2435 /* Quadword virtual PTE fetch (hw_ldq/v) */
2445 /* Longword virtual access (hw_ldl) */
2448 /* Quadword virtual access (hw_ldq) */
2451 /* Longword virtual access with protection check (hw_ldl/w) */
2452 tcg_gen_qemu_ld_i64(va
, addr
, MMU_KERNEL_IDX
, MO_LESL
);
2455 /* Quadword virtual access with protection check (hw_ldq/w) */
2456 tcg_gen_qemu_ld_i64(va
, addr
, MMU_KERNEL_IDX
, MO_LEQ
);
2459 /* Longword virtual access with alt access mode (hw_ldl/a)*/
2462 /* Quadword virtual access with alt access mode (hw_ldq/a) */
2465 /* Longword virtual access with alternate access mode and
2466 protection checks (hw_ldl/wa) */
2467 tcg_gen_qemu_ld_i64(va
, addr
, MMU_USER_IDX
, MO_LESL
);
2470 /* Quadword virtual access with alternate access mode and
2471 protection checks (hw_ldq/wa) */
2472 tcg_gen_qemu_ld_i64(va
, addr
, MMU_USER_IDX
, MO_LEQ
);
2475 tcg_temp_free(addr
);
2483 vc
= dest_gpr(ctx
, rc
);
2486 REQUIRE_TB_FLAG(TB_FLAGS_AMASK_FIX
);
2488 va
= load_fpr(ctx
, ra
);
2489 tcg_gen_mov_i64(vc
, va
);
2491 } else if (fn7
== 0x78) {
2493 REQUIRE_TB_FLAG(TB_FLAGS_AMASK_FIX
);
2495 t32
= tcg_temp_new_i32();
2496 va
= load_fpr(ctx
, ra
);
2497 gen_helper_s_to_memory(t32
, va
);
2498 tcg_gen_ext_i32_i64(vc
, t32
);
2499 tcg_temp_free_i32(t32
);
2503 vb
= load_gpr_lit(ctx
, rb
, lit
, islit
);
2507 REQUIRE_TB_FLAG(TB_FLAGS_AMASK_BWX
);
2509 tcg_gen_ext8s_i64(vc
, vb
);
2513 REQUIRE_TB_FLAG(TB_FLAGS_AMASK_BWX
);
2515 tcg_gen_ext16s_i64(vc
, vb
);
2519 REQUIRE_TB_FLAG(TB_FLAGS_AMASK_CIX
);
2522 gen_helper_ctpop(vc
, vb
);
2526 REQUIRE_TB_FLAG(TB_FLAGS_AMASK_MVI
);
2528 va
= load_gpr(ctx
, ra
);
2529 gen_helper_perr(vc
, va
, vb
);
2533 REQUIRE_TB_FLAG(TB_FLAGS_AMASK_CIX
);
2536 gen_helper_ctlz(vc
, vb
);
2540 REQUIRE_TB_FLAG(TB_FLAGS_AMASK_CIX
);
2543 gen_helper_cttz(vc
, vb
);
2547 REQUIRE_TB_FLAG(TB_FLAGS_AMASK_MVI
);
2550 gen_helper_unpkbw(vc
, vb
);
2554 REQUIRE_TB_FLAG(TB_FLAGS_AMASK_MVI
);
2557 gen_helper_unpkbl(vc
, vb
);
2561 REQUIRE_TB_FLAG(TB_FLAGS_AMASK_MVI
);
2564 gen_helper_pkwb(vc
, vb
);
2568 REQUIRE_TB_FLAG(TB_FLAGS_AMASK_MVI
);
2571 gen_helper_pklb(vc
, vb
);
2575 REQUIRE_TB_FLAG(TB_FLAGS_AMASK_MVI
);
2576 va
= load_gpr(ctx
, ra
);
2577 gen_helper_minsb8(vc
, va
, vb
);
2581 REQUIRE_TB_FLAG(TB_FLAGS_AMASK_MVI
);
2582 va
= load_gpr(ctx
, ra
);
2583 gen_helper_minsw4(vc
, va
, vb
);
2587 REQUIRE_TB_FLAG(TB_FLAGS_AMASK_MVI
);
2588 va
= load_gpr(ctx
, ra
);
2589 gen_helper_minub8(vc
, va
, vb
);
2593 REQUIRE_TB_FLAG(TB_FLAGS_AMASK_MVI
);
2594 va
= load_gpr(ctx
, ra
);
2595 gen_helper_minuw4(vc
, va
, vb
);
2599 REQUIRE_TB_FLAG(TB_FLAGS_AMASK_MVI
);
2600 va
= load_gpr(ctx
, ra
);
2601 gen_helper_maxub8(vc
, va
, vb
);
2605 REQUIRE_TB_FLAG(TB_FLAGS_AMASK_MVI
);
2606 va
= load_gpr(ctx
, ra
);
2607 gen_helper_maxuw4(vc
, va
, vb
);
2611 REQUIRE_TB_FLAG(TB_FLAGS_AMASK_MVI
);
2612 va
= load_gpr(ctx
, ra
);
2613 gen_helper_maxsb8(vc
, va
, vb
);
2617 REQUIRE_TB_FLAG(TB_FLAGS_AMASK_MVI
);
2618 va
= load_gpr(ctx
, ra
);
2619 gen_helper_maxsw4(vc
, va
, vb
);
2627 /* HW_MTPR (PALcode) */
2628 #ifndef CONFIG_USER_ONLY
2629 REQUIRE_TB_FLAG(TB_FLAGS_PAL_MODE
);
2630 vb
= load_gpr(ctx
, rb
);
2631 ret
= gen_mtpr(ctx
, vb
, insn
& 0xffff);
2638 /* HW_RET (PALcode) */
2639 #ifndef CONFIG_USER_ONLY
2640 REQUIRE_TB_FLAG(TB_FLAGS_PAL_MODE
);
2642 /* Pre-EV6 CPUs interpreted this as HW_REI, loading the return
2643 address from EXC_ADDR. This turns out to be useful for our
2644 emulation PALcode, so continue to accept it. */
2645 ctx
->lit
= vb
= tcg_temp_new();
2646 tcg_gen_ld_i64(vb
, cpu_env
, offsetof(CPUAlphaState
, exc_addr
));
2648 vb
= load_gpr(ctx
, rb
);
2650 tmp
= tcg_temp_new();
2651 tcg_gen_movi_i64(tmp
, 0);
2652 tcg_gen_st8_i64(tmp
, cpu_env
, offsetof(CPUAlphaState
, intr_flag
));
2653 tcg_gen_movi_i64(cpu_lock_addr
, -1);
2654 tcg_gen_andi_i64(tmp
, vb
, 1);
2655 tcg_gen_st8_i64(tmp
, cpu_env
, offsetof(CPUAlphaState
, pal_mode
));
2656 tcg_gen_andi_i64(cpu_pc
, vb
, ~3);
2657 ret
= EXIT_PC_UPDATED
;
2664 /* HW_ST (PALcode) */
2665 #ifndef CONFIG_USER_ONLY
2666 REQUIRE_TB_FLAG(TB_FLAGS_PAL_MODE
);
2668 TCGv addr
= tcg_temp_new();
2669 va
= load_gpr(ctx
, ra
);
2670 vb
= load_gpr(ctx
, rb
);
2672 tcg_gen_addi_i64(addr
, vb
, disp12
);
2673 switch ((insn
>> 12) & 0xF) {
2675 /* Longword physical access */
2676 gen_helper_stl_phys(cpu_env
, addr
, va
);
2679 /* Quadword physical access */
2680 gen_helper_stq_phys(cpu_env
, addr
, va
);
2683 /* Longword physical access with lock */
2684 gen_helper_stl_c_phys(dest_gpr(ctx
, ra
), cpu_env
, addr
, va
);
2687 /* Quadword physical access with lock */
2688 gen_helper_stq_c_phys(dest_gpr(ctx
, ra
), cpu_env
, addr
, va
);
2691 /* Longword virtual access */
2694 /* Quadword virtual access */
2715 /* Longword virtual access with alternate access mode */
2718 /* Quadword virtual access with alternate access mode */
2727 tcg_temp_free(addr
);
2735 gen_load_mem(ctx
, &gen_qemu_ldf
, ra
, rb
, disp16
, 1, 0);
2739 gen_load_mem(ctx
, &gen_qemu_ldg
, ra
, rb
, disp16
, 1, 0);
2743 gen_load_mem(ctx
, &gen_qemu_lds
, ra
, rb
, disp16
, 1, 0);
2747 gen_load_mem(ctx
, &tcg_gen_qemu_ld64
, ra
, rb
, disp16
, 1, 0);
2751 gen_store_mem(ctx
, &gen_qemu_stf
, ra
, rb
, disp16
, 1, 0);
2755 gen_store_mem(ctx
, &gen_qemu_stg
, ra
, rb
, disp16
, 1, 0);
2759 gen_store_mem(ctx
, &gen_qemu_sts
, ra
, rb
, disp16
, 1, 0);
2763 gen_store_mem(ctx
, &tcg_gen_qemu_st64
, ra
, rb
, disp16
, 1, 0);
2767 gen_load_mem(ctx
, &tcg_gen_qemu_ld32s
, ra
, rb
, disp16
, 0, 0);
2771 gen_load_mem(ctx
, &tcg_gen_qemu_ld64
, ra
, rb
, disp16
, 0, 0);
2775 gen_load_mem(ctx
, &gen_qemu_ldl_l
, ra
, rb
, disp16
, 0, 0);
2779 gen_load_mem(ctx
, &gen_qemu_ldq_l
, ra
, rb
, disp16
, 0, 0);
2783 gen_store_mem(ctx
, &tcg_gen_qemu_st32
, ra
, rb
, disp16
, 0, 0);
2787 gen_store_mem(ctx
, &tcg_gen_qemu_st64
, ra
, rb
, disp16
, 0, 0);
2791 ret
= gen_store_conditional(ctx
, ra
, rb
, disp16
, 0);
2795 ret
= gen_store_conditional(ctx
, ra
, rb
, disp16
, 1);
2799 ret
= gen_bdirect(ctx
, ra
, disp21
);
2801 case 0x31: /* FBEQ */
2802 ret
= gen_fbcond(ctx
, TCG_COND_EQ
, ra
, disp21
);
2804 case 0x32: /* FBLT */
2805 ret
= gen_fbcond(ctx
, TCG_COND_LT
, ra
, disp21
);
2807 case 0x33: /* FBLE */
2808 ret
= gen_fbcond(ctx
, TCG_COND_LE
, ra
, disp21
);
2812 ret
= gen_bdirect(ctx
, ra
, disp21
);
2814 case 0x35: /* FBNE */
2815 ret
= gen_fbcond(ctx
, TCG_COND_NE
, ra
, disp21
);
2817 case 0x36: /* FBGE */
2818 ret
= gen_fbcond(ctx
, TCG_COND_GE
, ra
, disp21
);
2820 case 0x37: /* FBGT */
2821 ret
= gen_fbcond(ctx
, TCG_COND_GT
, ra
, disp21
);
2825 ret
= gen_bcond(ctx
, TCG_COND_EQ
, ra
, disp21
, 1);
2829 ret
= gen_bcond(ctx
, TCG_COND_EQ
, ra
, disp21
, 0);
2833 ret
= gen_bcond(ctx
, TCG_COND_LT
, ra
, disp21
, 0);
2837 ret
= gen_bcond(ctx
, TCG_COND_LE
, ra
, disp21
, 0);
2841 ret
= gen_bcond(ctx
, TCG_COND_NE
, ra
, disp21
, 1);
2845 ret
= gen_bcond(ctx
, TCG_COND_NE
, ra
, disp21
, 0);
2849 ret
= gen_bcond(ctx
, TCG_COND_GE
, ra
, disp21
, 0);
2853 ret
= gen_bcond(ctx
, TCG_COND_GT
, ra
, disp21
, 0);
2856 ret
= gen_invalid(ctx
);
2863 void gen_intermediate_code(CPUAlphaState
*env
, struct TranslationBlock
*tb
)
2865 AlphaCPU
*cpu
= alpha_env_get_cpu(env
);
2866 CPUState
*cs
= CPU(cpu
);
2867 DisasContext ctx
, *ctxp
= &ctx
;
2868 target_ulong pc_start
;
2869 target_ulong pc_mask
;
2879 ctx
.mem_idx
= cpu_mmu_index(env
, false);
2880 ctx
.implver
= env
->implver
;
2881 ctx
.singlestep_enabled
= cs
->singlestep_enabled
;
2883 #ifdef CONFIG_USER_ONLY
2884 ctx
.ir
= cpu_std_ir
;
2886 ctx
.palbr
= env
->palbr
;
2887 ctx
.ir
= (tb
->flags
& TB_FLAGS_PAL_MODE
? cpu_pal_ir
: cpu_std_ir
);
2890 /* ??? Every TB begins with unset rounding mode, to be initialized on
2891 the first fp insn of the TB. Alternately we could define a proper
2892 default for every TB (e.g. QUAL_RM_N or QUAL_RM_D) and make sure
2893 to reset the FP_STATUS to that default at the end of any TB that
2894 changes the default. We could even (gasp) dynamiclly figure out
2895 what default would be most efficient given the running program. */
2897 /* Similarly for flush-to-zero. */
2901 max_insns
= tb
->cflags
& CF_COUNT_MASK
;
2902 if (max_insns
== 0) {
2903 max_insns
= CF_COUNT_MASK
;
2905 if (max_insns
> TCG_MAX_INSNS
) {
2906 max_insns
= TCG_MAX_INSNS
;
2909 if (in_superpage(&ctx
, pc_start
)) {
2910 pc_mask
= (1ULL << 41) - 1;
2912 pc_mask
= ~TARGET_PAGE_MASK
;
2917 tcg_gen_insn_start(ctx
.pc
);
2920 if (unlikely(cpu_breakpoint_test(cs
, ctx
.pc
, BP_ANY
))) {
2921 ret
= gen_excp(&ctx
, EXCP_DEBUG
, 0);
2922 /* The address covered by the breakpoint must be included in
2923 [tb->pc, tb->pc + tb->size) in order to for it to be
2924 properly cleared -- thus we increment the PC here so that
2925 the logic setting tb->size below does the right thing. */
2929 if (num_insns
== max_insns
&& (tb
->cflags
& CF_LAST_IO
)) {
2932 insn
= cpu_ldl_code(env
, ctx
.pc
);
2934 TCGV_UNUSED_I64(ctx
.zero
);
2935 TCGV_UNUSED_I64(ctx
.sink
);
2936 TCGV_UNUSED_I64(ctx
.lit
);
2939 ret
= translate_one(ctxp
, insn
);
2941 if (!TCGV_IS_UNUSED_I64(ctx
.sink
)) {
2942 tcg_gen_discard_i64(ctx
.sink
);
2943 tcg_temp_free(ctx
.sink
);
2945 if (!TCGV_IS_UNUSED_I64(ctx
.zero
)) {
2946 tcg_temp_free(ctx
.zero
);
2948 if (!TCGV_IS_UNUSED_I64(ctx
.lit
)) {
2949 tcg_temp_free(ctx
.lit
);
2952 /* If we reach a page boundary, are single stepping,
2953 or exhaust instruction count, stop generation. */
2955 && ((ctx
.pc
& pc_mask
) == 0
2956 || tcg_op_buf_full()
2957 || num_insns
>= max_insns
2959 || ctx
.singlestep_enabled
)) {
2960 ret
= EXIT_PC_STALE
;
2962 } while (ret
== NO_EXIT
);
2964 if (tb
->cflags
& CF_LAST_IO
) {
2973 tcg_gen_movi_i64(cpu_pc
, ctx
.pc
);
2975 case EXIT_PC_UPDATED
:
2976 if (ctx
.singlestep_enabled
) {
2977 gen_excp_1(EXCP_DEBUG
, 0);
2986 gen_tb_end(tb
, num_insns
);
2988 tb
->size
= ctx
.pc
- pc_start
;
2989 tb
->icount
= num_insns
;
2992 if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM
)) {
2993 qemu_log("IN: %s\n", lookup_symbol(pc_start
));
2994 log_target_disas(cs
, pc_start
, ctx
.pc
- pc_start
, 1);
3000 void restore_state_to_opc(CPUAlphaState
*env
, TranslationBlock
*tb
,