/*
 * Alpha emulation cpu translation for qemu.
 *
 * Copyright (c) 2007 Jocelyn Mayer
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"
#include "cpu.h"
#include "disas/disas.h"
#include "qemu/host-utils.h"
#include "exec/exec-all.h"
#include "tcg-op.h"
#include "exec/cpu_ldst.h"

#include "exec/helper-proto.h"
#include "exec/helper-gen.h"

#include "trace-tcg.h"
#include "exec/log.h"


#undef ALPHA_DEBUG_DISAS
#define CONFIG_SOFTFLOAT_INLINE

#ifdef ALPHA_DEBUG_DISAS
#  define LOG_DISAS(...) qemu_log_mask(CPU_LOG_TB_IN_ASM, ## __VA_ARGS__)
#else
#  define LOG_DISAS(...) do { } while (0)
#endif
typedef struct DisasContext DisasContext;
struct DisasContext {
    struct TranslationBlock *tb;
    uint64_t pc;
#ifndef CONFIG_USER_ONLY
    uint64_t palbr;
#endif
    int mem_idx;

    /* Current rounding mode for this TB.  */
    int tb_rm;
    /* Current flush-to-zero setting for this TB.  */
    int tb_ftz;

    /* implver value for this CPU.  */
    int implver;

    /* The set of registers active in the current context.  */
    TCGv *ir;

    /* Temporaries for $31 and $f31 as source and destination.  */
    TCGv zero;
    TCGv sink;
    /* Temporary for immediate constants.  */
    TCGv lit;

    bool singlestep_enabled;
};

/* Return values from translate_one, indicating the state of the TB.
   Note that zero indicates that we are not exiting the TB.  */

typedef enum {
    NO_EXIT,

    /* We have emitted one or more goto_tb.  No fixup required.  */
    EXIT_GOTO_TB,

    /* We are not using a goto_tb (for whatever reason), but have updated
       the PC (for whatever reason), so there's no need to do it again on
       exiting the TB.  */
    EXIT_PC_UPDATED,

    /* We are exiting the TB, but have neither emitted a goto_tb, nor
       updated the PC for the next instruction to be executed.  */
    EXIT_PC_STALE,

    /* We are ending the TB with a noreturn function call, e.g. longjmp.
       No following code will be executed.  */
    EXIT_NORETURN,
} ExitStatus;
/* global register indexes */
static TCGv_env cpu_env;
static TCGv cpu_std_ir[31];
static TCGv cpu_fir[31];
static TCGv cpu_pc;
static TCGv cpu_lock_addr;
static TCGv cpu_lock_st_addr;
static TCGv cpu_lock_value;

#ifndef CONFIG_USER_ONLY
static TCGv cpu_pal_ir[31];
#endif

#include "exec/gen-icount.h"
void alpha_translate_init(void)
{
#define DEF_VAR(V)  { &cpu_##V, #V, offsetof(CPUAlphaState, V) }

    typedef struct { TCGv *var; const char *name; int ofs; } GlobalVar;
    static const GlobalVar vars[] = {
        DEF_VAR(pc),
        DEF_VAR(lock_addr),
        DEF_VAR(lock_st_addr),
        DEF_VAR(lock_value),
    };

#undef DEF_VAR

    /* Use the symbolic register names that match the disassembler.  */
    static const char greg_names[31][4] = {
        "v0", "t0", "t1", "t2", "t3", "t4", "t5", "t6",
        "t7", "s0", "s1", "s2", "s3", "s4", "s5", "fp",
        "a0", "a1", "a2", "a3", "a4", "a5", "t8", "t9",
        "t10", "t11", "ra", "t12", "at", "gp", "sp"
    };
    static const char freg_names[31][4] = {
        "f0", "f1", "f2", "f3", "f4", "f5", "f6", "f7",
        "f8", "f9", "f10", "f11", "f12", "f13", "f14", "f15",
        "f16", "f17", "f18", "f19", "f20", "f21", "f22", "f23",
        "f24", "f25", "f26", "f27", "f28", "f29", "f30"
    };
#ifndef CONFIG_USER_ONLY
    static const char shadow_names[8][8] = {
        "pal_t7", "pal_s0", "pal_s1", "pal_s2",
        "pal_s3", "pal_s4", "pal_s5", "pal_t11"
    };
#endif

    static bool done_init = 0;
    int i;

    if (done_init) {
        return;
    }
    done_init = 1;

    cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");

    for (i = 0; i < 31; i++) {
        cpu_std_ir[i] = tcg_global_mem_new_i64(cpu_env,
                                               offsetof(CPUAlphaState, ir[i]),
                                               greg_names[i]);
    }

    for (i = 0; i < 31; i++) {
        cpu_fir[i] = tcg_global_mem_new_i64(cpu_env,
                                            offsetof(CPUAlphaState, fir[i]),
                                            freg_names[i]);
    }

#ifndef CONFIG_USER_ONLY
    memcpy(cpu_pal_ir, cpu_std_ir, sizeof(cpu_pal_ir));
    for (i = 0; i < 8; i++) {
        int r = (i == 7 ? 25 : i + 8);
        cpu_pal_ir[r] = tcg_global_mem_new_i64(cpu_env,
                                               offsetof(CPUAlphaState,
                                                        shadow[i]),
                                               shadow_names[i]);
    }
#endif

    for (i = 0; i < ARRAY_SIZE(vars); ++i) {
        const GlobalVar *v = &vars[i];
        *v->var = tcg_global_mem_new_i64(cpu_env, v->ofs, v->name);
    }
}
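/* Note that only 31 integer and 31 fp registers are exposed as TCG globals:
   architecturally $31 and $f31 always read as zero and writes to them are
   discarded, so they are synthesized on demand by load_zero() and
   dest_sink() below rather than being backed by CPU state.  */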
static TCGv load_zero(DisasContext *ctx)
{
    if (TCGV_IS_UNUSED_I64(ctx->zero)) {
        ctx->zero = tcg_const_i64(0);
    }
    return ctx->zero;
}

static TCGv dest_sink(DisasContext *ctx)
{
    if (TCGV_IS_UNUSED_I64(ctx->sink)) {
        ctx->sink = tcg_temp_new();
    }
    return ctx->sink;
}

static TCGv load_gpr(DisasContext *ctx, unsigned reg)
{
    if (likely(reg < 31)) {
        return ctx->ir[reg];
    } else {
        return load_zero(ctx);
    }
}

static TCGv load_gpr_lit(DisasContext *ctx, unsigned reg,
                         uint8_t lit, bool islit)
{
    if (islit) {
        ctx->lit = tcg_const_i64(lit);
        return ctx->lit;
    } else if (likely(reg < 31)) {
        return ctx->ir[reg];
    } else {
        return load_zero(ctx);
    }
}

static TCGv dest_gpr(DisasContext *ctx, unsigned reg)
{
    if (likely(reg < 31)) {
        return ctx->ir[reg];
    } else {
        return dest_sink(ctx);
    }
}

static TCGv load_fpr(DisasContext *ctx, unsigned reg)
{
    if (likely(reg < 31)) {
        return cpu_fir[reg];
    } else {
        return load_zero(ctx);
    }
}

static TCGv dest_fpr(DisasContext *ctx, unsigned reg)
{
    if (likely(reg < 31)) {
        return cpu_fir[reg];
    } else {
        return dest_sink(ctx);
    }
}
static void gen_excp_1(int exception, int error_code)
{
    TCGv_i32 tmp1, tmp2;

    tmp1 = tcg_const_i32(exception);
    tmp2 = tcg_const_i32(error_code);
    gen_helper_excp(cpu_env, tmp1, tmp2);
    tcg_temp_free_i32(tmp2);
    tcg_temp_free_i32(tmp1);
}

static ExitStatus gen_excp(DisasContext *ctx, int exception, int error_code)
{
    tcg_gen_movi_i64(cpu_pc, ctx->pc);
    gen_excp_1(exception, error_code);
    return EXIT_NORETURN;
}

static inline ExitStatus gen_invalid(DisasContext *ctx)
{
    return gen_excp(ctx, EXCP_OPCDEC, 0);
}
static inline void gen_qemu_ldf(TCGv t0, TCGv t1, int flags)
{
    TCGv_i32 tmp32 = tcg_temp_new_i32();
    tcg_gen_qemu_ld_i32(tmp32, t1, flags, MO_LEUL);
    gen_helper_memory_to_f(t0, tmp32);
    tcg_temp_free_i32(tmp32);
}

static inline void gen_qemu_ldg(TCGv t0, TCGv t1, int flags)
{
    TCGv tmp = tcg_temp_new();
    tcg_gen_qemu_ld_i64(tmp, t1, flags, MO_LEQ);
    gen_helper_memory_to_g(t0, tmp);
    tcg_temp_free(tmp);
}

static inline void gen_qemu_lds(TCGv t0, TCGv t1, int flags)
{
    TCGv_i32 tmp32 = tcg_temp_new_i32();
    tcg_gen_qemu_ld_i32(tmp32, t1, flags, MO_LEUL);
    gen_helper_memory_to_s(t0, tmp32);
    tcg_temp_free_i32(tmp32);
}

static inline void gen_qemu_ldl_l(TCGv t0, TCGv t1, int flags)
{
    tcg_gen_qemu_ld_i64(t0, t1, flags, MO_LESL);
    tcg_gen_mov_i64(cpu_lock_addr, t1);
    tcg_gen_mov_i64(cpu_lock_value, t0);
}

static inline void gen_qemu_ldq_l(TCGv t0, TCGv t1, int flags)
{
    tcg_gen_qemu_ld_i64(t0, t1, flags, MO_LEQ);
    tcg_gen_mov_i64(cpu_lock_addr, t1);
    tcg_gen_mov_i64(cpu_lock_value, t0);
}
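/* The two locked loads above record the address and the loaded value in
   cpu_lock_addr/cpu_lock_value; gen_store_conditional() below re-checks
   both before performing the store, which is how the LDx_L/STx_C pairs
   are emulated.  */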
static inline void gen_load_mem(DisasContext *ctx,
                                void (*tcg_gen_qemu_load)(TCGv t0, TCGv t1,
                                                          int flags),
                                int ra, int rb, int32_t disp16, bool fp,
                                bool clear)
{
    TCGv tmp, addr, va;

    /* LDQ_U with ra $31 is UNOP.  Other various loads are forms of
       prefetches, which we can treat as nops.  No worries about
       missed exceptions here.  */
    if (unlikely(ra == 31)) {
        return;
    }

    tmp = tcg_temp_new();
    addr = load_gpr(ctx, rb);

    if (disp16) {
        tcg_gen_addi_i64(tmp, addr, disp16);
        addr = tmp;
    }
    if (clear) {
        tcg_gen_andi_i64(tmp, addr, ~0x7);
        addr = tmp;
    }

    va = (fp ? cpu_fir[ra] : ctx->ir[ra]);
    tcg_gen_qemu_load(va, addr, ctx->mem_idx);

    tcg_temp_free(tmp);
}
static inline void gen_qemu_stf(TCGv t0, TCGv t1, int flags)
{
    TCGv_i32 tmp32 = tcg_temp_new_i32();
    gen_helper_f_to_memory(tmp32, t0);
    tcg_gen_qemu_st_i32(tmp32, t1, flags, MO_LEUL);
    tcg_temp_free_i32(tmp32);
}

static inline void gen_qemu_stg(TCGv t0, TCGv t1, int flags)
{
    TCGv tmp = tcg_temp_new();
    gen_helper_g_to_memory(tmp, t0);
    tcg_gen_qemu_st_i64(tmp, t1, flags, MO_LEQ);
    tcg_temp_free(tmp);
}

static inline void gen_qemu_sts(TCGv t0, TCGv t1, int flags)
{
    TCGv_i32 tmp32 = tcg_temp_new_i32();
    gen_helper_s_to_memory(tmp32, t0);
    tcg_gen_qemu_st_i32(tmp32, t1, flags, MO_LEUL);
    tcg_temp_free_i32(tmp32);
}
static inline void gen_store_mem(DisasContext *ctx,
                                 void (*tcg_gen_qemu_store)(TCGv t0, TCGv t1,
                                                            int flags),
                                 int ra, int rb, int32_t disp16, bool fp,
                                 bool clear)
{
    TCGv tmp, addr, va;

    tmp = tcg_temp_new();
    addr = load_gpr(ctx, rb);

    if (disp16) {
        tcg_gen_addi_i64(tmp, addr, disp16);
        addr = tmp;
    }
    if (clear) {
        tcg_gen_andi_i64(tmp, addr, ~0x7);
        addr = tmp;
    }

    va = (fp ? load_fpr(ctx, ra) : load_gpr(ctx, ra));
    tcg_gen_qemu_store(va, addr, ctx->mem_idx);

    tcg_temp_free(tmp);
}
static ExitStatus gen_store_conditional(DisasContext *ctx, int ra, int rb,
                                        int32_t disp16, int quad)
{
    TCGv addr;

    if (ra == 31) {
        /* ??? Don't bother storing anything.  The user can't tell
           the difference, since the zero register always reads zero.  */
        return NO_EXIT;
    }

#if defined(CONFIG_USER_ONLY)
    addr = cpu_lock_st_addr;
#else
    addr = tcg_temp_local_new();
#endif

    tcg_gen_addi_i64(addr, load_gpr(ctx, rb), disp16);

#if defined(CONFIG_USER_ONLY)
    /* ??? This is handled via a complicated version of compare-and-swap
       in the cpu_loop.  Hopefully one day we'll have a real CAS opcode
       in TCG so that this isn't necessary.  */
    return gen_excp(ctx, quad ? EXCP_STQ_C : EXCP_STL_C, ra);
#else
    /* ??? In system mode we are never multi-threaded, so CAS can be
       implemented via a non-atomic load-compare-store sequence.  */
    {
        TCGLabel *lab_fail, *lab_done;
        TCGv val;

        lab_fail = gen_new_label();
        lab_done = gen_new_label();
        tcg_gen_brcond_i64(TCG_COND_NE, addr, cpu_lock_addr, lab_fail);

        val = tcg_temp_new();
        tcg_gen_qemu_ld_i64(val, addr, ctx->mem_idx, quad ? MO_LEQ : MO_LESL);
        tcg_gen_brcond_i64(TCG_COND_NE, val, cpu_lock_value, lab_fail);

        tcg_gen_qemu_st_i64(ctx->ir[ra], addr, ctx->mem_idx,
                            quad ? MO_LEQ : MO_LEUL);
        tcg_gen_movi_i64(ctx->ir[ra], 1);
        tcg_gen_br(lab_done);

        gen_set_label(lab_fail);
        tcg_gen_movi_i64(ctx->ir[ra], 0);

        gen_set_label(lab_done);
        tcg_gen_movi_i64(cpu_lock_addr, -1);

        tcg_temp_free(addr);
        return NO_EXIT;
    }
#endif
}
static bool in_superpage(DisasContext *ctx, int64_t addr)
{
    return ((ctx->tb->flags & TB_FLAGS_USER_MODE) == 0
            && addr < 0
            && ((addr >> 41) & 3) == 2
            && addr >> TARGET_VIRT_ADDR_SPACE_BITS == addr >> 63);
}

static bool use_goto_tb(DisasContext *ctx, uint64_t dest)
{
    /* Suppress goto_tb in the case of single-stepping and IO.  */
    if ((ctx->tb->cflags & CF_LAST_IO)
        || ctx->singlestep_enabled || singlestep) {
        return false;
    }
#ifndef CONFIG_USER_ONLY
    /* If the destination is in the superpage, the page perms can't change.  */
    if (in_superpage(ctx, dest)) {
        return true;
    }
#endif
    /* Check for the dest on the same page as the start of the TB.  */
    return ((ctx->tb->pc ^ dest) & TARGET_PAGE_MASK) == 0;
}
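/* Illustration: the superpage test accepts sign-extended addresses whose
   bits <42:41> are 10b, i.e. the Alpha kernel superpage (KSEG) region
   around 0xfffffc0000000000, whose translation is fixed by the
   architecture and so cannot change underneath a chained TB.  */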
static ExitStatus gen_bdirect(DisasContext *ctx, int ra, int32_t disp)
{
    uint64_t dest = ctx->pc + (disp << 2);

    if (ra != 31) {
        tcg_gen_movi_i64(ctx->ir[ra], ctx->pc);
    }

    /* Notice branch-to-next; used to initialize RA with the PC.  */
    if (disp == 0) {
        return 0;
    } else if (use_goto_tb(ctx, dest)) {
        tcg_gen_goto_tb(0);
        tcg_gen_movi_i64(cpu_pc, dest);
        tcg_gen_exit_tb((uintptr_t)ctx->tb);
        return EXIT_GOTO_TB;
    } else {
        tcg_gen_movi_i64(cpu_pc, dest);
        return EXIT_PC_UPDATED;
    }
}

static ExitStatus gen_bcond_internal(DisasContext *ctx, TCGCond cond,
                                     TCGv cmp, int32_t disp)
{
    uint64_t dest = ctx->pc + (disp << 2);
    TCGLabel *lab_true = gen_new_label();

    if (use_goto_tb(ctx, dest)) {
        tcg_gen_brcondi_i64(cond, cmp, 0, lab_true);

        tcg_gen_goto_tb(0);
        tcg_gen_movi_i64(cpu_pc, ctx->pc);
        tcg_gen_exit_tb((uintptr_t)ctx->tb);

        gen_set_label(lab_true);
        tcg_gen_goto_tb(1);
        tcg_gen_movi_i64(cpu_pc, dest);
        tcg_gen_exit_tb((uintptr_t)ctx->tb + 1);

        return EXIT_GOTO_TB;
    } else {
        TCGv_i64 z = tcg_const_i64(0);
        TCGv_i64 d = tcg_const_i64(dest);
        TCGv_i64 p = tcg_const_i64(ctx->pc);

        tcg_gen_movcond_i64(cond, cpu_pc, cmp, z, d, p);

        tcg_temp_free_i64(z);
        tcg_temp_free_i64(d);
        tcg_temp_free_i64(p);
        return EXIT_PC_UPDATED;
    }
}
static ExitStatus gen_bcond(DisasContext *ctx, TCGCond cond, int ra,
                            int32_t disp, int mask)
{
    TCGv cmp_tmp;

    if (mask) {
        cmp_tmp = tcg_temp_new();
        tcg_gen_andi_i64(cmp_tmp, load_gpr(ctx, ra), 1);
    } else {
        cmp_tmp = load_gpr(ctx, ra);
    }

    return gen_bcond_internal(ctx, cond, cmp_tmp, disp);
}
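/* Branch displacements are in instruction words relative to the updated PC,
   hence "ctx->pc + (disp << 2)" above; e.g. a branch at 0x1000 with disp = 3
   targets 0x1004 + 3 * 4 = 0x1010, since ctx->pc already points at the
   following instruction.  */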
/* Fold -0.0 for comparison with COND.  */

static void gen_fold_mzero(TCGCond cond, TCGv dest, TCGv src)
{
    uint64_t mzero = 1ull << 63;

    switch (cond) {
    case TCG_COND_LE:
    case TCG_COND_GT:
        /* For <= or >, the -0.0 value directly compares the way we want.  */
        tcg_gen_mov_i64(dest, src);
        break;

    case TCG_COND_EQ:
    case TCG_COND_NE:
        /* For == or !=, we can simply mask off the sign bit and compare.  */
        tcg_gen_andi_i64(dest, src, mzero - 1);
        break;

    case TCG_COND_GE:
    case TCG_COND_LT:
        /* For >= or <, map -0.0 to +0.0 via comparison and mask.  */
        tcg_gen_setcondi_i64(TCG_COND_NE, dest, src, mzero);
        tcg_gen_neg_i64(dest, dest);
        tcg_gen_and_i64(dest, dest, src);
        break;

    default:
        abort();
    }
}
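/* Example: -0.0 is the bit pattern 0x8000000000000000 (mzero above).
   For EQ/NE, masking with mzero - 1 clears only the sign bit, so both
   +0.0 and -0.0 compare equal to zero.  For GE/LT, the setcond/neg/and
   sequence produces 0 when src == -0.0 and passes src through otherwise.  */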
static ExitStatus gen_fbcond(DisasContext *ctx, TCGCond cond, int ra,
                             int32_t disp)
{
    TCGv cmp_tmp = tcg_temp_new();
    gen_fold_mzero(cond, cmp_tmp, load_fpr(ctx, ra));
    return gen_bcond_internal(ctx, cond, cmp_tmp, disp);
}

static void gen_fcmov(DisasContext *ctx, TCGCond cond, int ra, int rb, int rc)
{
    TCGv va, vb, z;

    z = load_zero(ctx);
    vb = load_fpr(ctx, rb);
    va = tcg_temp_new();
    gen_fold_mzero(cond, va, load_fpr(ctx, ra));

    tcg_gen_movcond_i64(cond, dest_fpr(ctx, rc), va, z, vb, load_fpr(ctx, rc));

    tcg_temp_free(va);
}
#define QUAL_RM_N       0x080   /* Round mode nearest even */
#define QUAL_RM_C       0x000   /* Round mode chopped */
#define QUAL_RM_M       0x040   /* Round mode minus infinity */
#define QUAL_RM_D       0x0c0   /* Round mode dynamic */
#define QUAL_RM_MASK    0x0c0

#define QUAL_U          0x100   /* Underflow enable (fp output) */
#define QUAL_V          0x100   /* Overflow enable (int output) */
#define QUAL_S          0x400   /* Software completion enable */
#define QUAL_I          0x200   /* Inexact detection enable */
611 static void gen_qual_roundmode(DisasContext
*ctx
, int fn11
)
615 fn11
&= QUAL_RM_MASK
;
616 if (fn11
== ctx
->tb_rm
) {
621 tmp
= tcg_temp_new_i32();
624 tcg_gen_movi_i32(tmp
, float_round_nearest_even
);
627 tcg_gen_movi_i32(tmp
, float_round_to_zero
);
630 tcg_gen_movi_i32(tmp
, float_round_down
);
633 tcg_gen_ld8u_i32(tmp
, cpu_env
,
634 offsetof(CPUAlphaState
, fpcr_dyn_round
));
638 #if defined(CONFIG_SOFTFLOAT_INLINE)
639 /* ??? The "fpu/softfloat.h" interface is to call set_float_rounding_mode.
640 With CONFIG_SOFTFLOAT that expands to an out-of-line call that just
641 sets the one field. */
642 tcg_gen_st8_i32(tmp
, cpu_env
,
643 offsetof(CPUAlphaState
, fp_status
.float_rounding_mode
));
645 gen_helper_setroundmode(tmp
);
648 tcg_temp_free_i32(tmp
);
651 static void gen_qual_flushzero(DisasContext
*ctx
, int fn11
)
656 if (fn11
== ctx
->tb_ftz
) {
661 tmp
= tcg_temp_new_i32();
663 /* Underflow is enabled, use the FPCR setting. */
664 tcg_gen_ld8u_i32(tmp
, cpu_env
,
665 offsetof(CPUAlphaState
, fpcr_flush_to_zero
));
667 /* Underflow is disabled, force flush-to-zero. */
668 tcg_gen_movi_i32(tmp
, 1);
671 #if defined(CONFIG_SOFTFLOAT_INLINE)
672 tcg_gen_st8_i32(tmp
, cpu_env
,
673 offsetof(CPUAlphaState
, fp_status
.flush_to_zero
));
675 gen_helper_setflushzero(tmp
);
678 tcg_temp_free_i32(tmp
);
681 static TCGv
gen_ieee_input(DisasContext
*ctx
, int reg
, int fn11
, int is_cmp
)
685 if (unlikely(reg
== 31)) {
686 val
= load_zero(ctx
);
689 if ((fn11
& QUAL_S
) == 0) {
691 gen_helper_ieee_input_cmp(cpu_env
, val
);
693 gen_helper_ieee_input(cpu_env
, val
);
696 #ifndef CONFIG_USER_ONLY
697 /* In system mode, raise exceptions for denormals like real
698 hardware. In user mode, proceed as if the OS completion
699 handler is handling the denormal as per spec. */
700 gen_helper_ieee_input_s(cpu_env
, val
);
707 static void gen_fp_exc_raise(int rc
, int fn11
)
709 /* ??? We ought to be able to do something with imprecise exceptions.
710 E.g. notice we're still in the trap shadow of something within the
711 TB and do not generate the code to signal the exception; end the TB
712 when an exception is forced to arrive, either by consumption of a
713 register value or TRAPB or EXCB. */
717 if (!(fn11
& QUAL_U
)) {
718 /* Note that QUAL_U == QUAL_V, so ignore either. */
719 ignore
|= FPCR_UNF
| FPCR_IOV
;
721 if (!(fn11
& QUAL_I
)) {
724 ign
= tcg_const_i32(ignore
);
726 /* ??? Pass in the regno of the destination so that the helper can
727 set EXC_MASK, which contains a bitmask of destination registers
728 that have caused arithmetic traps. A simple userspace emulation
729 does not require this. We do need it for a guest kernel's entArith,
730 or if we were to do something clever with imprecise exceptions. */
731 reg
= tcg_const_i32(rc
+ 32);
733 gen_helper_fp_exc_raise_s(cpu_env
, ign
, reg
);
735 gen_helper_fp_exc_raise(cpu_env
, ign
, reg
);
738 tcg_temp_free_i32(reg
);
739 tcg_temp_free_i32(ign
);
742 static void gen_cvtlq(TCGv vc
, TCGv vb
)
744 TCGv tmp
= tcg_temp_new();
746 /* The arithmetic right shift here, plus the sign-extended mask below
747 yields a sign-extended result without an explicit ext32s_i64. */
748 tcg_gen_sari_i64(tmp
, vb
, 32);
749 tcg_gen_shri_i64(vc
, vb
, 29);
750 tcg_gen_andi_i64(tmp
, tmp
, (int32_t)0xc0000000);
751 tcg_gen_andi_i64(vc
, vc
, 0x3fffffff);
752 tcg_gen_or_i64(vc
, vc
, tmp
);
757 static void gen_ieee_arith2(DisasContext
*ctx
,
758 void (*helper
)(TCGv
, TCGv_ptr
, TCGv
),
759 int rb
, int rc
, int fn11
)
763 gen_qual_roundmode(ctx
, fn11
);
764 gen_qual_flushzero(ctx
, fn11
);
766 vb
= gen_ieee_input(ctx
, rb
, fn11
, 0);
767 helper(dest_fpr(ctx
, rc
), cpu_env
, vb
);
769 gen_fp_exc_raise(rc
, fn11
);
772 #define IEEE_ARITH2(name) \
773 static inline void glue(gen_, name)(DisasContext *ctx, \
774 int rb, int rc, int fn11) \
776 gen_ieee_arith2(ctx, gen_helper_##name, rb, rc, fn11); \
783 static void gen_cvttq(DisasContext
*ctx
, int rb
, int rc
, int fn11
)
787 /* No need to set flushzero, since we have an integer output. */
788 vb
= gen_ieee_input(ctx
, rb
, fn11
, 0);
789 vc
= dest_fpr(ctx
, rc
);
791 /* Almost all integer conversions use cropped rounding;
792 special case that. */
793 if ((fn11
& QUAL_RM_MASK
) == QUAL_RM_C
) {
794 gen_helper_cvttq_c(vc
, cpu_env
, vb
);
796 gen_qual_roundmode(ctx
, fn11
);
797 gen_helper_cvttq(vc
, cpu_env
, vb
);
799 gen_fp_exc_raise(rc
, fn11
);
802 static void gen_ieee_intcvt(DisasContext
*ctx
,
803 void (*helper
)(TCGv
, TCGv_ptr
, TCGv
),
804 int rb
, int rc
, int fn11
)
808 gen_qual_roundmode(ctx
, fn11
);
809 vb
= load_fpr(ctx
, rb
);
810 vc
= dest_fpr(ctx
, rc
);
812 /* The only exception that can be raised by integer conversion
813 is inexact. Thus we only need to worry about exceptions when
814 inexact handling is requested. */
816 helper(vc
, cpu_env
, vb
);
817 gen_fp_exc_raise(rc
, fn11
);
819 helper(vc
, cpu_env
, vb
);
823 #define IEEE_INTCVT(name) \
824 static inline void glue(gen_, name)(DisasContext *ctx, \
825 int rb, int rc, int fn11) \
827 gen_ieee_intcvt(ctx, gen_helper_##name, rb, rc, fn11); \
832 static void gen_cpy_mask(TCGv vc
, TCGv va
, TCGv vb
, bool inv_a
, uint64_t mask
)
834 TCGv vmask
= tcg_const_i64(mask
);
835 TCGv tmp
= tcg_temp_new_i64();
838 tcg_gen_andc_i64(tmp
, vmask
, va
);
840 tcg_gen_and_i64(tmp
, va
, vmask
);
843 tcg_gen_andc_i64(vc
, vb
, vmask
);
844 tcg_gen_or_i64(vc
, vc
, tmp
);
846 tcg_temp_free(vmask
);
850 static void gen_ieee_arith3(DisasContext
*ctx
,
851 void (*helper
)(TCGv
, TCGv_ptr
, TCGv
, TCGv
),
852 int ra
, int rb
, int rc
, int fn11
)
856 gen_qual_roundmode(ctx
, fn11
);
857 gen_qual_flushzero(ctx
, fn11
);
859 va
= gen_ieee_input(ctx
, ra
, fn11
, 0);
860 vb
= gen_ieee_input(ctx
, rb
, fn11
, 0);
861 vc
= dest_fpr(ctx
, rc
);
862 helper(vc
, cpu_env
, va
, vb
);
864 gen_fp_exc_raise(rc
, fn11
);
867 #define IEEE_ARITH3(name) \
868 static inline void glue(gen_, name)(DisasContext *ctx, \
869 int ra, int rb, int rc, int fn11) \
871 gen_ieee_arith3(ctx, gen_helper_##name, ra, rb, rc, fn11); \
882 static void gen_ieee_compare(DisasContext
*ctx
,
883 void (*helper
)(TCGv
, TCGv_ptr
, TCGv
, TCGv
),
884 int ra
, int rb
, int rc
, int fn11
)
888 va
= gen_ieee_input(ctx
, ra
, fn11
, 1);
889 vb
= gen_ieee_input(ctx
, rb
, fn11
, 1);
890 vc
= dest_fpr(ctx
, rc
);
891 helper(vc
, cpu_env
, va
, vb
);
893 gen_fp_exc_raise(rc
, fn11
);
896 #define IEEE_CMP3(name) \
897 static inline void glue(gen_, name)(DisasContext *ctx, \
898 int ra, int rb, int rc, int fn11) \
900 gen_ieee_compare(ctx, gen_helper_##name, ra, rb, rc, fn11); \
static inline uint64_t zapnot_mask(uint8_t lit)
{
    uint64_t mask = 0;
    int i;

    for (i = 0; i < 8; ++i) {
        if ((lit >> i) & 1) {
            mask |= 0xffull << (i * 8);
        }
    }
    return mask;
}

/* Implement zapnot with an immediate operand, which expands to some
   form of immediate AND.  This is a basic building block in the
   definition of many of the other byte manipulation instructions.  */
static void gen_zapnoti(TCGv dest, TCGv src, uint8_t lit)
{
    switch (lit) {
    case 0x00:
        tcg_gen_movi_i64(dest, 0);
        break;
    case 0x01:
        tcg_gen_ext8u_i64(dest, src);
        break;
    case 0x03:
        tcg_gen_ext16u_i64(dest, src);
        break;
    case 0x0f:
        tcg_gen_ext32u_i64(dest, src);
        break;
    case 0xff:
        tcg_gen_mov_i64(dest, src);
        break;
    default:
        tcg_gen_andi_i64(dest, src, zapnot_mask(lit));
        break;
    }
}
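/* zapnot_mask() expands each set bit of LIT into a full byte of mask:
   e.g. lit = 0x0f -> 0x00000000ffffffff, lit = 0x11 -> 0x000000ff000000ff.
   The special cases above (0x00/0x01/0x03/0x0f/0xff) map to cheaper
   move/zero-extension ops instead of an explicit AND.  */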
947 /* EXTWH, EXTLH, EXTQH */
948 static void gen_ext_h(DisasContext
*ctx
, TCGv vc
, TCGv va
, int rb
, bool islit
,
949 uint8_t lit
, uint8_t byte_mask
)
952 tcg_gen_shli_i64(vc
, va
, (64 - lit
* 8) & 0x3f);
954 TCGv tmp
= tcg_temp_new();
955 tcg_gen_shli_i64(tmp
, load_gpr(ctx
, rb
), 3);
956 tcg_gen_neg_i64(tmp
, tmp
);
957 tcg_gen_andi_i64(tmp
, tmp
, 0x3f);
958 tcg_gen_shl_i64(vc
, va
, tmp
);
961 gen_zapnoti(vc
, vc
, byte_mask
);
964 /* EXTBL, EXTWL, EXTLL, EXTQL */
965 static void gen_ext_l(DisasContext
*ctx
, TCGv vc
, TCGv va
, int rb
, bool islit
,
966 uint8_t lit
, uint8_t byte_mask
)
969 tcg_gen_shri_i64(vc
, va
, (lit
& 7) * 8);
971 TCGv tmp
= tcg_temp_new();
972 tcg_gen_andi_i64(tmp
, load_gpr(ctx
, rb
), 7);
973 tcg_gen_shli_i64(tmp
, tmp
, 3);
974 tcg_gen_shr_i64(vc
, va
, tmp
);
977 gen_zapnoti(vc
, vc
, byte_mask
);
980 /* INSWH, INSLH, INSQH */
981 static void gen_ins_h(DisasContext
*ctx
, TCGv vc
, TCGv va
, int rb
, bool islit
,
982 uint8_t lit
, uint8_t byte_mask
)
984 TCGv tmp
= tcg_temp_new();
986 /* The instruction description has us left-shift the byte mask and extract
987 bits <15:8> and apply that zap at the end. This is equivalent to simply
988 performing the zap first and shifting afterward. */
989 gen_zapnoti(tmp
, va
, byte_mask
);
993 if (unlikely(lit
== 0)) {
994 tcg_gen_movi_i64(vc
, 0);
996 tcg_gen_shri_i64(vc
, tmp
, 64 - lit
* 8);
999 TCGv shift
= tcg_temp_new();
1001 /* If (B & 7) == 0, we need to shift by 64 and leave a zero. Do this
1002 portably by splitting the shift into two parts: shift_count-1 and 1.
1003 Arrange for the -1 by using ones-complement instead of
1004 twos-complement in the negation: ~(B * 8) & 63. */
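        /* Worked example: if (B & 7) == 0 then ~(B * 8) & 63 == 63, so the
           variable shift by 63 plus the constant shift by 1 totals 64 and
           yields zero, without ever emitting an (undefined) 64-bit shift
           count.  For any other B, 63 - ((B * 8) & 63) plus 1 is exactly
           the required 64 - 8 * (B & 7).  */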
1006 tcg_gen_shli_i64(shift
, load_gpr(ctx
, rb
), 3);
1007 tcg_gen_not_i64(shift
, shift
);
1008 tcg_gen_andi_i64(shift
, shift
, 0x3f);
1010 tcg_gen_shr_i64(vc
, tmp
, shift
);
1011 tcg_gen_shri_i64(vc
, vc
, 1);
1012 tcg_temp_free(shift
);
1017 /* INSBL, INSWL, INSLL, INSQL */
1018 static void gen_ins_l(DisasContext
*ctx
, TCGv vc
, TCGv va
, int rb
, bool islit
,
1019 uint8_t lit
, uint8_t byte_mask
)
1021 TCGv tmp
= tcg_temp_new();
1023 /* The instruction description has us left-shift the byte mask
1024 the same number of byte slots as the data and apply the zap
1025 at the end. This is equivalent to simply performing the zap
1026 first and shifting afterward. */
1027 gen_zapnoti(tmp
, va
, byte_mask
);
1030 tcg_gen_shli_i64(vc
, tmp
, (lit
& 7) * 8);
1032 TCGv shift
= tcg_temp_new();
1033 tcg_gen_andi_i64(shift
, load_gpr(ctx
, rb
), 7);
1034 tcg_gen_shli_i64(shift
, shift
, 3);
1035 tcg_gen_shl_i64(vc
, tmp
, shift
);
1036 tcg_temp_free(shift
);
1041 /* MSKWH, MSKLH, MSKQH */
1042 static void gen_msk_h(DisasContext
*ctx
, TCGv vc
, TCGv va
, int rb
, bool islit
,
1043 uint8_t lit
, uint8_t byte_mask
)
1046 gen_zapnoti(vc
, va
, ~((byte_mask
<< (lit
& 7)) >> 8));
1048 TCGv shift
= tcg_temp_new();
1049 TCGv mask
= tcg_temp_new();
1051 /* The instruction description is as above, where the byte_mask
1052 is shifted left, and then we extract bits <15:8>. This can be
1053 emulated with a right-shift on the expanded byte mask. This
1054 requires extra care because for an input <2:0> == 0 we need a
1055 shift of 64 bits in order to generate a zero. This is done by
1056 splitting the shift into two parts, the variable shift - 1
1057 followed by a constant 1 shift. The code we expand below is
1058 equivalent to ~(B * 8) & 63. */
1060 tcg_gen_shli_i64(shift
, load_gpr(ctx
, rb
), 3);
1061 tcg_gen_not_i64(shift
, shift
);
1062 tcg_gen_andi_i64(shift
, shift
, 0x3f);
1063 tcg_gen_movi_i64(mask
, zapnot_mask (byte_mask
));
1064 tcg_gen_shr_i64(mask
, mask
, shift
);
1065 tcg_gen_shri_i64(mask
, mask
, 1);
1067 tcg_gen_andc_i64(vc
, va
, mask
);
1069 tcg_temp_free(mask
);
1070 tcg_temp_free(shift
);
1074 /* MSKBL, MSKWL, MSKLL, MSKQL */
1075 static void gen_msk_l(DisasContext
*ctx
, TCGv vc
, TCGv va
, int rb
, bool islit
,
1076 uint8_t lit
, uint8_t byte_mask
)
1079 gen_zapnoti(vc
, va
, ~(byte_mask
<< (lit
& 7)));
1081 TCGv shift
= tcg_temp_new();
1082 TCGv mask
= tcg_temp_new();
1084 tcg_gen_andi_i64(shift
, load_gpr(ctx
, rb
), 7);
1085 tcg_gen_shli_i64(shift
, shift
, 3);
1086 tcg_gen_movi_i64(mask
, zapnot_mask(byte_mask
));
1087 tcg_gen_shl_i64(mask
, mask
, shift
);
1089 tcg_gen_andc_i64(vc
, va
, mask
);
1091 tcg_temp_free(mask
);
1092 tcg_temp_free(shift
);
1096 static void gen_rx(DisasContext
*ctx
, int ra
, int set
)
1101 tcg_gen_ld8u_i64(ctx
->ir
[ra
], cpu_env
,
1102 offsetof(CPUAlphaState
, intr_flag
));
1105 tmp
= tcg_const_i32(set
);
1106 tcg_gen_st8_i32(tmp
, cpu_env
, offsetof(CPUAlphaState
, intr_flag
));
1107 tcg_temp_free_i32(tmp
);
1110 static ExitStatus
gen_call_pal(DisasContext
*ctx
, int palcode
)
1112 /* We're emulating OSF/1 PALcode. Many of these are trivial access
1113 to internal cpu registers. */
1115 /* Unprivileged PAL call */
1116 if (palcode
>= 0x80 && palcode
< 0xC0) {
1120 /* No-op inside QEMU. */
1124 tcg_gen_ld_i64(ctx
->ir
[IR_V0
], cpu_env
,
1125 offsetof(CPUAlphaState
, unique
));
1129 tcg_gen_st_i64(ctx
->ir
[IR_A0
], cpu_env
,
1130 offsetof(CPUAlphaState
, unique
));
1139 #ifndef CONFIG_USER_ONLY
1140 /* Privileged PAL code */
1141 if (palcode
< 0x40 && (ctx
->tb
->flags
& TB_FLAGS_USER_MODE
) == 0) {
1145 /* No-op inside QEMU. */
1149 /* No-op inside QEMU. */
1153 tcg_gen_st_i64(ctx
->ir
[IR_A0
], cpu_env
,
1154 offsetof(CPUAlphaState
, vptptr
));
1158 tcg_gen_st_i64(ctx
->ir
[IR_A0
], cpu_env
,
1159 offsetof(CPUAlphaState
, sysval
));
1163 tcg_gen_ld_i64(ctx
->ir
[IR_V0
], cpu_env
,
1164 offsetof(CPUAlphaState
, sysval
));
1171 /* Note that we already know we're in kernel mode, so we know
1172 that PS only contains the 3 IPL bits. */
1173 tcg_gen_ld8u_i64(ctx
->ir
[IR_V0
], cpu_env
,
1174 offsetof(CPUAlphaState
, ps
));
1176 /* But make sure and store only the 3 IPL bits from the user. */
1177 tmp
= tcg_temp_new();
1178 tcg_gen_andi_i64(tmp
, ctx
->ir
[IR_A0
], PS_INT_MASK
);
1179 tcg_gen_st8_i64(tmp
, cpu_env
, offsetof(CPUAlphaState
, ps
));
1186 tcg_gen_ld8u_i64(ctx
->ir
[IR_V0
], cpu_env
,
1187 offsetof(CPUAlphaState
, ps
));
1191 tcg_gen_st_i64(ctx
->ir
[IR_A0
], cpu_env
,
1192 offsetof(CPUAlphaState
, usp
));
1196 tcg_gen_ld_i64(ctx
->ir
[IR_V0
], cpu_env
,
1197 offsetof(CPUAlphaState
, usp
));
1201 tcg_gen_ld32s_i64(ctx
->ir
[IR_V0
], cpu_env
,
1202 -offsetof(AlphaCPU
, env
) + offsetof(CPUState
, cpu_index
));
1212 return gen_invalid(ctx
);
1215 #ifdef CONFIG_USER_ONLY
1216 return gen_excp(ctx
, EXCP_CALL_PAL
, palcode
);
1219 TCGv tmp
= tcg_temp_new();
1220 uint64_t exc_addr
= ctx
->pc
;
1221 uint64_t entry
= ctx
->palbr
;
1223 if (ctx
->tb
->flags
& TB_FLAGS_PAL_MODE
) {
1226 tcg_gen_movi_i64(tmp
, 1);
1227 tcg_gen_st8_i64(tmp
, cpu_env
, offsetof(CPUAlphaState
, pal_mode
));
1230 tcg_gen_movi_i64(tmp
, exc_addr
);
1231 tcg_gen_st_i64(tmp
, cpu_env
, offsetof(CPUAlphaState
, exc_addr
));
1234 entry
+= (palcode
& 0x80
1235 ? 0x2000 + (palcode
- 0x80) * 64
1236 : 0x1000 + palcode
* 64);
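        /* Entry point arithmetic above, illustrated: an unprivileged
           CALL_PAL such as 0x83 (OSF/1 callsys) enters at
           palbr + 0x2000 + (0x83 - 0x80) * 64 = palbr + 0x20c0, while a
           privileged call such as 0x01 enters at palbr + 0x1000 + 0x40.  */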
1238 /* Since the destination is running in PALmode, we don't really
1239 need the page permissions check. We'll see the existence of
1240 the page when we create the TB, and we'll flush all TBs if
1241 we change the PAL base register. */
1242 if (!ctx
->singlestep_enabled
&& !(ctx
->tb
->cflags
& CF_LAST_IO
)) {
1244 tcg_gen_movi_i64(cpu_pc
, entry
);
1245 tcg_gen_exit_tb((uintptr_t)ctx
->tb
);
1246 return EXIT_GOTO_TB
;
1248 tcg_gen_movi_i64(cpu_pc
, entry
);
1249 return EXIT_PC_UPDATED
;
1255 #ifndef CONFIG_USER_ONLY
1257 #define PR_BYTE 0x100000
1258 #define PR_LONG 0x200000
1260 static int cpu_pr_data(int pr
)
1263 case 0: return offsetof(CPUAlphaState
, ps
) | PR_BYTE
;
1264 case 1: return offsetof(CPUAlphaState
, fen
) | PR_BYTE
;
1265 case 2: return offsetof(CPUAlphaState
, pcc_ofs
) | PR_LONG
;
1266 case 3: return offsetof(CPUAlphaState
, trap_arg0
);
1267 case 4: return offsetof(CPUAlphaState
, trap_arg1
);
1268 case 5: return offsetof(CPUAlphaState
, trap_arg2
);
1269 case 6: return offsetof(CPUAlphaState
, exc_addr
);
1270 case 7: return offsetof(CPUAlphaState
, palbr
);
1271 case 8: return offsetof(CPUAlphaState
, ptbr
);
1272 case 9: return offsetof(CPUAlphaState
, vptptr
);
1273 case 10: return offsetof(CPUAlphaState
, unique
);
1274 case 11: return offsetof(CPUAlphaState
, sysval
);
1275 case 12: return offsetof(CPUAlphaState
, usp
);
1278 return offsetof(CPUAlphaState
, scratch
[pr
- 40]);
1281 return offsetof(CPUAlphaState
, alarm_expire
);
1286 static ExitStatus
gen_mfpr(DisasContext
*ctx
, TCGv va
, int regno
)
1288 void (*helper
)(TCGv
);
1293 /* Accessing the "non-shadow" general registers. */
1294 regno
= regno
== 39 ? 25 : regno
- 32 + 8;
1295 tcg_gen_mov_i64(va
, cpu_std_ir
[regno
]);
1298 case 250: /* WALLTIME */
1299 helper
= gen_helper_get_walltime
;
1301 case 249: /* VMTIME */
1302 helper
= gen_helper_get_vmtime
;
1308 return EXIT_PC_STALE
;
1315 /* The basic registers are data only, and unknown registers
1316 are read-zero, write-ignore. */
1317 data
= cpu_pr_data(regno
);
1319 tcg_gen_movi_i64(va
, 0);
1320 } else if (data
& PR_BYTE
) {
1321 tcg_gen_ld8u_i64(va
, cpu_env
, data
& ~PR_BYTE
);
1322 } else if (data
& PR_LONG
) {
1323 tcg_gen_ld32s_i64(va
, cpu_env
, data
& ~PR_LONG
);
1325 tcg_gen_ld_i64(va
, cpu_env
, data
);
1333 static ExitStatus
gen_mtpr(DisasContext
*ctx
, TCGv vb
, int regno
)
1341 gen_helper_tbia(cpu_env
);
1346 gen_helper_tbis(cpu_env
, vb
);
1351 tmp
= tcg_const_i64(1);
1352 tcg_gen_st32_i64(tmp
, cpu_env
, -offsetof(AlphaCPU
, env
) +
1353 offsetof(CPUState
, halted
));
1354 return gen_excp(ctx
, EXCP_HLT
, 0);
1358 gen_helper_halt(vb
);
1359 return EXIT_PC_STALE
;
1363 gen_helper_set_alarm(cpu_env
, vb
);
1368 tcg_gen_st_i64(vb
, cpu_env
, offsetof(CPUAlphaState
, palbr
));
1369 /* Changing the PAL base register implies un-chaining all of the TBs
1370 that ended with a CALL_PAL. Since the base register usually only
1371 changes during boot, flushing everything works well. */
1372 gen_helper_tb_flush(cpu_env
);
1373 return EXIT_PC_STALE
;
1376 /* Accessing the "non-shadow" general registers. */
1377 regno
= regno
== 39 ? 25 : regno
- 32 + 8;
1378 tcg_gen_mov_i64(cpu_std_ir
[regno
], vb
);
1382 /* The basic registers are data only, and unknown registers
1383 are read-zero, write-ignore. */
1384 data
= cpu_pr_data(regno
);
1386 if (data
& PR_BYTE
) {
1387 tcg_gen_st8_i64(vb
, cpu_env
, data
& ~PR_BYTE
);
1388 } else if (data
& PR_LONG
) {
1389 tcg_gen_st32_i64(vb
, cpu_env
, data
& ~PR_LONG
);
1391 tcg_gen_st_i64(vb
, cpu_env
, data
);
1399 #endif /* !USER_ONLY*/
1401 #define REQUIRE_NO_LIT \
1408 #define REQUIRE_TB_FLAG(FLAG) \
1410 if ((ctx->tb->flags & (FLAG)) == 0) { \
1415 #define REQUIRE_REG_31(WHICH) \
1417 if (WHICH != 31) { \
1422 static ExitStatus
translate_one(DisasContext
*ctx
, uint32_t insn
)
1424 int32_t disp21
, disp16
, disp12
__attribute__((unused
));
1426 uint8_t opc
, ra
, rb
, rc
, fpfn
, fn7
, lit
;
1427 bool islit
, real_islit
;
1428 TCGv va
, vb
, vc
, tmp
, tmp2
;
1432 /* Decode all instruction fields */
1433 opc
= extract32(insn
, 26, 6);
1434 ra
= extract32(insn
, 21, 5);
1435 rb
= extract32(insn
, 16, 5);
1436 rc
= extract32(insn
, 0, 5);
1437 real_islit
= islit
= extract32(insn
, 12, 1);
1438 lit
= extract32(insn
, 13, 8);
1440 disp21
= sextract32(insn
, 0, 21);
1441 disp16
= sextract32(insn
, 0, 16);
1442 disp12
= sextract32(insn
, 0, 12);
1444 fn11
= extract32(insn
, 5, 11);
1445 fpfn
= extract32(insn
, 5, 6);
1446 fn7
= extract32(insn
, 5, 7);
1448 if (rb
== 31 && !islit
) {
1457 ret
= gen_call_pal(ctx
, insn
& 0x03ffffff);
1483 disp16
= (uint32_t)disp16
<< 16;
1487 va
= dest_gpr(ctx
, ra
);
1488 /* It's worth special-casing immediate loads. */
1490 tcg_gen_movi_i64(va
, disp16
);
1492 tcg_gen_addi_i64(va
, load_gpr(ctx
, rb
), disp16
);
1498 REQUIRE_TB_FLAG(TB_FLAGS_AMASK_BWX
);
1499 gen_load_mem(ctx
, &tcg_gen_qemu_ld8u
, ra
, rb
, disp16
, 0, 0);
1503 gen_load_mem(ctx
, &tcg_gen_qemu_ld64
, ra
, rb
, disp16
, 0, 1);
1507 REQUIRE_TB_FLAG(TB_FLAGS_AMASK_BWX
);
1508 gen_load_mem(ctx
, &tcg_gen_qemu_ld16u
, ra
, rb
, disp16
, 0, 0);
1512 REQUIRE_TB_FLAG(TB_FLAGS_AMASK_BWX
);
1513 gen_store_mem(ctx
, &tcg_gen_qemu_st16
, ra
, rb
, disp16
, 0, 0);
1517 REQUIRE_TB_FLAG(TB_FLAGS_AMASK_BWX
);
1518 gen_store_mem(ctx
, &tcg_gen_qemu_st8
, ra
, rb
, disp16
, 0, 0);
1522 gen_store_mem(ctx
, &tcg_gen_qemu_st64
, ra
, rb
, disp16
, 0, 1);
1526 vc
= dest_gpr(ctx
, rc
);
1527 vb
= load_gpr_lit(ctx
, rb
, lit
, islit
);
1531 /* Special case ADDL as SEXTL. */
1532 tcg_gen_ext32s_i64(vc
, vb
);
1536 /* Special case SUBQ as NEGQ. */
1537 tcg_gen_neg_i64(vc
, vb
);
1542 va
= load_gpr(ctx
, ra
);
1546 tcg_gen_add_i64(vc
, va
, vb
);
1547 tcg_gen_ext32s_i64(vc
, vc
);
1551 tmp
= tcg_temp_new();
1552 tcg_gen_shli_i64(tmp
, va
, 2);
1553 tcg_gen_add_i64(tmp
, tmp
, vb
);
1554 tcg_gen_ext32s_i64(vc
, tmp
);
1559 tcg_gen_sub_i64(vc
, va
, vb
);
1560 tcg_gen_ext32s_i64(vc
, vc
);
1564 tmp
= tcg_temp_new();
1565 tcg_gen_shli_i64(tmp
, va
, 2);
1566 tcg_gen_sub_i64(tmp
, tmp
, vb
);
1567 tcg_gen_ext32s_i64(vc
, tmp
);
1573 /* Special case 0 >= X as X == 0. */
1574 gen_helper_cmpbe0(vc
, vb
);
1576 gen_helper_cmpbge(vc
, va
, vb
);
1581 tmp
= tcg_temp_new();
1582 tcg_gen_shli_i64(tmp
, va
, 3);
1583 tcg_gen_add_i64(tmp
, tmp
, vb
);
1584 tcg_gen_ext32s_i64(vc
, tmp
);
1589 tmp
= tcg_temp_new();
1590 tcg_gen_shli_i64(tmp
, va
, 3);
1591 tcg_gen_sub_i64(tmp
, tmp
, vb
);
1592 tcg_gen_ext32s_i64(vc
, tmp
);
1597 tcg_gen_setcond_i64(TCG_COND_LTU
, vc
, va
, vb
);
1601 tcg_gen_add_i64(vc
, va
, vb
);
1605 tmp
= tcg_temp_new();
1606 tcg_gen_shli_i64(tmp
, va
, 2);
1607 tcg_gen_add_i64(vc
, tmp
, vb
);
1612 tcg_gen_sub_i64(vc
, va
, vb
);
1616 tmp
= tcg_temp_new();
1617 tcg_gen_shli_i64(tmp
, va
, 2);
1618 tcg_gen_sub_i64(vc
, tmp
, vb
);
1623 tcg_gen_setcond_i64(TCG_COND_EQ
, vc
, va
, vb
);
1627 tmp
= tcg_temp_new();
1628 tcg_gen_shli_i64(tmp
, va
, 3);
1629 tcg_gen_add_i64(vc
, tmp
, vb
);
1634 tmp
= tcg_temp_new();
1635 tcg_gen_shli_i64(tmp
, va
, 3);
1636 tcg_gen_sub_i64(vc
, tmp
, vb
);
1641 tcg_gen_setcond_i64(TCG_COND_LEU
, vc
, va
, vb
);
1645 tmp
= tcg_temp_new();
1646 tcg_gen_ext32s_i64(tmp
, va
);
1647 tcg_gen_ext32s_i64(vc
, vb
);
1648 tcg_gen_add_i64(tmp
, tmp
, vc
);
1649 tcg_gen_ext32s_i64(vc
, tmp
);
1650 gen_helper_check_overflow(cpu_env
, vc
, tmp
);
1655 tmp
= tcg_temp_new();
1656 tcg_gen_ext32s_i64(tmp
, va
);
1657 tcg_gen_ext32s_i64(vc
, vb
);
1658 tcg_gen_sub_i64(tmp
, tmp
, vc
);
1659 tcg_gen_ext32s_i64(vc
, tmp
);
1660 gen_helper_check_overflow(cpu_env
, vc
, tmp
);
1665 tcg_gen_setcond_i64(TCG_COND_LT
, vc
, va
, vb
);
1669 tmp
= tcg_temp_new();
1670 tmp2
= tcg_temp_new();
1671 tcg_gen_eqv_i64(tmp
, va
, vb
);
1672 tcg_gen_mov_i64(tmp2
, va
);
1673 tcg_gen_add_i64(vc
, va
, vb
);
1674 tcg_gen_xor_i64(tmp2
, tmp2
, vc
);
1675 tcg_gen_and_i64(tmp
, tmp
, tmp2
);
1676 tcg_gen_shri_i64(tmp
, tmp
, 63);
1677 tcg_gen_movi_i64(tmp2
, 0);
1678 gen_helper_check_overflow(cpu_env
, tmp
, tmp2
);
1680 tcg_temp_free(tmp2
);
1684 tmp
= tcg_temp_new();
1685 tmp2
= tcg_temp_new();
1686 tcg_gen_xor_i64(tmp
, va
, vb
);
1687 tcg_gen_mov_i64(tmp2
, va
);
1688 tcg_gen_sub_i64(vc
, va
, vb
);
1689 tcg_gen_xor_i64(tmp2
, tmp2
, vc
);
1690 tcg_gen_and_i64(tmp
, tmp
, tmp2
);
1691 tcg_gen_shri_i64(tmp
, tmp
, 63);
1692 tcg_gen_movi_i64(tmp2
, 0);
1693 gen_helper_check_overflow(cpu_env
, tmp
, tmp2
);
1695 tcg_temp_free(tmp2
);
1699 tcg_gen_setcond_i64(TCG_COND_LE
, vc
, va
, vb
);
1709 /* Special case BIS as NOP. */
1713 /* Special case BIS as MOV. */
1714 vc
= dest_gpr(ctx
, rc
);
1716 tcg_gen_movi_i64(vc
, lit
);
1718 tcg_gen_mov_i64(vc
, load_gpr(ctx
, rb
));
1724 vc
= dest_gpr(ctx
, rc
);
1725 vb
= load_gpr_lit(ctx
, rb
, lit
, islit
);
1727 if (fn7
== 0x28 && ra
== 31) {
1728 /* Special case ORNOT as NOT. */
1729 tcg_gen_not_i64(vc
, vb
);
1733 va
= load_gpr(ctx
, ra
);
1737 tcg_gen_and_i64(vc
, va
, vb
);
1741 tcg_gen_andc_i64(vc
, va
, vb
);
1745 tmp
= tcg_temp_new();
1746 tcg_gen_andi_i64(tmp
, va
, 1);
1747 tcg_gen_movcond_i64(TCG_COND_NE
, vc
, tmp
, load_zero(ctx
),
1748 vb
, load_gpr(ctx
, rc
));
1753 tmp
= tcg_temp_new();
1754 tcg_gen_andi_i64(tmp
, va
, 1);
1755 tcg_gen_movcond_i64(TCG_COND_EQ
, vc
, tmp
, load_zero(ctx
),
1756 vb
, load_gpr(ctx
, rc
));
1761 tcg_gen_or_i64(vc
, va
, vb
);
1765 tcg_gen_movcond_i64(TCG_COND_EQ
, vc
, va
, load_zero(ctx
),
1766 vb
, load_gpr(ctx
, rc
));
1770 tcg_gen_movcond_i64(TCG_COND_NE
, vc
, va
, load_zero(ctx
),
1771 vb
, load_gpr(ctx
, rc
));
1775 tcg_gen_orc_i64(vc
, va
, vb
);
1779 tcg_gen_xor_i64(vc
, va
, vb
);
1783 tcg_gen_movcond_i64(TCG_COND_LT
, vc
, va
, load_zero(ctx
),
1784 vb
, load_gpr(ctx
, rc
));
1788 tcg_gen_movcond_i64(TCG_COND_GE
, vc
, va
, load_zero(ctx
),
1789 vb
, load_gpr(ctx
, rc
));
1793 tcg_gen_eqv_i64(vc
, va
, vb
);
1799 uint64_t amask
= ctx
->tb
->flags
>> TB_FLAGS_AMASK_SHIFT
;
1800 tcg_gen_andi_i64(vc
, vb
, ~amask
);
1805 tcg_gen_movcond_i64(TCG_COND_LE
, vc
, va
, load_zero(ctx
),
1806 vb
, load_gpr(ctx
, rc
));
1810 tcg_gen_movcond_i64(TCG_COND_GT
, vc
, va
, load_zero(ctx
),
1811 vb
, load_gpr(ctx
, rc
));
1816 tcg_gen_movi_i64(vc
, ctx
->implver
);
1824 vc
= dest_gpr(ctx
, rc
);
1825 va
= load_gpr(ctx
, ra
);
1829 gen_msk_l(ctx
, vc
, va
, rb
, islit
, lit
, 0x01);
1833 gen_ext_l(ctx
, vc
, va
, rb
, islit
, lit
, 0x01);
1837 gen_ins_l(ctx
, vc
, va
, rb
, islit
, lit
, 0x01);
1841 gen_msk_l(ctx
, vc
, va
, rb
, islit
, lit
, 0x03);
1845 gen_ext_l(ctx
, vc
, va
, rb
, islit
, lit
, 0x03);
1849 gen_ins_l(ctx
, vc
, va
, rb
, islit
, lit
, 0x03);
1853 gen_msk_l(ctx
, vc
, va
, rb
, islit
, lit
, 0x0f);
1857 gen_ext_l(ctx
, vc
, va
, rb
, islit
, lit
, 0x0f);
1861 gen_ins_l(ctx
, vc
, va
, rb
, islit
, lit
, 0x0f);
1866 gen_zapnoti(vc
, va
, ~lit
);
1868 gen_helper_zap(vc
, va
, load_gpr(ctx
, rb
));
1874 gen_zapnoti(vc
, va
, lit
);
1876 gen_helper_zapnot(vc
, va
, load_gpr(ctx
, rb
));
1881 gen_msk_l(ctx
, vc
, va
, rb
, islit
, lit
, 0xff);
1886 tcg_gen_shri_i64(vc
, va
, lit
& 0x3f);
1888 tmp
= tcg_temp_new();
1889 vb
= load_gpr(ctx
, rb
);
1890 tcg_gen_andi_i64(tmp
, vb
, 0x3f);
1891 tcg_gen_shr_i64(vc
, va
, tmp
);
1897 gen_ext_l(ctx
, vc
, va
, rb
, islit
, lit
, 0xff);
1902 tcg_gen_shli_i64(vc
, va
, lit
& 0x3f);
1904 tmp
= tcg_temp_new();
1905 vb
= load_gpr(ctx
, rb
);
1906 tcg_gen_andi_i64(tmp
, vb
, 0x3f);
1907 tcg_gen_shl_i64(vc
, va
, tmp
);
1913 gen_ins_l(ctx
, vc
, va
, rb
, islit
, lit
, 0xff);
1918 tcg_gen_sari_i64(vc
, va
, lit
& 0x3f);
1920 tmp
= tcg_temp_new();
1921 vb
= load_gpr(ctx
, rb
);
1922 tcg_gen_andi_i64(tmp
, vb
, 0x3f);
1923 tcg_gen_sar_i64(vc
, va
, tmp
);
1929 gen_msk_h(ctx
, vc
, va
, rb
, islit
, lit
, 0x03);
1933 gen_ins_h(ctx
, vc
, va
, rb
, islit
, lit
, 0x03);
1937 gen_ext_h(ctx
, vc
, va
, rb
, islit
, lit
, 0x03);
1941 gen_msk_h(ctx
, vc
, va
, rb
, islit
, lit
, 0x0f);
1945 gen_ins_h(ctx
, vc
, va
, rb
, islit
, lit
, 0x0f);
1949 gen_ext_h(ctx
, vc
, va
, rb
, islit
, lit
, 0x0f);
1953 gen_msk_h(ctx
, vc
, va
, rb
, islit
, lit
, 0xff);
1957 gen_ins_h(ctx
, vc
, va
, rb
, islit
, lit
, 0xff);
1961 gen_ext_h(ctx
, vc
, va
, rb
, islit
, lit
, 0xff);
1969 vc
= dest_gpr(ctx
, rc
);
1970 vb
= load_gpr_lit(ctx
, rb
, lit
, islit
);
1971 va
= load_gpr(ctx
, ra
);
1975 tcg_gen_mul_i64(vc
, va
, vb
);
1976 tcg_gen_ext32s_i64(vc
, vc
);
1980 tcg_gen_mul_i64(vc
, va
, vb
);
1984 tmp
= tcg_temp_new();
1985 tcg_gen_mulu2_i64(tmp
, vc
, va
, vb
);
1990 tmp
= tcg_temp_new();
1991 tcg_gen_ext32s_i64(tmp
, va
);
1992 tcg_gen_ext32s_i64(vc
, vb
);
1993 tcg_gen_mul_i64(tmp
, tmp
, vc
);
1994 tcg_gen_ext32s_i64(vc
, tmp
);
1995 gen_helper_check_overflow(cpu_env
, vc
, tmp
);
2000 tmp
= tcg_temp_new();
2001 tmp2
= tcg_temp_new();
2002 tcg_gen_muls2_i64(vc
, tmp
, va
, vb
);
2003 tcg_gen_sari_i64(tmp2
, vc
, 63);
2004 gen_helper_check_overflow(cpu_env
, tmp
, tmp2
);
2006 tcg_temp_free(tmp2
);
2014 REQUIRE_TB_FLAG(TB_FLAGS_AMASK_FIX
);
2015 vc
= dest_fpr(ctx
, rc
);
2016 switch (fpfn
) { /* fn11 & 0x3F */
2020 t32
= tcg_temp_new_i32();
2021 va
= load_gpr(ctx
, ra
);
2022 tcg_gen_extrl_i64_i32(t32
, va
);
2023 gen_helper_memory_to_s(vc
, t32
);
2024 tcg_temp_free_i32(t32
);
2029 vb
= load_fpr(ctx
, rb
);
2030 gen_helper_sqrtf(vc
, cpu_env
, vb
);
2035 gen_sqrts(ctx
, rb
, rc
, fn11
);
2040 t32
= tcg_temp_new_i32();
2041 va
= load_gpr(ctx
, ra
);
2042 tcg_gen_extrl_i64_i32(t32
, va
);
2043 gen_helper_memory_to_f(vc
, t32
);
2044 tcg_temp_free_i32(t32
);
2049 va
= load_gpr(ctx
, ra
);
2050 tcg_gen_mov_i64(vc
, va
);
2055 vb
= load_fpr(ctx
, rb
);
2056 gen_helper_sqrtg(vc
, cpu_env
, vb
);
2061 gen_sqrtt(ctx
, rb
, rc
, fn11
);
2069 /* VAX floating point */
2070 /* XXX: rounding mode and trap are ignored (!) */
2071 vc
= dest_fpr(ctx
, rc
);
2072 vb
= load_fpr(ctx
, rb
);
2073 va
= load_fpr(ctx
, ra
);
2074 switch (fpfn
) { /* fn11 & 0x3F */
2077 gen_helper_addf(vc
, cpu_env
, va
, vb
);
2081 gen_helper_subf(vc
, cpu_env
, va
, vb
);
2085 gen_helper_mulf(vc
, cpu_env
, va
, vb
);
2089 gen_helper_divf(vc
, cpu_env
, va
, vb
);
2097 gen_helper_addg(vc
, cpu_env
, va
, vb
);
2101 gen_helper_subg(vc
, cpu_env
, va
, vb
);
2105 gen_helper_mulg(vc
, cpu_env
, va
, vb
);
2109 gen_helper_divg(vc
, cpu_env
, va
, vb
);
2113 gen_helper_cmpgeq(vc
, cpu_env
, va
, vb
);
2117 gen_helper_cmpglt(vc
, cpu_env
, va
, vb
);
2121 gen_helper_cmpgle(vc
, cpu_env
, va
, vb
);
2126 gen_helper_cvtgf(vc
, cpu_env
, vb
);
2135 gen_helper_cvtgq(vc
, cpu_env
, vb
);
2140 gen_helper_cvtqf(vc
, cpu_env
, vb
);
2145 gen_helper_cvtqg(vc
, cpu_env
, vb
);
2153 /* IEEE floating-point */
2154 switch (fpfn
) { /* fn11 & 0x3F */
2157 gen_adds(ctx
, ra
, rb
, rc
, fn11
);
2161 gen_subs(ctx
, ra
, rb
, rc
, fn11
);
2165 gen_muls(ctx
, ra
, rb
, rc
, fn11
);
2169 gen_divs(ctx
, ra
, rb
, rc
, fn11
);
2173 gen_addt(ctx
, ra
, rb
, rc
, fn11
);
2177 gen_subt(ctx
, ra
, rb
, rc
, fn11
);
2181 gen_mult(ctx
, ra
, rb
, rc
, fn11
);
2185 gen_divt(ctx
, ra
, rb
, rc
, fn11
);
2189 gen_cmptun(ctx
, ra
, rb
, rc
, fn11
);
2193 gen_cmpteq(ctx
, ra
, rb
, rc
, fn11
);
2197 gen_cmptlt(ctx
, ra
, rb
, rc
, fn11
);
2201 gen_cmptle(ctx
, ra
, rb
, rc
, fn11
);
2205 if (fn11
== 0x2AC || fn11
== 0x6AC) {
2207 gen_cvtst(ctx
, rb
, rc
, fn11
);
2210 gen_cvtts(ctx
, rb
, rc
, fn11
);
2216 gen_cvttq(ctx
, rb
, rc
, fn11
);
2221 gen_cvtqs(ctx
, rb
, rc
, fn11
);
2226 gen_cvtqt(ctx
, rb
, rc
, fn11
);
2238 vc
= dest_fpr(ctx
, rc
);
2239 vb
= load_fpr(ctx
, rb
);
2245 /* Special case CPYS as FNOP. */
2247 vc
= dest_fpr(ctx
, rc
);
2248 va
= load_fpr(ctx
, ra
);
2250 /* Special case CPYS as FMOV. */
2251 tcg_gen_mov_i64(vc
, va
);
2253 vb
= load_fpr(ctx
, rb
);
2254 gen_cpy_mask(vc
, va
, vb
, 0, 0x8000000000000000ULL
);
2260 vc
= dest_fpr(ctx
, rc
);
2261 vb
= load_fpr(ctx
, rb
);
2262 va
= load_fpr(ctx
, ra
);
2263 gen_cpy_mask(vc
, va
, vb
, 1, 0x8000000000000000ULL
);
2267 vc
= dest_fpr(ctx
, rc
);
2268 vb
= load_fpr(ctx
, rb
);
2269 va
= load_fpr(ctx
, ra
);
2270 gen_cpy_mask(vc
, va
, vb
, 0, 0xFFF0000000000000ULL
);
2274 va
= load_fpr(ctx
, ra
);
2275 gen_helper_store_fpcr(cpu_env
, va
);
2276 if (ctx
->tb_rm
== QUAL_RM_D
) {
2277 /* Re-do the copy of the rounding mode to fp_status
2278 the next time we use dynamic rounding. */
2284 va
= dest_fpr(ctx
, ra
);
2285 gen_helper_load_fpcr(va
, cpu_env
);
2289 gen_fcmov(ctx
, TCG_COND_EQ
, ra
, rb
, rc
);
2293 gen_fcmov(ctx
, TCG_COND_NE
, ra
, rb
, rc
);
2297 gen_fcmov(ctx
, TCG_COND_LT
, ra
, rb
, rc
);
2301 gen_fcmov(ctx
, TCG_COND_GE
, ra
, rb
, rc
);
2305 gen_fcmov(ctx
, TCG_COND_LE
, ra
, rb
, rc
);
2309 gen_fcmov(ctx
, TCG_COND_GT
, ra
, rb
, rc
);
2311 case 0x030: /* CVTQL */
2312 case 0x130: /* CVTQL/V */
2313 case 0x530: /* CVTQL/SV */
2315 vc
= dest_fpr(ctx
, rc
);
2316 vb
= load_fpr(ctx
, rb
);
2317 gen_helper_cvtql(vc
, cpu_env
, vb
);
2318 gen_fp_exc_raise(rc
, fn11
);
2326 switch ((uint16_t)disp16
) {
2353 va
= dest_gpr(ctx
, ra
);
2354 if (ctx
->tb
->cflags
& CF_USE_ICOUNT
) {
2356 gen_helper_load_pcc(va
, cpu_env
);
2358 ret
= EXIT_PC_STALE
;
2360 gen_helper_load_pcc(va
, cpu_env
);
2388 /* HW_MFPR (PALcode) */
2389 #ifndef CONFIG_USER_ONLY
2390 REQUIRE_TB_FLAG(TB_FLAGS_PAL_MODE
);
2391 va
= dest_gpr(ctx
, ra
);
2392 ret
= gen_mfpr(ctx
, va
, insn
& 0xffff);
2399 /* JMP, JSR, RET, JSR_COROUTINE. These only differ by the branch
2400 prediction stack action, which of course we don't implement. */
2401 vb
= load_gpr(ctx
, rb
);
2402 tcg_gen_andi_i64(cpu_pc
, vb
, ~3);
2404 tcg_gen_movi_i64(ctx
->ir
[ra
], ctx
->pc
);
2406 ret
= EXIT_PC_UPDATED
;
2410 /* HW_LD (PALcode) */
2411 #ifndef CONFIG_USER_ONLY
2412 REQUIRE_TB_FLAG(TB_FLAGS_PAL_MODE
);
2414 TCGv addr
= tcg_temp_new();
2415 vb
= load_gpr(ctx
, rb
);
2416 va
= dest_gpr(ctx
, ra
);
2418 tcg_gen_addi_i64(addr
, vb
, disp12
);
2419 switch ((insn
>> 12) & 0xF) {
2421 /* Longword physical access (hw_ldl/p) */
2422 gen_helper_ldl_phys(va
, cpu_env
, addr
);
2425 /* Quadword physical access (hw_ldq/p) */
2426 gen_helper_ldq_phys(va
, cpu_env
, addr
);
2429 /* Longword physical access with lock (hw_ldl_l/p) */
2430 gen_helper_ldl_l_phys(va
, cpu_env
, addr
);
2433 /* Quadword physical access with lock (hw_ldq_l/p) */
2434 gen_helper_ldq_l_phys(va
, cpu_env
, addr
);
2437 /* Longword virtual PTE fetch (hw_ldl/v) */
2440 /* Quadword virtual PTE fetch (hw_ldq/v) */
2450 /* Longword virtual access (hw_ldl) */
2453 /* Quadword virtual access (hw_ldq) */
2456 /* Longword virtual access with protection check (hw_ldl/w) */
2457 tcg_gen_qemu_ld_i64(va
, addr
, MMU_KERNEL_IDX
, MO_LESL
);
2460 /* Quadword virtual access with protection check (hw_ldq/w) */
2461 tcg_gen_qemu_ld_i64(va
, addr
, MMU_KERNEL_IDX
, MO_LEQ
);
2464 /* Longword virtual access with alt access mode (hw_ldl/a)*/
2467 /* Quadword virtual access with alt access mode (hw_ldq/a) */
2470 /* Longword virtual access with alternate access mode and
2471 protection checks (hw_ldl/wa) */
2472 tcg_gen_qemu_ld_i64(va
, addr
, MMU_USER_IDX
, MO_LESL
);
2475 /* Quadword virtual access with alternate access mode and
2476 protection checks (hw_ldq/wa) */
2477 tcg_gen_qemu_ld_i64(va
, addr
, MMU_USER_IDX
, MO_LEQ
);
2480 tcg_temp_free(addr
);
2488 vc
= dest_gpr(ctx
, rc
);
2491 REQUIRE_TB_FLAG(TB_FLAGS_AMASK_FIX
);
2493 va
= load_fpr(ctx
, ra
);
2494 tcg_gen_mov_i64(vc
, va
);
2496 } else if (fn7
== 0x78) {
2498 REQUIRE_TB_FLAG(TB_FLAGS_AMASK_FIX
);
2500 t32
= tcg_temp_new_i32();
2501 va
= load_fpr(ctx
, ra
);
2502 gen_helper_s_to_memory(t32
, va
);
2503 tcg_gen_ext_i32_i64(vc
, t32
);
2504 tcg_temp_free_i32(t32
);
2508 vb
= load_gpr_lit(ctx
, rb
, lit
, islit
);
2512 REQUIRE_TB_FLAG(TB_FLAGS_AMASK_BWX
);
2514 tcg_gen_ext8s_i64(vc
, vb
);
2518 REQUIRE_TB_FLAG(TB_FLAGS_AMASK_BWX
);
2520 tcg_gen_ext16s_i64(vc
, vb
);
2524 REQUIRE_TB_FLAG(TB_FLAGS_AMASK_CIX
);
2527 gen_helper_ctpop(vc
, vb
);
2531 REQUIRE_TB_FLAG(TB_FLAGS_AMASK_MVI
);
2533 va
= load_gpr(ctx
, ra
);
2534 gen_helper_perr(vc
, va
, vb
);
2538 REQUIRE_TB_FLAG(TB_FLAGS_AMASK_CIX
);
2541 gen_helper_ctlz(vc
, vb
);
2545 REQUIRE_TB_FLAG(TB_FLAGS_AMASK_CIX
);
2548 gen_helper_cttz(vc
, vb
);
2552 REQUIRE_TB_FLAG(TB_FLAGS_AMASK_MVI
);
2555 gen_helper_unpkbw(vc
, vb
);
2559 REQUIRE_TB_FLAG(TB_FLAGS_AMASK_MVI
);
2562 gen_helper_unpkbl(vc
, vb
);
2566 REQUIRE_TB_FLAG(TB_FLAGS_AMASK_MVI
);
2569 gen_helper_pkwb(vc
, vb
);
2573 REQUIRE_TB_FLAG(TB_FLAGS_AMASK_MVI
);
2576 gen_helper_pklb(vc
, vb
);
2580 REQUIRE_TB_FLAG(TB_FLAGS_AMASK_MVI
);
2581 va
= load_gpr(ctx
, ra
);
2582 gen_helper_minsb8(vc
, va
, vb
);
2586 REQUIRE_TB_FLAG(TB_FLAGS_AMASK_MVI
);
2587 va
= load_gpr(ctx
, ra
);
2588 gen_helper_minsw4(vc
, va
, vb
);
2592 REQUIRE_TB_FLAG(TB_FLAGS_AMASK_MVI
);
2593 va
= load_gpr(ctx
, ra
);
2594 gen_helper_minub8(vc
, va
, vb
);
2598 REQUIRE_TB_FLAG(TB_FLAGS_AMASK_MVI
);
2599 va
= load_gpr(ctx
, ra
);
2600 gen_helper_minuw4(vc
, va
, vb
);
2604 REQUIRE_TB_FLAG(TB_FLAGS_AMASK_MVI
);
2605 va
= load_gpr(ctx
, ra
);
2606 gen_helper_maxub8(vc
, va
, vb
);
2610 REQUIRE_TB_FLAG(TB_FLAGS_AMASK_MVI
);
2611 va
= load_gpr(ctx
, ra
);
2612 gen_helper_maxuw4(vc
, va
, vb
);
2616 REQUIRE_TB_FLAG(TB_FLAGS_AMASK_MVI
);
2617 va
= load_gpr(ctx
, ra
);
2618 gen_helper_maxsb8(vc
, va
, vb
);
2622 REQUIRE_TB_FLAG(TB_FLAGS_AMASK_MVI
);
2623 va
= load_gpr(ctx
, ra
);
2624 gen_helper_maxsw4(vc
, va
, vb
);
2632 /* HW_MTPR (PALcode) */
2633 #ifndef CONFIG_USER_ONLY
2634 REQUIRE_TB_FLAG(TB_FLAGS_PAL_MODE
);
2635 vb
= load_gpr(ctx
, rb
);
2636 ret
= gen_mtpr(ctx
, vb
, insn
& 0xffff);
2643 /* HW_RET (PALcode) */
2644 #ifndef CONFIG_USER_ONLY
2645 REQUIRE_TB_FLAG(TB_FLAGS_PAL_MODE
);
2647 /* Pre-EV6 CPUs interpreted this as HW_REI, loading the return
2648 address from EXC_ADDR. This turns out to be useful for our
2649 emulation PALcode, so continue to accept it. */
2650 ctx
->lit
= vb
= tcg_temp_new();
2651 tcg_gen_ld_i64(vb
, cpu_env
, offsetof(CPUAlphaState
, exc_addr
));
2653 vb
= load_gpr(ctx
, rb
);
2655 tmp
= tcg_temp_new();
2656 tcg_gen_movi_i64(tmp
, 0);
2657 tcg_gen_st8_i64(tmp
, cpu_env
, offsetof(CPUAlphaState
, intr_flag
));
2658 tcg_gen_movi_i64(cpu_lock_addr
, -1);
2659 tcg_gen_andi_i64(tmp
, vb
, 1);
2660 tcg_gen_st8_i64(tmp
, cpu_env
, offsetof(CPUAlphaState
, pal_mode
));
2661 tcg_gen_andi_i64(cpu_pc
, vb
, ~3);
2662 ret
= EXIT_PC_UPDATED
;
2669 /* HW_ST (PALcode) */
2670 #ifndef CONFIG_USER_ONLY
2671 REQUIRE_TB_FLAG(TB_FLAGS_PAL_MODE
);
2673 TCGv addr
= tcg_temp_new();
2674 va
= load_gpr(ctx
, ra
);
2675 vb
= load_gpr(ctx
, rb
);
2677 tcg_gen_addi_i64(addr
, vb
, disp12
);
2678 switch ((insn
>> 12) & 0xF) {
2680 /* Longword physical access */
2681 gen_helper_stl_phys(cpu_env
, addr
, va
);
2684 /* Quadword physical access */
2685 gen_helper_stq_phys(cpu_env
, addr
, va
);
2688 /* Longword physical access with lock */
2689 gen_helper_stl_c_phys(dest_gpr(ctx
, ra
), cpu_env
, addr
, va
);
2692 /* Quadword physical access with lock */
2693 gen_helper_stq_c_phys(dest_gpr(ctx
, ra
), cpu_env
, addr
, va
);
2696 /* Longword virtual access */
2699 /* Quadword virtual access */
2720 /* Longword virtual access with alternate access mode */
2723 /* Quadword virtual access with alternate access mode */
2732 tcg_temp_free(addr
);
2740 gen_load_mem(ctx
, &gen_qemu_ldf
, ra
, rb
, disp16
, 1, 0);
2744 gen_load_mem(ctx
, &gen_qemu_ldg
, ra
, rb
, disp16
, 1, 0);
2748 gen_load_mem(ctx
, &gen_qemu_lds
, ra
, rb
, disp16
, 1, 0);
2752 gen_load_mem(ctx
, &tcg_gen_qemu_ld64
, ra
, rb
, disp16
, 1, 0);
2756 gen_store_mem(ctx
, &gen_qemu_stf
, ra
, rb
, disp16
, 1, 0);
2760 gen_store_mem(ctx
, &gen_qemu_stg
, ra
, rb
, disp16
, 1, 0);
2764 gen_store_mem(ctx
, &gen_qemu_sts
, ra
, rb
, disp16
, 1, 0);
2768 gen_store_mem(ctx
, &tcg_gen_qemu_st64
, ra
, rb
, disp16
, 1, 0);
2772 gen_load_mem(ctx
, &tcg_gen_qemu_ld32s
, ra
, rb
, disp16
, 0, 0);
2776 gen_load_mem(ctx
, &tcg_gen_qemu_ld64
, ra
, rb
, disp16
, 0, 0);
2780 gen_load_mem(ctx
, &gen_qemu_ldl_l
, ra
, rb
, disp16
, 0, 0);
2784 gen_load_mem(ctx
, &gen_qemu_ldq_l
, ra
, rb
, disp16
, 0, 0);
2788 gen_store_mem(ctx
, &tcg_gen_qemu_st32
, ra
, rb
, disp16
, 0, 0);
2792 gen_store_mem(ctx
, &tcg_gen_qemu_st64
, ra
, rb
, disp16
, 0, 0);
2796 ret
= gen_store_conditional(ctx
, ra
, rb
, disp16
, 0);
2800 ret
= gen_store_conditional(ctx
, ra
, rb
, disp16
, 1);
2804 ret
= gen_bdirect(ctx
, ra
, disp21
);
2806 case 0x31: /* FBEQ */
2807 ret
= gen_fbcond(ctx
, TCG_COND_EQ
, ra
, disp21
);
2809 case 0x32: /* FBLT */
2810 ret
= gen_fbcond(ctx
, TCG_COND_LT
, ra
, disp21
);
2812 case 0x33: /* FBLE */
2813 ret
= gen_fbcond(ctx
, TCG_COND_LE
, ra
, disp21
);
2817 ret
= gen_bdirect(ctx
, ra
, disp21
);
2819 case 0x35: /* FBNE */
2820 ret
= gen_fbcond(ctx
, TCG_COND_NE
, ra
, disp21
);
2822 case 0x36: /* FBGE */
2823 ret
= gen_fbcond(ctx
, TCG_COND_GE
, ra
, disp21
);
2825 case 0x37: /* FBGT */
2826 ret
= gen_fbcond(ctx
, TCG_COND_GT
, ra
, disp21
);
2830 ret
= gen_bcond(ctx
, TCG_COND_EQ
, ra
, disp21
, 1);
2834 ret
= gen_bcond(ctx
, TCG_COND_EQ
, ra
, disp21
, 0);
2838 ret
= gen_bcond(ctx
, TCG_COND_LT
, ra
, disp21
, 0);
2842 ret
= gen_bcond(ctx
, TCG_COND_LE
, ra
, disp21
, 0);
2846 ret
= gen_bcond(ctx
, TCG_COND_NE
, ra
, disp21
, 1);
2850 ret
= gen_bcond(ctx
, TCG_COND_NE
, ra
, disp21
, 0);
2854 ret
= gen_bcond(ctx
, TCG_COND_GE
, ra
, disp21
, 0);
2858 ret
= gen_bcond(ctx
, TCG_COND_GT
, ra
, disp21
, 0);
2861 ret
= gen_invalid(ctx
);
2868 void gen_intermediate_code(CPUAlphaState
*env
, struct TranslationBlock
*tb
)
2870 AlphaCPU
*cpu
= alpha_env_get_cpu(env
);
2871 CPUState
*cs
= CPU(cpu
);
2872 DisasContext ctx
, *ctxp
= &ctx
;
2873 target_ulong pc_start
;
2874 target_ulong pc_mask
;
2884 ctx
.mem_idx
= cpu_mmu_index(env
, false);
2885 ctx
.implver
= env
->implver
;
2886 ctx
.singlestep_enabled
= cs
->singlestep_enabled
;
2888 #ifdef CONFIG_USER_ONLY
2889 ctx
.ir
= cpu_std_ir
;
2891 ctx
.palbr
= env
->palbr
;
2892 ctx
.ir
= (tb
->flags
& TB_FLAGS_PAL_MODE
? cpu_pal_ir
: cpu_std_ir
);
2895 /* ??? Every TB begins with unset rounding mode, to be initialized on
2896 the first fp insn of the TB. Alternately we could define a proper
2897 default for every TB (e.g. QUAL_RM_N or QUAL_RM_D) and make sure
2898 to reset the FP_STATUS to that default at the end of any TB that
2899 changes the default. We could even (gasp) dynamiclly figure out
2900 what default would be most efficient given the running program. */
2902 /* Similarly for flush-to-zero. */
2906 max_insns
= tb
->cflags
& CF_COUNT_MASK
;
2907 if (max_insns
== 0) {
2908 max_insns
= CF_COUNT_MASK
;
2910 if (max_insns
> TCG_MAX_INSNS
) {
2911 max_insns
= TCG_MAX_INSNS
;
2914 if (in_superpage(&ctx
, pc_start
)) {
2915 pc_mask
= (1ULL << 41) - 1;
2917 pc_mask
= ~TARGET_PAGE_MASK
;
2922 tcg_gen_insn_start(ctx
.pc
);
2925 if (unlikely(cpu_breakpoint_test(cs
, ctx
.pc
, BP_ANY
))) {
2926 ret
= gen_excp(&ctx
, EXCP_DEBUG
, 0);
2927 /* The address covered by the breakpoint must be included in
2928 [tb->pc, tb->pc + tb->size) in order to for it to be
2929 properly cleared -- thus we increment the PC here so that
2930 the logic setting tb->size below does the right thing. */
2934 if (num_insns
== max_insns
&& (tb
->cflags
& CF_LAST_IO
)) {
2937 insn
= cpu_ldl_code(env
, ctx
.pc
);
2939 TCGV_UNUSED_I64(ctx
.zero
);
2940 TCGV_UNUSED_I64(ctx
.sink
);
2941 TCGV_UNUSED_I64(ctx
.lit
);
2944 ret
= translate_one(ctxp
, insn
);
2946 if (!TCGV_IS_UNUSED_I64(ctx
.sink
)) {
2947 tcg_gen_discard_i64(ctx
.sink
);
2948 tcg_temp_free(ctx
.sink
);
2950 if (!TCGV_IS_UNUSED_I64(ctx
.zero
)) {
2951 tcg_temp_free(ctx
.zero
);
2953 if (!TCGV_IS_UNUSED_I64(ctx
.lit
)) {
2954 tcg_temp_free(ctx
.lit
);
2957 /* If we reach a page boundary, are single stepping,
2958 or exhaust instruction count, stop generation. */
2960 && ((ctx
.pc
& pc_mask
) == 0
2961 || tcg_op_buf_full()
2962 || num_insns
>= max_insns
2964 || ctx
.singlestep_enabled
)) {
2965 ret
= EXIT_PC_STALE
;
2967 } while (ret
== NO_EXIT
);
2969 if (tb
->cflags
& CF_LAST_IO
) {
2978 tcg_gen_movi_i64(cpu_pc
, ctx
.pc
);
2980 case EXIT_PC_UPDATED
:
2981 if (ctx
.singlestep_enabled
) {
2982 gen_excp_1(EXCP_DEBUG
, 0);
2991 gen_tb_end(tb
, num_insns
);
2993 tb
->size
= ctx
.pc
- pc_start
;
2994 tb
->icount
= num_insns
;
2997 if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM
)) {
2998 qemu_log("IN: %s\n", lookup_symbol(pc_start
));
2999 log_target_disas(cs
, pc_start
, ctx
.pc
- pc_start
, 1);
3005 void restore_state_to_opc(CPUAlphaState
*env
, TranslationBlock
*tb
,