2 * Alpha emulation cpu translation for qemu.
4 * Copyright (c) 2007 Jocelyn Mayer
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2.1 of the License, or (at your option) any later version.
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
20 #include "qemu/osdep.h"
22 #include "sysemu/cpus.h"
23 #include "sysemu/cpu-timers.h"
24 #include "disas/disas.h"
25 #include "qemu/host-utils.h"
26 #include "exec/exec-all.h"
27 #include "tcg/tcg-op.h"
28 #include "exec/cpu_ldst.h"
29 #include "exec/helper-proto.h"
30 #include "exec/helper-gen.h"
31 #include "trace-tcg.h"
32 #include "exec/translator.h"
36 #undef ALPHA_DEBUG_DISAS
37 #define CONFIG_SOFTFLOAT_INLINE
39 #ifdef ALPHA_DEBUG_DISAS
40 # define LOG_DISAS(...) qemu_log_mask(CPU_LOG_TB_IN_ASM, ## __VA_ARGS__)
42 # define LOG_DISAS(...) do { } while (0)
45 typedef struct DisasContext DisasContext
;
47 DisasContextBase base
;
49 #ifndef CONFIG_USER_ONLY
55 /* implver and amask values for this CPU. */
59 /* Current rounding mode for this TB. */
61 /* Current flush-to-zero setting for this TB. */
64 /* The set of registers active in the current context. */
67 /* Temporaries for $31 and $f31 as source and destination. */
70 /* Temporary for immediate constants. */
74 /* Target-specific return values from translate_one, indicating the
75 state of the TB. Note that DISAS_NEXT indicates that we are not
77 #define DISAS_PC_UPDATED_NOCHAIN DISAS_TARGET_0
78 #define DISAS_PC_UPDATED DISAS_TARGET_1
79 #define DISAS_PC_STALE DISAS_TARGET_2
81 /* global register indexes */
82 static TCGv cpu_std_ir
[31];
83 static TCGv cpu_fir
[31];
85 static TCGv cpu_lock_addr
;
86 static TCGv cpu_lock_value
;
88 #ifndef CONFIG_USER_ONLY
89 static TCGv cpu_pal_ir
[31];
92 #include "exec/gen-icount.h"
94 void alpha_translate_init(void)
96 #define DEF_VAR(V) { &cpu_##V, #V, offsetof(CPUAlphaState, V) }
98 typedef struct { TCGv
*var
; const char *name
; int ofs
; } GlobalVar
;
99 static const GlobalVar vars
[] = {
107 /* Use the symbolic register names that match the disassembler. */
108 static const char greg_names
[31][4] = {
109 "v0", "t0", "t1", "t2", "t3", "t4", "t5", "t6",
110 "t7", "s0", "s1", "s2", "s3", "s4", "s5", "fp",
111 "a0", "a1", "a2", "a3", "a4", "a5", "t8", "t9",
112 "t10", "t11", "ra", "t12", "at", "gp", "sp"
114 static const char freg_names
[31][4] = {
115 "f0", "f1", "f2", "f3", "f4", "f5", "f6", "f7",
116 "f8", "f9", "f10", "f11", "f12", "f13", "f14", "f15",
117 "f16", "f17", "f18", "f19", "f20", "f21", "f22", "f23",
118 "f24", "f25", "f26", "f27", "f28", "f29", "f30"
120 #ifndef CONFIG_USER_ONLY
121 static const char shadow_names
[8][8] = {
122 "pal_t7", "pal_s0", "pal_s1", "pal_s2",
123 "pal_s3", "pal_s4", "pal_s5", "pal_t11"
129 for (i
= 0; i
< 31; i
++) {
130 cpu_std_ir
[i
] = tcg_global_mem_new_i64(cpu_env
,
131 offsetof(CPUAlphaState
, ir
[i
]),
135 for (i
= 0; i
< 31; i
++) {
136 cpu_fir
[i
] = tcg_global_mem_new_i64(cpu_env
,
137 offsetof(CPUAlphaState
, fir
[i
]),
141 #ifndef CONFIG_USER_ONLY
142 memcpy(cpu_pal_ir
, cpu_std_ir
, sizeof(cpu_pal_ir
));
143 for (i
= 0; i
< 8; i
++) {
144 int r
= (i
== 7 ? 25 : i
+ 8);
145 cpu_pal_ir
[r
] = tcg_global_mem_new_i64(cpu_env
,
146 offsetof(CPUAlphaState
,
152 for (i
= 0; i
< ARRAY_SIZE(vars
); ++i
) {
153 const GlobalVar
*v
= &vars
[i
];
154 *v
->var
= tcg_global_mem_new_i64(cpu_env
, v
->ofs
, v
->name
);
158 static TCGv
load_zero(DisasContext
*ctx
)
161 ctx
->zero
= tcg_const_i64(0);
166 static TCGv
dest_sink(DisasContext
*ctx
)
169 ctx
->sink
= tcg_temp_new();
174 static void free_context_temps(DisasContext
*ctx
)
177 tcg_gen_discard_i64(ctx
->sink
);
178 tcg_temp_free(ctx
->sink
);
182 tcg_temp_free(ctx
->zero
);
186 tcg_temp_free(ctx
->lit
);
191 static TCGv
load_gpr(DisasContext
*ctx
, unsigned reg
)
193 if (likely(reg
< 31)) {
196 return load_zero(ctx
);
200 static TCGv
load_gpr_lit(DisasContext
*ctx
, unsigned reg
,
201 uint8_t lit
, bool islit
)
204 ctx
->lit
= tcg_const_i64(lit
);
206 } else if (likely(reg
< 31)) {
209 return load_zero(ctx
);
213 static TCGv
dest_gpr(DisasContext
*ctx
, unsigned reg
)
215 if (likely(reg
< 31)) {
218 return dest_sink(ctx
);
222 static TCGv
load_fpr(DisasContext
*ctx
, unsigned reg
)
224 if (likely(reg
< 31)) {
227 return load_zero(ctx
);
231 static TCGv
dest_fpr(DisasContext
*ctx
, unsigned reg
)
233 if (likely(reg
< 31)) {
236 return dest_sink(ctx
);
240 static int get_flag_ofs(unsigned shift
)
242 int ofs
= offsetof(CPUAlphaState
, flags
);
243 #ifdef HOST_WORDS_BIGENDIAN
244 ofs
+= 3 - (shift
/ 8);
251 static void ld_flag_byte(TCGv val
, unsigned shift
)
253 tcg_gen_ld8u_i64(val
, cpu_env
, get_flag_ofs(shift
));
256 static void st_flag_byte(TCGv val
, unsigned shift
)
258 tcg_gen_st8_i64(val
, cpu_env
, get_flag_ofs(shift
));
261 static void gen_excp_1(int exception
, int error_code
)
265 tmp1
= tcg_const_i32(exception
);
266 tmp2
= tcg_const_i32(error_code
);
267 gen_helper_excp(cpu_env
, tmp1
, tmp2
);
268 tcg_temp_free_i32(tmp2
);
269 tcg_temp_free_i32(tmp1
);
272 static DisasJumpType
gen_excp(DisasContext
*ctx
, int exception
, int error_code
)
274 tcg_gen_movi_i64(cpu_pc
, ctx
->base
.pc_next
);
275 gen_excp_1(exception
, error_code
);
276 return DISAS_NORETURN
;
279 static inline DisasJumpType
gen_invalid(DisasContext
*ctx
)
281 return gen_excp(ctx
, EXCP_OPCDEC
, 0);
284 static inline void gen_qemu_ldf(TCGv t0
, TCGv t1
, int flags
)
286 TCGv_i32 tmp32
= tcg_temp_new_i32();
287 tcg_gen_qemu_ld_i32(tmp32
, t1
, flags
, MO_LEUL
);
288 gen_helper_memory_to_f(t0
, tmp32
);
289 tcg_temp_free_i32(tmp32
);
292 static inline void gen_qemu_ldg(TCGv t0
, TCGv t1
, int flags
)
294 TCGv tmp
= tcg_temp_new();
295 tcg_gen_qemu_ld_i64(tmp
, t1
, flags
, MO_LEQ
);
296 gen_helper_memory_to_g(t0
, tmp
);
300 static inline void gen_qemu_lds(TCGv t0
, TCGv t1
, int flags
)
302 TCGv_i32 tmp32
= tcg_temp_new_i32();
303 tcg_gen_qemu_ld_i32(tmp32
, t1
, flags
, MO_LEUL
);
304 gen_helper_memory_to_s(t0
, tmp32
);
305 tcg_temp_free_i32(tmp32
);
308 static inline void gen_qemu_ldl_l(TCGv t0
, TCGv t1
, int flags
)
310 tcg_gen_qemu_ld_i64(t0
, t1
, flags
, MO_LESL
);
311 tcg_gen_mov_i64(cpu_lock_addr
, t1
);
312 tcg_gen_mov_i64(cpu_lock_value
, t0
);
315 static inline void gen_qemu_ldq_l(TCGv t0
, TCGv t1
, int flags
)
317 tcg_gen_qemu_ld_i64(t0
, t1
, flags
, MO_LEQ
);
318 tcg_gen_mov_i64(cpu_lock_addr
, t1
);
319 tcg_gen_mov_i64(cpu_lock_value
, t0
);
322 static inline void gen_load_mem(DisasContext
*ctx
,
323 void (*tcg_gen_qemu_load
)(TCGv t0
, TCGv t1
,
325 int ra
, int rb
, int32_t disp16
, bool fp
,
330 /* LDQ_U with ra $31 is UNOP. Other various loads are forms of
331 prefetches, which we can treat as nops. No worries about
332 missed exceptions here. */
333 if (unlikely(ra
== 31)) {
337 tmp
= tcg_temp_new();
338 addr
= load_gpr(ctx
, rb
);
341 tcg_gen_addi_i64(tmp
, addr
, disp16
);
345 tcg_gen_andi_i64(tmp
, addr
, ~0x7);
349 va
= (fp
? cpu_fir
[ra
] : ctx
->ir
[ra
]);
350 tcg_gen_qemu_load(va
, addr
, ctx
->mem_idx
);
355 static inline void gen_qemu_stf(TCGv t0
, TCGv t1
, int flags
)
357 TCGv_i32 tmp32
= tcg_temp_new_i32();
358 gen_helper_f_to_memory(tmp32
, t0
);
359 tcg_gen_qemu_st_i32(tmp32
, t1
, flags
, MO_LEUL
);
360 tcg_temp_free_i32(tmp32
);
363 static inline void gen_qemu_stg(TCGv t0
, TCGv t1
, int flags
)
365 TCGv tmp
= tcg_temp_new();
366 gen_helper_g_to_memory(tmp
, t0
);
367 tcg_gen_qemu_st_i64(tmp
, t1
, flags
, MO_LEQ
);
371 static inline void gen_qemu_sts(TCGv t0
, TCGv t1
, int flags
)
373 TCGv_i32 tmp32
= tcg_temp_new_i32();
374 gen_helper_s_to_memory(tmp32
, t0
);
375 tcg_gen_qemu_st_i32(tmp32
, t1
, flags
, MO_LEUL
);
376 tcg_temp_free_i32(tmp32
);
379 static inline void gen_store_mem(DisasContext
*ctx
,
380 void (*tcg_gen_qemu_store
)(TCGv t0
, TCGv t1
,
382 int ra
, int rb
, int32_t disp16
, bool fp
,
387 tmp
= tcg_temp_new();
388 addr
= load_gpr(ctx
, rb
);
391 tcg_gen_addi_i64(tmp
, addr
, disp16
);
395 tcg_gen_andi_i64(tmp
, addr
, ~0x7);
399 va
= (fp
? load_fpr(ctx
, ra
) : load_gpr(ctx
, ra
));
400 tcg_gen_qemu_store(va
, addr
, ctx
->mem_idx
);
405 static DisasJumpType
gen_store_conditional(DisasContext
*ctx
, int ra
, int rb
,
406 int32_t disp16
, int mem_idx
,
409 TCGLabel
*lab_fail
, *lab_done
;
412 addr
= tcg_temp_new_i64();
413 tcg_gen_addi_i64(addr
, load_gpr(ctx
, rb
), disp16
);
414 free_context_temps(ctx
);
416 lab_fail
= gen_new_label();
417 lab_done
= gen_new_label();
418 tcg_gen_brcond_i64(TCG_COND_NE
, addr
, cpu_lock_addr
, lab_fail
);
419 tcg_temp_free_i64(addr
);
421 val
= tcg_temp_new_i64();
422 tcg_gen_atomic_cmpxchg_i64(val
, cpu_lock_addr
, cpu_lock_value
,
423 load_gpr(ctx
, ra
), mem_idx
, op
);
424 free_context_temps(ctx
);
427 tcg_gen_setcond_i64(TCG_COND_EQ
, ctx
->ir
[ra
], val
, cpu_lock_value
);
429 tcg_temp_free_i64(val
);
430 tcg_gen_br(lab_done
);
432 gen_set_label(lab_fail
);
434 tcg_gen_movi_i64(ctx
->ir
[ra
], 0);
437 gen_set_label(lab_done
);
438 tcg_gen_movi_i64(cpu_lock_addr
, -1);
442 static bool in_superpage(DisasContext
*ctx
, int64_t addr
)
444 #ifndef CONFIG_USER_ONLY
445 return ((ctx
->tbflags
& ENV_FLAG_PS_USER
) == 0
446 && addr
>> TARGET_VIRT_ADDR_SPACE_BITS
== -1
447 && ((addr
>> 41) & 3) == 2);
453 static bool use_exit_tb(DisasContext
*ctx
)
455 return ((tb_cflags(ctx
->base
.tb
) & CF_LAST_IO
)
456 || ctx
->base
.singlestep_enabled
460 static bool use_goto_tb(DisasContext
*ctx
, uint64_t dest
)
462 /* Suppress goto_tb in the case of single-steping and IO. */
463 if (unlikely(use_exit_tb(ctx
))) {
466 #ifndef CONFIG_USER_ONLY
467 /* If the destination is in the superpage, the page perms can't change. */
468 if (in_superpage(ctx
, dest
)) {
471 /* Check for the dest on the same page as the start of the TB. */
472 return ((ctx
->base
.tb
->pc
^ dest
) & TARGET_PAGE_MASK
) == 0;
478 static DisasJumpType
gen_bdirect(DisasContext
*ctx
, int ra
, int32_t disp
)
480 uint64_t dest
= ctx
->base
.pc_next
+ (disp
<< 2);
483 tcg_gen_movi_i64(ctx
->ir
[ra
], ctx
->base
.pc_next
);
486 /* Notice branch-to-next; used to initialize RA with the PC. */
489 } else if (use_goto_tb(ctx
, dest
)) {
491 tcg_gen_movi_i64(cpu_pc
, dest
);
492 tcg_gen_exit_tb(ctx
->base
.tb
, 0);
493 return DISAS_NORETURN
;
495 tcg_gen_movi_i64(cpu_pc
, dest
);
496 return DISAS_PC_UPDATED
;
500 static DisasJumpType
gen_bcond_internal(DisasContext
*ctx
, TCGCond cond
,
501 TCGv cmp
, int32_t disp
)
503 uint64_t dest
= ctx
->base
.pc_next
+ (disp
<< 2);
504 TCGLabel
*lab_true
= gen_new_label();
506 if (use_goto_tb(ctx
, dest
)) {
507 tcg_gen_brcondi_i64(cond
, cmp
, 0, lab_true
);
510 tcg_gen_movi_i64(cpu_pc
, ctx
->base
.pc_next
);
511 tcg_gen_exit_tb(ctx
->base
.tb
, 0);
513 gen_set_label(lab_true
);
515 tcg_gen_movi_i64(cpu_pc
, dest
);
516 tcg_gen_exit_tb(ctx
->base
.tb
, 1);
518 return DISAS_NORETURN
;
520 TCGv_i64 z
= tcg_const_i64(0);
521 TCGv_i64 d
= tcg_const_i64(dest
);
522 TCGv_i64 p
= tcg_const_i64(ctx
->base
.pc_next
);
524 tcg_gen_movcond_i64(cond
, cpu_pc
, cmp
, z
, d
, p
);
526 tcg_temp_free_i64(z
);
527 tcg_temp_free_i64(d
);
528 tcg_temp_free_i64(p
);
529 return DISAS_PC_UPDATED
;
533 static DisasJumpType
gen_bcond(DisasContext
*ctx
, TCGCond cond
, int ra
,
534 int32_t disp
, int mask
)
537 TCGv tmp
= tcg_temp_new();
540 tcg_gen_andi_i64(tmp
, load_gpr(ctx
, ra
), 1);
541 ret
= gen_bcond_internal(ctx
, cond
, tmp
, disp
);
545 return gen_bcond_internal(ctx
, cond
, load_gpr(ctx
, ra
), disp
);
548 /* Fold -0.0 for comparison with COND. */
550 static void gen_fold_mzero(TCGCond cond
, TCGv dest
, TCGv src
)
552 uint64_t mzero
= 1ull << 63;
557 /* For <= or >, the -0.0 value directly compares the way we want. */
558 tcg_gen_mov_i64(dest
, src
);
563 /* For == or !=, we can simply mask off the sign bit and compare. */
564 tcg_gen_andi_i64(dest
, src
, mzero
- 1);
569 /* For >= or <, map -0.0 to +0.0 via comparison and mask. */
570 tcg_gen_setcondi_i64(TCG_COND_NE
, dest
, src
, mzero
);
571 tcg_gen_neg_i64(dest
, dest
);
572 tcg_gen_and_i64(dest
, dest
, src
);
580 static DisasJumpType
gen_fbcond(DisasContext
*ctx
, TCGCond cond
, int ra
,
583 TCGv cmp_tmp
= tcg_temp_new();
586 gen_fold_mzero(cond
, cmp_tmp
, load_fpr(ctx
, ra
));
587 ret
= gen_bcond_internal(ctx
, cond
, cmp_tmp
, disp
);
588 tcg_temp_free(cmp_tmp
);
592 static void gen_fcmov(DisasContext
*ctx
, TCGCond cond
, int ra
, int rb
, int rc
)
597 vb
= load_fpr(ctx
, rb
);
599 gen_fold_mzero(cond
, va
, load_fpr(ctx
, ra
));
601 tcg_gen_movcond_i64(cond
, dest_fpr(ctx
, rc
), va
, z
, vb
, load_fpr(ctx
, rc
));
606 #define QUAL_RM_N 0x080 /* Round mode nearest even */
607 #define QUAL_RM_C 0x000 /* Round mode chopped */
608 #define QUAL_RM_M 0x040 /* Round mode minus infinity */
609 #define QUAL_RM_D 0x0c0 /* Round mode dynamic */
610 #define QUAL_RM_MASK 0x0c0
612 #define QUAL_U 0x100 /* Underflow enable (fp output) */
613 #define QUAL_V 0x100 /* Overflow enable (int output) */
614 #define QUAL_S 0x400 /* Software completion enable */
615 #define QUAL_I 0x200 /* Inexact detection enable */
617 static void gen_qual_roundmode(DisasContext
*ctx
, int fn11
)
621 fn11
&= QUAL_RM_MASK
;
622 if (fn11
== ctx
->tb_rm
) {
627 tmp
= tcg_temp_new_i32();
630 tcg_gen_movi_i32(tmp
, float_round_nearest_even
);
633 tcg_gen_movi_i32(tmp
, float_round_to_zero
);
636 tcg_gen_movi_i32(tmp
, float_round_down
);
639 tcg_gen_ld8u_i32(tmp
, cpu_env
,
640 offsetof(CPUAlphaState
, fpcr_dyn_round
));
644 #if defined(CONFIG_SOFTFLOAT_INLINE)
645 /* ??? The "fpu/softfloat.h" interface is to call set_float_rounding_mode.
646 With CONFIG_SOFTFLOAT that expands to an out-of-line call that just
647 sets the one field. */
648 tcg_gen_st8_i32(tmp
, cpu_env
,
649 offsetof(CPUAlphaState
, fp_status
.float_rounding_mode
));
651 gen_helper_setroundmode(tmp
);
654 tcg_temp_free_i32(tmp
);
657 static void gen_qual_flushzero(DisasContext
*ctx
, int fn11
)
662 if (fn11
== ctx
->tb_ftz
) {
667 tmp
= tcg_temp_new_i32();
669 /* Underflow is enabled, use the FPCR setting. */
670 tcg_gen_ld8u_i32(tmp
, cpu_env
,
671 offsetof(CPUAlphaState
, fpcr_flush_to_zero
));
673 /* Underflow is disabled, force flush-to-zero. */
674 tcg_gen_movi_i32(tmp
, 1);
677 #if defined(CONFIG_SOFTFLOAT_INLINE)
678 tcg_gen_st8_i32(tmp
, cpu_env
,
679 offsetof(CPUAlphaState
, fp_status
.flush_to_zero
));
681 gen_helper_setflushzero(tmp
);
684 tcg_temp_free_i32(tmp
);
687 static TCGv
gen_ieee_input(DisasContext
*ctx
, int reg
, int fn11
, int is_cmp
)
691 if (unlikely(reg
== 31)) {
692 val
= load_zero(ctx
);
695 if ((fn11
& QUAL_S
) == 0) {
697 gen_helper_ieee_input_cmp(cpu_env
, val
);
699 gen_helper_ieee_input(cpu_env
, val
);
702 #ifndef CONFIG_USER_ONLY
703 /* In system mode, raise exceptions for denormals like real
704 hardware. In user mode, proceed as if the OS completion
705 handler is handling the denormal as per spec. */
706 gen_helper_ieee_input_s(cpu_env
, val
);
713 static void gen_fp_exc_raise(int rc
, int fn11
)
715 /* ??? We ought to be able to do something with imprecise exceptions.
716 E.g. notice we're still in the trap shadow of something within the
717 TB and do not generate the code to signal the exception; end the TB
718 when an exception is forced to arrive, either by consumption of a
719 register value or TRAPB or EXCB. */
723 if (!(fn11
& QUAL_U
)) {
724 /* Note that QUAL_U == QUAL_V, so ignore either. */
725 ignore
|= FPCR_UNF
| FPCR_IOV
;
727 if (!(fn11
& QUAL_I
)) {
730 ign
= tcg_const_i32(ignore
);
732 /* ??? Pass in the regno of the destination so that the helper can
733 set EXC_MASK, which contains a bitmask of destination registers
734 that have caused arithmetic traps. A simple userspace emulation
735 does not require this. We do need it for a guest kernel's entArith,
736 or if we were to do something clever with imprecise exceptions. */
737 reg
= tcg_const_i32(rc
+ 32);
739 gen_helper_fp_exc_raise_s(cpu_env
, ign
, reg
);
741 gen_helper_fp_exc_raise(cpu_env
, ign
, reg
);
744 tcg_temp_free_i32(reg
);
745 tcg_temp_free_i32(ign
);
748 static void gen_cvtlq(TCGv vc
, TCGv vb
)
750 TCGv tmp
= tcg_temp_new();
752 /* The arithmetic right shift here, plus the sign-extended mask below
753 yields a sign-extended result without an explicit ext32s_i64. */
754 tcg_gen_shri_i64(tmp
, vb
, 29);
755 tcg_gen_sari_i64(vc
, vb
, 32);
756 tcg_gen_deposit_i64(vc
, vc
, tmp
, 0, 30);
761 static void gen_ieee_arith2(DisasContext
*ctx
,
762 void (*helper
)(TCGv
, TCGv_ptr
, TCGv
),
763 int rb
, int rc
, int fn11
)
767 gen_qual_roundmode(ctx
, fn11
);
768 gen_qual_flushzero(ctx
, fn11
);
770 vb
= gen_ieee_input(ctx
, rb
, fn11
, 0);
771 helper(dest_fpr(ctx
, rc
), cpu_env
, vb
);
773 gen_fp_exc_raise(rc
, fn11
);
776 #define IEEE_ARITH2(name) \
777 static inline void glue(gen_, name)(DisasContext *ctx, \
778 int rb, int rc, int fn11) \
780 gen_ieee_arith2(ctx, gen_helper_##name, rb, rc, fn11); \
787 static void gen_cvttq(DisasContext
*ctx
, int rb
, int rc
, int fn11
)
791 /* No need to set flushzero, since we have an integer output. */
792 vb
= gen_ieee_input(ctx
, rb
, fn11
, 0);
793 vc
= dest_fpr(ctx
, rc
);
795 /* Almost all integer conversions use cropped rounding;
796 special case that. */
797 if ((fn11
& QUAL_RM_MASK
) == QUAL_RM_C
) {
798 gen_helper_cvttq_c(vc
, cpu_env
, vb
);
800 gen_qual_roundmode(ctx
, fn11
);
801 gen_helper_cvttq(vc
, cpu_env
, vb
);
803 gen_fp_exc_raise(rc
, fn11
);
806 static void gen_ieee_intcvt(DisasContext
*ctx
,
807 void (*helper
)(TCGv
, TCGv_ptr
, TCGv
),
808 int rb
, int rc
, int fn11
)
812 gen_qual_roundmode(ctx
, fn11
);
813 vb
= load_fpr(ctx
, rb
);
814 vc
= dest_fpr(ctx
, rc
);
816 /* The only exception that can be raised by integer conversion
817 is inexact. Thus we only need to worry about exceptions when
818 inexact handling is requested. */
820 helper(vc
, cpu_env
, vb
);
821 gen_fp_exc_raise(rc
, fn11
);
823 helper(vc
, cpu_env
, vb
);
827 #define IEEE_INTCVT(name) \
828 static inline void glue(gen_, name)(DisasContext *ctx, \
829 int rb, int rc, int fn11) \
831 gen_ieee_intcvt(ctx, gen_helper_##name, rb, rc, fn11); \
836 static void gen_cpy_mask(TCGv vc
, TCGv va
, TCGv vb
, bool inv_a
, uint64_t mask
)
838 TCGv vmask
= tcg_const_i64(mask
);
839 TCGv tmp
= tcg_temp_new_i64();
842 tcg_gen_andc_i64(tmp
, vmask
, va
);
844 tcg_gen_and_i64(tmp
, va
, vmask
);
847 tcg_gen_andc_i64(vc
, vb
, vmask
);
848 tcg_gen_or_i64(vc
, vc
, tmp
);
850 tcg_temp_free(vmask
);
854 static void gen_ieee_arith3(DisasContext
*ctx
,
855 void (*helper
)(TCGv
, TCGv_ptr
, TCGv
, TCGv
),
856 int ra
, int rb
, int rc
, int fn11
)
860 gen_qual_roundmode(ctx
, fn11
);
861 gen_qual_flushzero(ctx
, fn11
);
863 va
= gen_ieee_input(ctx
, ra
, fn11
, 0);
864 vb
= gen_ieee_input(ctx
, rb
, fn11
, 0);
865 vc
= dest_fpr(ctx
, rc
);
866 helper(vc
, cpu_env
, va
, vb
);
868 gen_fp_exc_raise(rc
, fn11
);
871 #define IEEE_ARITH3(name) \
872 static inline void glue(gen_, name)(DisasContext *ctx, \
873 int ra, int rb, int rc, int fn11) \
875 gen_ieee_arith3(ctx, gen_helper_##name, ra, rb, rc, fn11); \
886 static void gen_ieee_compare(DisasContext
*ctx
,
887 void (*helper
)(TCGv
, TCGv_ptr
, TCGv
, TCGv
),
888 int ra
, int rb
, int rc
, int fn11
)
892 va
= gen_ieee_input(ctx
, ra
, fn11
, 1);
893 vb
= gen_ieee_input(ctx
, rb
, fn11
, 1);
894 vc
= dest_fpr(ctx
, rc
);
895 helper(vc
, cpu_env
, va
, vb
);
897 gen_fp_exc_raise(rc
, fn11
);
900 #define IEEE_CMP3(name) \
901 static inline void glue(gen_, name)(DisasContext *ctx, \
902 int ra, int rb, int rc, int fn11) \
904 gen_ieee_compare(ctx, gen_helper_##name, ra, rb, rc, fn11); \
/*
 * Expand the 8-bit ZAPNOT literal into a 64-bit byte mask:
 * bit I of LIT selects byte I of the result (0xff if set, 0x00 if clear).
 */
static inline uint64_t zapnot_mask(uint8_t lit)
{
    uint64_t mask = 0;
    int i;

    for (i = 0; i < 8; ++i) {
        if ((lit >> i) & 1) {
            mask |= 0xffull << (i * 8);
        }
    }
    return mask;
}
924 /* Implement zapnot with an immediate operand, which expands to some
925 form of immediate AND. This is a basic building block in the
926 definition of many of the other byte manipulation instructions. */
927 static void gen_zapnoti(TCGv dest
, TCGv src
, uint8_t lit
)
931 tcg_gen_movi_i64(dest
, 0);
934 tcg_gen_ext8u_i64(dest
, src
);
937 tcg_gen_ext16u_i64(dest
, src
);
940 tcg_gen_ext32u_i64(dest
, src
);
943 tcg_gen_mov_i64(dest
, src
);
946 tcg_gen_andi_i64(dest
, src
, zapnot_mask(lit
));
951 /* EXTWH, EXTLH, EXTQH */
952 static void gen_ext_h(DisasContext
*ctx
, TCGv vc
, TCGv va
, int rb
, bool islit
,
953 uint8_t lit
, uint8_t byte_mask
)
956 int pos
= (64 - lit
* 8) & 0x3f;
957 int len
= cto32(byte_mask
) * 8;
959 tcg_gen_deposit_z_i64(vc
, va
, pos
, len
- pos
);
961 tcg_gen_movi_i64(vc
, 0);
964 TCGv tmp
= tcg_temp_new();
965 tcg_gen_shli_i64(tmp
, load_gpr(ctx
, rb
), 3);
966 tcg_gen_neg_i64(tmp
, tmp
);
967 tcg_gen_andi_i64(tmp
, tmp
, 0x3f);
968 tcg_gen_shl_i64(vc
, va
, tmp
);
971 gen_zapnoti(vc
, vc
, byte_mask
);
974 /* EXTBL, EXTWL, EXTLL, EXTQL */
975 static void gen_ext_l(DisasContext
*ctx
, TCGv vc
, TCGv va
, int rb
, bool islit
,
976 uint8_t lit
, uint8_t byte_mask
)
979 int pos
= (lit
& 7) * 8;
980 int len
= cto32(byte_mask
) * 8;
981 if (pos
+ len
>= 64) {
984 tcg_gen_extract_i64(vc
, va
, pos
, len
);
986 TCGv tmp
= tcg_temp_new();
987 tcg_gen_andi_i64(tmp
, load_gpr(ctx
, rb
), 7);
988 tcg_gen_shli_i64(tmp
, tmp
, 3);
989 tcg_gen_shr_i64(vc
, va
, tmp
);
991 gen_zapnoti(vc
, vc
, byte_mask
);
995 /* INSWH, INSLH, INSQH */
996 static void gen_ins_h(DisasContext
*ctx
, TCGv vc
, TCGv va
, int rb
, bool islit
,
997 uint8_t lit
, uint8_t byte_mask
)
1000 int pos
= 64 - (lit
& 7) * 8;
1001 int len
= cto32(byte_mask
) * 8;
1003 tcg_gen_extract_i64(vc
, va
, pos
, len
- pos
);
1005 tcg_gen_movi_i64(vc
, 0);
1008 TCGv tmp
= tcg_temp_new();
1009 TCGv shift
= tcg_temp_new();
1011 /* The instruction description has us left-shift the byte mask
1012 and extract bits <15:8> and apply that zap at the end. This
1013 is equivalent to simply performing the zap first and shifting
1015 gen_zapnoti(tmp
, va
, byte_mask
);
1017 /* If (B & 7) == 0, we need to shift by 64 and leave a zero. Do this
1018 portably by splitting the shift into two parts: shift_count-1 and 1.
1019 Arrange for the -1 by using ones-complement instead of
1020 twos-complement in the negation: ~(B * 8) & 63. */
1022 tcg_gen_shli_i64(shift
, load_gpr(ctx
, rb
), 3);
1023 tcg_gen_not_i64(shift
, shift
);
1024 tcg_gen_andi_i64(shift
, shift
, 0x3f);
1026 tcg_gen_shr_i64(vc
, tmp
, shift
);
1027 tcg_gen_shri_i64(vc
, vc
, 1);
1028 tcg_temp_free(shift
);
1033 /* INSBL, INSWL, INSLL, INSQL */
1034 static void gen_ins_l(DisasContext
*ctx
, TCGv vc
, TCGv va
, int rb
, bool islit
,
1035 uint8_t lit
, uint8_t byte_mask
)
1038 int pos
= (lit
& 7) * 8;
1039 int len
= cto32(byte_mask
) * 8;
1040 if (pos
+ len
> 64) {
1043 tcg_gen_deposit_z_i64(vc
, va
, pos
, len
);
1045 TCGv tmp
= tcg_temp_new();
1046 TCGv shift
= tcg_temp_new();
1048 /* The instruction description has us left-shift the byte mask
1049 and extract bits <15:8> and apply that zap at the end. This
1050 is equivalent to simply performing the zap first and shifting
1052 gen_zapnoti(tmp
, va
, byte_mask
);
1054 tcg_gen_andi_i64(shift
, load_gpr(ctx
, rb
), 7);
1055 tcg_gen_shli_i64(shift
, shift
, 3);
1056 tcg_gen_shl_i64(vc
, tmp
, shift
);
1057 tcg_temp_free(shift
);
1062 /* MSKWH, MSKLH, MSKQH */
1063 static void gen_msk_h(DisasContext
*ctx
, TCGv vc
, TCGv va
, int rb
, bool islit
,
1064 uint8_t lit
, uint8_t byte_mask
)
1067 gen_zapnoti(vc
, va
, ~((byte_mask
<< (lit
& 7)) >> 8));
1069 TCGv shift
= tcg_temp_new();
1070 TCGv mask
= tcg_temp_new();
1072 /* The instruction description is as above, where the byte_mask
1073 is shifted left, and then we extract bits <15:8>. This can be
1074 emulated with a right-shift on the expanded byte mask. This
1075 requires extra care because for an input <2:0> == 0 we need a
1076 shift of 64 bits in order to generate a zero. This is done by
1077 splitting the shift into two parts, the variable shift - 1
1078 followed by a constant 1 shift. The code we expand below is
1079 equivalent to ~(B * 8) & 63. */
1081 tcg_gen_shli_i64(shift
, load_gpr(ctx
, rb
), 3);
1082 tcg_gen_not_i64(shift
, shift
);
1083 tcg_gen_andi_i64(shift
, shift
, 0x3f);
1084 tcg_gen_movi_i64(mask
, zapnot_mask (byte_mask
));
1085 tcg_gen_shr_i64(mask
, mask
, shift
);
1086 tcg_gen_shri_i64(mask
, mask
, 1);
1088 tcg_gen_andc_i64(vc
, va
, mask
);
1090 tcg_temp_free(mask
);
1091 tcg_temp_free(shift
);
1095 /* MSKBL, MSKWL, MSKLL, MSKQL */
1096 static void gen_msk_l(DisasContext
*ctx
, TCGv vc
, TCGv va
, int rb
, bool islit
,
1097 uint8_t lit
, uint8_t byte_mask
)
1100 gen_zapnoti(vc
, va
, ~(byte_mask
<< (lit
& 7)));
1102 TCGv shift
= tcg_temp_new();
1103 TCGv mask
= tcg_temp_new();
1105 tcg_gen_andi_i64(shift
, load_gpr(ctx
, rb
), 7);
1106 tcg_gen_shli_i64(shift
, shift
, 3);
1107 tcg_gen_movi_i64(mask
, zapnot_mask(byte_mask
));
1108 tcg_gen_shl_i64(mask
, mask
, shift
);
1110 tcg_gen_andc_i64(vc
, va
, mask
);
1112 tcg_temp_free(mask
);
1113 tcg_temp_free(shift
);
1117 static void gen_rx(DisasContext
*ctx
, int ra
, int set
)
1122 ld_flag_byte(ctx
->ir
[ra
], ENV_FLAG_RX_SHIFT
);
1125 tmp
= tcg_const_i64(set
);
1126 st_flag_byte(ctx
->ir
[ra
], ENV_FLAG_RX_SHIFT
);
1130 static DisasJumpType
gen_call_pal(DisasContext
*ctx
, int palcode
)
1132 /* We're emulating OSF/1 PALcode. Many of these are trivial access
1133 to internal cpu registers. */
1135 /* Unprivileged PAL call */
1136 if (palcode
>= 0x80 && palcode
< 0xC0) {
1140 /* No-op inside QEMU. */
1144 tcg_gen_ld_i64(ctx
->ir
[IR_V0
], cpu_env
,
1145 offsetof(CPUAlphaState
, unique
));
1149 tcg_gen_st_i64(ctx
->ir
[IR_A0
], cpu_env
,
1150 offsetof(CPUAlphaState
, unique
));
1159 #ifndef CONFIG_USER_ONLY
1160 /* Privileged PAL code */
1161 if (palcode
< 0x40 && (ctx
->tbflags
& ENV_FLAG_PS_USER
) == 0) {
1165 /* No-op inside QEMU. */
1169 /* No-op inside QEMU. */
1173 tcg_gen_st_i64(ctx
->ir
[IR_A0
], cpu_env
,
1174 offsetof(CPUAlphaState
, vptptr
));
1178 tcg_gen_st_i64(ctx
->ir
[IR_A0
], cpu_env
,
1179 offsetof(CPUAlphaState
, sysval
));
1183 tcg_gen_ld_i64(ctx
->ir
[IR_V0
], cpu_env
,
1184 offsetof(CPUAlphaState
, sysval
));
1189 /* Note that we already know we're in kernel mode, so we know
1190 that PS only contains the 3 IPL bits. */
1191 ld_flag_byte(ctx
->ir
[IR_V0
], ENV_FLAG_PS_SHIFT
);
1193 /* But make sure and store only the 3 IPL bits from the user. */
1195 TCGv tmp
= tcg_temp_new();
1196 tcg_gen_andi_i64(tmp
, ctx
->ir
[IR_A0
], PS_INT_MASK
);
1197 st_flag_byte(tmp
, ENV_FLAG_PS_SHIFT
);
1201 /* Allow interrupts to be recognized right away. */
1202 tcg_gen_movi_i64(cpu_pc
, ctx
->base
.pc_next
);
1203 return DISAS_PC_UPDATED_NOCHAIN
;
1207 ld_flag_byte(ctx
->ir
[IR_V0
], ENV_FLAG_PS_SHIFT
);
1212 tcg_gen_st_i64(ctx
->ir
[IR_A0
], cpu_env
,
1213 offsetof(CPUAlphaState
, usp
));
1217 tcg_gen_ld_i64(ctx
->ir
[IR_V0
], cpu_env
,
1218 offsetof(CPUAlphaState
, usp
));
1222 tcg_gen_ld32s_i64(ctx
->ir
[IR_V0
], cpu_env
,
1223 -offsetof(AlphaCPU
, env
) + offsetof(CPUState
, cpu_index
));
1229 TCGv_i32 tmp
= tcg_const_i32(1);
1230 tcg_gen_st_i32(tmp
, cpu_env
, -offsetof(AlphaCPU
, env
) +
1231 offsetof(CPUState
, halted
));
1232 tcg_temp_free_i32(tmp
);
1234 tcg_gen_movi_i64(ctx
->ir
[IR_V0
], 0);
1235 return gen_excp(ctx
, EXCP_HALTED
, 0);
1244 return gen_invalid(ctx
);
1247 #ifdef CONFIG_USER_ONLY
1248 return gen_excp(ctx
, EXCP_CALL_PAL
, palcode
);
1251 TCGv tmp
= tcg_temp_new();
1252 uint64_t exc_addr
= ctx
->base
.pc_next
;
1253 uint64_t entry
= ctx
->palbr
;
1255 if (ctx
->tbflags
& ENV_FLAG_PAL_MODE
) {
1258 tcg_gen_movi_i64(tmp
, 1);
1259 st_flag_byte(tmp
, ENV_FLAG_PAL_SHIFT
);
1262 tcg_gen_movi_i64(tmp
, exc_addr
);
1263 tcg_gen_st_i64(tmp
, cpu_env
, offsetof(CPUAlphaState
, exc_addr
));
1266 entry
+= (palcode
& 0x80
1267 ? 0x2000 + (palcode
- 0x80) * 64
1268 : 0x1000 + palcode
* 64);
1270 /* Since the destination is running in PALmode, we don't really
1271 need the page permissions check. We'll see the existence of
1272 the page when we create the TB, and we'll flush all TBs if
1273 we change the PAL base register. */
1274 if (!use_exit_tb(ctx
)) {
1276 tcg_gen_movi_i64(cpu_pc
, entry
);
1277 tcg_gen_exit_tb(ctx
->base
.tb
, 0);
1278 return DISAS_NORETURN
;
1280 tcg_gen_movi_i64(cpu_pc
, entry
);
1281 return DISAS_PC_UPDATED
;
1287 #ifndef CONFIG_USER_ONLY
1289 #define PR_LONG 0x200000
1291 static int cpu_pr_data(int pr
)
1294 case 2: return offsetof(CPUAlphaState
, pcc_ofs
) | PR_LONG
;
1295 case 3: return offsetof(CPUAlphaState
, trap_arg0
);
1296 case 4: return offsetof(CPUAlphaState
, trap_arg1
);
1297 case 5: return offsetof(CPUAlphaState
, trap_arg2
);
1298 case 6: return offsetof(CPUAlphaState
, exc_addr
);
1299 case 7: return offsetof(CPUAlphaState
, palbr
);
1300 case 8: return offsetof(CPUAlphaState
, ptbr
);
1301 case 9: return offsetof(CPUAlphaState
, vptptr
);
1302 case 10: return offsetof(CPUAlphaState
, unique
);
1303 case 11: return offsetof(CPUAlphaState
, sysval
);
1304 case 12: return offsetof(CPUAlphaState
, usp
);
1307 return offsetof(CPUAlphaState
, scratch
[pr
- 40]);
1310 return offsetof(CPUAlphaState
, alarm_expire
);
1315 static DisasJumpType
gen_mfpr(DisasContext
*ctx
, TCGv va
, int regno
)
1317 void (*helper
)(TCGv
);
1322 /* Accessing the "non-shadow" general registers. */
1323 regno
= regno
== 39 ? 25 : regno
- 32 + 8;
1324 tcg_gen_mov_i64(va
, cpu_std_ir
[regno
]);
1327 case 250: /* WALLTIME */
1328 helper
= gen_helper_get_walltime
;
1330 case 249: /* VMTIME */
1331 helper
= gen_helper_get_vmtime
;
1333 if (tb_cflags(ctx
->base
.tb
) & CF_USE_ICOUNT
) {
1336 return DISAS_PC_STALE
;
1343 ld_flag_byte(va
, ENV_FLAG_PS_SHIFT
);
1346 ld_flag_byte(va
, ENV_FLAG_FEN_SHIFT
);
1350 /* The basic registers are data only, and unknown registers
1351 are read-zero, write-ignore. */
1352 data
= cpu_pr_data(regno
);
1354 tcg_gen_movi_i64(va
, 0);
1355 } else if (data
& PR_LONG
) {
1356 tcg_gen_ld32s_i64(va
, cpu_env
, data
& ~PR_LONG
);
1358 tcg_gen_ld_i64(va
, cpu_env
, data
);
1366 static DisasJumpType
gen_mtpr(DisasContext
*ctx
, TCGv vb
, int regno
)
1369 DisasJumpType ret
= DISAS_NEXT
;
1374 gen_helper_tbia(cpu_env
);
1379 gen_helper_tbis(cpu_env
, vb
);
1385 TCGv_i32 tmp
= tcg_const_i32(1);
1386 tcg_gen_st_i32(tmp
, cpu_env
, -offsetof(AlphaCPU
, env
) +
1387 offsetof(CPUState
, halted
));
1388 tcg_temp_free_i32(tmp
);
1390 return gen_excp(ctx
, EXCP_HALTED
, 0);
1394 gen_helper_halt(vb
);
1395 return DISAS_PC_STALE
;
1399 if (tb_cflags(ctx
->base
.tb
) & CF_USE_ICOUNT
) {
1401 ret
= DISAS_PC_STALE
;
1403 gen_helper_set_alarm(cpu_env
, vb
);
1408 tcg_gen_st_i64(vb
, cpu_env
, offsetof(CPUAlphaState
, palbr
));
1409 /* Changing the PAL base register implies un-chaining all of the TBs
1410 that ended with a CALL_PAL. Since the base register usually only
1411 changes during boot, flushing everything works well. */
1412 gen_helper_tb_flush(cpu_env
);
1413 return DISAS_PC_STALE
;
1416 /* Accessing the "non-shadow" general registers. */
1417 regno
= regno
== 39 ? 25 : regno
- 32 + 8;
1418 tcg_gen_mov_i64(cpu_std_ir
[regno
], vb
);
1422 st_flag_byte(vb
, ENV_FLAG_PS_SHIFT
);
1425 st_flag_byte(vb
, ENV_FLAG_FEN_SHIFT
);
1429 /* The basic registers are data only, and unknown registers
1430 are read-zero, write-ignore. */
1431 data
= cpu_pr_data(regno
);
1433 if (data
& PR_LONG
) {
1434 tcg_gen_st32_i64(vb
, cpu_env
, data
& ~PR_LONG
);
1436 tcg_gen_st_i64(vb
, cpu_env
, data
);
1444 #endif /* !USER_ONLY*/
1446 #define REQUIRE_NO_LIT \
1453 #define REQUIRE_AMASK(FLAG) \
1455 if ((ctx->amask & AMASK_##FLAG) == 0) { \
1460 #define REQUIRE_TB_FLAG(FLAG) \
1462 if ((ctx->tbflags & (FLAG)) == 0) { \
1467 #define REQUIRE_REG_31(WHICH) \
1469 if (WHICH != 31) { \
1474 static DisasJumpType
translate_one(DisasContext
*ctx
, uint32_t insn
)
1476 int32_t disp21
, disp16
, disp12
__attribute__((unused
));
1478 uint8_t opc
, ra
, rb
, rc
, fpfn
, fn7
, lit
;
1479 bool islit
, real_islit
;
1480 TCGv va
, vb
, vc
, tmp
, tmp2
;
1484 /* Decode all instruction fields */
1485 opc
= extract32(insn
, 26, 6);
1486 ra
= extract32(insn
, 21, 5);
1487 rb
= extract32(insn
, 16, 5);
1488 rc
= extract32(insn
, 0, 5);
1489 real_islit
= islit
= extract32(insn
, 12, 1);
1490 lit
= extract32(insn
, 13, 8);
1492 disp21
= sextract32(insn
, 0, 21);
1493 disp16
= sextract32(insn
, 0, 16);
1494 disp12
= sextract32(insn
, 0, 12);
1496 fn11
= extract32(insn
, 5, 11);
1497 fpfn
= extract32(insn
, 5, 6);
1498 fn7
= extract32(insn
, 5, 7);
1500 if (rb
== 31 && !islit
) {
1509 ret
= gen_call_pal(ctx
, insn
& 0x03ffffff);
1535 disp16
= (uint32_t)disp16
<< 16;
1539 va
= dest_gpr(ctx
, ra
);
1540 /* It's worth special-casing immediate loads. */
1542 tcg_gen_movi_i64(va
, disp16
);
1544 tcg_gen_addi_i64(va
, load_gpr(ctx
, rb
), disp16
);
1551 gen_load_mem(ctx
, &tcg_gen_qemu_ld8u
, ra
, rb
, disp16
, 0, 0);
1555 gen_load_mem(ctx
, &tcg_gen_qemu_ld64
, ra
, rb
, disp16
, 0, 1);
1560 gen_load_mem(ctx
, &tcg_gen_qemu_ld16u
, ra
, rb
, disp16
, 0, 0);
1565 gen_store_mem(ctx
, &tcg_gen_qemu_st16
, ra
, rb
, disp16
, 0, 0);
1570 gen_store_mem(ctx
, &tcg_gen_qemu_st8
, ra
, rb
, disp16
, 0, 0);
1574 gen_store_mem(ctx
, &tcg_gen_qemu_st64
, ra
, rb
, disp16
, 0, 1);
1578 vc
= dest_gpr(ctx
, rc
);
1579 vb
= load_gpr_lit(ctx
, rb
, lit
, islit
);
1583 /* Special case ADDL as SEXTL. */
1584 tcg_gen_ext32s_i64(vc
, vb
);
1588 /* Special case SUBQ as NEGQ. */
1589 tcg_gen_neg_i64(vc
, vb
);
1594 va
= load_gpr(ctx
, ra
);
1598 tcg_gen_add_i64(vc
, va
, vb
);
1599 tcg_gen_ext32s_i64(vc
, vc
);
1603 tmp
= tcg_temp_new();
1604 tcg_gen_shli_i64(tmp
, va
, 2);
1605 tcg_gen_add_i64(tmp
, tmp
, vb
);
1606 tcg_gen_ext32s_i64(vc
, tmp
);
1611 tcg_gen_sub_i64(vc
, va
, vb
);
1612 tcg_gen_ext32s_i64(vc
, vc
);
1616 tmp
= tcg_temp_new();
1617 tcg_gen_shli_i64(tmp
, va
, 2);
1618 tcg_gen_sub_i64(tmp
, tmp
, vb
);
1619 tcg_gen_ext32s_i64(vc
, tmp
);
1625 /* Special case 0 >= X as X == 0. */
1626 gen_helper_cmpbe0(vc
, vb
);
1628 gen_helper_cmpbge(vc
, va
, vb
);
1633 tmp
= tcg_temp_new();
1634 tcg_gen_shli_i64(tmp
, va
, 3);
1635 tcg_gen_add_i64(tmp
, tmp
, vb
);
1636 tcg_gen_ext32s_i64(vc
, tmp
);
1641 tmp
= tcg_temp_new();
1642 tcg_gen_shli_i64(tmp
, va
, 3);
1643 tcg_gen_sub_i64(tmp
, tmp
, vb
);
1644 tcg_gen_ext32s_i64(vc
, tmp
);
1649 tcg_gen_setcond_i64(TCG_COND_LTU
, vc
, va
, vb
);
1653 tcg_gen_add_i64(vc
, va
, vb
);
1657 tmp
= tcg_temp_new();
1658 tcg_gen_shli_i64(tmp
, va
, 2);
1659 tcg_gen_add_i64(vc
, tmp
, vb
);
1664 tcg_gen_sub_i64(vc
, va
, vb
);
1668 tmp
= tcg_temp_new();
1669 tcg_gen_shli_i64(tmp
, va
, 2);
1670 tcg_gen_sub_i64(vc
, tmp
, vb
);
1675 tcg_gen_setcond_i64(TCG_COND_EQ
, vc
, va
, vb
);
1679 tmp
= tcg_temp_new();
1680 tcg_gen_shli_i64(tmp
, va
, 3);
1681 tcg_gen_add_i64(vc
, tmp
, vb
);
1686 tmp
= tcg_temp_new();
1687 tcg_gen_shli_i64(tmp
, va
, 3);
1688 tcg_gen_sub_i64(vc
, tmp
, vb
);
1693 tcg_gen_setcond_i64(TCG_COND_LEU
, vc
, va
, vb
);
1697 tmp
= tcg_temp_new();
1698 tcg_gen_ext32s_i64(tmp
, va
);
1699 tcg_gen_ext32s_i64(vc
, vb
);
1700 tcg_gen_add_i64(tmp
, tmp
, vc
);
1701 tcg_gen_ext32s_i64(vc
, tmp
);
1702 gen_helper_check_overflow(cpu_env
, vc
, tmp
);
1707 tmp
= tcg_temp_new();
1708 tcg_gen_ext32s_i64(tmp
, va
);
1709 tcg_gen_ext32s_i64(vc
, vb
);
1710 tcg_gen_sub_i64(tmp
, tmp
, vc
);
1711 tcg_gen_ext32s_i64(vc
, tmp
);
1712 gen_helper_check_overflow(cpu_env
, vc
, tmp
);
1717 tcg_gen_setcond_i64(TCG_COND_LT
, vc
, va
, vb
);
1721 tmp
= tcg_temp_new();
1722 tmp2
= tcg_temp_new();
1723 tcg_gen_eqv_i64(tmp
, va
, vb
);
1724 tcg_gen_mov_i64(tmp2
, va
);
1725 tcg_gen_add_i64(vc
, va
, vb
);
1726 tcg_gen_xor_i64(tmp2
, tmp2
, vc
);
1727 tcg_gen_and_i64(tmp
, tmp
, tmp2
);
1728 tcg_gen_shri_i64(tmp
, tmp
, 63);
1729 tcg_gen_movi_i64(tmp2
, 0);
1730 gen_helper_check_overflow(cpu_env
, tmp
, tmp2
);
1732 tcg_temp_free(tmp2
);
1736 tmp
= tcg_temp_new();
1737 tmp2
= tcg_temp_new();
1738 tcg_gen_xor_i64(tmp
, va
, vb
);
1739 tcg_gen_mov_i64(tmp2
, va
);
1740 tcg_gen_sub_i64(vc
, va
, vb
);
1741 tcg_gen_xor_i64(tmp2
, tmp2
, vc
);
1742 tcg_gen_and_i64(tmp
, tmp
, tmp2
);
1743 tcg_gen_shri_i64(tmp
, tmp
, 63);
1744 tcg_gen_movi_i64(tmp2
, 0);
1745 gen_helper_check_overflow(cpu_env
, tmp
, tmp2
);
1747 tcg_temp_free(tmp2
);
1751 tcg_gen_setcond_i64(TCG_COND_LE
, vc
, va
, vb
);
1761 /* Special case BIS as NOP. */
1765 /* Special case BIS as MOV. */
1766 vc
= dest_gpr(ctx
, rc
);
1768 tcg_gen_movi_i64(vc
, lit
);
1770 tcg_gen_mov_i64(vc
, load_gpr(ctx
, rb
));
1776 vc
= dest_gpr(ctx
, rc
);
1777 vb
= load_gpr_lit(ctx
, rb
, lit
, islit
);
1779 if (fn7
== 0x28 && ra
== 31) {
1780 /* Special case ORNOT as NOT. */
1781 tcg_gen_not_i64(vc
, vb
);
1785 va
= load_gpr(ctx
, ra
);
1789 tcg_gen_and_i64(vc
, va
, vb
);
1793 tcg_gen_andc_i64(vc
, va
, vb
);
1797 tmp
= tcg_temp_new();
1798 tcg_gen_andi_i64(tmp
, va
, 1);
1799 tcg_gen_movcond_i64(TCG_COND_NE
, vc
, tmp
, load_zero(ctx
),
1800 vb
, load_gpr(ctx
, rc
));
1805 tmp
= tcg_temp_new();
1806 tcg_gen_andi_i64(tmp
, va
, 1);
1807 tcg_gen_movcond_i64(TCG_COND_EQ
, vc
, tmp
, load_zero(ctx
),
1808 vb
, load_gpr(ctx
, rc
));
1813 tcg_gen_or_i64(vc
, va
, vb
);
1817 tcg_gen_movcond_i64(TCG_COND_EQ
, vc
, va
, load_zero(ctx
),
1818 vb
, load_gpr(ctx
, rc
));
1822 tcg_gen_movcond_i64(TCG_COND_NE
, vc
, va
, load_zero(ctx
),
1823 vb
, load_gpr(ctx
, rc
));
1827 tcg_gen_orc_i64(vc
, va
, vb
);
1831 tcg_gen_xor_i64(vc
, va
, vb
);
1835 tcg_gen_movcond_i64(TCG_COND_LT
, vc
, va
, load_zero(ctx
),
1836 vb
, load_gpr(ctx
, rc
));
1840 tcg_gen_movcond_i64(TCG_COND_GE
, vc
, va
, load_zero(ctx
),
1841 vb
, load_gpr(ctx
, rc
));
1845 tcg_gen_eqv_i64(vc
, va
, vb
);
1850 tcg_gen_andi_i64(vc
, vb
, ~ctx
->amask
);
1854 tcg_gen_movcond_i64(TCG_COND_LE
, vc
, va
, load_zero(ctx
),
1855 vb
, load_gpr(ctx
, rc
));
1859 tcg_gen_movcond_i64(TCG_COND_GT
, vc
, va
, load_zero(ctx
),
1860 vb
, load_gpr(ctx
, rc
));
1865 tcg_gen_movi_i64(vc
, ctx
->implver
);
1873 vc
= dest_gpr(ctx
, rc
);
1874 va
= load_gpr(ctx
, ra
);
1878 gen_msk_l(ctx
, vc
, va
, rb
, islit
, lit
, 0x01);
1882 gen_ext_l(ctx
, vc
, va
, rb
, islit
, lit
, 0x01);
1886 gen_ins_l(ctx
, vc
, va
, rb
, islit
, lit
, 0x01);
1890 gen_msk_l(ctx
, vc
, va
, rb
, islit
, lit
, 0x03);
1894 gen_ext_l(ctx
, vc
, va
, rb
, islit
, lit
, 0x03);
1898 gen_ins_l(ctx
, vc
, va
, rb
, islit
, lit
, 0x03);
1902 gen_msk_l(ctx
, vc
, va
, rb
, islit
, lit
, 0x0f);
1906 gen_ext_l(ctx
, vc
, va
, rb
, islit
, lit
, 0x0f);
1910 gen_ins_l(ctx
, vc
, va
, rb
, islit
, lit
, 0x0f);
1915 gen_zapnoti(vc
, va
, ~lit
);
1917 gen_helper_zap(vc
, va
, load_gpr(ctx
, rb
));
1923 gen_zapnoti(vc
, va
, lit
);
1925 gen_helper_zapnot(vc
, va
, load_gpr(ctx
, rb
));
1930 gen_msk_l(ctx
, vc
, va
, rb
, islit
, lit
, 0xff);
1935 tcg_gen_shri_i64(vc
, va
, lit
& 0x3f);
1937 tmp
= tcg_temp_new();
1938 vb
= load_gpr(ctx
, rb
);
1939 tcg_gen_andi_i64(tmp
, vb
, 0x3f);
1940 tcg_gen_shr_i64(vc
, va
, tmp
);
1946 gen_ext_l(ctx
, vc
, va
, rb
, islit
, lit
, 0xff);
1951 tcg_gen_shli_i64(vc
, va
, lit
& 0x3f);
1953 tmp
= tcg_temp_new();
1954 vb
= load_gpr(ctx
, rb
);
1955 tcg_gen_andi_i64(tmp
, vb
, 0x3f);
1956 tcg_gen_shl_i64(vc
, va
, tmp
);
1962 gen_ins_l(ctx
, vc
, va
, rb
, islit
, lit
, 0xff);
1967 tcg_gen_sari_i64(vc
, va
, lit
& 0x3f);
1969 tmp
= tcg_temp_new();
1970 vb
= load_gpr(ctx
, rb
);
1971 tcg_gen_andi_i64(tmp
, vb
, 0x3f);
1972 tcg_gen_sar_i64(vc
, va
, tmp
);
1978 gen_msk_h(ctx
, vc
, va
, rb
, islit
, lit
, 0x03);
1982 gen_ins_h(ctx
, vc
, va
, rb
, islit
, lit
, 0x03);
1986 gen_ext_h(ctx
, vc
, va
, rb
, islit
, lit
, 0x03);
1990 gen_msk_h(ctx
, vc
, va
, rb
, islit
, lit
, 0x0f);
1994 gen_ins_h(ctx
, vc
, va
, rb
, islit
, lit
, 0x0f);
1998 gen_ext_h(ctx
, vc
, va
, rb
, islit
, lit
, 0x0f);
2002 gen_msk_h(ctx
, vc
, va
, rb
, islit
, lit
, 0xff);
2006 gen_ins_h(ctx
, vc
, va
, rb
, islit
, lit
, 0xff);
2010 gen_ext_h(ctx
, vc
, va
, rb
, islit
, lit
, 0xff);
2018 vc
= dest_gpr(ctx
, rc
);
2019 vb
= load_gpr_lit(ctx
, rb
, lit
, islit
);
2020 va
= load_gpr(ctx
, ra
);
2024 tcg_gen_mul_i64(vc
, va
, vb
);
2025 tcg_gen_ext32s_i64(vc
, vc
);
2029 tcg_gen_mul_i64(vc
, va
, vb
);
2033 tmp
= tcg_temp_new();
2034 tcg_gen_mulu2_i64(tmp
, vc
, va
, vb
);
2039 tmp
= tcg_temp_new();
2040 tcg_gen_ext32s_i64(tmp
, va
);
2041 tcg_gen_ext32s_i64(vc
, vb
);
2042 tcg_gen_mul_i64(tmp
, tmp
, vc
);
2043 tcg_gen_ext32s_i64(vc
, tmp
);
2044 gen_helper_check_overflow(cpu_env
, vc
, tmp
);
2049 tmp
= tcg_temp_new();
2050 tmp2
= tcg_temp_new();
2051 tcg_gen_muls2_i64(vc
, tmp
, va
, vb
);
2052 tcg_gen_sari_i64(tmp2
, vc
, 63);
2053 gen_helper_check_overflow(cpu_env
, tmp
, tmp2
);
2055 tcg_temp_free(tmp2
);
2064 vc
= dest_fpr(ctx
, rc
);
2065 switch (fpfn
) { /* fn11 & 0x3F */
2069 t32
= tcg_temp_new_i32();
2070 va
= load_gpr(ctx
, ra
);
2071 tcg_gen_extrl_i64_i32(t32
, va
);
2072 gen_helper_memory_to_s(vc
, t32
);
2073 tcg_temp_free_i32(t32
);
2078 vb
= load_fpr(ctx
, rb
);
2079 gen_helper_sqrtf(vc
, cpu_env
, vb
);
2084 gen_sqrts(ctx
, rb
, rc
, fn11
);
2089 t32
= tcg_temp_new_i32();
2090 va
= load_gpr(ctx
, ra
);
2091 tcg_gen_extrl_i64_i32(t32
, va
);
2092 gen_helper_memory_to_f(vc
, t32
);
2093 tcg_temp_free_i32(t32
);
2098 va
= load_gpr(ctx
, ra
);
2099 tcg_gen_mov_i64(vc
, va
);
2104 vb
= load_fpr(ctx
, rb
);
2105 gen_helper_sqrtg(vc
, cpu_env
, vb
);
2110 gen_sqrtt(ctx
, rb
, rc
, fn11
);
2118 /* VAX floating point */
2119 /* XXX: rounding mode and trap are ignored (!) */
2120 vc
= dest_fpr(ctx
, rc
);
2121 vb
= load_fpr(ctx
, rb
);
2122 va
= load_fpr(ctx
, ra
);
2123 switch (fpfn
) { /* fn11 & 0x3F */
2126 gen_helper_addf(vc
, cpu_env
, va
, vb
);
2130 gen_helper_subf(vc
, cpu_env
, va
, vb
);
2134 gen_helper_mulf(vc
, cpu_env
, va
, vb
);
2138 gen_helper_divf(vc
, cpu_env
, va
, vb
);
2146 gen_helper_addg(vc
, cpu_env
, va
, vb
);
2150 gen_helper_subg(vc
, cpu_env
, va
, vb
);
2154 gen_helper_mulg(vc
, cpu_env
, va
, vb
);
2158 gen_helper_divg(vc
, cpu_env
, va
, vb
);
2162 gen_helper_cmpgeq(vc
, cpu_env
, va
, vb
);
2166 gen_helper_cmpglt(vc
, cpu_env
, va
, vb
);
2170 gen_helper_cmpgle(vc
, cpu_env
, va
, vb
);
2175 gen_helper_cvtgf(vc
, cpu_env
, vb
);
2184 gen_helper_cvtgq(vc
, cpu_env
, vb
);
2189 gen_helper_cvtqf(vc
, cpu_env
, vb
);
2194 gen_helper_cvtqg(vc
, cpu_env
, vb
);
2202 /* IEEE floating-point */
2203 switch (fpfn
) { /* fn11 & 0x3F */
2206 gen_adds(ctx
, ra
, rb
, rc
, fn11
);
2210 gen_subs(ctx
, ra
, rb
, rc
, fn11
);
2214 gen_muls(ctx
, ra
, rb
, rc
, fn11
);
2218 gen_divs(ctx
, ra
, rb
, rc
, fn11
);
2222 gen_addt(ctx
, ra
, rb
, rc
, fn11
);
2226 gen_subt(ctx
, ra
, rb
, rc
, fn11
);
2230 gen_mult(ctx
, ra
, rb
, rc
, fn11
);
2234 gen_divt(ctx
, ra
, rb
, rc
, fn11
);
2238 gen_cmptun(ctx
, ra
, rb
, rc
, fn11
);
2242 gen_cmpteq(ctx
, ra
, rb
, rc
, fn11
);
2246 gen_cmptlt(ctx
, ra
, rb
, rc
, fn11
);
2250 gen_cmptle(ctx
, ra
, rb
, rc
, fn11
);
2254 if (fn11
== 0x2AC || fn11
== 0x6AC) {
2256 gen_cvtst(ctx
, rb
, rc
, fn11
);
2259 gen_cvtts(ctx
, rb
, rc
, fn11
);
2265 gen_cvttq(ctx
, rb
, rc
, fn11
);
2270 gen_cvtqs(ctx
, rb
, rc
, fn11
);
2275 gen_cvtqt(ctx
, rb
, rc
, fn11
);
2287 vc
= dest_fpr(ctx
, rc
);
2288 vb
= load_fpr(ctx
, rb
);
2294 /* Special case CPYS as FNOP. */
2296 vc
= dest_fpr(ctx
, rc
);
2297 va
= load_fpr(ctx
, ra
);
2299 /* Special case CPYS as FMOV. */
2300 tcg_gen_mov_i64(vc
, va
);
2302 vb
= load_fpr(ctx
, rb
);
2303 gen_cpy_mask(vc
, va
, vb
, 0, 0x8000000000000000ULL
);
2309 vc
= dest_fpr(ctx
, rc
);
2310 vb
= load_fpr(ctx
, rb
);
2311 va
= load_fpr(ctx
, ra
);
2312 gen_cpy_mask(vc
, va
, vb
, 1, 0x8000000000000000ULL
);
2316 vc
= dest_fpr(ctx
, rc
);
2317 vb
= load_fpr(ctx
, rb
);
2318 va
= load_fpr(ctx
, ra
);
2319 gen_cpy_mask(vc
, va
, vb
, 0, 0xFFF0000000000000ULL
);
2323 va
= load_fpr(ctx
, ra
);
2324 gen_helper_store_fpcr(cpu_env
, va
);
2325 if (ctx
->tb_rm
== QUAL_RM_D
) {
2326 /* Re-do the copy of the rounding mode to fp_status
2327 the next time we use dynamic rounding. */
2333 va
= dest_fpr(ctx
, ra
);
2334 gen_helper_load_fpcr(va
, cpu_env
);
2338 gen_fcmov(ctx
, TCG_COND_EQ
, ra
, rb
, rc
);
2342 gen_fcmov(ctx
, TCG_COND_NE
, ra
, rb
, rc
);
2346 gen_fcmov(ctx
, TCG_COND_LT
, ra
, rb
, rc
);
2350 gen_fcmov(ctx
, TCG_COND_GE
, ra
, rb
, rc
);
2354 gen_fcmov(ctx
, TCG_COND_LE
, ra
, rb
, rc
);
2358 gen_fcmov(ctx
, TCG_COND_GT
, ra
, rb
, rc
);
2360 case 0x030: /* CVTQL */
2361 case 0x130: /* CVTQL/V */
2362 case 0x530: /* CVTQL/SV */
2364 vc
= dest_fpr(ctx
, rc
);
2365 vb
= load_fpr(ctx
, rb
);
2366 gen_helper_cvtql(vc
, cpu_env
, vb
);
2367 gen_fp_exc_raise(rc
, fn11
);
2375 switch ((uint16_t)disp16
) {
2386 tcg_gen_mb(TCG_MO_ALL
| TCG_BAR_SC
);
2390 tcg_gen_mb(TCG_MO_ST_ST
| TCG_BAR_SC
);
2402 va
= dest_gpr(ctx
, ra
);
2403 if (tb_cflags(ctx
->base
.tb
) & CF_USE_ICOUNT
) {
2405 gen_helper_load_pcc(va
, cpu_env
);
2406 ret
= DISAS_PC_STALE
;
2408 gen_helper_load_pcc(va
, cpu_env
);
2436 /* HW_MFPR (PALcode) */
2437 #ifndef CONFIG_USER_ONLY
2438 REQUIRE_TB_FLAG(ENV_FLAG_PAL_MODE
);
2439 va
= dest_gpr(ctx
, ra
);
2440 ret
= gen_mfpr(ctx
, va
, insn
& 0xffff);
2447 /* JMP, JSR, RET, JSR_COROUTINE. These only differ by the branch
2448 prediction stack action, which of course we don't implement. */
2449 vb
= load_gpr(ctx
, rb
);
2450 tcg_gen_andi_i64(cpu_pc
, vb
, ~3);
2452 tcg_gen_movi_i64(ctx
->ir
[ra
], ctx
->base
.pc_next
);
2454 ret
= DISAS_PC_UPDATED
;
2458 /* HW_LD (PALcode) */
2459 #ifndef CONFIG_USER_ONLY
2460 REQUIRE_TB_FLAG(ENV_FLAG_PAL_MODE
);
2462 TCGv addr
= tcg_temp_new();
2463 vb
= load_gpr(ctx
, rb
);
2464 va
= dest_gpr(ctx
, ra
);
2466 tcg_gen_addi_i64(addr
, vb
, disp12
);
2467 switch ((insn
>> 12) & 0xF) {
2469 /* Longword physical access (hw_ldl/p) */
2470 tcg_gen_qemu_ld_i64(va
, addr
, MMU_PHYS_IDX
, MO_LESL
);
2473 /* Quadword physical access (hw_ldq/p) */
2474 tcg_gen_qemu_ld_i64(va
, addr
, MMU_PHYS_IDX
, MO_LEQ
);
2477 /* Longword physical access with lock (hw_ldl_l/p) */
2478 gen_qemu_ldl_l(va
, addr
, MMU_PHYS_IDX
);
2481 /* Quadword physical access with lock (hw_ldq_l/p) */
2482 gen_qemu_ldq_l(va
, addr
, MMU_PHYS_IDX
);
2485 /* Longword virtual PTE fetch (hw_ldl/v) */
2488 /* Quadword virtual PTE fetch (hw_ldq/v) */
2498 /* Longword virtual access (hw_ldl) */
2501 /* Quadword virtual access (hw_ldq) */
2504 /* Longword virtual access with protection check (hw_ldl/w) */
2505 tcg_gen_qemu_ld_i64(va
, addr
, MMU_KERNEL_IDX
, MO_LESL
);
2508 /* Quadword virtual access with protection check (hw_ldq/w) */
2509 tcg_gen_qemu_ld_i64(va
, addr
, MMU_KERNEL_IDX
, MO_LEQ
);
2512 /* Longword virtual access with alt access mode (hw_ldl/a)*/
2515 /* Quadword virtual access with alt access mode (hw_ldq/a) */
2518 /* Longword virtual access with alternate access mode and
2519 protection checks (hw_ldl/wa) */
2520 tcg_gen_qemu_ld_i64(va
, addr
, MMU_USER_IDX
, MO_LESL
);
2523 /* Quadword virtual access with alternate access mode and
2524 protection checks (hw_ldq/wa) */
2525 tcg_gen_qemu_ld_i64(va
, addr
, MMU_USER_IDX
, MO_LEQ
);
2528 tcg_temp_free(addr
);
2536 vc
= dest_gpr(ctx
, rc
);
2541 va
= load_fpr(ctx
, ra
);
2542 tcg_gen_mov_i64(vc
, va
);
2544 } else if (fn7
== 0x78) {
2548 t32
= tcg_temp_new_i32();
2549 va
= load_fpr(ctx
, ra
);
2550 gen_helper_s_to_memory(t32
, va
);
2551 tcg_gen_ext_i32_i64(vc
, t32
);
2552 tcg_temp_free_i32(t32
);
2556 vb
= load_gpr_lit(ctx
, rb
, lit
, islit
);
2562 tcg_gen_ext8s_i64(vc
, vb
);
2568 tcg_gen_ext16s_i64(vc
, vb
);
2575 tcg_gen_ctpop_i64(vc
, vb
);
2581 va
= load_gpr(ctx
, ra
);
2582 gen_helper_perr(vc
, va
, vb
);
2589 tcg_gen_clzi_i64(vc
, vb
, 64);
2596 tcg_gen_ctzi_i64(vc
, vb
, 64);
2603 gen_helper_unpkbw(vc
, vb
);
2610 gen_helper_unpkbl(vc
, vb
);
2617 gen_helper_pkwb(vc
, vb
);
2624 gen_helper_pklb(vc
, vb
);
2629 va
= load_gpr(ctx
, ra
);
2630 gen_helper_minsb8(vc
, va
, vb
);
2635 va
= load_gpr(ctx
, ra
);
2636 gen_helper_minsw4(vc
, va
, vb
);
2641 va
= load_gpr(ctx
, ra
);
2642 gen_helper_minub8(vc
, va
, vb
);
2647 va
= load_gpr(ctx
, ra
);
2648 gen_helper_minuw4(vc
, va
, vb
);
2653 va
= load_gpr(ctx
, ra
);
2654 gen_helper_maxub8(vc
, va
, vb
);
2659 va
= load_gpr(ctx
, ra
);
2660 gen_helper_maxuw4(vc
, va
, vb
);
2665 va
= load_gpr(ctx
, ra
);
2666 gen_helper_maxsb8(vc
, va
, vb
);
2671 va
= load_gpr(ctx
, ra
);
2672 gen_helper_maxsw4(vc
, va
, vb
);
2680 /* HW_MTPR (PALcode) */
2681 #ifndef CONFIG_USER_ONLY
2682 REQUIRE_TB_FLAG(ENV_FLAG_PAL_MODE
);
2683 vb
= load_gpr(ctx
, rb
);
2684 ret
= gen_mtpr(ctx
, vb
, insn
& 0xffff);
2691 /* HW_RET (PALcode) */
2692 #ifndef CONFIG_USER_ONLY
2693 REQUIRE_TB_FLAG(ENV_FLAG_PAL_MODE
);
2695 /* Pre-EV6 CPUs interpreted this as HW_REI, loading the return
2696 address from EXC_ADDR. This turns out to be useful for our
2697 emulation PALcode, so continue to accept it. */
2698 ctx
->lit
= vb
= tcg_temp_new();
2699 tcg_gen_ld_i64(vb
, cpu_env
, offsetof(CPUAlphaState
, exc_addr
));
2701 vb
= load_gpr(ctx
, rb
);
2703 tcg_gen_movi_i64(cpu_lock_addr
, -1);
2704 tmp
= tcg_temp_new();
2705 tcg_gen_movi_i64(tmp
, 0);
2706 st_flag_byte(tmp
, ENV_FLAG_RX_SHIFT
);
2707 tcg_gen_andi_i64(tmp
, vb
, 1);
2708 st_flag_byte(tmp
, ENV_FLAG_PAL_SHIFT
);
2710 tcg_gen_andi_i64(cpu_pc
, vb
, ~3);
2711 /* Allow interrupts to be recognized right away. */
2712 ret
= DISAS_PC_UPDATED_NOCHAIN
;
2719 /* HW_ST (PALcode) */
2720 #ifndef CONFIG_USER_ONLY
2721 REQUIRE_TB_FLAG(ENV_FLAG_PAL_MODE
);
2723 switch ((insn
>> 12) & 0xF) {
2725 /* Longword physical access */
2726 va
= load_gpr(ctx
, ra
);
2727 vb
= load_gpr(ctx
, rb
);
2728 tmp
= tcg_temp_new();
2729 tcg_gen_addi_i64(tmp
, vb
, disp12
);
2730 tcg_gen_qemu_st_i64(va
, tmp
, MMU_PHYS_IDX
, MO_LESL
);
2734 /* Quadword physical access */
2735 va
= load_gpr(ctx
, ra
);
2736 vb
= load_gpr(ctx
, rb
);
2737 tmp
= tcg_temp_new();
2738 tcg_gen_addi_i64(tmp
, vb
, disp12
);
2739 tcg_gen_qemu_st_i64(va
, tmp
, MMU_PHYS_IDX
, MO_LEQ
);
2743 /* Longword physical access with lock */
2744 ret
= gen_store_conditional(ctx
, ra
, rb
, disp12
,
2745 MMU_PHYS_IDX
, MO_LESL
);
2748 /* Quadword physical access with lock */
2749 ret
= gen_store_conditional(ctx
, ra
, rb
, disp12
,
2750 MMU_PHYS_IDX
, MO_LEQ
);
2753 /* Longword virtual access */
2756 /* Quadword virtual access */
2777 /* Longword virtual access with alternate access mode */
2780 /* Quadword virtual access with alternate access mode */
2796 gen_load_mem(ctx
, &gen_qemu_ldf
, ra
, rb
, disp16
, 1, 0);
2800 gen_load_mem(ctx
, &gen_qemu_ldg
, ra
, rb
, disp16
, 1, 0);
2804 gen_load_mem(ctx
, &gen_qemu_lds
, ra
, rb
, disp16
, 1, 0);
2808 gen_load_mem(ctx
, &tcg_gen_qemu_ld64
, ra
, rb
, disp16
, 1, 0);
2812 gen_store_mem(ctx
, &gen_qemu_stf
, ra
, rb
, disp16
, 1, 0);
2816 gen_store_mem(ctx
, &gen_qemu_stg
, ra
, rb
, disp16
, 1, 0);
2820 gen_store_mem(ctx
, &gen_qemu_sts
, ra
, rb
, disp16
, 1, 0);
2824 gen_store_mem(ctx
, &tcg_gen_qemu_st64
, ra
, rb
, disp16
, 1, 0);
2828 gen_load_mem(ctx
, &tcg_gen_qemu_ld32s
, ra
, rb
, disp16
, 0, 0);
2832 gen_load_mem(ctx
, &tcg_gen_qemu_ld64
, ra
, rb
, disp16
, 0, 0);
2836 gen_load_mem(ctx
, &gen_qemu_ldl_l
, ra
, rb
, disp16
, 0, 0);
2840 gen_load_mem(ctx
, &gen_qemu_ldq_l
, ra
, rb
, disp16
, 0, 0);
2844 gen_store_mem(ctx
, &tcg_gen_qemu_st32
, ra
, rb
, disp16
, 0, 0);
2848 gen_store_mem(ctx
, &tcg_gen_qemu_st64
, ra
, rb
, disp16
, 0, 0);
2852 ret
= gen_store_conditional(ctx
, ra
, rb
, disp16
,
2853 ctx
->mem_idx
, MO_LESL
);
2857 ret
= gen_store_conditional(ctx
, ra
, rb
, disp16
,
2858 ctx
->mem_idx
, MO_LEQ
);
2862 ret
= gen_bdirect(ctx
, ra
, disp21
);
2864 case 0x31: /* FBEQ */
2865 ret
= gen_fbcond(ctx
, TCG_COND_EQ
, ra
, disp21
);
2867 case 0x32: /* FBLT */
2868 ret
= gen_fbcond(ctx
, TCG_COND_LT
, ra
, disp21
);
2870 case 0x33: /* FBLE */
2871 ret
= gen_fbcond(ctx
, TCG_COND_LE
, ra
, disp21
);
2875 ret
= gen_bdirect(ctx
, ra
, disp21
);
2877 case 0x35: /* FBNE */
2878 ret
= gen_fbcond(ctx
, TCG_COND_NE
, ra
, disp21
);
2880 case 0x36: /* FBGE */
2881 ret
= gen_fbcond(ctx
, TCG_COND_GE
, ra
, disp21
);
2883 case 0x37: /* FBGT */
2884 ret
= gen_fbcond(ctx
, TCG_COND_GT
, ra
, disp21
);
2888 ret
= gen_bcond(ctx
, TCG_COND_EQ
, ra
, disp21
, 1);
2892 ret
= gen_bcond(ctx
, TCG_COND_EQ
, ra
, disp21
, 0);
2896 ret
= gen_bcond(ctx
, TCG_COND_LT
, ra
, disp21
, 0);
2900 ret
= gen_bcond(ctx
, TCG_COND_LE
, ra
, disp21
, 0);
2904 ret
= gen_bcond(ctx
, TCG_COND_NE
, ra
, disp21
, 1);
2908 ret
= gen_bcond(ctx
, TCG_COND_NE
, ra
, disp21
, 0);
2912 ret
= gen_bcond(ctx
, TCG_COND_GE
, ra
, disp21
, 0);
2916 ret
= gen_bcond(ctx
, TCG_COND_GT
, ra
, disp21
, 0);
2919 ret
= gen_invalid(ctx
);
2926 static void alpha_tr_init_disas_context(DisasContextBase
*dcbase
, CPUState
*cpu
)
2928 DisasContext
*ctx
= container_of(dcbase
, DisasContext
, base
);
2929 CPUAlphaState
*env
= cpu
->env_ptr
;
2930 int64_t bound
, mask
;
2932 ctx
->tbflags
= ctx
->base
.tb
->flags
;
2933 ctx
->mem_idx
= cpu_mmu_index(env
, false);
2934 ctx
->implver
= env
->implver
;
2935 ctx
->amask
= env
->amask
;
2937 #ifdef CONFIG_USER_ONLY
2938 ctx
->ir
= cpu_std_ir
;
2940 ctx
->palbr
= env
->palbr
;
2941 ctx
->ir
= (ctx
->tbflags
& ENV_FLAG_PAL_MODE
? cpu_pal_ir
: cpu_std_ir
);
2944 /* ??? Every TB begins with unset rounding mode, to be initialized on
2945 the first fp insn of the TB. Alternately we could define a proper
2946 default for every TB (e.g. QUAL_RM_N or QUAL_RM_D) and make sure
2947 to reset the FP_STATUS to that default at the end of any TB that
2948 changes the default. We could even (gasp) dynamiclly figure out
2949 what default would be most efficient given the running program. */
2951 /* Similarly for flush-to-zero. */
2958 /* Bound the number of insns to execute to those left on the page. */
2959 if (in_superpage(ctx
, ctx
->base
.pc_first
)) {
2962 mask
= TARGET_PAGE_MASK
;
2964 bound
= -(ctx
->base
.pc_first
| mask
) / 4;
2965 ctx
->base
.max_insns
= MIN(ctx
->base
.max_insns
, bound
);
2968 static void alpha_tr_tb_start(DisasContextBase
*db
, CPUState
*cpu
)
2972 static void alpha_tr_insn_start(DisasContextBase
*dcbase
, CPUState
*cpu
)
2974 tcg_gen_insn_start(dcbase
->pc_next
);
2977 static bool alpha_tr_breakpoint_check(DisasContextBase
*dcbase
, CPUState
*cpu
,
2978 const CPUBreakpoint
*bp
)
2980 DisasContext
*ctx
= container_of(dcbase
, DisasContext
, base
);
2982 ctx
->base
.is_jmp
= gen_excp(ctx
, EXCP_DEBUG
, 0);
2984 /* The address covered by the breakpoint must be included in
2985 [tb->pc, tb->pc + tb->size) in order to for it to be
2986 properly cleared -- thus we increment the PC here so that
2987 the logic setting tb->size below does the right thing. */
2988 ctx
->base
.pc_next
+= 4;
2992 static void alpha_tr_translate_insn(DisasContextBase
*dcbase
, CPUState
*cpu
)
2994 DisasContext
*ctx
= container_of(dcbase
, DisasContext
, base
);
2995 CPUAlphaState
*env
= cpu
->env_ptr
;
2996 uint32_t insn
= translator_ldl(env
, ctx
->base
.pc_next
);
2998 ctx
->base
.pc_next
+= 4;
2999 ctx
->base
.is_jmp
= translate_one(ctx
, insn
);
3001 free_context_temps(ctx
);
3002 translator_loop_temp_check(&ctx
->base
);
3005 static void alpha_tr_tb_stop(DisasContextBase
*dcbase
, CPUState
*cpu
)
3007 DisasContext
*ctx
= container_of(dcbase
, DisasContext
, base
);
3009 switch (ctx
->base
.is_jmp
) {
3010 case DISAS_NORETURN
:
3012 case DISAS_TOO_MANY
:
3013 if (use_goto_tb(ctx
, ctx
->base
.pc_next
)) {
3015 tcg_gen_movi_i64(cpu_pc
, ctx
->base
.pc_next
);
3016 tcg_gen_exit_tb(ctx
->base
.tb
, 0);
3019 case DISAS_PC_STALE
:
3020 tcg_gen_movi_i64(cpu_pc
, ctx
->base
.pc_next
);
3022 case DISAS_PC_UPDATED
:
3023 if (!use_exit_tb(ctx
)) {
3024 tcg_gen_lookup_and_goto_ptr();
3028 case DISAS_PC_UPDATED_NOCHAIN
:
3029 if (ctx
->base
.singlestep_enabled
) {
3030 gen_excp_1(EXCP_DEBUG
, 0);
3032 tcg_gen_exit_tb(NULL
, 0);
3036 g_assert_not_reached();
3040 static void alpha_tr_disas_log(const DisasContextBase
*dcbase
, CPUState
*cpu
)
3042 qemu_log("IN: %s\n", lookup_symbol(dcbase
->pc_first
));
3043 log_target_disas(cpu
, dcbase
->pc_first
, dcbase
->tb
->size
);
3046 static const TranslatorOps alpha_tr_ops
= {
3047 .init_disas_context
= alpha_tr_init_disas_context
,
3048 .tb_start
= alpha_tr_tb_start
,
3049 .insn_start
= alpha_tr_insn_start
,
3050 .breakpoint_check
= alpha_tr_breakpoint_check
,
3051 .translate_insn
= alpha_tr_translate_insn
,
3052 .tb_stop
= alpha_tr_tb_stop
,
3053 .disas_log
= alpha_tr_disas_log
,
3056 void gen_intermediate_code(CPUState
*cpu
, TranslationBlock
*tb
, int max_insns
)
3059 translator_loop(&alpha_tr_ops
, &dc
.base
, cpu
, tb
, max_insns
);
3062 void restore_state_to_opc(CPUAlphaState
*env
, TranslationBlock
*tb
,