/*
 *  Alpha emulation cpu translation for qemu.
 *
 *  Copyright (c) 2007 Jocelyn Mayer
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "cpu.h"
#include "sysemu/cpus.h"
#include "disas/disas.h"
#include "qemu/host-utils.h"
#include "exec/exec-all.h"
#include "tcg/tcg-op.h"
#include "exec/cpu_ldst.h"
#include "exec/helper-proto.h"
#include "exec/helper-gen.h"
#include "trace-tcg.h"
#include "exec/translator.h"
#include "exec/log.h"

#undef ALPHA_DEBUG_DISAS
#define CONFIG_SOFTFLOAT_INLINE

#ifdef ALPHA_DEBUG_DISAS
#  define LOG_DISAS(...) qemu_log_mask(CPU_LOG_TB_IN_ASM, ## __VA_ARGS__)
#else
#  define LOG_DISAS(...) do { } while (0)
#endif

typedef struct DisasContext DisasContext;
struct DisasContext {
    DisasContextBase base;

#ifndef CONFIG_USER_ONLY
    uint64_t palbr;
#endif
    uint32_t tbflags;
    int mem_idx;

    /* implver and amask values for this CPU.  */
    int implver;
    int amask;

    /* Current rounding mode for this TB.  */
    int tb_rm;
    /* Current flush-to-zero setting for this TB.  */
    int tb_ftz;

    /* The set of registers active in the current context.  */
    TCGv *ir;

    /* Temporaries for $31 and $f31 as source and destination.  */
    TCGv zero;
    TCGv sink;
    /* Temporary for immediate constants.  */
    TCGv lit;
};

/* Target-specific return values from translate_one, indicating the
   state of the TB.  Note that DISAS_NEXT indicates that we are not
   exiting the TB.  */
#define DISAS_PC_UPDATED_NOCHAIN  DISAS_TARGET_0
#define DISAS_PC_UPDATED          DISAS_TARGET_1
#define DISAS_PC_STALE            DISAS_TARGET_2

/* global register indexes */
static TCGv cpu_std_ir[31];
static TCGv cpu_fir[31];
static TCGv cpu_pc;
static TCGv cpu_lock_addr;
static TCGv cpu_lock_value;

#ifndef CONFIG_USER_ONLY
static TCGv cpu_pal_ir[31];
#endif

#include "exec/gen-icount.h"

void alpha_translate_init(void)
{
#define DEF_VAR(V)  { &cpu_##V, #V, offsetof(CPUAlphaState, V) }

    typedef struct { TCGv *var; const char *name; int ofs; } GlobalVar;
    static const GlobalVar vars[] = {
        DEF_VAR(pc),
        DEF_VAR(lock_addr),
        DEF_VAR(lock_value),
    };

#undef DEF_VAR

    /* Use the symbolic register names that match the disassembler.  */
    static const char greg_names[31][4] = {
        "v0", "t0", "t1", "t2", "t3", "t4", "t5", "t6",
        "t7", "s0", "s1", "s2", "s3", "s4", "s5", "fp",
        "a0", "a1", "a2", "a3", "a4", "a5", "t8", "t9",
        "t10", "t11", "ra", "t12", "at", "gp", "sp"
    };
    static const char freg_names[31][4] = {
        "f0", "f1", "f2", "f3", "f4", "f5", "f6", "f7",
        "f8", "f9", "f10", "f11", "f12", "f13", "f14", "f15",
        "f16", "f17", "f18", "f19", "f20", "f21", "f22", "f23",
        "f24", "f25", "f26", "f27", "f28", "f29", "f30"
    };
#ifndef CONFIG_USER_ONLY
    static const char shadow_names[8][8] = {
        "pal_t7", "pal_s0", "pal_s1", "pal_s2",
        "pal_s3", "pal_s4", "pal_s5", "pal_t11"
    };
#endif
    int i;

    for (i = 0; i < 31; i++) {
        cpu_std_ir[i] = tcg_global_mem_new_i64(cpu_env,
                                               offsetof(CPUAlphaState, ir[i]),
                                               greg_names[i]);
    }

    for (i = 0; i < 31; i++) {
        cpu_fir[i] = tcg_global_mem_new_i64(cpu_env,
                                            offsetof(CPUAlphaState, fir[i]),
                                            freg_names[i]);
    }

#ifndef CONFIG_USER_ONLY
    memcpy(cpu_pal_ir, cpu_std_ir, sizeof(cpu_pal_ir));
    for (i = 0; i < 8; i++) {
        int r = (i == 7 ? 25 : i + 8);
        cpu_pal_ir[r] = tcg_global_mem_new_i64(cpu_env,
                                               offsetof(CPUAlphaState,
                                                        shadow[i]),
                                               shadow_names[i]);
    }
#endif

    for (i = 0; i < ARRAY_SIZE(vars); ++i) {
        const GlobalVar *v = &vars[i];
        *v->var = tcg_global_mem_new_i64(cpu_env, v->ofs, v->name);
    }
}

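/* Register accesses for $31/$f31: reads of these registers always yield
   zero and writes are discarded.  load_zero and dest_sink lazily allocate
   a zero-valued source and a scratch write sink for the instruction being
   translated; free_context_temps releases them (and any literal temporary)
   once the instruction has been emitted.  */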
static TCGv load_zero(DisasContext *ctx)
{
    if (!ctx->zero) {
        ctx->zero = tcg_const_i64(0);
    }
    return ctx->zero;
}

static TCGv dest_sink(DisasContext *ctx)
{
    if (!ctx->sink) {
        ctx->sink = tcg_temp_new();
    }
    return ctx->sink;
}

static void free_context_temps(DisasContext *ctx)
{
    if (ctx->sink) {
        tcg_gen_discard_i64(ctx->sink);
        tcg_temp_free(ctx->sink);
        ctx->sink = NULL;
    }
    if (ctx->zero) {
        tcg_temp_free(ctx->zero);
        ctx->zero = NULL;
    }
    if (ctx->lit) {
        tcg_temp_free(ctx->lit);
        ctx->lit = NULL;
    }
}

static TCGv load_gpr(DisasContext *ctx, unsigned reg)
{
    if (likely(reg < 31)) {
        return ctx->ir[reg];
    } else {
        return load_zero(ctx);
    }
}

static TCGv load_gpr_lit(DisasContext *ctx, unsigned reg,
                         uint8_t lit, bool islit)
{
    if (islit) {
        ctx->lit = tcg_const_i64(lit);
        return ctx->lit;
    } else if (likely(reg < 31)) {
        return ctx->ir[reg];
    } else {
        return load_zero(ctx);
    }
}

static TCGv dest_gpr(DisasContext *ctx, unsigned reg)
{
    if (likely(reg < 31)) {
        return ctx->ir[reg];
    } else {
        return dest_sink(ctx);
    }
}

static TCGv load_fpr(DisasContext *ctx, unsigned reg)
{
    if (likely(reg < 31)) {
        return cpu_fir[reg];
    } else {
        return load_zero(ctx);
    }
}

static TCGv dest_fpr(DisasContext *ctx, unsigned reg)
{
    if (likely(reg < 31)) {
        return cpu_fir[reg];
    } else {
        return dest_sink(ctx);
    }
}

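/* env->flags packs several one-byte fields (PAL mode, PS, FEN, RX), each
   identified by an ENV_FLAG_*_SHIFT value.  get_flag_ofs converts such a
   shift into the byte offset of that field within the flags word, taking
   host endianness into account, so that a single byte can be loaded or
   stored directly.  */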
static int get_flag_ofs(unsigned shift)
{
    int ofs = offsetof(CPUAlphaState, flags);
#ifdef HOST_WORDS_BIGENDIAN
    ofs += 3 - (shift / 8);
#else
    ofs += shift / 8;
#endif
    return ofs;
}

static void ld_flag_byte(TCGv val, unsigned shift)
{
    tcg_gen_ld8u_i64(val, cpu_env, get_flag_ofs(shift));
}

static void st_flag_byte(TCGv val, unsigned shift)
{
    tcg_gen_st8_i64(val, cpu_env, get_flag_ofs(shift));
}

static void gen_excp_1(int exception, int error_code)
{
    TCGv_i32 tmp1, tmp2;

    tmp1 = tcg_const_i32(exception);
    tmp2 = tcg_const_i32(error_code);
    gen_helper_excp(cpu_env, tmp1, tmp2);
    tcg_temp_free_i32(tmp2);
    tcg_temp_free_i32(tmp1);
}

static DisasJumpType gen_excp(DisasContext *ctx, int exception, int error_code)
{
    tcg_gen_movi_i64(cpu_pc, ctx->base.pc_next);
    gen_excp_1(exception, error_code);
    return DISAS_NORETURN;
}

static inline DisasJumpType gen_invalid(DisasContext *ctx)
{
    return gen_excp(ctx, EXCP_OPCDEC, 0);
}

static inline void gen_qemu_ldf(TCGv t0, TCGv t1, int flags)
{
    TCGv_i32 tmp32 = tcg_temp_new_i32();
    tcg_gen_qemu_ld_i32(tmp32, t1, flags, MO_LEUL);
    gen_helper_memory_to_f(t0, tmp32);
    tcg_temp_free_i32(tmp32);
}

static inline void gen_qemu_ldg(TCGv t0, TCGv t1, int flags)
{
    TCGv tmp = tcg_temp_new();
    tcg_gen_qemu_ld_i64(tmp, t1, flags, MO_LEQ);
    gen_helper_memory_to_g(t0, tmp);
    tcg_temp_free(tmp);
}

static inline void gen_qemu_lds(TCGv t0, TCGv t1, int flags)
{
    TCGv_i32 tmp32 = tcg_temp_new_i32();
    tcg_gen_qemu_ld_i32(tmp32, t1, flags, MO_LEUL);
    gen_helper_memory_to_s(t0, tmp32);
    tcg_temp_free_i32(tmp32);
}

static inline void gen_qemu_ldl_l(TCGv t0, TCGv t1, int flags)
{
    tcg_gen_qemu_ld_i64(t0, t1, flags, MO_LESL);
    tcg_gen_mov_i64(cpu_lock_addr, t1);
    tcg_gen_mov_i64(cpu_lock_value, t0);
}

static inline void gen_qemu_ldq_l(TCGv t0, TCGv t1, int flags)
{
    tcg_gen_qemu_ld_i64(t0, t1, flags, MO_LEQ);
    tcg_gen_mov_i64(cpu_lock_addr, t1);
    tcg_gen_mov_i64(cpu_lock_value, t0);
}

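/* Common helper for the memory-format load instructions: add the 16-bit
   displacement to the base register, optionally clear the low three bits
   of the address (LDQ_U), and emit the supplied TCG load into either the
   integer or the floating-point register file.  */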
static inline void gen_load_mem(DisasContext *ctx,
                                void (*tcg_gen_qemu_load)(TCGv t0, TCGv t1,
                                                          int flags),
                                int ra, int rb, int32_t disp16, bool fp,
                                bool clear)
{
    TCGv tmp, addr, va;

    /* LDQ_U with ra $31 is UNOP.  Other various loads are forms of
       prefetches, which we can treat as nops.  No worries about
       missed exceptions here.  */
    if (unlikely(ra == 31)) {
        return;
    }

    tmp = tcg_temp_new();
    addr = load_gpr(ctx, rb);

    if (disp16) {
        tcg_gen_addi_i64(tmp, addr, disp16);
        addr = tmp;
    }
    if (clear) {
        tcg_gen_andi_i64(tmp, addr, ~0x7);
        addr = tmp;
    }

    va = (fp ? cpu_fir[ra] : ctx->ir[ra]);
    tcg_gen_qemu_load(va, addr, ctx->mem_idx);

    tcg_temp_free(tmp);
}

static inline void gen_qemu_stf(TCGv t0, TCGv t1, int flags)
{
    TCGv_i32 tmp32 = tcg_temp_new_i32();
    gen_helper_f_to_memory(tmp32, t0);
    tcg_gen_qemu_st_i32(tmp32, t1, flags, MO_LEUL);
    tcg_temp_free_i32(tmp32);
}

static inline void gen_qemu_stg(TCGv t0, TCGv t1, int flags)
{
    TCGv tmp = tcg_temp_new();
    gen_helper_g_to_memory(tmp, t0);
    tcg_gen_qemu_st_i64(tmp, t1, flags, MO_LEQ);
    tcg_temp_free(tmp);
}

static inline void gen_qemu_sts(TCGv t0, TCGv t1, int flags)
{
    TCGv_i32 tmp32 = tcg_temp_new_i32();
    gen_helper_s_to_memory(tmp32, t0);
    tcg_gen_qemu_st_i32(tmp32, t1, flags, MO_LEUL);
    tcg_temp_free_i32(tmp32);
}

static inline void gen_store_mem(DisasContext *ctx,
                                 void (*tcg_gen_qemu_store)(TCGv t0, TCGv t1,
                                                            int flags),
                                 int ra, int rb, int32_t disp16, bool fp,
                                 bool clear)
{
    TCGv tmp, addr, va;

    tmp = tcg_temp_new();
    addr = load_gpr(ctx, rb);

    if (disp16) {
        tcg_gen_addi_i64(tmp, addr, disp16);
        addr = tmp;
    }
    if (clear) {
        tcg_gen_andi_i64(tmp, addr, ~0x7);
        addr = tmp;
    }

    va = (fp ? load_fpr(ctx, ra) : load_gpr(ctx, ra));
    tcg_gen_qemu_store(va, addr, ctx->mem_idx);

    tcg_temp_free(tmp);
}

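/* STL_C/STQ_C: the load-locked/store-conditional pair is emulated with an
   atomic compare-and-swap against the recorded lock address and lock value.
   RA receives 1 on success and 0 on failure, and the lock is always
   invalidated afterwards.  */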
static DisasJumpType gen_store_conditional(DisasContext *ctx, int ra, int rb,
                                           int32_t disp16, int mem_idx,
                                           MemOp op)
{
    TCGLabel *lab_fail, *lab_done;
    TCGv addr, val;

    addr = tcg_temp_new_i64();
    tcg_gen_addi_i64(addr, load_gpr(ctx, rb), disp16);
    free_context_temps(ctx);

    lab_fail = gen_new_label();
    lab_done = gen_new_label();
    tcg_gen_brcond_i64(TCG_COND_NE, addr, cpu_lock_addr, lab_fail);
    tcg_temp_free_i64(addr);

    val = tcg_temp_new_i64();
    tcg_gen_atomic_cmpxchg_i64(val, cpu_lock_addr, cpu_lock_value,
                               load_gpr(ctx, ra), mem_idx, op);
    free_context_temps(ctx);

    if (ra != 31) {
        tcg_gen_setcond_i64(TCG_COND_EQ, ctx->ir[ra], val, cpu_lock_value);
    }
    tcg_temp_free_i64(val);
    tcg_gen_br(lab_done);

    gen_set_label(lab_fail);
    if (ra != 31) {
        tcg_gen_movi_i64(ctx->ir[ra], 0);
    }

    gen_set_label(lab_done);
    tcg_gen_movi_i64(cpu_lock_addr, -1);
    return DISAS_NEXT;
}

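/* The Alpha "superpage" is a kernel-only region whose translation is fixed,
   so code executing there can never see its page permissions change; this
   is used both to bound the number of instructions per TB and to allow
   goto_tb chaining across it.  */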
static bool in_superpage(DisasContext *ctx, int64_t addr)
{
#ifndef CONFIG_USER_ONLY
    return ((ctx->tbflags & ENV_FLAG_PS_USER) == 0
            && addr >> TARGET_VIRT_ADDR_SPACE_BITS == -1
            && ((addr >> 41) & 3) == 2);
#else
    return false;
#endif
}

static bool use_exit_tb(DisasContext *ctx)
{
    return ((tb_cflags(ctx->base.tb) & CF_LAST_IO)
            || ctx->base.singlestep_enabled
            || singlestep);
}

static bool use_goto_tb(DisasContext *ctx, uint64_t dest)
{
    /* Suppress goto_tb in the case of single-stepping and IO.  */
    if (unlikely(use_exit_tb(ctx))) {
        return false;
    }
#ifndef CONFIG_USER_ONLY
    /* If the destination is in the superpage, the page perms can't change.  */
    if (in_superpage(ctx, dest)) {
        return true;
    }
    /* Check for the dest on the same page as the start of the TB.  */
    return ((ctx->base.tb->pc ^ dest) & TARGET_PAGE_MASK) == 0;
#else
    return true;
#endif
}

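/* Direct branches (BR/BSR): record the return address in RA if requested,
   then either chain directly to the target TB or leave the new PC in
   cpu_pc for the main loop to pick up.  */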
static DisasJumpType gen_bdirect(DisasContext *ctx, int ra, int32_t disp)
{
    uint64_t dest = ctx->base.pc_next + (disp << 2);

    if (ra != 31) {
        tcg_gen_movi_i64(ctx->ir[ra], ctx->base.pc_next);
    }

    /* Notice branch-to-next; used to initialize RA with the PC.  */
    if (disp == 0) {
        return DISAS_NEXT;
    } else if (use_goto_tb(ctx, dest)) {
        tcg_gen_goto_tb(0);
        tcg_gen_movi_i64(cpu_pc, dest);
        tcg_gen_exit_tb(ctx->base.tb, 0);
        return DISAS_NORETURN;
    } else {
        tcg_gen_movi_i64(cpu_pc, dest);
        return DISAS_PC_UPDATED;
    }
}

static DisasJumpType gen_bcond_internal(DisasContext *ctx, TCGCond cond,
                                        TCGv cmp, int32_t disp)
{
    uint64_t dest = ctx->base.pc_next + (disp << 2);
    TCGLabel *lab_true = gen_new_label();

    if (use_goto_tb(ctx, dest)) {
        tcg_gen_brcondi_i64(cond, cmp, 0, lab_true);

        tcg_gen_goto_tb(0);
        tcg_gen_movi_i64(cpu_pc, ctx->base.pc_next);
        tcg_gen_exit_tb(ctx->base.tb, 0);

        gen_set_label(lab_true);
        tcg_gen_goto_tb(1);
        tcg_gen_movi_i64(cpu_pc, dest);
        tcg_gen_exit_tb(ctx->base.tb, 1);

        return DISAS_NORETURN;
    } else {
        TCGv_i64 z = tcg_const_i64(0);
        TCGv_i64 d = tcg_const_i64(dest);
        TCGv_i64 p = tcg_const_i64(ctx->base.pc_next);

        tcg_gen_movcond_i64(cond, cpu_pc, cmp, z, d, p);

        tcg_temp_free_i64(z);
        tcg_temp_free_i64(d);
        tcg_temp_free_i64(p);
        return DISAS_PC_UPDATED;
    }
}

static DisasJumpType gen_bcond(DisasContext *ctx, TCGCond cond, int ra,
                               int32_t disp, int mask)
{
    if (mask) {
        TCGv tmp = tcg_temp_new();
        DisasJumpType ret;

        tcg_gen_andi_i64(tmp, load_gpr(ctx, ra), 1);
        ret = gen_bcond_internal(ctx, cond, tmp, disp);
        tcg_temp_free(tmp);
        return ret;
    }
    return gen_bcond_internal(ctx, cond, load_gpr(ctx, ra), disp);
}

/* Fold -0.0 for comparison with COND.  */

static void gen_fold_mzero(TCGCond cond, TCGv dest, TCGv src)
{
    uint64_t mzero = 1ull << 63;

    switch (cond) {
    case TCG_COND_LE:
    case TCG_COND_GT:
        /* For <= or >, the -0.0 value directly compares the way we want.  */
        tcg_gen_mov_i64(dest, src);
        break;

    case TCG_COND_EQ:
    case TCG_COND_NE:
        /* For == or !=, we can simply mask off the sign bit and compare.  */
        tcg_gen_andi_i64(dest, src, mzero - 1);
        break;

    case TCG_COND_GE:
    case TCG_COND_LT:
        /* For >= or <, map -0.0 to +0.0 via comparison and mask.  */
        tcg_gen_setcondi_i64(TCG_COND_NE, dest, src, mzero);
        tcg_gen_neg_i64(dest, dest);
        tcg_gen_and_i64(dest, dest, src);
        break;

    default:
        abort();
    }
}

static DisasJumpType gen_fbcond(DisasContext *ctx, TCGCond cond, int ra,
                                int32_t disp)
{
    TCGv cmp_tmp = tcg_temp_new();
    DisasJumpType ret;

    gen_fold_mzero(cond, cmp_tmp, load_fpr(ctx, ra));
    ret = gen_bcond_internal(ctx, cond, cmp_tmp, disp);
    tcg_temp_free(cmp_tmp);
    return ret;
}

static void gen_fcmov(DisasContext *ctx, TCGCond cond, int ra, int rb, int rc)
{
    TCGv_i64 va, vb, z;

    z = load_zero(ctx);
    vb = load_fpr(ctx, rb);
    va = tcg_temp_new();
    gen_fold_mzero(cond, va, load_fpr(ctx, ra));

    tcg_gen_movcond_i64(cond, dest_fpr(ctx, rc), va, z, vb, load_fpr(ctx, rc));

    tcg_temp_free(va);
}

#define QUAL_RM_N       0x080   /* Round mode nearest even */
#define QUAL_RM_C       0x000   /* Round mode chopped */
#define QUAL_RM_M       0x040   /* Round mode minus infinity */
#define QUAL_RM_D       0x0c0   /* Round mode dynamic */
#define QUAL_RM_MASK    0x0c0

#define QUAL_U          0x100   /* Underflow enable (fp output) */
#define QUAL_V          0x100   /* Overflow enable (int output) */
#define QUAL_S          0x400   /* Software completion enable */
#define QUAL_I          0x200   /* Inexact detection enable */

static void gen_qual_roundmode(DisasContext *ctx, int fn11)
{
    TCGv_i32 tmp;

    fn11 &= QUAL_RM_MASK;
    if (fn11 == ctx->tb_rm) {
        return;
    }
    ctx->tb_rm = fn11;

    tmp = tcg_temp_new_i32();
    switch (fn11) {
    case QUAL_RM_N:
        tcg_gen_movi_i32(tmp, float_round_nearest_even);
        break;
    case QUAL_RM_C:
        tcg_gen_movi_i32(tmp, float_round_to_zero);
        break;
    case QUAL_RM_M:
        tcg_gen_movi_i32(tmp, float_round_down);
        break;
    case QUAL_RM_D:
        tcg_gen_ld8u_i32(tmp, cpu_env,
                         offsetof(CPUAlphaState, fpcr_dyn_round));
        break;
    }

#if defined(CONFIG_SOFTFLOAT_INLINE)
    /* ??? The "fpu/softfloat.h" interface is to call set_float_rounding_mode.
       With CONFIG_SOFTFLOAT that expands to an out-of-line call that just
       sets the one field.  */
    tcg_gen_st8_i32(tmp, cpu_env,
                    offsetof(CPUAlphaState, fp_status.float_rounding_mode));
#else
    gen_helper_setroundmode(tmp);
#endif

    tcg_temp_free_i32(tmp);
}

static void gen_qual_flushzero(DisasContext *ctx, int fn11)
{
    TCGv_i32 tmp;

    fn11 &= QUAL_U;
    if (fn11 == ctx->tb_ftz) {
        return;
    }
    ctx->tb_ftz = fn11;

    tmp = tcg_temp_new_i32();
    if (fn11) {
        /* Underflow is enabled, use the FPCR setting.  */
        tcg_gen_ld8u_i32(tmp, cpu_env,
                         offsetof(CPUAlphaState, fpcr_flush_to_zero));
    } else {
        /* Underflow is disabled, force flush-to-zero.  */
        tcg_gen_movi_i32(tmp, 1);
    }

#if defined(CONFIG_SOFTFLOAT_INLINE)
    tcg_gen_st8_i32(tmp, cpu_env,
                    offsetof(CPUAlphaState, fp_status.flush_to_zero));
#else
    gen_helper_setflushzero(tmp);
#endif

    tcg_temp_free_i32(tmp);
}

static TCGv gen_ieee_input(DisasContext *ctx, int reg, int fn11, int is_cmp)
{
    TCGv val;

    if (unlikely(reg == 31)) {
        val = load_zero(ctx);
    } else {
        val = cpu_fir[reg];
        if ((fn11 & QUAL_S) == 0) {
            if (is_cmp) {
                gen_helper_ieee_input_cmp(cpu_env, val);
            } else {
                gen_helper_ieee_input(cpu_env, val);
            }
        } else {
#ifndef CONFIG_USER_ONLY
            /* In system mode, raise exceptions for denormals like real
               hardware.  In user mode, proceed as if the OS completion
               handler is handling the denormal as per spec.  */
            gen_helper_ieee_input_s(cpu_env, val);
#endif
        }
    }
    return val;
}

static void gen_fp_exc_raise(int rc, int fn11)
{
    /* ??? We ought to be able to do something with imprecise exceptions.
       E.g. notice we're still in the trap shadow of something within the
       TB and do not generate the code to signal the exception; end the TB
       when an exception is forced to arrive, either by consumption of a
       register value or TRAPB or EXCB.  */

    TCGv_i32 reg, ign;
    uint32_t ignore = 0;

    if (!(fn11 & QUAL_U)) {
        /* Note that QUAL_U == QUAL_V, so ignore either.  */
        ignore |= FPCR_UNF | FPCR_IOV;
    }
    if (!(fn11 & QUAL_I)) {
        ignore |= FPCR_INE;
    }
    ign = tcg_const_i32(ignore);

    /* ??? Pass in the regno of the destination so that the helper can
       set EXC_MASK, which contains a bitmask of destination registers
       that have caused arithmetic traps.  A simple userspace emulation
       does not require this.  We do need it for a guest kernel's entArith,
       or if we were to do something clever with imprecise exceptions.  */
    reg = tcg_const_i32(rc + 32);
    if (fn11 & QUAL_S) {
        gen_helper_fp_exc_raise_s(cpu_env, ign, reg);
    } else {
        gen_helper_fp_exc_raise(cpu_env, ign, reg);
    }

    tcg_temp_free_i32(reg);
    tcg_temp_free_i32(ign);
}

static void gen_cvtlq(TCGv vc, TCGv vb)
{
    TCGv tmp = tcg_temp_new();

    /* The arithmetic right shift here, plus the sign-extended mask below
       yields a sign-extended result without an explicit ext32s_i64.  */
    tcg_gen_shri_i64(tmp, vb, 29);
    tcg_gen_sari_i64(vc, vb, 32);
    tcg_gen_deposit_i64(vc, vc, tmp, 0, 30);

    tcg_temp_free(tmp);
}

static void gen_ieee_arith2(DisasContext *ctx,
                            void (*helper)(TCGv, TCGv_ptr, TCGv),
                            int rb, int rc, int fn11)
{
    TCGv vb;

    gen_qual_roundmode(ctx, fn11);
    gen_qual_flushzero(ctx, fn11);

    vb = gen_ieee_input(ctx, rb, fn11, 0);
    helper(dest_fpr(ctx, rc), cpu_env, vb);

    gen_fp_exc_raise(rc, fn11);
}

#define IEEE_ARITH2(name)                                       \
static inline void glue(gen_, name)(DisasContext *ctx,          \
                                    int rb, int rc, int fn11)   \
{                                                               \
    gen_ieee_arith2(ctx, gen_helper_##name, rb, rc, fn11);      \
}
IEEE_ARITH2(sqrts)
IEEE_ARITH2(sqrtt)
IEEE_ARITH2(cvtst)
IEEE_ARITH2(cvtts)

static void gen_cvttq(DisasContext *ctx, int rb, int rc, int fn11)
{
    TCGv vb, vc;

    /* No need to set flushzero, since we have an integer output.  */
    vb = gen_ieee_input(ctx, rb, fn11, 0);
    vc = dest_fpr(ctx, rc);

    /* Almost all integer conversions use cropped rounding;
       special case that.  */
    if ((fn11 & QUAL_RM_MASK) == QUAL_RM_C) {
        gen_helper_cvttq_c(vc, cpu_env, vb);
    } else {
        gen_qual_roundmode(ctx, fn11);
        gen_helper_cvttq(vc, cpu_env, vb);
    }
    gen_fp_exc_raise(rc, fn11);
}

static void gen_ieee_intcvt(DisasContext *ctx,
                            void (*helper)(TCGv, TCGv_ptr, TCGv),
                            int rb, int rc, int fn11)
{
    TCGv vb, vc;

    gen_qual_roundmode(ctx, fn11);
    vb = load_fpr(ctx, rb);
    vc = dest_fpr(ctx, rc);

    /* The only exception that can be raised by integer conversion
       is inexact.  Thus we only need to worry about exceptions when
       inexact handling is requested.  */
    if (fn11 & QUAL_I) {
        helper(vc, cpu_env, vb);
        gen_fp_exc_raise(rc, fn11);
    } else {
        helper(vc, cpu_env, vb);
    }
}

#define IEEE_INTCVT(name)                                       \
static inline void glue(gen_, name)(DisasContext *ctx,          \
                                    int rb, int rc, int fn11)   \
{                                                               \
    gen_ieee_intcvt(ctx, gen_helper_##name, rb, rc, fn11);      \
}
IEEE_INTCVT(cvtqs)
IEEE_INTCVT(cvtqt)

static void gen_cpy_mask(TCGv vc, TCGv va, TCGv vb, bool inv_a, uint64_t mask)
{
    TCGv vmask = tcg_const_i64(mask);
    TCGv tmp = tcg_temp_new_i64();

    if (inv_a) {
        tcg_gen_andc_i64(tmp, vmask, va);
    } else {
        tcg_gen_and_i64(tmp, va, vmask);
    }

    tcg_gen_andc_i64(vc, vb, vmask);
    tcg_gen_or_i64(vc, vc, tmp);

    tcg_temp_free(vmask);
    tcg_temp_free(tmp);
}

*ctx
,
854 void (*helper
)(TCGv
, TCGv_ptr
, TCGv
, TCGv
),
855 int ra
, int rb
, int rc
, int fn11
)
859 gen_qual_roundmode(ctx
, fn11
);
860 gen_qual_flushzero(ctx
, fn11
);
862 va
= gen_ieee_input(ctx
, ra
, fn11
, 0);
863 vb
= gen_ieee_input(ctx
, rb
, fn11
, 0);
864 vc
= dest_fpr(ctx
, rc
);
865 helper(vc
, cpu_env
, va
, vb
);
867 gen_fp_exc_raise(rc
, fn11
);
870 #define IEEE_ARITH3(name) \
871 static inline void glue(gen_, name)(DisasContext *ctx, \
872 int ra, int rb, int rc, int fn11) \
874 gen_ieee_arith3(ctx, gen_helper_##name, ra, rb, rc, fn11); \
static void gen_ieee_compare(DisasContext *ctx,
                             void (*helper)(TCGv, TCGv_ptr, TCGv, TCGv),
                             int ra, int rb, int rc, int fn11)
{
    TCGv va, vb, vc;

    va = gen_ieee_input(ctx, ra, fn11, 1);
    vb = gen_ieee_input(ctx, rb, fn11, 1);
    vc = dest_fpr(ctx, rc);
    helper(vc, cpu_env, va, vb);

    gen_fp_exc_raise(rc, fn11);
}

#define IEEE_CMP3(name)                                                 \
static inline void glue(gen_, name)(DisasContext *ctx,                  \
                                    int ra, int rb, int rc, int fn11)   \
{                                                                       \
    gen_ieee_compare(ctx, gen_helper_##name, ra, rb, rc, fn11);         \
}
IEEE_CMP3(cmptun)
IEEE_CMP3(cmpteq)
IEEE_CMP3(cmptlt)
IEEE_CMP3(cmptle)

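/* zapnot_mask expands the 8-bit byte-select literal used by ZAP/ZAPNOT into
   the corresponding 64-bit mask: bit i of LIT selects byte i of the result. */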
static inline uint64_t zapnot_mask(uint8_t lit)
{
    uint64_t mask = 0;
    int i;

    for (i = 0; i < 8; ++i) {
        if ((lit >> i) & 1) {
            mask |= 0xffull << (i * 8);
        }
    }
    return mask;
}

/* Implement zapnot with an immediate operand, which expands to some
   form of immediate AND.  This is a basic building block in the
   definition of many of the other byte manipulation instructions.  */
static void gen_zapnoti(TCGv dest, TCGv src, uint8_t lit)
{
    switch (lit) {
    case 0x00:
        tcg_gen_movi_i64(dest, 0);
        break;
    case 0x01:
        tcg_gen_ext8u_i64(dest, src);
        break;
    case 0x03:
        tcg_gen_ext16u_i64(dest, src);
        break;
    case 0x0f:
        tcg_gen_ext32u_i64(dest, src);
        break;
    case 0xff:
        tcg_gen_mov_i64(dest, src);
        break;
    default:
        tcg_gen_andi_i64(dest, src, zapnot_mask(lit));
        break;
    }
}

/* EXTWH, EXTLH, EXTQH */
static void gen_ext_h(DisasContext *ctx, TCGv vc, TCGv va, int rb, bool islit,
                      uint8_t lit, uint8_t byte_mask)
{
    if (islit) {
        int pos = (64 - lit * 8) & 0x3f;
        int len = cto32(byte_mask) * 8;
        if (pos < len) {
            tcg_gen_deposit_z_i64(vc, va, pos, len - pos);
        } else {
            tcg_gen_movi_i64(vc, 0);
        }
    } else {
        TCGv tmp = tcg_temp_new();
        tcg_gen_shli_i64(tmp, load_gpr(ctx, rb), 3);
        tcg_gen_neg_i64(tmp, tmp);
        tcg_gen_andi_i64(tmp, tmp, 0x3f);
        tcg_gen_shl_i64(vc, va, tmp);
        tcg_temp_free(tmp);
    }
    gen_zapnoti(vc, vc, byte_mask);
}

/* EXTBL, EXTWL, EXTLL, EXTQL */
static void gen_ext_l(DisasContext *ctx, TCGv vc, TCGv va, int rb, bool islit,
                      uint8_t lit, uint8_t byte_mask)
{
    if (islit) {
        int pos = (lit & 7) * 8;
        int len = cto32(byte_mask) * 8;
        if (pos + len >= 64) {
            len = 64 - pos;
        }
        tcg_gen_extract_i64(vc, va, pos, len);
    } else {
        TCGv tmp = tcg_temp_new();
        tcg_gen_andi_i64(tmp, load_gpr(ctx, rb), 7);
        tcg_gen_shli_i64(tmp, tmp, 3);
        tcg_gen_shr_i64(vc, va, tmp);
        tcg_temp_free(tmp);
        gen_zapnoti(vc, vc, byte_mask);
    }
}

/* INSWH, INSLH, INSQH */
static void gen_ins_h(DisasContext *ctx, TCGv vc, TCGv va, int rb, bool islit,
                      uint8_t lit, uint8_t byte_mask)
{
    if (islit) {
        int pos = 64 - (lit & 7) * 8;
        int len = cto32(byte_mask) * 8;
        if (pos < len) {
            tcg_gen_extract_i64(vc, va, pos, len - pos);
        } else {
            tcg_gen_movi_i64(vc, 0);
        }
    } else {
        TCGv tmp = tcg_temp_new();
        TCGv shift = tcg_temp_new();

        /* The instruction description has us left-shift the byte mask
           and extract bits <15:8> and apply that zap at the end.  This
           is equivalent to simply performing the zap first and shifting
           afterward.  */
        gen_zapnoti(tmp, va, byte_mask);

        /* If (B & 7) == 0, we need to shift by 64 and leave a zero.  Do this
           portably by splitting the shift into two parts: shift_count-1 and 1.
           Arrange for the -1 by using ones-complement instead of
           twos-complement in the negation: ~(B * 8) & 63.  */

        tcg_gen_shli_i64(shift, load_gpr(ctx, rb), 3);
        tcg_gen_not_i64(shift, shift);
        tcg_gen_andi_i64(shift, shift, 0x3f);

        tcg_gen_shr_i64(vc, tmp, shift);
        tcg_gen_shri_i64(vc, vc, 1);
        tcg_temp_free(shift);
        tcg_temp_free(tmp);
    }
}

/* INSBL, INSWL, INSLL, INSQL */
static void gen_ins_l(DisasContext *ctx, TCGv vc, TCGv va, int rb, bool islit,
                      uint8_t lit, uint8_t byte_mask)
{
    if (islit) {
        int pos = (lit & 7) * 8;
        int len = cto32(byte_mask) * 8;
        if (pos + len > 64) {
            len = 64 - pos;
        }
        tcg_gen_deposit_z_i64(vc, va, pos, len);
    } else {
        TCGv tmp = tcg_temp_new();
        TCGv shift = tcg_temp_new();

        /* The instruction description has us left-shift the byte mask
           and extract bits <15:8> and apply that zap at the end.  This
           is equivalent to simply performing the zap first and shifting
           afterward.  */
        gen_zapnoti(tmp, va, byte_mask);

        tcg_gen_andi_i64(shift, load_gpr(ctx, rb), 7);
        tcg_gen_shli_i64(shift, shift, 3);
        tcg_gen_shl_i64(vc, tmp, shift);
        tcg_temp_free(shift);
        tcg_temp_free(tmp);
    }
}

/* MSKWH, MSKLH, MSKQH */
static void gen_msk_h(DisasContext *ctx, TCGv vc, TCGv va, int rb, bool islit,
                      uint8_t lit, uint8_t byte_mask)
{
    if (islit) {
        gen_zapnoti(vc, va, ~((byte_mask << (lit & 7)) >> 8));
    } else {
        TCGv shift = tcg_temp_new();
        TCGv mask = tcg_temp_new();

        /* The instruction description is as above, where the byte_mask
           is shifted left, and then we extract bits <15:8>.  This can be
           emulated with a right-shift on the expanded byte mask.  This
           requires extra care because for an input <2:0> == 0 we need a
           shift of 64 bits in order to generate a zero.  This is done by
           splitting the shift into two parts, the variable shift - 1
           followed by a constant 1 shift.  The code we expand below is
           equivalent to ~(B * 8) & 63.  */

        tcg_gen_shli_i64(shift, load_gpr(ctx, rb), 3);
        tcg_gen_not_i64(shift, shift);
        tcg_gen_andi_i64(shift, shift, 0x3f);
        tcg_gen_movi_i64(mask, zapnot_mask(byte_mask));
        tcg_gen_shr_i64(mask, mask, shift);
        tcg_gen_shri_i64(mask, mask, 1);

        tcg_gen_andc_i64(vc, va, mask);

        tcg_temp_free(mask);
        tcg_temp_free(shift);
    }
}

/* MSKBL, MSKWL, MSKLL, MSKQL */
static void gen_msk_l(DisasContext *ctx, TCGv vc, TCGv va, int rb, bool islit,
                      uint8_t lit, uint8_t byte_mask)
{
    if (islit) {
        gen_zapnoti(vc, va, ~(byte_mask << (lit & 7)));
    } else {
        TCGv shift = tcg_temp_new();
        TCGv mask = tcg_temp_new();

        tcg_gen_andi_i64(shift, load_gpr(ctx, rb), 7);
        tcg_gen_shli_i64(shift, shift, 3);
        tcg_gen_movi_i64(mask, zapnot_mask(byte_mask));
        tcg_gen_shl_i64(mask, mask, shift);

        tcg_gen_andc_i64(vc, va, mask);

        tcg_temp_free(mask);
        tcg_temp_free(shift);
    }
}

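/* RC and RS: read the current RX (intr_flag) byte into RA and then clear
   or set it, as selected by SET.  */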
static void gen_rx(DisasContext *ctx, int ra, int set)
{
    TCGv tmp;

    if (ra != 31) {
        ld_flag_byte(ctx->ir[ra], ENV_FLAG_RX_SHIFT);
    }

    tmp = tcg_const_i64(set);
    st_flag_byte(tmp, ENV_FLAG_RX_SHIFT);
    tcg_temp_free(tmp);
}

1129 static DisasJumpType
gen_call_pal(DisasContext
*ctx
, int palcode
)
1131 /* We're emulating OSF/1 PALcode. Many of these are trivial access
1132 to internal cpu registers. */
1134 /* Unprivileged PAL call */
1135 if (palcode
>= 0x80 && palcode
< 0xC0) {
1139 /* No-op inside QEMU. */
1143 tcg_gen_ld_i64(ctx
->ir
[IR_V0
], cpu_env
,
1144 offsetof(CPUAlphaState
, unique
));
1148 tcg_gen_st_i64(ctx
->ir
[IR_A0
], cpu_env
,
1149 offsetof(CPUAlphaState
, unique
));
1158 #ifndef CONFIG_USER_ONLY
1159 /* Privileged PAL code */
1160 if (palcode
< 0x40 && (ctx
->tbflags
& ENV_FLAG_PS_USER
) == 0) {
1164 /* No-op inside QEMU. */
1168 /* No-op inside QEMU. */
1172 tcg_gen_st_i64(ctx
->ir
[IR_A0
], cpu_env
,
1173 offsetof(CPUAlphaState
, vptptr
));
1177 tcg_gen_st_i64(ctx
->ir
[IR_A0
], cpu_env
,
1178 offsetof(CPUAlphaState
, sysval
));
1182 tcg_gen_ld_i64(ctx
->ir
[IR_V0
], cpu_env
,
1183 offsetof(CPUAlphaState
, sysval
));
1188 /* Note that we already know we're in kernel mode, so we know
1189 that PS only contains the 3 IPL bits. */
1190 ld_flag_byte(ctx
->ir
[IR_V0
], ENV_FLAG_PS_SHIFT
);
1192 /* But make sure and store only the 3 IPL bits from the user. */
1194 TCGv tmp
= tcg_temp_new();
1195 tcg_gen_andi_i64(tmp
, ctx
->ir
[IR_A0
], PS_INT_MASK
);
1196 st_flag_byte(tmp
, ENV_FLAG_PS_SHIFT
);
1200 /* Allow interrupts to be recognized right away. */
1201 tcg_gen_movi_i64(cpu_pc
, ctx
->base
.pc_next
);
1202 return DISAS_PC_UPDATED_NOCHAIN
;
1206 ld_flag_byte(ctx
->ir
[IR_V0
], ENV_FLAG_PS_SHIFT
);
1211 tcg_gen_st_i64(ctx
->ir
[IR_A0
], cpu_env
,
1212 offsetof(CPUAlphaState
, usp
));
1216 tcg_gen_ld_i64(ctx
->ir
[IR_V0
], cpu_env
,
1217 offsetof(CPUAlphaState
, usp
));
1221 tcg_gen_ld32s_i64(ctx
->ir
[IR_V0
], cpu_env
,
1222 -offsetof(AlphaCPU
, env
) + offsetof(CPUState
, cpu_index
));
1228 TCGv_i32 tmp
= tcg_const_i32(1);
1229 tcg_gen_st_i32(tmp
, cpu_env
, -offsetof(AlphaCPU
, env
) +
1230 offsetof(CPUState
, halted
));
1231 tcg_temp_free_i32(tmp
);
1233 tcg_gen_movi_i64(ctx
->ir
[IR_V0
], 0);
1234 return gen_excp(ctx
, EXCP_HALTED
, 0);
1243 return gen_invalid(ctx
);
1246 #ifdef CONFIG_USER_ONLY
1247 return gen_excp(ctx
, EXCP_CALL_PAL
, palcode
);
1250 TCGv tmp
= tcg_temp_new();
1251 uint64_t exc_addr
= ctx
->base
.pc_next
;
1252 uint64_t entry
= ctx
->palbr
;
1254 if (ctx
->tbflags
& ENV_FLAG_PAL_MODE
) {
1257 tcg_gen_movi_i64(tmp
, 1);
1258 st_flag_byte(tmp
, ENV_FLAG_PAL_SHIFT
);
1261 tcg_gen_movi_i64(tmp
, exc_addr
);
1262 tcg_gen_st_i64(tmp
, cpu_env
, offsetof(CPUAlphaState
, exc_addr
));
1265 entry
+= (palcode
& 0x80
1266 ? 0x2000 + (palcode
- 0x80) * 64
1267 : 0x1000 + palcode
* 64);
1269 /* Since the destination is running in PALmode, we don't really
1270 need the page permissions check. We'll see the existence of
1271 the page when we create the TB, and we'll flush all TBs if
1272 we change the PAL base register. */
1273 if (!use_exit_tb(ctx
)) {
1275 tcg_gen_movi_i64(cpu_pc
, entry
);
1276 tcg_gen_exit_tb(ctx
->base
.tb
, 0);
1277 return DISAS_NORETURN
;
1279 tcg_gen_movi_i64(cpu_pc
, entry
);
1280 return DISAS_PC_UPDATED
;
#ifndef CONFIG_USER_ONLY

#define PR_LONG         0x200000

static int cpu_pr_data(int pr)
{
    switch (pr) {
    case  2: return offsetof(CPUAlphaState, pcc_ofs) | PR_LONG;
    case  3: return offsetof(CPUAlphaState, trap_arg0);
    case  4: return offsetof(CPUAlphaState, trap_arg1);
    case  5: return offsetof(CPUAlphaState, trap_arg2);
    case  6: return offsetof(CPUAlphaState, exc_addr);
    case  7: return offsetof(CPUAlphaState, palbr);
    case  8: return offsetof(CPUAlphaState, ptbr);
    case  9: return offsetof(CPUAlphaState, vptptr);
    case 10: return offsetof(CPUAlphaState, unique);
    case 11: return offsetof(CPUAlphaState, sysval);
    case 12: return offsetof(CPUAlphaState, usp);

    case 40 ... 63:
        return offsetof(CPUAlphaState, scratch[pr - 40]);

    case 251:
        return offsetof(CPUAlphaState, alarm_expire);
    }
    return 0;
}

1314 static DisasJumpType
gen_mfpr(DisasContext
*ctx
, TCGv va
, int regno
)
1316 void (*helper
)(TCGv
);
1321 /* Accessing the "non-shadow" general registers. */
1322 regno
= regno
== 39 ? 25 : regno
- 32 + 8;
1323 tcg_gen_mov_i64(va
, cpu_std_ir
[regno
]);
1326 case 250: /* WALLTIME */
1327 helper
= gen_helper_get_walltime
;
1329 case 249: /* VMTIME */
1330 helper
= gen_helper_get_vmtime
;
1335 return DISAS_PC_STALE
;
1342 ld_flag_byte(va
, ENV_FLAG_PS_SHIFT
);
1345 ld_flag_byte(va
, ENV_FLAG_FEN_SHIFT
);
1349 /* The basic registers are data only, and unknown registers
1350 are read-zero, write-ignore. */
1351 data
= cpu_pr_data(regno
);
1353 tcg_gen_movi_i64(va
, 0);
1354 } else if (data
& PR_LONG
) {
1355 tcg_gen_ld32s_i64(va
, cpu_env
, data
& ~PR_LONG
);
1357 tcg_gen_ld_i64(va
, cpu_env
, data
);
1365 static DisasJumpType
gen_mtpr(DisasContext
*ctx
, TCGv vb
, int regno
)
1372 gen_helper_tbia(cpu_env
);
1377 gen_helper_tbis(cpu_env
, vb
);
1383 TCGv_i32 tmp
= tcg_const_i32(1);
1384 tcg_gen_st_i32(tmp
, cpu_env
, -offsetof(AlphaCPU
, env
) +
1385 offsetof(CPUState
, halted
));
1386 tcg_temp_free_i32(tmp
);
1388 return gen_excp(ctx
, EXCP_HALTED
, 0);
1392 gen_helper_halt(vb
);
1393 return DISAS_PC_STALE
;
1397 gen_helper_set_alarm(cpu_env
, vb
);
1402 tcg_gen_st_i64(vb
, cpu_env
, offsetof(CPUAlphaState
, palbr
));
1403 /* Changing the PAL base register implies un-chaining all of the TBs
1404 that ended with a CALL_PAL. Since the base register usually only
1405 changes during boot, flushing everything works well. */
1406 gen_helper_tb_flush(cpu_env
);
1407 return DISAS_PC_STALE
;
1410 /* Accessing the "non-shadow" general registers. */
1411 regno
= regno
== 39 ? 25 : regno
- 32 + 8;
1412 tcg_gen_mov_i64(cpu_std_ir
[regno
], vb
);
1416 st_flag_byte(vb
, ENV_FLAG_PS_SHIFT
);
1419 st_flag_byte(vb
, ENV_FLAG_FEN_SHIFT
);
1423 /* The basic registers are data only, and unknown registers
1424 are read-zero, write-ignore. */
1425 data
= cpu_pr_data(regno
);
1427 if (data
& PR_LONG
) {
1428 tcg_gen_st32_i64(vb
, cpu_env
, data
& ~PR_LONG
);
1430 tcg_gen_st_i64(vb
, cpu_env
, data
);
#endif /* !USER_ONLY */

#define REQUIRE_NO_LIT                          \
    do {                                        \
        if (real_islit) {                       \
            goto invalid_opc;                   \
        }                                       \
    } while (0)

#define REQUIRE_AMASK(FLAG)                     \
    do {                                        \
        if ((ctx->amask & AMASK_##FLAG) == 0) { \
            goto invalid_opc;                   \
        }                                       \
    } while (0)

#define REQUIRE_TB_FLAG(FLAG)                   \
    do {                                        \
        if ((ctx->tbflags & (FLAG)) == 0) {     \
            goto invalid_opc;                   \
        }                                       \
    } while (0)

#define REQUIRE_REG_31(WHICH)                   \
    do {                                        \
        if (WHICH != 31) {                      \
            goto invalid_opc;                   \
        }                                       \
    } while (0)

static DisasJumpType translate_one(DisasContext *ctx, uint32_t insn)
{
    int32_t disp21, disp16, disp12 __attribute__((unused));
    uint16_t fn11;
    uint8_t opc, ra, rb, rc, fpfn, fn7, lit;
    bool islit, real_islit;
    TCGv va, vb, vc, tmp, tmp2;
    TCGv_i32 t32;
    DisasJumpType ret;

    /* Decode all instruction fields */
    opc = extract32(insn, 26, 6);
    ra = extract32(insn, 21, 5);
    rb = extract32(insn, 16, 5);
    rc = extract32(insn, 0, 5);
    real_islit = islit = extract32(insn, 12, 1);
    lit = extract32(insn, 13, 8);

    disp21 = sextract32(insn, 0, 21);
    disp16 = sextract32(insn, 0, 16);
    disp12 = sextract32(insn, 0, 12);

    fn11 = extract32(insn, 5, 11);
    fpfn = extract32(insn, 5, 6);
    fn7 = extract32(insn, 5, 7);

    if (rb == 31 && !islit) {
        islit = true;
        lit = 0;
    }

    ret = DISAS_NEXT;
    switch (opc) {
    case 0x00:
        /* CALL_PAL */
        ret = gen_call_pal(ctx, insn & 0x03ffffff);
        break;
1529 disp16
= (uint32_t)disp16
<< 16;
1533 va
= dest_gpr(ctx
, ra
);
1534 /* It's worth special-casing immediate loads. */
1536 tcg_gen_movi_i64(va
, disp16
);
1538 tcg_gen_addi_i64(va
, load_gpr(ctx
, rb
), disp16
);
1545 gen_load_mem(ctx
, &tcg_gen_qemu_ld8u
, ra
, rb
, disp16
, 0, 0);
1549 gen_load_mem(ctx
, &tcg_gen_qemu_ld64
, ra
, rb
, disp16
, 0, 1);
1554 gen_load_mem(ctx
, &tcg_gen_qemu_ld16u
, ra
, rb
, disp16
, 0, 0);
1559 gen_store_mem(ctx
, &tcg_gen_qemu_st16
, ra
, rb
, disp16
, 0, 0);
1564 gen_store_mem(ctx
, &tcg_gen_qemu_st8
, ra
, rb
, disp16
, 0, 0);
1568 gen_store_mem(ctx
, &tcg_gen_qemu_st64
, ra
, rb
, disp16
, 0, 1);
1572 vc
= dest_gpr(ctx
, rc
);
1573 vb
= load_gpr_lit(ctx
, rb
, lit
, islit
);
1577 /* Special case ADDL as SEXTL. */
1578 tcg_gen_ext32s_i64(vc
, vb
);
1582 /* Special case SUBQ as NEGQ. */
1583 tcg_gen_neg_i64(vc
, vb
);
1588 va
= load_gpr(ctx
, ra
);
1592 tcg_gen_add_i64(vc
, va
, vb
);
1593 tcg_gen_ext32s_i64(vc
, vc
);
1597 tmp
= tcg_temp_new();
1598 tcg_gen_shli_i64(tmp
, va
, 2);
1599 tcg_gen_add_i64(tmp
, tmp
, vb
);
1600 tcg_gen_ext32s_i64(vc
, tmp
);
1605 tcg_gen_sub_i64(vc
, va
, vb
);
1606 tcg_gen_ext32s_i64(vc
, vc
);
1610 tmp
= tcg_temp_new();
1611 tcg_gen_shli_i64(tmp
, va
, 2);
1612 tcg_gen_sub_i64(tmp
, tmp
, vb
);
1613 tcg_gen_ext32s_i64(vc
, tmp
);
1619 /* Special case 0 >= X as X == 0. */
1620 gen_helper_cmpbe0(vc
, vb
);
1622 gen_helper_cmpbge(vc
, va
, vb
);
1627 tmp
= tcg_temp_new();
1628 tcg_gen_shli_i64(tmp
, va
, 3);
1629 tcg_gen_add_i64(tmp
, tmp
, vb
);
1630 tcg_gen_ext32s_i64(vc
, tmp
);
1635 tmp
= tcg_temp_new();
1636 tcg_gen_shli_i64(tmp
, va
, 3);
1637 tcg_gen_sub_i64(tmp
, tmp
, vb
);
1638 tcg_gen_ext32s_i64(vc
, tmp
);
1643 tcg_gen_setcond_i64(TCG_COND_LTU
, vc
, va
, vb
);
1647 tcg_gen_add_i64(vc
, va
, vb
);
1651 tmp
= tcg_temp_new();
1652 tcg_gen_shli_i64(tmp
, va
, 2);
1653 tcg_gen_add_i64(vc
, tmp
, vb
);
1658 tcg_gen_sub_i64(vc
, va
, vb
);
1662 tmp
= tcg_temp_new();
1663 tcg_gen_shli_i64(tmp
, va
, 2);
1664 tcg_gen_sub_i64(vc
, tmp
, vb
);
1669 tcg_gen_setcond_i64(TCG_COND_EQ
, vc
, va
, vb
);
1673 tmp
= tcg_temp_new();
1674 tcg_gen_shli_i64(tmp
, va
, 3);
1675 tcg_gen_add_i64(vc
, tmp
, vb
);
1680 tmp
= tcg_temp_new();
1681 tcg_gen_shli_i64(tmp
, va
, 3);
1682 tcg_gen_sub_i64(vc
, tmp
, vb
);
1687 tcg_gen_setcond_i64(TCG_COND_LEU
, vc
, va
, vb
);
1691 tmp
= tcg_temp_new();
1692 tcg_gen_ext32s_i64(tmp
, va
);
1693 tcg_gen_ext32s_i64(vc
, vb
);
1694 tcg_gen_add_i64(tmp
, tmp
, vc
);
1695 tcg_gen_ext32s_i64(vc
, tmp
);
1696 gen_helper_check_overflow(cpu_env
, vc
, tmp
);
1701 tmp
= tcg_temp_new();
1702 tcg_gen_ext32s_i64(tmp
, va
);
1703 tcg_gen_ext32s_i64(vc
, vb
);
1704 tcg_gen_sub_i64(tmp
, tmp
, vc
);
1705 tcg_gen_ext32s_i64(vc
, tmp
);
1706 gen_helper_check_overflow(cpu_env
, vc
, tmp
);
1711 tcg_gen_setcond_i64(TCG_COND_LT
, vc
, va
, vb
);
1715 tmp
= tcg_temp_new();
1716 tmp2
= tcg_temp_new();
1717 tcg_gen_eqv_i64(tmp
, va
, vb
);
1718 tcg_gen_mov_i64(tmp2
, va
);
1719 tcg_gen_add_i64(vc
, va
, vb
);
1720 tcg_gen_xor_i64(tmp2
, tmp2
, vc
);
1721 tcg_gen_and_i64(tmp
, tmp
, tmp2
);
1722 tcg_gen_shri_i64(tmp
, tmp
, 63);
1723 tcg_gen_movi_i64(tmp2
, 0);
1724 gen_helper_check_overflow(cpu_env
, tmp
, tmp2
);
1726 tcg_temp_free(tmp2
);
1730 tmp
= tcg_temp_new();
1731 tmp2
= tcg_temp_new();
1732 tcg_gen_xor_i64(tmp
, va
, vb
);
1733 tcg_gen_mov_i64(tmp2
, va
);
1734 tcg_gen_sub_i64(vc
, va
, vb
);
1735 tcg_gen_xor_i64(tmp2
, tmp2
, vc
);
1736 tcg_gen_and_i64(tmp
, tmp
, tmp2
);
1737 tcg_gen_shri_i64(tmp
, tmp
, 63);
1738 tcg_gen_movi_i64(tmp2
, 0);
1739 gen_helper_check_overflow(cpu_env
, tmp
, tmp2
);
1741 tcg_temp_free(tmp2
);
1745 tcg_gen_setcond_i64(TCG_COND_LE
, vc
, va
, vb
);
1755 /* Special case BIS as NOP. */
1759 /* Special case BIS as MOV. */
1760 vc
= dest_gpr(ctx
, rc
);
1762 tcg_gen_movi_i64(vc
, lit
);
1764 tcg_gen_mov_i64(vc
, load_gpr(ctx
, rb
));
1770 vc
= dest_gpr(ctx
, rc
);
1771 vb
= load_gpr_lit(ctx
, rb
, lit
, islit
);
1773 if (fn7
== 0x28 && ra
== 31) {
1774 /* Special case ORNOT as NOT. */
1775 tcg_gen_not_i64(vc
, vb
);
1779 va
= load_gpr(ctx
, ra
);
1783 tcg_gen_and_i64(vc
, va
, vb
);
1787 tcg_gen_andc_i64(vc
, va
, vb
);
1791 tmp
= tcg_temp_new();
1792 tcg_gen_andi_i64(tmp
, va
, 1);
1793 tcg_gen_movcond_i64(TCG_COND_NE
, vc
, tmp
, load_zero(ctx
),
1794 vb
, load_gpr(ctx
, rc
));
1799 tmp
= tcg_temp_new();
1800 tcg_gen_andi_i64(tmp
, va
, 1);
1801 tcg_gen_movcond_i64(TCG_COND_EQ
, vc
, tmp
, load_zero(ctx
),
1802 vb
, load_gpr(ctx
, rc
));
1807 tcg_gen_or_i64(vc
, va
, vb
);
1811 tcg_gen_movcond_i64(TCG_COND_EQ
, vc
, va
, load_zero(ctx
),
1812 vb
, load_gpr(ctx
, rc
));
1816 tcg_gen_movcond_i64(TCG_COND_NE
, vc
, va
, load_zero(ctx
),
1817 vb
, load_gpr(ctx
, rc
));
1821 tcg_gen_orc_i64(vc
, va
, vb
);
1825 tcg_gen_xor_i64(vc
, va
, vb
);
1829 tcg_gen_movcond_i64(TCG_COND_LT
, vc
, va
, load_zero(ctx
),
1830 vb
, load_gpr(ctx
, rc
));
1834 tcg_gen_movcond_i64(TCG_COND_GE
, vc
, va
, load_zero(ctx
),
1835 vb
, load_gpr(ctx
, rc
));
1839 tcg_gen_eqv_i64(vc
, va
, vb
);
1844 tcg_gen_andi_i64(vc
, vb
, ~ctx
->amask
);
1848 tcg_gen_movcond_i64(TCG_COND_LE
, vc
, va
, load_zero(ctx
),
1849 vb
, load_gpr(ctx
, rc
));
1853 tcg_gen_movcond_i64(TCG_COND_GT
, vc
, va
, load_zero(ctx
),
1854 vb
, load_gpr(ctx
, rc
));
1859 tcg_gen_movi_i64(vc
, ctx
->implver
);
1867 vc
= dest_gpr(ctx
, rc
);
1868 va
= load_gpr(ctx
, ra
);
1872 gen_msk_l(ctx
, vc
, va
, rb
, islit
, lit
, 0x01);
1876 gen_ext_l(ctx
, vc
, va
, rb
, islit
, lit
, 0x01);
1880 gen_ins_l(ctx
, vc
, va
, rb
, islit
, lit
, 0x01);
1884 gen_msk_l(ctx
, vc
, va
, rb
, islit
, lit
, 0x03);
1888 gen_ext_l(ctx
, vc
, va
, rb
, islit
, lit
, 0x03);
1892 gen_ins_l(ctx
, vc
, va
, rb
, islit
, lit
, 0x03);
1896 gen_msk_l(ctx
, vc
, va
, rb
, islit
, lit
, 0x0f);
1900 gen_ext_l(ctx
, vc
, va
, rb
, islit
, lit
, 0x0f);
1904 gen_ins_l(ctx
, vc
, va
, rb
, islit
, lit
, 0x0f);
1909 gen_zapnoti(vc
, va
, ~lit
);
1911 gen_helper_zap(vc
, va
, load_gpr(ctx
, rb
));
1917 gen_zapnoti(vc
, va
, lit
);
1919 gen_helper_zapnot(vc
, va
, load_gpr(ctx
, rb
));
1924 gen_msk_l(ctx
, vc
, va
, rb
, islit
, lit
, 0xff);
1929 tcg_gen_shri_i64(vc
, va
, lit
& 0x3f);
1931 tmp
= tcg_temp_new();
1932 vb
= load_gpr(ctx
, rb
);
1933 tcg_gen_andi_i64(tmp
, vb
, 0x3f);
1934 tcg_gen_shr_i64(vc
, va
, tmp
);
1940 gen_ext_l(ctx
, vc
, va
, rb
, islit
, lit
, 0xff);
1945 tcg_gen_shli_i64(vc
, va
, lit
& 0x3f);
1947 tmp
= tcg_temp_new();
1948 vb
= load_gpr(ctx
, rb
);
1949 tcg_gen_andi_i64(tmp
, vb
, 0x3f);
1950 tcg_gen_shl_i64(vc
, va
, tmp
);
1956 gen_ins_l(ctx
, vc
, va
, rb
, islit
, lit
, 0xff);
1961 tcg_gen_sari_i64(vc
, va
, lit
& 0x3f);
1963 tmp
= tcg_temp_new();
1964 vb
= load_gpr(ctx
, rb
);
1965 tcg_gen_andi_i64(tmp
, vb
, 0x3f);
1966 tcg_gen_sar_i64(vc
, va
, tmp
);
1972 gen_msk_h(ctx
, vc
, va
, rb
, islit
, lit
, 0x03);
1976 gen_ins_h(ctx
, vc
, va
, rb
, islit
, lit
, 0x03);
1980 gen_ext_h(ctx
, vc
, va
, rb
, islit
, lit
, 0x03);
1984 gen_msk_h(ctx
, vc
, va
, rb
, islit
, lit
, 0x0f);
1988 gen_ins_h(ctx
, vc
, va
, rb
, islit
, lit
, 0x0f);
1992 gen_ext_h(ctx
, vc
, va
, rb
, islit
, lit
, 0x0f);
1996 gen_msk_h(ctx
, vc
, va
, rb
, islit
, lit
, 0xff);
2000 gen_ins_h(ctx
, vc
, va
, rb
, islit
, lit
, 0xff);
2004 gen_ext_h(ctx
, vc
, va
, rb
, islit
, lit
, 0xff);
2012 vc
= dest_gpr(ctx
, rc
);
2013 vb
= load_gpr_lit(ctx
, rb
, lit
, islit
);
2014 va
= load_gpr(ctx
, ra
);
2018 tcg_gen_mul_i64(vc
, va
, vb
);
2019 tcg_gen_ext32s_i64(vc
, vc
);
2023 tcg_gen_mul_i64(vc
, va
, vb
);
2027 tmp
= tcg_temp_new();
2028 tcg_gen_mulu2_i64(tmp
, vc
, va
, vb
);
2033 tmp
= tcg_temp_new();
2034 tcg_gen_ext32s_i64(tmp
, va
);
2035 tcg_gen_ext32s_i64(vc
, vb
);
2036 tcg_gen_mul_i64(tmp
, tmp
, vc
);
2037 tcg_gen_ext32s_i64(vc
, tmp
);
2038 gen_helper_check_overflow(cpu_env
, vc
, tmp
);
2043 tmp
= tcg_temp_new();
2044 tmp2
= tcg_temp_new();
2045 tcg_gen_muls2_i64(vc
, tmp
, va
, vb
);
2046 tcg_gen_sari_i64(tmp2
, vc
, 63);
2047 gen_helper_check_overflow(cpu_env
, tmp
, tmp2
);
2049 tcg_temp_free(tmp2
);
2058 vc
= dest_fpr(ctx
, rc
);
2059 switch (fpfn
) { /* fn11 & 0x3F */
2063 t32
= tcg_temp_new_i32();
2064 va
= load_gpr(ctx
, ra
);
2065 tcg_gen_extrl_i64_i32(t32
, va
);
2066 gen_helper_memory_to_s(vc
, t32
);
2067 tcg_temp_free_i32(t32
);
2072 vb
= load_fpr(ctx
, rb
);
2073 gen_helper_sqrtf(vc
, cpu_env
, vb
);
2078 gen_sqrts(ctx
, rb
, rc
, fn11
);
2083 t32
= tcg_temp_new_i32();
2084 va
= load_gpr(ctx
, ra
);
2085 tcg_gen_extrl_i64_i32(t32
, va
);
2086 gen_helper_memory_to_f(vc
, t32
);
2087 tcg_temp_free_i32(t32
);
2092 va
= load_gpr(ctx
, ra
);
2093 tcg_gen_mov_i64(vc
, va
);
2098 vb
= load_fpr(ctx
, rb
);
2099 gen_helper_sqrtg(vc
, cpu_env
, vb
);
2104 gen_sqrtt(ctx
, rb
, rc
, fn11
);
2112 /* VAX floating point */
2113 /* XXX: rounding mode and trap are ignored (!) */
2114 vc
= dest_fpr(ctx
, rc
);
2115 vb
= load_fpr(ctx
, rb
);
2116 va
= load_fpr(ctx
, ra
);
2117 switch (fpfn
) { /* fn11 & 0x3F */
2120 gen_helper_addf(vc
, cpu_env
, va
, vb
);
2124 gen_helper_subf(vc
, cpu_env
, va
, vb
);
2128 gen_helper_mulf(vc
, cpu_env
, va
, vb
);
2132 gen_helper_divf(vc
, cpu_env
, va
, vb
);
2140 gen_helper_addg(vc
, cpu_env
, va
, vb
);
2144 gen_helper_subg(vc
, cpu_env
, va
, vb
);
2148 gen_helper_mulg(vc
, cpu_env
, va
, vb
);
2152 gen_helper_divg(vc
, cpu_env
, va
, vb
);
2156 gen_helper_cmpgeq(vc
, cpu_env
, va
, vb
);
2160 gen_helper_cmpglt(vc
, cpu_env
, va
, vb
);
2164 gen_helper_cmpgle(vc
, cpu_env
, va
, vb
);
2169 gen_helper_cvtgf(vc
, cpu_env
, vb
);
2178 gen_helper_cvtgq(vc
, cpu_env
, vb
);
2183 gen_helper_cvtqf(vc
, cpu_env
, vb
);
2188 gen_helper_cvtqg(vc
, cpu_env
, vb
);
2196 /* IEEE floating-point */
2197 switch (fpfn
) { /* fn11 & 0x3F */
2200 gen_adds(ctx
, ra
, rb
, rc
, fn11
);
2204 gen_subs(ctx
, ra
, rb
, rc
, fn11
);
2208 gen_muls(ctx
, ra
, rb
, rc
, fn11
);
2212 gen_divs(ctx
, ra
, rb
, rc
, fn11
);
2216 gen_addt(ctx
, ra
, rb
, rc
, fn11
);
2220 gen_subt(ctx
, ra
, rb
, rc
, fn11
);
2224 gen_mult(ctx
, ra
, rb
, rc
, fn11
);
2228 gen_divt(ctx
, ra
, rb
, rc
, fn11
);
2232 gen_cmptun(ctx
, ra
, rb
, rc
, fn11
);
2236 gen_cmpteq(ctx
, ra
, rb
, rc
, fn11
);
2240 gen_cmptlt(ctx
, ra
, rb
, rc
, fn11
);
2244 gen_cmptle(ctx
, ra
, rb
, rc
, fn11
);
2248 if (fn11
== 0x2AC || fn11
== 0x6AC) {
2250 gen_cvtst(ctx
, rb
, rc
, fn11
);
2253 gen_cvtts(ctx
, rb
, rc
, fn11
);
2259 gen_cvttq(ctx
, rb
, rc
, fn11
);
2264 gen_cvtqs(ctx
, rb
, rc
, fn11
);
2269 gen_cvtqt(ctx
, rb
, rc
, fn11
);
2281 vc
= dest_fpr(ctx
, rc
);
2282 vb
= load_fpr(ctx
, rb
);
2288 /* Special case CPYS as FNOP. */
2290 vc
= dest_fpr(ctx
, rc
);
2291 va
= load_fpr(ctx
, ra
);
2293 /* Special case CPYS as FMOV. */
2294 tcg_gen_mov_i64(vc
, va
);
2296 vb
= load_fpr(ctx
, rb
);
2297 gen_cpy_mask(vc
, va
, vb
, 0, 0x8000000000000000ULL
);
2303 vc
= dest_fpr(ctx
, rc
);
2304 vb
= load_fpr(ctx
, rb
);
2305 va
= load_fpr(ctx
, ra
);
2306 gen_cpy_mask(vc
, va
, vb
, 1, 0x8000000000000000ULL
);
2310 vc
= dest_fpr(ctx
, rc
);
2311 vb
= load_fpr(ctx
, rb
);
2312 va
= load_fpr(ctx
, ra
);
2313 gen_cpy_mask(vc
, va
, vb
, 0, 0xFFF0000000000000ULL
);
2317 va
= load_fpr(ctx
, ra
);
2318 gen_helper_store_fpcr(cpu_env
, va
);
2319 if (ctx
->tb_rm
== QUAL_RM_D
) {
2320 /* Re-do the copy of the rounding mode to fp_status
2321 the next time we use dynamic rounding. */
2327 va
= dest_fpr(ctx
, ra
);
2328 gen_helper_load_fpcr(va
, cpu_env
);
2332 gen_fcmov(ctx
, TCG_COND_EQ
, ra
, rb
, rc
);
2336 gen_fcmov(ctx
, TCG_COND_NE
, ra
, rb
, rc
);
2340 gen_fcmov(ctx
, TCG_COND_LT
, ra
, rb
, rc
);
2344 gen_fcmov(ctx
, TCG_COND_GE
, ra
, rb
, rc
);
2348 gen_fcmov(ctx
, TCG_COND_LE
, ra
, rb
, rc
);
2352 gen_fcmov(ctx
, TCG_COND_GT
, ra
, rb
, rc
);
2354 case 0x030: /* CVTQL */
2355 case 0x130: /* CVTQL/V */
2356 case 0x530: /* CVTQL/SV */
2358 vc
= dest_fpr(ctx
, rc
);
2359 vb
= load_fpr(ctx
, rb
);
2360 gen_helper_cvtql(vc
, cpu_env
, vb
);
2361 gen_fp_exc_raise(rc
, fn11
);
2369 switch ((uint16_t)disp16
) {
2380 tcg_gen_mb(TCG_MO_ALL
| TCG_BAR_SC
);
2384 tcg_gen_mb(TCG_MO_ST_ST
| TCG_BAR_SC
);
2396 va
= dest_gpr(ctx
, ra
);
2397 if (tb_cflags(ctx
->base
.tb
) & CF_USE_ICOUNT
) {
2399 gen_helper_load_pcc(va
, cpu_env
);
2400 ret
= DISAS_PC_STALE
;
2402 gen_helper_load_pcc(va
, cpu_env
);
2430 /* HW_MFPR (PALcode) */
2431 #ifndef CONFIG_USER_ONLY
2432 REQUIRE_TB_FLAG(ENV_FLAG_PAL_MODE
);
2433 va
= dest_gpr(ctx
, ra
);
2434 ret
= gen_mfpr(ctx
, va
, insn
& 0xffff);
2441 /* JMP, JSR, RET, JSR_COROUTINE. These only differ by the branch
2442 prediction stack action, which of course we don't implement. */
2443 vb
= load_gpr(ctx
, rb
);
2444 tcg_gen_andi_i64(cpu_pc
, vb
, ~3);
2446 tcg_gen_movi_i64(ctx
->ir
[ra
], ctx
->base
.pc_next
);
2448 ret
= DISAS_PC_UPDATED
;
2452 /* HW_LD (PALcode) */
2453 #ifndef CONFIG_USER_ONLY
2454 REQUIRE_TB_FLAG(ENV_FLAG_PAL_MODE
);
2456 TCGv addr
= tcg_temp_new();
2457 vb
= load_gpr(ctx
, rb
);
2458 va
= dest_gpr(ctx
, ra
);
2460 tcg_gen_addi_i64(addr
, vb
, disp12
);
2461 switch ((insn
>> 12) & 0xF) {
2463 /* Longword physical access (hw_ldl/p) */
2464 tcg_gen_qemu_ld_i64(va
, addr
, MMU_PHYS_IDX
, MO_LESL
);
2467 /* Quadword physical access (hw_ldq/p) */
2468 tcg_gen_qemu_ld_i64(va
, addr
, MMU_PHYS_IDX
, MO_LEQ
);
2471 /* Longword physical access with lock (hw_ldl_l/p) */
2472 gen_qemu_ldl_l(va
, addr
, MMU_PHYS_IDX
);
2475 /* Quadword physical access with lock (hw_ldq_l/p) */
2476 gen_qemu_ldq_l(va
, addr
, MMU_PHYS_IDX
);
2479 /* Longword virtual PTE fetch (hw_ldl/v) */
2482 /* Quadword virtual PTE fetch (hw_ldq/v) */
2492 /* Longword virtual access (hw_ldl) */
2495 /* Quadword virtual access (hw_ldq) */
2498 /* Longword virtual access with protection check (hw_ldl/w) */
2499 tcg_gen_qemu_ld_i64(va
, addr
, MMU_KERNEL_IDX
, MO_LESL
);
2502 /* Quadword virtual access with protection check (hw_ldq/w) */
2503 tcg_gen_qemu_ld_i64(va
, addr
, MMU_KERNEL_IDX
, MO_LEQ
);
2506 /* Longword virtual access with alt access mode (hw_ldl/a)*/
2509 /* Quadword virtual access with alt access mode (hw_ldq/a) */
2512 /* Longword virtual access with alternate access mode and
2513 protection checks (hw_ldl/wa) */
2514 tcg_gen_qemu_ld_i64(va
, addr
, MMU_USER_IDX
, MO_LESL
);
2517 /* Quadword virtual access with alternate access mode and
2518 protection checks (hw_ldq/wa) */
2519 tcg_gen_qemu_ld_i64(va
, addr
, MMU_USER_IDX
, MO_LEQ
);
2522 tcg_temp_free(addr
);
2530 vc
= dest_gpr(ctx
, rc
);
2535 va
= load_fpr(ctx
, ra
);
2536 tcg_gen_mov_i64(vc
, va
);
2538 } else if (fn7
== 0x78) {
2542 t32
= tcg_temp_new_i32();
2543 va
= load_fpr(ctx
, ra
);
2544 gen_helper_s_to_memory(t32
, va
);
2545 tcg_gen_ext_i32_i64(vc
, t32
);
2546 tcg_temp_free_i32(t32
);
2550 vb
= load_gpr_lit(ctx
, rb
, lit
, islit
);
2556 tcg_gen_ext8s_i64(vc
, vb
);
2562 tcg_gen_ext16s_i64(vc
, vb
);
2569 tcg_gen_ctpop_i64(vc
, vb
);
2575 va
= load_gpr(ctx
, ra
);
2576 gen_helper_perr(vc
, va
, vb
);
2583 tcg_gen_clzi_i64(vc
, vb
, 64);
2590 tcg_gen_ctzi_i64(vc
, vb
, 64);
2597 gen_helper_unpkbw(vc
, vb
);
2604 gen_helper_unpkbl(vc
, vb
);
2611 gen_helper_pkwb(vc
, vb
);
2618 gen_helper_pklb(vc
, vb
);
2623 va
= load_gpr(ctx
, ra
);
2624 gen_helper_minsb8(vc
, va
, vb
);
2629 va
= load_gpr(ctx
, ra
);
2630 gen_helper_minsw4(vc
, va
, vb
);
2635 va
= load_gpr(ctx
, ra
);
2636 gen_helper_minub8(vc
, va
, vb
);
2641 va
= load_gpr(ctx
, ra
);
2642 gen_helper_minuw4(vc
, va
, vb
);
2647 va
= load_gpr(ctx
, ra
);
2648 gen_helper_maxub8(vc
, va
, vb
);
2653 va
= load_gpr(ctx
, ra
);
2654 gen_helper_maxuw4(vc
, va
, vb
);
2659 va
= load_gpr(ctx
, ra
);
2660 gen_helper_maxsb8(vc
, va
, vb
);
2665 va
= load_gpr(ctx
, ra
);
2666 gen_helper_maxsw4(vc
, va
, vb
);
2674 /* HW_MTPR (PALcode) */
2675 #ifndef CONFIG_USER_ONLY
2676 REQUIRE_TB_FLAG(ENV_FLAG_PAL_MODE
);
2677 vb
= load_gpr(ctx
, rb
);
2678 ret
= gen_mtpr(ctx
, vb
, insn
& 0xffff);
2685 /* HW_RET (PALcode) */
2686 #ifndef CONFIG_USER_ONLY
2687 REQUIRE_TB_FLAG(ENV_FLAG_PAL_MODE
);
2689 /* Pre-EV6 CPUs interpreted this as HW_REI, loading the return
2690 address from EXC_ADDR. This turns out to be useful for our
2691 emulation PALcode, so continue to accept it. */
2692 ctx
->lit
= vb
= tcg_temp_new();
2693 tcg_gen_ld_i64(vb
, cpu_env
, offsetof(CPUAlphaState
, exc_addr
));
2695 vb
= load_gpr(ctx
, rb
);
2697 tcg_gen_movi_i64(cpu_lock_addr
, -1);
2698 tmp
= tcg_temp_new();
2699 tcg_gen_movi_i64(tmp
, 0);
2700 st_flag_byte(tmp
, ENV_FLAG_RX_SHIFT
);
2701 tcg_gen_andi_i64(tmp
, vb
, 1);
2702 st_flag_byte(tmp
, ENV_FLAG_PAL_SHIFT
);
2704 tcg_gen_andi_i64(cpu_pc
, vb
, ~3);
2705 /* Allow interrupts to be recognized right away. */
2706 ret
= DISAS_PC_UPDATED_NOCHAIN
;
2713 /* HW_ST (PALcode) */
2714 #ifndef CONFIG_USER_ONLY
2715 REQUIRE_TB_FLAG(ENV_FLAG_PAL_MODE
);
2717 switch ((insn
>> 12) & 0xF) {
2719 /* Longword physical access */
2720 va
= load_gpr(ctx
, ra
);
2721 vb
= load_gpr(ctx
, rb
);
2722 tmp
= tcg_temp_new();
2723 tcg_gen_addi_i64(tmp
, vb
, disp12
);
2724 tcg_gen_qemu_st_i64(va
, tmp
, MMU_PHYS_IDX
, MO_LESL
);
2728 /* Quadword physical access */
2729 va
= load_gpr(ctx
, ra
);
2730 vb
= load_gpr(ctx
, rb
);
2731 tmp
= tcg_temp_new();
2732 tcg_gen_addi_i64(tmp
, vb
, disp12
);
2733 tcg_gen_qemu_st_i64(va
, tmp
, MMU_PHYS_IDX
, MO_LEQ
);
2737 /* Longword physical access with lock */
2738 ret
= gen_store_conditional(ctx
, ra
, rb
, disp12
,
2739 MMU_PHYS_IDX
, MO_LESL
);
2742 /* Quadword physical access with lock */
2743 ret
= gen_store_conditional(ctx
, ra
, rb
, disp12
,
2744 MMU_PHYS_IDX
, MO_LEQ
);
2747 /* Longword virtual access */
2750 /* Quadword virtual access */
2771 /* Longword virtual access with alternate access mode */
2774 /* Quadword virtual access with alternate access mode */
2790 gen_load_mem(ctx
, &gen_qemu_ldf
, ra
, rb
, disp16
, 1, 0);
2794 gen_load_mem(ctx
, &gen_qemu_ldg
, ra
, rb
, disp16
, 1, 0);
2798 gen_load_mem(ctx
, &gen_qemu_lds
, ra
, rb
, disp16
, 1, 0);
2802 gen_load_mem(ctx
, &tcg_gen_qemu_ld64
, ra
, rb
, disp16
, 1, 0);
2806 gen_store_mem(ctx
, &gen_qemu_stf
, ra
, rb
, disp16
, 1, 0);
2810 gen_store_mem(ctx
, &gen_qemu_stg
, ra
, rb
, disp16
, 1, 0);
2814 gen_store_mem(ctx
, &gen_qemu_sts
, ra
, rb
, disp16
, 1, 0);
2818 gen_store_mem(ctx
, &tcg_gen_qemu_st64
, ra
, rb
, disp16
, 1, 0);
2822 gen_load_mem(ctx
, &tcg_gen_qemu_ld32s
, ra
, rb
, disp16
, 0, 0);
2826 gen_load_mem(ctx
, &tcg_gen_qemu_ld64
, ra
, rb
, disp16
, 0, 0);
2830 gen_load_mem(ctx
, &gen_qemu_ldl_l
, ra
, rb
, disp16
, 0, 0);
2834 gen_load_mem(ctx
, &gen_qemu_ldq_l
, ra
, rb
, disp16
, 0, 0);
2838 gen_store_mem(ctx
, &tcg_gen_qemu_st32
, ra
, rb
, disp16
, 0, 0);
2842 gen_store_mem(ctx
, &tcg_gen_qemu_st64
, ra
, rb
, disp16
, 0, 0);
2846 ret
= gen_store_conditional(ctx
, ra
, rb
, disp16
,
2847 ctx
->mem_idx
, MO_LESL
);
2851 ret
= gen_store_conditional(ctx
, ra
, rb
, disp16
,
2852 ctx
->mem_idx
, MO_LEQ
);
2856 ret
= gen_bdirect(ctx
, ra
, disp21
);
2858 case 0x31: /* FBEQ */
2859 ret
= gen_fbcond(ctx
, TCG_COND_EQ
, ra
, disp21
);
2861 case 0x32: /* FBLT */
2862 ret
= gen_fbcond(ctx
, TCG_COND_LT
, ra
, disp21
);
2864 case 0x33: /* FBLE */
2865 ret
= gen_fbcond(ctx
, TCG_COND_LE
, ra
, disp21
);
2869 ret
= gen_bdirect(ctx
, ra
, disp21
);
2871 case 0x35: /* FBNE */
2872 ret
= gen_fbcond(ctx
, TCG_COND_NE
, ra
, disp21
);
2874 case 0x36: /* FBGE */
2875 ret
= gen_fbcond(ctx
, TCG_COND_GE
, ra
, disp21
);
2877 case 0x37: /* FBGT */
2878 ret
= gen_fbcond(ctx
, TCG_COND_GT
, ra
, disp21
);
2882 ret
= gen_bcond(ctx
, TCG_COND_EQ
, ra
, disp21
, 1);
2886 ret
= gen_bcond(ctx
, TCG_COND_EQ
, ra
, disp21
, 0);
2890 ret
= gen_bcond(ctx
, TCG_COND_LT
, ra
, disp21
, 0);
2894 ret
= gen_bcond(ctx
, TCG_COND_LE
, ra
, disp21
, 0);
2898 ret
= gen_bcond(ctx
, TCG_COND_NE
, ra
, disp21
, 1);
2902 ret
= gen_bcond(ctx
, TCG_COND_NE
, ra
, disp21
, 0);
2906 ret
= gen_bcond(ctx
, TCG_COND_GE
, ra
, disp21
, 0);
2910 ret
= gen_bcond(ctx
, TCG_COND_GT
, ra
, disp21
, 0);
2913 ret
= gen_invalid(ctx
);
static void alpha_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cpu)
{
    DisasContext *ctx = container_of(dcbase, DisasContext, base);
    CPUAlphaState *env = cpu->env_ptr;
    int64_t bound, mask;

    ctx->tbflags = ctx->base.tb->flags;
    ctx->mem_idx = cpu_mmu_index(env, false);
    ctx->implver = env->implver;
    ctx->amask = env->amask;

#ifdef CONFIG_USER_ONLY
    ctx->ir = cpu_std_ir;
#else
    ctx->palbr = env->palbr;
    ctx->ir = (ctx->tbflags & ENV_FLAG_PAL_MODE ? cpu_pal_ir : cpu_std_ir);
#endif

    /* ??? Every TB begins with unset rounding mode, to be initialized on
       the first fp insn of the TB.  Alternately we could define a proper
       default for every TB (e.g. QUAL_RM_N or QUAL_RM_D) and make sure
       to reset the FP_STATUS to that default at the end of any TB that
       changes the default.  We could even (gasp) dynamically figure out
       what default would be most efficient given the running program.  */
    ctx->tb_rm = -1;
    /* Similarly for flush-to-zero.  */
    ctx->tb_ftz = -1;

    ctx->zero = NULL;
    ctx->sink = NULL;
    ctx->lit = NULL;

    /* Bound the number of insns to execute to those left on the page.  */
    if (in_superpage(ctx, ctx->base.pc_first)) {
        mask = -1ULL << 41;
    } else {
        mask = TARGET_PAGE_MASK;
    }
    bound = -(ctx->base.pc_first | mask) / 4;
    ctx->base.max_insns = MIN(ctx->base.max_insns, bound);
}

static void alpha_tr_tb_start(DisasContextBase *db, CPUState *cpu)
{
}

static void alpha_tr_insn_start(DisasContextBase *dcbase, CPUState *cpu)
{
    tcg_gen_insn_start(dcbase->pc_next);
}

static bool alpha_tr_breakpoint_check(DisasContextBase *dcbase, CPUState *cpu,
                                      const CPUBreakpoint *bp)
{
    DisasContext *ctx = container_of(dcbase, DisasContext, base);

    ctx->base.is_jmp = gen_excp(ctx, EXCP_DEBUG, 0);

    /* The address covered by the breakpoint must be included in
       [tb->pc, tb->pc + tb->size) in order for it to be
       properly cleared -- thus we increment the PC here so that
       the logic setting tb->size below does the right thing.  */
    ctx->base.pc_next += 4;
    return true;
}

static void alpha_tr_translate_insn(DisasContextBase *dcbase, CPUState *cpu)
{
    DisasContext *ctx = container_of(dcbase, DisasContext, base);
    CPUAlphaState *env = cpu->env_ptr;
    uint32_t insn = translator_ldl(env, ctx->base.pc_next);

    ctx->base.pc_next += 4;
    ctx->base.is_jmp = translate_one(ctx, insn);

    free_context_temps(ctx);
    translator_loop_temp_check(&ctx->base);
}

static void alpha_tr_tb_stop(DisasContextBase *dcbase, CPUState *cpu)
{
    DisasContext *ctx = container_of(dcbase, DisasContext, base);

    switch (ctx->base.is_jmp) {
    case DISAS_NORETURN:
        break;
    case DISAS_TOO_MANY:
        if (use_goto_tb(ctx, ctx->base.pc_next)) {
            tcg_gen_goto_tb(0);
            tcg_gen_movi_i64(cpu_pc, ctx->base.pc_next);
            tcg_gen_exit_tb(ctx->base.tb, 0);
        }
        /* FALLTHRU */
    case DISAS_PC_STALE:
        tcg_gen_movi_i64(cpu_pc, ctx->base.pc_next);
        /* FALLTHRU */
    case DISAS_PC_UPDATED:
        if (!use_exit_tb(ctx)) {
            tcg_gen_lookup_and_goto_ptr();
            break;
        }
        /* FALLTHRU */
    case DISAS_PC_UPDATED_NOCHAIN:
        if (ctx->base.singlestep_enabled) {
            gen_excp_1(EXCP_DEBUG, 0);
        } else {
            tcg_gen_exit_tb(NULL, 0);
        }
        break;
    default:
        g_assert_not_reached();
    }
}

static void alpha_tr_disas_log(const DisasContextBase *dcbase, CPUState *cpu)
{
    qemu_log("IN: %s\n", lookup_symbol(dcbase->pc_first));
    log_target_disas(cpu, dcbase->pc_first, dcbase->tb->size);
}

static const TranslatorOps alpha_tr_ops = {
    .init_disas_context = alpha_tr_init_disas_context,
    .tb_start           = alpha_tr_tb_start,
    .insn_start         = alpha_tr_insn_start,
    .breakpoint_check   = alpha_tr_breakpoint_check,
    .translate_insn     = alpha_tr_translate_insn,
    .tb_stop            = alpha_tr_tb_stop,
    .disas_log          = alpha_tr_disas_log,
};

void gen_intermediate_code(CPUState *cpu, TranslationBlock *tb, int max_insns)
{
    DisasContext dc;
    translator_loop(&alpha_tr_ops, &dc.base, cpu, tb, max_insns);
}

void restore_state_to_opc(CPUAlphaState *env, TranslationBlock *tb,
                          target_ulong *data)
{
    env->pc = data[0];
}