/*
 * Alpha emulation cpu translation for qemu.
 *
 * Copyright (c) 2007 Jocelyn Mayer
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"
#include "cpu.h"
#include "sysemu/cpus.h"
#include "disas/disas.h"
#include "qemu/host-utils.h"
#include "exec/exec-all.h"
#include "tcg-op.h"
#include "exec/cpu_ldst.h"
#include "exec/helper-proto.h"
#include "exec/helper-gen.h"
#include "trace-tcg.h"
#include "exec/translator.h"
#include "exec/log.h"
#undef ALPHA_DEBUG_DISAS
#define CONFIG_SOFTFLOAT_INLINE

#ifdef ALPHA_DEBUG_DISAS
#  define LOG_DISAS(...) qemu_log_mask(CPU_LOG_TB_IN_ASM, ## __VA_ARGS__)
#else
#  define LOG_DISAS(...) do { } while (0)
#endif
typedef struct DisasContext DisasContext;
struct DisasContext {
    DisasContextBase base;

#ifndef CONFIG_USER_ONLY
    uint64_t palbr;
#endif
    uint32_t tbflags;
    int mem_idx;

    /* implver and amask values for this CPU.  */
    int implver;
    int amask;

    /* Current rounding mode for this TB.  */
    int tb_rm;
    /* Current flush-to-zero setting for this TB.  */
    int tb_ftz;

    /* The set of registers active in the current context.  */
    TCGv *ir;

    /* Temporaries for $31 and $f31 as source and destination.  */
    TCGv zero;
    TCGv sink;
    /* Temporary for immediate constants.  */
    TCGv lit;
};
/* Target-specific return values from translate_one, indicating the
   state of the TB.  Note that DISAS_NEXT indicates that we are not
   exiting the TB.  */
#define DISAS_PC_UPDATED_NOCHAIN  DISAS_TARGET_0
#define DISAS_PC_UPDATED          DISAS_TARGET_1
#define DISAS_PC_STALE            DISAS_TARGET_2
/* global register indexes */
static TCGv cpu_std_ir[31];
static TCGv cpu_fir[31];
static TCGv cpu_pc;
static TCGv cpu_lock_addr;
static TCGv cpu_lock_value;

#ifndef CONFIG_USER_ONLY
static TCGv cpu_pal_ir[31];
#endif

#include "exec/gen-icount.h"
void alpha_translate_init(void)
{
#define DEF_VAR(V)  { &cpu_##V, #V, offsetof(CPUAlphaState, V) }

    typedef struct { TCGv *var; const char *name; int ofs; } GlobalVar;
    static const GlobalVar vars[] = {
        DEF_VAR(pc),
        DEF_VAR(lock_addr),
        DEF_VAR(lock_value),
    };

#undef DEF_VAR
    /* Use the symbolic register names that match the disassembler.  */
    static const char greg_names[31][4] = {
        "v0", "t0", "t1", "t2", "t3", "t4", "t5", "t6",
        "t7", "s0", "s1", "s2", "s3", "s4", "s5", "fp",
        "a0", "a1", "a2", "a3", "a4", "a5", "t8", "t9",
        "t10", "t11", "ra", "t12", "at", "gp", "sp"
    };
    static const char freg_names[31][4] = {
        "f0", "f1", "f2", "f3", "f4", "f5", "f6", "f7",
        "f8", "f9", "f10", "f11", "f12", "f13", "f14", "f15",
        "f16", "f17", "f18", "f19", "f20", "f21", "f22", "f23",
        "f24", "f25", "f26", "f27", "f28", "f29", "f30"
    };
#ifndef CONFIG_USER_ONLY
    static const char shadow_names[8][8] = {
        "pal_t7", "pal_s0", "pal_s1", "pal_s2",
        "pal_s3", "pal_s4", "pal_s5", "pal_t11"
    };
#endif
    int i;

    for (i = 0; i < 31; i++) {
        cpu_std_ir[i] = tcg_global_mem_new_i64(cpu_env,
                                               offsetof(CPUAlphaState, ir[i]),
                                               greg_names[i]);
    }

    for (i = 0; i < 31; i++) {
        cpu_fir[i] = tcg_global_mem_new_i64(cpu_env,
                                            offsetof(CPUAlphaState, fir[i]),
                                            freg_names[i]);
    }

#ifndef CONFIG_USER_ONLY
    memcpy(cpu_pal_ir, cpu_std_ir, sizeof(cpu_pal_ir));
    for (i = 0; i < 8; i++) {
        int r = (i == 7 ? 25 : i + 8);
        cpu_pal_ir[r] = tcg_global_mem_new_i64(cpu_env,
                                               offsetof(CPUAlphaState,
                                                        shadow[i]),
                                               shadow_names[i]);
    }
#endif

    for (i = 0; i < ARRAY_SIZE(vars); ++i) {
        const GlobalVar *v = &vars[i];
        *v->var = tcg_global_mem_new_i64(cpu_env, v->ofs, v->name);
    }
}
static TCGv load_zero(DisasContext *ctx)
{
    if (!ctx->zero) {
        ctx->zero = tcg_const_i64(0);
    }
    return ctx->zero;
}

static TCGv dest_sink(DisasContext *ctx)
{
    if (!ctx->sink) {
        ctx->sink = tcg_temp_new();
    }
    return ctx->sink;
}
static void free_context_temps(DisasContext *ctx)
{
    if (ctx->sink) {
        tcg_gen_discard_i64(ctx->sink);
        tcg_temp_free(ctx->sink);
        ctx->sink = NULL;
    }
    if (ctx->zero) {
        tcg_temp_free(ctx->zero);
        ctx->zero = NULL;
    }
    if (ctx->lit) {
        tcg_temp_free(ctx->lit);
        ctx->lit = NULL;
    }
}
static TCGv load_gpr(DisasContext *ctx, unsigned reg)
{
    if (likely(reg < 31)) {
        return ctx->ir[reg];
    } else {
        return load_zero(ctx);
    }
}

static TCGv load_gpr_lit(DisasContext *ctx, unsigned reg,
                         uint8_t lit, bool islit)
{
    if (islit) {
        ctx->lit = tcg_const_i64(lit);
        return ctx->lit;
    } else if (likely(reg < 31)) {
        return ctx->ir[reg];
    } else {
        return load_zero(ctx);
    }
}
static TCGv dest_gpr(DisasContext *ctx, unsigned reg)
{
    if (likely(reg < 31)) {
        return ctx->ir[reg];
    } else {
        return dest_sink(ctx);
    }
}

static TCGv load_fpr(DisasContext *ctx, unsigned reg)
{
    if (likely(reg < 31)) {
        return cpu_fir[reg];
    } else {
        return load_zero(ctx);
    }
}

static TCGv dest_fpr(DisasContext *ctx, unsigned reg)
{
    if (likely(reg < 31)) {
        return cpu_fir[reg];
    } else {
        return dest_sink(ctx);
    }
}
static int get_flag_ofs(unsigned shift)
{
    int ofs = offsetof(CPUAlphaState, flags);
#ifdef HOST_WORDS_BIGENDIAN
    ofs += 3 - (shift / 8);
#else
    ofs += shift / 8;
#endif
    return ofs;
}

static void ld_flag_byte(TCGv val, unsigned shift)
{
    tcg_gen_ld8u_i64(val, cpu_env, get_flag_ofs(shift));
}

static void st_flag_byte(TCGv val, unsigned shift)
{
    tcg_gen_st8_i64(val, cpu_env, get_flag_ofs(shift));
}
static void gen_excp_1(int exception, int error_code)
{
    TCGv_i32 tmp1, tmp2;

    tmp1 = tcg_const_i32(exception);
    tmp2 = tcg_const_i32(error_code);
    gen_helper_excp(cpu_env, tmp1, tmp2);
    tcg_temp_free_i32(tmp2);
    tcg_temp_free_i32(tmp1);
}

static DisasJumpType gen_excp(DisasContext *ctx, int exception, int error_code)
{
    tcg_gen_movi_i64(cpu_pc, ctx->base.pc_next);
    gen_excp_1(exception, error_code);
    return DISAS_NORETURN;
}

static inline DisasJumpType gen_invalid(DisasContext *ctx)
{
    return gen_excp(ctx, EXCP_OPCDEC, 0);
}
static inline void gen_qemu_ldf(TCGv t0, TCGv t1, int flags)
{
    TCGv_i32 tmp32 = tcg_temp_new_i32();
    tcg_gen_qemu_ld_i32(tmp32, t1, flags, MO_LEUL);
    gen_helper_memory_to_f(t0, tmp32);
    tcg_temp_free_i32(tmp32);
}

static inline void gen_qemu_ldg(TCGv t0, TCGv t1, int flags)
{
    TCGv tmp = tcg_temp_new();
    tcg_gen_qemu_ld_i64(tmp, t1, flags, MO_LEQ);
    gen_helper_memory_to_g(t0, tmp);
    tcg_temp_free(tmp);
}

static inline void gen_qemu_lds(TCGv t0, TCGv t1, int flags)
{
    TCGv_i32 tmp32 = tcg_temp_new_i32();
    tcg_gen_qemu_ld_i32(tmp32, t1, flags, MO_LEUL);
    gen_helper_memory_to_s(t0, tmp32);
    tcg_temp_free_i32(tmp32);
}
static inline void gen_qemu_ldl_l(TCGv t0, TCGv t1, int flags)
{
    tcg_gen_qemu_ld_i64(t0, t1, flags, MO_LESL);
    tcg_gen_mov_i64(cpu_lock_addr, t1);
    tcg_gen_mov_i64(cpu_lock_value, t0);
}

static inline void gen_qemu_ldq_l(TCGv t0, TCGv t1, int flags)
{
    tcg_gen_qemu_ld_i64(t0, t1, flags, MO_LEQ);
    tcg_gen_mov_i64(cpu_lock_addr, t1);
    tcg_gen_mov_i64(cpu_lock_value, t0);
}
static inline void gen_load_mem(DisasContext *ctx,
                                void (*tcg_gen_qemu_load)(TCGv t0, TCGv t1,
                                                          int flags),
                                int ra, int rb, int32_t disp16, bool fp,
                                bool clear)
{
    TCGv tmp, addr, va;

    /* LDQ_U with ra $31 is UNOP.  Other various loads are forms of
       prefetches, which we can treat as nops.  No worries about
       missed exceptions here.  */
    if (unlikely(ra == 31)) {
        return;
    }

    tmp = tcg_temp_new();
    addr = load_gpr(ctx, rb);

    if (disp16) {
        tcg_gen_addi_i64(tmp, addr, disp16);
        addr = tmp;
    }
    if (clear) {
        tcg_gen_andi_i64(tmp, addr, ~0x7);
        addr = tmp;
    }

    va = (fp ? cpu_fir[ra] : ctx->ir[ra]);
    tcg_gen_qemu_load(va, addr, ctx->mem_idx);

    tcg_temp_free(tmp);
}
static inline void gen_qemu_stf(TCGv t0, TCGv t1, int flags)
{
    TCGv_i32 tmp32 = tcg_temp_new_i32();
    gen_helper_f_to_memory(tmp32, t0);
    tcg_gen_qemu_st_i32(tmp32, t1, flags, MO_LEUL);
    tcg_temp_free_i32(tmp32);
}

static inline void gen_qemu_stg(TCGv t0, TCGv t1, int flags)
{
    TCGv tmp = tcg_temp_new();
    gen_helper_g_to_memory(tmp, t0);
    tcg_gen_qemu_st_i64(tmp, t1, flags, MO_LEQ);
    tcg_temp_free(tmp);
}

static inline void gen_qemu_sts(TCGv t0, TCGv t1, int flags)
{
    TCGv_i32 tmp32 = tcg_temp_new_i32();
    gen_helper_s_to_memory(tmp32, t0);
    tcg_gen_qemu_st_i32(tmp32, t1, flags, MO_LEUL);
    tcg_temp_free_i32(tmp32);
}
static inline void gen_store_mem(DisasContext *ctx,
                                 void (*tcg_gen_qemu_store)(TCGv t0, TCGv t1,
                                                            int flags),
                                 int ra, int rb, int32_t disp16, bool fp,
                                 bool clear)
{
    TCGv tmp, addr, va;

    tmp = tcg_temp_new();
    addr = load_gpr(ctx, rb);

    if (disp16) {
        tcg_gen_addi_i64(tmp, addr, disp16);
        addr = tmp;
    }
    if (clear) {
        tcg_gen_andi_i64(tmp, addr, ~0x7);
        addr = tmp;
    }

    va = (fp ? load_fpr(ctx, ra) : load_gpr(ctx, ra));
    tcg_gen_qemu_store(va, addr, ctx->mem_idx);

    tcg_temp_free(tmp);
}
static DisasJumpType gen_store_conditional(DisasContext *ctx, int ra, int rb,
                                           int32_t disp16, int mem_idx,
                                           TCGMemOp op)
{
    TCGLabel *lab_fail, *lab_done;
    TCGv addr, val;

    addr = tcg_temp_new_i64();
    tcg_gen_addi_i64(addr, load_gpr(ctx, rb), disp16);
    free_context_temps(ctx);

    lab_fail = gen_new_label();
    lab_done = gen_new_label();
    tcg_gen_brcond_i64(TCG_COND_NE, addr, cpu_lock_addr, lab_fail);
    tcg_temp_free_i64(addr);

    val = tcg_temp_new_i64();
    tcg_gen_atomic_cmpxchg_i64(val, cpu_lock_addr, cpu_lock_value,
                               load_gpr(ctx, ra), mem_idx, op);
    free_context_temps(ctx);

    if (ra != 31) {
        tcg_gen_setcond_i64(TCG_COND_EQ, ctx->ir[ra], val, cpu_lock_value);
    }
    tcg_temp_free_i64(val);
    tcg_gen_br(lab_done);

    gen_set_label(lab_fail);
    if (ra != 31) {
        tcg_gen_movi_i64(ctx->ir[ra], 0);
    }

    gen_set_label(lab_done);
    tcg_gen_movi_i64(cpu_lock_addr, -1);
    return DISAS_NEXT;
}
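/*
 * For example, the guest sequence
 *     ldq_l  t0, 0(a0)
 *     stq_c  t0, 0(a0)
 * is emulated by recording the address in cpu_lock_addr and the loaded
 * value in cpu_lock_value; the conditional store then succeeds only if
 * the cmpxchg above still observes cpu_lock_value at that address.  This
 * is slightly weaker than real hardware (an intervening write of the
 * same value -- the classic ABA case -- still succeeds) but preserves
 * the architectural contract for correct LL/SC loops.
 */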
static bool in_superpage(DisasContext *ctx, int64_t addr)
{
#ifndef CONFIG_USER_ONLY
    return ((ctx->tbflags & ENV_FLAG_PS_USER) == 0
            && addr >> TARGET_VIRT_ADDR_SPACE_BITS == -1
            && ((addr >> 41) & 3) == 2);
#else
    return false;
#endif
}
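/* For example, the OSF/1 KSEG base 0xfffffc0000000000 has all bits above
   the virtual address space set and bits <42:41> == 2, so it is in the
   superpage whenever the TB is not executing in user mode.  */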
static bool use_exit_tb(DisasContext *ctx)
{
    return ((tb_cflags(ctx->base.tb) & CF_LAST_IO)
            || ctx->base.singlestep_enabled
            || singlestep);
}

static bool use_goto_tb(DisasContext *ctx, uint64_t dest)
{
    /* Suppress goto_tb in the case of single-stepping and IO.  */
    if (unlikely(use_exit_tb(ctx))) {
        return false;
    }
#ifndef CONFIG_USER_ONLY
    /* If the destination is in the superpage, the page perms can't change.  */
    if (in_superpage(ctx, dest)) {
        return true;
    }
    /* Check for the dest on the same page as the start of the TB.  */
    return ((ctx->base.tb->pc ^ dest) & TARGET_PAGE_MASK) == 0;
#else
    return true;
#endif
}
static DisasJumpType gen_bdirect(DisasContext *ctx, int ra, int32_t disp)
{
    uint64_t dest = ctx->base.pc_next + (disp << 2);

    if (ra != 31) {
        tcg_gen_movi_i64(ctx->ir[ra], ctx->base.pc_next);
    }

    /* Notice branch-to-next; used to initialize RA with the PC.  */
    if (disp == 0) {
        return DISAS_NEXT;
    } else if (use_goto_tb(ctx, dest)) {
        tcg_gen_goto_tb(0);
        tcg_gen_movi_i64(cpu_pc, dest);
        tcg_gen_exit_tb((uintptr_t)ctx->base.tb);
        return DISAS_NORETURN;
    } else {
        tcg_gen_movi_i64(cpu_pc, dest);
        return DISAS_PC_UPDATED;
    }
}
static DisasJumpType gen_bcond_internal(DisasContext *ctx, TCGCond cond,
                                        TCGv cmp, int32_t disp)
{
    uint64_t dest = ctx->base.pc_next + (disp << 2);
    TCGLabel *lab_true = gen_new_label();

    if (use_goto_tb(ctx, dest)) {
        tcg_gen_brcondi_i64(cond, cmp, 0, lab_true);

        tcg_gen_goto_tb(0);
        tcg_gen_movi_i64(cpu_pc, ctx->base.pc_next);
        tcg_gen_exit_tb((uintptr_t)ctx->base.tb);

        gen_set_label(lab_true);
        tcg_gen_goto_tb(1);
        tcg_gen_movi_i64(cpu_pc, dest);
        tcg_gen_exit_tb((uintptr_t)ctx->base.tb + 1);

        return DISAS_NORETURN;
    } else {
        TCGv_i64 z = tcg_const_i64(0);
        TCGv_i64 d = tcg_const_i64(dest);
        TCGv_i64 p = tcg_const_i64(ctx->base.pc_next);

        tcg_gen_movcond_i64(cond, cpu_pc, cmp, z, d, p);

        tcg_temp_free_i64(z);
        tcg_temp_free_i64(d);
        tcg_temp_free_i64(p);
        return DISAS_PC_UPDATED;
    }
}
static DisasJumpType gen_bcond(DisasContext *ctx, TCGCond cond, int ra,
                               int32_t disp, int mask)
{
    if (mask) {
        DisasJumpType ret;
        TCGv tmp = tcg_temp_new();

        tcg_gen_andi_i64(tmp, load_gpr(ctx, ra), 1);
        ret = gen_bcond_internal(ctx, cond, tmp, disp);
        tcg_temp_free(tmp);
        return ret;
    }
    return gen_bcond_internal(ctx, cond, load_gpr(ctx, ra), disp);
}
/* Fold -0.0 for comparison with COND.  */

static void gen_fold_mzero(TCGCond cond, TCGv dest, TCGv src)
{
    uint64_t mzero = 1ull << 63;

    switch (cond) {
    case TCG_COND_LE:
    case TCG_COND_GT:
        /* For <= or >, the -0.0 value directly compares the way we want.  */
        tcg_gen_mov_i64(dest, src);
        break;

    case TCG_COND_EQ:
    case TCG_COND_NE:
        /* For == or !=, we can simply mask off the sign bit and compare.  */
        tcg_gen_andi_i64(dest, src, mzero - 1);
        break;

    case TCG_COND_GE:
    case TCG_COND_LT:
        /* For >= or <, map -0.0 to +0.0 via comparison and mask.  */
        tcg_gen_setcondi_i64(TCG_COND_NE, dest, src, mzero);
        tcg_gen_neg_i64(dest, dest);
        tcg_gen_and_i64(dest, dest, src);
        break;

    default:
        abort();
    }
}
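/* For example, with COND == TCG_COND_GE the raw IEEE encoding of -0.0
   (0x8000000000000000) would compare as negative even though -0.0 == +0.0.
   The setcond/neg/and sequence above produces all-ones for any input other
   than that one bit pattern, so -0.0 is mapped to +0.0 while every other
   value, including real negative numbers, passes through unchanged.  */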
static DisasJumpType gen_fbcond(DisasContext *ctx, TCGCond cond, int ra,
                                int32_t disp)
{
    DisasJumpType ret;
    TCGv cmp_tmp = tcg_temp_new();

    gen_fold_mzero(cond, cmp_tmp, load_fpr(ctx, ra));
    ret = gen_bcond_internal(ctx, cond, cmp_tmp, disp);
    tcg_temp_free(cmp_tmp);
    return ret;
}
static void gen_fcmov(DisasContext *ctx, TCGCond cond, int ra, int rb, int rc)
{
    TCGv va, vb, z;

    z = load_zero(ctx);
    vb = load_fpr(ctx, rb);
    va = tcg_temp_new();
    gen_fold_mzero(cond, va, load_fpr(ctx, ra));

    tcg_gen_movcond_i64(cond, dest_fpr(ctx, rc), va, z, vb, load_fpr(ctx, rc));

    tcg_temp_free(va);
}
#define QUAL_RM_N       0x080 /* Round mode nearest even */
#define QUAL_RM_C       0x000 /* Round mode chopped */
#define QUAL_RM_M       0x040 /* Round mode minus infinity */
#define QUAL_RM_D       0x0c0 /* Round mode dynamic */
#define QUAL_RM_MASK    0x0c0

#define QUAL_U          0x100 /* Underflow enable (fp output) */
#define QUAL_V          0x100 /* Overflow enable (int output) */
#define QUAL_S          0x400 /* Software completion enable */
#define QUAL_I          0x200 /* Inexact detection enable */
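/* For example, an IEEE operate instruction whose fn11 field has all of
   QUAL_S | QUAL_U | QUAL_I | QUAL_RM_D set (fn11 & 0x7c0 == 0x7c0) requests
   software completion, underflow and inexact handling together with dynamic
   rounding -- the /SUID suffix in Alpha assembly.  */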
static void gen_qual_roundmode(DisasContext *ctx, int fn11)
{
    TCGv_i32 tmp;

    fn11 &= QUAL_RM_MASK;
    if (fn11 == ctx->tb_rm) {
        return;
    }
    ctx->tb_rm = fn11;

    tmp = tcg_temp_new_i32();
    switch (fn11) {
    case QUAL_RM_N:
        tcg_gen_movi_i32(tmp, float_round_nearest_even);
        break;
    case QUAL_RM_C:
        tcg_gen_movi_i32(tmp, float_round_to_zero);
        break;
    case QUAL_RM_M:
        tcg_gen_movi_i32(tmp, float_round_down);
        break;
    case QUAL_RM_D:
        tcg_gen_ld8u_i32(tmp, cpu_env,
                         offsetof(CPUAlphaState, fpcr_dyn_round));
        break;
    }

#if defined(CONFIG_SOFTFLOAT_INLINE)
    /* ??? The "fpu/softfloat.h" interface is to call set_float_rounding_mode.
       With CONFIG_SOFTFLOAT that expands to an out-of-line call that just
       sets the one field.  */
    tcg_gen_st8_i32(tmp, cpu_env,
                    offsetof(CPUAlphaState, fp_status.float_rounding_mode));
#else
    gen_helper_setroundmode(tmp);
#endif

    tcg_temp_free_i32(tmp);
}
static void gen_qual_flushzero(DisasContext *ctx, int fn11)
{
    TCGv_i32 tmp;

    fn11 &= QUAL_U;
    if (fn11 == ctx->tb_ftz) {
        return;
    }
    ctx->tb_ftz = fn11;

    tmp = tcg_temp_new_i32();
    if (fn11) {
        /* Underflow is enabled, use the FPCR setting.  */
        tcg_gen_ld8u_i32(tmp, cpu_env,
                         offsetof(CPUAlphaState, fpcr_flush_to_zero));
    } else {
        /* Underflow is disabled, force flush-to-zero.  */
        tcg_gen_movi_i32(tmp, 1);
    }

#if defined(CONFIG_SOFTFLOAT_INLINE)
    tcg_gen_st8_i32(tmp, cpu_env,
                    offsetof(CPUAlphaState, fp_status.flush_to_zero));
#else
    gen_helper_setflushzero(tmp);
#endif

    tcg_temp_free_i32(tmp);
}
static TCGv gen_ieee_input(DisasContext *ctx, int reg, int fn11, int is_cmp)
{
    TCGv val;

    if (unlikely(reg == 31)) {
        val = load_zero(ctx);
    } else {
        val = cpu_fir[reg];
        if ((fn11 & QUAL_S) == 0) {
            if (is_cmp) {
                gen_helper_ieee_input_cmp(cpu_env, val);
            } else {
                gen_helper_ieee_input(cpu_env, val);
            }
        } else {
#ifndef CONFIG_USER_ONLY
            /* In system mode, raise exceptions for denormals like real
               hardware.  In user mode, proceed as if the OS completion
               handler is handling the denormal as per spec.  */
            gen_helper_ieee_input_s(cpu_env, val);
#endif
        }
    }
    return val;
}
static void gen_fp_exc_raise(int rc, int fn11)
{
    /* ??? We ought to be able to do something with imprecise exceptions.
       E.g. notice we're still in the trap shadow of something within the
       TB and do not generate the code to signal the exception; end the TB
       when an exception is forced to arrive, either by consumption of a
       register value or TRAPB or EXCB.  */

    TCGv_i32 reg, ign;
    uint32_t ignore = 0;

    if (!(fn11 & QUAL_U)) {
        /* Note that QUAL_U == QUAL_V, so ignore either.  */
        ignore |= FPCR_UNF | FPCR_IOV;
    }
    if (!(fn11 & QUAL_I)) {
        ignore |= FPCR_INE;
    }
    ign = tcg_const_i32(ignore);

    /* ??? Pass in the regno of the destination so that the helper can
       set EXC_MASK, which contains a bitmask of destination registers
       that have caused arithmetic traps.  A simple userspace emulation
       does not require this.  We do need it for a guest kernel's entArith,
       or if we were to do something clever with imprecise exceptions.  */
    reg = tcg_const_i32(rc + 32);
    if (fn11 & QUAL_S) {
        gen_helper_fp_exc_raise_s(cpu_env, ign, reg);
    } else {
        gen_helper_fp_exc_raise(cpu_env, ign, reg);
    }

    tcg_temp_free_i32(reg);
    tcg_temp_free_i32(ign);
}
static void gen_cvtlq(TCGv vc, TCGv vb)
{
    TCGv tmp = tcg_temp_new();

    /* The arithmetic right shift here, plus the sign-extended mask below
       yields a sign-extended result without an explicit ext32s_i64.  */
    tcg_gen_shri_i64(tmp, vb, 29);
    tcg_gen_sari_i64(vc, vb, 32);
    tcg_gen_deposit_i64(vc, vc, tmp, 0, 30);

    tcg_temp_free(tmp);
}
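/* For example, a longword sits in an FP register with its bits <31:30>
   held in register bits <63:62> and bits <29:0> in register bits <58:29>.
   The logical shift by 29 recovers the low 30 bits, the arithmetic shift
   by 32 brings bits <63:62> down to <31:30> already sign-extended, and
   the deposit stitches the two halves together.  */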
static void gen_ieee_arith2(DisasContext *ctx,
                            void (*helper)(TCGv, TCGv_ptr, TCGv),
                            int rb, int rc, int fn11)
{
    TCGv vb;

    gen_qual_roundmode(ctx, fn11);
    gen_qual_flushzero(ctx, fn11);

    vb = gen_ieee_input(ctx, rb, fn11, 0);
    helper(dest_fpr(ctx, rc), cpu_env, vb);

    gen_fp_exc_raise(rc, fn11);
}

#define IEEE_ARITH2(name)                                       \
static inline void glue(gen_, name)(DisasContext *ctx,          \
                                    int rb, int rc, int fn11)   \
{                                                               \
    gen_ieee_arith2(ctx, gen_helper_##name, rb, rc, fn11);      \
}
IEEE_ARITH2(sqrts)
IEEE_ARITH2(sqrtt)
IEEE_ARITH2(cvtst)
IEEE_ARITH2(cvtts)
static void gen_cvttq(DisasContext *ctx, int rb, int rc, int fn11)
{
    TCGv vb, vc;

    /* No need to set flushzero, since we have an integer output.  */
    vb = gen_ieee_input(ctx, rb, fn11, 0);
    vc = dest_fpr(ctx, rc);

    /* Almost all integer conversions use cropped rounding;
       special case that.  */
    if ((fn11 & QUAL_RM_MASK) == QUAL_RM_C) {
        gen_helper_cvttq_c(vc, cpu_env, vb);
    } else {
        gen_qual_roundmode(ctx, fn11);
        gen_helper_cvttq(vc, cpu_env, vb);
    }
    gen_fp_exc_raise(rc, fn11);
}
static void gen_ieee_intcvt(DisasContext *ctx,
                            void (*helper)(TCGv, TCGv_ptr, TCGv),
                            int rb, int rc, int fn11)
{
    TCGv vb, vc;

    gen_qual_roundmode(ctx, fn11);
    vb = load_fpr(ctx, rb);
    vc = dest_fpr(ctx, rc);

    /* The only exception that can be raised by integer conversion
       is inexact.  Thus we only need to worry about exceptions when
       inexact handling is requested.  */
    if (fn11 & QUAL_I) {
        helper(vc, cpu_env, vb);
        gen_fp_exc_raise(rc, fn11);
    } else {
        helper(vc, cpu_env, vb);
    }
}

#define IEEE_INTCVT(name)                                       \
static inline void glue(gen_, name)(DisasContext *ctx,          \
                                    int rb, int rc, int fn11)   \
{                                                               \
    gen_ieee_intcvt(ctx, gen_helper_##name, rb, rc, fn11);      \
}
IEEE_INTCVT(cvtqs)
IEEE_INTCVT(cvtqt)
static void gen_cpy_mask(TCGv vc, TCGv va, TCGv vb, bool inv_a, uint64_t mask)
{
    TCGv vmask = tcg_const_i64(mask);
    TCGv tmp = tcg_temp_new_i64();

    if (inv_a) {
        tcg_gen_andc_i64(tmp, vmask, va);
    } else {
        tcg_gen_and_i64(tmp, va, vmask);
    }

    tcg_gen_andc_i64(vc, vb, vmask);
    tcg_gen_or_i64(vc, vc, tmp);

    tcg_temp_free(vmask);
    tcg_temp_free(tmp);
}
static void gen_ieee_arith3(DisasContext *ctx,
                            void (*helper)(TCGv, TCGv_ptr, TCGv, TCGv),
                            int ra, int rb, int rc, int fn11)
{
    TCGv va, vb, vc;

    gen_qual_roundmode(ctx, fn11);
    gen_qual_flushzero(ctx, fn11);

    va = gen_ieee_input(ctx, ra, fn11, 0);
    vb = gen_ieee_input(ctx, rb, fn11, 0);
    vc = dest_fpr(ctx, rc);
    helper(vc, cpu_env, va, vb);

    gen_fp_exc_raise(rc, fn11);
}

#define IEEE_ARITH3(name)                                               \
static inline void glue(gen_, name)(DisasContext *ctx,                  \
                                    int ra, int rb, int rc, int fn11)   \
{                                                                       \
    gen_ieee_arith3(ctx, gen_helper_##name, ra, rb, rc, fn11);          \
}
IEEE_ARITH3(adds)
IEEE_ARITH3(subs)
IEEE_ARITH3(muls)
IEEE_ARITH3(divs)
IEEE_ARITH3(addt)
IEEE_ARITH3(subt)
IEEE_ARITH3(mult)
IEEE_ARITH3(divt)
static void gen_ieee_compare(DisasContext *ctx,
                             void (*helper)(TCGv, TCGv_ptr, TCGv, TCGv),
                             int ra, int rb, int rc, int fn11)
{
    TCGv va, vb, vc;

    va = gen_ieee_input(ctx, ra, fn11, 1);
    vb = gen_ieee_input(ctx, rb, fn11, 1);
    vc = dest_fpr(ctx, rc);
    helper(vc, cpu_env, va, vb);

    gen_fp_exc_raise(rc, fn11);
}

#define IEEE_CMP3(name)                                                 \
static inline void glue(gen_, name)(DisasContext *ctx,                  \
                                    int ra, int rb, int rc, int fn11)   \
{                                                                       \
    gen_ieee_compare(ctx, gen_helper_##name, ra, rb, rc, fn11);         \
}
IEEE_CMP3(cmptun)
IEEE_CMP3(cmpteq)
IEEE_CMP3(cmptlt)
IEEE_CMP3(cmptle)
static inline uint64_t zapnot_mask(uint8_t lit)
{
    uint64_t mask = 0;
    int i;

    for (i = 0; i < 8; ++i) {
        if ((lit >> i) & 1) {
            mask |= 0xffull << (i * 8);
        }
    }
    return mask;
}
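/* For example, zapnot_mask(0x01) == 0x00000000000000ffull and
   zapnot_mask(0x0f) == 0x00000000ffffffffull: each set bit in LIT keeps
   the corresponding byte.  gen_zapnoti below special-cases exactly these
   values as ext8u/ext32u.  */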
/* Implement zapnot with an immediate operand, which expands to some
   form of immediate AND.  This is a basic building block in the
   definition of many of the other byte manipulation instructions.  */
static void gen_zapnoti(TCGv dest, TCGv src, uint8_t lit)
{
    switch (lit) {
    case 0x00:
        tcg_gen_movi_i64(dest, 0);
        break;
    case 0x01:
        tcg_gen_ext8u_i64(dest, src);
        break;
    case 0x03:
        tcg_gen_ext16u_i64(dest, src);
        break;
    case 0x0f:
        tcg_gen_ext32u_i64(dest, src);
        break;
    case 0xff:
        tcg_gen_mov_i64(dest, src);
        break;
    default:
        tcg_gen_andi_i64(dest, src, zapnot_mask(lit));
        break;
    }
}
/* EXTWH, EXTLH, EXTQH */
static void gen_ext_h(DisasContext *ctx, TCGv vc, TCGv va, int rb, bool islit,
                      uint8_t lit, uint8_t byte_mask)
{
    if (islit) {
        int pos = (64 - lit * 8) & 0x3f;
        int len = cto32(byte_mask) * 8;
        if (pos < len) {
            tcg_gen_deposit_z_i64(vc, va, pos, len - pos);
        } else {
            tcg_gen_movi_i64(vc, 0);
        }
    } else {
        TCGv tmp = tcg_temp_new();
        tcg_gen_shli_i64(tmp, load_gpr(ctx, rb), 3);
        tcg_gen_neg_i64(tmp, tmp);
        tcg_gen_andi_i64(tmp, tmp, 0x3f);
        tcg_gen_shl_i64(vc, va, tmp);
        tcg_temp_free(tmp);
    }
    gen_zapnoti(vc, vc, byte_mask);
}
/* EXTBL, EXTWL, EXTLL, EXTQL */
static void gen_ext_l(DisasContext *ctx, TCGv vc, TCGv va, int rb, bool islit,
                      uint8_t lit, uint8_t byte_mask)
{
    if (islit) {
        int pos = (lit & 7) * 8;
        int len = cto32(byte_mask) * 8;
        if (pos + len >= 64) {
            len = 64 - pos;
        }
        tcg_gen_extract_i64(vc, va, pos, len);
    } else {
        TCGv tmp = tcg_temp_new();
        tcg_gen_andi_i64(tmp, load_gpr(ctx, rb), 7);
        tcg_gen_shli_i64(tmp, tmp, 3);
        tcg_gen_shr_i64(vc, va, tmp);
        gen_zapnoti(vc, vc, byte_mask);
        tcg_temp_free(tmp);
    }
}
/* INSWH, INSLH, INSQH */
static void gen_ins_h(DisasContext *ctx, TCGv vc, TCGv va, int rb, bool islit,
                      uint8_t lit, uint8_t byte_mask)
{
    if (islit) {
        int pos = 64 - (lit & 7) * 8;
        int len = cto32(byte_mask) * 8;
        if (pos < len) {
            tcg_gen_extract_i64(vc, va, pos, len - pos);
        } else {
            tcg_gen_movi_i64(vc, 0);
        }
    } else {
        TCGv tmp = tcg_temp_new();
        TCGv shift = tcg_temp_new();

        /* The instruction description has us left-shift the byte mask
           and extract bits <15:8> and apply that zap at the end.  This
           is equivalent to simply performing the zap first and shifting
           afterward.  */
        gen_zapnoti(tmp, va, byte_mask);

        /* If (B & 7) == 0, we need to shift by 64 and leave a zero.  Do this
           portably by splitting the shift into two parts: shift_count-1 and 1.
           Arrange for the -1 by using ones-complement instead of
           twos-complement in the negation: ~(B * 8) & 63.  */

        tcg_gen_shli_i64(shift, load_gpr(ctx, rb), 3);
        tcg_gen_not_i64(shift, shift);
        tcg_gen_andi_i64(shift, shift, 0x3f);

        tcg_gen_shr_i64(vc, tmp, shift);
        tcg_gen_shri_i64(vc, vc, 1);
        tcg_temp_free(shift);
        tcg_temp_free(tmp);
    }
}
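/* Worked example of the ones-complement trick above: for (B & 7) == 1 the
   required right shift is 64 - 8 = 56, and ~(1 * 8) & 63 == 55, i.e.
   shift_count - 1, with the final shri by 1 completing it.  For
   (B & 7) == 0 the two parts sum to 63 + 1 == 64, producing the required
   zero without ever issuing an out-of-range 64-bit shift.  */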
/* INSBL, INSWL, INSLL, INSQL */
static void gen_ins_l(DisasContext *ctx, TCGv vc, TCGv va, int rb, bool islit,
                      uint8_t lit, uint8_t byte_mask)
{
    if (islit) {
        int pos = (lit & 7) * 8;
        int len = cto32(byte_mask) * 8;
        if (pos + len > 64) {
            len = 64 - pos;
        }
        tcg_gen_deposit_z_i64(vc, va, pos, len);
    } else {
        TCGv tmp = tcg_temp_new();
        TCGv shift = tcg_temp_new();

        /* The instruction description has us left-shift the byte mask
           and extract bits <15:8> and apply that zap at the end.  This
           is equivalent to simply performing the zap first and shifting
           afterward.  */
        gen_zapnoti(tmp, va, byte_mask);

        tcg_gen_andi_i64(shift, load_gpr(ctx, rb), 7);
        tcg_gen_shli_i64(shift, shift, 3);
        tcg_gen_shl_i64(vc, tmp, shift);
        tcg_temp_free(shift);
        tcg_temp_free(tmp);
    }
}
/* MSKWH, MSKLH, MSKQH */
static void gen_msk_h(DisasContext *ctx, TCGv vc, TCGv va, int rb, bool islit,
                      uint8_t lit, uint8_t byte_mask)
{
    if (islit) {
        gen_zapnoti(vc, va, ~((byte_mask << (lit & 7)) >> 8));
    } else {
        TCGv shift = tcg_temp_new();
        TCGv mask = tcg_temp_new();

        /* The instruction description is as above, where the byte_mask
           is shifted left, and then we extract bits <15:8>.  This can be
           emulated with a right-shift on the expanded byte mask.  This
           requires extra care because for an input <2:0> == 0 we need a
           shift of 64 bits in order to generate a zero.  This is done by
           splitting the shift into two parts, the variable shift - 1
           followed by a constant 1 shift.  The code we expand below is
           equivalent to ~(B * 8) & 63.  */

        tcg_gen_shli_i64(shift, load_gpr(ctx, rb), 3);
        tcg_gen_not_i64(shift, shift);
        tcg_gen_andi_i64(shift, shift, 0x3f);
        tcg_gen_movi_i64(mask, zapnot_mask(byte_mask));
        tcg_gen_shr_i64(mask, mask, shift);
        tcg_gen_shri_i64(mask, mask, 1);

        tcg_gen_andc_i64(vc, va, mask);

        tcg_temp_free(mask);
        tcg_temp_free(shift);
    }
}
/* MSKBL, MSKWL, MSKLL, MSKQL */
static void gen_msk_l(DisasContext *ctx, TCGv vc, TCGv va, int rb, bool islit,
                      uint8_t lit, uint8_t byte_mask)
{
    if (islit) {
        gen_zapnoti(vc, va, ~(byte_mask << (lit & 7)));
    } else {
        TCGv shift = tcg_temp_new();
        TCGv mask = tcg_temp_new();

        tcg_gen_andi_i64(shift, load_gpr(ctx, rb), 7);
        tcg_gen_shli_i64(shift, shift, 3);
        tcg_gen_movi_i64(mask, zapnot_mask(byte_mask));
        tcg_gen_shl_i64(mask, mask, shift);

        tcg_gen_andc_i64(vc, va, mask);

        tcg_temp_free(mask);
        tcg_temp_free(shift);
    }
}
static void gen_rx(DisasContext *ctx, int ra, int set)
{
    TCGv tmp;

    if (ra != 31) {
        ld_flag_byte(ctx->ir[ra], ENV_FLAG_RX_SHIFT);
    }

    tmp = tcg_const_i64(set);
    st_flag_byte(tmp, ENV_FLAG_RX_SHIFT);
    tcg_temp_free(tmp);
}
static DisasJumpType gen_call_pal(DisasContext *ctx, int palcode)
{
    /* We're emulating OSF/1 PALcode.  Many of these are trivial access
       to internal cpu registers.  */

    /* Unprivileged PAL call */
    if (palcode >= 0x80 && palcode < 0xC0) {
        switch (palcode) {
        case 0x86:
            /* IMB */
            /* No-op inside QEMU.  */
            break;
        case 0x9E:
            /* RDUNIQUE */
            tcg_gen_ld_i64(ctx->ir[IR_V0], cpu_env,
                           offsetof(CPUAlphaState, unique));
            break;
        case 0x9F:
            /* WRUNIQUE */
            tcg_gen_st_i64(ctx->ir[IR_A0], cpu_env,
                           offsetof(CPUAlphaState, unique));
            break;
        default:
            palcode &= 0xbf;
            goto do_call_pal;
        }
        return DISAS_NEXT;
    }

#ifndef CONFIG_USER_ONLY
    /* Privileged PAL code */
    if (palcode < 0x40 && (ctx->tbflags & ENV_FLAG_PS_USER) == 0) {
        switch (palcode) {
        case 0x01:
            /* CFLUSH */
            /* No-op inside QEMU.  */
            break;
        case 0x02:
            /* DRAINA */
            /* No-op inside QEMU.  */
            break;
        case 0x2D:
            /* WRVPTPTR */
            tcg_gen_st_i64(ctx->ir[IR_A0], cpu_env,
                           offsetof(CPUAlphaState, vptptr));
            break;
        case 0x31:
            /* WRVAL */
            tcg_gen_st_i64(ctx->ir[IR_A0], cpu_env,
                           offsetof(CPUAlphaState, sysval));
            break;
        case 0x32:
            /* RDVAL */
            tcg_gen_ld_i64(ctx->ir[IR_V0], cpu_env,
                           offsetof(CPUAlphaState, sysval));
            break;

        case 0x35:
            /* SWPIPL */
            /* Note that we already know we're in kernel mode, so we know
               that PS only contains the 3 IPL bits.  */
            ld_flag_byte(ctx->ir[IR_V0], ENV_FLAG_PS_SHIFT);

            /* But make sure and store only the 3 IPL bits from the user.  */
            {
                TCGv tmp = tcg_temp_new();
                tcg_gen_andi_i64(tmp, ctx->ir[IR_A0], PS_INT_MASK);
                st_flag_byte(tmp, ENV_FLAG_PS_SHIFT);
                tcg_temp_free(tmp);
            }

            /* Allow interrupts to be recognized right away.  */
            tcg_gen_movi_i64(cpu_pc, ctx->base.pc_next);
            return DISAS_PC_UPDATED_NOCHAIN;

        case 0x36:
            /* RDPS */
            ld_flag_byte(ctx->ir[IR_V0], ENV_FLAG_PS_SHIFT);
            break;

        case 0x38:
            /* WRUSP */
            tcg_gen_st_i64(ctx->ir[IR_A0], cpu_env,
                           offsetof(CPUAlphaState, usp));
            break;
        case 0x3A:
            /* RDUSP */
            tcg_gen_ld_i64(ctx->ir[IR_V0], cpu_env,
                           offsetof(CPUAlphaState, usp));
            break;
        case 0x3C:
            /* WHAMI */
            tcg_gen_ld32s_i64(ctx->ir[IR_V0], cpu_env,
                -offsetof(AlphaCPU, env) + offsetof(CPUState, cpu_index));
            break;

        case 0x3E:
            /* WTINT */
            {
                TCGv_i32 tmp = tcg_const_i32(1);
                tcg_gen_st_i32(tmp, cpu_env, -offsetof(AlphaCPU, env) +
                               offsetof(CPUState, halted));
                tcg_temp_free_i32(tmp);
            }
            tcg_gen_movi_i64(ctx->ir[IR_V0], 0);
            return gen_excp(ctx, EXCP_HALTED, 0);

        default:
            palcode &= 0x3f;
            goto do_call_pal;
        }
        return DISAS_NEXT;
    }
#endif
    return gen_invalid(ctx);

 do_call_pal:
#ifdef CONFIG_USER_ONLY
    return gen_excp(ctx, EXCP_CALL_PAL, palcode);
#else
    {
        TCGv tmp = tcg_temp_new();
        uint64_t exc_addr = ctx->base.pc_next;
        uint64_t entry = ctx->palbr;

        if (ctx->tbflags & ENV_FLAG_PAL_MODE) {
            exc_addr |= 1;
        } else {
            tcg_gen_movi_i64(tmp, 1);
            st_flag_byte(tmp, ENV_FLAG_PAL_SHIFT);
        }

        tcg_gen_movi_i64(tmp, exc_addr);
        tcg_gen_st_i64(tmp, cpu_env, offsetof(CPUAlphaState, exc_addr));
        tcg_temp_free(tmp);

        entry += (palcode & 0x80
                  ? 0x2000 + (palcode - 0x80) * 64
                  : 0x1000 + palcode * 64);

        /* Since the destination is running in PALmode, we don't really
           need the page permissions check.  We'll see the existence of
           the page when we create the TB, and we'll flush all TBs if
           we change the PAL base register.  */
        if (!use_exit_tb(ctx)) {
            tcg_gen_goto_tb(0);
            tcg_gen_movi_i64(cpu_pc, entry);
            tcg_gen_exit_tb((uintptr_t)ctx->base.tb);
            return DISAS_NORETURN;
        } else {
            tcg_gen_movi_i64(cpu_pc, entry);
            return DISAS_PC_UPDATED;
        }
    }
#endif
}
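/* For example, the unprivileged callsys PALcode (palcode 0x83) vectors to
   palbr + 0x2000 + (0x83 - 0x80) * 64 = palbr + 0x20c0, while a privileged
   call such as swpipl (0x35) vectors to
   palbr + 0x1000 + 0x35 * 64 = palbr + 0x1d40.  */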
#ifndef CONFIG_USER_ONLY

#define PR_LONG         0x200000

static int cpu_pr_data(int pr)
{
    switch (pr) {
    case  2: return offsetof(CPUAlphaState, pcc_ofs) | PR_LONG;
    case  3: return offsetof(CPUAlphaState, trap_arg0);
    case  4: return offsetof(CPUAlphaState, trap_arg1);
    case  5: return offsetof(CPUAlphaState, trap_arg2);
    case  6: return offsetof(CPUAlphaState, exc_addr);
    case  7: return offsetof(CPUAlphaState, palbr);
    case  8: return offsetof(CPUAlphaState, ptbr);
    case  9: return offsetof(CPUAlphaState, vptptr);
    case 10: return offsetof(CPUAlphaState, unique);
    case 11: return offsetof(CPUAlphaState, sysval);
    case 12: return offsetof(CPUAlphaState, usp);

    case 40 ... 63:
        return offsetof(CPUAlphaState, scratch[pr - 40]);

    case 251:
        return offsetof(CPUAlphaState, alarm_expire);
    }
    return 0;
}
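/* For example, cpu_pr_data(2) yields offsetof(CPUAlphaState, pcc_ofs) with
   PR_LONG set, so gen_mfpr/gen_mtpr below access it as a 32-bit quantity,
   whereas cpu_pr_data(8) is a plain 64-bit ptbr access.  A return of 0
   marks the register as read-zero, write-ignore.  */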
static DisasJumpType gen_mfpr(DisasContext *ctx, TCGv va, int regno)
{
    void (*helper)(TCGv);
    int data;

    switch (regno) {
    case 32 ... 39:
        /* Accessing the "non-shadow" general registers.  */
        regno = regno == 39 ? 25 : regno - 32 + 8;
        tcg_gen_mov_i64(va, cpu_std_ir[regno]);
        break;

    case 250: /* WALLTIME */
        helper = gen_helper_get_walltime;
        goto do_helper;
    case 249: /* VMTIME */
        helper = gen_helper_get_vmtime;
    do_helper:
        if (tb_cflags(ctx->base.tb) & CF_USE_ICOUNT) {
            gen_io_start();
            helper(va);
            gen_io_end();
            return DISAS_PC_STALE;
        } else {
            helper(va);
        }
        break;

    case 0: /* PS */
        ld_flag_byte(va, ENV_FLAG_PS_SHIFT);
        break;
    case 1: /* FEN */
        ld_flag_byte(va, ENV_FLAG_FEN_SHIFT);
        break;

    default:
        /* The basic registers are data only, and unknown registers
           are read-zero, write-ignore.  */
        data = cpu_pr_data(regno);
        if (data == 0) {
            tcg_gen_movi_i64(va, 0);
        } else if (data & PR_LONG) {
            tcg_gen_ld32s_i64(va, cpu_env, data & ~PR_LONG);
        } else {
            tcg_gen_ld_i64(va, cpu_env, data);
        }
        break;
    }

    return DISAS_NEXT;
}
static DisasJumpType gen_mtpr(DisasContext *ctx, TCGv vb, int regno)
{
    int data;

    switch (regno) {
    case 255:
        /* TBIA */
        gen_helper_tbia(cpu_env);
        break;

    case 254:
        /* TBIS */
        gen_helper_tbis(cpu_env, vb);
        break;

    case 253:
        /* WAIT */
        {
            TCGv_i32 tmp = tcg_const_i32(1);
            tcg_gen_st_i32(tmp, cpu_env, -offsetof(AlphaCPU, env) +
                           offsetof(CPUState, halted));
            tcg_temp_free_i32(tmp);
        }
        return gen_excp(ctx, EXCP_HALTED, 0);

    case 252:
        /* HALT */
        gen_helper_halt(vb);
        return DISAS_PC_STALE;

    case 251:
        /* ALARM */
        gen_helper_set_alarm(cpu_env, vb);
        break;

    case 7:
        /* PALBR */
        tcg_gen_st_i64(vb, cpu_env, offsetof(CPUAlphaState, palbr));
        /* Changing the PAL base register implies un-chaining all of the TBs
           that ended with a CALL_PAL.  Since the base register usually only
           changes during boot, flushing everything works well.  */
        gen_helper_tb_flush(cpu_env);
        return DISAS_PC_STALE;

    case 32 ... 39:
        /* Accessing the "non-shadow" general registers.  */
        regno = regno == 39 ? 25 : regno - 32 + 8;
        tcg_gen_mov_i64(cpu_std_ir[regno], vb);
        break;

    case 0: /* PS */
        st_flag_byte(vb, ENV_FLAG_PS_SHIFT);
        break;
    case 1: /* FEN */
        st_flag_byte(vb, ENV_FLAG_FEN_SHIFT);
        break;

    default:
        /* The basic registers are data only, and unknown registers
           are read-zero, write-ignore.  */
        data = cpu_pr_data(regno);
        if (data != 0) {
            if (data & PR_LONG) {
                tcg_gen_st32_i64(vb, cpu_env, data & ~PR_LONG);
            } else {
                tcg_gen_st_i64(vb, cpu_env, data);
            }
        }
        break;
    }

    return DISAS_NEXT;
}

#endif /* !USER_ONLY */
#define REQUIRE_NO_LIT                          \
    do {                                        \
        if (real_islit) {                       \
            goto invalid_opc;                   \
        }                                       \
    } while (0)

#define REQUIRE_AMASK(FLAG)                     \
    do {                                        \
        if ((ctx->amask & AMASK_##FLAG) == 0) { \
            goto invalid_opc;                   \
        }                                       \
    } while (0)

#define REQUIRE_TB_FLAG(FLAG)                   \
    do {                                        \
        if ((ctx->tbflags & (FLAG)) == 0) {     \
            goto invalid_opc;                   \
        }                                       \
    } while (0)

#define REQUIRE_REG_31(WHICH)                   \
    do {                                        \
        if (WHICH != 31) {                      \
            goto invalid_opc;                   \
        }                                       \
    } while (0)
static DisasJumpType translate_one(DisasContext *ctx, uint32_t insn)
{
    int32_t disp21, disp16, disp12 __attribute__((unused));
    uint16_t fn11;
    uint8_t opc, ra, rb, rc, fpfn, fn7, lit;
    bool islit, real_islit;
    TCGv va, vb, vc, tmp, tmp2;
    TCGv_i32 t32;
    DisasJumpType ret;

    /* Decode all instruction fields */
    opc = extract32(insn, 26, 6);
    ra = extract32(insn, 21, 5);
    rb = extract32(insn, 16, 5);
    rc = extract32(insn, 0, 5);
    real_islit = islit = extract32(insn, 12, 1);
    lit = extract32(insn, 13, 8);

    disp21 = sextract32(insn, 0, 21);
    disp16 = sextract32(insn, 0, 16);
    disp12 = sextract32(insn, 0, 12);

    fn11 = extract32(insn, 5, 11);
    fpfn = extract32(insn, 5, 6);
    fn7 = extract32(insn, 5, 7);
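    /* For example, the operate-format instruction ADDQ $1,$2,$3 encodes as
       0x40220403: opc == 0x10, ra == 1, rb == 2, fn7 == 0x20, rc == 3, with
       bit 12 clear.  For the literal form ADDQ $1,5,$3, bit 12 is set and
       the 8-bit literal 5 occupies bits <20:13> in place of rb.  */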
    if (rb == 31 && !islit) {
        islit = true;
        lit = 0;
    }

    ret = DISAS_NEXT;
    switch (opc) {
    case 0x00:
        /* CALL_PAL */
        ret = gen_call_pal(ctx, insn & 0x03ffffff);
        break;
    case 0x09:
        /* LDAH */
        disp16 = (uint32_t)disp16 << 16;
        /* fall through */
    case 0x08:
        /* LDA */
        va = dest_gpr(ctx, ra);
        /* It's worth special-casing immediate loads.  */
        if (rb == 31) {
            tcg_gen_movi_i64(va, disp16);
        } else {
            tcg_gen_addi_i64(va, load_gpr(ctx, rb), disp16);
        }
        break;

    case 0x0A:
        /* LDBU */
        REQUIRE_AMASK(BWX);
        gen_load_mem(ctx, &tcg_gen_qemu_ld8u, ra, rb, disp16, 0, 0);
        break;
    case 0x0B:
        /* LDQ_U */
        gen_load_mem(ctx, &tcg_gen_qemu_ld64, ra, rb, disp16, 0, 1);
        break;
    case 0x0C:
        /* LDWU */
        REQUIRE_AMASK(BWX);
        gen_load_mem(ctx, &tcg_gen_qemu_ld16u, ra, rb, disp16, 0, 0);
        break;
    case 0x0D:
        /* STW */
        REQUIRE_AMASK(BWX);
        gen_store_mem(ctx, &tcg_gen_qemu_st16, ra, rb, disp16, 0, 0);
        break;
    case 0x0E:
        /* STB */
        REQUIRE_AMASK(BWX);
        gen_store_mem(ctx, &tcg_gen_qemu_st8, ra, rb, disp16, 0, 0);
        break;
    case 0x0F:
        /* STQ_U */
        gen_store_mem(ctx, &tcg_gen_qemu_st64, ra, rb, disp16, 0, 1);
        break;
    case 0x10:
        vc = dest_gpr(ctx, rc);
        vb = load_gpr_lit(ctx, rb, lit, islit);

        if (ra == 31) {
            if (fn7 == 0x00) {
                /* Special case ADDL as SEXTL.  */
                tcg_gen_ext32s_i64(vc, vb);
                break;
            }
            if (fn7 == 0x29) {
                /* Special case SUBQ as NEGQ.  */
                tcg_gen_neg_i64(vc, vb);
                break;
            }
        }

        va = load_gpr(ctx, ra);
        switch (fn7) {
        case 0x00:
            /* ADDL */
            tcg_gen_add_i64(vc, va, vb);
            tcg_gen_ext32s_i64(vc, vc);
            break;
        case 0x02:
            /* S4ADDL */
            tmp = tcg_temp_new();
            tcg_gen_shli_i64(tmp, va, 2);
            tcg_gen_add_i64(tmp, tmp, vb);
            tcg_gen_ext32s_i64(vc, tmp);
            tcg_temp_free(tmp);
            break;
        case 0x09:
            /* SUBL */
            tcg_gen_sub_i64(vc, va, vb);
            tcg_gen_ext32s_i64(vc, vc);
            break;
        case 0x0B:
            /* S4SUBL */
            tmp = tcg_temp_new();
            tcg_gen_shli_i64(tmp, va, 2);
            tcg_gen_sub_i64(tmp, tmp, vb);
            tcg_gen_ext32s_i64(vc, tmp);
            tcg_temp_free(tmp);
            break;
        case 0x0F:
            /* CMPBGE */
            if (ra == 31) {
                /* Special case 0 >= X as X == 0.  */
                gen_helper_cmpbe0(vc, vb);
            } else {
                gen_helper_cmpbge(vc, va, vb);
            }
            break;
        case 0x12:
            /* S8ADDL */
            tmp = tcg_temp_new();
            tcg_gen_shli_i64(tmp, va, 3);
            tcg_gen_add_i64(tmp, tmp, vb);
            tcg_gen_ext32s_i64(vc, tmp);
            tcg_temp_free(tmp);
            break;
        case 0x1B:
            /* S8SUBL */
            tmp = tcg_temp_new();
            tcg_gen_shli_i64(tmp, va, 3);
            tcg_gen_sub_i64(tmp, tmp, vb);
            tcg_gen_ext32s_i64(vc, tmp);
            tcg_temp_free(tmp);
            break;
        case 0x1D:
            /* CMPULT */
            tcg_gen_setcond_i64(TCG_COND_LTU, vc, va, vb);
            break;
        case 0x20:
            /* ADDQ */
            tcg_gen_add_i64(vc, va, vb);
            break;
        case 0x22:
            /* S4ADDQ */
            tmp = tcg_temp_new();
            tcg_gen_shli_i64(tmp, va, 2);
            tcg_gen_add_i64(vc, tmp, vb);
            tcg_temp_free(tmp);
            break;
        case 0x29:
            /* SUBQ */
            tcg_gen_sub_i64(vc, va, vb);
            break;
        case 0x2B:
            /* S4SUBQ */
            tmp = tcg_temp_new();
            tcg_gen_shli_i64(tmp, va, 2);
            tcg_gen_sub_i64(vc, tmp, vb);
            tcg_temp_free(tmp);
            break;
        case 0x2D:
            /* CMPEQ */
            tcg_gen_setcond_i64(TCG_COND_EQ, vc, va, vb);
            break;
        case 0x32:
            /* S8ADDQ */
            tmp = tcg_temp_new();
            tcg_gen_shli_i64(tmp, va, 3);
            tcg_gen_add_i64(vc, tmp, vb);
            tcg_temp_free(tmp);
            break;
        case 0x3B:
            /* S8SUBQ */
            tmp = tcg_temp_new();
            tcg_gen_shli_i64(tmp, va, 3);
            tcg_gen_sub_i64(vc, tmp, vb);
            tcg_temp_free(tmp);
            break;
        case 0x3D:
            /* CMPULE */
            tcg_gen_setcond_i64(TCG_COND_LEU, vc, va, vb);
            break;
        case 0x40:
            /* ADDL/V */
            tmp = tcg_temp_new();
            tcg_gen_ext32s_i64(tmp, va);
            tcg_gen_ext32s_i64(vc, vb);
            tcg_gen_add_i64(tmp, tmp, vc);
            tcg_gen_ext32s_i64(vc, tmp);
            gen_helper_check_overflow(cpu_env, vc, tmp);
            tcg_temp_free(tmp);
            break;
        case 0x49:
            /* SUBL/V */
            tmp = tcg_temp_new();
            tcg_gen_ext32s_i64(tmp, va);
            tcg_gen_ext32s_i64(vc, vb);
            tcg_gen_sub_i64(tmp, tmp, vc);
            tcg_gen_ext32s_i64(vc, tmp);
            gen_helper_check_overflow(cpu_env, vc, tmp);
            tcg_temp_free(tmp);
            break;
        case 0x4D:
            /* CMPLT */
            tcg_gen_setcond_i64(TCG_COND_LT, vc, va, vb);
            break;
        case 0x60:
            /* ADDQ/V */
            tmp = tcg_temp_new();
            tmp2 = tcg_temp_new();
            tcg_gen_eqv_i64(tmp, va, vb);
            tcg_gen_mov_i64(tmp2, va);
            tcg_gen_add_i64(vc, va, vb);
            tcg_gen_xor_i64(tmp2, tmp2, vc);
            tcg_gen_and_i64(tmp, tmp, tmp2);
            tcg_gen_shri_i64(tmp, tmp, 63);
            tcg_gen_movi_i64(tmp2, 0);
            gen_helper_check_overflow(cpu_env, tmp, tmp2);
            tcg_temp_free(tmp);
            tcg_temp_free(tmp2);
            break;
        case 0x69:
            /* SUBQ/V */
            tmp = tcg_temp_new();
            tmp2 = tcg_temp_new();
            tcg_gen_xor_i64(tmp, va, vb);
            tcg_gen_mov_i64(tmp2, va);
            tcg_gen_sub_i64(vc, va, vb);
            tcg_gen_xor_i64(tmp2, tmp2, vc);
            tcg_gen_and_i64(tmp, tmp, tmp2);
            tcg_gen_shri_i64(tmp, tmp, 63);
            tcg_gen_movi_i64(tmp2, 0);
            gen_helper_check_overflow(cpu_env, tmp, tmp2);
            tcg_temp_free(tmp);
            tcg_temp_free(tmp2);
            break;
        case 0x6D:
            /* CMPLE */
            tcg_gen_setcond_i64(TCG_COND_LE, vc, va, vb);
            break;
        default:
            goto invalid_opc;
        }
        break;
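    /* The ADDQ/V and SUBQ/V sequences above compute the canonical signed
       overflow predicate.  For addition, overflow occurs iff the operands
       have equal signs and the result's sign differs, i.e.
       (~(a ^ b) & (a ^ r)) >> 63: eqv gives ~(a ^ b) directly, and the xor
       against the saved copy of va gives a ^ r.  For subtraction the first
       term is a ^ b, since overflow requires differing operand signs.  */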
    case 0x11:
        if (fn7 == 0x20) {
            if (rc == 31) {
                /* Special case BIS as NOP.  */
                break;
            }
            if (ra == 31) {
                /* Special case BIS as MOV.  */
                vc = dest_gpr(ctx, rc);
                if (islit) {
                    tcg_gen_movi_i64(vc, lit);
                } else {
                    tcg_gen_mov_i64(vc, load_gpr(ctx, rb));
                }
                break;
            }
        }

        vc = dest_gpr(ctx, rc);
        vb = load_gpr_lit(ctx, rb, lit, islit);

        if (fn7 == 0x28 && ra == 31) {
            /* Special case ORNOT as NOT.  */
            tcg_gen_not_i64(vc, vb);
            break;
        }

        va = load_gpr(ctx, ra);
        switch (fn7) {
        case 0x00:
            /* AND */
            tcg_gen_and_i64(vc, va, vb);
            break;
        case 0x08:
            /* BIC */
            tcg_gen_andc_i64(vc, va, vb);
            break;
        case 0x14:
            /* CMOVLBS */
            tmp = tcg_temp_new();
            tcg_gen_andi_i64(tmp, va, 1);
            tcg_gen_movcond_i64(TCG_COND_NE, vc, tmp, load_zero(ctx),
                                vb, load_gpr(ctx, rc));
            tcg_temp_free(tmp);
            break;
        case 0x16:
            /* CMOVLBC */
            tmp = tcg_temp_new();
            tcg_gen_andi_i64(tmp, va, 1);
            tcg_gen_movcond_i64(TCG_COND_EQ, vc, tmp, load_zero(ctx),
                                vb, load_gpr(ctx, rc));
            tcg_temp_free(tmp);
            break;
        case 0x20:
            /* BIS */
            tcg_gen_or_i64(vc, va, vb);
            break;
        case 0x24:
            /* CMOVEQ */
            tcg_gen_movcond_i64(TCG_COND_EQ, vc, va, load_zero(ctx),
                                vb, load_gpr(ctx, rc));
            break;
        case 0x26:
            /* CMOVNE */
            tcg_gen_movcond_i64(TCG_COND_NE, vc, va, load_zero(ctx),
                                vb, load_gpr(ctx, rc));
            break;
        case 0x28:
            /* ORNOT */
            tcg_gen_orc_i64(vc, va, vb);
            break;
        case 0x40:
            /* XOR */
            tcg_gen_xor_i64(vc, va, vb);
            break;
        case 0x44:
            /* CMOVLT */
            tcg_gen_movcond_i64(TCG_COND_LT, vc, va, load_zero(ctx),
                                vb, load_gpr(ctx, rc));
            break;
        case 0x46:
            /* CMOVGE */
            tcg_gen_movcond_i64(TCG_COND_GE, vc, va, load_zero(ctx),
                                vb, load_gpr(ctx, rc));
            break;
        case 0x48:
            /* EQV */
            tcg_gen_eqv_i64(vc, va, vb);
            break;
        case 0x61:
            /* AMASK */
            REQUIRE_REG_31(ra);
            tcg_gen_andi_i64(vc, vb, ~ctx->amask);
            break;
        case 0x64:
            /* CMOVLE */
            tcg_gen_movcond_i64(TCG_COND_LE, vc, va, load_zero(ctx),
                                vb, load_gpr(ctx, rc));
            break;
        case 0x66:
            /* CMOVGT */
            tcg_gen_movcond_i64(TCG_COND_GT, vc, va, load_zero(ctx),
                                vb, load_gpr(ctx, rc));
            break;
        case 0x6C:
            /* IMPLVER */
            REQUIRE_REG_31(ra);
            tcg_gen_movi_i64(vc, ctx->implver);
            break;
        default:
            goto invalid_opc;
        }
        break;
    case 0x12:
        vc = dest_gpr(ctx, rc);
        va = load_gpr(ctx, ra);
        switch (fn7) {
        case 0x02:
            /* MSKBL */
            gen_msk_l(ctx, vc, va, rb, islit, lit, 0x01);
            break;
        case 0x06:
            /* EXTBL */
            gen_ext_l(ctx, vc, va, rb, islit, lit, 0x01);
            break;
        case 0x0B:
            /* INSBL */
            gen_ins_l(ctx, vc, va, rb, islit, lit, 0x01);
            break;
        case 0x12:
            /* MSKWL */
            gen_msk_l(ctx, vc, va, rb, islit, lit, 0x03);
            break;
        case 0x16:
            /* EXTWL */
            gen_ext_l(ctx, vc, va, rb, islit, lit, 0x03);
            break;
        case 0x1B:
            /* INSWL */
            gen_ins_l(ctx, vc, va, rb, islit, lit, 0x03);
            break;
        case 0x22:
            /* MSKLL */
            gen_msk_l(ctx, vc, va, rb, islit, lit, 0x0f);
            break;
        case 0x26:
            /* EXTLL */
            gen_ext_l(ctx, vc, va, rb, islit, lit, 0x0f);
            break;
        case 0x2B:
            /* INSLL */
            gen_ins_l(ctx, vc, va, rb, islit, lit, 0x0f);
            break;
        case 0x30:
            /* ZAP */
            if (islit) {
                gen_zapnoti(vc, va, ~lit);
            } else {
                gen_helper_zap(vc, va, load_gpr(ctx, rb));
            }
            break;
        case 0x31:
            /* ZAPNOT */
            if (islit) {
                gen_zapnoti(vc, va, lit);
            } else {
                gen_helper_zapnot(vc, va, load_gpr(ctx, rb));
            }
            break;
        case 0x32:
            /* MSKQL */
            gen_msk_l(ctx, vc, va, rb, islit, lit, 0xff);
            break;
        case 0x34:
            /* SRL */
            if (islit) {
                tcg_gen_shri_i64(vc, va, lit & 0x3f);
            } else {
                tmp = tcg_temp_new();
                vb = load_gpr(ctx, rb);
                tcg_gen_andi_i64(tmp, vb, 0x3f);
                tcg_gen_shr_i64(vc, va, tmp);
                tcg_temp_free(tmp);
            }
            break;
        case 0x36:
            /* EXTQL */
            gen_ext_l(ctx, vc, va, rb, islit, lit, 0xff);
            break;
        case 0x39:
            /* SLL */
            if (islit) {
                tcg_gen_shli_i64(vc, va, lit & 0x3f);
            } else {
                tmp = tcg_temp_new();
                vb = load_gpr(ctx, rb);
                tcg_gen_andi_i64(tmp, vb, 0x3f);
                tcg_gen_shl_i64(vc, va, tmp);
                tcg_temp_free(tmp);
            }
            break;
        case 0x3B:
            /* INSQL */
            gen_ins_l(ctx, vc, va, rb, islit, lit, 0xff);
            break;
        case 0x3C:
            /* SRA */
            if (islit) {
                tcg_gen_sari_i64(vc, va, lit & 0x3f);
            } else {
                tmp = tcg_temp_new();
                vb = load_gpr(ctx, rb);
                tcg_gen_andi_i64(tmp, vb, 0x3f);
                tcg_gen_sar_i64(vc, va, tmp);
                tcg_temp_free(tmp);
            }
            break;
        case 0x52:
            /* MSKWH */
            gen_msk_h(ctx, vc, va, rb, islit, lit, 0x03);
            break;
        case 0x57:
            /* INSWH */
            gen_ins_h(ctx, vc, va, rb, islit, lit, 0x03);
            break;
        case 0x5A:
            /* EXTWH */
            gen_ext_h(ctx, vc, va, rb, islit, lit, 0x03);
            break;
        case 0x62:
            /* MSKLH */
            gen_msk_h(ctx, vc, va, rb, islit, lit, 0x0f);
            break;
        case 0x67:
            /* INSLH */
            gen_ins_h(ctx, vc, va, rb, islit, lit, 0x0f);
            break;
        case 0x6A:
            /* EXTLH */
            gen_ext_h(ctx, vc, va, rb, islit, lit, 0x0f);
            break;
        case 0x72:
            /* MSKQH */
            gen_msk_h(ctx, vc, va, rb, islit, lit, 0xff);
            break;
        case 0x77:
            /* INSQH */
            gen_ins_h(ctx, vc, va, rb, islit, lit, 0xff);
            break;
        case 0x7A:
            /* EXTQH */
            gen_ext_h(ctx, vc, va, rb, islit, lit, 0xff);
            break;
        default:
            goto invalid_opc;
        }
        break;
    case 0x13:
        vc = dest_gpr(ctx, rc);
        vb = load_gpr_lit(ctx, rb, lit, islit);
        va = load_gpr(ctx, ra);
        switch (fn7) {
        case 0x00:
            /* MULL */
            tcg_gen_mul_i64(vc, va, vb);
            tcg_gen_ext32s_i64(vc, vc);
            break;
        case 0x20:
            /* MULQ */
            tcg_gen_mul_i64(vc, va, vb);
            break;
        case 0x30:
            /* UMULH */
            tmp = tcg_temp_new();
            tcg_gen_mulu2_i64(tmp, vc, va, vb);
            tcg_temp_free(tmp);
            break;
        case 0x40:
            /* MULL/V */
            tmp = tcg_temp_new();
            tcg_gen_ext32s_i64(tmp, va);
            tcg_gen_ext32s_i64(vc, vb);
            tcg_gen_mul_i64(tmp, tmp, vc);
            tcg_gen_ext32s_i64(vc, tmp);
            gen_helper_check_overflow(cpu_env, vc, tmp);
            tcg_temp_free(tmp);
            break;
        case 0x60:
            /* MULQ/V */
            tmp = tcg_temp_new();
            tmp2 = tcg_temp_new();
            tcg_gen_muls2_i64(vc, tmp, va, vb);
            tcg_gen_sari_i64(tmp2, vc, 63);
            gen_helper_check_overflow(cpu_env, tmp, tmp2);
            tcg_temp_free(tmp);
            tcg_temp_free(tmp2);
            break;
        default:
            goto invalid_opc;
        }
        break;
    case 0x14:
        REQUIRE_AMASK(FIX);
        vc = dest_fpr(ctx, rc);
        switch (fpfn) { /* fn11 & 0x3F */
        case 0x04:
            /* ITOFS */
            REQUIRE_REG_31(rb);
            t32 = tcg_temp_new_i32();
            va = load_gpr(ctx, ra);
            tcg_gen_extrl_i64_i32(t32, va);
            gen_helper_memory_to_s(vc, t32);
            tcg_temp_free_i32(t32);
            break;
        case 0x0A:
            /* SQRTF */
            REQUIRE_REG_31(ra);
            vb = load_fpr(ctx, rb);
            gen_helper_sqrtf(vc, cpu_env, vb);
            break;
        case 0x0B:
            /* SQRTS */
            REQUIRE_REG_31(ra);
            gen_sqrts(ctx, rb, rc, fn11);
            break;
        case 0x14:
            /* ITOFF */
            REQUIRE_REG_31(rb);
            t32 = tcg_temp_new_i32();
            va = load_gpr(ctx, ra);
            tcg_gen_extrl_i64_i32(t32, va);
            gen_helper_memory_to_f(vc, t32);
            tcg_temp_free_i32(t32);
            break;
        case 0x24:
            /* ITOFT */
            REQUIRE_REG_31(rb);
            va = load_gpr(ctx, ra);
            tcg_gen_mov_i64(vc, va);
            break;
        case 0x2A:
            /* SQRTG */
            REQUIRE_REG_31(ra);
            vb = load_fpr(ctx, rb);
            gen_helper_sqrtg(vc, cpu_env, vb);
            break;
        case 0x02B:
            /* SQRTT */
            REQUIRE_REG_31(ra);
            gen_sqrtt(ctx, rb, rc, fn11);
            break;
        default:
            goto invalid_opc;
        }
        break;
    case 0x15:
        /* VAX floating point */
        /* XXX: rounding mode and trap are ignored (!) */
        vc = dest_fpr(ctx, rc);
        vb = load_fpr(ctx, rb);
        va = load_fpr(ctx, ra);
        switch (fpfn) { /* fn11 & 0x3F */
        case 0x00:
            /* ADDF */
            gen_helper_addf(vc, cpu_env, va, vb);
            break;
        case 0x01:
            /* SUBF */
            gen_helper_subf(vc, cpu_env, va, vb);
            break;
        case 0x02:
            /* MULF */
            gen_helper_mulf(vc, cpu_env, va, vb);
            break;
        case 0x03:
            /* DIVF */
            gen_helper_divf(vc, cpu_env, va, vb);
            break;
        case 0x20:
            /* ADDG */
            gen_helper_addg(vc, cpu_env, va, vb);
            break;
        case 0x21:
            /* SUBG */
            gen_helper_subg(vc, cpu_env, va, vb);
            break;
        case 0x22:
            /* MULG */
            gen_helper_mulg(vc, cpu_env, va, vb);
            break;
        case 0x23:
            /* DIVG */
            gen_helper_divg(vc, cpu_env, va, vb);
            break;
        case 0x25:
            /* CMPGEQ */
            gen_helper_cmpgeq(vc, cpu_env, va, vb);
            break;
        case 0x26:
            /* CMPGLT */
            gen_helper_cmpglt(vc, cpu_env, va, vb);
            break;
        case 0x27:
            /* CMPGLE */
            gen_helper_cmpgle(vc, cpu_env, va, vb);
            break;
        case 0x2C:
            /* CVTGF */
            REQUIRE_REG_31(ra);
            gen_helper_cvtgf(vc, cpu_env, vb);
            break;
        case 0x2F:
            /* CVTGQ */
            REQUIRE_REG_31(ra);
            gen_helper_cvtgq(vc, cpu_env, vb);
            break;
        case 0x3C:
            /* CVTQF */
            REQUIRE_REG_31(ra);
            gen_helper_cvtqf(vc, cpu_env, vb);
            break;
        case 0x3E:
            /* CVTQG */
            REQUIRE_REG_31(ra);
            gen_helper_cvtqg(vc, cpu_env, vb);
            break;
        default:
            goto invalid_opc;
        }
        break;
    case 0x16:
        /* IEEE floating-point */
        switch (fpfn) { /* fn11 & 0x3F */
        case 0x00:
            /* ADDS */
            gen_adds(ctx, ra, rb, rc, fn11);
            break;
        case 0x01:
            /* SUBS */
            gen_subs(ctx, ra, rb, rc, fn11);
            break;
        case 0x02:
            /* MULS */
            gen_muls(ctx, ra, rb, rc, fn11);
            break;
        case 0x03:
            /* DIVS */
            gen_divs(ctx, ra, rb, rc, fn11);
            break;
        case 0x20:
            /* ADDT */
            gen_addt(ctx, ra, rb, rc, fn11);
            break;
        case 0x21:
            /* SUBT */
            gen_subt(ctx, ra, rb, rc, fn11);
            break;
        case 0x22:
            /* MULT */
            gen_mult(ctx, ra, rb, rc, fn11);
            break;
        case 0x23:
            /* DIVT */
            gen_divt(ctx, ra, rb, rc, fn11);
            break;
        case 0x24:
            /* CMPTUN */
            gen_cmptun(ctx, ra, rb, rc, fn11);
            break;
        case 0x25:
            /* CMPTEQ */
            gen_cmpteq(ctx, ra, rb, rc, fn11);
            break;
        case 0x26:
            /* CMPTLT */
            gen_cmptlt(ctx, ra, rb, rc, fn11);
            break;
        case 0x27:
            /* CMPTLE */
            gen_cmptle(ctx, ra, rb, rc, fn11);
            break;
        case 0x2C:
            REQUIRE_REG_31(ra);
            if (fn11 == 0x2AC || fn11 == 0x6AC) {
                /* CVTST */
                gen_cvtst(ctx, rb, rc, fn11);
            } else {
                /* CVTTS */
                gen_cvtts(ctx, rb, rc, fn11);
            }
            break;
        case 0x2F:
            /* CVTTQ */
            REQUIRE_REG_31(ra);
            gen_cvttq(ctx, rb, rc, fn11);
            break;
        case 0x3C:
            /* CVTQS */
            REQUIRE_REG_31(ra);
            gen_cvtqs(ctx, rb, rc, fn11);
            break;
        case 0x3E:
            /* CVTQT */
            REQUIRE_REG_31(ra);
            gen_cvtqt(ctx, rb, rc, fn11);
            break;
        default:
            goto invalid_opc;
        }
        break;
    case 0x17:
        switch (fn11) {
        case 0x010:
            /* CVTLQ */
            REQUIRE_REG_31(ra);
            vc = dest_fpr(ctx, rc);
            vb = load_fpr(ctx, rb);
            gen_cvtlq(vc, vb);
            break;
        case 0x020:
            /* CPYS */
            if (rc == 31) {
                /* Special case CPYS as FNOP.  */
            } else {
                vc = dest_fpr(ctx, rc);
                va = load_fpr(ctx, ra);
                if (ra == rb) {
                    /* Special case CPYS as FMOV.  */
                    tcg_gen_mov_i64(vc, va);
                } else {
                    vb = load_fpr(ctx, rb);
                    gen_cpy_mask(vc, va, vb, 0, 0x8000000000000000ULL);
                }
            }
            break;
        case 0x021:
            /* CPYSN */
            vc = dest_fpr(ctx, rc);
            vb = load_fpr(ctx, rb);
            va = load_fpr(ctx, ra);
            gen_cpy_mask(vc, va, vb, 1, 0x8000000000000000ULL);
            break;
        case 0x022:
            /* CPYSE */
            vc = dest_fpr(ctx, rc);
            vb = load_fpr(ctx, rb);
            va = load_fpr(ctx, ra);
            gen_cpy_mask(vc, va, vb, 0, 0xFFF0000000000000ULL);
            break;
        case 0x024:
            /* MT_FPCR */
            va = load_fpr(ctx, ra);
            gen_helper_store_fpcr(cpu_env, va);
            if (ctx->tb_rm == QUAL_RM_D) {
                /* Re-do the copy of the rounding mode to fp_status
                   the next time we use dynamic rounding.  */
                ctx->tb_rm = -1;
            }
            break;
        case 0x025:
            /* MF_FPCR */
            va = dest_fpr(ctx, ra);
            gen_helper_load_fpcr(va, cpu_env);
            break;
        case 0x02A:
            /* FCMOVEQ */
            gen_fcmov(ctx, TCG_COND_EQ, ra, rb, rc);
            break;
        case 0x02B:
            /* FCMOVNE */
            gen_fcmov(ctx, TCG_COND_NE, ra, rb, rc);
            break;
        case 0x02C:
            /* FCMOVLT */
            gen_fcmov(ctx, TCG_COND_LT, ra, rb, rc);
            break;
        case 0x02D:
            /* FCMOVGE */
            gen_fcmov(ctx, TCG_COND_GE, ra, rb, rc);
            break;
        case 0x02E:
            /* FCMOVLE */
            gen_fcmov(ctx, TCG_COND_LE, ra, rb, rc);
            break;
        case 0x02F:
            /* FCMOVGT */
            gen_fcmov(ctx, TCG_COND_GT, ra, rb, rc);
            break;
        case 0x030: /* CVTQL */
        case 0x130: /* CVTQL/V */
        case 0x530: /* CVTQL/SV */
            REQUIRE_REG_31(ra);
            vc = dest_fpr(ctx, rc);
            vb = load_fpr(ctx, rb);
            gen_helper_cvtql(vc, cpu_env, vb);
            gen_fp_exc_raise(rc, fn11);
            break;
        default:
            goto invalid_opc;
        }
        break;
    case 0x18:
        switch ((uint16_t)disp16) {
        case 0x0000:
            /* TRAPB */
            /* No-op.  */
            break;
        case 0x0400:
            /* EXCB */
            /* No-op.  */
            break;
        case 0x4000:
            /* MB */
            tcg_gen_mb(TCG_MO_ALL | TCG_BAR_SC);
            break;
        case 0x4400:
            /* WMB */
            tcg_gen_mb(TCG_MO_ST_ST | TCG_BAR_SC);
            break;
        case 0x8000:
            /* FETCH */
            /* No-op.  */
            break;
        case 0xA000:
            /* FETCH_M */
            /* No-op.  */
            break;
        case 0xC000:
            /* RPCC */
            va = dest_gpr(ctx, ra);
            if (tb_cflags(ctx->base.tb) & CF_USE_ICOUNT) {
                gen_io_start();
                gen_helper_load_pcc(va, cpu_env);
                gen_io_end();
                ret = DISAS_PC_STALE;
            } else {
                gen_helper_load_pcc(va, cpu_env);
            }
            break;
        case 0xE000:
            /* RC */
            gen_rx(ctx, ra, 0);
            break;
        case 0xF000:
            /* RS */
            gen_rx(ctx, ra, 1);
            break;
        default:
            goto invalid_opc;
        }
        break;
    case 0x19:
        /* HW_MFPR (PALcode) */
#ifndef CONFIG_USER_ONLY
        REQUIRE_TB_FLAG(ENV_FLAG_PAL_MODE);
        va = dest_gpr(ctx, ra);
        ret = gen_mfpr(ctx, va, insn & 0xffff);
        break;
#else
        goto invalid_opc;
#endif
    case 0x1A:
        /* JMP, JSR, RET, JSR_COROUTINE.  These only differ by the branch
           prediction stack action, which of course we don't implement.  */
        vb = load_gpr(ctx, rb);
        tcg_gen_andi_i64(cpu_pc, vb, ~3);
        if (ra != 31) {
            tcg_gen_movi_i64(ctx->ir[ra], ctx->base.pc_next);
        }
        ret = DISAS_PC_UPDATED;
        break;
    case 0x1B:
        /* HW_LD (PALcode) */
#ifndef CONFIG_USER_ONLY
        REQUIRE_TB_FLAG(ENV_FLAG_PAL_MODE);
        {
            TCGv addr = tcg_temp_new();
            vb = load_gpr(ctx, rb);
            va = dest_gpr(ctx, ra);

            tcg_gen_addi_i64(addr, vb, disp12);
            switch ((insn >> 12) & 0xF) {
            case 0x0:
                /* Longword physical access (hw_ldl/p) */
                tcg_gen_qemu_ld_i64(va, addr, MMU_PHYS_IDX, MO_LESL);
                break;
            case 0x1:
                /* Quadword physical access (hw_ldq/p) */
                tcg_gen_qemu_ld_i64(va, addr, MMU_PHYS_IDX, MO_LEQ);
                break;
            case 0x2:
                /* Longword physical access with lock (hw_ldl_l/p) */
                gen_qemu_ldl_l(va, addr, MMU_PHYS_IDX);
                break;
            case 0x3:
                /* Quadword physical access with lock (hw_ldq_l/p) */
                gen_qemu_ldq_l(va, addr, MMU_PHYS_IDX);
                break;
            case 0x4:
                /* Longword virtual PTE fetch (hw_ldl/v) */
                goto invalid_opc;
            case 0x5:
                /* Quadword virtual PTE fetch (hw_ldq/v) */
                goto invalid_opc;
            case 0x8:
                /* Longword virtual access (hw_ldl) */
                goto invalid_opc;
            case 0x9:
                /* Quadword virtual access (hw_ldq) */
                goto invalid_opc;
            case 0xA:
                /* Longword virtual access with protection check (hw_ldl/w) */
                tcg_gen_qemu_ld_i64(va, addr, MMU_KERNEL_IDX, MO_LESL);
                break;
            case 0xB:
                /* Quadword virtual access with protection check (hw_ldq/w) */
                tcg_gen_qemu_ld_i64(va, addr, MMU_KERNEL_IDX, MO_LEQ);
                break;
            case 0xC:
                /* Longword virtual access with alt access mode (hw_ldl/a) */
                goto invalid_opc;
            case 0xD:
                /* Quadword virtual access with alt access mode (hw_ldq/a) */
                goto invalid_opc;
            case 0xE:
                /* Longword virtual access with alternate access mode and
                   protection checks (hw_ldl/wa) */
                tcg_gen_qemu_ld_i64(va, addr, MMU_USER_IDX, MO_LESL);
                break;
            case 0xF:
                /* Quadword virtual access with alternate access mode and
                   protection checks (hw_ldq/wa) */
                tcg_gen_qemu_ld_i64(va, addr, MMU_USER_IDX, MO_LEQ);
                break;
            default:
                goto invalid_opc;
            }
            tcg_temp_free(addr);
            break;
        }
#else
        goto invalid_opc;
#endif
    case 0x1C:
        vc = dest_gpr(ctx, rc);
        if (fn7 == 0x70) {
            /* FTOIT */
            REQUIRE_AMASK(FIX);
            REQUIRE_REG_31(rb);
            va = load_fpr(ctx, ra);
            tcg_gen_mov_i64(vc, va);
            break;
        } else if (fn7 == 0x78) {
            /* FTOIS */
            REQUIRE_AMASK(FIX);
            REQUIRE_REG_31(rb);
            t32 = tcg_temp_new_i32();
            va = load_fpr(ctx, ra);
            gen_helper_s_to_memory(t32, va);
            tcg_gen_ext_i32_i64(vc, t32);
            tcg_temp_free_i32(t32);
            break;
        }

        vb = load_gpr_lit(ctx, rb, lit, islit);
        switch (fn7) {
        case 0x00:
            /* SEXTB */
            REQUIRE_AMASK(BWX);
            REQUIRE_REG_31(ra);
            tcg_gen_ext8s_i64(vc, vb);
            break;
        case 0x01:
            /* SEXTW */
            REQUIRE_AMASK(BWX);
            REQUIRE_REG_31(ra);
            tcg_gen_ext16s_i64(vc, vb);
            break;
        case 0x30:
            /* CTPOP */
            REQUIRE_AMASK(CIX);
            REQUIRE_REG_31(ra);
            REQUIRE_NO_LIT;
            tcg_gen_ctpop_i64(vc, vb);
            break;
        case 0x31:
            /* PERR */
            REQUIRE_AMASK(MVI);
            REQUIRE_NO_LIT;
            va = load_gpr(ctx, ra);
            gen_helper_perr(vc, va, vb);
            break;
        case 0x32:
            /* CTLZ */
            REQUIRE_AMASK(CIX);
            REQUIRE_REG_31(ra);
            REQUIRE_NO_LIT;
            tcg_gen_clzi_i64(vc, vb, 64);
            break;
        case 0x33:
            /* CTTZ */
            REQUIRE_AMASK(CIX);
            REQUIRE_REG_31(ra);
            REQUIRE_NO_LIT;
            tcg_gen_ctzi_i64(vc, vb, 64);
            break;
        case 0x34:
            /* UNPKBW */
            REQUIRE_AMASK(MVI);
            REQUIRE_REG_31(ra);
            REQUIRE_NO_LIT;
            gen_helper_unpkbw(vc, vb);
            break;
        case 0x35:
            /* UNPKBL */
            REQUIRE_AMASK(MVI);
            REQUIRE_REG_31(ra);
            REQUIRE_NO_LIT;
            gen_helper_unpkbl(vc, vb);
            break;
        case 0x36:
            /* PKWB */
            REQUIRE_AMASK(MVI);
            REQUIRE_REG_31(ra);
            REQUIRE_NO_LIT;
            gen_helper_pkwb(vc, vb);
            break;
        case 0x37:
            /* PKLB */
            REQUIRE_AMASK(MVI);
            REQUIRE_REG_31(ra);
            REQUIRE_NO_LIT;
            gen_helper_pklb(vc, vb);
            break;
        case 0x38:
            /* MINSB8 */
            REQUIRE_AMASK(MVI);
            va = load_gpr(ctx, ra);
            gen_helper_minsb8(vc, va, vb);
            break;
        case 0x39:
            /* MINSW4 */
            REQUIRE_AMASK(MVI);
            va = load_gpr(ctx, ra);
            gen_helper_minsw4(vc, va, vb);
            break;
        case 0x3A:
            /* MINUB8 */
            REQUIRE_AMASK(MVI);
            va = load_gpr(ctx, ra);
            gen_helper_minub8(vc, va, vb);
            break;
        case 0x3B:
            /* MINUW4 */
            REQUIRE_AMASK(MVI);
            va = load_gpr(ctx, ra);
            gen_helper_minuw4(vc, va, vb);
            break;
        case 0x3C:
            /* MAXUB8 */
            REQUIRE_AMASK(MVI);
            va = load_gpr(ctx, ra);
            gen_helper_maxub8(vc, va, vb);
            break;
        case 0x3D:
            /* MAXUW4 */
            REQUIRE_AMASK(MVI);
            va = load_gpr(ctx, ra);
            gen_helper_maxuw4(vc, va, vb);
            break;
        case 0x3E:
            /* MAXSB8 */
            REQUIRE_AMASK(MVI);
            va = load_gpr(ctx, ra);
            gen_helper_maxsb8(vc, va, vb);
            break;
        case 0x3F:
            /* MAXSW4 */
            REQUIRE_AMASK(MVI);
            va = load_gpr(ctx, ra);
            gen_helper_maxsw4(vc, va, vb);
            break;
        default:
            goto invalid_opc;
        }
        break;
    case 0x1D:
        /* HW_MTPR (PALcode) */
#ifndef CONFIG_USER_ONLY
        REQUIRE_TB_FLAG(ENV_FLAG_PAL_MODE);
        vb = load_gpr(ctx, rb);
        ret = gen_mtpr(ctx, vb, insn & 0xffff);
        break;
#else
        goto invalid_opc;
#endif
    case 0x1E:
        /* HW_RET (PALcode) */
#ifndef CONFIG_USER_ONLY
        REQUIRE_TB_FLAG(ENV_FLAG_PAL_MODE);
        if (rb == 31) {
            /* Pre-EV6 CPUs interpreted this as HW_REI, loading the return
               address from EXC_ADDR.  This turns out to be useful for our
               emulation PALcode, so continue to accept it.  */
            ctx->lit = vb = tcg_temp_new();
            tcg_gen_ld_i64(vb, cpu_env, offsetof(CPUAlphaState, exc_addr));
        } else {
            vb = load_gpr(ctx, rb);
        }
        tcg_gen_movi_i64(cpu_lock_addr, -1);
        tmp = tcg_temp_new();
        tcg_gen_movi_i64(tmp, 0);
        st_flag_byte(tmp, ENV_FLAG_RX_SHIFT);
        tcg_gen_andi_i64(tmp, vb, 1);
        st_flag_byte(tmp, ENV_FLAG_PAL_SHIFT);
        tcg_temp_free(tmp);
        tcg_gen_andi_i64(cpu_pc, vb, ~3);
        /* Allow interrupts to be recognized right away.  */
        ret = DISAS_PC_UPDATED_NOCHAIN;
        break;
#else
        goto invalid_opc;
#endif
    case 0x1F:
        /* HW_ST (PALcode) */
#ifndef CONFIG_USER_ONLY
        REQUIRE_TB_FLAG(ENV_FLAG_PAL_MODE);
        switch ((insn >> 12) & 0xF) {
        case 0x0:
            /* Longword physical access */
            va = load_gpr(ctx, ra);
            vb = load_gpr(ctx, rb);
            tmp = tcg_temp_new();
            tcg_gen_addi_i64(tmp, vb, disp12);
            tcg_gen_qemu_st_i64(va, tmp, MMU_PHYS_IDX, MO_LESL);
            tcg_temp_free(tmp);
            break;
        case 0x1:
            /* Quadword physical access */
            va = load_gpr(ctx, ra);
            vb = load_gpr(ctx, rb);
            tmp = tcg_temp_new();
            tcg_gen_addi_i64(tmp, vb, disp12);
            tcg_gen_qemu_st_i64(va, tmp, MMU_PHYS_IDX, MO_LEQ);
            tcg_temp_free(tmp);
            break;
        case 0x2:
            /* Longword physical access with lock */
            ret = gen_store_conditional(ctx, ra, rb, disp12,
                                        MMU_PHYS_IDX, MO_LESL);
            break;
        case 0x3:
            /* Quadword physical access with lock */
            ret = gen_store_conditional(ctx, ra, rb, disp12,
                                        MMU_PHYS_IDX, MO_LEQ);
            break;
        case 0x4:
            /* Longword virtual access */
            goto invalid_opc;
        case 0x5:
            /* Quadword virtual access */
            goto invalid_opc;
        case 0xC:
            /* Longword virtual access with alternate access mode */
            goto invalid_opc;
        case 0xD:
            /* Quadword virtual access with alternate access mode */
            goto invalid_opc;
        default:
            /* Invalid and unimplemented minor opcodes.  */
            goto invalid_opc;
        }
        break;
#else
        goto invalid_opc;
#endif
    case 0x20:
        /* LDF */
        gen_load_mem(ctx, &gen_qemu_ldf, ra, rb, disp16, 1, 0);
        break;
    case 0x21:
        /* LDG */
        gen_load_mem(ctx, &gen_qemu_ldg, ra, rb, disp16, 1, 0);
        break;
    case 0x22:
        /* LDS */
        gen_load_mem(ctx, &gen_qemu_lds, ra, rb, disp16, 1, 0);
        break;
    case 0x23:
        /* LDT */
        gen_load_mem(ctx, &tcg_gen_qemu_ld64, ra, rb, disp16, 1, 0);
        break;
    case 0x24:
        /* STF */
        gen_store_mem(ctx, &gen_qemu_stf, ra, rb, disp16, 1, 0);
        break;
    case 0x25:
        /* STG */
        gen_store_mem(ctx, &gen_qemu_stg, ra, rb, disp16, 1, 0);
        break;
    case 0x26:
        /* STS */
        gen_store_mem(ctx, &gen_qemu_sts, ra, rb, disp16, 1, 0);
        break;
    case 0x27:
        /* STT */
        gen_store_mem(ctx, &tcg_gen_qemu_st64, ra, rb, disp16, 1, 0);
        break;
    case 0x28:
        /* LDL */
        gen_load_mem(ctx, &tcg_gen_qemu_ld32s, ra, rb, disp16, 0, 0);
        break;
    case 0x29:
        /* LDQ */
        gen_load_mem(ctx, &tcg_gen_qemu_ld64, ra, rb, disp16, 0, 0);
        break;
    case 0x2A:
        /* LDL_L */
        gen_load_mem(ctx, &gen_qemu_ldl_l, ra, rb, disp16, 0, 0);
        break;
    case 0x2B:
        /* LDQ_L */
        gen_load_mem(ctx, &gen_qemu_ldq_l, ra, rb, disp16, 0, 0);
        break;
    case 0x2C:
        /* STL */
        gen_store_mem(ctx, &tcg_gen_qemu_st32, ra, rb, disp16, 0, 0);
        break;
    case 0x2D:
        /* STQ */
        gen_store_mem(ctx, &tcg_gen_qemu_st64, ra, rb, disp16, 0, 0);
        break;
    case 0x2E:
        /* STL_C */
        ret = gen_store_conditional(ctx, ra, rb, disp16,
                                    ctx->mem_idx, MO_LESL);
        break;
    case 0x2F:
        /* STQ_C */
        ret = gen_store_conditional(ctx, ra, rb, disp16,
                                    ctx->mem_idx, MO_LEQ);
        break;
    case 0x30:
        /* BR */
        ret = gen_bdirect(ctx, ra, disp21);
        break;
    case 0x31: /* FBEQ */
        ret = gen_fbcond(ctx, TCG_COND_EQ, ra, disp21);
        break;
    case 0x32: /* FBLT */
        ret = gen_fbcond(ctx, TCG_COND_LT, ra, disp21);
        break;
    case 0x33: /* FBLE */
        ret = gen_fbcond(ctx, TCG_COND_LE, ra, disp21);
        break;
    case 0x34:
        /* BSR */
        ret = gen_bdirect(ctx, ra, disp21);
        break;
    case 0x35: /* FBNE */
        ret = gen_fbcond(ctx, TCG_COND_NE, ra, disp21);
        break;
    case 0x36: /* FBGE */
        ret = gen_fbcond(ctx, TCG_COND_GE, ra, disp21);
        break;
    case 0x37: /* FBGT */
        ret = gen_fbcond(ctx, TCG_COND_GT, ra, disp21);
        break;
    case 0x38:
        /* BLBC */
        ret = gen_bcond(ctx, TCG_COND_EQ, ra, disp21, 1);
        break;
    case 0x39:
        /* BEQ */
        ret = gen_bcond(ctx, TCG_COND_EQ, ra, disp21, 0);
        break;
    case 0x3A:
        /* BLT */
        ret = gen_bcond(ctx, TCG_COND_LT, ra, disp21, 0);
        break;
    case 0x3B:
        /* BLE */
        ret = gen_bcond(ctx, TCG_COND_LE, ra, disp21, 0);
        break;
    case 0x3C:
        /* BLBS */
        ret = gen_bcond(ctx, TCG_COND_NE, ra, disp21, 1);
        break;
    case 0x3D:
        /* BNE */
        ret = gen_bcond(ctx, TCG_COND_NE, ra, disp21, 0);
        break;
    case 0x3E:
        /* BGE */
        ret = gen_bcond(ctx, TCG_COND_GE, ra, disp21, 0);
        break;
    case 0x3F:
        /* BGT */
        ret = gen_bcond(ctx, TCG_COND_GT, ra, disp21, 0);
        break;
    default:
    invalid_opc:
        ret = gen_invalid(ctx);
        break;
    }

    return ret;
}
static int alpha_tr_init_disas_context(DisasContextBase *dcbase,
                                       CPUState *cpu, int max_insns)
{
    DisasContext *ctx = container_of(dcbase, DisasContext, base);
    CPUAlphaState *env = cpu->env_ptr;
    int64_t bound, mask;

    ctx->tbflags = ctx->base.tb->flags;
    ctx->mem_idx = cpu_mmu_index(env, false);
    ctx->implver = env->implver;
    ctx->amask = env->amask;

#ifdef CONFIG_USER_ONLY
    ctx->ir = cpu_std_ir;
#else
    ctx->palbr = env->palbr;
    ctx->ir = (ctx->tbflags & ENV_FLAG_PAL_MODE ? cpu_pal_ir : cpu_std_ir);
#endif

    /* ??? Every TB begins with unset rounding mode, to be initialized on
       the first fp insn of the TB.  Alternately we could define a proper
       default for every TB (e.g. QUAL_RM_N or QUAL_RM_D) and make sure
       to reset the FP_STATUS to that default at the end of any TB that
       changes the default.  We could even (gasp) dynamically figure out
       what default would be most efficient given the running program.  */
    ctx->tb_rm = -1;
    /* Similarly for flush-to-zero.  */
    ctx->tb_ftz = -1;

    ctx->zero = NULL;
    ctx->sink = NULL;
    ctx->lit = NULL;

    /* Bound the number of insns to execute to those left on the page.  */
    if (in_superpage(ctx, ctx->base.pc_first)) {
        mask = -1ULL << 41;
    } else {
        mask = TARGET_PAGE_MASK;
    }
    bound = -(ctx->base.pc_first | mask) / 4;

    return MIN(max_insns, bound);
}
static void alpha_tr_tb_start(DisasContextBase *db, CPUState *cpu)
{
}

static void alpha_tr_insn_start(DisasContextBase *dcbase, CPUState *cpu)
{
    tcg_gen_insn_start(dcbase->pc_next);
}
static bool alpha_tr_breakpoint_check(DisasContextBase *dcbase, CPUState *cpu,
                                      const CPUBreakpoint *bp)
{
    DisasContext *ctx = container_of(dcbase, DisasContext, base);

    ctx->base.is_jmp = gen_excp(ctx, EXCP_DEBUG, 0);

    /* The address covered by the breakpoint must be included in
       [tb->pc, tb->pc + tb->size) in order for it to be properly
       cleared -- thus we increment the PC here so that the logic
       setting tb->size below does the right thing.  */
    ctx->base.pc_next += 4;
    return true;
}
static void alpha_tr_translate_insn(DisasContextBase *dcbase, CPUState *cpu)
{
    DisasContext *ctx = container_of(dcbase, DisasContext, base);
    CPUAlphaState *env = cpu->env_ptr;
    uint32_t insn = cpu_ldl_code(env, ctx->base.pc_next);

    ctx->base.pc_next += 4;
    ctx->base.is_jmp = translate_one(ctx, insn);

    free_context_temps(ctx);
    translator_loop_temp_check(&ctx->base);
}
static void alpha_tr_tb_stop(DisasContextBase *dcbase, CPUState *cpu)
{
    DisasContext *ctx = container_of(dcbase, DisasContext, base);

    switch (ctx->base.is_jmp) {
    case DISAS_NORETURN:
        break;
    case DISAS_TOO_MANY:
        if (use_goto_tb(ctx, ctx->base.pc_next)) {
            tcg_gen_goto_tb(0);
            tcg_gen_movi_i64(cpu_pc, ctx->base.pc_next);
            tcg_gen_exit_tb((uintptr_t)ctx->base.tb);
        }
        /* FALLTHRU */
    case DISAS_PC_STALE:
        tcg_gen_movi_i64(cpu_pc, ctx->base.pc_next);
        /* FALLTHRU */
    case DISAS_PC_UPDATED:
        if (!use_exit_tb(ctx)) {
            tcg_gen_lookup_and_goto_ptr();
            break;
        }
        /* FALLTHRU */
    case DISAS_PC_UPDATED_NOCHAIN:
        if (ctx->base.singlestep_enabled) {
            gen_excp_1(EXCP_DEBUG, 0);
        } else {
            tcg_gen_exit_tb(0);
        }
        break;
    default:
        g_assert_not_reached();
    }
}
static void alpha_tr_disas_log(const DisasContextBase *dcbase, CPUState *cpu)
{
    qemu_log("IN: %s\n", lookup_symbol(dcbase->pc_first));
    log_target_disas(cpu, dcbase->pc_first, dcbase->tb->size);
}

static const TranslatorOps alpha_tr_ops = {
    .init_disas_context = alpha_tr_init_disas_context,
    .tb_start           = alpha_tr_tb_start,
    .insn_start         = alpha_tr_insn_start,
    .breakpoint_check   = alpha_tr_breakpoint_check,
    .translate_insn     = alpha_tr_translate_insn,
    .tb_stop            = alpha_tr_tb_stop,
    .disas_log          = alpha_tr_disas_log,
};
void gen_intermediate_code(CPUState *cpu, TranslationBlock *tb)
{
    DisasContext dc;
    translator_loop(&alpha_tr_ops, &dc.base, cpu, tb);
}

void restore_state_to_opc(CPUAlphaState *env, TranslationBlock *tb,
                          target_ulong *data)
{
    env->pc = data[0];
}