/*
 * Alpha emulation cpu translation for qemu.
 *
 * Copyright (c) 2007 Jocelyn Mayer
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"
#include "cpu.h"
#include "sysemu/cpus.h"
#include "disas/disas.h"
#include "qemu/host-utils.h"
#include "exec/exec-all.h"
#include "tcg/tcg-op.h"
#include "exec/helper-proto.h"
#include "exec/helper-gen.h"
#include "exec/translator.h"

#define HELPER_H "helper.h"
#include "exec/helper-info.c.inc"
#undef  HELPER_H
#undef ALPHA_DEBUG_DISAS
#define CONFIG_SOFTFLOAT_INLINE

#ifdef ALPHA_DEBUG_DISAS
#  define LOG_DISAS(...) qemu_log_mask(CPU_LOG_TB_IN_ASM, ## __VA_ARGS__)
#else
#  define LOG_DISAS(...) do { } while (0)
#endif
typedef struct DisasContext DisasContext;
struct DisasContext {
    DisasContextBase base;

#ifdef CONFIG_USER_ONLY
    MemOp unalign;
#else
    uint64_t palbr;
#endif
    uint32_t tbflags;
    int mem_idx;

    /* implver and amask values for this CPU.  */
    int implver;
    int amask;

    /* Current rounding mode for this TB.  */
    int tb_rm;
    /* Current flush-to-zero setting for this TB.  */
    int tb_ftz;

    /* The set of registers active in the current context.  */
    TCGv *ir;

    /* Temporaries for $31 and $f31 as source and destination.  */
    TCGv zero;
    TCGv sink;
};
#ifdef CONFIG_USER_ONLY
#define UNALIGN(C)  (C)->unalign
#else
#define UNALIGN(C)  MO_ALIGN
#endif
/* Target-specific return values from translate_one, indicating the
   state of the TB.  Note that DISAS_NEXT indicates that we are not
   exiting the TB.  */
#define DISAS_PC_UPDATED_NOCHAIN  DISAS_TARGET_0
#define DISAS_PC_UPDATED          DISAS_TARGET_1
#define DISAS_PC_STALE            DISAS_TARGET_2
/* global register indexes */
static TCGv cpu_std_ir[31];
static TCGv cpu_fir[31];
static TCGv cpu_pc;
static TCGv cpu_lock_addr;
static TCGv cpu_lock_value;

#ifndef CONFIG_USER_ONLY
static TCGv cpu_pal_ir[31];
#endif
void alpha_translate_init(void)
{
#define DEF_VAR(V)  { &cpu_##V, #V, offsetof(CPUAlphaState, V) }

    typedef struct { TCGv *var; const char *name; int ofs; } GlobalVar;
    static const GlobalVar vars[] = {
        DEF_VAR(pc),
        DEF_VAR(lock_addr),
        DEF_VAR(lock_value),
    };

#undef DEF_VAR
    /* Use the symbolic register names that match the disassembler.  */
    static const char greg_names[31][4] = {
        "v0", "t0", "t1", "t2", "t3", "t4", "t5", "t6",
        "t7", "s0", "s1", "s2", "s3", "s4", "s5", "fp",
        "a0", "a1", "a2", "a3", "a4", "a5", "t8", "t9",
        "t10", "t11", "ra", "t12", "at", "gp", "sp"
    };
    static const char freg_names[31][4] = {
        "f0", "f1", "f2", "f3", "f4", "f5", "f6", "f7",
        "f8", "f9", "f10", "f11", "f12", "f13", "f14", "f15",
        "f16", "f17", "f18", "f19", "f20", "f21", "f22", "f23",
        "f24", "f25", "f26", "f27", "f28", "f29", "f30"
    };
#ifndef CONFIG_USER_ONLY
    static const char shadow_names[8][8] = {
        "pal_t7", "pal_s0", "pal_s1", "pal_s2",
        "pal_s3", "pal_s4", "pal_s5", "pal_t11"
    };
#endif
    int i;

    for (i = 0; i < 31; i++) {
        cpu_std_ir[i] = tcg_global_mem_new_i64(tcg_env,
                                               offsetof(CPUAlphaState, ir[i]),
                                               greg_names[i]);
    }

    for (i = 0; i < 31; i++) {
        cpu_fir[i] = tcg_global_mem_new_i64(tcg_env,
                                            offsetof(CPUAlphaState, fir[i]),
                                            freg_names[i]);
    }

#ifndef CONFIG_USER_ONLY
    memcpy(cpu_pal_ir, cpu_std_ir, sizeof(cpu_pal_ir));
    for (i = 0; i < 8; i++) {
        int r = (i == 7 ? 25 : i + 8);
        cpu_pal_ir[r] = tcg_global_mem_new_i64(tcg_env,
                                               offsetof(CPUAlphaState,
                                                        shadow[i]),
                                               shadow_names[i]);
    }
#endif

    for (i = 0; i < ARRAY_SIZE(vars); ++i) {
        const GlobalVar *v = &vars[i];
        *v->var = tcg_global_mem_new_i64(tcg_env, v->ofs, v->name);
    }
}
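/*
 * Note on the shadow loop above: in PAL mode the OSF/1 convention shadows
 * eight integer registers, r8..r14 plus r25 (the i == 7 entry maps to 25),
 * so cpu_pal_ir[] aliases the CPUAlphaState shadow[] slots for those
 * indexes and the ordinary ir[] slots for everything else.
 */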
static TCGv load_zero(DisasContext *ctx)
{
    if (!ctx->zero) {
        ctx->zero = tcg_constant_i64(0);
    }
    return ctx->zero;
}

static TCGv dest_sink(DisasContext *ctx)
{
    if (!ctx->sink) {
        ctx->sink = tcg_temp_new();
    }
    return ctx->sink;
}

static void free_context_temps(DisasContext *ctx)
{
    if (ctx->sink) {
        tcg_gen_discard_i64(ctx->sink);
        ctx->sink = NULL;
    }
}
static TCGv load_gpr(DisasContext *ctx, unsigned reg)
{
    if (likely(reg < 31)) {
        return ctx->ir[reg];
    } else {
        return load_zero(ctx);
    }
}

static TCGv load_gpr_lit(DisasContext *ctx, unsigned reg,
                         uint8_t lit, bool islit)
{
    if (islit) {
        return tcg_constant_i64(lit);
    } else if (likely(reg < 31)) {
        return ctx->ir[reg];
    } else {
        return load_zero(ctx);
    }
}

static TCGv dest_gpr(DisasContext *ctx, unsigned reg)
{
    if (likely(reg < 31)) {
        return ctx->ir[reg];
    } else {
        return dest_sink(ctx);
    }
}

static TCGv load_fpr(DisasContext *ctx, unsigned reg)
{
    if (likely(reg < 31)) {
        return cpu_fir[reg];
    } else {
        return load_zero(ctx);
    }
}

static TCGv dest_fpr(DisasContext *ctx, unsigned reg)
{
    if (likely(reg < 31)) {
        return cpu_fir[reg];
    } else {
        return dest_sink(ctx);
    }
}
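/*
 * On Alpha, $31 and $f31 read as zero and writes to them are discarded;
 * the helpers above implement that by routing reads through load_zero()
 * and writes through the per-insn dest_sink() temporary.
 */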
static int get_flag_ofs(unsigned shift)
{
    int ofs = offsetof(CPUAlphaState, flags);
#if HOST_BIG_ENDIAN
    ofs += 3 - (shift / 8);
#else
    ofs += shift / 8;
#endif
    return ofs;
}

static void ld_flag_byte(TCGv val, unsigned shift)
{
    tcg_gen_ld8u_i64(val, tcg_env, get_flag_ofs(shift));
}

static void st_flag_byte(TCGv val, unsigned shift)
{
    tcg_gen_st8_i64(val, tcg_env, get_flag_ofs(shift));
}
static void gen_excp_1(int exception, int error_code)
{
    TCGv_i32 tmp1, tmp2;

    tmp1 = tcg_constant_i32(exception);
    tmp2 = tcg_constant_i32(error_code);
    gen_helper_excp(tcg_env, tmp1, tmp2);
}

static DisasJumpType gen_excp(DisasContext *ctx, int exception, int error_code)
{
    tcg_gen_movi_i64(cpu_pc, ctx->base.pc_next);
    gen_excp_1(exception, error_code);
    return DISAS_NORETURN;
}

static inline DisasJumpType gen_invalid(DisasContext *ctx)
{
    return gen_excp(ctx, EXCP_OPCDEC, 0);
}
static void gen_ldf(DisasContext *ctx, TCGv dest, TCGv addr)
{
    TCGv_i32 tmp32 = tcg_temp_new_i32();
    tcg_gen_qemu_ld_i32(tmp32, addr, ctx->mem_idx, MO_LEUL | UNALIGN(ctx));
    gen_helper_memory_to_f(dest, tmp32);
}

static void gen_ldg(DisasContext *ctx, TCGv dest, TCGv addr)
{
    TCGv tmp = tcg_temp_new();
    tcg_gen_qemu_ld_i64(tmp, addr, ctx->mem_idx, MO_LEUQ | UNALIGN(ctx));
    gen_helper_memory_to_g(dest, tmp);
}

static void gen_lds(DisasContext *ctx, TCGv dest, TCGv addr)
{
    TCGv_i32 tmp32 = tcg_temp_new_i32();
    tcg_gen_qemu_ld_i32(tmp32, addr, ctx->mem_idx, MO_LEUL | UNALIGN(ctx));
    gen_helper_memory_to_s(dest, tmp32);
}

static void gen_ldt(DisasContext *ctx, TCGv dest, TCGv addr)
{
    tcg_gen_qemu_ld_i64(dest, addr, ctx->mem_idx, MO_LEUQ | UNALIGN(ctx));
}
static void gen_load_fp(DisasContext *ctx, int ra, int rb, int32_t disp16,
                        void (*func)(DisasContext *, TCGv, TCGv))
{
    /* Loads to $f31 are prefetches, which we can treat as nops.  */
    if (likely(ra != 31)) {
        TCGv addr = tcg_temp_new();
        tcg_gen_addi_i64(addr, load_gpr(ctx, rb), disp16);
        func(ctx, cpu_fir[ra], addr);
    }
}
static void gen_load_int(DisasContext *ctx, int ra, int rb, int32_t disp16,
                         MemOp op, bool clear, bool locked)
{
    TCGv addr, dest;

    /* LDQ_U with ra $31 is UNOP.  Other various loads are forms of
       prefetches, which we can treat as nops.  No worries about
       missed exceptions here.  */
    if (unlikely(ra == 31)) {
        return;
    }

    addr = tcg_temp_new();
    tcg_gen_addi_i64(addr, load_gpr(ctx, rb), disp16);
    if (clear) {
        tcg_gen_andi_i64(addr, addr, ~0x7);
    } else if (!locked) {
        op |= UNALIGN(ctx);
    }

    dest = ctx->ir[ra];
    tcg_gen_qemu_ld_i64(dest, addr, ctx->mem_idx, op);

    if (locked) {
        tcg_gen_mov_i64(cpu_lock_addr, addr);
        tcg_gen_mov_i64(cpu_lock_value, dest);
    }
}
static void gen_stf(DisasContext *ctx, TCGv src, TCGv addr)
{
    TCGv_i32 tmp32 = tcg_temp_new_i32();
    /* Convert the register value (not the address) to F-float memory
       format before storing.  */
    gen_helper_f_to_memory(tmp32, src);
    tcg_gen_qemu_st_i32(tmp32, addr, ctx->mem_idx, MO_LEUL | UNALIGN(ctx));
}

static void gen_stg(DisasContext *ctx, TCGv src, TCGv addr)
{
    TCGv tmp = tcg_temp_new();
    gen_helper_g_to_memory(tmp, src);
    tcg_gen_qemu_st_i64(tmp, addr, ctx->mem_idx, MO_LEUQ | UNALIGN(ctx));
}

static void gen_sts(DisasContext *ctx, TCGv src, TCGv addr)
{
    TCGv_i32 tmp32 = tcg_temp_new_i32();
    gen_helper_s_to_memory(tmp32, src);
    tcg_gen_qemu_st_i32(tmp32, addr, ctx->mem_idx, MO_LEUL | UNALIGN(ctx));
}

static void gen_stt(DisasContext *ctx, TCGv src, TCGv addr)
{
    tcg_gen_qemu_st_i64(src, addr, ctx->mem_idx, MO_LEUQ | UNALIGN(ctx));
}
static void gen_store_fp(DisasContext *ctx, int ra, int rb, int32_t disp16,
                         void (*func)(DisasContext *, TCGv, TCGv))
{
    TCGv addr = tcg_temp_new();
    tcg_gen_addi_i64(addr, load_gpr(ctx, rb), disp16);
    func(ctx, load_fpr(ctx, ra), addr);
}
static void gen_store_int(DisasContext *ctx, int ra, int rb, int32_t disp16,
                          MemOp op, bool clear)
{
    TCGv addr, src;

    addr = tcg_temp_new();
    tcg_gen_addi_i64(addr, load_gpr(ctx, rb), disp16);
    if (clear) {
        tcg_gen_andi_i64(addr, addr, ~0x7);
    } else {
        op |= UNALIGN(ctx);
    }

    src = load_gpr(ctx, ra);
    tcg_gen_qemu_st_i64(src, addr, ctx->mem_idx, op);
}
static DisasJumpType gen_store_conditional(DisasContext *ctx, int ra, int rb,
                                           int32_t disp16, int mem_idx,
                                           MemOp op)
{
    TCGLabel *lab_fail, *lab_done;
    TCGv addr, val;

    addr = tcg_temp_new_i64();
    tcg_gen_addi_i64(addr, load_gpr(ctx, rb), disp16);
    free_context_temps(ctx);

    lab_fail = gen_new_label();
    lab_done = gen_new_label();
    tcg_gen_brcond_i64(TCG_COND_NE, addr, cpu_lock_addr, lab_fail);

    val = tcg_temp_new_i64();
    tcg_gen_atomic_cmpxchg_i64(val, cpu_lock_addr, cpu_lock_value,
                               load_gpr(ctx, ra), mem_idx, op);
    free_context_temps(ctx);

    if (ra != 31) {
        tcg_gen_setcond_i64(TCG_COND_EQ, ctx->ir[ra], val, cpu_lock_value);
    }
    tcg_gen_br(lab_done);

    gen_set_label(lab_fail);
    if (ra != 31) {
        tcg_gen_movi_i64(ctx->ir[ra], 0);
    }

    gen_set_label(lab_done);
    tcg_gen_movi_i64(cpu_lock_addr, -1);
    return DISAS_NEXT;
}
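/*
 * Note that emulating LDx_L/STx_C with tcg_gen_atomic_cmpxchg_i64 tests
 * the *value* seen at load time rather than exclusive ownership, so an
 * ABA overwrite by another thread can let the store-conditional succeed
 * where hardware would fail it; this is the usual TCG approximation of
 * load-locked/store-conditional.
 */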
static bool use_goto_tb(DisasContext *ctx, uint64_t dest)
{
    return translator_use_goto_tb(&ctx->base, dest);
}
static DisasJumpType gen_bdirect(DisasContext *ctx, int ra, int32_t disp)
{
    uint64_t dest = ctx->base.pc_next + (disp << 2);

    if (ra != 31) {
        tcg_gen_movi_i64(ctx->ir[ra], ctx->base.pc_next);
    }

    /* Notice branch-to-next; used to initialize RA with the PC.  */
    if (disp == 0) {
        return DISAS_NEXT;
    } else if (use_goto_tb(ctx, dest)) {
        tcg_gen_goto_tb(0);
        tcg_gen_movi_i64(cpu_pc, dest);
        tcg_gen_exit_tb(ctx->base.tb, 0);
        return DISAS_NORETURN;
    } else {
        tcg_gen_movi_i64(cpu_pc, dest);
        return DISAS_PC_UPDATED;
    }
}
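/*
 * Note: use_goto_tb gates direct block chaining, roughly allowing it when
 * the destination stays within the same guest page (see
 * translator_use_goto_tb).  When chaining is not possible we store the
 * target PC and return DISAS_PC_UPDATED so the main loop does a TB lookup.
 */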
static DisasJumpType gen_bcond_internal(DisasContext *ctx, TCGCond cond,
                                        TCGv cmp, uint64_t imm, int32_t disp)
{
    uint64_t dest = ctx->base.pc_next + (disp << 2);
    TCGLabel *lab_true = gen_new_label();

    if (use_goto_tb(ctx, dest)) {
        tcg_gen_brcondi_i64(cond, cmp, imm, lab_true);

        tcg_gen_goto_tb(0);
        tcg_gen_movi_i64(cpu_pc, ctx->base.pc_next);
        tcg_gen_exit_tb(ctx->base.tb, 0);

        gen_set_label(lab_true);
        tcg_gen_goto_tb(1);
        tcg_gen_movi_i64(cpu_pc, dest);
        tcg_gen_exit_tb(ctx->base.tb, 1);

        return DISAS_NORETURN;
    } else {
        TCGv_i64 i = tcg_constant_i64(imm);
        TCGv_i64 d = tcg_constant_i64(dest);
        TCGv_i64 p = tcg_constant_i64(ctx->base.pc_next);

        tcg_gen_movcond_i64(cond, cpu_pc, cmp, i, d, p);
        return DISAS_PC_UPDATED;
    }
}
static DisasJumpType gen_bcond(DisasContext *ctx, TCGCond cond, int ra,
                               int32_t disp)
{
    return gen_bcond_internal(ctx, cond, load_gpr(ctx, ra),
                              is_tst_cond(cond), disp);
}
/* Fold -0.0 for comparison with COND.  */

static TCGv_i64 gen_fold_mzero(TCGCond *pcond, uint64_t *pimm, TCGv_i64 src)
{
    TCGv_i64 tmp;

    switch (*pcond) {
    case TCG_COND_LE:
    case TCG_COND_GT:
        /* For <= or >, the -0.0 value directly compares the way we want.  */
        return src;

    case TCG_COND_EQ:
    case TCG_COND_NE:
        /* For == or !=, we can compare without the sign bit.  */
        *pcond = *pcond == TCG_COND_EQ ? TCG_COND_TSTEQ : TCG_COND_TSTNE;
        *pimm = INT64_MAX;
        return src;

    case TCG_COND_GE:
    case TCG_COND_LT:
        /* For >= or <, map -0.0 to +0.0.  */
        tmp = tcg_temp_new_i64();
        tcg_gen_movcond_i64(TCG_COND_EQ, tmp,
                            src, tcg_constant_i64(INT64_MIN),
                            tcg_constant_i64(0), src);
        return tmp;

    default:
        g_assert_not_reached();
    }
}
static DisasJumpType gen_fbcond(DisasContext *ctx, TCGCond cond, int ra,
                                int32_t disp)
{
    uint64_t imm = 0;
    TCGv_i64 tmp = gen_fold_mzero(&cond, &imm, load_fpr(ctx, ra));
    return gen_bcond_internal(ctx, cond, tmp, imm, disp);
}
static void gen_fcmov(DisasContext *ctx, TCGCond cond, int ra, int rb, int rc)
{
    uint64_t imm = 0;
    TCGv_i64 tmp = gen_fold_mzero(&cond, &imm, load_fpr(ctx, ra));
    tcg_gen_movcond_i64(cond, dest_fpr(ctx, rc),
                        tmp, tcg_constant_i64(imm),
                        load_fpr(ctx, rb), load_fpr(ctx, rc));
}
#define QUAL_RM_N       0x080   /* Round mode nearest even */
#define QUAL_RM_C       0x000   /* Round mode chopped */
#define QUAL_RM_M       0x040   /* Round mode minus infinity */
#define QUAL_RM_D       0x0c0   /* Round mode dynamic */
#define QUAL_RM_MASK    0x0c0

#define QUAL_U          0x100   /* Underflow enable (fp output) */
#define QUAL_V          0x100   /* Overflow enable (int output) */
#define QUAL_S          0x400   /* Software completion enable */
#define QUAL_I          0x200   /* Inexact detection enable */
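/*
 * Layout recap for the qualifier bits decoded from fn11: bits <5:0> are
 * the function proper (fpfn), bits <7:6> the rounding mode, bit 8 the
 * underflow/overflow enable, bit 9 the inexact enable, and bit 10
 * software completion, matching the masks defined above.
 */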
static void gen_qual_roundmode(DisasContext *ctx, int fn11)
{
    TCGv_i32 tmp;

    fn11 &= QUAL_RM_MASK;
    if (fn11 == ctx->tb_rm) {
        return;
    }
    ctx->tb_rm = fn11;

    tmp = tcg_temp_new_i32();
    switch (fn11) {
    case QUAL_RM_N:
        tcg_gen_movi_i32(tmp, float_round_nearest_even);
        break;
    case QUAL_RM_C:
        tcg_gen_movi_i32(tmp, float_round_to_zero);
        break;
    case QUAL_RM_M:
        tcg_gen_movi_i32(tmp, float_round_down);
        break;
    case QUAL_RM_D:
        tcg_gen_ld8u_i32(tmp, tcg_env,
                         offsetof(CPUAlphaState, fpcr_dyn_round));
        break;
    }

#if defined(CONFIG_SOFTFLOAT_INLINE)
    /* ??? The "fpu/softfloat.h" interface is to call set_float_rounding_mode.
       With CONFIG_SOFTFLOAT that expands to an out-of-line call that just
       sets the one field.  */
    tcg_gen_st8_i32(tmp, tcg_env,
                    offsetof(CPUAlphaState, fp_status.float_rounding_mode));
#else
    gen_helper_setroundmode(tmp);
#endif
}
static void gen_qual_flushzero(DisasContext *ctx, int fn11)
{
    TCGv_i32 tmp;

    fn11 &= QUAL_U;
    if (fn11 == ctx->tb_ftz) {
        return;
    }
    ctx->tb_ftz = fn11;

    tmp = tcg_temp_new_i32();
    if (fn11) {
        /* Underflow is enabled, use the FPCR setting.  */
        tcg_gen_ld8u_i32(tmp, tcg_env,
                         offsetof(CPUAlphaState, fpcr_flush_to_zero));
    } else {
        /* Underflow is disabled, force flush-to-zero.  */
        tcg_gen_movi_i32(tmp, 1);
    }

#if defined(CONFIG_SOFTFLOAT_INLINE)
    tcg_gen_st8_i32(tmp, tcg_env,
                    offsetof(CPUAlphaState, fp_status.flush_to_zero));
#else
    gen_helper_setflushzero(tmp);
#endif
}
static TCGv gen_ieee_input(DisasContext *ctx, int reg, int fn11, int is_cmp)
{
    TCGv val;

    if (unlikely(reg == 31)) {
        val = load_zero(ctx);
    } else {
        val = cpu_fir[reg];
        if ((fn11 & QUAL_S) == 0) {
            if (is_cmp) {
                gen_helper_ieee_input_cmp(tcg_env, val);
            } else {
                gen_helper_ieee_input(tcg_env, val);
            }
        } else {
#ifndef CONFIG_USER_ONLY
            /* In system mode, raise exceptions for denormals like real
               hardware.  In user mode, proceed as if the OS completion
               handler is handling the denormal as per spec.  */
            gen_helper_ieee_input_s(tcg_env, val);
#endif
        }
    }
    return val;
}
static void gen_fp_exc_raise(int rc, int fn11)
{
    /* ??? We ought to be able to do something with imprecise exceptions.
       E.g. notice we're still in the trap shadow of something within the
       TB and do not generate the code to signal the exception; end the TB
       when an exception is forced to arrive, either by consumption of a
       register value or TRAPB or EXCB.  */

    TCGv_i32 reg, ign;
    uint32_t ignore = 0;

    if (!(fn11 & QUAL_U)) {
        /* Note that QUAL_U == QUAL_V, so ignore either.  */
        ignore |= FPCR_UNF | FPCR_IOV;
    }
    if (!(fn11 & QUAL_I)) {
        ignore |= FPCR_INE;
    }
    ign = tcg_constant_i32(ignore);

    /* ??? Pass in the regno of the destination so that the helper can
       set EXC_MASK, which contains a bitmask of destination registers
       that have caused arithmetic traps.  A simple userspace emulation
       does not require this.  We do need it for a guest kernel's entArith,
       or if we were to do something clever with imprecise exceptions.  */
    reg = tcg_constant_i32(rc + 32);
    if (fn11 & QUAL_S) {
        gen_helper_fp_exc_raise_s(tcg_env, ign, reg);
    } else {
        gen_helper_fp_exc_raise(tcg_env, ign, reg);
    }
}
static void gen_cvtlq(TCGv vc, TCGv vb)
{
    TCGv tmp = tcg_temp_new();

    /* The arithmetic right shift here, plus the sign-extended mask below
       yields a sign-extended result without an explicit ext32s_i64.  */
    tcg_gen_shri_i64(tmp, vb, 29);
    tcg_gen_sari_i64(vc, vb, 32);
    tcg_gen_deposit_i64(vc, vc, tmp, 0, 30);
}
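/*
 * For reference, a longword held in FP-register format keeps its sign in
 * bit <63>, bit <30> in <62>, and bits <29:0> in <58:29>; the shift pair
 * above reassembles those fields into a canonically sign-extended
 * quadword.
 */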
static void gen_ieee_arith2(DisasContext *ctx,
                            void (*helper)(TCGv, TCGv_ptr, TCGv),
                            int rb, int rc, int fn11)
{
    TCGv vb;

    gen_qual_roundmode(ctx, fn11);
    gen_qual_flushzero(ctx, fn11);

    vb = gen_ieee_input(ctx, rb, fn11, 0);
    helper(dest_fpr(ctx, rc), tcg_env, vb);

    gen_fp_exc_raise(rc, fn11);
}

#define IEEE_ARITH2(name)                                       \
static inline void glue(gen_, name)(DisasContext *ctx,          \
                                    int rb, int rc, int fn11)   \
{                                                               \
    gen_ieee_arith2(ctx, gen_helper_##name, rb, rc, fn11);      \
}
IEEE_ARITH2(sqrts)
IEEE_ARITH2(sqrtt)
IEEE_ARITH2(cvtst)
IEEE_ARITH2(cvtts)
static void gen_cvttq(DisasContext *ctx, int rb, int rc, int fn11)
{
    TCGv vb, vc;

    /* No need to set flushzero, since we have an integer output.  */
    vb = gen_ieee_input(ctx, rb, fn11, 0);
    vc = dest_fpr(ctx, rc);

    /* Almost all integer conversions use cropped rounding;
       special case that.  */
    if ((fn11 & QUAL_RM_MASK) == QUAL_RM_C) {
        gen_helper_cvttq_c(vc, tcg_env, vb);
    } else {
        gen_qual_roundmode(ctx, fn11);
        gen_helper_cvttq(vc, tcg_env, vb);
    }
    gen_fp_exc_raise(rc, fn11);
}
static void gen_ieee_intcvt(DisasContext *ctx,
                            void (*helper)(TCGv, TCGv_ptr, TCGv),
                            int rb, int rc, int fn11)
{
    TCGv vb, vc;

    gen_qual_roundmode(ctx, fn11);
    vb = load_fpr(ctx, rb);
    vc = dest_fpr(ctx, rc);

    /* The only exception that can be raised by integer conversion
       is inexact.  Thus we only need to worry about exceptions when
       inexact handling is requested.  */
    if (fn11 & QUAL_I) {
        helper(vc, tcg_env, vb);
        gen_fp_exc_raise(rc, fn11);
    } else {
        helper(vc, tcg_env, vb);
    }
}

#define IEEE_INTCVT(name)                                       \
static inline void glue(gen_, name)(DisasContext *ctx,          \
                                    int rb, int rc, int fn11)   \
{                                                               \
    gen_ieee_intcvt(ctx, gen_helper_##name, rb, rc, fn11);      \
}
IEEE_INTCVT(cvtqs)
IEEE_INTCVT(cvtqt)
static void gen_cpy_mask(TCGv vc, TCGv va, TCGv vb, bool inv_a, uint64_t mask)
{
    TCGv vmask = tcg_constant_i64(mask);
    TCGv tmp = tcg_temp_new_i64();

    if (inv_a) {
        tcg_gen_andc_i64(tmp, vmask, va);
    } else {
        tcg_gen_and_i64(tmp, va, vmask);
    }

    tcg_gen_andc_i64(vc, vb, vmask);
    tcg_gen_or_i64(vc, vc, tmp);
}
static void gen_ieee_arith3(DisasContext *ctx,
                            void (*helper)(TCGv, TCGv_ptr, TCGv, TCGv),
                            int ra, int rb, int rc, int fn11)
{
    TCGv va, vb, vc;

    gen_qual_roundmode(ctx, fn11);
    gen_qual_flushzero(ctx, fn11);

    va = gen_ieee_input(ctx, ra, fn11, 0);
    vb = gen_ieee_input(ctx, rb, fn11, 0);
    vc = dest_fpr(ctx, rc);
    helper(vc, tcg_env, va, vb);

    gen_fp_exc_raise(rc, fn11);
}

#define IEEE_ARITH3(name)                                               \
static inline void glue(gen_, name)(DisasContext *ctx,                  \
                                    int ra, int rb, int rc, int fn11)   \
{                                                                       \
    gen_ieee_arith3(ctx, gen_helper_##name, ra, rb, rc, fn11);          \
}
IEEE_ARITH3(adds)
IEEE_ARITH3(subs)
IEEE_ARITH3(muls)
IEEE_ARITH3(divs)
IEEE_ARITH3(addt)
IEEE_ARITH3(subt)
IEEE_ARITH3(mult)
IEEE_ARITH3(divt)
static void gen_ieee_compare(DisasContext *ctx,
                             void (*helper)(TCGv, TCGv_ptr, TCGv, TCGv),
                             int ra, int rb, int rc, int fn11)
{
    TCGv va, vb, vc;

    va = gen_ieee_input(ctx, ra, fn11, 1);
    vb = gen_ieee_input(ctx, rb, fn11, 1);
    vc = dest_fpr(ctx, rc);
    helper(vc, tcg_env, va, vb);

    gen_fp_exc_raise(rc, fn11);
}

#define IEEE_CMP3(name)                                                 \
static inline void glue(gen_, name)(DisasContext *ctx,                  \
                                    int ra, int rb, int rc, int fn11)   \
{                                                                       \
    gen_ieee_compare(ctx, gen_helper_##name, ra, rb, rc, fn11);         \
}
IEEE_CMP3(cmptun)
IEEE_CMP3(cmpteq)
IEEE_CMP3(cmptlt)
IEEE_CMP3(cmptle)
static inline uint64_t zapnot_mask(uint8_t lit)
{
    uint64_t mask = 0;
    int i;

    for (i = 0; i < 8; ++i) {
        if ((lit >> i) & 1) {
            mask |= 0xffull << (i * 8);
        }
    }
    return mask;
}
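/* Example: zapnot_mask(0x0f) == 0x00000000ffffffffull -- each set bit in
   LIT preserves the corresponding byte of the operand.  */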
/* Implement zapnot with an immediate operand, which expands to some
   form of immediate AND.  This is a basic building block in the
   definition of many of the other byte manipulation instructions.  */
static void gen_zapnoti(TCGv dest, TCGv src, uint8_t lit)
{
    switch (lit) {
    case 0x00:
        tcg_gen_movi_i64(dest, 0);
        break;
    case 0x01:
        tcg_gen_ext8u_i64(dest, src);
        break;
    case 0x03:
        tcg_gen_ext16u_i64(dest, src);
        break;
    case 0x0f:
        tcg_gen_ext32u_i64(dest, src);
        break;
    case 0xff:
        tcg_gen_mov_i64(dest, src);
        break;
    default:
        tcg_gen_andi_i64(dest, src, zapnot_mask(lit));
        break;
    }
}
/* EXTWH, EXTLH, EXTQH */
static void gen_ext_h(DisasContext *ctx, TCGv vc, TCGv va, int rb, bool islit,
                      uint8_t lit, uint8_t byte_mask)
{
    if (islit) {
        int pos = (64 - lit * 8) & 0x3f;
        int len = cto32(byte_mask) * 8;
        if (pos < len) {
            tcg_gen_deposit_z_i64(vc, va, pos, len - pos);
        } else {
            tcg_gen_movi_i64(vc, 0);
        }
    } else {
        TCGv tmp = tcg_temp_new();
        tcg_gen_shli_i64(tmp, load_gpr(ctx, rb), 3);
        tcg_gen_neg_i64(tmp, tmp);
        tcg_gen_andi_i64(tmp, tmp, 0x3f);
        tcg_gen_shl_i64(vc, va, tmp);
    }
    gen_zapnoti(vc, vc, byte_mask);
}
/* EXTBL, EXTWL, EXTLL, EXTQL */
static void gen_ext_l(DisasContext *ctx, TCGv vc, TCGv va, int rb, bool islit,
                      uint8_t lit, uint8_t byte_mask)
{
    if (islit) {
        int pos = (lit & 7) * 8;
        int len = cto32(byte_mask) * 8;
        if (pos + len >= 64) {
            len = 64 - pos;
        }
        tcg_gen_extract_i64(vc, va, pos, len);
    } else {
        TCGv tmp = tcg_temp_new();
        tcg_gen_andi_i64(tmp, load_gpr(ctx, rb), 7);
        tcg_gen_shli_i64(tmp, tmp, 3);
        tcg_gen_shr_i64(vc, va, tmp);
        gen_zapnoti(vc, vc, byte_mask);
    }
}
/* INSWH, INSLH, INSQH */
static void gen_ins_h(DisasContext *ctx, TCGv vc, TCGv va, int rb, bool islit,
                      uint8_t lit, uint8_t byte_mask)
{
    if (islit) {
        int pos = 64 - (lit & 7) * 8;
        int len = cto32(byte_mask) * 8;
        if (pos < len) {
            tcg_gen_extract_i64(vc, va, pos, len - pos);
        } else {
            tcg_gen_movi_i64(vc, 0);
        }
    } else {
        TCGv tmp = tcg_temp_new();
        TCGv shift = tcg_temp_new();

        /* The instruction description has us left-shift the byte mask
           and extract bits <15:8> and apply that zap at the end.  This
           is equivalent to simply performing the zap first and shifting
           afterward.  */
        gen_zapnoti(tmp, va, byte_mask);

        /* If (B & 7) == 0, we need to shift by 64 and leave a zero.  Do this
           portably by splitting the shift into two parts: shift_count-1 and 1.
           Arrange for the -1 by using ones-complement instead of
           twos-complement in the negation: ~(B * 8) & 63.  */
        tcg_gen_shli_i64(shift, load_gpr(ctx, rb), 3);
        tcg_gen_not_i64(shift, shift);
        tcg_gen_andi_i64(shift, shift, 0x3f);

        tcg_gen_shr_i64(vc, tmp, shift);
        tcg_gen_shri_i64(vc, vc, 1);
    }
}
/* INSBL, INSWL, INSLL, INSQL */
static void gen_ins_l(DisasContext *ctx, TCGv vc, TCGv va, int rb, bool islit,
                      uint8_t lit, uint8_t byte_mask)
{
    if (islit) {
        int pos = (lit & 7) * 8;
        int len = cto32(byte_mask) * 8;
        if (pos + len > 64) {
            len = 64 - pos;
        }
        tcg_gen_deposit_z_i64(vc, va, pos, len);
    } else {
        TCGv tmp = tcg_temp_new();
        TCGv shift = tcg_temp_new();

        /* The instruction description has us left-shift the byte mask
           and extract bits <15:8> and apply that zap at the end.  This
           is equivalent to simply performing the zap first and shifting
           afterward.  */
        gen_zapnoti(tmp, va, byte_mask);

        tcg_gen_andi_i64(shift, load_gpr(ctx, rb), 7);
        tcg_gen_shli_i64(shift, shift, 3);
        tcg_gen_shl_i64(vc, tmp, shift);
    }
}
/* MSKWH, MSKLH, MSKQH */
static void gen_msk_h(DisasContext *ctx, TCGv vc, TCGv va, int rb, bool islit,
                      uint8_t lit, uint8_t byte_mask)
{
    if (islit) {
        gen_zapnoti(vc, va, ~((byte_mask << (lit & 7)) >> 8));
    } else {
        TCGv shift = tcg_temp_new();
        TCGv mask = tcg_temp_new();

        /* The instruction description is as above, where the byte_mask
           is shifted left, and then we extract bits <15:8>.  This can be
           emulated with a right-shift on the expanded byte mask.  This
           requires extra care because for an input <2:0> == 0 we need a
           shift of 64 bits in order to generate a zero.  This is done by
           splitting the shift into two parts, the variable shift - 1
           followed by a constant 1 shift.  The code we expand below is
           equivalent to ~(B * 8) & 63.  */
        tcg_gen_shli_i64(shift, load_gpr(ctx, rb), 3);
        tcg_gen_not_i64(shift, shift);
        tcg_gen_andi_i64(shift, shift, 0x3f);
        tcg_gen_movi_i64(mask, zapnot_mask(byte_mask));
        tcg_gen_shr_i64(mask, mask, shift);
        tcg_gen_shri_i64(mask, mask, 1);

        tcg_gen_andc_i64(vc, va, mask);
    }
}
/* MSKBL, MSKWL, MSKLL, MSKQL */
static void gen_msk_l(DisasContext *ctx, TCGv vc, TCGv va, int rb, bool islit,
                      uint8_t lit, uint8_t byte_mask)
{
    if (islit) {
        gen_zapnoti(vc, va, ~(byte_mask << (lit & 7)));
    } else {
        TCGv shift = tcg_temp_new();
        TCGv mask = tcg_temp_new();

        tcg_gen_andi_i64(shift, load_gpr(ctx, rb), 7);
        tcg_gen_shli_i64(shift, shift, 3);
        tcg_gen_movi_i64(mask, zapnot_mask(byte_mask));
        tcg_gen_shl_i64(mask, mask, shift);

        tcg_gen_andc_i64(vc, va, mask);
    }
}
static void gen_rx(DisasContext *ctx, int ra, int set)
{
    if (ra != 31) {
        ld_flag_byte(ctx->ir[ra], ENV_FLAG_RX_SHIFT);
    }

    st_flag_byte(tcg_constant_i64(set), ENV_FLAG_RX_SHIFT);
}
static DisasJumpType gen_call_pal(DisasContext *ctx, int palcode)
{
    /* We're emulating OSF/1 PALcode.  Many of these are trivial access
       to internal cpu registers.  */

    /* Unprivileged PAL call */
    if (palcode >= 0x80 && palcode < 0xC0) {
        switch (palcode) {
        case 0x86:
            /* IMB */
            /* No-op inside QEMU.  */
            break;
        case 0x9E:
            /* RDUNIQUE */
            tcg_gen_ld_i64(ctx->ir[IR_V0], tcg_env,
                           offsetof(CPUAlphaState, unique));
            break;
        case 0x9F:
            /* WRUNIQUE */
            tcg_gen_st_i64(ctx->ir[IR_A0], tcg_env,
                           offsetof(CPUAlphaState, unique));
            break;
        default:
            palcode &= 0xbf;
            goto do_call_pal;
        }
        return DISAS_NEXT;
    }

#ifndef CONFIG_USER_ONLY
    /* Privileged PAL code */
    if (palcode < 0x40 && (ctx->tbflags & ENV_FLAG_PS_USER) == 0) {
        switch (palcode) {
        case 0x01:
            /* CFLUSH */
            /* No-op inside QEMU.  */
            break;
        case 0x02:
            /* DRAINA */
            /* No-op inside QEMU.  */
            break;
        case 0x2D:
            /* WRVPTPTR */
            tcg_gen_st_i64(ctx->ir[IR_A0], tcg_env,
                           offsetof(CPUAlphaState, vptptr));
            break;
        case 0x31:
            /* WRVAL */
            tcg_gen_st_i64(ctx->ir[IR_A0], tcg_env,
                           offsetof(CPUAlphaState, sysval));
            break;
        case 0x32:
            /* RDVAL */
            tcg_gen_ld_i64(ctx->ir[IR_V0], tcg_env,
                           offsetof(CPUAlphaState, sysval));
            break;

        case 0x35:
            /* SWPIPL */
            /* Note that we already know we're in kernel mode, so we know
               that PS only contains the 3 IPL bits.  */
            ld_flag_byte(ctx->ir[IR_V0], ENV_FLAG_PS_SHIFT);

            /* But make sure and store only the 3 IPL bits from the user.  */
            {
                TCGv tmp = tcg_temp_new();
                tcg_gen_andi_i64(tmp, ctx->ir[IR_A0], PS_INT_MASK);
                st_flag_byte(tmp, ENV_FLAG_PS_SHIFT);
            }

            /* Allow interrupts to be recognized right away.  */
            tcg_gen_movi_i64(cpu_pc, ctx->base.pc_next);
            return DISAS_PC_UPDATED_NOCHAIN;

        case 0x36:
            /* RDPS */
            ld_flag_byte(ctx->ir[IR_V0], ENV_FLAG_PS_SHIFT);
            break;

        case 0x38:
            /* WRUSP */
            tcg_gen_st_i64(ctx->ir[IR_A0], tcg_env,
                           offsetof(CPUAlphaState, usp));
            break;
        case 0x3A:
            /* RDUSP */
            tcg_gen_ld_i64(ctx->ir[IR_V0], tcg_env,
                           offsetof(CPUAlphaState, usp));
            break;
        case 0x3C:
            /* WHAMI */
            tcg_gen_ld32s_i64(ctx->ir[IR_V0], tcg_env,
                -offsetof(AlphaCPU, env) + offsetof(CPUState, cpu_index));
            break;

        case 0x3E:
            /* WTINT */
            tcg_gen_st_i32(tcg_constant_i32(1), tcg_env,
                           -offsetof(AlphaCPU, env) +
                           offsetof(CPUState, halted));
            tcg_gen_movi_i64(ctx->ir[IR_V0], 0);
            return gen_excp(ctx, EXCP_HALTED, 0);

        default:
            palcode &= 0x3f;
            goto do_call_pal;
        }
        return DISAS_NEXT;
    }
#endif
    return gen_invalid(ctx);

 do_call_pal:
#ifdef CONFIG_USER_ONLY
    return gen_excp(ctx, EXCP_CALL_PAL, palcode);
#else
    {
        TCGv tmp = tcg_temp_new();
        uint64_t exc_addr = ctx->base.pc_next;
        uint64_t entry = ctx->palbr;

        if (ctx->tbflags & ENV_FLAG_PAL_MODE) {
            exc_addr |= 1;
        } else {
            tcg_gen_movi_i64(tmp, 1);
            st_flag_byte(tmp, ENV_FLAG_PAL_SHIFT);
        }

        tcg_gen_movi_i64(tmp, exc_addr);
        tcg_gen_st_i64(tmp, tcg_env, offsetof(CPUAlphaState, exc_addr));

        entry += (palcode & 0x80
                  ? 0x2000 + (palcode - 0x80) * 64
                  : 0x1000 + palcode * 64);

        tcg_gen_movi_i64(cpu_pc, entry);
        return DISAS_PC_UPDATED;
    }
#endif
}
#ifndef CONFIG_USER_ONLY

#define PR_LONG         0x200000

static int cpu_pr_data(int pr)
{
    switch (pr) {
    case  2: return offsetof(CPUAlphaState, pcc_ofs) | PR_LONG;
    case  3: return offsetof(CPUAlphaState, trap_arg0);
    case  4: return offsetof(CPUAlphaState, trap_arg1);
    case  5: return offsetof(CPUAlphaState, trap_arg2);
    case  6: return offsetof(CPUAlphaState, exc_addr);
    case  7: return offsetof(CPUAlphaState, palbr);
    case  8: return offsetof(CPUAlphaState, ptbr);
    case  9: return offsetof(CPUAlphaState, vptptr);
    case 10: return offsetof(CPUAlphaState, unique);
    case 11: return offsetof(CPUAlphaState, sysval);
    case 12: return offsetof(CPUAlphaState, usp);

    case 40 ... 63:
        return offsetof(CPUAlphaState, scratch[pr - 40]);

    case 251:
        return offsetof(CPUAlphaState, alarm_expire);
    }
    return 0;
}
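/* Example: cpu_pr_data(2) yields the offset of pcc_ofs with PR_LONG set,
   which tells gen_mfpr/gen_mtpr below to access that processor register
   with 32-bit loads and stores instead of 64-bit ones.  */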
static DisasJumpType gen_mfpr(DisasContext *ctx, TCGv va, int regno)
{
    void (*helper)(TCGv);
    int data;

    switch (regno) {
    case 32 ... 39:
        /* Accessing the "non-shadow" general registers.  */
        regno = regno == 39 ? 25 : regno - 32 + 8;
        tcg_gen_mov_i64(va, cpu_std_ir[regno]);
        break;

    case 250: /* WALLTIME */
        helper = gen_helper_get_walltime;
        goto do_helper;
    case 249: /* VMTIME */
        helper = gen_helper_get_vmtime;
    do_helper:
        if (translator_io_start(&ctx->base)) {
            helper(va);
            return DISAS_PC_STALE;
        } else {
            helper(va);
        }
        break;

    case 0: /* PS */
        ld_flag_byte(va, ENV_FLAG_PS_SHIFT);
        break;
    case 1: /* FEN */
        ld_flag_byte(va, ENV_FLAG_FEN_SHIFT);
        break;

    default:
        /* The basic registers are data only, and unknown registers
           are read-zero, write-ignore.  */
        data = cpu_pr_data(regno);
        if (data == 0) {
            tcg_gen_movi_i64(va, 0);
        } else if (data & PR_LONG) {
            tcg_gen_ld32s_i64(va, tcg_env, data & ~PR_LONG);
        } else {
            tcg_gen_ld_i64(va, tcg_env, data);
        }
        break;
    }

    return DISAS_NEXT;
}
static DisasJumpType gen_mtpr(DisasContext *ctx, TCGv vb, int regno)
{
    int data;
    DisasJumpType ret = DISAS_NEXT;

    switch (regno) {
    case 255:
        /* TBIA */
        gen_helper_tbia(tcg_env);
        break;

    case 254:
        /* TBIS */
        gen_helper_tbis(tcg_env, vb);
        break;

    case 253:
        /* WAIT */
        tcg_gen_st_i32(tcg_constant_i32(1), tcg_env,
                       -offsetof(AlphaCPU, env) + offsetof(CPUState, halted));
        return gen_excp(ctx, EXCP_HALTED, 0);

    case 252:
        /* HALT */
        gen_helper_halt(vb);
        return DISAS_PC_STALE;

    case 251:
        /* ALARM */
        if (translator_io_start(&ctx->base)) {
            ret = DISAS_PC_STALE;
        }
        gen_helper_set_alarm(tcg_env, vb);
        break;

    case 7:
        /* PALBR */
        tcg_gen_st_i64(vb, tcg_env, offsetof(CPUAlphaState, palbr));
        /* Changing the PAL base register implies un-chaining all of the TBs
           that ended with a CALL_PAL.  Since the base register usually only
           changes during boot, flushing everything works well.  */
        gen_helper_tb_flush(tcg_env);
        return DISAS_PC_STALE;

    case 32 ... 39:
        /* Accessing the "non-shadow" general registers.  */
        regno = regno == 39 ? 25 : regno - 32 + 8;
        tcg_gen_mov_i64(cpu_std_ir[regno], vb);
        break;

    case 0: /* PS */
        st_flag_byte(vb, ENV_FLAG_PS_SHIFT);
        break;
    case 1: /* FEN */
        st_flag_byte(vb, ENV_FLAG_FEN_SHIFT);
        break;

    default:
        /* The basic registers are data only, and unknown registers
           are read-zero, write-ignore.  */
        data = cpu_pr_data(regno);
        if (data != 0) {
            if (data & PR_LONG) {
                tcg_gen_st32_i64(vb, tcg_env, data & ~PR_LONG);
            } else {
                tcg_gen_st_i64(vb, tcg_env, data);
            }
        }
        break;
    }

    return ret;
}

#endif /* !CONFIG_USER_ONLY */
#define REQUIRE_NO_LIT                          \
    do {                                        \
        if (real_islit) {                       \
            goto invalid_opc;                   \
        }                                       \
    } while (0)

#define REQUIRE_AMASK(FLAG)                     \
    do {                                        \
        if ((ctx->amask & AMASK_##FLAG) == 0) { \
            goto invalid_opc;                   \
        }                                       \
    } while (0)

#define REQUIRE_TB_FLAG(FLAG)                   \
    do {                                        \
        if ((ctx->tbflags & (FLAG)) == 0) {     \
            goto invalid_opc;                   \
        }                                       \
    } while (0)

#define REQUIRE_REG_31(WHICH)                   \
    do {                                        \
        if (WHICH != 31) {                      \
            goto invalid_opc;                   \
        }                                       \
    } while (0)

#define REQUIRE_FEN                             \
    do {                                        \
        if (!(ctx->tbflags & ENV_FLAG_FEN)) {   \
            goto raise_fen;                     \
        }                                       \
    } while (0)
static DisasJumpType translate_one(DisasContext *ctx, uint32_t insn)
{
    int32_t disp21, disp16, disp12 __attribute__((unused));
    uint16_t fn11;
    uint8_t opc, ra, rb, rc, fpfn, fn7, lit;
    bool islit, real_islit;
    TCGv va, vb, vc, tmp, tmp2;
    TCGv_i32 t32;
    DisasJumpType ret;

    /* Decode all instruction fields */
    opc = extract32(insn, 26, 6);
    ra = extract32(insn, 21, 5);
    rb = extract32(insn, 16, 5);
    rc = extract32(insn, 0, 5);
    real_islit = islit = extract32(insn, 12, 1);
    lit = extract32(insn, 13, 8);

    disp21 = sextract32(insn, 0, 21);
    disp16 = sextract32(insn, 0, 16);
    disp12 = sextract32(insn, 0, 12);

    fn11 = extract32(insn, 5, 11);
    fpfn = extract32(insn, 5, 6);
    fn7 = extract32(insn, 5, 7);
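    /*
     * Field layout recap, as implied by the extracts above: opc is
     * insn<31:26>, ra insn<25:21>, rb insn<20:16>, rc insn<4:0>; for
     * operate formats, islit is insn<12>, lit insn<20:13>, fn7 insn<11:5>
     * and fn11 insn<15:5>, with fpfn the low six bits of fn11.
     */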
    if (rb == 31 && !islit) {
        islit = true;
        lit = 0;
    }

    ret = DISAS_NEXT;
    switch (opc) {
    case 0x00:
        /* CALL_PAL */
        ret = gen_call_pal(ctx, insn & 0x03ffffff);
        break;
    case 0x09:
        /* LDAH */
        disp16 = (uint32_t)disp16 << 16;
        /* fall through */
    case 0x08:
        /* LDA */
        va = dest_gpr(ctx, ra);
        /* It's worth special-casing immediate loads.  */
        if (rb == 31) {
            tcg_gen_movi_i64(va, disp16);
        } else {
            tcg_gen_addi_i64(va, load_gpr(ctx, rb), disp16);
        }
        break;

    case 0x0A:
        /* LDBU */
        REQUIRE_AMASK(BWX);
        gen_load_int(ctx, ra, rb, disp16, MO_UB, 0, 0);
        break;
    case 0x0B:
        /* LDQ_U */
        gen_load_int(ctx, ra, rb, disp16, MO_LEUQ, 1, 0);
        break;
    case 0x0C:
        /* LDWU */
        REQUIRE_AMASK(BWX);
        gen_load_int(ctx, ra, rb, disp16, MO_LEUW, 0, 0);
        break;
    case 0x0D:
        /* STW */
        REQUIRE_AMASK(BWX);
        gen_store_int(ctx, ra, rb, disp16, MO_LEUW, 0);
        break;
    case 0x0E:
        /* STB */
        REQUIRE_AMASK(BWX);
        gen_store_int(ctx, ra, rb, disp16, MO_UB, 0);
        break;
    case 0x0F:
        /* STQ_U */
        gen_store_int(ctx, ra, rb, disp16, MO_LEUQ, 1);
        break;
    case 0x10:
        vc = dest_gpr(ctx, rc);
        vb = load_gpr_lit(ctx, rb, lit, islit);

        if (ra == 31) {
            if (fn7 == 0x00) {
                /* Special case ADDL as SEXTL.  */
                tcg_gen_ext32s_i64(vc, vb);
                break;
            }
            if (fn7 == 0x29) {
                /* Special case SUBQ as NEGQ.  */
                tcg_gen_neg_i64(vc, vb);
                break;
            }
        }

        va = load_gpr(ctx, ra);
        switch (fn7) {
        case 0x00:
            /* ADDL */
            tcg_gen_add_i64(vc, va, vb);
            tcg_gen_ext32s_i64(vc, vc);
            break;
        case 0x02:
            /* S4ADDL */
            tmp = tcg_temp_new();
            tcg_gen_shli_i64(tmp, va, 2);
            tcg_gen_add_i64(tmp, tmp, vb);
            tcg_gen_ext32s_i64(vc, tmp);
            break;
        case 0x09:
            /* SUBL */
            tcg_gen_sub_i64(vc, va, vb);
            tcg_gen_ext32s_i64(vc, vc);
            break;
        case 0x0B:
            /* S4SUBL */
            tmp = tcg_temp_new();
            tcg_gen_shli_i64(tmp, va, 2);
            tcg_gen_sub_i64(tmp, tmp, vb);
            tcg_gen_ext32s_i64(vc, tmp);
            break;
        case 0x0F:
            /* CMPBGE */
            if (ra == 31) {
                /* Special case 0 >= X as X == 0.  */
                gen_helper_cmpbe0(vc, vb);
            } else {
                gen_helper_cmpbge(vc, va, vb);
            }
            break;
        case 0x12:
            /* S8ADDL */
            tmp = tcg_temp_new();
            tcg_gen_shli_i64(tmp, va, 3);
            tcg_gen_add_i64(tmp, tmp, vb);
            tcg_gen_ext32s_i64(vc, tmp);
            break;
        case 0x1B:
            /* S8SUBL */
            tmp = tcg_temp_new();
            tcg_gen_shli_i64(tmp, va, 3);
            tcg_gen_sub_i64(tmp, tmp, vb);
            tcg_gen_ext32s_i64(vc, tmp);
            break;
        case 0x1D:
            /* CMPULT */
            tcg_gen_setcond_i64(TCG_COND_LTU, vc, va, vb);
            break;
        case 0x20:
            /* ADDQ */
            tcg_gen_add_i64(vc, va, vb);
            break;
        case 0x22:
            /* S4ADDQ */
            tmp = tcg_temp_new();
            tcg_gen_shli_i64(tmp, va, 2);
            tcg_gen_add_i64(vc, tmp, vb);
            break;
        case 0x29:
            /* SUBQ */
            tcg_gen_sub_i64(vc, va, vb);
            break;
        case 0x2B:
            /* S4SUBQ */
            tmp = tcg_temp_new();
            tcg_gen_shli_i64(tmp, va, 2);
            tcg_gen_sub_i64(vc, tmp, vb);
            break;
        case 0x2D:
            /* CMPEQ */
            tcg_gen_setcond_i64(TCG_COND_EQ, vc, va, vb);
            break;
        case 0x32:
            /* S8ADDQ */
            tmp = tcg_temp_new();
            tcg_gen_shli_i64(tmp, va, 3);
            tcg_gen_add_i64(vc, tmp, vb);
            break;
        case 0x3B:
            /* S8SUBQ */
            tmp = tcg_temp_new();
            tcg_gen_shli_i64(tmp, va, 3);
            tcg_gen_sub_i64(vc, tmp, vb);
            break;
        case 0x3D:
            /* CMPULE */
            tcg_gen_setcond_i64(TCG_COND_LEU, vc, va, vb);
            break;
        case 0x40:
            /* ADDL/V */
            tmp = tcg_temp_new();
            tcg_gen_ext32s_i64(tmp, va);
            tcg_gen_ext32s_i64(vc, vb);
            tcg_gen_add_i64(tmp, tmp, vc);
            tcg_gen_ext32s_i64(vc, tmp);
            gen_helper_check_overflow(tcg_env, vc, tmp);
            break;
        case 0x49:
            /* SUBL/V */
            tmp = tcg_temp_new();
            tcg_gen_ext32s_i64(tmp, va);
            tcg_gen_ext32s_i64(vc, vb);
            tcg_gen_sub_i64(tmp, tmp, vc);
            tcg_gen_ext32s_i64(vc, tmp);
            gen_helper_check_overflow(tcg_env, vc, tmp);
            break;
        case 0x4D:
            /* CMPLT */
            tcg_gen_setcond_i64(TCG_COND_LT, vc, va, vb);
            break;
        case 0x60:
            /* ADDQ/V */
            tmp = tcg_temp_new();
            tmp2 = tcg_temp_new();
            tcg_gen_eqv_i64(tmp, va, vb);
            tcg_gen_mov_i64(tmp2, va);
            tcg_gen_add_i64(vc, va, vb);
            tcg_gen_xor_i64(tmp2, tmp2, vc);
            tcg_gen_and_i64(tmp, tmp, tmp2);
            tcg_gen_shri_i64(tmp, tmp, 63);
            tcg_gen_movi_i64(tmp2, 0);
            gen_helper_check_overflow(tcg_env, tmp, tmp2);
            break;
        case 0x69:
            /* SUBQ/V */
            tmp = tcg_temp_new();
            tmp2 = tcg_temp_new();
            tcg_gen_xor_i64(tmp, va, vb);
            tcg_gen_mov_i64(tmp2, va);
            tcg_gen_sub_i64(vc, va, vb);
            tcg_gen_xor_i64(tmp2, tmp2, vc);
            tcg_gen_and_i64(tmp, tmp, tmp2);
            tcg_gen_shri_i64(tmp, tmp, 63);
            tcg_gen_movi_i64(tmp2, 0);
            gen_helper_check_overflow(tcg_env, tmp, tmp2);
            break;
        case 0x6D:
            /* CMPLE */
            tcg_gen_setcond_i64(TCG_COND_LE, vc, va, vb);
            break;
        default:
            goto invalid_opc;
        }
        break;
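    /*
     * Note on the /V cases above: gen_helper_check_overflow raises an
     * arithmetic trap when its two operands differ.  The 32-bit forms
     * compare the sign-extended result with the full-width result, while
     * ADDQ/V and SUBQ/V reduce the classic sign identities
     * (~(a ^ b) & (a ^ r) for add, (a ^ b) & (a ^ r) for sub) to a
     * single bit and compare it with zero.
     */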
    case 0x11:
        if (fn7 == 0x20) {
            if (rc == 31) {
                /* Special case BIS as NOP.  */
                break;
            }
            if (ra == 31) {
                /* Special case BIS as MOV.  */
                vc = dest_gpr(ctx, rc);
                if (islit) {
                    tcg_gen_movi_i64(vc, lit);
                } else {
                    tcg_gen_mov_i64(vc, load_gpr(ctx, rb));
                }
                break;
            }
        }

        vc = dest_gpr(ctx, rc);
        vb = load_gpr_lit(ctx, rb, lit, islit);

        if (fn7 == 0x28 && ra == 31) {
            /* Special case ORNOT as NOT.  */
            tcg_gen_not_i64(vc, vb);
            break;
        }

        va = load_gpr(ctx, ra);
        switch (fn7) {
        case 0x00:
            /* AND */
            tcg_gen_and_i64(vc, va, vb);
            break;
        case 0x08:
            /* BIC */
            tcg_gen_andc_i64(vc, va, vb);
            break;
        case 0x14:
            /* CMOVLBS */
            tcg_gen_movcond_i64(TCG_COND_TSTNE, vc, va, tcg_constant_i64(1),
                                vb, load_gpr(ctx, rc));
            break;
        case 0x16:
            /* CMOVLBC */
            tcg_gen_movcond_i64(TCG_COND_TSTEQ, vc, va, tcg_constant_i64(1),
                                vb, load_gpr(ctx, rc));
            break;
        case 0x20:
            /* BIS */
            tcg_gen_or_i64(vc, va, vb);
            break;
        case 0x24:
            /* CMOVEQ */
            tcg_gen_movcond_i64(TCG_COND_EQ, vc, va, load_zero(ctx),
                                vb, load_gpr(ctx, rc));
            break;
        case 0x26:
            /* CMOVNE */
            tcg_gen_movcond_i64(TCG_COND_NE, vc, va, load_zero(ctx),
                                vb, load_gpr(ctx, rc));
            break;
        case 0x28:
            /* ORNOT */
            tcg_gen_orc_i64(vc, va, vb);
            break;
        case 0x40:
            /* XOR */
            tcg_gen_xor_i64(vc, va, vb);
            break;
        case 0x44:
            /* CMOVLT */
            tcg_gen_movcond_i64(TCG_COND_LT, vc, va, load_zero(ctx),
                                vb, load_gpr(ctx, rc));
            break;
        case 0x46:
            /* CMOVGE */
            tcg_gen_movcond_i64(TCG_COND_GE, vc, va, load_zero(ctx),
                                vb, load_gpr(ctx, rc));
            break;
        case 0x48:
            /* EQV */
            tcg_gen_eqv_i64(vc, va, vb);
            break;
        case 0x61:
            /* AMASK */
            REQUIRE_REG_31(ra);
            tcg_gen_andi_i64(vc, vb, ~ctx->amask);
            break;
        case 0x64:
            /* CMOVLE */
            tcg_gen_movcond_i64(TCG_COND_LE, vc, va, load_zero(ctx),
                                vb, load_gpr(ctx, rc));
            break;
        case 0x66:
            /* CMOVGT */
            tcg_gen_movcond_i64(TCG_COND_GT, vc, va, load_zero(ctx),
                                vb, load_gpr(ctx, rc));
            break;
        case 0x6C:
            /* IMPLVER */
            REQUIRE_REG_31(ra);
            tcg_gen_movi_i64(vc, ctx->implver);
            break;
        default:
            goto invalid_opc;
        }
        break;
    case 0x12:
        vc = dest_gpr(ctx, rc);
        va = load_gpr(ctx, ra);
        switch (fn7) {
        case 0x02:
            /* MSKBL */
            gen_msk_l(ctx, vc, va, rb, islit, lit, 0x01);
            break;
        case 0x06:
            /* EXTBL */
            gen_ext_l(ctx, vc, va, rb, islit, lit, 0x01);
            break;
        case 0x0B:
            /* INSBL */
            gen_ins_l(ctx, vc, va, rb, islit, lit, 0x01);
            break;
        case 0x12:
            /* MSKWL */
            gen_msk_l(ctx, vc, va, rb, islit, lit, 0x03);
            break;
        case 0x16:
            /* EXTWL */
            gen_ext_l(ctx, vc, va, rb, islit, lit, 0x03);
            break;
        case 0x1B:
            /* INSWL */
            gen_ins_l(ctx, vc, va, rb, islit, lit, 0x03);
            break;
        case 0x22:
            /* MSKLL */
            gen_msk_l(ctx, vc, va, rb, islit, lit, 0x0f);
            break;
        case 0x26:
            /* EXTLL */
            gen_ext_l(ctx, vc, va, rb, islit, lit, 0x0f);
            break;
        case 0x2B:
            /* INSLL */
            gen_ins_l(ctx, vc, va, rb, islit, lit, 0x0f);
            break;
        case 0x30:
            /* ZAP */
            if (islit) {
                gen_zapnoti(vc, va, ~lit);
            } else {
                gen_helper_zap(vc, va, load_gpr(ctx, rb));
            }
            break;
        case 0x31:
            /* ZAPNOT */
            if (islit) {
                gen_zapnoti(vc, va, lit);
            } else {
                gen_helper_zapnot(vc, va, load_gpr(ctx, rb));
            }
            break;
        case 0x32:
            /* MSKQL */
            gen_msk_l(ctx, vc, va, rb, islit, lit, 0xff);
            break;
        case 0x34:
            /* SRL */
            if (islit) {
                tcg_gen_shri_i64(vc, va, lit & 0x3f);
            } else {
                tmp = tcg_temp_new();
                vb = load_gpr(ctx, rb);
                tcg_gen_andi_i64(tmp, vb, 0x3f);
                tcg_gen_shr_i64(vc, va, tmp);
            }
            break;
        case 0x36:
            /* EXTQL */
            gen_ext_l(ctx, vc, va, rb, islit, lit, 0xff);
            break;
        case 0x39:
            /* SLL */
            if (islit) {
                tcg_gen_shli_i64(vc, va, lit & 0x3f);
            } else {
                tmp = tcg_temp_new();
                vb = load_gpr(ctx, rb);
                tcg_gen_andi_i64(tmp, vb, 0x3f);
                tcg_gen_shl_i64(vc, va, tmp);
            }
            break;
        case 0x3B:
            /* INSQL */
            gen_ins_l(ctx, vc, va, rb, islit, lit, 0xff);
            break;
        case 0x3C:
            /* SRA */
            if (islit) {
                tcg_gen_sari_i64(vc, va, lit & 0x3f);
            } else {
                tmp = tcg_temp_new();
                vb = load_gpr(ctx, rb);
                tcg_gen_andi_i64(tmp, vb, 0x3f);
                tcg_gen_sar_i64(vc, va, tmp);
            }
            break;
        case 0x52:
            /* MSKWH */
            gen_msk_h(ctx, vc, va, rb, islit, lit, 0x03);
            break;
        case 0x57:
            /* INSWH */
            gen_ins_h(ctx, vc, va, rb, islit, lit, 0x03);
            break;
        case 0x5A:
            /* EXTWH */
            gen_ext_h(ctx, vc, va, rb, islit, lit, 0x03);
            break;
        case 0x62:
            /* MSKLH */
            gen_msk_h(ctx, vc, va, rb, islit, lit, 0x0f);
            break;
        case 0x67:
            /* INSLH */
            gen_ins_h(ctx, vc, va, rb, islit, lit, 0x0f);
            break;
        case 0x6A:
            /* EXTLH */
            gen_ext_h(ctx, vc, va, rb, islit, lit, 0x0f);
            break;
        case 0x72:
            /* MSKQH */
            gen_msk_h(ctx, vc, va, rb, islit, lit, 0xff);
            break;
        case 0x77:
            /* INSQH */
            gen_ins_h(ctx, vc, va, rb, islit, lit, 0xff);
            break;
        case 0x7A:
            /* EXTQH */
            gen_ext_h(ctx, vc, va, rb, islit, lit, 0xff);
            break;
        default:
            goto invalid_opc;
        }
        break;
    case 0x13:
        vc = dest_gpr(ctx, rc);
        vb = load_gpr_lit(ctx, rb, lit, islit);
        va = load_gpr(ctx, ra);
        switch (fn7) {
        case 0x00:
            /* MULL */
            tcg_gen_mul_i64(vc, va, vb);
            tcg_gen_ext32s_i64(vc, vc);
            break;
        case 0x20:
            /* MULQ */
            tcg_gen_mul_i64(vc, va, vb);
            break;
        case 0x30:
            /* UMULH */
            tmp = tcg_temp_new();
            tcg_gen_mulu2_i64(tmp, vc, va, vb);
            break;
        case 0x40:
            /* MULL/V */
            tmp = tcg_temp_new();
            tcg_gen_ext32s_i64(tmp, va);
            tcg_gen_ext32s_i64(vc, vb);
            tcg_gen_mul_i64(tmp, tmp, vc);
            tcg_gen_ext32s_i64(vc, tmp);
            gen_helper_check_overflow(tcg_env, vc, tmp);
            break;
        case 0x60:
            /* MULQ/V */
            tmp = tcg_temp_new();
            tmp2 = tcg_temp_new();
            tcg_gen_muls2_i64(vc, tmp, va, vb);
            tcg_gen_sari_i64(tmp2, vc, 63);
            gen_helper_check_overflow(tcg_env, tmp, tmp2);
            break;
        default:
            goto invalid_opc;
        }
        break;
    case 0x14:
        REQUIRE_AMASK(FIX);
        vc = dest_fpr(ctx, rc);
        switch (fpfn) { /* fn11 & 0x3F */
        case 0x04:
            /* ITOFS */
            REQUIRE_REG_31(rb);
            REQUIRE_FEN;
            t32 = tcg_temp_new_i32();
            va = load_gpr(ctx, ra);
            tcg_gen_extrl_i64_i32(t32, va);
            gen_helper_memory_to_s(vc, t32);
            break;
        case 0x0A:
            /* SQRTF */
            REQUIRE_REG_31(ra);
            REQUIRE_FEN;
            vb = load_fpr(ctx, rb);
            gen_helper_sqrtf(vc, tcg_env, vb);
            break;
        case 0x0B:
            /* SQRTS */
            REQUIRE_REG_31(ra);
            REQUIRE_FEN;
            gen_sqrts(ctx, rb, rc, fn11);
            break;
        case 0x14:
            /* ITOFF */
            REQUIRE_REG_31(rb);
            REQUIRE_FEN;
            t32 = tcg_temp_new_i32();
            va = load_gpr(ctx, ra);
            tcg_gen_extrl_i64_i32(t32, va);
            gen_helper_memory_to_f(vc, t32);
            break;
        case 0x24:
            /* ITOFT */
            REQUIRE_REG_31(rb);
            REQUIRE_FEN;
            va = load_gpr(ctx, ra);
            tcg_gen_mov_i64(vc, va);
            break;
        case 0x2A:
            /* SQRTG */
            REQUIRE_REG_31(ra);
            REQUIRE_FEN;
            vb = load_fpr(ctx, rb);
            gen_helper_sqrtg(vc, tcg_env, vb);
            break;
        case 0x2B:
            /* SQRTT */
            REQUIRE_REG_31(ra);
            REQUIRE_FEN;
            gen_sqrtt(ctx, rb, rc, fn11);
            break;
        default:
            goto invalid_opc;
        }
        break;
    case 0x15:
        /* VAX floating point */
        /* XXX: rounding mode and trap are ignored (!) */
        vc = dest_fpr(ctx, rc);
        vb = load_fpr(ctx, rb);
        va = load_fpr(ctx, ra);
        switch (fpfn) { /* fn11 & 0x3F */
        case 0x00:
            /* ADDF */
            REQUIRE_FEN;
            gen_helper_addf(vc, tcg_env, va, vb);
            break;
        case 0x01:
            /* SUBF */
            REQUIRE_FEN;
            gen_helper_subf(vc, tcg_env, va, vb);
            break;
        case 0x02:
            /* MULF */
            REQUIRE_FEN;
            gen_helper_mulf(vc, tcg_env, va, vb);
            break;
        case 0x03:
            /* DIVF */
            REQUIRE_FEN;
            gen_helper_divf(vc, tcg_env, va, vb);
            break;
        case 0x20:
            /* ADDG */
            REQUIRE_FEN;
            gen_helper_addg(vc, tcg_env, va, vb);
            break;
        case 0x21:
            /* SUBG */
            REQUIRE_FEN;
            gen_helper_subg(vc, tcg_env, va, vb);
            break;
        case 0x22:
            /* MULG */
            REQUIRE_FEN;
            gen_helper_mulg(vc, tcg_env, va, vb);
            break;
        case 0x23:
            /* DIVG */
            REQUIRE_FEN;
            gen_helper_divg(vc, tcg_env, va, vb);
            break;
        case 0x25:
            /* CMPGEQ */
            REQUIRE_FEN;
            gen_helper_cmpgeq(vc, tcg_env, va, vb);
            break;
        case 0x26:
            /* CMPGLT */
            REQUIRE_FEN;
            gen_helper_cmpglt(vc, tcg_env, va, vb);
            break;
        case 0x27:
            /* CMPGLE */
            REQUIRE_FEN;
            gen_helper_cmpgle(vc, tcg_env, va, vb);
            break;
        case 0x2C:
            /* CVTGF */
            REQUIRE_REG_31(ra);
            REQUIRE_FEN;
            gen_helper_cvtgf(vc, tcg_env, vb);
            break;
        case 0x2F:
            /* CVTGQ */
            REQUIRE_REG_31(ra);
            REQUIRE_FEN;
            gen_helper_cvtgq(vc, tcg_env, vb);
            break;
        case 0x3C:
            /* CVTQF */
            REQUIRE_REG_31(ra);
            REQUIRE_FEN;
            gen_helper_cvtqf(vc, tcg_env, vb);
            break;
        case 0x3E:
            /* CVTQG */
            REQUIRE_REG_31(ra);
            REQUIRE_FEN;
            gen_helper_cvtqg(vc, tcg_env, vb);
            break;
        default:
            goto invalid_opc;
        }
        break;
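    /*
     * The VAX F/G-floating group above is routed entirely through
     * helpers: those formats differ from IEEE in exponent bias and byte
     * layout, so no inline TCG expansion is attempted for them.
     */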
    case 0x16:
        /* IEEE floating-point */
        switch (fpfn) { /* fn11 & 0x3F */
        case 0x00:
            /* ADDS */
            REQUIRE_FEN;
            gen_adds(ctx, ra, rb, rc, fn11);
            break;
        case 0x01:
            /* SUBS */
            REQUIRE_FEN;
            gen_subs(ctx, ra, rb, rc, fn11);
            break;
        case 0x02:
            /* MULS */
            REQUIRE_FEN;
            gen_muls(ctx, ra, rb, rc, fn11);
            break;
        case 0x03:
            /* DIVS */
            REQUIRE_FEN;
            gen_divs(ctx, ra, rb, rc, fn11);
            break;
        case 0x20:
            /* ADDT */
            REQUIRE_FEN;
            gen_addt(ctx, ra, rb, rc, fn11);
            break;
        case 0x21:
            /* SUBT */
            REQUIRE_FEN;
            gen_subt(ctx, ra, rb, rc, fn11);
            break;
        case 0x22:
            /* MULT */
            REQUIRE_FEN;
            gen_mult(ctx, ra, rb, rc, fn11);
            break;
        case 0x23:
            /* DIVT */
            REQUIRE_FEN;
            gen_divt(ctx, ra, rb, rc, fn11);
            break;
        case 0x24:
            /* CMPTUN */
            REQUIRE_FEN;
            gen_cmptun(ctx, ra, rb, rc, fn11);
            break;
        case 0x25:
            /* CMPTEQ */
            REQUIRE_FEN;
            gen_cmpteq(ctx, ra, rb, rc, fn11);
            break;
        case 0x26:
            /* CMPTLT */
            REQUIRE_FEN;
            gen_cmptlt(ctx, ra, rb, rc, fn11);
            break;
        case 0x27:
            /* CMPTLE */
            REQUIRE_FEN;
            gen_cmptle(ctx, ra, rb, rc, fn11);
            break;
        case 0x2C:
            REQUIRE_REG_31(ra);
            REQUIRE_FEN;
            if (fn11 == 0x2AC || fn11 == 0x6AC) {
                /* CVTST */
                gen_cvtst(ctx, rb, rc, fn11);
            } else {
                /* CVTTS */
                gen_cvtts(ctx, rb, rc, fn11);
            }
            break;
        case 0x2F:
            /* CVTTQ */
            REQUIRE_REG_31(ra);
            REQUIRE_FEN;
            gen_cvttq(ctx, rb, rc, fn11);
            break;
        case 0x3C:
            /* CVTQS */
            REQUIRE_REG_31(ra);
            REQUIRE_FEN;
            gen_cvtqs(ctx, rb, rc, fn11);
            break;
        case 0x3E:
            /* CVTQT */
            REQUIRE_REG_31(ra);
            REQUIRE_FEN;
            gen_cvtqt(ctx, rb, rc, fn11);
            break;
        default:
            goto invalid_opc;
        }
        break;
    case 0x17:
        switch (fn11) {
        case 0x010:
            /* CVTLQ */
            REQUIRE_REG_31(ra);
            REQUIRE_FEN;
            vc = dest_fpr(ctx, rc);
            vb = load_fpr(ctx, rb);
            gen_cvtlq(vc, vb);
            break;
        case 0x020:
            /* CPYS */
            REQUIRE_FEN;
            if (rc == 31) {
                /* Special case CPYS as FNOP.  */
            } else {
                vc = dest_fpr(ctx, rc);
                va = load_fpr(ctx, ra);
                if (ra == rb) {
                    /* Special case CPYS as FMOV.  */
                    tcg_gen_mov_i64(vc, va);
                } else {
                    vb = load_fpr(ctx, rb);
                    gen_cpy_mask(vc, va, vb, 0, 0x8000000000000000ULL);
                }
            }
            break;
        case 0x021:
            /* CPYSN */
            REQUIRE_FEN;
            vc = dest_fpr(ctx, rc);
            vb = load_fpr(ctx, rb);
            va = load_fpr(ctx, ra);
            gen_cpy_mask(vc, va, vb, 1, 0x8000000000000000ULL);
            break;
        case 0x022:
            /* CPYSE */
            REQUIRE_FEN;
            vc = dest_fpr(ctx, rc);
            vb = load_fpr(ctx, rb);
            va = load_fpr(ctx, ra);
            gen_cpy_mask(vc, va, vb, 0, 0xFFF0000000000000ULL);
            break;
        case 0x024:
            /* MT_FPCR */
            REQUIRE_FEN;
            va = load_fpr(ctx, ra);
            gen_helper_store_fpcr(tcg_env, va);
            if (ctx->tb_rm == QUAL_RM_D) {
                /* Re-do the copy of the rounding mode to fp_status
                   the next time we use dynamic rounding.  */
                ctx->tb_rm = -1;
            }
            break;
        case 0x025:
            /* MF_FPCR */
            REQUIRE_FEN;
            va = dest_fpr(ctx, ra);
            gen_helper_load_fpcr(va, tcg_env);
            break;
        case 0x02A:
            /* FCMOVEQ */
            REQUIRE_FEN;
            gen_fcmov(ctx, TCG_COND_EQ, ra, rb, rc);
            break;
        case 0x02B:
            /* FCMOVNE */
            REQUIRE_FEN;
            gen_fcmov(ctx, TCG_COND_NE, ra, rb, rc);
            break;
        case 0x02C:
            /* FCMOVLT */
            REQUIRE_FEN;
            gen_fcmov(ctx, TCG_COND_LT, ra, rb, rc);
            break;
        case 0x02D:
            /* FCMOVGE */
            REQUIRE_FEN;
            gen_fcmov(ctx, TCG_COND_GE, ra, rb, rc);
            break;
        case 0x02E:
            /* FCMOVLE */
            REQUIRE_FEN;
            gen_fcmov(ctx, TCG_COND_LE, ra, rb, rc);
            break;
        case 0x02F:
            /* FCMOVGT */
            REQUIRE_FEN;
            gen_fcmov(ctx, TCG_COND_GT, ra, rb, rc);
            break;
        case 0x030: /* CVTQL */
        case 0x130: /* CVTQL/V */
        case 0x530: /* CVTQL/SV */
            REQUIRE_REG_31(ra);
            REQUIRE_FEN;
            vc = dest_fpr(ctx, rc);
            vb = load_fpr(ctx, rb);
            gen_helper_cvtql(vc, tcg_env, vb);
            gen_fp_exc_raise(rc, fn11);
            break;
        default:
            goto invalid_opc;
        }
        break;
    case 0x18:
        switch ((uint16_t)disp16) {
        case 0x0000:
            /* TRAPB */
            /* No-op.  */
            break;
        case 0x0400:
            /* EXCB */
            /* No-op.  */
            break;
        case 0x4000:
            /* MB */
            tcg_gen_mb(TCG_MO_ALL | TCG_BAR_SC);
            break;
        case 0x4400:
            /* WMB */
            tcg_gen_mb(TCG_MO_ST_ST | TCG_BAR_SC);
            break;
        case 0x8000:
            /* FETCH */
            /* No-op.  */
            break;
        case 0xA000:
            /* FETCH_M */
            /* No-op.  */
            break;
        case 0xC000:
            /* RPCC */
            va = dest_gpr(ctx, ra);
            if (translator_io_start(&ctx->base)) {
                ret = DISAS_PC_STALE;
            }
            gen_helper_load_pcc(va, tcg_env);
            break;
        case 0xE000:
            /* RC */
            gen_rx(ctx, ra, 0);
            break;
        case 0xF000:
            /* RS */
            gen_rx(ctx, ra, 1);
            break;
        default:
            goto invalid_opc;
        }
        break;
    case 0x19:
        /* HW_MFPR (PALcode) */
#ifndef CONFIG_USER_ONLY
        REQUIRE_TB_FLAG(ENV_FLAG_PAL_MODE);
        va = dest_gpr(ctx, ra);
        ret = gen_mfpr(ctx, va, insn & 0xffff);
        break;
#else
        goto invalid_opc;
#endif
    case 0x1A:
        /* JMP, JSR, RET, JSR_COROUTINE.  These only differ by the branch
           prediction stack action, which of course we don't implement.  */
        vb = load_gpr(ctx, rb);
        tcg_gen_andi_i64(cpu_pc, vb, ~3);
        if (ra != 31) {
            tcg_gen_movi_i64(ctx->ir[ra], ctx->base.pc_next);
        }
        ret = DISAS_PC_UPDATED;
        break;
    case 0x1B:
        /* HW_LD (PALcode) */
#ifndef CONFIG_USER_ONLY
        REQUIRE_TB_FLAG(ENV_FLAG_PAL_MODE);
        {
            TCGv addr = tcg_temp_new();
            vb = load_gpr(ctx, rb);
            va = dest_gpr(ctx, ra);

            tcg_gen_addi_i64(addr, vb, disp12);
            switch ((insn >> 12) & 0xF) {
            case 0x0:
                /* Longword physical access (hw_ldl/p) */
                tcg_gen_qemu_ld_i64(va, addr, MMU_PHYS_IDX, MO_LESL | MO_ALIGN);
                break;
            case 0x1:
                /* Quadword physical access (hw_ldq/p) */
                tcg_gen_qemu_ld_i64(va, addr, MMU_PHYS_IDX, MO_LEUQ | MO_ALIGN);
                break;
            case 0x2:
                /* Longword physical access with lock (hw_ldl_l/p) */
                tcg_gen_qemu_ld_i64(va, addr, MMU_PHYS_IDX, MO_LESL | MO_ALIGN);
                tcg_gen_mov_i64(cpu_lock_addr, addr);
                tcg_gen_mov_i64(cpu_lock_value, va);
                break;
            case 0x3:
                /* Quadword physical access with lock (hw_ldq_l/p) */
                tcg_gen_qemu_ld_i64(va, addr, MMU_PHYS_IDX, MO_LEUQ | MO_ALIGN);
                tcg_gen_mov_i64(cpu_lock_addr, addr);
                tcg_gen_mov_i64(cpu_lock_value, va);
                break;
            case 0x4:
                /* Longword virtual PTE fetch (hw_ldl/v) */
                goto invalid_opc;
            case 0x5:
                /* Quadword virtual PTE fetch (hw_ldq/v) */
                goto invalid_opc;
            case 0x8:
                /* Longword virtual access (hw_ldl) */
                goto invalid_opc;
            case 0x9:
                /* Quadword virtual access (hw_ldq) */
                goto invalid_opc;
            case 0xA:
                /* Longword virtual access with protection check (hw_ldl/w) */
                tcg_gen_qemu_ld_i64(va, addr, MMU_KERNEL_IDX,
                                    MO_LESL | MO_ALIGN);
                break;
            case 0xB:
                /* Quadword virtual access with protection check (hw_ldq/w) */
                tcg_gen_qemu_ld_i64(va, addr, MMU_KERNEL_IDX,
                                    MO_LEUQ | MO_ALIGN);
                break;
            case 0xC:
                /* Longword virtual access with alt access mode (hw_ldl/a) */
                goto invalid_opc;
            case 0xD:
                /* Quadword virtual access with alt access mode (hw_ldq/a) */
                goto invalid_opc;
            case 0xE:
                /* Longword virtual access with alternate access mode and
                   protection checks (hw_ldl/wa) */
                tcg_gen_qemu_ld_i64(va, addr, MMU_USER_IDX,
                                    MO_LESL | MO_ALIGN);
                break;
            case 0xF:
                /* Quadword virtual access with alternate access mode and
                   protection checks (hw_ldq/wa) */
                tcg_gen_qemu_ld_i64(va, addr, MMU_USER_IDX,
                                    MO_LEUQ | MO_ALIGN);
                break;
            default:
                goto invalid_opc;
            }
            break;
        }
#else
        goto invalid_opc;
#endif
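    /*
     * Note: MMU_PHYS_IDX above selects untranslated physical addressing
     * for the PALcode accesses, while the /w and /wa forms use
     * MMU_KERNEL_IDX and MMU_USER_IDX to inherit the protection checks of
     * the corresponding processor mode.
     */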
    case 0x1C:
        vc = dest_gpr(ctx, rc);
        if (fn7 == 0x70) {
            /* FTOIT */
            REQUIRE_AMASK(FIX);
            REQUIRE_REG_31(rb);
            REQUIRE_FEN;
            va = load_fpr(ctx, ra);
            tcg_gen_mov_i64(vc, va);
            break;
        } else if (fn7 == 0x78) {
            /* FTOIS */
            REQUIRE_AMASK(FIX);
            REQUIRE_REG_31(rb);
            REQUIRE_FEN;
            t32 = tcg_temp_new_i32();
            va = load_fpr(ctx, ra);
            gen_helper_s_to_memory(t32, va);
            tcg_gen_ext_i32_i64(vc, t32);
            break;
        }

        vb = load_gpr_lit(ctx, rb, lit, islit);
        switch (fn7) {
        case 0x00:
            /* SEXTB */
            REQUIRE_AMASK(BWX);
            REQUIRE_REG_31(ra);
            tcg_gen_ext8s_i64(vc, vb);
            break;
        case 0x01:
            /* SEXTW */
            REQUIRE_AMASK(BWX);
            REQUIRE_REG_31(ra);
            tcg_gen_ext16s_i64(vc, vb);
            break;
        case 0x30:
            /* CTPOP */
            REQUIRE_AMASK(CIX);
            REQUIRE_REG_31(ra);
            REQUIRE_NO_LIT;
            tcg_gen_ctpop_i64(vc, vb);
            break;
        case 0x31:
            /* PERR */
            REQUIRE_AMASK(MVI);
            REQUIRE_NO_LIT;
            va = load_gpr(ctx, ra);
            gen_helper_perr(vc, va, vb);
            break;
        case 0x32:
            /* CTLZ */
            REQUIRE_AMASK(CIX);
            REQUIRE_REG_31(ra);
            REQUIRE_NO_LIT;
            tcg_gen_clzi_i64(vc, vb, 64);
            break;
        case 0x33:
            /* CTTZ */
            REQUIRE_AMASK(CIX);
            REQUIRE_REG_31(ra);
            REQUIRE_NO_LIT;
            tcg_gen_ctzi_i64(vc, vb, 64);
            break;
        case 0x34:
            /* UNPKBW */
            REQUIRE_AMASK(MVI);
            REQUIRE_REG_31(ra);
            REQUIRE_NO_LIT;
            gen_helper_unpkbw(vc, vb);
            break;
        case 0x35:
            /* UNPKBL */
            REQUIRE_AMASK(MVI);
            REQUIRE_REG_31(ra);
            REQUIRE_NO_LIT;
            gen_helper_unpkbl(vc, vb);
            break;
        case 0x36:
            /* PKWB */
            REQUIRE_AMASK(MVI);
            REQUIRE_REG_31(ra);
            REQUIRE_NO_LIT;
            gen_helper_pkwb(vc, vb);
            break;
        case 0x37:
            /* PKLB */
            REQUIRE_AMASK(MVI);
            REQUIRE_REG_31(ra);
            REQUIRE_NO_LIT;
            gen_helper_pklb(vc, vb);
            break;
        case 0x38:
            /* MINSB8 */
            REQUIRE_AMASK(MVI);
            va = load_gpr(ctx, ra);
            gen_helper_minsb8(vc, va, vb);
            break;
        case 0x39:
            /* MINSW4 */
            REQUIRE_AMASK(MVI);
            va = load_gpr(ctx, ra);
            gen_helper_minsw4(vc, va, vb);
            break;
        case 0x3A:
            /* MINUB8 */
            REQUIRE_AMASK(MVI);
            va = load_gpr(ctx, ra);
            gen_helper_minub8(vc, va, vb);
            break;
        case 0x3B:
            /* MINUW4 */
            REQUIRE_AMASK(MVI);
            va = load_gpr(ctx, ra);
            gen_helper_minuw4(vc, va, vb);
            break;
        case 0x3C:
            /* MAXUB8 */
            REQUIRE_AMASK(MVI);
            va = load_gpr(ctx, ra);
            gen_helper_maxub8(vc, va, vb);
            break;
        case 0x3D:
            /* MAXUW4 */
            REQUIRE_AMASK(MVI);
            va = load_gpr(ctx, ra);
            gen_helper_maxuw4(vc, va, vb);
            break;
        case 0x3E:
            /* MAXSB8 */
            REQUIRE_AMASK(MVI);
            va = load_gpr(ctx, ra);
            gen_helper_maxsb8(vc, va, vb);
            break;
        case 0x3F:
            /* MAXSW4 */
            REQUIRE_AMASK(MVI);
            va = load_gpr(ctx, ra);
            gen_helper_maxsw4(vc, va, vb);
            break;
        default:
            goto invalid_opc;
        }
        break;
    case 0x1D:
        /* HW_MTPR (PALcode) */
#ifndef CONFIG_USER_ONLY
        REQUIRE_TB_FLAG(ENV_FLAG_PAL_MODE);
        vb = load_gpr(ctx, rb);
        ret = gen_mtpr(ctx, vb, insn & 0xffff);
        break;
#else
        goto invalid_opc;
#endif
    case 0x1E:
        /* HW_RET (PALcode) */
#ifndef CONFIG_USER_ONLY
        REQUIRE_TB_FLAG(ENV_FLAG_PAL_MODE);
        if (rb == 31) {
            /* Pre-EV6 CPUs interpreted this as HW_REI, loading the return
               address from EXC_ADDR.  This turns out to be useful for our
               emulation PALcode, so continue to accept it.  */
            vb = dest_sink(ctx);
            tcg_gen_ld_i64(vb, tcg_env, offsetof(CPUAlphaState, exc_addr));
        } else {
            vb = load_gpr(ctx, rb);
        }
        tcg_gen_movi_i64(cpu_lock_addr, -1);
        st_flag_byte(load_zero(ctx), ENV_FLAG_RX_SHIFT);
        tmp = tcg_temp_new();
        tcg_gen_andi_i64(tmp, vb, 1);
        st_flag_byte(tmp, ENV_FLAG_PAL_SHIFT);
        tcg_gen_andi_i64(cpu_pc, vb, ~3);
        /* Allow interrupts to be recognized right away.  */
        ret = DISAS_PC_UPDATED_NOCHAIN;
        break;
#else
        goto invalid_opc;
#endif
    case 0x1F:
        /* HW_ST (PALcode) */
#ifndef CONFIG_USER_ONLY
        REQUIRE_TB_FLAG(ENV_FLAG_PAL_MODE);
        switch ((insn >> 12) & 0xF) {
        case 0x0:
            /* Longword physical access */
            va = load_gpr(ctx, ra);
            vb = load_gpr(ctx, rb);
            tmp = tcg_temp_new();
            tcg_gen_addi_i64(tmp, vb, disp12);
            tcg_gen_qemu_st_i64(va, tmp, MMU_PHYS_IDX, MO_LESL | MO_ALIGN);
            break;
        case 0x1:
            /* Quadword physical access */
            va = load_gpr(ctx, ra);
            vb = load_gpr(ctx, rb);
            tmp = tcg_temp_new();
            tcg_gen_addi_i64(tmp, vb, disp12);
            tcg_gen_qemu_st_i64(va, tmp, MMU_PHYS_IDX, MO_LEUQ | MO_ALIGN);
            break;
        case 0x2:
            /* Longword physical access with lock */
            ret = gen_store_conditional(ctx, ra, rb, disp12,
                                        MMU_PHYS_IDX, MO_LESL | MO_ALIGN);
            break;
        case 0x3:
            /* Quadword physical access with lock */
            ret = gen_store_conditional(ctx, ra, rb, disp12,
                                        MMU_PHYS_IDX, MO_LEUQ | MO_ALIGN);
            break;
        case 0x4:
            /* Longword virtual access */
            goto invalid_opc;
        case 0x5:
            /* Quadword virtual access */
            goto invalid_opc;
        case 0xC:
            /* Longword virtual access with alternate access mode */
            goto invalid_opc;
        case 0xD:
            /* Quadword virtual access with alternate access mode */
            goto invalid_opc;
        default:
            goto invalid_opc;
        }
        break;
#else
        goto invalid_opc;
#endif
    case 0x20:
        /* LDF */
        REQUIRE_FEN;
        gen_load_fp(ctx, ra, rb, disp16, gen_ldf);
        break;
    case 0x21:
        /* LDG */
        REQUIRE_FEN;
        gen_load_fp(ctx, ra, rb, disp16, gen_ldg);
        break;
    case 0x22:
        /* LDS */
        REQUIRE_FEN;
        gen_load_fp(ctx, ra, rb, disp16, gen_lds);
        break;
    case 0x23:
        /* LDT */
        REQUIRE_FEN;
        gen_load_fp(ctx, ra, rb, disp16, gen_ldt);
        break;
    case 0x24:
        /* STF */
        REQUIRE_FEN;
        gen_store_fp(ctx, ra, rb, disp16, gen_stf);
        break;
    case 0x25:
        /* STG */
        REQUIRE_FEN;
        gen_store_fp(ctx, ra, rb, disp16, gen_stg);
        break;
    case 0x26:
        /* STS */
        REQUIRE_FEN;
        gen_store_fp(ctx, ra, rb, disp16, gen_sts);
        break;
    case 0x27:
        /* STT */
        REQUIRE_FEN;
        gen_store_fp(ctx, ra, rb, disp16, gen_stt);
        break;
    case 0x28:
        /* LDL */
        gen_load_int(ctx, ra, rb, disp16, MO_LESL, 0, 0);
        break;
    case 0x29:
        /* LDQ */
        gen_load_int(ctx, ra, rb, disp16, MO_LEUQ, 0, 0);
        break;
    case 0x2A:
        /* LDL_L */
        gen_load_int(ctx, ra, rb, disp16, MO_LESL | MO_ALIGN, 0, 1);
        break;
    case 0x2B:
        /* LDQ_L */
        gen_load_int(ctx, ra, rb, disp16, MO_LEUQ | MO_ALIGN, 0, 1);
        break;
    case 0x2C:
        /* STL */
        gen_store_int(ctx, ra, rb, disp16, MO_LEUL, 0);
        break;
    case 0x2D:
        /* STQ */
        gen_store_int(ctx, ra, rb, disp16, MO_LEUQ, 0);
        break;
    case 0x2E:
        /* STL_C */
        ret = gen_store_conditional(ctx, ra, rb, disp16,
                                    ctx->mem_idx, MO_LESL | MO_ALIGN);
        break;
    case 0x2F:
        /* STQ_C */
        ret = gen_store_conditional(ctx, ra, rb, disp16,
                                    ctx->mem_idx, MO_LEUQ | MO_ALIGN);
        break;
    case 0x30:
        /* BR */
        ret = gen_bdirect(ctx, ra, disp21);
        break;
    case 0x31: /* FBEQ */
        REQUIRE_FEN;
        ret = gen_fbcond(ctx, TCG_COND_EQ, ra, disp21);
        break;
    case 0x32: /* FBLT */
        REQUIRE_FEN;
        ret = gen_fbcond(ctx, TCG_COND_LT, ra, disp21);
        break;
    case 0x33: /* FBLE */
        REQUIRE_FEN;
        ret = gen_fbcond(ctx, TCG_COND_LE, ra, disp21);
        break;
    case 0x34:
        /* BSR */
        ret = gen_bdirect(ctx, ra, disp21);
        break;
    case 0x35: /* FBNE */
        REQUIRE_FEN;
        ret = gen_fbcond(ctx, TCG_COND_NE, ra, disp21);
        break;
    case 0x36: /* FBGE */
        REQUIRE_FEN;
        ret = gen_fbcond(ctx, TCG_COND_GE, ra, disp21);
        break;
    case 0x37: /* FBGT */
        REQUIRE_FEN;
        ret = gen_fbcond(ctx, TCG_COND_GT, ra, disp21);
        break;
    case 0x38:
        /* BLBC */
        ret = gen_bcond(ctx, TCG_COND_TSTEQ, ra, disp21);
        break;
    case 0x39:
        /* BEQ */
        ret = gen_bcond(ctx, TCG_COND_EQ, ra, disp21);
        break;
    case 0x3A:
        /* BLT */
        ret = gen_bcond(ctx, TCG_COND_LT, ra, disp21);
        break;
    case 0x3B:
        /* BLE */
        ret = gen_bcond(ctx, TCG_COND_LE, ra, disp21);
        break;
    case 0x3C:
        /* BLBS */
        ret = gen_bcond(ctx, TCG_COND_TSTNE, ra, disp21);
        break;
    case 0x3D:
        /* BNE */
        ret = gen_bcond(ctx, TCG_COND_NE, ra, disp21);
        break;
    case 0x3E:
        /* BGE */
        ret = gen_bcond(ctx, TCG_COND_GE, ra, disp21);
        break;
    case 0x3F:
        /* BGT */
        ret = gen_bcond(ctx, TCG_COND_GT, ra, disp21);
        break;
    default:
    invalid_opc:
        ret = gen_invalid(ctx);
        break;
    raise_fen:
        ret = gen_excp(ctx, EXCP_FEN, 0);
        break;
    }

    return ret;
}
static void alpha_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cpu)
{
    DisasContext *ctx = container_of(dcbase, DisasContext, base);
    CPUAlphaState *env = cpu_env(cpu);
    int64_t bound;

    ctx->tbflags = ctx->base.tb->flags;
    ctx->mem_idx = alpha_env_mmu_index(env);
    ctx->implver = env->implver;
    ctx->amask = env->amask;

#ifdef CONFIG_USER_ONLY
    ctx->ir = cpu_std_ir;
    ctx->unalign = (ctx->tbflags & TB_FLAG_UNALIGN ? MO_UNALN : MO_ALIGN);
#else
    ctx->palbr = env->palbr;
    ctx->ir = (ctx->tbflags & ENV_FLAG_PAL_MODE ? cpu_pal_ir : cpu_std_ir);
#endif

    /* ??? Every TB begins with unset rounding mode, to be initialized on
       the first fp insn of the TB.  Alternately we could define a proper
       default for every TB (e.g. QUAL_RM_N or QUAL_RM_D) and make sure
       to reset the FP_STATUS to that default at the end of any TB that
       changes the default.  We could even (gasp) dynamically figure out
       what default would be most efficient given the running program.  */
    ctx->tb_rm = -1;

    /* Similarly for flush-to-zero.  */
    ctx->tb_ftz = -1;

    ctx->zero = NULL;
    ctx->sink = NULL;

    /* Bound the number of insns to execute to those left on the page.  */
    bound = -(ctx->base.pc_first | TARGET_PAGE_MASK) / 4;
    ctx->base.max_insns = MIN(ctx->base.max_insns, bound);
}
static void alpha_tr_tb_start(DisasContextBase *db, CPUState *cpu)
{
}

static void alpha_tr_insn_start(DisasContextBase *dcbase, CPUState *cpu)
{
    tcg_gen_insn_start(dcbase->pc_next);
}
static void alpha_tr_translate_insn(DisasContextBase *dcbase, CPUState *cpu)
{
    DisasContext *ctx = container_of(dcbase, DisasContext, base);
    uint32_t insn = translator_ldl(cpu_env(cpu), &ctx->base,
                                   ctx->base.pc_next);

    ctx->base.pc_next += 4;
    ctx->base.is_jmp = translate_one(ctx, insn);

    free_context_temps(ctx);
}
static void alpha_tr_tb_stop(DisasContextBase *dcbase, CPUState *cpu)
{
    DisasContext *ctx = container_of(dcbase, DisasContext, base);

    switch (ctx->base.is_jmp) {
    case DISAS_NORETURN:
        break;
    case DISAS_TOO_MANY:
        if (use_goto_tb(ctx, ctx->base.pc_next)) {
            tcg_gen_goto_tb(0);
            tcg_gen_movi_i64(cpu_pc, ctx->base.pc_next);
            tcg_gen_exit_tb(ctx->base.tb, 0);
        }
        /* FALLTHRU */
    case DISAS_PC_STALE:
        tcg_gen_movi_i64(cpu_pc, ctx->base.pc_next);
        /* FALLTHRU */
    case DISAS_PC_UPDATED:
        tcg_gen_lookup_and_goto_ptr();
        break;
    case DISAS_PC_UPDATED_NOCHAIN:
        tcg_gen_exit_tb(NULL, 0);
        break;
    default:
        g_assert_not_reached();
    }
}
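/*
 * The fall-through chain above is deliberate: DISAS_TOO_MANY first tries
 * direct chaining with goto_tb, DISAS_PC_STALE rematerializes the PC, and
 * both end in lookup_and_goto_ptr; DISAS_PC_UPDATED_NOCHAIN instead exits
 * to the main loop so that pending interrupts are recognized.
 */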
static void alpha_tr_disas_log(const DisasContextBase *dcbase,
                               CPUState *cpu, FILE *logfile)
{
    fprintf(logfile, "IN: %s\n", lookup_symbol(dcbase->pc_first));
    target_disas(logfile, cpu, dcbase->pc_first, dcbase->tb->size);
}
static const TranslatorOps alpha_tr_ops = {
    .init_disas_context = alpha_tr_init_disas_context,
    .tb_start           = alpha_tr_tb_start,
    .insn_start         = alpha_tr_insn_start,
    .translate_insn     = alpha_tr_translate_insn,
    .tb_stop            = alpha_tr_tb_stop,
    .disas_log          = alpha_tr_disas_log,
};

void gen_intermediate_code(CPUState *cpu, TranslationBlock *tb, int *max_insns,
                           vaddr pc, void *host_pc)
{
    DisasContext dc;
    translator_loop(cpu, tb, max_insns, pc, host_pc, &alpha_tr_ops, &dc.base);
}