/*
 * Alpha emulation cpu translation for qemu.
 *
 * Copyright (c) 2007 Jocelyn Mayer
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"
#include "cpu.h"
#include "sysemu/cpus.h"
#include "disas/disas.h"
#include "qemu/host-utils.h"
#include "exec/exec-all.h"
#include "tcg/tcg-op.h"
#include "exec/helper-proto.h"
#include "exec/helper-gen.h"
#include "exec/translator.h"

#define HELPER_H "helper.h"
#include "exec/helper-info.c.inc"
#undef  HELPER_H
#undef ALPHA_DEBUG_DISAS
#define CONFIG_SOFTFLOAT_INLINE

#ifdef ALPHA_DEBUG_DISAS
#  define LOG_DISAS(...) qemu_log_mask(CPU_LOG_TB_IN_ASM, ## __VA_ARGS__)
#else
#  define LOG_DISAS(...) do { } while (0)
#endif
typedef struct DisasContext DisasContext;
struct DisasContext {
    DisasContextBase base;

#ifdef CONFIG_USER_ONLY
    MemOp unalign;
#else
    uint64_t palbr;
#endif
    uint32_t tbflags;
    int mem_idx;

    /* implver and amask values for this CPU.  */
    int implver;
    int amask;

    /* Current rounding mode for this TB.  */
    int tb_rm;
    /* Current flush-to-zero setting for this TB.  */
    int tb_ftz;

    /* The set of registers active in the current context.  */
    TCGv *ir;

    /* Temporaries for $31 and $f31 as source and destination.  */
    TCGv zero;
    TCGv sink;
};

#ifdef CONFIG_USER_ONLY
#define UNALIGN(C)  (C)->unalign
#else
#define UNALIGN(C)  MO_ALIGN
#endif
/* Target-specific return values from translate_one, indicating the
   state of the TB.  Note that DISAS_NEXT indicates that we are not
   exiting the TB.  */
#define DISAS_PC_UPDATED_NOCHAIN  DISAS_TARGET_0
#define DISAS_PC_UPDATED          DISAS_TARGET_1
#define DISAS_PC_STALE            DISAS_TARGET_2
/* global register indexes */
static TCGv cpu_std_ir[31];
static TCGv cpu_fir[31];
static TCGv cpu_pc;
static TCGv cpu_lock_addr;
static TCGv cpu_lock_value;

#ifndef CONFIG_USER_ONLY
static TCGv cpu_pal_ir[31];
#endif
void alpha_translate_init(void)
{
#define DEF_VAR(V)  { &cpu_##V, #V, offsetof(CPUAlphaState, V) }

    typedef struct { TCGv *var; const char *name; int ofs; } GlobalVar;
    static const GlobalVar vars[] = {
        DEF_VAR(pc),
        DEF_VAR(lock_addr),
        DEF_VAR(lock_value),
    };

#undef DEF_VAR

    /* Use the symbolic register names that match the disassembler.  */
    static const char greg_names[31][4] = {
        "v0", "t0", "t1", "t2", "t3", "t4", "t5", "t6",
        "t7", "s0", "s1", "s2", "s3", "s4", "s5", "fp",
        "a0", "a1", "a2", "a3", "a4", "a5", "t8", "t9",
        "t10", "t11", "ra", "t12", "at", "gp", "sp"
    };
    static const char freg_names[31][4] = {
        "f0", "f1", "f2", "f3", "f4", "f5", "f6", "f7",
        "f8", "f9", "f10", "f11", "f12", "f13", "f14", "f15",
        "f16", "f17", "f18", "f19", "f20", "f21", "f22", "f23",
        "f24", "f25", "f26", "f27", "f28", "f29", "f30"
    };
#ifndef CONFIG_USER_ONLY
    static const char shadow_names[8][8] = {
        "pal_t7", "pal_s0", "pal_s1", "pal_s2",
        "pal_s3", "pal_s4", "pal_s5", "pal_t11"
    };
#endif

    int i;

    for (i = 0; i < 31; i++) {
        cpu_std_ir[i] = tcg_global_mem_new_i64(tcg_env,
                                               offsetof(CPUAlphaState, ir[i]),
                                               greg_names[i]);
    }

    for (i = 0; i < 31; i++) {
        cpu_fir[i] = tcg_global_mem_new_i64(tcg_env,
                                            offsetof(CPUAlphaState, fir[i]),
                                            freg_names[i]);
    }

#ifndef CONFIG_USER_ONLY
    memcpy(cpu_pal_ir, cpu_std_ir, sizeof(cpu_pal_ir));
    for (i = 0; i < 8; i++) {
        int r = (i == 7 ? 25 : i + 8);
        cpu_pal_ir[r] = tcg_global_mem_new_i64(tcg_env,
                                               offsetof(CPUAlphaState,
                                                        shadow[i]),
                                               shadow_names[i]);
    }
#endif

    for (i = 0; i < ARRAY_SIZE(vars); ++i) {
        const GlobalVar *v = &vars[i];
        *v->var = tcg_global_mem_new_i64(tcg_env, v->ofs, v->name);
    }
}
static TCGv load_zero(DisasContext *ctx)
{
    if (!ctx->zero) {
        ctx->zero = tcg_constant_i64(0);
    }
    return ctx->zero;
}

static TCGv dest_sink(DisasContext *ctx)
{
    if (!ctx->sink) {
        ctx->sink = tcg_temp_new();
    }
    return ctx->sink;
}

static void free_context_temps(DisasContext *ctx)
{
    if (ctx->sink) {
        tcg_gen_discard_i64(ctx->sink);
        ctx->sink = NULL;
    }
}
static TCGv load_gpr(DisasContext *ctx, unsigned reg)
{
    if (likely(reg < 31)) {
        return ctx->ir[reg];
    } else {
        return load_zero(ctx);
    }
}

static TCGv load_gpr_lit(DisasContext *ctx, unsigned reg,
                         uint8_t lit, bool islit)
{
    if (islit) {
        return tcg_constant_i64(lit);
    } else if (likely(reg < 31)) {
        return ctx->ir[reg];
    } else {
        return load_zero(ctx);
    }
}

static TCGv dest_gpr(DisasContext *ctx, unsigned reg)
{
    if (likely(reg < 31)) {
        return ctx->ir[reg];
    } else {
        return dest_sink(ctx);
    }
}

static TCGv load_fpr(DisasContext *ctx, unsigned reg)
{
    if (likely(reg < 31)) {
        return cpu_fir[reg];
    } else {
        return load_zero(ctx);
    }
}

static TCGv dest_fpr(DisasContext *ctx, unsigned reg)
{
    if (likely(reg < 31)) {
        return cpu_fir[reg];
    } else {
        return dest_sink(ctx);
    }
}
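
/* The helpers above encode the architected behavior of $31 and $f31:
   reads yield zero and writes are discarded.  For example, "ADDQ
   $31,$31,$0" resolves both sources via load_zero(), while an insn that
   writes to $31 gets the sink temporary, which free_context_temps()
   discards at the end of the instruction. */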
static int get_flag_ofs(unsigned shift)
{
    int ofs = offsetof(CPUAlphaState, flags);
#if HOST_BIG_ENDIAN
    ofs += 3 - (shift / 8);
#else
    ofs += shift / 8;
#endif
    return ofs;
}

static void ld_flag_byte(TCGv val, unsigned shift)
{
    tcg_gen_ld8u_i64(val, tcg_env, get_flag_ofs(shift));
}

static void st_flag_byte(TCGv val, unsigned shift)
{
    tcg_gen_st8_i64(val, tcg_env, get_flag_ofs(shift));
}
static void gen_excp_1(int exception, int error_code)
{
    TCGv_i32 tmp1, tmp2;

    tmp1 = tcg_constant_i32(exception);
    tmp2 = tcg_constant_i32(error_code);
    gen_helper_excp(tcg_env, tmp1, tmp2);
}

static DisasJumpType gen_excp(DisasContext *ctx, int exception, int error_code)
{
    tcg_gen_movi_i64(cpu_pc, ctx->base.pc_next);
    gen_excp_1(exception, error_code);
    return DISAS_NORETURN;
}

static inline DisasJumpType gen_invalid(DisasContext *ctx)
{
    return gen_excp(ctx, EXCP_OPCDEC, 0);
}
static void gen_ldf(DisasContext *ctx, TCGv dest, TCGv addr)
{
    TCGv_i32 tmp32 = tcg_temp_new_i32();
    tcg_gen_qemu_ld_i32(tmp32, addr, ctx->mem_idx, MO_LEUL | UNALIGN(ctx));
    gen_helper_memory_to_f(dest, tmp32);
}

static void gen_ldg(DisasContext *ctx, TCGv dest, TCGv addr)
{
    TCGv tmp = tcg_temp_new();
    tcg_gen_qemu_ld_i64(tmp, addr, ctx->mem_idx, MO_LEUQ | UNALIGN(ctx));
    gen_helper_memory_to_g(dest, tmp);
}

static void gen_lds(DisasContext *ctx, TCGv dest, TCGv addr)
{
    TCGv_i32 tmp32 = tcg_temp_new_i32();
    tcg_gen_qemu_ld_i32(tmp32, addr, ctx->mem_idx, MO_LEUL | UNALIGN(ctx));
    gen_helper_memory_to_s(dest, tmp32);
}

static void gen_ldt(DisasContext *ctx, TCGv dest, TCGv addr)
{
    tcg_gen_qemu_ld_i64(dest, addr, ctx->mem_idx, MO_LEUQ | UNALIGN(ctx));
}
static void gen_load_fp(DisasContext *ctx, int ra, int rb, int32_t disp16,
                        void (*func)(DisasContext *, TCGv, TCGv))
{
    /* Loads to $f31 are prefetches, which we can treat as nops.  */
    if (likely(ra != 31)) {
        TCGv addr = tcg_temp_new();
        tcg_gen_addi_i64(addr, load_gpr(ctx, rb), disp16);
        func(ctx, cpu_fir[ra], addr);
    }
}
static void gen_load_int(DisasContext *ctx, int ra, int rb, int32_t disp16,
                         MemOp op, bool clear, bool locked)
{
    TCGv addr, dest;

    /* LDQ_U with ra $31 is UNOP.  Other various loads are forms of
       prefetches, which we can treat as nops.  No worries about
       missed exceptions here.  */
    if (unlikely(ra == 31)) {
        return;
    }

    addr = tcg_temp_new();
    tcg_gen_addi_i64(addr, load_gpr(ctx, rb), disp16);
    if (clear) {
        tcg_gen_andi_i64(addr, addr, ~0x7);
    } else if (!locked) {
        op |= UNALIGN(ctx);
    }

    dest = ctx->ir[ra];
    tcg_gen_qemu_ld_i64(dest, addr, ctx->mem_idx, op);

    if (locked) {
        tcg_gen_mov_i64(cpu_lock_addr, addr);
        tcg_gen_mov_i64(cpu_lock_value, dest);
    }
}
static void gen_stf(DisasContext *ctx, TCGv src, TCGv addr)
{
    TCGv_i32 tmp32 = tcg_temp_new_i32();
    gen_helper_f_to_memory(tmp32, src);
    tcg_gen_qemu_st_i32(tmp32, addr, ctx->mem_idx, MO_LEUL | UNALIGN(ctx));
}

static void gen_stg(DisasContext *ctx, TCGv src, TCGv addr)
{
    TCGv tmp = tcg_temp_new();
    gen_helper_g_to_memory(tmp, src);
    tcg_gen_qemu_st_i64(tmp, addr, ctx->mem_idx, MO_LEUQ | UNALIGN(ctx));
}

static void gen_sts(DisasContext *ctx, TCGv src, TCGv addr)
{
    TCGv_i32 tmp32 = tcg_temp_new_i32();
    gen_helper_s_to_memory(tmp32, src);
    tcg_gen_qemu_st_i32(tmp32, addr, ctx->mem_idx, MO_LEUL | UNALIGN(ctx));
}

static void gen_stt(DisasContext *ctx, TCGv src, TCGv addr)
{
    tcg_gen_qemu_st_i64(src, addr, ctx->mem_idx, MO_LEUQ | UNALIGN(ctx));
}
static void gen_store_fp(DisasContext *ctx, int ra, int rb, int32_t disp16,
                         void (*func)(DisasContext *, TCGv, TCGv))
{
    TCGv addr = tcg_temp_new();
    tcg_gen_addi_i64(addr, load_gpr(ctx, rb), disp16);
    func(ctx, load_fpr(ctx, ra), addr);
}
static void gen_store_int(DisasContext *ctx, int ra, int rb, int32_t disp16,
                          MemOp op, bool clear)
{
    TCGv addr, src;

    addr = tcg_temp_new();
    tcg_gen_addi_i64(addr, load_gpr(ctx, rb), disp16);
    if (clear) {
        tcg_gen_andi_i64(addr, addr, ~0x7);
    } else {
        op |= UNALIGN(ctx);
    }

    src = load_gpr(ctx, ra);
    tcg_gen_qemu_st_i64(src, addr, ctx->mem_idx, op);
}
static DisasJumpType gen_store_conditional(DisasContext *ctx, int ra, int rb,
                                           int32_t disp16, int mem_idx,
                                           MemOp op)
{
    TCGLabel *lab_fail, *lab_done;
    TCGv addr, val;

    addr = tcg_temp_new_i64();
    tcg_gen_addi_i64(addr, load_gpr(ctx, rb), disp16);
    free_context_temps(ctx);

    lab_fail = gen_new_label();
    lab_done = gen_new_label();
    tcg_gen_brcond_i64(TCG_COND_NE, addr, cpu_lock_addr, lab_fail);

    val = tcg_temp_new_i64();
    tcg_gen_atomic_cmpxchg_i64(val, cpu_lock_addr, cpu_lock_value,
                               load_gpr(ctx, ra), mem_idx, op);
    free_context_temps(ctx);

    if (ra != 31) {
        tcg_gen_setcond_i64(TCG_COND_EQ, ctx->ir[ra], val, cpu_lock_value);
    }
    tcg_gen_br(lab_done);

    gen_set_label(lab_fail);
    if (ra != 31) {
        tcg_gen_movi_i64(ctx->ir[ra], 0);
    }

    gen_set_label(lab_done);
    tcg_gen_movi_i64(cpu_lock_addr, -1);
    return DISAS_NEXT;
}
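
/* The LL/SC pair is emulated with a compare-and-swap: the LDx_L side
   records the address and loaded data in cpu_lock_addr/cpu_lock_value,
   and the STx_C above succeeds iff memory still holds that remembered
   value.  Note this is slightly weaker than real hardware, since an
   intervening write sequence that restores the old value (ABA) is not
   detected. */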
static bool use_goto_tb(DisasContext *ctx, uint64_t dest)
{
    return translator_use_goto_tb(&ctx->base, dest);
}
static DisasJumpType gen_bdirect(DisasContext *ctx, int ra, int32_t disp)
{
    uint64_t dest = ctx->base.pc_next + (disp << 2);

    if (ra != 31) {
        tcg_gen_movi_i64(ctx->ir[ra], ctx->base.pc_next);
    }

    /* Notice branch-to-next; used to initialize RA with the PC.  */
    if (disp == 0) {
        return DISAS_NEXT;
    } else if (use_goto_tb(ctx, dest)) {
        tcg_gen_goto_tb(0);
        tcg_gen_movi_i64(cpu_pc, dest);
        tcg_gen_exit_tb(ctx->base.tb, 0);
        return DISAS_NORETURN;
    } else {
        tcg_gen_movi_i64(cpu_pc, dest);
        return DISAS_PC_UPDATED;
    }
}
static DisasJumpType gen_bcond_internal(DisasContext *ctx, TCGCond cond,
                                        TCGv cmp, int32_t disp)
{
    uint64_t dest = ctx->base.pc_next + (disp << 2);
    TCGLabel *lab_true = gen_new_label();

    if (use_goto_tb(ctx, dest)) {
        tcg_gen_brcondi_i64(cond, cmp, 0, lab_true);

        tcg_gen_goto_tb(0);
        tcg_gen_movi_i64(cpu_pc, ctx->base.pc_next);
        tcg_gen_exit_tb(ctx->base.tb, 0);

        gen_set_label(lab_true);
        tcg_gen_goto_tb(1);
        tcg_gen_movi_i64(cpu_pc, dest);
        tcg_gen_exit_tb(ctx->base.tb, 1);

        return DISAS_NORETURN;
    } else {
        TCGv_i64 z = load_zero(ctx);
        TCGv_i64 d = tcg_constant_i64(dest);
        TCGv_i64 p = tcg_constant_i64(ctx->base.pc_next);

        tcg_gen_movcond_i64(cond, cpu_pc, cmp, z, d, p);
        return DISAS_PC_UPDATED;
    }
}
static DisasJumpType gen_bcond(DisasContext *ctx, TCGCond cond, int ra,
                               int32_t disp, int mask)
{
    if (mask) {
        TCGv tmp = tcg_temp_new();
        DisasJumpType ret;

        tcg_gen_andi_i64(tmp, load_gpr(ctx, ra), 1);
        ret = gen_bcond_internal(ctx, cond, tmp, disp);
        return ret;
    }
    return gen_bcond_internal(ctx, cond, load_gpr(ctx, ra), disp);
}
/* Fold -0.0 for comparison with COND.  */

static void gen_fold_mzero(TCGCond cond, TCGv dest, TCGv src)
{
    uint64_t mzero = 1ull << 63;

    switch (cond) {
    case TCG_COND_LE:
    case TCG_COND_GT:
        /* For <= or >, the -0.0 value directly compares the way we want.  */
        tcg_gen_mov_i64(dest, src);
        break;

    case TCG_COND_EQ:
    case TCG_COND_NE:
        /* For == or !=, we can simply mask off the sign bit and compare.  */
        tcg_gen_andi_i64(dest, src, mzero - 1);
        break;

    case TCG_COND_GE:
    case TCG_COND_LT:
        /* For >= or <, map -0.0 to +0.0.  */
        tcg_gen_movcond_i64(TCG_COND_NE, dest, src, tcg_constant_i64(mzero),
                            src, tcg_constant_i64(0));
        break;

    default:
        abort();
    }
}
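
/* Concretely, -0.0 has the bit pattern 0x8000000000000000.  The branch
   and cmov expanders compare the folded value against integer zero, so
   masking with (mzero - 1) for EQ/NE maps both +0.0 and -0.0 to 0 while
   leaving every other operand nonzero, which is exactly the required
   compare-against-zero behavior. */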
static DisasJumpType gen_fbcond(DisasContext *ctx, TCGCond cond, int ra,
                                int32_t disp)
{
    TCGv cmp_tmp = tcg_temp_new();
    DisasJumpType ret;

    gen_fold_mzero(cond, cmp_tmp, load_fpr(ctx, ra));
    ret = gen_bcond_internal(ctx, cond, cmp_tmp, disp);
    return ret;
}
static void gen_fcmov(DisasContext *ctx, TCGCond cond, int ra, int rb, int rc)
{
    TCGv va, vb, z;

    z = load_zero(ctx);
    vb = load_fpr(ctx, rb);
    va = tcg_temp_new();
    gen_fold_mzero(cond, va, load_fpr(ctx, ra));

    tcg_gen_movcond_i64(cond, dest_fpr(ctx, rc), va, z, vb, load_fpr(ctx, rc));
}
#define QUAL_RM_N       0x080   /* Round mode nearest even */
#define QUAL_RM_C       0x000   /* Round mode chopped */
#define QUAL_RM_M       0x040   /* Round mode minus infinity */
#define QUAL_RM_D       0x0c0   /* Round mode dynamic */
#define QUAL_RM_MASK    0x0c0

#define QUAL_U          0x100   /* Underflow enable (fp output) */
#define QUAL_V          0x100   /* Overflow enable (int output) */
#define QUAL_S          0x400   /* Software completion enable */
#define QUAL_I          0x200   /* Inexact detection enable */
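
/* These qualifiers occupy the high bits of fn11 in the floating-point
   operate format, with the function proper in the low six bits (fpfn).
   As a worked example (assuming the standard OSF/1 encodings), ADDS/SUI
   is fn11 == 0x780: QUAL_S | QUAL_I | QUAL_U | QUAL_RM_N over
   fpfn == 0x00. */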
static void gen_qual_roundmode(DisasContext *ctx, int fn11)
{
    TCGv_i32 tmp;

    fn11 &= QUAL_RM_MASK;
    if (fn11 == ctx->tb_rm) {
        return;
    }
    ctx->tb_rm = fn11;

    tmp = tcg_temp_new_i32();
    switch (fn11) {
    case QUAL_RM_N:
        tcg_gen_movi_i32(tmp, float_round_nearest_even);
        break;
    case QUAL_RM_C:
        tcg_gen_movi_i32(tmp, float_round_to_zero);
        break;
    case QUAL_RM_M:
        tcg_gen_movi_i32(tmp, float_round_down);
        break;
    case QUAL_RM_D:
        tcg_gen_ld8u_i32(tmp, tcg_env,
                         offsetof(CPUAlphaState, fpcr_dyn_round));
        break;
    }

#if defined(CONFIG_SOFTFLOAT_INLINE)
    /* ??? The "fpu/softfloat.h" interface is to call set_float_rounding_mode.
       With CONFIG_SOFTFLOAT that expands to an out-of-line call that just
       sets the one field.  */
    tcg_gen_st8_i32(tmp, tcg_env,
                    offsetof(CPUAlphaState, fp_status.float_rounding_mode));
#else
    gen_helper_setroundmode(tmp);
#endif
}
static void gen_qual_flushzero(DisasContext *ctx, int fn11)
{
    TCGv_i32 tmp;

    fn11 &= QUAL_U;
    if (fn11 == ctx->tb_ftz) {
        return;
    }
    ctx->tb_ftz = fn11;

    tmp = tcg_temp_new_i32();
    if (fn11) {
        /* Underflow is enabled, use the FPCR setting.  */
        tcg_gen_ld8u_i32(tmp, tcg_env,
                         offsetof(CPUAlphaState, fpcr_flush_to_zero));
    } else {
        /* Underflow is disabled, force flush-to-zero.  */
        tcg_gen_movi_i32(tmp, 1);
    }

#if defined(CONFIG_SOFTFLOAT_INLINE)
    tcg_gen_st8_i32(tmp, tcg_env,
                    offsetof(CPUAlphaState, fp_status.flush_to_zero));
#else
    gen_helper_setflushzero(tmp);
#endif
}
static TCGv gen_ieee_input(DisasContext *ctx, int reg, int fn11, int is_cmp)
{
    TCGv val;

    if (unlikely(reg == 31)) {
        val = load_zero(ctx);
    } else {
        val = cpu_fir[reg];
        if ((fn11 & QUAL_S) == 0) {
            if (is_cmp) {
                gen_helper_ieee_input_cmp(tcg_env, val);
            } else {
                gen_helper_ieee_input(tcg_env, val);
            }
        } else {
#ifndef CONFIG_USER_ONLY
            /* In system mode, raise exceptions for denormals like real
               hardware.  In user mode, proceed as if the OS completion
               handler is handling the denormal as per spec.  */
            gen_helper_ieee_input_s(tcg_env, val);
#endif
        }
    }
    return val;
}
static void gen_fp_exc_raise(int rc, int fn11)
{
    /* ??? We ought to be able to do something with imprecise exceptions.
       E.g. notice we're still in the trap shadow of something within the
       TB and do not generate the code to signal the exception; end the TB
       when an exception is forced to arrive, either by consumption of a
       register value or TRAPB or EXCB.  */
    TCGv_i32 reg, ign;
    uint32_t ignore = 0;

    if (!(fn11 & QUAL_U)) {
        /* Note that QUAL_U == QUAL_V, so ignore either.  */
        ignore |= FPCR_UNF | FPCR_IOV;
    }
    if (!(fn11 & QUAL_I)) {
        ignore |= FPCR_INE;
    }
    ign = tcg_constant_i32(ignore);

    /* ??? Pass in the regno of the destination so that the helper can
       set EXC_MASK, which contains a bitmask of destination registers
       that have caused arithmetic traps.  A simple userspace emulation
       does not require this.  We do need it for a guest kernel's entArith,
       or if we were to do something clever with imprecise exceptions.  */
    reg = tcg_constant_i32(rc + 32);
    if (fn11 & QUAL_S) {
        gen_helper_fp_exc_raise_s(tcg_env, ign, reg);
    } else {
        gen_helper_fp_exc_raise(tcg_env, ign, reg);
    }
}
static void gen_cvtlq(TCGv vc, TCGv vb)
{
    TCGv tmp = tcg_temp_new();

    /* The arithmetic right shift here, plus the sign-extended mask below
       yields a sign-extended result without an explicit ext32s_i64.  */
    tcg_gen_shri_i64(tmp, vb, 29);
    tcg_gen_sari_i64(vc, vb, 32);
    tcg_gen_deposit_i64(vc, vc, tmp, 0, 30);
}
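
/* The longword sits in the FP register in "memory format": its bits
   <31:30> live at <63:62> and bits <29:0> at <58:29>.  The logical shift
   recovers the low 30 bits, the arithmetic shift recovers the high bits
   with sign replication, and the deposit stitches the two together. */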
static void gen_ieee_arith2(DisasContext *ctx,
                            void (*helper)(TCGv, TCGv_ptr, TCGv),
                            int rb, int rc, int fn11)
{
    TCGv vb;

    gen_qual_roundmode(ctx, fn11);
    gen_qual_flushzero(ctx, fn11);

    vb = gen_ieee_input(ctx, rb, fn11, 0);
    helper(dest_fpr(ctx, rc), tcg_env, vb);

    gen_fp_exc_raise(rc, fn11);
}

#define IEEE_ARITH2(name)                                       \
static inline void glue(gen_, name)(DisasContext *ctx,          \
                                    int rb, int rc, int fn11)   \
{                                                               \
    gen_ieee_arith2(ctx, gen_helper_##name, rb, rc, fn11);      \
}
IEEE_ARITH2(sqrts)
IEEE_ARITH2(sqrtt)
IEEE_ARITH2(cvtst)
IEEE_ARITH2(cvtts)
static void gen_cvttq(DisasContext *ctx, int rb, int rc, int fn11)
{
    TCGv vb, vc;

    /* No need to set flushzero, since we have an integer output.  */
    vb = gen_ieee_input(ctx, rb, fn11, 0);
    vc = dest_fpr(ctx, rc);

    /* Almost all integer conversions use cropped rounding;
       special case that.  */
    if ((fn11 & QUAL_RM_MASK) == QUAL_RM_C) {
        gen_helper_cvttq_c(vc, tcg_env, vb);
    } else {
        gen_qual_roundmode(ctx, fn11);
        gen_helper_cvttq(vc, tcg_env, vb);
    }
    gen_fp_exc_raise(rc, fn11);
}
static void gen_ieee_intcvt(DisasContext *ctx,
                            void (*helper)(TCGv, TCGv_ptr, TCGv),
                            int rb, int rc, int fn11)
{
    TCGv vb, vc;

    gen_qual_roundmode(ctx, fn11);
    vb = load_fpr(ctx, rb);
    vc = dest_fpr(ctx, rc);

    /* The only exception that can be raised by integer conversion
       is inexact.  Thus we only need to worry about exceptions when
       inexact handling is requested.  */
    if (fn11 & QUAL_I) {
        helper(vc, tcg_env, vb);
        gen_fp_exc_raise(rc, fn11);
    } else {
        helper(vc, tcg_env, vb);
    }
}

#define IEEE_INTCVT(name)                                       \
static inline void glue(gen_, name)(DisasContext *ctx,          \
                                    int rb, int rc, int fn11)   \
{                                                               \
    gen_ieee_intcvt(ctx, gen_helper_##name, rb, rc, fn11);      \
}
IEEE_INTCVT(cvtqs)
IEEE_INTCVT(cvtqt)
static void gen_cpy_mask(TCGv vc, TCGv va, TCGv vb, bool inv_a, uint64_t mask)
{
    TCGv vmask = tcg_constant_i64(mask);
    TCGv tmp = tcg_temp_new_i64();

    if (inv_a) {
        tcg_gen_andc_i64(tmp, vmask, va);
    } else {
        tcg_gen_and_i64(tmp, va, vmask);
    }

    tcg_gen_andc_i64(vc, vb, vmask);
    tcg_gen_or_i64(vc, vc, tmp);
}
static void gen_ieee_arith3(DisasContext *ctx,
                            void (*helper)(TCGv, TCGv_ptr, TCGv, TCGv),
                            int ra, int rb, int rc, int fn11)
{
    TCGv va, vb, vc;

    gen_qual_roundmode(ctx, fn11);
    gen_qual_flushzero(ctx, fn11);

    va = gen_ieee_input(ctx, ra, fn11, 0);
    vb = gen_ieee_input(ctx, rb, fn11, 0);
    vc = dest_fpr(ctx, rc);
    helper(vc, tcg_env, va, vb);

    gen_fp_exc_raise(rc, fn11);
}

#define IEEE_ARITH3(name)                                               \
static inline void glue(gen_, name)(DisasContext *ctx,                  \
                                    int ra, int rb, int rc, int fn11)   \
{                                                                       \
    gen_ieee_arith3(ctx, gen_helper_##name, ra, rb, rc, fn11);          \
}
IEEE_ARITH3(adds)
IEEE_ARITH3(subs)
IEEE_ARITH3(muls)
IEEE_ARITH3(divs)
IEEE_ARITH3(addt)
IEEE_ARITH3(subt)
IEEE_ARITH3(mult)
IEEE_ARITH3(divt)
static void gen_ieee_compare(DisasContext *ctx,
                             void (*helper)(TCGv, TCGv_ptr, TCGv, TCGv),
                             int ra, int rb, int rc, int fn11)
{
    TCGv va, vb, vc;

    va = gen_ieee_input(ctx, ra, fn11, 1);
    vb = gen_ieee_input(ctx, rb, fn11, 1);
    vc = dest_fpr(ctx, rc);
    helper(vc, tcg_env, va, vb);

    gen_fp_exc_raise(rc, fn11);
}

#define IEEE_CMP3(name)                                                 \
static inline void glue(gen_, name)(DisasContext *ctx,                  \
                                    int ra, int rb, int rc, int fn11)   \
{                                                                       \
    gen_ieee_compare(ctx, gen_helper_##name, ra, rb, rc, fn11);         \
}
IEEE_CMP3(cmptun)
IEEE_CMP3(cmpteq)
IEEE_CMP3(cmptlt)
IEEE_CMP3(cmptle)
static inline uint64_t zapnot_mask(uint8_t lit)
{
    uint64_t mask = 0;
    int i;

    for (i = 0; i < 8; ++i) {
        if ((lit >> i) & 1) {
            mask |= 0xffull << (i * 8);
        }
    }
    return mask;
}
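
/* Worked examples: zapnot_mask(0x0f) == 0x00000000ffffffffull and
   zapnot_mask(0x81) == 0xff000000000000ffull; each set bit in LIT
   preserves the corresponding byte of the operand. */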
/* Implement zapnot with an immediate operand, which expands to some
   form of immediate AND.  This is a basic building block in the
   definition of many of the other byte manipulation instructions.  */
static void gen_zapnoti(TCGv dest, TCGv src, uint8_t lit)
{
    switch (lit) {
    case 0x00:
        tcg_gen_movi_i64(dest, 0);
        break;
    case 0x01:
        tcg_gen_ext8u_i64(dest, src);
        break;
    case 0x03:
        tcg_gen_ext16u_i64(dest, src);
        break;
    case 0x0f:
        tcg_gen_ext32u_i64(dest, src);
        break;
    case 0xff:
        tcg_gen_mov_i64(dest, src);
        break;
    default:
        tcg_gen_andi_i64(dest, src, zapnot_mask(lit));
        break;
    }
}
/* EXTWH, EXTLH, EXTQH */
static void gen_ext_h(DisasContext *ctx, TCGv vc, TCGv va, int rb, bool islit,
                      uint8_t lit, uint8_t byte_mask)
{
    if (islit) {
        int pos = (64 - lit * 8) & 0x3f;
        int len = cto32(byte_mask) * 8;
        if (pos < len) {
            tcg_gen_deposit_z_i64(vc, va, pos, len - pos);
        } else {
            tcg_gen_movi_i64(vc, 0);
        }
    } else {
        TCGv tmp = tcg_temp_new();
        tcg_gen_shli_i64(tmp, load_gpr(ctx, rb), 3);
        tcg_gen_neg_i64(tmp, tmp);
        tcg_gen_andi_i64(tmp, tmp, 0x3f);
        tcg_gen_shl_i64(vc, va, tmp);
    }
    gen_zapnoti(vc, vc, byte_mask);
}
/* EXTBL, EXTWL, EXTLL, EXTQL */
static void gen_ext_l(DisasContext *ctx, TCGv vc, TCGv va, int rb, bool islit,
                      uint8_t lit, uint8_t byte_mask)
{
    if (islit) {
        int pos = (lit & 7) * 8;
        int len = cto32(byte_mask) * 8;
        if (pos + len >= 64) {
            len = 64 - pos;
        }
        tcg_gen_extract_i64(vc, va, pos, len);
    } else {
        TCGv tmp = tcg_temp_new();
        tcg_gen_andi_i64(tmp, load_gpr(ctx, rb), 7);
        tcg_gen_shli_i64(tmp, tmp, 3);
        tcg_gen_shr_i64(vc, va, tmp);
        gen_zapnoti(vc, vc, byte_mask);
    }
}
/* INSWH, INSLH, INSQH */
static void gen_ins_h(DisasContext *ctx, TCGv vc, TCGv va, int rb, bool islit,
                      uint8_t lit, uint8_t byte_mask)
{
    if (islit) {
        int pos = 64 - (lit & 7) * 8;
        int len = cto32(byte_mask) * 8;
        if (pos < len) {
            tcg_gen_extract_i64(vc, va, pos, len - pos);
        } else {
            tcg_gen_movi_i64(vc, 0);
        }
    } else {
        TCGv tmp = tcg_temp_new();
        TCGv shift = tcg_temp_new();

        /* The instruction description has us left-shift the byte mask
           and extract bits <15:8> and apply that zap at the end.  This
           is equivalent to simply performing the zap first and shifting
           afterward.  */
        gen_zapnoti(tmp, va, byte_mask);

        /* If (B & 7) == 0, we need to shift by 64 and leave a zero.  Do this
           portably by splitting the shift into two parts: shift_count-1 and 1.
           Arrange for the -1 by using ones-complement instead of
           twos-complement in the negation: ~(B * 8) & 63.  */
        tcg_gen_shli_i64(shift, load_gpr(ctx, rb), 3);
        tcg_gen_not_i64(shift, shift);
        tcg_gen_andi_i64(shift, shift, 0x3f);

        tcg_gen_shr_i64(vc, tmp, shift);
        tcg_gen_shri_i64(vc, vc, 1);
    }
}
/* INSBL, INSWL, INSLL, INSQL */
static void gen_ins_l(DisasContext *ctx, TCGv vc, TCGv va, int rb, bool islit,
                      uint8_t lit, uint8_t byte_mask)
{
    if (islit) {
        int pos = (lit & 7) * 8;
        int len = cto32(byte_mask) * 8;
        if (pos + len > 64) {
            len = 64 - pos;
        }
        tcg_gen_deposit_z_i64(vc, va, pos, len);
    } else {
        TCGv tmp = tcg_temp_new();
        TCGv shift = tcg_temp_new();

        /* The instruction description has us left-shift the byte mask
           and extract bits <15:8> and apply that zap at the end.  This
           is equivalent to simply performing the zap first and shifting
           afterward.  */
        gen_zapnoti(tmp, va, byte_mask);

        tcg_gen_andi_i64(shift, load_gpr(ctx, rb), 7);
        tcg_gen_shli_i64(shift, shift, 3);
        tcg_gen_shl_i64(vc, tmp, shift);
    }
}
/* MSKWH, MSKLH, MSKQH */
static void gen_msk_h(DisasContext *ctx, TCGv vc, TCGv va, int rb, bool islit,
                      uint8_t lit, uint8_t byte_mask)
{
    if (islit) {
        gen_zapnoti(vc, va, ~((byte_mask << (lit & 7)) >> 8));
    } else {
        TCGv shift = tcg_temp_new();
        TCGv mask = tcg_temp_new();

        /* The instruction description is as above, where the byte_mask
           is shifted left, and then we extract bits <15:8>.  This can be
           emulated with a right-shift on the expanded byte mask.  This
           requires extra care because for an input <2:0> == 0 we need a
           shift of 64 bits in order to generate a zero.  This is done by
           splitting the shift into two parts, the variable shift - 1
           followed by a constant 1 shift.  The code we expand below is
           equivalent to ~(B * 8) & 63.  */
        tcg_gen_shli_i64(shift, load_gpr(ctx, rb), 3);
        tcg_gen_not_i64(shift, shift);
        tcg_gen_andi_i64(shift, shift, 0x3f);
        tcg_gen_movi_i64(mask, zapnot_mask(byte_mask));
        tcg_gen_shr_i64(mask, mask, shift);
        tcg_gen_shri_i64(mask, mask, 1);

        tcg_gen_andc_i64(vc, va, mask);
    }
}
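
/* Checking the shift arithmetic above: for (B & 7) == b, the computed
   shift is ~(8 * b) & 63 == 63 - 8 * b, and with the final constant
   shift of 1 the mask moves right by 64 - 8 * b bits in total.  For
   b == 0 that is a full 64-bit shift, producing the required zero mask,
   which a single variable TCG shift could not express portably. */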
/* MSKBL, MSKWL, MSKLL, MSKQL */
static void gen_msk_l(DisasContext *ctx, TCGv vc, TCGv va, int rb, bool islit,
                      uint8_t lit, uint8_t byte_mask)
{
    if (islit) {
        gen_zapnoti(vc, va, ~(byte_mask << (lit & 7)));
    } else {
        TCGv shift = tcg_temp_new();
        TCGv mask = tcg_temp_new();

        tcg_gen_andi_i64(shift, load_gpr(ctx, rb), 7);
        tcg_gen_shli_i64(shift, shift, 3);
        tcg_gen_movi_i64(mask, zapnot_mask(byte_mask));
        tcg_gen_shl_i64(mask, mask, shift);

        tcg_gen_andc_i64(vc, va, mask);
    }
}
static void gen_rx(DisasContext *ctx, int ra, int set)
{
    if (ra != 31) {
        ld_flag_byte(ctx->ir[ra], ENV_FLAG_RX_SHIFT);
    }

    st_flag_byte(tcg_constant_i64(set), ENV_FLAG_RX_SHIFT);
}
static DisasJumpType gen_call_pal(DisasContext *ctx, int palcode)
{
    /* We're emulating OSF/1 PALcode.  Many of these are trivial access
       to internal cpu registers.  */

    /* Unprivileged PAL call */
    if (palcode >= 0x80 && palcode < 0xC0) {
        switch (palcode) {
        case 0x86:
            /* IMB */
            /* No-op inside QEMU.  */
            break;
        case 0x9E:
            /* RDUNIQUE */
            tcg_gen_ld_i64(ctx->ir[IR_V0], tcg_env,
                           offsetof(CPUAlphaState, unique));
            break;
        case 0x9F:
            /* WRUNIQUE */
            tcg_gen_st_i64(ctx->ir[IR_A0], tcg_env,
                           offsetof(CPUAlphaState, unique));
            break;
        default:
            palcode &= 0xbf;
            goto do_call_pal;
        }
        return DISAS_NEXT;
    }

#ifndef CONFIG_USER_ONLY
    /* Privileged PAL code */
    if (palcode < 0x40 && (ctx->tbflags & ENV_FLAG_PS_USER) == 0) {
        switch (palcode) {
        case 0x01:
            /* CFLUSH */
            /* No-op inside QEMU.  */
            break;
        case 0x02:
            /* DRAINA */
            /* No-op inside QEMU.  */
            break;
        case 0x2D:
            /* WRVPTPTR */
            tcg_gen_st_i64(ctx->ir[IR_A0], tcg_env,
                           offsetof(CPUAlphaState, vptptr));
            break;
        case 0x31:
            /* WRVAL */
            tcg_gen_st_i64(ctx->ir[IR_A0], tcg_env,
                           offsetof(CPUAlphaState, sysval));
            break;
        case 0x32:
            /* RDVAL */
            tcg_gen_ld_i64(ctx->ir[IR_V0], tcg_env,
                           offsetof(CPUAlphaState, sysval));
            break;

        case 0x35:
            /* SWPIPL */
            /* Note that we already know we're in kernel mode, so we know
               that PS only contains the 3 IPL bits.  */
            ld_flag_byte(ctx->ir[IR_V0], ENV_FLAG_PS_SHIFT);

            /* But make sure and store only the 3 IPL bits from the user.  */
            {
                TCGv tmp = tcg_temp_new();
                tcg_gen_andi_i64(tmp, ctx->ir[IR_A0], PS_INT_MASK);
                st_flag_byte(tmp, ENV_FLAG_PS_SHIFT);
            }

            /* Allow interrupts to be recognized right away.  */
            tcg_gen_movi_i64(cpu_pc, ctx->base.pc_next);
            return DISAS_PC_UPDATED_NOCHAIN;

        case 0x36:
            /* RDPS */
            ld_flag_byte(ctx->ir[IR_V0], ENV_FLAG_PS_SHIFT);
            break;

        case 0x38:
            /* WRUSP */
            tcg_gen_st_i64(ctx->ir[IR_A0], tcg_env,
                           offsetof(CPUAlphaState, usp));
            break;
        case 0x3A:
            /* RDUSP */
            tcg_gen_ld_i64(ctx->ir[IR_V0], tcg_env,
                           offsetof(CPUAlphaState, usp));
            break;
        case 0x3C:
            /* WHAMI */
            tcg_gen_ld32s_i64(ctx->ir[IR_V0], tcg_env,
                -offsetof(AlphaCPU, env) + offsetof(CPUState, cpu_index));
            break;

        case 0x3E:
            /* WTINT */
            tcg_gen_st_i32(tcg_constant_i32(1), tcg_env,
                           -offsetof(AlphaCPU, env) +
                           offsetof(CPUState, halted));
            tcg_gen_movi_i64(ctx->ir[IR_V0], 0);
            return gen_excp(ctx, EXCP_HALTED, 0);

        default:
            palcode &= 0x3f;
            goto do_call_pal;
        }
        return DISAS_NEXT;
    }
#endif
    return gen_invalid(ctx);

 do_call_pal:
#ifdef CONFIG_USER_ONLY
    return gen_excp(ctx, EXCP_CALL_PAL, palcode);
#else
    {
        TCGv tmp = tcg_temp_new();
        uint64_t exc_addr = ctx->base.pc_next;
        uint64_t entry = ctx->palbr;

        if (ctx->tbflags & ENV_FLAG_PAL_MODE) {
            exc_addr |= 1;
        } else {
            tcg_gen_movi_i64(tmp, 1);
            st_flag_byte(tmp, ENV_FLAG_PAL_SHIFT);
        }

        tcg_gen_movi_i64(tmp, exc_addr);
        tcg_gen_st_i64(tmp, tcg_env, offsetof(CPUAlphaState, exc_addr));

        entry += (palcode & 0x80
                  ? 0x2000 + (palcode - 0x80) * 64
                  : 0x1000 + palcode * 64);

        tcg_gen_movi_i64(cpu_pc, entry);
        return DISAS_PC_UPDATED;
    }
#endif
}
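
/* Entry-point arithmetic example for do_call_pal above: the unprivileged
   CALL_PAL 0x83 (callsys under OSF/1) vectors to
   palbr + 0x2000 + (0x83 - 0x80) * 64, while a privileged code such as
   0x35 (swpipl), had it not been handled inline, would vector to
   palbr + 0x1000 + 0x35 * 64. */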
#ifndef CONFIG_USER_ONLY

#define PR_LONG         0x200000

static int cpu_pr_data(int pr)
{
    switch (pr) {
    case  2: return offsetof(CPUAlphaState, pcc_ofs) | PR_LONG;
    case  3: return offsetof(CPUAlphaState, trap_arg0);
    case  4: return offsetof(CPUAlphaState, trap_arg1);
    case  5: return offsetof(CPUAlphaState, trap_arg2);
    case  6: return offsetof(CPUAlphaState, exc_addr);
    case  7: return offsetof(CPUAlphaState, palbr);
    case  8: return offsetof(CPUAlphaState, ptbr);
    case  9: return offsetof(CPUAlphaState, vptptr);
    case 10: return offsetof(CPUAlphaState, unique);
    case 11: return offsetof(CPUAlphaState, sysval);
    case 12: return offsetof(CPUAlphaState, usp);

    case 40 ... 63:
        return offsetof(CPUAlphaState, scratch[pr - 40]);

    case 251:
        return offsetof(CPUAlphaState, alarm_expire);
    }
    return 0;
}
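
/* PR_LONG tags processor-register offsets whose backing field in
   CPUAlphaState is 32 bits wide, so that gen_mfpr/gen_mtpr below select
   the 32-bit load/store variants; the bit value is safely larger than
   any offsetof() into CPUAlphaState. */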
static DisasJumpType gen_mfpr(DisasContext *ctx, TCGv va, int regno)
{
    void (*helper)(TCGv);
    int data;

    switch (regno) {
    case 32 ... 39:
        /* Accessing the "non-shadow" general registers.  */
        regno = regno == 39 ? 25 : regno - 32 + 8;
        tcg_gen_mov_i64(va, cpu_std_ir[regno]);
        break;

    case 250: /* WALLTIME */
        helper = gen_helper_get_walltime;
        goto do_helper;
    case 249: /* VMTIME */
        helper = gen_helper_get_vmtime;
    do_helper:
        if (translator_io_start(&ctx->base)) {
            helper(va);
            return DISAS_PC_STALE;
        } else {
            helper(va);
        }
        break;

    case 0: /* PS */
        ld_flag_byte(va, ENV_FLAG_PS_SHIFT);
        break;
    case 1: /* FEN */
        ld_flag_byte(va, ENV_FLAG_FEN_SHIFT);
        break;

    default:
        /* The basic registers are data only, and unknown registers
           are read-zero, write-ignore.  */
        data = cpu_pr_data(regno);
        if (data == 0) {
            tcg_gen_movi_i64(va, 0);
        } else if (data & PR_LONG) {
            tcg_gen_ld32s_i64(va, tcg_env, data & ~PR_LONG);
        } else {
            tcg_gen_ld_i64(va, tcg_env, data);
        }
        break;
    }

    return DISAS_NEXT;
}
static DisasJumpType gen_mtpr(DisasContext *ctx, TCGv vb, int regno)
{
    int data;
    DisasJumpType ret = DISAS_NEXT;

    switch (regno) {
    case 255:
        /* TBIA */
        gen_helper_tbia(tcg_env);
        break;

    case 254:
        /* TBIS */
        gen_helper_tbis(tcg_env, vb);
        break;

    case 253:
        /* WAIT */
        tcg_gen_st_i32(tcg_constant_i32(1), tcg_env,
                       -offsetof(AlphaCPU, env) + offsetof(CPUState, halted));
        return gen_excp(ctx, EXCP_HALTED, 0);

    case 252:
        /* HALT */
        gen_helper_halt(vb);
        return DISAS_PC_STALE;

    case 251:
        /* ALARM */
        if (translator_io_start(&ctx->base)) {
            ret = DISAS_PC_STALE;
        }
        gen_helper_set_alarm(tcg_env, vb);
        break;

    case 7:
        /* PALBR */
        tcg_gen_st_i64(vb, tcg_env, offsetof(CPUAlphaState, palbr));
        /* Changing the PAL base register implies un-chaining all of the TBs
           that ended with a CALL_PAL.  Since the base register usually only
           changes during boot, flushing everything works well.  */
        gen_helper_tb_flush(tcg_env);
        return DISAS_PC_STALE;

    case 32 ... 39:
        /* Accessing the "non-shadow" general registers.  */
        regno = regno == 39 ? 25 : regno - 32 + 8;
        tcg_gen_mov_i64(cpu_std_ir[regno], vb);
        break;

    case 0: /* PS */
        st_flag_byte(vb, ENV_FLAG_PS_SHIFT);
        break;
    case 1: /* FEN */
        st_flag_byte(vb, ENV_FLAG_FEN_SHIFT);
        break;

    default:
        /* The basic registers are data only, and unknown registers
           are read-zero, write-ignore.  */
        data = cpu_pr_data(regno);
        if (data != 0) {
            if (data & PR_LONG) {
                tcg_gen_st32_i64(vb, tcg_env, data & ~PR_LONG);
            } else {
                tcg_gen_st_i64(vb, tcg_env, data);
            }
        }
        break;
    }

    return ret;
}
#endif /* !USER_ONLY */
#define REQUIRE_NO_LIT                          \
    do {                                        \
        if (real_islit) {                       \
            goto invalid_opc;                   \
        }                                       \
    } while (0)

#define REQUIRE_AMASK(FLAG)                     \
    do {                                        \
        if ((ctx->amask & AMASK_##FLAG) == 0) { \
            goto invalid_opc;                   \
        }                                       \
    } while (0)

#define REQUIRE_TB_FLAG(FLAG)                   \
    do {                                        \
        if ((ctx->tbflags & (FLAG)) == 0) {     \
            goto invalid_opc;                   \
        }                                       \
    } while (0)

#define REQUIRE_REG_31(WHICH)                   \
    do {                                        \
        if (WHICH != 31) {                      \
            goto invalid_opc;                   \
        }                                       \
    } while (0)

#define REQUIRE_FEN                             \
    do {                                        \
        if (!(ctx->tbflags & ENV_FLAG_FEN)) {   \
            goto raise_fen;                     \
        }                                       \
    } while (0)
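
/*
 * Alpha instruction field layout, as decoded in translate_one below:
 *   opc    insn<31:26>
 *   ra     insn<25:21>
 *   rb     insn<20:16>
 *   islit  insn<12>       (operate format: literal flag)
 *   lit    insn<20:13>    (operate format: 8-bit zero-extended literal)
 *   fn7    insn<11:5>     (integer operate function)
 *   fn11   insn<15:5>     (fp operate function plus qualifiers)
 *   rc     insn<4:0>
 *   disp16 insn<15:0>     (memory format, sign-extended)
 *   disp21 insn<20:0>     (branch format, sign-extended longword count)
 */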
static DisasJumpType translate_one(DisasContext *ctx, uint32_t insn)
{
    int32_t disp21, disp16, disp12 __attribute__((unused));
    uint16_t fn11;
    uint8_t opc, ra, rb, rc, fpfn, fn7, lit;
    bool islit, real_islit;
    TCGv va, vb, vc, tmp, tmp2;
    TCGv_i32 t32;
    DisasJumpType ret;

    /* Decode all instruction fields */
    opc = extract32(insn, 26, 6);
    ra = extract32(insn, 21, 5);
    rb = extract32(insn, 16, 5);
    rc = extract32(insn, 0, 5);
    real_islit = islit = extract32(insn, 12, 1);
    lit = extract32(insn, 13, 8);

    disp21 = sextract32(insn, 0, 21);
    disp16 = sextract32(insn, 0, 16);
    disp12 = sextract32(insn, 0, 12);

    fn11 = extract32(insn, 5, 11);
    fpfn = extract32(insn, 5, 6);
    fn7 = extract32(insn, 5, 7);

    if (rb == 31 && !islit) {
        islit = true;
        lit = 0;
    }

    ret = DISAS_NEXT;
    switch (opc) {
    case 0x00:
        /* CALL_PAL */
        ret = gen_call_pal(ctx, insn & 0x03ffffff);
        break;
    case 0x01:
        /* OPC01 */
        goto invalid_opc;
    case 0x02:
        /* OPC02 */
        goto invalid_opc;
    case 0x03:
        /* OPC03 */
        goto invalid_opc;
    case 0x04:
        /* OPC04 */
        goto invalid_opc;
    case 0x05:
        /* OPC05 */
        goto invalid_opc;
    case 0x06:
        /* OPC06 */
        goto invalid_opc;
    case 0x07:
        /* OPC07 */
        goto invalid_opc;

    case 0x09:
        /* LDAH */
        disp16 = (uint32_t)disp16 << 16;
        /* fall through */
    case 0x08:
        /* LDA */
        va = dest_gpr(ctx, ra);
        /* It's worth special-casing immediate loads.  */
        if (rb == 31) {
            tcg_gen_movi_i64(va, disp16);
        } else {
            tcg_gen_addi_i64(va, load_gpr(ctx, rb), disp16);
        }
        break;

    case 0x0A:
        /* LDBU */
        REQUIRE_AMASK(BWX);
        gen_load_int(ctx, ra, rb, disp16, MO_UB, 0, 0);
        break;
    case 0x0B:
        /* LDQ_U */
        gen_load_int(ctx, ra, rb, disp16, MO_LEUQ, 1, 0);
        break;
    case 0x0C:
        /* LDWU */
        REQUIRE_AMASK(BWX);
        gen_load_int(ctx, ra, rb, disp16, MO_LEUW, 0, 0);
        break;
    case 0x0D:
        /* STW */
        REQUIRE_AMASK(BWX);
        gen_store_int(ctx, ra, rb, disp16, MO_LEUW, 0);
        break;
    case 0x0E:
        /* STB */
        REQUIRE_AMASK(BWX);
        gen_store_int(ctx, ra, rb, disp16, MO_UB, 0);
        break;
    case 0x0F:
        /* STQ_U */
        gen_store_int(ctx, ra, rb, disp16, MO_LEUQ, 1);
        break;

    case 0x10:
        vc = dest_gpr(ctx, rc);
        vb = load_gpr_lit(ctx, rb, lit, islit);

        if (ra == 31) {
            if (fn7 == 0x00) {
                /* Special case ADDL as SEXTL.  */
                tcg_gen_ext32s_i64(vc, vb);
                break;
            }
            if (fn7 == 0x29) {
                /* Special case SUBQ as NEGQ.  */
                tcg_gen_neg_i64(vc, vb);
                break;
            }
        }

        va = load_gpr(ctx, ra);
        switch (fn7) {
        case 0x00:
            /* ADDL */
            tcg_gen_add_i64(vc, va, vb);
            tcg_gen_ext32s_i64(vc, vc);
            break;
        case 0x02:
            /* S4ADDL */
            tmp = tcg_temp_new();
            tcg_gen_shli_i64(tmp, va, 2);
            tcg_gen_add_i64(tmp, tmp, vb);
            tcg_gen_ext32s_i64(vc, tmp);
            break;
        case 0x09:
            /* SUBL */
            tcg_gen_sub_i64(vc, va, vb);
            tcg_gen_ext32s_i64(vc, vc);
            break;
        case 0x0B:
            /* S4SUBL */
            tmp = tcg_temp_new();
            tcg_gen_shli_i64(tmp, va, 2);
            tcg_gen_sub_i64(tmp, tmp, vb);
            tcg_gen_ext32s_i64(vc, tmp);
            break;
        case 0x0F:
            /* CMPBGE */
            if (ra == 31) {
                /* Special case 0 >= X as X == 0.  */
                gen_helper_cmpbe0(vc, vb);
            } else {
                gen_helper_cmpbge(vc, va, vb);
            }
            break;
        case 0x12:
            /* S8ADDL */
            tmp = tcg_temp_new();
            tcg_gen_shli_i64(tmp, va, 3);
            tcg_gen_add_i64(tmp, tmp, vb);
            tcg_gen_ext32s_i64(vc, tmp);
            break;
        case 0x1B:
            /* S8SUBL */
            tmp = tcg_temp_new();
            tcg_gen_shli_i64(tmp, va, 3);
            tcg_gen_sub_i64(tmp, tmp, vb);
            tcg_gen_ext32s_i64(vc, tmp);
            break;
        case 0x1D:
            /* CMPULT */
            tcg_gen_setcond_i64(TCG_COND_LTU, vc, va, vb);
            break;
        case 0x20:
            /* ADDQ */
            tcg_gen_add_i64(vc, va, vb);
            break;
        case 0x22:
            /* S4ADDQ */
            tmp = tcg_temp_new();
            tcg_gen_shli_i64(tmp, va, 2);
            tcg_gen_add_i64(vc, tmp, vb);
            break;
        case 0x29:
            /* SUBQ */
            tcg_gen_sub_i64(vc, va, vb);
            break;
        case 0x2B:
            /* S4SUBQ */
            tmp = tcg_temp_new();
            tcg_gen_shli_i64(tmp, va, 2);
            tcg_gen_sub_i64(vc, tmp, vb);
            break;
        case 0x2D:
            /* CMPEQ */
            tcg_gen_setcond_i64(TCG_COND_EQ, vc, va, vb);
            break;
        case 0x32:
            /* S8ADDQ */
            tmp = tcg_temp_new();
            tcg_gen_shli_i64(tmp, va, 3);
            tcg_gen_add_i64(vc, tmp, vb);
            break;
        case 0x3B:
            /* S8SUBQ */
            tmp = tcg_temp_new();
            tcg_gen_shli_i64(tmp, va, 3);
            tcg_gen_sub_i64(vc, tmp, vb);
            break;
        case 0x3D:
            /* CMPULE */
            tcg_gen_setcond_i64(TCG_COND_LEU, vc, va, vb);
            break;
        case 0x40:
            /* ADDL/V */
            tmp = tcg_temp_new();
            tcg_gen_ext32s_i64(tmp, va);
            tcg_gen_ext32s_i64(vc, vb);
            tcg_gen_add_i64(tmp, tmp, vc);
            tcg_gen_ext32s_i64(vc, tmp);
            gen_helper_check_overflow(tcg_env, vc, tmp);
            break;
        case 0x49:
            /* SUBL/V */
            tmp = tcg_temp_new();
            tcg_gen_ext32s_i64(tmp, va);
            tcg_gen_ext32s_i64(vc, vb);
            tcg_gen_sub_i64(tmp, tmp, vc);
            tcg_gen_ext32s_i64(vc, tmp);
            gen_helper_check_overflow(tcg_env, vc, tmp);
            break;
        case 0x4D:
            /* CMPLT */
            tcg_gen_setcond_i64(TCG_COND_LT, vc, va, vb);
            break;
        case 0x60:
            /* ADDQ/V */
            tmp = tcg_temp_new();
            tmp2 = tcg_temp_new();
            tcg_gen_eqv_i64(tmp, va, vb);
            tcg_gen_mov_i64(tmp2, va);
            tcg_gen_add_i64(vc, va, vb);
            tcg_gen_xor_i64(tmp2, tmp2, vc);
            tcg_gen_and_i64(tmp, tmp, tmp2);
            tcg_gen_shri_i64(tmp, tmp, 63);
            tcg_gen_movi_i64(tmp2, 0);
            gen_helper_check_overflow(tcg_env, tmp, tmp2);
            break;
        case 0x69:
            /* SUBQ/V */
            tmp = tcg_temp_new();
            tmp2 = tcg_temp_new();
            tcg_gen_xor_i64(tmp, va, vb);
            tcg_gen_mov_i64(tmp2, va);
            tcg_gen_sub_i64(vc, va, vb);
            tcg_gen_xor_i64(tmp2, tmp2, vc);
            tcg_gen_and_i64(tmp, tmp, tmp2);
            tcg_gen_shri_i64(tmp, tmp, 63);
            tcg_gen_movi_i64(tmp2, 0);
            gen_helper_check_overflow(tcg_env, tmp, tmp2);
            break;
        case 0x6D:
            /* CMPLE */
            tcg_gen_setcond_i64(TCG_COND_LE, vc, va, vb);
            break;
        default:
            goto invalid_opc;
        }
        break;

    case 0x11:
        if (fn7 == 0x20) {
            if (rc == 31) {
                /* Special case BIS as NOP.  */
                break;
            }
            if (ra == 31) {
                /* Special case BIS as MOV.  */
                vc = dest_gpr(ctx, rc);
                if (islit) {
                    tcg_gen_movi_i64(vc, lit);
                } else {
                    tcg_gen_mov_i64(vc, load_gpr(ctx, rb));
                }
                break;
            }
        }

        vc = dest_gpr(ctx, rc);
        vb = load_gpr_lit(ctx, rb, lit, islit);

        if (fn7 == 0x28 && ra == 31) {
            /* Special case ORNOT as NOT.  */
            tcg_gen_not_i64(vc, vb);
            break;
        }

        va = load_gpr(ctx, ra);
        switch (fn7) {
        case 0x00:
            /* AND */
            tcg_gen_and_i64(vc, va, vb);
            break;
        case 0x08:
            /* BIC */
            tcg_gen_andc_i64(vc, va, vb);
            break;
        case 0x14:
            /* CMOVLBS */
            tmp = tcg_temp_new();
            tcg_gen_andi_i64(tmp, va, 1);
            tcg_gen_movcond_i64(TCG_COND_NE, vc, tmp, load_zero(ctx),
                                vb, load_gpr(ctx, rc));
            break;
        case 0x16:
            /* CMOVLBC */
            tmp = tcg_temp_new();
            tcg_gen_andi_i64(tmp, va, 1);
            tcg_gen_movcond_i64(TCG_COND_EQ, vc, tmp, load_zero(ctx),
                                vb, load_gpr(ctx, rc));
            break;
        case 0x20:
            /* BIS */
            tcg_gen_or_i64(vc, va, vb);
            break;
        case 0x24:
            /* CMOVEQ */
            tcg_gen_movcond_i64(TCG_COND_EQ, vc, va, load_zero(ctx),
                                vb, load_gpr(ctx, rc));
            break;
        case 0x26:
            /* CMOVNE */
            tcg_gen_movcond_i64(TCG_COND_NE, vc, va, load_zero(ctx),
                                vb, load_gpr(ctx, rc));
            break;
        case 0x28:
            /* ORNOT */
            tcg_gen_orc_i64(vc, va, vb);
            break;
        case 0x40:
            /* XOR */
            tcg_gen_xor_i64(vc, va, vb);
            break;
        case 0x44:
            /* CMOVLT */
            tcg_gen_movcond_i64(TCG_COND_LT, vc, va, load_zero(ctx),
                                vb, load_gpr(ctx, rc));
            break;
        case 0x46:
            /* CMOVGE */
            tcg_gen_movcond_i64(TCG_COND_GE, vc, va, load_zero(ctx),
                                vb, load_gpr(ctx, rc));
            break;
        case 0x48:
            /* EQV */
            tcg_gen_eqv_i64(vc, va, vb);
            break;
        case 0x61:
            /* AMASK */
            REQUIRE_REG_31(ra);
            tcg_gen_andi_i64(vc, vb, ~ctx->amask);
            break;
        case 0x64:
            /* CMOVLE */
            tcg_gen_movcond_i64(TCG_COND_LE, vc, va, load_zero(ctx),
                                vb, load_gpr(ctx, rc));
            break;
        case 0x66:
            /* CMOVGT */
            tcg_gen_movcond_i64(TCG_COND_GT, vc, va, load_zero(ctx),
                                vb, load_gpr(ctx, rc));
            break;
        case 0x6C:
            /* IMPLVER */
            REQUIRE_REG_31(ra);
            tcg_gen_movi_i64(vc, ctx->implver);
            break;
        default:
            goto invalid_opc;
        }
        break;

    case 0x12:
        vc = dest_gpr(ctx, rc);
        va = load_gpr(ctx, ra);
        switch (fn7) {
        case 0x02:
            /* MSKBL */
            gen_msk_l(ctx, vc, va, rb, islit, lit, 0x01);
            break;
        case 0x06:
            /* EXTBL */
            gen_ext_l(ctx, vc, va, rb, islit, lit, 0x01);
            break;
        case 0x0B:
            /* INSBL */
            gen_ins_l(ctx, vc, va, rb, islit, lit, 0x01);
            break;
        case 0x12:
            /* MSKWL */
            gen_msk_l(ctx, vc, va, rb, islit, lit, 0x03);
            break;
        case 0x16:
            /* EXTWL */
            gen_ext_l(ctx, vc, va, rb, islit, lit, 0x03);
            break;
        case 0x1B:
            /* INSWL */
            gen_ins_l(ctx, vc, va, rb, islit, lit, 0x03);
            break;
        case 0x22:
            /* MSKLL */
            gen_msk_l(ctx, vc, va, rb, islit, lit, 0x0f);
            break;
        case 0x26:
            /* EXTLL */
            gen_ext_l(ctx, vc, va, rb, islit, lit, 0x0f);
            break;
        case 0x2B:
            /* INSLL */
            gen_ins_l(ctx, vc, va, rb, islit, lit, 0x0f);
            break;
        case 0x30:
            /* ZAP */
            if (islit) {
                gen_zapnoti(vc, va, ~lit);
            } else {
                gen_helper_zap(vc, va, load_gpr(ctx, rb));
            }
            break;
        case 0x31:
            /* ZAPNOT */
            if (islit) {
                gen_zapnoti(vc, va, lit);
            } else {
                gen_helper_zapnot(vc, va, load_gpr(ctx, rb));
            }
            break;
        case 0x32:
            /* MSKQL */
            gen_msk_l(ctx, vc, va, rb, islit, lit, 0xff);
            break;
        case 0x34:
            /* SRL */
            if (islit) {
                tcg_gen_shri_i64(vc, va, lit & 0x3f);
            } else {
                tmp = tcg_temp_new();
                vb = load_gpr(ctx, rb);
                tcg_gen_andi_i64(tmp, vb, 0x3f);
                tcg_gen_shr_i64(vc, va, tmp);
            }
            break;
        case 0x36:
            /* EXTQL */
            gen_ext_l(ctx, vc, va, rb, islit, lit, 0xff);
            break;
        case 0x39:
            /* SLL */
            if (islit) {
                tcg_gen_shli_i64(vc, va, lit & 0x3f);
            } else {
                tmp = tcg_temp_new();
                vb = load_gpr(ctx, rb);
                tcg_gen_andi_i64(tmp, vb, 0x3f);
                tcg_gen_shl_i64(vc, va, tmp);
            }
            break;
        case 0x3B:
            /* INSQL */
            gen_ins_l(ctx, vc, va, rb, islit, lit, 0xff);
            break;
        case 0x3C:
            /* SRA */
            if (islit) {
                tcg_gen_sari_i64(vc, va, lit & 0x3f);
            } else {
                tmp = tcg_temp_new();
                vb = load_gpr(ctx, rb);
                tcg_gen_andi_i64(tmp, vb, 0x3f);
                tcg_gen_sar_i64(vc, va, tmp);
            }
            break;
        case 0x52:
            /* MSKWH */
            gen_msk_h(ctx, vc, va, rb, islit, lit, 0x03);
            break;
        case 0x57:
            /* INSWH */
            gen_ins_h(ctx, vc, va, rb, islit, lit, 0x03);
            break;
        case 0x5A:
            /* EXTWH */
            gen_ext_h(ctx, vc, va, rb, islit, lit, 0x03);
            break;
        case 0x62:
            /* MSKLH */
            gen_msk_h(ctx, vc, va, rb, islit, lit, 0x0f);
            break;
        case 0x67:
            /* INSLH */
            gen_ins_h(ctx, vc, va, rb, islit, lit, 0x0f);
            break;
        case 0x6A:
            /* EXTLH */
            gen_ext_h(ctx, vc, va, rb, islit, lit, 0x0f);
            break;
        case 0x72:
            /* MSKQH */
            gen_msk_h(ctx, vc, va, rb, islit, lit, 0xff);
            break;
        case 0x77:
            /* INSQH */
            gen_ins_h(ctx, vc, va, rb, islit, lit, 0xff);
            break;
        case 0x7A:
            /* EXTQH */
            gen_ext_h(ctx, vc, va, rb, islit, lit, 0xff);
            break;
        default:
            goto invalid_opc;
        }
        break;

    case 0x13:
        vc = dest_gpr(ctx, rc);
        vb = load_gpr_lit(ctx, rb, lit, islit);
        va = load_gpr(ctx, ra);
        switch (fn7) {
        case 0x00:
            /* MULL */
            tcg_gen_mul_i64(vc, va, vb);
            tcg_gen_ext32s_i64(vc, vc);
            break;
        case 0x20:
            /* MULQ */
            tcg_gen_mul_i64(vc, va, vb);
            break;
        case 0x30:
            /* UMULH */
            tmp = tcg_temp_new();
            tcg_gen_mulu2_i64(tmp, vc, va, vb);
            break;
        case 0x40:
            /* MULL/V */
            tmp = tcg_temp_new();
            tcg_gen_ext32s_i64(tmp, va);
            tcg_gen_ext32s_i64(vc, vb);
            tcg_gen_mul_i64(tmp, tmp, vc);
            tcg_gen_ext32s_i64(vc, tmp);
            gen_helper_check_overflow(tcg_env, vc, tmp);
            break;
        case 0x60:
            /* MULQ/V */
            tmp = tcg_temp_new();
            tmp2 = tcg_temp_new();
            tcg_gen_muls2_i64(vc, tmp, va, vb);
            tcg_gen_sari_i64(tmp2, vc, 63);
            gen_helper_check_overflow(tcg_env, tmp, tmp2);
            break;
        default:
            goto invalid_opc;
        }
        break;

    case 0x14:
        REQUIRE_AMASK(FIX);
        vc = dest_fpr(ctx, rc);
        switch (fpfn) { /* fn11 & 0x3F */
        case 0x04:
            /* ITOFS */
            REQUIRE_REG_31(rb);
            REQUIRE_FEN;
            t32 = tcg_temp_new_i32();
            va = load_gpr(ctx, ra);
            tcg_gen_extrl_i64_i32(t32, va);
            gen_helper_memory_to_s(vc, t32);
            break;
        case 0x0A:
            /* SQRTF */
            REQUIRE_REG_31(ra);
            REQUIRE_FEN;
            vb = load_fpr(ctx, rb);
            gen_helper_sqrtf(vc, tcg_env, vb);
            break;
        case 0x0B:
            /* SQRTS */
            REQUIRE_REG_31(ra);
            REQUIRE_FEN;
            gen_sqrts(ctx, rb, rc, fn11);
            break;
        case 0x14:
            /* ITOFF */
            REQUIRE_REG_31(rb);
            REQUIRE_FEN;
            t32 = tcg_temp_new_i32();
            va = load_gpr(ctx, ra);
            tcg_gen_extrl_i64_i32(t32, va);
            gen_helper_memory_to_f(vc, t32);
            break;
        case 0x24:
            /* ITOFT */
            REQUIRE_REG_31(rb);
            REQUIRE_FEN;
            va = load_gpr(ctx, ra);
            tcg_gen_mov_i64(vc, va);
            break;
        case 0x2A:
            /* SQRTG */
            REQUIRE_REG_31(ra);
            REQUIRE_FEN;
            vb = load_fpr(ctx, rb);
            gen_helper_sqrtg(vc, tcg_env, vb);
            break;
        case 0x2B:
            /* SQRTT */
            REQUIRE_REG_31(ra);
            REQUIRE_FEN;
            gen_sqrtt(ctx, rb, rc, fn11);
            break;
        default:
            goto invalid_opc;
        }
        break;

    case 0x15:
        /* VAX floating point */
        /* XXX: rounding mode and trap are ignored (!) */
        vc = dest_fpr(ctx, rc);
        vb = load_fpr(ctx, rb);
        va = load_fpr(ctx, ra);
        switch (fpfn) { /* fn11 & 0x3F */
        case 0x00:
            /* ADDF */
            REQUIRE_FEN;
            gen_helper_addf(vc, tcg_env, va, vb);
            break;
        case 0x01:
            /* SUBF */
            REQUIRE_FEN;
            gen_helper_subf(vc, tcg_env, va, vb);
            break;
        case 0x02:
            /* MULF */
            REQUIRE_FEN;
            gen_helper_mulf(vc, tcg_env, va, vb);
            break;
        case 0x03:
            /* DIVF */
            REQUIRE_FEN;
            gen_helper_divf(vc, tcg_env, va, vb);
            break;
        case 0x20:
            /* ADDG */
            REQUIRE_FEN;
            gen_helper_addg(vc, tcg_env, va, vb);
            break;
        case 0x21:
            /* SUBG */
            REQUIRE_FEN;
            gen_helper_subg(vc, tcg_env, va, vb);
            break;
        case 0x22:
            /* MULG */
            REQUIRE_FEN;
            gen_helper_mulg(vc, tcg_env, va, vb);
            break;
        case 0x23:
            /* DIVG */
            REQUIRE_FEN;
            gen_helper_divg(vc, tcg_env, va, vb);
            break;
        case 0x25:
            /* CMPGEQ */
            REQUIRE_FEN;
            gen_helper_cmpgeq(vc, tcg_env, va, vb);
            break;
        case 0x26:
            /* CMPGLT */
            REQUIRE_FEN;
            gen_helper_cmpglt(vc, tcg_env, va, vb);
            break;
        case 0x27:
            /* CMPGLE */
            REQUIRE_FEN;
            gen_helper_cmpgle(vc, tcg_env, va, vb);
            break;
        case 0x2C:
            /* CVTGF */
            REQUIRE_REG_31(ra);
            REQUIRE_FEN;
            gen_helper_cvtgf(vc, tcg_env, vb);
            break;
        case 0x2F:
            /* CVTGQ */
            REQUIRE_REG_31(ra);
            REQUIRE_FEN;
            gen_helper_cvtgq(vc, tcg_env, vb);
            break;
        case 0x3C:
            /* CVTQF */
            REQUIRE_REG_31(ra);
            REQUIRE_FEN;
            gen_helper_cvtqf(vc, tcg_env, vb);
            break;
        case 0x3E:
            /* CVTQG */
            REQUIRE_REG_31(ra);
            REQUIRE_FEN;
            gen_helper_cvtqg(vc, tcg_env, vb);
            break;
        default:
            goto invalid_opc;
        }
        break;

    case 0x16:
        /* IEEE floating-point */
        switch (fpfn) { /* fn11 & 0x3F */
        case 0x00:
            /* ADDS */
            REQUIRE_FEN;
            gen_adds(ctx, ra, rb, rc, fn11);
            break;
        case 0x01:
            /* SUBS */
            REQUIRE_FEN;
            gen_subs(ctx, ra, rb, rc, fn11);
            break;
        case 0x02:
            /* MULS */
            REQUIRE_FEN;
            gen_muls(ctx, ra, rb, rc, fn11);
            break;
        case 0x03:
            /* DIVS */
            REQUIRE_FEN;
            gen_divs(ctx, ra, rb, rc, fn11);
            break;
        case 0x20:
            /* ADDT */
            REQUIRE_FEN;
            gen_addt(ctx, ra, rb, rc, fn11);
            break;
        case 0x21:
            /* SUBT */
            REQUIRE_FEN;
            gen_subt(ctx, ra, rb, rc, fn11);
            break;
        case 0x22:
            /* MULT */
            REQUIRE_FEN;
            gen_mult(ctx, ra, rb, rc, fn11);
            break;
        case 0x23:
            /* DIVT */
            REQUIRE_FEN;
            gen_divt(ctx, ra, rb, rc, fn11);
            break;
        case 0x24:
            /* CMPTUN */
            REQUIRE_FEN;
            gen_cmptun(ctx, ra, rb, rc, fn11);
            break;
        case 0x25:
            /* CMPTEQ */
            REQUIRE_FEN;
            gen_cmpteq(ctx, ra, rb, rc, fn11);
            break;
        case 0x26:
            /* CMPTLT */
            REQUIRE_FEN;
            gen_cmptlt(ctx, ra, rb, rc, fn11);
            break;
        case 0x27:
            /* CMPTLE */
            REQUIRE_FEN;
            gen_cmptle(ctx, ra, rb, rc, fn11);
            break;
        case 0x2C:
            REQUIRE_REG_31(ra);
            REQUIRE_FEN;
            if (fn11 == 0x2AC || fn11 == 0x6AC) {
                /* CVTST */
                gen_cvtst(ctx, rb, rc, fn11);
            } else {
                /* CVTTS */
                gen_cvtts(ctx, rb, rc, fn11);
            }
            break;
        case 0x2F:
            /* CVTTQ */
            REQUIRE_REG_31(ra);
            REQUIRE_FEN;
            gen_cvttq(ctx, rb, rc, fn11);
            break;
        case 0x3C:
            /* CVTQS */
            REQUIRE_REG_31(ra);
            REQUIRE_FEN;
            gen_cvtqs(ctx, rb, rc, fn11);
            break;
        case 0x3E:
            /* CVTQT */
            REQUIRE_REG_31(ra);
            REQUIRE_FEN;
            gen_cvtqt(ctx, rb, rc, fn11);
            break;
        default:
            goto invalid_opc;
        }
        break;

    case 0x17:
        switch (fn11) {
        case 0x010:
            /* CVTLQ */
            REQUIRE_REG_31(ra);
            REQUIRE_FEN;
            vc = dest_fpr(ctx, rc);
            vb = load_fpr(ctx, rb);
            gen_cvtlq(vc, vb);
            break;
        case 0x020:
            /* CPYS */
            REQUIRE_FEN;
            if (rc == 31) {
                /* Special case CPYS as FNOP.  */
            } else {
                vc = dest_fpr(ctx, rc);
                va = load_fpr(ctx, ra);
                if (ra == rb) {
                    /* Special case CPYS as FMOV.  */
                    tcg_gen_mov_i64(vc, va);
                } else {
                    vb = load_fpr(ctx, rb);
                    gen_cpy_mask(vc, va, vb, 0, 0x8000000000000000ULL);
                }
            }
            break;
        case 0x021:
            /* CPYSN */
            REQUIRE_FEN;
            vc = dest_fpr(ctx, rc);
            vb = load_fpr(ctx, rb);
            va = load_fpr(ctx, ra);
            gen_cpy_mask(vc, va, vb, 1, 0x8000000000000000ULL);
            break;
        case 0x022:
            /* CPYSE */
            REQUIRE_FEN;
            vc = dest_fpr(ctx, rc);
            vb = load_fpr(ctx, rb);
            va = load_fpr(ctx, ra);
            gen_cpy_mask(vc, va, vb, 0, 0xFFF0000000000000ULL);
            break;
        case 0x024:
            /* MT_FPCR */
            REQUIRE_FEN;
            va = load_fpr(ctx, ra);
            gen_helper_store_fpcr(tcg_env, va);
            if (ctx->tb_rm == QUAL_RM_D) {
                /* Re-do the copy of the rounding mode to fp_status
                   the next time we use dynamic rounding.  */
                ctx->tb_rm = -1;
            }
            break;
        case 0x025:
            /* MF_FPCR */
            REQUIRE_FEN;
            va = dest_fpr(ctx, ra);
            gen_helper_load_fpcr(va, tcg_env);
            break;
        case 0x02A:
            /* FCMOVEQ */
            REQUIRE_FEN;
            gen_fcmov(ctx, TCG_COND_EQ, ra, rb, rc);
            break;
        case 0x02B:
            /* FCMOVNE */
            REQUIRE_FEN;
            gen_fcmov(ctx, TCG_COND_NE, ra, rb, rc);
            break;
        case 0x02C:
            /* FCMOVLT */
            REQUIRE_FEN;
            gen_fcmov(ctx, TCG_COND_LT, ra, rb, rc);
            break;
        case 0x02D:
            /* FCMOVGE */
            REQUIRE_FEN;
            gen_fcmov(ctx, TCG_COND_GE, ra, rb, rc);
            break;
        case 0x02E:
            /* FCMOVLE */
            REQUIRE_FEN;
            gen_fcmov(ctx, TCG_COND_LE, ra, rb, rc);
            break;
        case 0x02F:
            /* FCMOVGT */
            REQUIRE_FEN;
            gen_fcmov(ctx, TCG_COND_GT, ra, rb, rc);
            break;
        case 0x030: /* CVTQL */
        case 0x130: /* CVTQL/V */
        case 0x530: /* CVTQL/SV */
            REQUIRE_REG_31(ra);
            REQUIRE_FEN;
            vc = dest_fpr(ctx, rc);
            vb = load_fpr(ctx, rb);
            gen_helper_cvtql(vc, tcg_env, vb);
            gen_fp_exc_raise(rc, fn11);
            break;
        default:
            goto invalid_opc;
        }
        break;

    case 0x18:
        switch ((uint16_t)disp16) {
        case 0x0000:
            /* TRAPB */
            /* No-op.  */
            break;
        case 0x0400:
            /* EXCB */
            /* No-op.  */
            break;
        case 0x4000:
            /* MB */
            tcg_gen_mb(TCG_MO_ALL | TCG_BAR_SC);
            break;
        case 0x4400:
            /* WMB */
            tcg_gen_mb(TCG_MO_ST_ST | TCG_BAR_SC);
            break;
        case 0x8000:
            /* FETCH */
            /* No-op.  */
            break;
        case 0xA000:
            /* FETCH_M */
            /* No-op.  */
            break;
        case 0xC000:
            /* RPCC */
            va = dest_gpr(ctx, ra);
            if (translator_io_start(&ctx->base)) {
                ret = DISAS_PC_STALE;
            }
            gen_helper_load_pcc(va, tcg_env);
            break;
        case 0xE000:
            /* RC */
            gen_rx(ctx, ra, 0);
            break;
        case 0xE800:
            /* ECB */
            break;
        case 0xF000:
            /* RS */
            gen_rx(ctx, ra, 1);
            break;
        case 0xF800:
            /* WH64 */
            /* No-op.  */
            break;
        case 0xFC00:
            /* WH64EN */
            /* No-op.  */
            break;
        default:
            goto invalid_opc;
        }
        break;

    case 0x19:
        /* HW_MFPR (PALcode) */
#ifndef CONFIG_USER_ONLY
        REQUIRE_TB_FLAG(ENV_FLAG_PAL_MODE);
        va = dest_gpr(ctx, ra);
        ret = gen_mfpr(ctx, va, insn & 0xffff);
        break;
#else
        goto invalid_opc;
#endif

    case 0x1A:
        /* JMP, JSR, RET, JSR_COROUTINE.  These only differ by the branch
           prediction stack action, which of course we don't implement.  */
        vb = load_gpr(ctx, rb);
        tcg_gen_andi_i64(cpu_pc, vb, ~3);
        if (ra != 31) {
            tcg_gen_movi_i64(ctx->ir[ra], ctx->base.pc_next);
        }
        ret = DISAS_PC_UPDATED;
        break;

    case 0x1B:
        /* HW_LD (PALcode) */
#ifndef CONFIG_USER_ONLY
        REQUIRE_TB_FLAG(ENV_FLAG_PAL_MODE);
        {
            TCGv addr = tcg_temp_new();
            vb = load_gpr(ctx, rb);
            va = dest_gpr(ctx, ra);

            tcg_gen_addi_i64(addr, vb, disp12);
            switch ((insn >> 12) & 0xF) {
            case 0x0:
                /* Longword physical access (hw_ldl/p) */
                tcg_gen_qemu_ld_i64(va, addr, MMU_PHYS_IDX, MO_LESL | MO_ALIGN);
                break;
            case 0x1:
                /* Quadword physical access (hw_ldq/p) */
                tcg_gen_qemu_ld_i64(va, addr, MMU_PHYS_IDX, MO_LEUQ | MO_ALIGN);
                break;
            case 0x2:
                /* Longword physical access with lock (hw_ldl_l/p) */
                tcg_gen_qemu_ld_i64(va, addr, MMU_PHYS_IDX, MO_LESL | MO_ALIGN);
                tcg_gen_mov_i64(cpu_lock_addr, addr);
                tcg_gen_mov_i64(cpu_lock_value, va);
                break;
            case 0x3:
                /* Quadword physical access with lock (hw_ldq_l/p) */
                tcg_gen_qemu_ld_i64(va, addr, MMU_PHYS_IDX, MO_LEUQ | MO_ALIGN);
                tcg_gen_mov_i64(cpu_lock_addr, addr);
                tcg_gen_mov_i64(cpu_lock_value, va);
                break;
            case 0x4:
                /* Longword virtual PTE fetch (hw_ldl/v) */
                goto invalid_opc;
            case 0x5:
                /* Quadword virtual PTE fetch (hw_ldq/v) */
                goto invalid_opc;
            case 0x6:
                /* Invalid */
                goto invalid_opc;
            case 0x7:
                /* Invalid */
                goto invalid_opc;
            case 0x8:
                /* Longword virtual access (hw_ldl) */
                goto invalid_opc;
            case 0x9:
                /* Quadword virtual access (hw_ldq) */
                goto invalid_opc;
            case 0xA:
                /* Longword virtual access with protection check (hw_ldl/w) */
                tcg_gen_qemu_ld_i64(va, addr, MMU_KERNEL_IDX,
                                    MO_LESL | MO_ALIGN);
                break;
            case 0xB:
                /* Quadword virtual access with protection check (hw_ldq/w) */
                tcg_gen_qemu_ld_i64(va, addr, MMU_KERNEL_IDX,
                                    MO_LEUQ | MO_ALIGN);
                break;
            case 0xC:
                /* Longword virtual access with alt access mode (hw_ldl/a)*/
                goto invalid_opc;
            case 0xD:
                /* Quadword virtual access with alt access mode (hw_ldq/a) */
                goto invalid_opc;
            case 0xE:
                /* Longword virtual access with alternate access mode and
                   protection checks (hw_ldl/wa) */
                tcg_gen_qemu_ld_i64(va, addr, MMU_USER_IDX,
                                    MO_LESL | MO_ALIGN);
                break;
            case 0xF:
                /* Quadword virtual access with alternate access mode and
                   protection checks (hw_ldq/wa) */
                tcg_gen_qemu_ld_i64(va, addr, MMU_USER_IDX,
                                    MO_LEUQ | MO_ALIGN);
                break;
            }
            break;
        }
#else
        goto invalid_opc;
#endif

    case 0x1C:
        vc = dest_gpr(ctx, rc);
        if (fn7 == 0x70) {
            /* FTOIT */
            REQUIRE_AMASK(FIX);
            REQUIRE_REG_31(rb);
            REQUIRE_FEN;
            va = load_fpr(ctx, ra);
            tcg_gen_mov_i64(vc, va);
            break;
        } else if (fn7 == 0x78) {
            /* FTOIS */
            REQUIRE_AMASK(FIX);
            REQUIRE_REG_31(rb);
            REQUIRE_FEN;
            t32 = tcg_temp_new_i32();
            va = load_fpr(ctx, ra);
            gen_helper_s_to_memory(t32, va);
            tcg_gen_ext_i32_i64(vc, t32);
            break;
        }

        vb = load_gpr_lit(ctx, rb, lit, islit);
        switch (fn7) {
        case 0x00:
            /* SEXTB */
            REQUIRE_AMASK(BWX);
            REQUIRE_REG_31(ra);
            tcg_gen_ext8s_i64(vc, vb);
            break;
        case 0x01:
            /* SEXTW */
            REQUIRE_AMASK(BWX);
            REQUIRE_REG_31(ra);
            tcg_gen_ext16s_i64(vc, vb);
            break;
        case 0x30:
            /* CTPOP */
            REQUIRE_AMASK(CIX);
            REQUIRE_REG_31(ra);
            REQUIRE_NO_LIT;
            tcg_gen_ctpop_i64(vc, vb);
            break;
        case 0x31:
            /* PERR */
            REQUIRE_AMASK(MVI);
            REQUIRE_NO_LIT;
            va = load_gpr(ctx, ra);
            gen_helper_perr(vc, va, vb);
            break;
        case 0x32:
            /* CTLZ */
            REQUIRE_AMASK(CIX);
            REQUIRE_REG_31(ra);
            REQUIRE_NO_LIT;
            tcg_gen_clzi_i64(vc, vb, 64);
            break;
        case 0x33:
            /* CTTZ */
            REQUIRE_AMASK(CIX);
            REQUIRE_REG_31(ra);
            REQUIRE_NO_LIT;
            tcg_gen_ctzi_i64(vc, vb, 64);
            break;
        case 0x34:
            /* UNPKBW */
            REQUIRE_AMASK(MVI);
            REQUIRE_REG_31(ra);
            REQUIRE_NO_LIT;
            gen_helper_unpkbw(vc, vb);
            break;
        case 0x35:
            /* UNPKBL */
            REQUIRE_AMASK(MVI);
            REQUIRE_REG_31(ra);
            REQUIRE_NO_LIT;
            gen_helper_unpkbl(vc, vb);
            break;
        case 0x36:
            /* PKWB */
            REQUIRE_AMASK(MVI);
            REQUIRE_REG_31(ra);
            REQUIRE_NO_LIT;
            gen_helper_pkwb(vc, vb);
            break;
        case 0x37:
            /* PKLB */
            REQUIRE_AMASK(MVI);
            REQUIRE_REG_31(ra);
            REQUIRE_NO_LIT;
            gen_helper_pklb(vc, vb);
            break;
        case 0x38:
            /* MINSB8 */
            REQUIRE_AMASK(MVI);
            va = load_gpr(ctx, ra);
            gen_helper_minsb8(vc, va, vb);
            break;
        case 0x39:
            /* MINSW4 */
            REQUIRE_AMASK(MVI);
            va = load_gpr(ctx, ra);
            gen_helper_minsw4(vc, va, vb);
            break;
        case 0x3A:
            /* MINUB8 */
            REQUIRE_AMASK(MVI);
            va = load_gpr(ctx, ra);
            gen_helper_minub8(vc, va, vb);
            break;
        case 0x3B:
            /* MINUW4 */
            REQUIRE_AMASK(MVI);
            va = load_gpr(ctx, ra);
            gen_helper_minuw4(vc, va, vb);
            break;
        case 0x3C:
            /* MAXUB8 */
            REQUIRE_AMASK(MVI);
            va = load_gpr(ctx, ra);
            gen_helper_maxub8(vc, va, vb);
            break;
        case 0x3D:
            /* MAXUW4 */
            REQUIRE_AMASK(MVI);
            va = load_gpr(ctx, ra);
            gen_helper_maxuw4(vc, va, vb);
            break;
        case 0x3E:
            /* MAXSB8 */
            REQUIRE_AMASK(MVI);
            va = load_gpr(ctx, ra);
            gen_helper_maxsb8(vc, va, vb);
            break;
        case 0x3F:
            /* MAXSW4 */
            REQUIRE_AMASK(MVI);
            va = load_gpr(ctx, ra);
            gen_helper_maxsw4(vc, va, vb);
            break;
        default:
            goto invalid_opc;
        }
        break;

    case 0x1D:
        /* HW_MTPR (PALcode) */
#ifndef CONFIG_USER_ONLY
        REQUIRE_TB_FLAG(ENV_FLAG_PAL_MODE);
        vb = load_gpr(ctx, rb);
        ret = gen_mtpr(ctx, vb, insn & 0xffff);
        break;
#else
        goto invalid_opc;
#endif

    case 0x1E:
        /* HW_RET (PALcode) */
#ifndef CONFIG_USER_ONLY
        REQUIRE_TB_FLAG(ENV_FLAG_PAL_MODE);
        if (rb == 31) {
            /* Pre-EV6 CPUs interpreted this as HW_REI, loading the return
               address from EXC_ADDR.  This turns out to be useful for our
               emulation PALcode, so continue to accept it.  */
            vb = dest_sink(ctx);
            tcg_gen_ld_i64(vb, tcg_env, offsetof(CPUAlphaState, exc_addr));
        } else {
            vb = load_gpr(ctx, rb);
        }
        tcg_gen_movi_i64(cpu_lock_addr, -1);
        st_flag_byte(load_zero(ctx), ENV_FLAG_RX_SHIFT);
        tmp = tcg_temp_new();
        tcg_gen_andi_i64(tmp, vb, 1);
        st_flag_byte(tmp, ENV_FLAG_PAL_SHIFT);
        tcg_gen_andi_i64(cpu_pc, vb, ~3);
        /* Allow interrupts to be recognized right away.  */
        ret = DISAS_PC_UPDATED_NOCHAIN;
        break;
#else
        goto invalid_opc;
#endif

    case 0x1F:
        /* HW_ST (PALcode) */
#ifndef CONFIG_USER_ONLY
        REQUIRE_TB_FLAG(ENV_FLAG_PAL_MODE);
        {
            switch ((insn >> 12) & 0xF) {
            case 0x0:
                /* Longword physical access */
                va = load_gpr(ctx, ra);
                vb = load_gpr(ctx, rb);
                tmp = tcg_temp_new();
                tcg_gen_addi_i64(tmp, vb, disp12);
                tcg_gen_qemu_st_i64(va, tmp, MMU_PHYS_IDX, MO_LESL | MO_ALIGN);
                break;
            case 0x1:
                /* Quadword physical access */
                va = load_gpr(ctx, ra);
                vb = load_gpr(ctx, rb);
                tmp = tcg_temp_new();
                tcg_gen_addi_i64(tmp, vb, disp12);
                tcg_gen_qemu_st_i64(va, tmp, MMU_PHYS_IDX, MO_LEUQ | MO_ALIGN);
                break;
            case 0x2:
                /* Longword physical access with lock */
                ret = gen_store_conditional(ctx, ra, rb, disp12,
                                            MMU_PHYS_IDX, MO_LESL | MO_ALIGN);
                break;
            case 0x3:
                /* Quadword physical access with lock */
                ret = gen_store_conditional(ctx, ra, rb, disp12,
                                            MMU_PHYS_IDX, MO_LEUQ | MO_ALIGN);
                break;
            case 0x4:
                /* Longword virtual access */
                goto invalid_opc;
            case 0x5:
                /* Quadword virtual access */
                goto invalid_opc;
            case 0x6:
                /* Invalid */
                goto invalid_opc;
            case 0x7:
                /* Invalid */
                goto invalid_opc;
            case 0x8:
                /* Invalid */
                goto invalid_opc;
            case 0x9:
                /* Invalid */
                goto invalid_opc;
            case 0xA:
                /* Invalid */
                goto invalid_opc;
            case 0xB:
                /* Invalid */
                goto invalid_opc;
            case 0xC:
                /* Longword virtual access with alternate access mode */
                goto invalid_opc;
            case 0xD:
                /* Quadword virtual access with alternate access mode */
                goto invalid_opc;
            case 0xE:
                /* Invalid */
                goto invalid_opc;
            case 0xF:
                /* Invalid */
                goto invalid_opc;
            }
            break;
        }
#else
        goto invalid_opc;
#endif

    case 0x20:
        /* LDF */
        REQUIRE_FEN;
        gen_load_fp(ctx, ra, rb, disp16, gen_ldf);
        break;
    case 0x21:
        /* LDG */
        REQUIRE_FEN;
        gen_load_fp(ctx, ra, rb, disp16, gen_ldg);
        break;
    case 0x22:
        /* LDS */
        REQUIRE_FEN;
        gen_load_fp(ctx, ra, rb, disp16, gen_lds);
        break;
    case 0x23:
        /* LDT */
        REQUIRE_FEN;
        gen_load_fp(ctx, ra, rb, disp16, gen_ldt);
        break;
    case 0x24:
        /* STF */
        REQUIRE_FEN;
        gen_store_fp(ctx, ra, rb, disp16, gen_stf);
        break;
    case 0x25:
        /* STG */
        REQUIRE_FEN;
        gen_store_fp(ctx, ra, rb, disp16, gen_stg);
        break;
    case 0x26:
        /* STS */
        REQUIRE_FEN;
        gen_store_fp(ctx, ra, rb, disp16, gen_sts);
        break;
    case 0x27:
        /* STT */
        REQUIRE_FEN;
        gen_store_fp(ctx, ra, rb, disp16, gen_stt);
        break;
    case 0x28:
        /* LDL */
        gen_load_int(ctx, ra, rb, disp16, MO_LESL, 0, 0);
        break;
    case 0x29:
        /* LDQ */
        gen_load_int(ctx, ra, rb, disp16, MO_LEUQ, 0, 0);
        break;
    case 0x2A:
        /* LDL_L */
        gen_load_int(ctx, ra, rb, disp16, MO_LESL | MO_ALIGN, 0, 1);
        break;
    case 0x2B:
        /* LDQ_L */
        gen_load_int(ctx, ra, rb, disp16, MO_LEUQ | MO_ALIGN, 0, 1);
        break;
    case 0x2C:
        /* STL */
        gen_store_int(ctx, ra, rb, disp16, MO_LEUL, 0);
        break;
    case 0x2D:
        /* STQ */
        gen_store_int(ctx, ra, rb, disp16, MO_LEUQ, 0);
        break;
    case 0x2E:
        /* STL_C */
        ret = gen_store_conditional(ctx, ra, rb, disp16,
                                    ctx->mem_idx, MO_LESL | MO_ALIGN);
        break;
    case 0x2F:
        /* STQ_C */
        ret = gen_store_conditional(ctx, ra, rb, disp16,
                                    ctx->mem_idx, MO_LEUQ | MO_ALIGN);
        break;
    case 0x30:
        /* BR */
        ret = gen_bdirect(ctx, ra, disp21);
        break;
    case 0x31: /* FBEQ */
        REQUIRE_FEN;
        ret = gen_fbcond(ctx, TCG_COND_EQ, ra, disp21);
        break;
    case 0x32: /* FBLT */
        REQUIRE_FEN;
        ret = gen_fbcond(ctx, TCG_COND_LT, ra, disp21);
        break;
    case 0x33: /* FBLE */
        REQUIRE_FEN;
        ret = gen_fbcond(ctx, TCG_COND_LE, ra, disp21);
        break;
    case 0x34:
        /* BSR */
        ret = gen_bdirect(ctx, ra, disp21);
        break;
    case 0x35: /* FBNE */
        REQUIRE_FEN;
        ret = gen_fbcond(ctx, TCG_COND_NE, ra, disp21);
        break;
    case 0x36: /* FBGE */
        REQUIRE_FEN;
        ret = gen_fbcond(ctx, TCG_COND_GE, ra, disp21);
        break;
    case 0x37: /* FBGT */
        REQUIRE_FEN;
        ret = gen_fbcond(ctx, TCG_COND_GT, ra, disp21);
        break;
    case 0x38:
        /* BLBC */
        ret = gen_bcond(ctx, TCG_COND_EQ, ra, disp21, 1);
        break;
    case 0x39:
        /* BEQ */
        ret = gen_bcond(ctx, TCG_COND_EQ, ra, disp21, 0);
        break;
    case 0x3A:
        /* BLT */
        ret = gen_bcond(ctx, TCG_COND_LT, ra, disp21, 0);
        break;
    case 0x3B:
        /* BLE */
        ret = gen_bcond(ctx, TCG_COND_LE, ra, disp21, 0);
        break;
    case 0x3C:
        /* BLBS */
        ret = gen_bcond(ctx, TCG_COND_NE, ra, disp21, 1);
        break;
    case 0x3D:
        /* BNE */
        ret = gen_bcond(ctx, TCG_COND_NE, ra, disp21, 0);
        break;
    case 0x3E:
        /* BGE */
        ret = gen_bcond(ctx, TCG_COND_GE, ra, disp21, 0);
        break;
    case 0x3F:
        /* BGT */
        ret = gen_bcond(ctx, TCG_COND_GT, ra, disp21, 0);
        break;
    invalid_opc:
        ret = gen_invalid(ctx);
        break;
    raise_fen:
        ret = gen_excp(ctx, EXCP_FEN, 0);
        break;
    }
    return ret;
}
static void alpha_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cpu)
{
    DisasContext *ctx = container_of(dcbase, DisasContext, base);
    CPUAlphaState *env = cpu_env(cpu);
    int64_t bound;

    ctx->tbflags = ctx->base.tb->flags;
    ctx->mem_idx = cpu_mmu_index(env, false);
    ctx->implver = env->implver;
    ctx->amask = env->amask;

#ifdef CONFIG_USER_ONLY
    ctx->ir = cpu_std_ir;
    ctx->unalign = (ctx->tbflags & TB_FLAG_UNALIGN ? MO_UNALN : MO_ALIGN);
#else
    ctx->palbr = env->palbr;
    ctx->ir = (ctx->tbflags & ENV_FLAG_PAL_MODE ? cpu_pal_ir : cpu_std_ir);
#endif

    /* ??? Every TB begins with unset rounding mode, to be initialized on
       the first fp insn of the TB.  Alternately we could define a proper
       default for every TB (e.g. QUAL_RM_N or QUAL_RM_D) and make sure
       to reset the FP_STATUS to that default at the end of any TB that
       changes the default.  We could even (gasp) dynamically figure out
       what default would be most efficient given the running program.  */
    ctx->tb_rm = -1;
    /* Similarly for flush-to-zero.  */
    ctx->tb_ftz = -1;

    ctx->zero = NULL;
    ctx->sink = NULL;

    /* Bound the number of insns to execute to those left on the page.  */
    bound = -(ctx->base.pc_first | TARGET_PAGE_MASK) / 4;
    ctx->base.max_insns = MIN(ctx->base.max_insns, bound);
}
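
/* Sanity check on the bound computation above: TARGET_PAGE_MASK is the
   sign-extended complement of (page size - 1), so (pc_first | mask) is
   the negative byte offset from the end of the page, and its negation
   divided by 4 is exactly the number of 4-byte insn slots remaining. */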
static void alpha_tr_tb_start(DisasContextBase *db, CPUState *cpu)
{
}

static void alpha_tr_insn_start(DisasContextBase *dcbase, CPUState *cpu)
{
    tcg_gen_insn_start(dcbase->pc_next);
}

static void alpha_tr_translate_insn(DisasContextBase *dcbase, CPUState *cpu)
{
    DisasContext *ctx = container_of(dcbase, DisasContext, base);
    CPUAlphaState *env = cpu_env(cpu);
    uint32_t insn = translator_ldl(env, &ctx->base, ctx->base.pc_next);

    ctx->base.pc_next += 4;
    ctx->base.is_jmp = translate_one(ctx, insn);

    free_context_temps(ctx);
}
static void alpha_tr_tb_stop(DisasContextBase *dcbase, CPUState *cpu)
{
    DisasContext *ctx = container_of(dcbase, DisasContext, base);

    switch (ctx->base.is_jmp) {
    case DISAS_NORETURN:
        break;
    case DISAS_TOO_MANY:
        if (use_goto_tb(ctx, ctx->base.pc_next)) {
            tcg_gen_goto_tb(0);
            tcg_gen_movi_i64(cpu_pc, ctx->base.pc_next);
            tcg_gen_exit_tb(ctx->base.tb, 0);
        }
        /* FALLTHRU */
    case DISAS_PC_STALE:
        tcg_gen_movi_i64(cpu_pc, ctx->base.pc_next);
        /* FALLTHRU */
    case DISAS_PC_UPDATED:
        tcg_gen_lookup_and_goto_ptr();
        break;
    case DISAS_PC_UPDATED_NOCHAIN:
        tcg_gen_exit_tb(NULL, 0);
        break;
    default:
        g_assert_not_reached();
    }
}
static void alpha_tr_disas_log(const DisasContextBase *dcbase,
                               CPUState *cpu, FILE *logfile)
{
    fprintf(logfile, "IN: %s\n", lookup_symbol(dcbase->pc_first));
    target_disas(logfile, cpu, dcbase->pc_first, dcbase->tb->size);
}
static const TranslatorOps alpha_tr_ops = {
    .init_disas_context = alpha_tr_init_disas_context,
    .tb_start           = alpha_tr_tb_start,
    .insn_start         = alpha_tr_insn_start,
    .translate_insn     = alpha_tr_translate_insn,
    .tb_stop            = alpha_tr_tb_stop,
    .disas_log          = alpha_tr_disas_log,
};
void gen_intermediate_code(CPUState *cpu, TranslationBlock *tb, int *max_insns,
                           target_ulong pc, void *host_pc)
{
    DisasContext dc;
    translator_loop(cpu, tb, max_insns, pc, host_pc, &alpha_tr_ops, &dc.base);
}