/*
 * Alpha emulation cpu translation for qemu.
 *
 * Copyright (c) 2007 Jocelyn Mayer
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"
#include "cpu.h"
#include "sysemu/cpus.h"
#include "sysemu/cpu-timers.h"
#include "disas/disas.h"
#include "qemu/host-utils.h"
#include "exec/exec-all.h"
#include "tcg/tcg-op.h"
#include "exec/cpu_ldst.h"
#include "exec/helper-proto.h"
#include "exec/helper-gen.h"
#include "exec/translator.h"
#undef ALPHA_DEBUG_DISAS
#define CONFIG_SOFTFLOAT_INLINE

#ifdef ALPHA_DEBUG_DISAS
#  define LOG_DISAS(...) qemu_log_mask(CPU_LOG_TB_IN_ASM, ## __VA_ARGS__)
#else
#  define LOG_DISAS(...) do { } while (0)
#endif
typedef struct DisasContext DisasContext;
struct DisasContext {
    DisasContextBase base;

#ifndef CONFIG_USER_ONLY
    uint64_t palbr;
#endif
    uint32_t tbflags;
    int mem_idx;

    /* implver and amask values for this CPU.  */
    int implver;
    int amask;

    /* Current rounding mode for this TB.  */
    int tb_rm;
    /* Current flush-to-zero setting for this TB.  */
    int tb_ftz;

    /* The set of registers active in the current context.  */
    TCGv *ir;

    /* Temporaries for $31 and $f31 as source and destination.  */
    TCGv zero;
    TCGv sink;
    /* Temporary for immediate constants.  */
    TCGv lit;
};
/* Target-specific return values from translate_one, indicating the
   state of the TB.  Note that DISAS_NEXT indicates that we are not
   exiting the TB.  */
#define DISAS_PC_UPDATED_NOCHAIN  DISAS_TARGET_0
#define DISAS_PC_UPDATED          DISAS_TARGET_1
#define DISAS_PC_STALE            DISAS_TARGET_2
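/*
 * How these are consumed (see alpha_tr_tb_stop below):
 * DISAS_PC_UPDATED_NOCHAIN means cpu_pc already holds the next PC and we
 * must exit to the main loop without chaining, so pending interrupts are
 * recognized; DISAS_PC_UPDATED also means cpu_pc is up to date but a
 * lookup_and_goto_ptr chain is still allowed; DISAS_PC_STALE means cpu_pc
 * must first be reloaded from base.pc_next before exiting.
 */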
/* global register indexes */
static TCGv cpu_std_ir[31];
static TCGv cpu_fir[31];
static TCGv cpu_pc;
static TCGv cpu_lock_addr;
static TCGv cpu_lock_value;

#ifndef CONFIG_USER_ONLY
static TCGv cpu_pal_ir[31];
#endif

#include "exec/gen-icount.h"
void alpha_translate_init(void)
{
#define DEF_VAR(V)  { &cpu_##V, #V, offsetof(CPUAlphaState, V) }

    typedef struct { TCGv *var; const char *name; int ofs; } GlobalVar;
    static const GlobalVar vars[] = {
        DEF_VAR(pc),
        DEF_VAR(lock_addr),
        DEF_VAR(lock_value),
    };

#undef DEF_VAR

    /* Use the symbolic register names that match the disassembler.  */
    static const char greg_names[31][4] = {
        "v0", "t0", "t1", "t2", "t3", "t4", "t5", "t6",
        "t7", "s0", "s1", "s2", "s3", "s4", "s5", "fp",
        "a0", "a1", "a2", "a3", "a4", "a5", "t8", "t9",
        "t10", "t11", "ra", "t12", "at", "gp", "sp"
    };
    static const char freg_names[31][4] = {
        "f0", "f1", "f2", "f3", "f4", "f5", "f6", "f7",
        "f8", "f9", "f10", "f11", "f12", "f13", "f14", "f15",
        "f16", "f17", "f18", "f19", "f20", "f21", "f22", "f23",
        "f24", "f25", "f26", "f27", "f28", "f29", "f30"
    };
#ifndef CONFIG_USER_ONLY
    static const char shadow_names[8][8] = {
        "pal_t7", "pal_s0", "pal_s1", "pal_s2",
        "pal_s3", "pal_s4", "pal_s5", "pal_t11"
    };
#endif
    int i;

    for (i = 0; i < 31; i++) {
        cpu_std_ir[i] = tcg_global_mem_new_i64(cpu_env,
                                               offsetof(CPUAlphaState, ir[i]),
                                               greg_names[i]);
    }

    for (i = 0; i < 31; i++) {
        cpu_fir[i] = tcg_global_mem_new_i64(cpu_env,
                                            offsetof(CPUAlphaState, fir[i]),
                                            freg_names[i]);
    }

#ifndef CONFIG_USER_ONLY
    memcpy(cpu_pal_ir, cpu_std_ir, sizeof(cpu_pal_ir));
    for (i = 0; i < 8; i++) {
        int r = (i == 7 ? 25 : i + 8);
        cpu_pal_ir[r] = tcg_global_mem_new_i64(cpu_env,
                                               offsetof(CPUAlphaState,
                                                        shadow[i]),
                                               shadow_names[i]);
    }
#endif

    for (i = 0; i < ARRAY_SIZE(vars); ++i) {
        const GlobalVar *v = &vars[i];
        *v->var = tcg_global_mem_new_i64(cpu_env, v->ofs, v->name);
    }
}
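/*
 * In PALmode the Alpha shadows r8-r14 and r25 with the eight PAL shadow
 * registers, which is why cpu_pal_ir[] above starts as a copy of
 * cpu_std_ir[] and then overrides exactly those entries (i == 7 maps to
 * r25, the others to r8..r14).
 */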
static TCGv load_zero(DisasContext *ctx)
{
    if (!ctx->zero) {
        ctx->zero = tcg_const_i64(0);
    }
    return ctx->zero;
}

static TCGv dest_sink(DisasContext *ctx)
{
    if (!ctx->sink) {
        ctx->sink = tcg_temp_new();
    }
    return ctx->sink;
}

static void free_context_temps(DisasContext *ctx)
{
    if (ctx->sink) {
        tcg_gen_discard_i64(ctx->sink);
        tcg_temp_free(ctx->sink);
        ctx->sink = NULL;
    }
    if (ctx->zero) {
        tcg_temp_free(ctx->zero);
        ctx->zero = NULL;
    }
    if (ctx->lit) {
        tcg_temp_free(ctx->lit);
        ctx->lit = NULL;
    }
}
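/*
 * Alpha's $31 and $f31 read as zero and discard writes.  The accessors
 * below implement that by handing out the shared zero temporary for reads
 * of register 31 and the discarded sink temporary for writes to it.
 */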
static TCGv load_gpr(DisasContext *ctx, unsigned reg)
{
    if (likely(reg < 31)) {
        return ctx->ir[reg];
    } else {
        return load_zero(ctx);
    }
}

static TCGv load_gpr_lit(DisasContext *ctx, unsigned reg,
                         uint8_t lit, bool islit)
{
    if (islit) {
        ctx->lit = tcg_const_i64(lit);
        return ctx->lit;
    } else if (likely(reg < 31)) {
        return ctx->ir[reg];
    } else {
        return load_zero(ctx);
    }
}

static TCGv dest_gpr(DisasContext *ctx, unsigned reg)
{
    if (likely(reg < 31)) {
        return ctx->ir[reg];
    } else {
        return dest_sink(ctx);
    }
}

static TCGv load_fpr(DisasContext *ctx, unsigned reg)
{
    if (likely(reg < 31)) {
        return cpu_fir[reg];
    } else {
        return load_zero(ctx);
    }
}

static TCGv dest_fpr(DisasContext *ctx, unsigned reg)
{
    if (likely(reg < 31)) {
        return cpu_fir[reg];
    } else {
        return dest_sink(ctx);
    }
}
static int get_flag_ofs(unsigned shift)
{
    int ofs = offsetof(CPUAlphaState, flags);
#ifdef HOST_WORDS_BIGENDIAN
    ofs += 3 - (shift / 8);
#else
    ofs += shift / 8;
#endif
    return ofs;
}

static void ld_flag_byte(TCGv val, unsigned shift)
{
    tcg_gen_ld8u_i64(val, cpu_env, get_flag_ofs(shift));
}

static void st_flag_byte(TCGv val, unsigned shift)
{
    tcg_gen_st8_i64(val, cpu_env, get_flag_ofs(shift));
}
static void gen_excp_1(int exception, int error_code)
{
    TCGv_i32 tmp1, tmp2;

    tmp1 = tcg_const_i32(exception);
    tmp2 = tcg_const_i32(error_code);
    gen_helper_excp(cpu_env, tmp1, tmp2);
    tcg_temp_free_i32(tmp2);
    tcg_temp_free_i32(tmp1);
}

static DisasJumpType gen_excp(DisasContext *ctx, int exception, int error_code)
{
    tcg_gen_movi_i64(cpu_pc, ctx->base.pc_next);
    gen_excp_1(exception, error_code);
    return DISAS_NORETURN;
}

static inline DisasJumpType gen_invalid(DisasContext *ctx)
{
    return gen_excp(ctx, EXCP_OPCDEC, 0);
}
static inline void gen_qemu_ldf(TCGv t0, TCGv t1, int flags)
{
    TCGv_i32 tmp32 = tcg_temp_new_i32();
    tcg_gen_qemu_ld_i32(tmp32, t1, flags, MO_LEUL);
    gen_helper_memory_to_f(t0, tmp32);
    tcg_temp_free_i32(tmp32);
}

static inline void gen_qemu_ldg(TCGv t0, TCGv t1, int flags)
{
    TCGv tmp = tcg_temp_new();
    tcg_gen_qemu_ld_i64(tmp, t1, flags, MO_LEQ);
    gen_helper_memory_to_g(t0, tmp);
    tcg_temp_free(tmp);
}

static inline void gen_qemu_lds(TCGv t0, TCGv t1, int flags)
{
    TCGv_i32 tmp32 = tcg_temp_new_i32();
    tcg_gen_qemu_ld_i32(tmp32, t1, flags, MO_LEUL);
    gen_helper_memory_to_s(t0, tmp32);
    tcg_temp_free_i32(tmp32);
}

static inline void gen_qemu_ldl_l(TCGv t0, TCGv t1, int flags)
{
    tcg_gen_qemu_ld_i64(t0, t1, flags, MO_LESL);
    tcg_gen_mov_i64(cpu_lock_addr, t1);
    tcg_gen_mov_i64(cpu_lock_value, t0);
}

static inline void gen_qemu_ldq_l(TCGv t0, TCGv t1, int flags)
{
    tcg_gen_qemu_ld_i64(t0, t1, flags, MO_LEQ);
    tcg_gen_mov_i64(cpu_lock_addr, t1);
    tcg_gen_mov_i64(cpu_lock_value, t0);
}
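/*
 * The load-locked helpers above record the address and the loaded value
 * in cpu_lock_addr/cpu_lock_value; gen_store_conditional() later emulates
 * the LDx_L/STx_C pair with an atomic cmpxchg against that recorded value.
 */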
static inline void gen_load_mem(DisasContext *ctx,
                                void (*tcg_gen_qemu_load)(TCGv t0, TCGv t1,
                                                          int flags),
                                int ra, int rb, int32_t disp16, bool fp,
                                bool clear)
{
    TCGv tmp, addr, va;

    /* LDQ_U with ra $31 is UNOP.  Other various loads are forms of
       prefetches, which we can treat as nops.  No worries about
       missed exceptions here.  */
    if (unlikely(ra == 31)) {
        return;
    }

    tmp = tcg_temp_new();
    addr = load_gpr(ctx, rb);

    if (disp16) {
        tcg_gen_addi_i64(tmp, addr, disp16);
        addr = tmp;
    }
    if (clear) {
        tcg_gen_andi_i64(tmp, addr, ~0x7);
        addr = tmp;
    }

    va = (fp ? cpu_fir[ra] : ctx->ir[ra]);
    tcg_gen_qemu_load(va, addr, ctx->mem_idx);

    tcg_temp_free(tmp);
}
static inline void gen_qemu_stf(TCGv t0, TCGv t1, int flags)
{
    TCGv_i32 tmp32 = tcg_temp_new_i32();
    gen_helper_f_to_memory(tmp32, t0);
    tcg_gen_qemu_st_i32(tmp32, t1, flags, MO_LEUL);
    tcg_temp_free_i32(tmp32);
}

static inline void gen_qemu_stg(TCGv t0, TCGv t1, int flags)
{
    TCGv tmp = tcg_temp_new();
    gen_helper_g_to_memory(tmp, t0);
    tcg_gen_qemu_st_i64(tmp, t1, flags, MO_LEQ);
    tcg_temp_free(tmp);
}

static inline void gen_qemu_sts(TCGv t0, TCGv t1, int flags)
{
    TCGv_i32 tmp32 = tcg_temp_new_i32();
    gen_helper_s_to_memory(tmp32, t0);
    tcg_gen_qemu_st_i32(tmp32, t1, flags, MO_LEUL);
    tcg_temp_free_i32(tmp32);
}

static inline void gen_store_mem(DisasContext *ctx,
                                 void (*tcg_gen_qemu_store)(TCGv t0, TCGv t1,
                                                            int flags),
                                 int ra, int rb, int32_t disp16, bool fp,
                                 bool clear)
{
    TCGv tmp, addr, va;

    tmp = tcg_temp_new();
    addr = load_gpr(ctx, rb);

    if (disp16) {
        tcg_gen_addi_i64(tmp, addr, disp16);
        addr = tmp;
    }
    if (clear) {
        tcg_gen_andi_i64(tmp, addr, ~0x7);
        addr = tmp;
    }

    va = (fp ? load_fpr(ctx, ra) : load_gpr(ctx, ra));
    tcg_gen_qemu_store(va, addr, ctx->mem_idx);

    tcg_temp_free(tmp);
}
static DisasJumpType gen_store_conditional(DisasContext *ctx, int ra, int rb,
                                           int32_t disp16, int mem_idx,
                                           MemOp op)
{
    TCGLabel *lab_fail, *lab_done;
    TCGv addr, val;

    addr = tcg_temp_new_i64();
    tcg_gen_addi_i64(addr, load_gpr(ctx, rb), disp16);
    free_context_temps(ctx);

    lab_fail = gen_new_label();
    lab_done = gen_new_label();
    tcg_gen_brcond_i64(TCG_COND_NE, addr, cpu_lock_addr, lab_fail);
    tcg_temp_free_i64(addr);

    val = tcg_temp_new_i64();
    tcg_gen_atomic_cmpxchg_i64(val, cpu_lock_addr, cpu_lock_value,
                               load_gpr(ctx, ra), mem_idx, op);
    free_context_temps(ctx);

    if (ra != 31) {
        tcg_gen_setcond_i64(TCG_COND_EQ, ctx->ir[ra], val, cpu_lock_value);
    }
    tcg_temp_free_i64(val);
    tcg_gen_br(lab_done);

    gen_set_label(lab_fail);
    if (ra != 31) {
        tcg_gen_movi_i64(ctx->ir[ra], 0);
    }

    gen_set_label(lab_done);
    tcg_gen_movi_i64(cpu_lock_addr, -1);
    return DISAS_NEXT;
}
static bool use_goto_tb(DisasContext *ctx, uint64_t dest)
{
#ifndef CONFIG_USER_ONLY
    /* Check for the dest on the same page as the start of the TB.  */
    return ((ctx->base.tb->pc ^ dest) & TARGET_PAGE_MASK) == 0;
#else
    return true;
#endif
}
static DisasJumpType gen_bdirect(DisasContext *ctx, int ra, int32_t disp)
{
    uint64_t dest = ctx->base.pc_next + (disp << 2);

    if (ra != 31) {
        tcg_gen_movi_i64(ctx->ir[ra], ctx->base.pc_next);
    }

    /* Notice branch-to-next; used to initialize RA with the PC.  */
    if (disp == 0) {
        return 0;
    } else if (use_goto_tb(ctx, dest)) {
        tcg_gen_goto_tb(0);
        tcg_gen_movi_i64(cpu_pc, dest);
        tcg_gen_exit_tb(ctx->base.tb, 0);
        return DISAS_NORETURN;
    } else {
        tcg_gen_movi_i64(cpu_pc, dest);
        return DISAS_PC_UPDATED;
    }
}
static DisasJumpType gen_bcond_internal(DisasContext *ctx, TCGCond cond,
                                        TCGv cmp, int32_t disp)
{
    uint64_t dest = ctx->base.pc_next + (disp << 2);
    TCGLabel *lab_true = gen_new_label();

    if (use_goto_tb(ctx, dest)) {
        tcg_gen_brcondi_i64(cond, cmp, 0, lab_true);

        tcg_gen_goto_tb(0);
        tcg_gen_movi_i64(cpu_pc, ctx->base.pc_next);
        tcg_gen_exit_tb(ctx->base.tb, 0);

        gen_set_label(lab_true);
        tcg_gen_goto_tb(1);
        tcg_gen_movi_i64(cpu_pc, dest);
        tcg_gen_exit_tb(ctx->base.tb, 1);

        return DISAS_NORETURN;
    } else {
        TCGv_i64 z = tcg_const_i64(0);
        TCGv_i64 d = tcg_const_i64(dest);
        TCGv_i64 p = tcg_const_i64(ctx->base.pc_next);

        tcg_gen_movcond_i64(cond, cpu_pc, cmp, z, d, p);

        tcg_temp_free_i64(z);
        tcg_temp_free_i64(d);
        tcg_temp_free_i64(p);
        return DISAS_PC_UPDATED;
    }
}
static DisasJumpType gen_bcond(DisasContext *ctx, TCGCond cond, int ra,
                               int32_t disp, int mask)
{
    if (mask) {
        DisasJumpType ret;
        TCGv tmp = tcg_temp_new();

        tcg_gen_andi_i64(tmp, load_gpr(ctx, ra), 1);
        ret = gen_bcond_internal(ctx, cond, tmp, disp);
        tcg_temp_free(tmp);
        return ret;
    }
    return gen_bcond_internal(ctx, cond, load_gpr(ctx, ra), disp);
}
/* Fold -0.0 for comparison with COND.  */

static void gen_fold_mzero(TCGCond cond, TCGv dest, TCGv src)
{
    uint64_t mzero = 1ull << 63;

    switch (cond) {
    case TCG_COND_LE:
    case TCG_COND_GT:
        /* For <= or >, the -0.0 value directly compares the way we want.  */
        tcg_gen_mov_i64(dest, src);
        break;

    case TCG_COND_EQ:
    case TCG_COND_NE:
        /* For == or !=, we can simply mask off the sign bit and compare.  */
        tcg_gen_andi_i64(dest, src, mzero - 1);
        break;

    case TCG_COND_GE:
    case TCG_COND_LT:
        /* For >= or <, map -0.0 to +0.0 via comparison and mask.  */
        tcg_gen_setcondi_i64(TCG_COND_NE, dest, src, mzero);
        tcg_gen_neg_i64(dest, dest);
        tcg_gen_and_i64(dest, dest, src);
        break;

    default:
        abort();
    }
}
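/*
 * Worked example for the >= / < case above: if src is -0.0
 * (0x8000000000000000), the setcond yields 0, the negation stays 0, and
 * the final AND produces +0.0; for any other src the negated comparison
 * is all-ones and src passes through unchanged.
 */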
static DisasJumpType gen_fbcond(DisasContext *ctx, TCGCond cond, int ra,
                                int32_t disp)
{
    DisasJumpType ret;
    TCGv cmp_tmp = tcg_temp_new();

    gen_fold_mzero(cond, cmp_tmp, load_fpr(ctx, ra));
    ret = gen_bcond_internal(ctx, cond, cmp_tmp, disp);
    tcg_temp_free(cmp_tmp);
    return ret;
}
static void gen_fcmov(DisasContext *ctx, TCGCond cond, int ra, int rb, int rc)
{
    TCGv va, vb, z;

    z = load_zero(ctx);
    vb = load_fpr(ctx, rb);
    va = tcg_temp_new();
    gen_fold_mzero(cond, va, load_fpr(ctx, ra));

    tcg_gen_movcond_i64(cond, dest_fpr(ctx, rc), va, z, vb, load_fpr(ctx, rc));

    tcg_temp_free(va);
}
#define QUAL_RM_N       0x080 /* Round mode nearest even */
#define QUAL_RM_C       0x000 /* Round mode chopped */
#define QUAL_RM_M       0x040 /* Round mode minus infinity */
#define QUAL_RM_D       0x0c0 /* Round mode dynamic */
#define QUAL_RM_MASK    0x0c0

#define QUAL_U          0x100 /* Underflow enable (fp output) */
#define QUAL_V          0x100 /* Overflow enable (int output) */
#define QUAL_S          0x400 /* Software completion enable */
#define QUAL_I          0x200 /* Inexact detection enable */
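/*
 * These qualifier bits correspond directly to bits of the instruction's
 * fn11 function field, so fn11 can be masked against them as-is; see
 * gen_qual_roundmode() and gen_qual_flushzero() below.
 */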
static void gen_qual_roundmode(DisasContext *ctx, int fn11)
{
    TCGv_i32 tmp;

    fn11 &= QUAL_RM_MASK;
    if (fn11 == ctx->tb_rm) {
        return;
    }
    ctx->tb_rm = fn11;

    tmp = tcg_temp_new_i32();
    switch (fn11) {
    case QUAL_RM_N:
        tcg_gen_movi_i32(tmp, float_round_nearest_even);
        break;
    case QUAL_RM_C:
        tcg_gen_movi_i32(tmp, float_round_to_zero);
        break;
    case QUAL_RM_M:
        tcg_gen_movi_i32(tmp, float_round_down);
        break;
    case QUAL_RM_D:
        tcg_gen_ld8u_i32(tmp, cpu_env,
                         offsetof(CPUAlphaState, fpcr_dyn_round));
        break;
    }

#if defined(CONFIG_SOFTFLOAT_INLINE)
    /* ??? The "fpu/softfloat.h" interface is to call set_float_rounding_mode.
       With CONFIG_SOFTFLOAT that expands to an out-of-line call that just
       sets the one field.  */
    tcg_gen_st8_i32(tmp, cpu_env,
                    offsetof(CPUAlphaState, fp_status.float_rounding_mode));
#else
    gen_helper_setroundmode(tmp);
#endif
    tcg_temp_free_i32(tmp);
}
static void gen_qual_flushzero(DisasContext *ctx, int fn11)
{
    TCGv_i32 tmp;

    fn11 &= QUAL_U;
    if (fn11 == ctx->tb_ftz) {
        return;
    }
    ctx->tb_ftz = fn11;

    tmp = tcg_temp_new_i32();
    if (fn11) {
        /* Underflow is enabled, use the FPCR setting.  */
        tcg_gen_ld8u_i32(tmp, cpu_env,
                         offsetof(CPUAlphaState, fpcr_flush_to_zero));
    } else {
        /* Underflow is disabled, force flush-to-zero.  */
        tcg_gen_movi_i32(tmp, 1);
    }

#if defined(CONFIG_SOFTFLOAT_INLINE)
    tcg_gen_st8_i32(tmp, cpu_env,
                    offsetof(CPUAlphaState, fp_status.flush_to_zero));
#else
    gen_helper_setflushzero(tmp);
#endif
    tcg_temp_free_i32(tmp);
}
static TCGv gen_ieee_input(DisasContext *ctx, int reg, int fn11, int is_cmp)
{
    TCGv val;

    if (unlikely(reg == 31)) {
        val = load_zero(ctx);
    } else {
        val = cpu_fir[reg];
        if ((fn11 & QUAL_S) == 0) {
            if (is_cmp) {
                gen_helper_ieee_input_cmp(cpu_env, val);
            } else {
                gen_helper_ieee_input(cpu_env, val);
            }
        } else {
#ifndef CONFIG_USER_ONLY
            /* In system mode, raise exceptions for denormals like real
               hardware.  In user mode, proceed as if the OS completion
               handler is handling the denormal as per spec.  */
            gen_helper_ieee_input_s(cpu_env, val);
#endif
        }
    }
    return val;
}
static void gen_fp_exc_raise(int rc, int fn11)
{
    /* ??? We ought to be able to do something with imprecise exceptions.
       E.g. notice we're still in the trap shadow of something within the
       TB and do not generate the code to signal the exception; end the TB
       when an exception is forced to arrive, either by consumption of a
       register value or TRAPB or EXCB.  */

    TCGv_i32 reg, ign;
    uint32_t ignore = 0;

    if (!(fn11 & QUAL_U)) {
        /* Note that QUAL_U == QUAL_V, so ignore either.  */
        ignore |= FPCR_UNF | FPCR_IOV;
    }
    if (!(fn11 & QUAL_I)) {
        ignore |= FPCR_INE;
    }
    ign = tcg_const_i32(ignore);

    /* ??? Pass in the regno of the destination so that the helper can
       set EXC_MASK, which contains a bitmask of destination registers
       that have caused arithmetic traps.  A simple userspace emulation
       does not require this.  We do need it for a guest kernel's entArith,
       or if we were to do something clever with imprecise exceptions.  */
    reg = tcg_const_i32(rc + 32);
    if (fn11 & QUAL_S) {
        gen_helper_fp_exc_raise_s(cpu_env, ign, reg);
    } else {
        gen_helper_fp_exc_raise(cpu_env, ign, reg);
    }

    tcg_temp_free_i32(reg);
    tcg_temp_free_i32(ign);
}
static void gen_cvtlq(TCGv vc, TCGv vb)
{
    TCGv tmp = tcg_temp_new();

    /* The arithmetic right shift here, plus the sign-extended mask below
       yields a sign-extended result without an explicit ext32s_i64.  */
    tcg_gen_shri_i64(tmp, vb, 29);
    tcg_gen_sari_i64(vc, vb, 32);
    tcg_gen_deposit_i64(vc, vc, tmp, 0, 30);

    tcg_temp_free(tmp);
}
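/*
 * CVTLQ unpacks a longword stored in FP-register format: register bits
 * <63:62> hold integer bits <31:30> and register bits <58:29> hold
 * integer bits <29:0>, which is exactly what the shift-by-29 /
 * shift-by-32 pair above reassembles.
 */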
static void gen_ieee_arith2(DisasContext *ctx,
                            void (*helper)(TCGv, TCGv_ptr, TCGv),
                            int rb, int rc, int fn11)
{
    TCGv vb;

    gen_qual_roundmode(ctx, fn11);
    gen_qual_flushzero(ctx, fn11);

    vb = gen_ieee_input(ctx, rb, fn11, 0);
    helper(dest_fpr(ctx, rc), cpu_env, vb);

    gen_fp_exc_raise(rc, fn11);
}

#define IEEE_ARITH2(name)                                       \
static inline void glue(gen_, name)(DisasContext *ctx,          \
                                    int rb, int rc, int fn11)   \
{                                                               \
    gen_ieee_arith2(ctx, gen_helper_##name, rb, rc, fn11);      \
}
IEEE_ARITH2(sqrts)
IEEE_ARITH2(sqrtt)
IEEE_ARITH2(cvtst)
IEEE_ARITH2(cvtts)
static void gen_cvttq(DisasContext *ctx, int rb, int rc, int fn11)
{
    TCGv vb, vc;

    /* No need to set flushzero, since we have an integer output.  */
    vb = gen_ieee_input(ctx, rb, fn11, 0);
    vc = dest_fpr(ctx, rc);

    /* Almost all integer conversions use cropped rounding;
       special case that.  */
    if ((fn11 & QUAL_RM_MASK) == QUAL_RM_C) {
        gen_helper_cvttq_c(vc, cpu_env, vb);
    } else {
        gen_qual_roundmode(ctx, fn11);
        gen_helper_cvttq(vc, cpu_env, vb);
    }
    gen_fp_exc_raise(rc, fn11);
}
static void gen_ieee_intcvt(DisasContext *ctx,
                            void (*helper)(TCGv, TCGv_ptr, TCGv),
                            int rb, int rc, int fn11)
{
    TCGv vb, vc;

    gen_qual_roundmode(ctx, fn11);
    vb = load_fpr(ctx, rb);
    vc = dest_fpr(ctx, rc);

    /* The only exception that can be raised by integer conversion
       is inexact.  Thus we only need to worry about exceptions when
       inexact handling is requested.  */
    if (fn11 & QUAL_I) {
        helper(vc, cpu_env, vb);
        gen_fp_exc_raise(rc, fn11);
    } else {
        helper(vc, cpu_env, vb);
    }
}

#define IEEE_INTCVT(name)                                       \
static inline void glue(gen_, name)(DisasContext *ctx,          \
                                    int rb, int rc, int fn11)   \
{                                                               \
    gen_ieee_intcvt(ctx, gen_helper_##name, rb, rc, fn11);      \
}
IEEE_INTCVT(cvtqs)
IEEE_INTCVT(cvtqt)
static void gen_cpy_mask(TCGv vc, TCGv va, TCGv vb, bool inv_a, uint64_t mask)
{
    TCGv vmask = tcg_const_i64(mask);
    TCGv tmp = tcg_temp_new_i64();

    if (inv_a) {
        tcg_gen_andc_i64(tmp, vmask, va);
    } else {
        tcg_gen_and_i64(tmp, va, vmask);
    }

    tcg_gen_andc_i64(vc, vb, vmask);
    tcg_gen_or_i64(vc, vc, tmp);

    tcg_temp_free(vmask);
    tcg_temp_free(tmp);
}
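/*
 * gen_cpy_mask() is the common core of the CPYS family: the masked bits
 * come from va (optionally inverted), the remaining bits from vb.  The
 * callers below pass 0x8000000000000000 to copy just the sign bit
 * (CPYS/CPYSN) and 0xFFF0000000000000 to copy sign plus exponent (CPYSE).
 */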
static void gen_ieee_arith3(DisasContext *ctx,
                            void (*helper)(TCGv, TCGv_ptr, TCGv, TCGv),
                            int ra, int rb, int rc, int fn11)
{
    TCGv va, vb, vc;

    gen_qual_roundmode(ctx, fn11);
    gen_qual_flushzero(ctx, fn11);

    va = gen_ieee_input(ctx, ra, fn11, 0);
    vb = gen_ieee_input(ctx, rb, fn11, 0);
    vc = dest_fpr(ctx, rc);
    helper(vc, cpu_env, va, vb);

    gen_fp_exc_raise(rc, fn11);
}

#define IEEE_ARITH3(name)                                               \
static inline void glue(gen_, name)(DisasContext *ctx,                  \
                                    int ra, int rb, int rc, int fn11)   \
{                                                                       \
    gen_ieee_arith3(ctx, gen_helper_##name, ra, rb, rc, fn11);          \
}
IEEE_ARITH3(adds)
IEEE_ARITH3(subs)
IEEE_ARITH3(muls)
IEEE_ARITH3(divs)
IEEE_ARITH3(addt)
IEEE_ARITH3(subt)
IEEE_ARITH3(mult)
IEEE_ARITH3(divt)
static void gen_ieee_compare(DisasContext *ctx,
                             void (*helper)(TCGv, TCGv_ptr, TCGv, TCGv),
                             int ra, int rb, int rc, int fn11)
{
    TCGv va, vb, vc;

    va = gen_ieee_input(ctx, ra, fn11, 1);
    vb = gen_ieee_input(ctx, rb, fn11, 1);
    vc = dest_fpr(ctx, rc);
    helper(vc, cpu_env, va, vb);

    gen_fp_exc_raise(rc, fn11);
}

#define IEEE_CMP3(name)                                                 \
static inline void glue(gen_, name)(DisasContext *ctx,                  \
                                    int ra, int rb, int rc, int fn11)   \
{                                                                       \
    gen_ieee_compare(ctx, gen_helper_##name, ra, rb, rc, fn11);         \
}
IEEE_CMP3(cmptun)
IEEE_CMP3(cmpteq)
IEEE_CMP3(cmptlt)
IEEE_CMP3(cmptle)
static inline uint64_t zapnot_mask(uint8_t lit)
{
    uint64_t mask = 0;
    int i;

    for (i = 0; i < 8; ++i) {
        if ((lit >> i) & 1) {
            mask |= 0xffull << (i * 8);
        }
    }
    return mask;
}
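/* Example: lit == 0x01 yields the mask 0x00000000000000ff and
   lit == 0x0f yields 0x00000000ffffffff, matching the ext8u/ext32u
   special cases in gen_zapnoti() below.  */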
/* Implement zapnot with an immediate operand, which expands to some
   form of immediate AND.  This is a basic building block in the
   definition of many of the other byte manipulation instructions.  */
static void gen_zapnoti(TCGv dest, TCGv src, uint8_t lit)
{
    switch (lit) {
    case 0x00:
        tcg_gen_movi_i64(dest, 0);
        break;
    case 0x01:
        tcg_gen_ext8u_i64(dest, src);
        break;
    case 0x03:
        tcg_gen_ext16u_i64(dest, src);
        break;
    case 0x0f:
        tcg_gen_ext32u_i64(dest, src);
        break;
    case 0xff:
        tcg_gen_mov_i64(dest, src);
        break;
    default:
        tcg_gen_andi_i64(dest, src, zapnot_mask(lit));
        break;
    }
}
/* EXTWH, EXTLH, EXTQH */
static void gen_ext_h(DisasContext *ctx, TCGv vc, TCGv va, int rb, bool islit,
                      uint8_t lit, uint8_t byte_mask)
{
    if (islit) {
        int pos = (64 - lit * 8) & 0x3f;
        int len = cto32(byte_mask) * 8;
        if (pos < len) {
            tcg_gen_deposit_z_i64(vc, va, pos, len - pos);
        } else {
            tcg_gen_movi_i64(vc, 0);
        }
    } else {
        TCGv tmp = tcg_temp_new();
        tcg_gen_shli_i64(tmp, load_gpr(ctx, rb), 3);
        tcg_gen_neg_i64(tmp, tmp);
        tcg_gen_andi_i64(tmp, tmp, 0x3f);
        tcg_gen_shl_i64(vc, va, tmp);
        tcg_temp_free(tmp);
    }
    gen_zapnoti(vc, vc, byte_mask);
}
/* EXTBL, EXTWL, EXTLL, EXTQL */
static void gen_ext_l(DisasContext *ctx, TCGv vc, TCGv va, int rb, bool islit,
                      uint8_t lit, uint8_t byte_mask)
{
    if (islit) {
        int pos = (lit & 7) * 8;
        int len = cto32(byte_mask) * 8;
        if (pos + len >= 64) {
            len = 64 - pos;
        }
        tcg_gen_extract_i64(vc, va, pos, len);
    } else {
        TCGv tmp = tcg_temp_new();
        tcg_gen_andi_i64(tmp, load_gpr(ctx, rb), 7);
        tcg_gen_shli_i64(tmp, tmp, 3);
        tcg_gen_shr_i64(vc, va, tmp);
        tcg_temp_free(tmp);
        gen_zapnoti(vc, vc, byte_mask);
    }
}
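/* Example: EXTBL with a byte offset of 5 reduces to
   vc = (va >> 40) & 0xff, i.e. pos = 40 and len = 8 in the
   tcg_gen_extract_i64 above.  */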
/* INSWH, INSLH, INSQH */
static void gen_ins_h(DisasContext *ctx, TCGv vc, TCGv va, int rb, bool islit,
                      uint8_t lit, uint8_t byte_mask)
{
    if (islit) {
        int pos = 64 - (lit & 7) * 8;
        int len = cto32(byte_mask) * 8;
        if (pos < len) {
            tcg_gen_extract_i64(vc, va, pos, len - pos);
        } else {
            tcg_gen_movi_i64(vc, 0);
        }
    } else {
        TCGv tmp = tcg_temp_new();
        TCGv shift = tcg_temp_new();

        /* The instruction description has us left-shift the byte mask
           and extract bits <15:8> and apply that zap at the end.  This
           is equivalent to simply performing the zap first and shifting
           afterward.  */
        gen_zapnoti(tmp, va, byte_mask);

        /* If (B & 7) == 0, we need to shift by 64 and leave a zero.  Do this
           portably by splitting the shift into two parts: shift_count-1 and 1.
           Arrange for the -1 by using ones-complement instead of
           twos-complement in the negation: ~(B * 8) & 63.  */
        tcg_gen_shli_i64(shift, load_gpr(ctx, rb), 3);
        tcg_gen_not_i64(shift, shift);
        tcg_gen_andi_i64(shift, shift, 0x3f);

        tcg_gen_shr_i64(vc, tmp, shift);
        tcg_gen_shri_i64(vc, vc, 1);
        tcg_temp_free(shift);
        tcg_temp_free(tmp);
    }
}
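/*
 * Why the two-part shift works: ~(B * 8) & 63 equals 63 - (B * 8 & 63),
 * one less than the 64 - B*8 shift the architecture specifies; the extra
 * constant shift-right by 1 above makes up the difference, and for
 * B*8 == 0 the two shifts total 64, correctly producing zero.
 */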
/* INSBL, INSWL, INSLL, INSQL */
static void gen_ins_l(DisasContext *ctx, TCGv vc, TCGv va, int rb, bool islit,
                      uint8_t lit, uint8_t byte_mask)
{
    if (islit) {
        int pos = (lit & 7) * 8;
        int len = cto32(byte_mask) * 8;
        if (pos + len > 64) {
            len = 64 - pos;
        }
        tcg_gen_deposit_z_i64(vc, va, pos, len);
    } else {
        TCGv tmp = tcg_temp_new();
        TCGv shift = tcg_temp_new();

        /* The instruction description has us left-shift the byte mask
           and extract bits <15:8> and apply that zap at the end.  This
           is equivalent to simply performing the zap first and shifting
           afterward.  */
        gen_zapnoti(tmp, va, byte_mask);

        tcg_gen_andi_i64(shift, load_gpr(ctx, rb), 7);
        tcg_gen_shli_i64(shift, shift, 3);
        tcg_gen_shl_i64(vc, tmp, shift);
        tcg_temp_free(shift);
        tcg_temp_free(tmp);
    }
}
/* MSKWH, MSKLH, MSKQH */
static void gen_msk_h(DisasContext *ctx, TCGv vc, TCGv va, int rb, bool islit,
                      uint8_t lit, uint8_t byte_mask)
{
    if (islit) {
        gen_zapnoti(vc, va, ~((byte_mask << (lit & 7)) >> 8));
    } else {
        TCGv shift = tcg_temp_new();
        TCGv mask = tcg_temp_new();

        /* The instruction description is as above, where the byte_mask
           is shifted left, and then we extract bits <15:8>.  This can be
           emulated with a right-shift on the expanded byte mask.  This
           requires extra care because for an input <2:0> == 0 we need a
           shift of 64 bits in order to generate a zero.  This is done by
           splitting the shift into two parts, the variable shift - 1
           followed by a constant 1 shift.  The code we expand below is
           equivalent to ~(B * 8) & 63.  */

        tcg_gen_shli_i64(shift, load_gpr(ctx, rb), 3);
        tcg_gen_not_i64(shift, shift);
        tcg_gen_andi_i64(shift, shift, 0x3f);
        tcg_gen_movi_i64(mask, zapnot_mask(byte_mask));
        tcg_gen_shr_i64(mask, mask, shift);
        tcg_gen_shri_i64(mask, mask, 1);

        tcg_gen_andc_i64(vc, va, mask);

        tcg_temp_free(mask);
        tcg_temp_free(shift);
    }
}
/* MSKBL, MSKWL, MSKLL, MSKQL */
static void gen_msk_l(DisasContext *ctx, TCGv vc, TCGv va, int rb, bool islit,
                      uint8_t lit, uint8_t byte_mask)
{
    if (islit) {
        gen_zapnoti(vc, va, ~(byte_mask << (lit & 7)));
    } else {
        TCGv shift = tcg_temp_new();
        TCGv mask = tcg_temp_new();

        tcg_gen_andi_i64(shift, load_gpr(ctx, rb), 7);
        tcg_gen_shli_i64(shift, shift, 3);
        tcg_gen_movi_i64(mask, zapnot_mask(byte_mask));
        tcg_gen_shl_i64(mask, mask, shift);

        tcg_gen_andc_i64(vc, va, mask);

        tcg_temp_free(mask);
        tcg_temp_free(shift);
    }
}
static void gen_rx(DisasContext *ctx, int ra, int set)
{
    TCGv tmp;

    if (ra != 31) {
        ld_flag_byte(ctx->ir[ra], ENV_FLAG_RX_SHIFT);
    }

    tmp = tcg_const_i64(set);
    st_flag_byte(tmp, ENV_FLAG_RX_SHIFT);
    tcg_temp_free(tmp);
}
static DisasJumpType gen_call_pal(DisasContext *ctx, int palcode)
{
    /* We're emulating OSF/1 PALcode.  Many of these are trivial access
       to internal cpu registers.  */

    /* Unprivileged PAL call */
    if (palcode >= 0x80 && palcode < 0xC0) {
        switch (palcode) {
        case 0x86:
            /* IMB */
            /* No-op inside QEMU.  */
            break;
        case 0x9E:
            /* RDUNIQUE */
            tcg_gen_ld_i64(ctx->ir[IR_V0], cpu_env,
                           offsetof(CPUAlphaState, unique));
            break;
        case 0x9F:
            /* WRUNIQUE */
            tcg_gen_st_i64(ctx->ir[IR_A0], cpu_env,
                           offsetof(CPUAlphaState, unique));
            break;
        default:
            palcode &= 0xbf;
            goto do_call_pal;
        }
        return DISAS_NEXT;
    }

#ifndef CONFIG_USER_ONLY
    /* Privileged PAL code */
    if (palcode < 0x40 && (ctx->tbflags & ENV_FLAG_PS_USER) == 0) {
        switch (palcode) {
        case 0x01:
            /* CFLUSH */
            /* No-op inside QEMU.  */
            break;
        case 0x02:
            /* DRAINA */
            /* No-op inside QEMU.  */
            break;
        case 0x2D:
            /* WRVPTPTR */
            tcg_gen_st_i64(ctx->ir[IR_A0], cpu_env,
                           offsetof(CPUAlphaState, vptptr));
            break;
        case 0x31:
            /* WRVAL */
            tcg_gen_st_i64(ctx->ir[IR_A0], cpu_env,
                           offsetof(CPUAlphaState, sysval));
            break;
        case 0x32:
            /* RDVAL */
            tcg_gen_ld_i64(ctx->ir[IR_V0], cpu_env,
                           offsetof(CPUAlphaState, sysval));
            break;

        case 0x35:
            /* SWPIPL */
            /* Note that we already know we're in kernel mode, so we know
               that PS only contains the 3 IPL bits.  */
            ld_flag_byte(ctx->ir[IR_V0], ENV_FLAG_PS_SHIFT);

            /* But make sure and store only the 3 IPL bits from the user.  */
            {
                TCGv tmp = tcg_temp_new();
                tcg_gen_andi_i64(tmp, ctx->ir[IR_A0], PS_INT_MASK);
                st_flag_byte(tmp, ENV_FLAG_PS_SHIFT);
                tcg_temp_free(tmp);
            }

            /* Allow interrupts to be recognized right away.  */
            tcg_gen_movi_i64(cpu_pc, ctx->base.pc_next);
            return DISAS_PC_UPDATED_NOCHAIN;

        case 0x36:
            /* RDPS */
            ld_flag_byte(ctx->ir[IR_V0], ENV_FLAG_PS_SHIFT);
            break;

        case 0x38:
            /* WRUSP */
            tcg_gen_st_i64(ctx->ir[IR_A0], cpu_env,
                           offsetof(CPUAlphaState, usp));
            break;
        case 0x3A:
            /* RDUSP */
            tcg_gen_ld_i64(ctx->ir[IR_V0], cpu_env,
                           offsetof(CPUAlphaState, usp));
            break;
        case 0x3C:
            /* WHAMI */
            tcg_gen_ld32s_i64(ctx->ir[IR_V0], cpu_env,
                -offsetof(AlphaCPU, env) + offsetof(CPUState, cpu_index));
            break;

        case 0x3E:
            /* WTINT */
            {
                TCGv_i32 tmp = tcg_const_i32(1);
                tcg_gen_st_i32(tmp, cpu_env, -offsetof(AlphaCPU, env) +
                               offsetof(CPUState, halted));
                tcg_temp_free_i32(tmp);
            }
            tcg_gen_movi_i64(ctx->ir[IR_V0], 0);
            return gen_excp(ctx, EXCP_HALTED, 0);

        default:
            palcode &= 0x3f;
            goto do_call_pal;
        }
        return DISAS_NEXT;
    }
#endif
    return gen_invalid(ctx);

 do_call_pal:
#ifdef CONFIG_USER_ONLY
    return gen_excp(ctx, EXCP_CALL_PAL, palcode);
#else
    {
        TCGv tmp = tcg_temp_new();
        uint64_t exc_addr = ctx->base.pc_next;
        uint64_t entry = ctx->palbr;

        if (ctx->tbflags & ENV_FLAG_PAL_MODE) {
            exc_addr |= 1;
        } else {
            tcg_gen_movi_i64(tmp, 1);
            st_flag_byte(tmp, ENV_FLAG_PAL_SHIFT);
        }

        tcg_gen_movi_i64(tmp, exc_addr);
        tcg_gen_st_i64(tmp, cpu_env, offsetof(CPUAlphaState, exc_addr));
        tcg_temp_free(tmp);

        entry += (palcode & 0x80
                  ? 0x2000 + (palcode - 0x80) * 64
                  : 0x1000 + palcode * 64);

        /* Since the destination is running in PALmode, we don't really
           need the page permissions check.  We'll see the existence of
           the page when we create the TB, and we'll flush all TBs if
           we change the PAL base register.  */
        if (!ctx->base.singlestep_enabled) {
            tcg_gen_goto_tb(0);
            tcg_gen_movi_i64(cpu_pc, entry);
            tcg_gen_exit_tb(ctx->base.tb, 0);
            return DISAS_NORETURN;
        } else {
            tcg_gen_movi_i64(cpu_pc, entry);
            return DISAS_PC_UPDATED;
        }
    }
#endif
}
#ifndef CONFIG_USER_ONLY

#define PR_LONG         0x200000

static int cpu_pr_data(int pr)
{
    switch (pr) {
    case  2: return offsetof(CPUAlphaState, pcc_ofs) | PR_LONG;
    case  3: return offsetof(CPUAlphaState, trap_arg0);
    case  4: return offsetof(CPUAlphaState, trap_arg1);
    case  5: return offsetof(CPUAlphaState, trap_arg2);
    case  6: return offsetof(CPUAlphaState, exc_addr);
    case  7: return offsetof(CPUAlphaState, palbr);
    case  8: return offsetof(CPUAlphaState, ptbr);
    case  9: return offsetof(CPUAlphaState, vptptr);
    case 10: return offsetof(CPUAlphaState, unique);
    case 11: return offsetof(CPUAlphaState, sysval);
    case 12: return offsetof(CPUAlphaState, usp);

    case 40 ... 63:
        return offsetof(CPUAlphaState, scratch[pr - 40]);

    case 251:
        return offsetof(CPUAlphaState, alarm_expire);
    }
    return 0;
}
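/*
 * PR_LONG flags processor registers that live in 32-bit fields of
 * CPUAlphaState; gen_mfpr()/gen_mtpr() below strip it with ~PR_LONG and
 * use 32-bit loads and stores for those registers.
 */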
static DisasJumpType gen_mfpr(DisasContext *ctx, TCGv va, int regno)
{
    void (*helper)(TCGv);
    int data;

    switch (regno) {
    case 32 ... 39:
        /* Accessing the "non-shadow" general registers.  */
        regno = regno == 39 ? 25 : regno - 32 + 8;
        tcg_gen_mov_i64(va, cpu_std_ir[regno]);
        break;

    case 250: /* WALLTIME */
        helper = gen_helper_get_walltime;
        goto do_helper;
    case 249: /* VMTIME */
        helper = gen_helper_get_vmtime;
    do_helper:
        if (tb_cflags(ctx->base.tb) & CF_USE_ICOUNT) {
            gen_io_start();
            helper(va);
            return DISAS_PC_STALE;
        } else {
            helper(va);
        }
        break;

    case 0: /* PS */
        ld_flag_byte(va, ENV_FLAG_PS_SHIFT);
        break;
    case 1: /* FEN */
        ld_flag_byte(va, ENV_FLAG_FEN_SHIFT);
        break;

    default:
        /* The basic registers are data only, and unknown registers
           are read-zero, write-ignore.  */
        data = cpu_pr_data(regno);
        if (data == 0) {
            tcg_gen_movi_i64(va, 0);
        } else if (data & PR_LONG) {
            tcg_gen_ld32s_i64(va, cpu_env, data & ~PR_LONG);
        } else {
            tcg_gen_ld_i64(va, cpu_env, data);
        }
        break;
    }

    return DISAS_NEXT;
}
static DisasJumpType gen_mtpr(DisasContext *ctx, TCGv vb, int regno)
{
    int data;
    DisasJumpType ret = DISAS_NEXT;

    switch (regno) {
    case 255:
        /* TBIA */
        gen_helper_tbia(cpu_env);
        break;

    case 254:
        /* TBIS */
        gen_helper_tbis(cpu_env, vb);
        break;

    case 253:
        /* WAIT */
        {
            TCGv_i32 tmp = tcg_const_i32(1);
            tcg_gen_st_i32(tmp, cpu_env, -offsetof(AlphaCPU, env) +
                           offsetof(CPUState, halted));
            tcg_temp_free_i32(tmp);
        }
        return gen_excp(ctx, EXCP_HALTED, 0);

    case 252:
        /* HALT */
        gen_helper_halt(vb);
        return DISAS_PC_STALE;

    case 251:
        /* ALARM */
        if (tb_cflags(ctx->base.tb) & CF_USE_ICOUNT) {
            gen_io_start();
            ret = DISAS_PC_STALE;
        }
        gen_helper_set_alarm(cpu_env, vb);
        break;

    case 7:
        /* PALBR */
        tcg_gen_st_i64(vb, cpu_env, offsetof(CPUAlphaState, palbr));
        /* Changing the PAL base register implies un-chaining all of the TBs
           that ended with a CALL_PAL.  Since the base register usually only
           changes during boot, flushing everything works well.  */
        gen_helper_tb_flush(cpu_env);
        return DISAS_PC_STALE;

    case 32 ... 39:
        /* Accessing the "non-shadow" general registers.  */
        regno = regno == 39 ? 25 : regno - 32 + 8;
        tcg_gen_mov_i64(cpu_std_ir[regno], vb);
        break;

    case 0: /* PS */
        st_flag_byte(vb, ENV_FLAG_PS_SHIFT);
        break;
    case 1: /* FEN */
        st_flag_byte(vb, ENV_FLAG_FEN_SHIFT);
        break;

    default:
        /* The basic registers are data only, and unknown registers
           are read-zero, write-ignore.  */
        data = cpu_pr_data(regno);
        if (data != 0) {
            if (data & PR_LONG) {
                tcg_gen_st32_i64(vb, cpu_env, data & ~PR_LONG);
            } else {
                tcg_gen_st_i64(vb, cpu_env, data);
            }
        }
        break;
    }

    return ret;
}
#endif /* !USER_ONLY */
#define REQUIRE_NO_LIT                          \
    do {                                        \
        if (real_islit) {                       \
            goto invalid_opc;                   \
        }                                       \
    } while (0)

#define REQUIRE_AMASK(FLAG)                     \
    do {                                        \
        if ((ctx->amask & AMASK_##FLAG) == 0) { \
            goto invalid_opc;                   \
        }                                       \
    } while (0)

#define REQUIRE_TB_FLAG(FLAG)                   \
    do {                                        \
        if ((ctx->tbflags & (FLAG)) == 0) {     \
            goto invalid_opc;                   \
        }                                       \
    } while (0)

#define REQUIRE_REG_31(WHICH)                   \
    do {                                        \
        if (WHICH != 31) {                      \
            goto invalid_opc;                   \
        }                                       \
    } while (0)

#define REQUIRE_FEN                             \
    do {                                        \
        if (!(ctx->tbflags & ENV_FLAG_FEN)) {   \
            goto raise_fen;                     \
        }                                       \
    } while (0)
static DisasJumpType translate_one(DisasContext *ctx, uint32_t insn)
{
    int32_t disp21, disp16, disp12 __attribute__((unused));
    uint16_t fn11;
    uint8_t opc, ra, rb, rc, fpfn, fn7, lit;
    bool islit, real_islit;
    TCGv va, vb, vc, tmp, tmp2;
    TCGv_i32 t32;
    DisasJumpType ret;

    /* Decode all instruction fields */
    opc = extract32(insn, 26, 6);
    ra = extract32(insn, 21, 5);
    rb = extract32(insn, 16, 5);
    rc = extract32(insn, 0, 5);
    real_islit = islit = extract32(insn, 12, 1);
    lit = extract32(insn, 13, 8);

    disp21 = sextract32(insn, 0, 21);
    disp16 = sextract32(insn, 0, 16);
    disp12 = sextract32(insn, 0, 12);

    fn11 = extract32(insn, 5, 11);
    fpfn = extract32(insn, 5, 6);
    fn7 = extract32(insn, 5, 7);

    if (rb == 31 && !islit) {
        islit = true;
        lit = 0;
    }

    ret = DISAS_NEXT;
    switch (opc) {
    case 0x00:
        /* CALL_PAL */
        ret = gen_call_pal(ctx, insn & 0x03ffffff);
        break;
    case 0x09:
        /* LDAH */
        disp16 = (uint32_t)disp16 << 16;
        /* fall through */
    case 0x08:
        /* LDA */
        va = dest_gpr(ctx, ra);
        /* It's worth special-casing immediate loads.  */
        if (rb == 31) {
            tcg_gen_movi_i64(va, disp16);
        } else {
            tcg_gen_addi_i64(va, load_gpr(ctx, rb), disp16);
        }
        break;

    case 0x0A:
        /* LDBU */
        gen_load_mem(ctx, &tcg_gen_qemu_ld8u, ra, rb, disp16, 0, 0);
        break;
    case 0x0B:
        /* LDQ_U */
        gen_load_mem(ctx, &tcg_gen_qemu_ld64, ra, rb, disp16, 0, 1);
        break;
    case 0x0C:
        /* LDWU */
        gen_load_mem(ctx, &tcg_gen_qemu_ld16u, ra, rb, disp16, 0, 0);
        break;
    case 0x0D:
        /* STW */
        gen_store_mem(ctx, &tcg_gen_qemu_st16, ra, rb, disp16, 0, 0);
        break;
    case 0x0E:
        /* STB */
        gen_store_mem(ctx, &tcg_gen_qemu_st8, ra, rb, disp16, 0, 0);
        break;
    case 0x0F:
        /* STQ_U */
        gen_store_mem(ctx, &tcg_gen_qemu_st64, ra, rb, disp16, 0, 1);
        break;
    case 0x10:
        vc = dest_gpr(ctx, rc);
        vb = load_gpr_lit(ctx, rb, lit, islit);

        if (ra == 31) {
            if (fn7 == 0x00) {
                /* Special case ADDL as SEXTL.  */
                tcg_gen_ext32s_i64(vc, vb);
                break;
            }
            if (fn7 == 0x29) {
                /* Special case SUBQ as NEGQ.  */
                tcg_gen_neg_i64(vc, vb);
                break;
            }
        }

        va = load_gpr(ctx, ra);
        switch (fn7) {
        case 0x00:
            /* ADDL */
            tcg_gen_add_i64(vc, va, vb);
            tcg_gen_ext32s_i64(vc, vc);
            break;
        case 0x02:
            /* S4ADDL */
            tmp = tcg_temp_new();
            tcg_gen_shli_i64(tmp, va, 2);
            tcg_gen_add_i64(tmp, tmp, vb);
            tcg_gen_ext32s_i64(vc, tmp);
            tcg_temp_free(tmp);
            break;
        case 0x09:
            /* SUBL */
            tcg_gen_sub_i64(vc, va, vb);
            tcg_gen_ext32s_i64(vc, vc);
            break;
        case 0x0B:
            /* S4SUBL */
            tmp = tcg_temp_new();
            tcg_gen_shli_i64(tmp, va, 2);
            tcg_gen_sub_i64(tmp, tmp, vb);
            tcg_gen_ext32s_i64(vc, tmp);
            tcg_temp_free(tmp);
            break;
        case 0x0F:
            /* CMPBGE */
            if (ra == 31) {
                /* Special case 0 >= X as X == 0.  */
                gen_helper_cmpbe0(vc, vb);
            } else {
                gen_helper_cmpbge(vc, va, vb);
            }
            break;
        case 0x12:
            /* S8ADDL */
            tmp = tcg_temp_new();
            tcg_gen_shli_i64(tmp, va, 3);
            tcg_gen_add_i64(tmp, tmp, vb);
            tcg_gen_ext32s_i64(vc, tmp);
            tcg_temp_free(tmp);
            break;
        case 0x1B:
            /* S8SUBL */
            tmp = tcg_temp_new();
            tcg_gen_shli_i64(tmp, va, 3);
            tcg_gen_sub_i64(tmp, tmp, vb);
            tcg_gen_ext32s_i64(vc, tmp);
            tcg_temp_free(tmp);
            break;
        case 0x1D:
            /* CMPULT */
            tcg_gen_setcond_i64(TCG_COND_LTU, vc, va, vb);
            break;
        case 0x20:
            /* ADDQ */
            tcg_gen_add_i64(vc, va, vb);
            break;
        case 0x22:
            /* S4ADDQ */
            tmp = tcg_temp_new();
            tcg_gen_shli_i64(tmp, va, 2);
            tcg_gen_add_i64(vc, tmp, vb);
            tcg_temp_free(tmp);
            break;
        case 0x29:
            /* SUBQ */
            tcg_gen_sub_i64(vc, va, vb);
            break;
        case 0x2B:
            /* S4SUBQ */
            tmp = tcg_temp_new();
            tcg_gen_shli_i64(tmp, va, 2);
            tcg_gen_sub_i64(vc, tmp, vb);
            tcg_temp_free(tmp);
            break;
        case 0x2D:
            /* CMPEQ */
            tcg_gen_setcond_i64(TCG_COND_EQ, vc, va, vb);
            break;
        case 0x32:
            /* S8ADDQ */
            tmp = tcg_temp_new();
            tcg_gen_shli_i64(tmp, va, 3);
            tcg_gen_add_i64(vc, tmp, vb);
            tcg_temp_free(tmp);
            break;
        case 0x3B:
            /* S8SUBQ */
            tmp = tcg_temp_new();
            tcg_gen_shli_i64(tmp, va, 3);
            tcg_gen_sub_i64(vc, tmp, vb);
            tcg_temp_free(tmp);
            break;
        case 0x3D:
            /* CMPULE */
            tcg_gen_setcond_i64(TCG_COND_LEU, vc, va, vb);
            break;
        case 0x40:
            /* ADDL/V */
            tmp = tcg_temp_new();
            tcg_gen_ext32s_i64(tmp, va);
            tcg_gen_ext32s_i64(vc, vb);
            tcg_gen_add_i64(tmp, tmp, vc);
            tcg_gen_ext32s_i64(vc, tmp);
            gen_helper_check_overflow(cpu_env, vc, tmp);
            tcg_temp_free(tmp);
            break;
        case 0x49:
            /* SUBL/V */
            tmp = tcg_temp_new();
            tcg_gen_ext32s_i64(tmp, va);
            tcg_gen_ext32s_i64(vc, vb);
            tcg_gen_sub_i64(tmp, tmp, vc);
            tcg_gen_ext32s_i64(vc, tmp);
            gen_helper_check_overflow(cpu_env, vc, tmp);
            tcg_temp_free(tmp);
            break;
        case 0x4D:
            /* CMPLT */
            tcg_gen_setcond_i64(TCG_COND_LT, vc, va, vb);
            break;
        case 0x60:
            /* ADDQ/V */
            tmp = tcg_temp_new();
            tmp2 = tcg_temp_new();
            tcg_gen_eqv_i64(tmp, va, vb);
            tcg_gen_mov_i64(tmp2, va);
            tcg_gen_add_i64(vc, va, vb);
            tcg_gen_xor_i64(tmp2, tmp2, vc);
            tcg_gen_and_i64(tmp, tmp, tmp2);
            tcg_gen_shri_i64(tmp, tmp, 63);
            tcg_gen_movi_i64(tmp2, 0);
            gen_helper_check_overflow(cpu_env, tmp, tmp2);
            tcg_temp_free(tmp);
            tcg_temp_free(tmp2);
            break;
        case 0x69:
            /* SUBQ/V */
            tmp = tcg_temp_new();
            tmp2 = tcg_temp_new();
            tcg_gen_xor_i64(tmp, va, vb);
            tcg_gen_mov_i64(tmp2, va);
            tcg_gen_sub_i64(vc, va, vb);
            tcg_gen_xor_i64(tmp2, tmp2, vc);
            tcg_gen_and_i64(tmp, tmp, tmp2);
            tcg_gen_shri_i64(tmp, tmp, 63);
            tcg_gen_movi_i64(tmp2, 0);
            gen_helper_check_overflow(cpu_env, tmp, tmp2);
            tcg_temp_free(tmp);
            tcg_temp_free(tmp2);
            break;
        case 0x6D:
            /* CMPLE */
            tcg_gen_setcond_i64(TCG_COND_LE, vc, va, vb);
            break;
        default:
            goto invalid_opc;
        }
        break;
    case 0x11:
        if (fn7 == 0x20) {
            if (rc == 31) {
                /* Special case BIS as NOP.  */
                break;
            }
            if (ra == 31) {
                /* Special case BIS as MOV.  */
                vc = dest_gpr(ctx, rc);
                if (islit) {
                    tcg_gen_movi_i64(vc, lit);
                } else {
                    tcg_gen_mov_i64(vc, load_gpr(ctx, rb));
                }
                break;
            }
        }

        vc = dest_gpr(ctx, rc);
        vb = load_gpr_lit(ctx, rb, lit, islit);

        if (fn7 == 0x28 && ra == 31) {
            /* Special case ORNOT as NOT.  */
            tcg_gen_not_i64(vc, vb);
            break;
        }

        va = load_gpr(ctx, ra);
        switch (fn7) {
        case 0x00:
            /* AND */
            tcg_gen_and_i64(vc, va, vb);
            break;
        case 0x08:
            /* BIC */
            tcg_gen_andc_i64(vc, va, vb);
            break;
        case 0x14:
            /* CMOVLBS */
            tmp = tcg_temp_new();
            tcg_gen_andi_i64(tmp, va, 1);
            tcg_gen_movcond_i64(TCG_COND_NE, vc, tmp, load_zero(ctx),
                                vb, load_gpr(ctx, rc));
            tcg_temp_free(tmp);
            break;
        case 0x16:
            /* CMOVLBC */
            tmp = tcg_temp_new();
            tcg_gen_andi_i64(tmp, va, 1);
            tcg_gen_movcond_i64(TCG_COND_EQ, vc, tmp, load_zero(ctx),
                                vb, load_gpr(ctx, rc));
            tcg_temp_free(tmp);
            break;
        case 0x20:
            /* BIS */
            tcg_gen_or_i64(vc, va, vb);
            break;
        case 0x24:
            /* CMOVEQ */
            tcg_gen_movcond_i64(TCG_COND_EQ, vc, va, load_zero(ctx),
                                vb, load_gpr(ctx, rc));
            break;
        case 0x26:
            /* CMOVNE */
            tcg_gen_movcond_i64(TCG_COND_NE, vc, va, load_zero(ctx),
                                vb, load_gpr(ctx, rc));
            break;
        case 0x28:
            /* ORNOT */
            tcg_gen_orc_i64(vc, va, vb);
            break;
        case 0x40:
            /* XOR */
            tcg_gen_xor_i64(vc, va, vb);
            break;
        case 0x44:
            /* CMOVLT */
            tcg_gen_movcond_i64(TCG_COND_LT, vc, va, load_zero(ctx),
                                vb, load_gpr(ctx, rc));
            break;
        case 0x46:
            /* CMOVGE */
            tcg_gen_movcond_i64(TCG_COND_GE, vc, va, load_zero(ctx),
                                vb, load_gpr(ctx, rc));
            break;
        case 0x48:
            /* EQV */
            tcg_gen_eqv_i64(vc, va, vb);
            break;
        case 0x61:
            /* AMASK */
            tcg_gen_andi_i64(vc, vb, ~ctx->amask);
            break;
        case 0x64:
            /* CMOVLE */
            tcg_gen_movcond_i64(TCG_COND_LE, vc, va, load_zero(ctx),
                                vb, load_gpr(ctx, rc));
            break;
        case 0x66:
            /* CMOVGT */
            tcg_gen_movcond_i64(TCG_COND_GT, vc, va, load_zero(ctx),
                                vb, load_gpr(ctx, rc));
            break;
        case 0x6C:
            /* IMPLVER */
            tcg_gen_movi_i64(vc, ctx->implver);
            break;
        default:
            goto invalid_opc;
        }
        break;
    case 0x12:
        vc = dest_gpr(ctx, rc);
        va = load_gpr(ctx, ra);
        switch (fn7) {
        case 0x02:
            /* MSKBL */
            gen_msk_l(ctx, vc, va, rb, islit, lit, 0x01);
            break;
        case 0x06:
            /* EXTBL */
            gen_ext_l(ctx, vc, va, rb, islit, lit, 0x01);
            break;
        case 0x0B:
            /* INSBL */
            gen_ins_l(ctx, vc, va, rb, islit, lit, 0x01);
            break;
        case 0x12:
            /* MSKWL */
            gen_msk_l(ctx, vc, va, rb, islit, lit, 0x03);
            break;
        case 0x16:
            /* EXTWL */
            gen_ext_l(ctx, vc, va, rb, islit, lit, 0x03);
            break;
        case 0x1B:
            /* INSWL */
            gen_ins_l(ctx, vc, va, rb, islit, lit, 0x03);
            break;
        case 0x22:
            /* MSKLL */
            gen_msk_l(ctx, vc, va, rb, islit, lit, 0x0f);
            break;
        case 0x26:
            /* EXTLL */
            gen_ext_l(ctx, vc, va, rb, islit, lit, 0x0f);
            break;
        case 0x2B:
            /* INSLL */
            gen_ins_l(ctx, vc, va, rb, islit, lit, 0x0f);
            break;
        case 0x30:
            /* ZAP */
            if (islit) {
                gen_zapnoti(vc, va, ~lit);
            } else {
                gen_helper_zap(vc, va, load_gpr(ctx, rb));
            }
            break;
        case 0x31:
            /* ZAPNOT */
            if (islit) {
                gen_zapnoti(vc, va, lit);
            } else {
                gen_helper_zapnot(vc, va, load_gpr(ctx, rb));
            }
            break;
        case 0x32:
            /* MSKQL */
            gen_msk_l(ctx, vc, va, rb, islit, lit, 0xff);
            break;
        case 0x34:
            /* SRL */
            if (islit) {
                tcg_gen_shri_i64(vc, va, lit & 0x3f);
            } else {
                tmp = tcg_temp_new();
                vb = load_gpr(ctx, rb);
                tcg_gen_andi_i64(tmp, vb, 0x3f);
                tcg_gen_shr_i64(vc, va, tmp);
                tcg_temp_free(tmp);
            }
            break;
        case 0x36:
            /* EXTQL */
            gen_ext_l(ctx, vc, va, rb, islit, lit, 0xff);
            break;
        case 0x39:
            /* SLL */
            if (islit) {
                tcg_gen_shli_i64(vc, va, lit & 0x3f);
            } else {
                tmp = tcg_temp_new();
                vb = load_gpr(ctx, rb);
                tcg_gen_andi_i64(tmp, vb, 0x3f);
                tcg_gen_shl_i64(vc, va, tmp);
                tcg_temp_free(tmp);
            }
            break;
        case 0x3B:
            /* INSQL */
            gen_ins_l(ctx, vc, va, rb, islit, lit, 0xff);
            break;
        case 0x3C:
            /* SRA */
            if (islit) {
                tcg_gen_sari_i64(vc, va, lit & 0x3f);
            } else {
                tmp = tcg_temp_new();
                vb = load_gpr(ctx, rb);
                tcg_gen_andi_i64(tmp, vb, 0x3f);
                tcg_gen_sar_i64(vc, va, tmp);
                tcg_temp_free(tmp);
            }
            break;
        case 0x52:
            /* MSKWH */
            gen_msk_h(ctx, vc, va, rb, islit, lit, 0x03);
            break;
        case 0x57:
            /* INSWH */
            gen_ins_h(ctx, vc, va, rb, islit, lit, 0x03);
            break;
        case 0x5A:
            /* EXTWH */
            gen_ext_h(ctx, vc, va, rb, islit, lit, 0x03);
            break;
        case 0x62:
            /* MSKLH */
            gen_msk_h(ctx, vc, va, rb, islit, lit, 0x0f);
            break;
        case 0x67:
            /* INSLH */
            gen_ins_h(ctx, vc, va, rb, islit, lit, 0x0f);
            break;
        case 0x6A:
            /* EXTLH */
            gen_ext_h(ctx, vc, va, rb, islit, lit, 0x0f);
            break;
        case 0x72:
            /* MSKQH */
            gen_msk_h(ctx, vc, va, rb, islit, lit, 0xff);
            break;
        case 0x77:
            /* INSQH */
            gen_ins_h(ctx, vc, va, rb, islit, lit, 0xff);
            break;
        case 0x7A:
            /* EXTQH */
            gen_ext_h(ctx, vc, va, rb, islit, lit, 0xff);
            break;
        default:
            goto invalid_opc;
        }
        break;
    case 0x13:
        vc = dest_gpr(ctx, rc);
        vb = load_gpr_lit(ctx, rb, lit, islit);
        va = load_gpr(ctx, ra);
        switch (fn7) {
        case 0x00:
            /* MULL */
            tcg_gen_mul_i64(vc, va, vb);
            tcg_gen_ext32s_i64(vc, vc);
            break;
        case 0x20:
            /* MULQ */
            tcg_gen_mul_i64(vc, va, vb);
            break;
        case 0x30:
            /* UMULH */
            tmp = tcg_temp_new();
            tcg_gen_mulu2_i64(tmp, vc, va, vb);
            tcg_temp_free(tmp);
            break;
        case 0x40:
            /* MULL/V */
            tmp = tcg_temp_new();
            tcg_gen_ext32s_i64(tmp, va);
            tcg_gen_ext32s_i64(vc, vb);
            tcg_gen_mul_i64(tmp, tmp, vc);
            tcg_gen_ext32s_i64(vc, tmp);
            gen_helper_check_overflow(cpu_env, vc, tmp);
            tcg_temp_free(tmp);
            break;
        case 0x60:
            /* MULQ/V */
            tmp = tcg_temp_new();
            tmp2 = tcg_temp_new();
            tcg_gen_muls2_i64(vc, tmp, va, vb);
            tcg_gen_sari_i64(tmp2, vc, 63);
            gen_helper_check_overflow(cpu_env, tmp, tmp2);
            tcg_temp_free(tmp);
            tcg_temp_free(tmp2);
            break;
        default:
            goto invalid_opc;
        }
        break;
    case 0x14:
        vc = dest_fpr(ctx, rc);
        switch (fpfn) { /* fn11 & 0x3F */
        case 0x04:
            /* ITOFS */
            t32 = tcg_temp_new_i32();
            va = load_gpr(ctx, ra);
            tcg_gen_extrl_i64_i32(t32, va);
            gen_helper_memory_to_s(vc, t32);
            tcg_temp_free_i32(t32);
            break;
        case 0x0A:
            /* SQRTF */
            vb = load_fpr(ctx, rb);
            gen_helper_sqrtf(vc, cpu_env, vb);
            break;
        case 0x0B:
            /* SQRTS */
            gen_sqrts(ctx, rb, rc, fn11);
            break;
        case 0x14:
            /* ITOFF */
            t32 = tcg_temp_new_i32();
            va = load_gpr(ctx, ra);
            tcg_gen_extrl_i64_i32(t32, va);
            gen_helper_memory_to_f(vc, t32);
            tcg_temp_free_i32(t32);
            break;
        case 0x24:
            /* ITOFT */
            va = load_gpr(ctx, ra);
            tcg_gen_mov_i64(vc, va);
            break;
        case 0x2A:
            /* SQRTG */
            vb = load_fpr(ctx, rb);
            gen_helper_sqrtg(vc, cpu_env, vb);
            break;
        case 0x2B:
            /* SQRTT */
            gen_sqrtt(ctx, rb, rc, fn11);
            break;
        default:
            goto invalid_opc;
        }
        break;
    case 0x15:
        /* VAX floating point */
        /* XXX: rounding mode and trap are ignored (!) */
        vc = dest_fpr(ctx, rc);
        vb = load_fpr(ctx, rb);
        va = load_fpr(ctx, ra);
        switch (fpfn) { /* fn11 & 0x3F */
        case 0x00:
            /* ADDF */
            gen_helper_addf(vc, cpu_env, va, vb);
            break;
        case 0x01:
            /* SUBF */
            gen_helper_subf(vc, cpu_env, va, vb);
            break;
        case 0x02:
            /* MULF */
            gen_helper_mulf(vc, cpu_env, va, vb);
            break;
        case 0x03:
            /* DIVF */
            gen_helper_divf(vc, cpu_env, va, vb);
            break;
        case 0x20:
            /* ADDG */
            gen_helper_addg(vc, cpu_env, va, vb);
            break;
        case 0x21:
            /* SUBG */
            gen_helper_subg(vc, cpu_env, va, vb);
            break;
        case 0x22:
            /* MULG */
            gen_helper_mulg(vc, cpu_env, va, vb);
            break;
        case 0x23:
            /* DIVG */
            gen_helper_divg(vc, cpu_env, va, vb);
            break;
        case 0x25:
            /* CMPGEQ */
            gen_helper_cmpgeq(vc, cpu_env, va, vb);
            break;
        case 0x26:
            /* CMPGLT */
            gen_helper_cmpglt(vc, cpu_env, va, vb);
            break;
        case 0x27:
            /* CMPGLE */
            gen_helper_cmpgle(vc, cpu_env, va, vb);
            break;
        case 0x2C:
            /* CVTGF */
            gen_helper_cvtgf(vc, cpu_env, vb);
            break;
        case 0x2F:
            /* CVTGQ */
            gen_helper_cvtgq(vc, cpu_env, vb);
            break;
        case 0x3C:
            /* CVTQF */
            gen_helper_cvtqf(vc, cpu_env, vb);
            break;
        case 0x3E:
            /* CVTQG */
            gen_helper_cvtqg(vc, cpu_env, vb);
            break;
        default:
            goto invalid_opc;
        }
        break;
    case 0x16:
        /* IEEE floating-point */
        switch (fpfn) { /* fn11 & 0x3F */
        case 0x00:
            /* ADDS */
            gen_adds(ctx, ra, rb, rc, fn11);
            break;
        case 0x01:
            /* SUBS */
            gen_subs(ctx, ra, rb, rc, fn11);
            break;
        case 0x02:
            /* MULS */
            gen_muls(ctx, ra, rb, rc, fn11);
            break;
        case 0x03:
            /* DIVS */
            gen_divs(ctx, ra, rb, rc, fn11);
            break;
        case 0x20:
            /* ADDT */
            gen_addt(ctx, ra, rb, rc, fn11);
            break;
        case 0x21:
            /* SUBT */
            gen_subt(ctx, ra, rb, rc, fn11);
            break;
        case 0x22:
            /* MULT */
            gen_mult(ctx, ra, rb, rc, fn11);
            break;
        case 0x23:
            /* DIVT */
            gen_divt(ctx, ra, rb, rc, fn11);
            break;
        case 0x24:
            /* CMPTUN */
            gen_cmptun(ctx, ra, rb, rc, fn11);
            break;
        case 0x25:
            /* CMPTEQ */
            gen_cmpteq(ctx, ra, rb, rc, fn11);
            break;
        case 0x26:
            /* CMPTLT */
            gen_cmptlt(ctx, ra, rb, rc, fn11);
            break;
        case 0x27:
            /* CMPTLE */
            gen_cmptle(ctx, ra, rb, rc, fn11);
            break;
        case 0x2C:
            if (fn11 == 0x2AC || fn11 == 0x6AC) {
                /* CVTST */
                gen_cvtst(ctx, rb, rc, fn11);
            } else {
                /* CVTTS */
                gen_cvtts(ctx, rb, rc, fn11);
            }
            break;
        case 0x2F:
            /* CVTTQ */
            gen_cvttq(ctx, rb, rc, fn11);
            break;
        case 0x3C:
            /* CVTQS */
            gen_cvtqs(ctx, rb, rc, fn11);
            break;
        case 0x3E:
            /* CVTQT */
            gen_cvtqt(ctx, rb, rc, fn11);
            break;
        default:
            goto invalid_opc;
        }
        break;
    case 0x17:
        switch (fn11) {
        case 0x010:
            /* CVTLQ */
            vc = dest_fpr(ctx, rc);
            vb = load_fpr(ctx, rb);
            gen_cvtlq(vc, vb);
            break;
        case 0x020:
            /* CPYS */
            if (rc == 31) {
                /* Special case CPYS as FNOP.  */
            } else {
                vc = dest_fpr(ctx, rc);
                va = load_fpr(ctx, ra);
                if (ra == rb) {
                    /* Special case CPYS as FMOV.  */
                    tcg_gen_mov_i64(vc, va);
                } else {
                    vb = load_fpr(ctx, rb);
                    gen_cpy_mask(vc, va, vb, 0, 0x8000000000000000ULL);
                }
            }
            break;
        case 0x021:
            /* CPYSN */
            vc = dest_fpr(ctx, rc);
            vb = load_fpr(ctx, rb);
            va = load_fpr(ctx, ra);
            gen_cpy_mask(vc, va, vb, 1, 0x8000000000000000ULL);
            break;
        case 0x022:
            /* CPYSE */
            vc = dest_fpr(ctx, rc);
            vb = load_fpr(ctx, rb);
            va = load_fpr(ctx, ra);
            gen_cpy_mask(vc, va, vb, 0, 0xFFF0000000000000ULL);
            break;
        case 0x024:
            /* MT_FPCR */
            va = load_fpr(ctx, ra);
            gen_helper_store_fpcr(cpu_env, va);
            if (ctx->tb_rm == QUAL_RM_D) {
                /* Re-do the copy of the rounding mode to fp_status
                   the next time we use dynamic rounding.  */
                ctx->tb_rm = -1;
            }
            break;
        case 0x025:
            /* MF_FPCR */
            va = dest_fpr(ctx, ra);
            gen_helper_load_fpcr(va, cpu_env);
            break;
        case 0x02A:
            /* FCMOVEQ */
            gen_fcmov(ctx, TCG_COND_EQ, ra, rb, rc);
            break;
        case 0x02B:
            /* FCMOVNE */
            gen_fcmov(ctx, TCG_COND_NE, ra, rb, rc);
            break;
        case 0x02C:
            /* FCMOVLT */
            gen_fcmov(ctx, TCG_COND_LT, ra, rb, rc);
            break;
        case 0x02D:
            /* FCMOVGE */
            gen_fcmov(ctx, TCG_COND_GE, ra, rb, rc);
            break;
        case 0x02E:
            /* FCMOVLE */
            gen_fcmov(ctx, TCG_COND_LE, ra, rb, rc);
            break;
        case 0x02F:
            /* FCMOVGT */
            gen_fcmov(ctx, TCG_COND_GT, ra, rb, rc);
            break;
        case 0x030: /* CVTQL */
        case 0x130: /* CVTQL/V */
        case 0x530: /* CVTQL/SV */
            vc = dest_fpr(ctx, rc);
            vb = load_fpr(ctx, rb);
            gen_helper_cvtql(vc, cpu_env, vb);
            gen_fp_exc_raise(rc, fn11);
            break;
        default:
            goto invalid_opc;
        }
        break;
    case 0x18:
        switch ((uint16_t)disp16) {
        case 0x4000:
            /* MB */
            tcg_gen_mb(TCG_MO_ALL | TCG_BAR_SC);
            break;
        case 0x4400:
            /* WMB */
            tcg_gen_mb(TCG_MO_ST_ST | TCG_BAR_SC);
            break;
        case 0xC000:
            /* RPCC */
            va = dest_gpr(ctx, ra);
            if (tb_cflags(ctx->base.tb) & CF_USE_ICOUNT) {
                gen_io_start();
                gen_helper_load_pcc(va, cpu_env);
                ret = DISAS_PC_STALE;
            } else {
                gen_helper_load_pcc(va, cpu_env);
            }
            break;
        default:
            goto invalid_opc;
        }
        break;
    case 0x19:
        /* HW_MFPR (PALcode) */
#ifndef CONFIG_USER_ONLY
        REQUIRE_TB_FLAG(ENV_FLAG_PAL_MODE);
        va = dest_gpr(ctx, ra);
        ret = gen_mfpr(ctx, va, insn & 0xffff);
        break;
#else
        goto invalid_opc;
#endif
    case 0x1A:
        /* JMP, JSR, RET, JSR_COROUTINE.  These only differ by the branch
           prediction stack action, which of course we don't implement.  */
        vb = load_gpr(ctx, rb);
        tcg_gen_andi_i64(cpu_pc, vb, ~3);
        if (ra != 31) {
            tcg_gen_movi_i64(ctx->ir[ra], ctx->base.pc_next);
        }
        ret = DISAS_PC_UPDATED;
        break;
    case 0x1B:
        /* HW_LD (PALcode) */
#ifndef CONFIG_USER_ONLY
        REQUIRE_TB_FLAG(ENV_FLAG_PAL_MODE);
        {
            TCGv addr = tcg_temp_new();
            vb = load_gpr(ctx, rb);
            va = dest_gpr(ctx, ra);

            tcg_gen_addi_i64(addr, vb, disp12);
            switch ((insn >> 12) & 0xF) {
            case 0x0:
                /* Longword physical access (hw_ldl/p) */
                tcg_gen_qemu_ld_i64(va, addr, MMU_PHYS_IDX, MO_LESL);
                break;
            case 0x1:
                /* Quadword physical access (hw_ldq/p) */
                tcg_gen_qemu_ld_i64(va, addr, MMU_PHYS_IDX, MO_LEQ);
                break;
            case 0x2:
                /* Longword physical access with lock (hw_ldl_l/p) */
                gen_qemu_ldl_l(va, addr, MMU_PHYS_IDX);
                break;
            case 0x3:
                /* Quadword physical access with lock (hw_ldq_l/p) */
                gen_qemu_ldq_l(va, addr, MMU_PHYS_IDX);
                break;
            case 0x4:
                /* Longword virtual PTE fetch (hw_ldl/v) */
                goto invalid_opc;
            case 0x5:
                /* Quadword virtual PTE fetch (hw_ldq/v) */
                goto invalid_opc;
            case 0x8:
                /* Longword virtual access (hw_ldl) */
                goto invalid_opc;
            case 0x9:
                /* Quadword virtual access (hw_ldq) */
                goto invalid_opc;
            case 0xA:
                /* Longword virtual access with protection check (hw_ldl/w) */
                tcg_gen_qemu_ld_i64(va, addr, MMU_KERNEL_IDX, MO_LESL);
                break;
            case 0xB:
                /* Quadword virtual access with protection check (hw_ldq/w) */
                tcg_gen_qemu_ld_i64(va, addr, MMU_KERNEL_IDX, MO_LEQ);
                break;
            case 0xC:
                /* Longword virtual access with alt access mode (hw_ldl/a) */
                goto invalid_opc;
            case 0xD:
                /* Quadword virtual access with alt access mode (hw_ldq/a) */
                goto invalid_opc;
            case 0xE:
                /* Longword virtual access with alternate access mode and
                   protection checks (hw_ldl/wa) */
                tcg_gen_qemu_ld_i64(va, addr, MMU_USER_IDX, MO_LESL);
                break;
            case 0xF:
                /* Quadword virtual access with alternate access mode and
                   protection checks (hw_ldq/wa) */
                tcg_gen_qemu_ld_i64(va, addr, MMU_USER_IDX, MO_LEQ);
                break;
            }
            tcg_temp_free(addr);
            break;
        }
#else
        goto invalid_opc;
#endif
    case 0x1C:
        vc = dest_gpr(ctx, rc);
        if (fn7 == 0x70) {
            /* FTOIT */
            va = load_fpr(ctx, ra);
            tcg_gen_mov_i64(vc, va);
            break;
        } else if (fn7 == 0x78) {
            /* FTOIS */
            t32 = tcg_temp_new_i32();
            va = load_fpr(ctx, ra);
            gen_helper_s_to_memory(t32, va);
            tcg_gen_ext_i32_i64(vc, t32);
            tcg_temp_free_i32(t32);
            break;
        }

        vb = load_gpr_lit(ctx, rb, lit, islit);
        switch (fn7) {
        case 0x00:
            /* SEXTB */
            tcg_gen_ext8s_i64(vc, vb);
            break;
        case 0x01:
            /* SEXTW */
            tcg_gen_ext16s_i64(vc, vb);
            break;
        case 0x30:
            /* CTPOP */
            tcg_gen_ctpop_i64(vc, vb);
            break;
        case 0x31:
            /* PERR */
            va = load_gpr(ctx, ra);
            gen_helper_perr(vc, va, vb);
            break;
        case 0x32:
            /* CTLZ */
            tcg_gen_clzi_i64(vc, vb, 64);
            break;
        case 0x33:
            /* CTTZ */
            tcg_gen_ctzi_i64(vc, vb, 64);
            break;
        case 0x34:
            /* UNPKBW */
            gen_helper_unpkbw(vc, vb);
            break;
        case 0x35:
            /* UNPKBL */
            gen_helper_unpkbl(vc, vb);
            break;
        case 0x36:
            /* PKWB */
            gen_helper_pkwb(vc, vb);
            break;
        case 0x37:
            /* PKLB */
            gen_helper_pklb(vc, vb);
            break;
        case 0x38:
            /* MINSB8 */
            va = load_gpr(ctx, ra);
            gen_helper_minsb8(vc, va, vb);
            break;
        case 0x39:
            /* MINSW4 */
            va = load_gpr(ctx, ra);
            gen_helper_minsw4(vc, va, vb);
            break;
        case 0x3A:
            /* MINUB8 */
            va = load_gpr(ctx, ra);
            gen_helper_minub8(vc, va, vb);
            break;
        case 0x3B:
            /* MINUW4 */
            va = load_gpr(ctx, ra);
            gen_helper_minuw4(vc, va, vb);
            break;
        case 0x3C:
            /* MAXUB8 */
            va = load_gpr(ctx, ra);
            gen_helper_maxub8(vc, va, vb);
            break;
        case 0x3D:
            /* MAXUW4 */
            va = load_gpr(ctx, ra);
            gen_helper_maxuw4(vc, va, vb);
            break;
        case 0x3E:
            /* MAXSB8 */
            va = load_gpr(ctx, ra);
            gen_helper_maxsb8(vc, va, vb);
            break;
        case 0x3F:
            /* MAXSW4 */
            va = load_gpr(ctx, ra);
            gen_helper_maxsw4(vc, va, vb);
            break;
        default:
            goto invalid_opc;
        }
        break;
    case 0x1D:
        /* HW_MTPR (PALcode) */
#ifndef CONFIG_USER_ONLY
        REQUIRE_TB_FLAG(ENV_FLAG_PAL_MODE);
        vb = load_gpr(ctx, rb);
        ret = gen_mtpr(ctx, vb, insn & 0xffff);
        break;
#else
        goto invalid_opc;
#endif
    case 0x1E:
        /* HW_RET (PALcode) */
#ifndef CONFIG_USER_ONLY
        REQUIRE_TB_FLAG(ENV_FLAG_PAL_MODE);
        if (rb == 31) {
            /* Pre-EV6 CPUs interpreted this as HW_REI, loading the return
               address from EXC_ADDR.  This turns out to be useful for our
               emulation PALcode, so continue to accept it.  */
            ctx->lit = vb = tcg_temp_new();
            tcg_gen_ld_i64(vb, cpu_env, offsetof(CPUAlphaState, exc_addr));
        } else {
            vb = load_gpr(ctx, rb);
        }
        tcg_gen_movi_i64(cpu_lock_addr, -1);
        tmp = tcg_temp_new();
        tcg_gen_movi_i64(tmp, 0);
        st_flag_byte(tmp, ENV_FLAG_RX_SHIFT);
        tcg_gen_andi_i64(tmp, vb, 1);
        st_flag_byte(tmp, ENV_FLAG_PAL_SHIFT);
        tcg_temp_free(tmp);
        tcg_gen_andi_i64(cpu_pc, vb, ~3);
        /* Allow interrupts to be recognized right away.  */
        ret = DISAS_PC_UPDATED_NOCHAIN;
        break;
#else
        goto invalid_opc;
#endif
    case 0x1F:
        /* HW_ST (PALcode) */
#ifndef CONFIG_USER_ONLY
        REQUIRE_TB_FLAG(ENV_FLAG_PAL_MODE);
        switch ((insn >> 12) & 0xF) {
        case 0x0:
            /* Longword physical access */
            va = load_gpr(ctx, ra);
            vb = load_gpr(ctx, rb);
            tmp = tcg_temp_new();
            tcg_gen_addi_i64(tmp, vb, disp12);
            tcg_gen_qemu_st_i64(va, tmp, MMU_PHYS_IDX, MO_LESL);
            tcg_temp_free(tmp);
            break;
        case 0x1:
            /* Quadword physical access */
            va = load_gpr(ctx, ra);
            vb = load_gpr(ctx, rb);
            tmp = tcg_temp_new();
            tcg_gen_addi_i64(tmp, vb, disp12);
            tcg_gen_qemu_st_i64(va, tmp, MMU_PHYS_IDX, MO_LEQ);
            tcg_temp_free(tmp);
            break;
        case 0x2:
            /* Longword physical access with lock */
            ret = gen_store_conditional(ctx, ra, rb, disp12,
                                        MMU_PHYS_IDX, MO_LESL);
            break;
        case 0x3:
            /* Quadword physical access with lock */
            ret = gen_store_conditional(ctx, ra, rb, disp12,
                                        MMU_PHYS_IDX, MO_LEQ);
            break;
        case 0x4:
            /* Longword virtual access */
            goto invalid_opc;
        case 0x5:
            /* Quadword virtual access */
            goto invalid_opc;
        case 0xC:
            /* Longword virtual access with alternate access mode */
            goto invalid_opc;
        case 0xD:
            /* Quadword virtual access with alternate access mode */
            goto invalid_opc;
        default:
            goto invalid_opc;
        }
        break;
#else
        goto invalid_opc;
#endif
    case 0x20:
        /* LDF */
        gen_load_mem(ctx, &gen_qemu_ldf, ra, rb, disp16, 1, 0);
        break;
    case 0x21:
        /* LDG */
        gen_load_mem(ctx, &gen_qemu_ldg, ra, rb, disp16, 1, 0);
        break;
    case 0x22:
        /* LDS */
        gen_load_mem(ctx, &gen_qemu_lds, ra, rb, disp16, 1, 0);
        break;
    case 0x23:
        /* LDT */
        gen_load_mem(ctx, &tcg_gen_qemu_ld64, ra, rb, disp16, 1, 0);
        break;
    case 0x24:
        /* STF */
        gen_store_mem(ctx, &gen_qemu_stf, ra, rb, disp16, 1, 0);
        break;
    case 0x25:
        /* STG */
        gen_store_mem(ctx, &gen_qemu_stg, ra, rb, disp16, 1, 0);
        break;
    case 0x26:
        /* STS */
        gen_store_mem(ctx, &gen_qemu_sts, ra, rb, disp16, 1, 0);
        break;
    case 0x27:
        /* STT */
        gen_store_mem(ctx, &tcg_gen_qemu_st64, ra, rb, disp16, 1, 0);
        break;
    case 0x28:
        /* LDL */
        gen_load_mem(ctx, &tcg_gen_qemu_ld32s, ra, rb, disp16, 0, 0);
        break;
    case 0x29:
        /* LDQ */
        gen_load_mem(ctx, &tcg_gen_qemu_ld64, ra, rb, disp16, 0, 0);
        break;
    case 0x2A:
        /* LDL_L */
        gen_load_mem(ctx, &gen_qemu_ldl_l, ra, rb, disp16, 0, 0);
        break;
    case 0x2B:
        /* LDQ_L */
        gen_load_mem(ctx, &gen_qemu_ldq_l, ra, rb, disp16, 0, 0);
        break;
    case 0x2C:
        /* STL */
        gen_store_mem(ctx, &tcg_gen_qemu_st32, ra, rb, disp16, 0, 0);
        break;
    case 0x2D:
        /* STQ */
        gen_store_mem(ctx, &tcg_gen_qemu_st64, ra, rb, disp16, 0, 0);
        break;
    case 0x2E:
        /* STL_C */
        ret = gen_store_conditional(ctx, ra, rb, disp16,
                                    ctx->mem_idx, MO_LESL);
        break;
    case 0x2F:
        /* STQ_C */
        ret = gen_store_conditional(ctx, ra, rb, disp16,
                                    ctx->mem_idx, MO_LEQ);
        break;
    case 0x30:
        /* BR */
        ret = gen_bdirect(ctx, ra, disp21);
        break;
    case 0x31: /* FBEQ */
        ret = gen_fbcond(ctx, TCG_COND_EQ, ra, disp21);
        break;
    case 0x32: /* FBLT */
        ret = gen_fbcond(ctx, TCG_COND_LT, ra, disp21);
        break;
    case 0x33: /* FBLE */
        ret = gen_fbcond(ctx, TCG_COND_LE, ra, disp21);
        break;
    case 0x34:
        /* BSR */
        ret = gen_bdirect(ctx, ra, disp21);
        break;
    case 0x35: /* FBNE */
        ret = gen_fbcond(ctx, TCG_COND_NE, ra, disp21);
        break;
    case 0x36: /* FBGE */
        ret = gen_fbcond(ctx, TCG_COND_GE, ra, disp21);
        break;
    case 0x37: /* FBGT */
        ret = gen_fbcond(ctx, TCG_COND_GT, ra, disp21);
        break;
    case 0x38:
        /* BLBC */
        ret = gen_bcond(ctx, TCG_COND_EQ, ra, disp21, 1);
        break;
    case 0x39:
        /* BEQ */
        ret = gen_bcond(ctx, TCG_COND_EQ, ra, disp21, 0);
        break;
    case 0x3A:
        /* BLT */
        ret = gen_bcond(ctx, TCG_COND_LT, ra, disp21, 0);
        break;
    case 0x3B:
        /* BLE */
        ret = gen_bcond(ctx, TCG_COND_LE, ra, disp21, 0);
        break;
    case 0x3C:
        /* BLBS */
        ret = gen_bcond(ctx, TCG_COND_NE, ra, disp21, 1);
        break;
    case 0x3D:
        /* BNE */
        ret = gen_bcond(ctx, TCG_COND_NE, ra, disp21, 0);
        break;
    case 0x3E:
        /* BGE */
        ret = gen_bcond(ctx, TCG_COND_GE, ra, disp21, 0);
        break;
    case 0x3F:
        /* BGT */
        ret = gen_bcond(ctx, TCG_COND_GT, ra, disp21, 0);
        break;
    invalid_opc:
        ret = gen_invalid(ctx);
        break;
    raise_fen:
        ret = gen_excp(ctx, EXCP_FEN, 0);
        break;
    }

    return ret;
}
static void alpha_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cpu)
{
    DisasContext *ctx = container_of(dcbase, DisasContext, base);
    CPUAlphaState *env = cpu->env_ptr;
    int64_t bound;

    ctx->tbflags = ctx->base.tb->flags;
    ctx->mem_idx = cpu_mmu_index(env, false);
    ctx->implver = env->implver;
    ctx->amask = env->amask;

#ifdef CONFIG_USER_ONLY
    ctx->ir = cpu_std_ir;
#else
    ctx->palbr = env->palbr;
    ctx->ir = (ctx->tbflags & ENV_FLAG_PAL_MODE ? cpu_pal_ir : cpu_std_ir);
#endif

    /* ??? Every TB begins with unset rounding mode, to be initialized on
       the first fp insn of the TB.  Alternately we could define a proper
       default for every TB (e.g. QUAL_RM_N or QUAL_RM_D) and make sure
       to reset the FP_STATUS to that default at the end of any TB that
       changes the default.  We could even (gasp) dynamically figure out
       what default would be most efficient given the running program.  */
    ctx->tb_rm = -1;
    /* Similarly for flush-to-zero.  */
    ctx->tb_ftz = -1;

    ctx->zero = NULL;
    ctx->sink = NULL;
    ctx->lit = NULL;

    /* Bound the number of insns to execute to those left on the page.  */
    bound = -(ctx->base.pc_first | TARGET_PAGE_MASK) / 4;
    ctx->base.max_insns = MIN(ctx->base.max_insns, bound);
}
static void alpha_tr_tb_start(DisasContextBase *db, CPUState *cpu)
{
}

static void alpha_tr_insn_start(DisasContextBase *dcbase, CPUState *cpu)
{
    tcg_gen_insn_start(dcbase->pc_next);
}
static bool alpha_tr_breakpoint_check(DisasContextBase *dcbase, CPUState *cpu,
                                      const CPUBreakpoint *bp)
{
    DisasContext *ctx = container_of(dcbase, DisasContext, base);

    ctx->base.is_jmp = gen_excp(ctx, EXCP_DEBUG, 0);

    /* The address covered by the breakpoint must be included in
       [tb->pc, tb->pc + tb->size) in order for it to be properly
       cleared -- thus we increment the PC here so that the logic
       setting tb->size below does the right thing.  */
    ctx->base.pc_next += 4;
    return true;
}
static void alpha_tr_translate_insn(DisasContextBase *dcbase, CPUState *cpu)
{
    DisasContext *ctx = container_of(dcbase, DisasContext, base);
    CPUAlphaState *env = cpu->env_ptr;
    uint32_t insn = translator_ldl(env, ctx->base.pc_next);

    ctx->base.pc_next += 4;
    ctx->base.is_jmp = translate_one(ctx, insn);

    free_context_temps(ctx);
    translator_loop_temp_check(&ctx->base);
}
static void alpha_tr_tb_stop(DisasContextBase *dcbase, CPUState *cpu)
{
    DisasContext *ctx = container_of(dcbase, DisasContext, base);

    switch (ctx->base.is_jmp) {
    case DISAS_NORETURN:
        break;
    case DISAS_TOO_MANY:
        if (use_goto_tb(ctx, ctx->base.pc_next)) {
            tcg_gen_goto_tb(0);
            tcg_gen_movi_i64(cpu_pc, ctx->base.pc_next);
            tcg_gen_exit_tb(ctx->base.tb, 0);
        }
        /* FALLTHRU */
    case DISAS_PC_STALE:
        tcg_gen_movi_i64(cpu_pc, ctx->base.pc_next);
        /* FALLTHRU */
    case DISAS_PC_UPDATED:
        if (!ctx->base.singlestep_enabled) {
            tcg_gen_lookup_and_goto_ptr();
            break;
        }
        /* FALLTHRU */
    case DISAS_PC_UPDATED_NOCHAIN:
        if (ctx->base.singlestep_enabled) {
            gen_excp_1(EXCP_DEBUG, 0);
        } else {
            tcg_gen_exit_tb(NULL, 0);
        }
        break;
    default:
        g_assert_not_reached();
    }
}
static void alpha_tr_disas_log(const DisasContextBase *dcbase, CPUState *cpu)
{
    qemu_log("IN: %s\n", lookup_symbol(dcbase->pc_first));
    log_target_disas(cpu, dcbase->pc_first, dcbase->tb->size);
}
static const TranslatorOps alpha_tr_ops = {
    .init_disas_context = alpha_tr_init_disas_context,
    .tb_start           = alpha_tr_tb_start,
    .insn_start         = alpha_tr_insn_start,
    .breakpoint_check   = alpha_tr_breakpoint_check,
    .translate_insn     = alpha_tr_translate_insn,
    .tb_stop            = alpha_tr_tb_stop,
    .disas_log          = alpha_tr_disas_log,
};
void gen_intermediate_code(CPUState *cpu, TranslationBlock *tb, int max_insns)
{
    DisasContext dc;
    translator_loop(&alpha_tr_ops, &dc.base, cpu, tb, max_insns);
}

void restore_state_to_opc(CPUAlphaState *env, TranslationBlock *tb,
                          target_ulong *data)
{
    env->pc = data[0];
}