/*
 * Alpha emulation cpu translation for qemu.
 *
 * Copyright (c) 2007 Jocelyn Mayer
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "cpu.h"
#include "disas/disas.h"
#include "qemu/host-utils.h"
#include "tcg-op.h"
#include "exec/cpu_ldst.h"

#include "exec/helper-proto.h"
#include "exec/helper-gen.h"

#undef ALPHA_DEBUG_DISAS
#define CONFIG_SOFTFLOAT_INLINE

#ifdef ALPHA_DEBUG_DISAS
#  define LOG_DISAS(...) qemu_log_mask(CPU_LOG_TB_IN_ASM, ## __VA_ARGS__)
#else
#  define LOG_DISAS(...) do { } while (0)
#endif
typedef struct DisasContext DisasContext;
struct DisasContext {
    struct TranslationBlock *tb;
    uint64_t pc;
    int mem_idx;

    /* Current rounding mode for this TB.  */
    int tb_rm;
    /* Current flush-to-zero setting for this TB.  */
    int tb_ftz;

    /* implver value for this CPU.  */
    int implver;

    /* Temporaries for $31 and $f31 as source and destination.  */
    TCGv zero;
    TCGv sink;
    /* Temporary for immediate constants.  */
    TCGv lit;

    bool singlestep_enabled;
};
/* Return values from translate_one, indicating the state of the TB.
   Note that zero indicates that we are not exiting the TB.  */
typedef enum {
    NO_EXIT,

    /* We have emitted one or more goto_tb.  No fixup required.  */
    EXIT_GOTO_TB,

    /* We are not using a goto_tb (for whatever reason), but have updated
       the PC (for whatever reason), so there's no need to do it again on
       exiting the TB.  */
    EXIT_PC_UPDATED,

    /* We are exiting the TB, but have neither emitted a goto_tb, nor
       updated the PC for the next instruction to be executed.  */
    EXIT_PC_STALE,

    /* We are ending the TB with a noreturn function call, e.g. longjmp.
       No following code will be executed.  */
    EXIT_NORETURN,
} ExitStatus;
/* global register indexes */
static TCGv_ptr cpu_env;
static TCGv cpu_ir[31];
static TCGv cpu_fir[31];
static TCGv cpu_pc;
static TCGv cpu_lock_addr;
static TCGv cpu_lock_st_addr;
static TCGv cpu_lock_value;

#include "exec/gen-icount.h"
void alpha_translate_init(void)
{
#define DEF_VAR(V)  { &cpu_##V, #V, offsetof(CPUAlphaState, V) }

    typedef struct { TCGv *var; const char *name; int ofs; } GlobalVar;
    static const GlobalVar vars[] = {
        DEF_VAR(pc),
        DEF_VAR(lock_addr),
        DEF_VAR(lock_st_addr),
        DEF_VAR(lock_value),
    };

#undef DEF_VAR

    /* Use the symbolic register names that match the disassembler.  */
    static const char greg_names[31][4] = {
        "v0", "t0", "t1", "t2", "t3", "t4", "t5", "t6",
        "t7", "s0", "s1", "s2", "s3", "s4", "s5", "fp",
        "a0", "a1", "a2", "a3", "a4", "a5", "t8", "t9",
        "t10", "t11", "ra", "t12", "at", "gp", "sp"
    };
    static const char freg_names[31][4] = {
        "f0", "f1", "f2", "f3", "f4", "f5", "f6", "f7",
        "f8", "f9", "f10", "f11", "f12", "f13", "f14", "f15",
        "f16", "f17", "f18", "f19", "f20", "f21", "f22", "f23",
        "f24", "f25", "f26", "f27", "f28", "f29", "f30"
    };

    static bool done_init = 0;
    int i;

    if (done_init) {
        return;
    }
    done_init = 1;

    cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");

    for (i = 0; i < 31; i++) {
        cpu_ir[i] = tcg_global_mem_new_i64(TCG_AREG0,
                                           offsetof(CPUAlphaState, ir[i]),
                                           greg_names[i]);
    }

    for (i = 0; i < 31; i++) {
        cpu_fir[i] = tcg_global_mem_new_i64(TCG_AREG0,
                                            offsetof(CPUAlphaState, fir[i]),
                                            freg_names[i]);
    }

    for (i = 0; i < ARRAY_SIZE(vars); ++i) {
        const GlobalVar *v = &vars[i];
        *v->var = tcg_global_mem_new_i64(TCG_AREG0, v->ofs, v->name);
    }
}
static TCGv load_zero(DisasContext *ctx)
{
    if (TCGV_IS_UNUSED_I64(ctx->zero)) {
        ctx->zero = tcg_const_i64(0);
    }
    return ctx->zero;
}

static TCGv dest_sink(DisasContext *ctx)
{
    if (TCGV_IS_UNUSED_I64(ctx->sink)) {
        ctx->sink = tcg_temp_new();
    }
    return ctx->sink;
}

static TCGv load_gpr(DisasContext *ctx, unsigned reg)
{
    if (likely(reg < 31)) {
        return cpu_ir[reg];
    } else {
        return load_zero(ctx);
    }
}

static TCGv load_gpr_lit(DisasContext *ctx, unsigned reg,
                         uint8_t lit, bool islit)
{
    if (islit) {
        ctx->lit = tcg_const_i64(lit);
        return ctx->lit;
    } else if (likely(reg < 31)) {
        return cpu_ir[reg];
    } else {
        return load_zero(ctx);
    }
}

static TCGv dest_gpr(DisasContext *ctx, unsigned reg)
{
    if (likely(reg < 31)) {
        return cpu_ir[reg];
    } else {
        return dest_sink(ctx);
    }
}

static TCGv load_fpr(DisasContext *ctx, unsigned reg)
{
    if (likely(reg < 31)) {
        return cpu_fir[reg];
    } else {
        return load_zero(ctx);
    }
}

static TCGv dest_fpr(DisasContext *ctx, unsigned reg)
{
    if (likely(reg < 31)) {
        return cpu_fir[reg];
    } else {
        return dest_sink(ctx);
    }
}
static void gen_excp_1(int exception, int error_code)
{
    TCGv_i32 tmp1, tmp2;

    tmp1 = tcg_const_i32(exception);
    tmp2 = tcg_const_i32(error_code);
    gen_helper_excp(cpu_env, tmp1, tmp2);
    tcg_temp_free_i32(tmp2);
    tcg_temp_free_i32(tmp1);
}

static ExitStatus gen_excp(DisasContext *ctx, int exception, int error_code)
{
    tcg_gen_movi_i64(cpu_pc, ctx->pc);
    gen_excp_1(exception, error_code);
    return EXIT_NORETURN;
}

static inline ExitStatus gen_invalid(DisasContext *ctx)
{
    return gen_excp(ctx, EXCP_OPCDEC, 0);
}
static inline void gen_qemu_ldf(TCGv t0, TCGv t1, int flags)
{
    TCGv_i32 tmp32 = tcg_temp_new_i32();
    tcg_gen_qemu_ld_i32(tmp32, t1, flags, MO_LEUL);
    gen_helper_memory_to_f(t0, tmp32);
    tcg_temp_free_i32(tmp32);
}

static inline void gen_qemu_ldg(TCGv t0, TCGv t1, int flags)
{
    TCGv tmp = tcg_temp_new();
    tcg_gen_qemu_ld_i64(tmp, t1, flags, MO_LEQ);
    gen_helper_memory_to_g(t0, tmp);
    tcg_temp_free(tmp);
}

static inline void gen_qemu_lds(TCGv t0, TCGv t1, int flags)
{
    TCGv_i32 tmp32 = tcg_temp_new_i32();
    tcg_gen_qemu_ld_i32(tmp32, t1, flags, MO_LEUL);
    gen_helper_memory_to_s(t0, tmp32);
    tcg_temp_free_i32(tmp32);
}

static inline void gen_qemu_ldl_l(TCGv t0, TCGv t1, int flags)
{
    tcg_gen_qemu_ld_i64(t0, t1, flags, MO_LESL);
    tcg_gen_mov_i64(cpu_lock_addr, t1);
    tcg_gen_mov_i64(cpu_lock_value, t0);
}

static inline void gen_qemu_ldq_l(TCGv t0, TCGv t1, int flags)
{
    tcg_gen_qemu_ld_i64(t0, t1, flags, MO_LEQ);
    tcg_gen_mov_i64(cpu_lock_addr, t1);
    tcg_gen_mov_i64(cpu_lock_value, t0);
}
static inline void gen_load_mem(DisasContext *ctx,
                                void (*tcg_gen_qemu_load)(TCGv t0, TCGv t1,
                                                          int flags),
                                int ra, int rb, int32_t disp16, bool fp,
                                bool clear)
{
    TCGv tmp, addr, va;

    /* LDQ_U with ra $31 is UNOP.  Other various loads are forms of
       prefetches, which we can treat as nops.  No worries about
       missed exceptions here.  */
    if (unlikely(ra == 31)) {
        return;
    }

    tmp = tcg_temp_new();
    addr = load_gpr(ctx, rb);

    if (disp16) {
        tcg_gen_addi_i64(tmp, addr, disp16);
        addr = tmp;
    }
    if (clear) {
        tcg_gen_andi_i64(tmp, addr, ~0x7);
        addr = tmp;
    }

    va = (fp ? cpu_fir[ra] : cpu_ir[ra]);
    tcg_gen_qemu_load(va, addr, ctx->mem_idx);

    tcg_temp_free(tmp);
}
static inline void gen_qemu_stf(TCGv t0, TCGv t1, int flags)
{
    TCGv_i32 tmp32 = tcg_temp_new_i32();
    gen_helper_f_to_memory(tmp32, t0);
    tcg_gen_qemu_st_i32(tmp32, t1, flags, MO_LEUL);
    tcg_temp_free_i32(tmp32);
}

static inline void gen_qemu_stg(TCGv t0, TCGv t1, int flags)
{
    TCGv tmp = tcg_temp_new();
    gen_helper_g_to_memory(tmp, t0);
    tcg_gen_qemu_st_i64(tmp, t1, flags, MO_LEQ);
    tcg_temp_free(tmp);
}

static inline void gen_qemu_sts(TCGv t0, TCGv t1, int flags)
{
    TCGv_i32 tmp32 = tcg_temp_new_i32();
    gen_helper_s_to_memory(tmp32, t0);
    tcg_gen_qemu_st_i32(tmp32, t1, flags, MO_LEUL);
    tcg_temp_free_i32(tmp32);
}
static inline void gen_store_mem(DisasContext *ctx,
                                 void (*tcg_gen_qemu_store)(TCGv t0, TCGv t1,
                                                            int flags),
                                 int ra, int rb, int32_t disp16, bool fp,
                                 bool clear)
{
    TCGv tmp, addr, va;

    tmp = tcg_temp_new();
    addr = load_gpr(ctx, rb);

    if (disp16) {
        tcg_gen_addi_i64(tmp, addr, disp16);
        addr = tmp;
    }
    if (clear) {
        tcg_gen_andi_i64(tmp, addr, ~0x7);
        addr = tmp;
    }

    va = (fp ? load_fpr(ctx, ra) : load_gpr(ctx, ra));
    tcg_gen_qemu_store(va, addr, ctx->mem_idx);

    tcg_temp_free(tmp);
}
static ExitStatus gen_store_conditional(DisasContext *ctx, int ra, int rb,
                                        int32_t disp16, int quad)
{
    TCGv addr;

    if (ra == 31) {
        /* ??? Don't bother storing anything.  The user can't tell
           the difference, since the zero register always reads zero.  */
        return NO_EXIT;
    }

#if defined(CONFIG_USER_ONLY)
    addr = cpu_lock_st_addr;
#else
    addr = tcg_temp_local_new();
#endif

    tcg_gen_addi_i64(addr, load_gpr(ctx, rb), disp16);

#if defined(CONFIG_USER_ONLY)
    /* ??? This is handled via a complicated version of compare-and-swap
       in the cpu_loop.  Hopefully one day we'll have a real CAS opcode
       in TCG so that this isn't necessary.  */
    return gen_excp(ctx, quad ? EXCP_STQ_C : EXCP_STL_C, ra);
#else
    /* ??? In system mode we are never multi-threaded, so CAS can be
       implemented via a non-atomic load-compare-store sequence.  */
    {
        int lab_fail, lab_done;
        TCGv val;

        lab_fail = gen_new_label();
        lab_done = gen_new_label();
        tcg_gen_brcond_i64(TCG_COND_NE, addr, cpu_lock_addr, lab_fail);

        val = tcg_temp_new();
        tcg_gen_qemu_ld_i64(val, addr, ctx->mem_idx, quad ? MO_LEQ : MO_LESL);
        tcg_gen_brcond_i64(TCG_COND_NE, val, cpu_lock_value, lab_fail);

        tcg_gen_qemu_st_i64(cpu_ir[ra], addr, ctx->mem_idx,
                            quad ? MO_LEQ : MO_LEUL);
        tcg_gen_movi_i64(cpu_ir[ra], 1);
        tcg_gen_br(lab_done);

        gen_set_label(lab_fail);
        tcg_gen_movi_i64(cpu_ir[ra], 0);

        gen_set_label(lab_done);
        tcg_gen_movi_i64(cpu_lock_addr, -1);

        tcg_temp_free(addr);
        return NO_EXIT;
    }
#endif
}
static bool in_superpage(DisasContext *ctx, int64_t addr)
{
    return ((ctx->tb->flags & TB_FLAGS_USER_MODE) == 0
            && addr < 0
            && ((addr >> 41) & 3) == 2
            && addr >> TARGET_VIRT_ADDR_SPACE_BITS == addr >> 63);
}
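
/* Illustrative note, not from the original source: the OSF/1 KSEG base
   0xfffffc0000000000 satisfies the test above, since (addr >> 41) & 3 == 2
   for that value and the address is a properly sign-extended kernel
   address, so code executing there is treated as living in the superpage
   whose mapping can never change.  */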
static bool use_goto_tb(DisasContext *ctx, uint64_t dest)
{
    /* Suppress goto_tb in the case of single-stepping and IO.  */
    if ((ctx->tb->cflags & CF_LAST_IO)
        || ctx->singlestep_enabled || singlestep) {
        return false;
    }
    /* If the destination is in the superpage, the page perms can't change.  */
    if (in_superpage(ctx, dest)) {
        return true;
    }
    /* Check for the dest on the same page as the start of the TB.  */
    return ((ctx->tb->pc ^ dest) & TARGET_PAGE_MASK) == 0;
}
static ExitStatus gen_bdirect(DisasContext *ctx, int ra, int32_t disp)
{
    uint64_t dest = ctx->pc + (disp << 2);

    if (ra != 31) {
        tcg_gen_movi_i64(cpu_ir[ra], ctx->pc);
    }

    /* Notice branch-to-next; used to initialize RA with the PC.  */
    if (disp == 0) {
        return 0;
    } else if (use_goto_tb(ctx, dest)) {
        tcg_gen_goto_tb(0);
        tcg_gen_movi_i64(cpu_pc, dest);
        tcg_gen_exit_tb((uintptr_t)ctx->tb);
        return EXIT_GOTO_TB;
    } else {
        tcg_gen_movi_i64(cpu_pc, dest);
        return EXIT_PC_UPDATED;
    }
}
static ExitStatus gen_bcond_internal(DisasContext *ctx, TCGCond cond,
                                     TCGv cmp, int32_t disp)
{
    uint64_t dest = ctx->pc + (disp << 2);
    int lab_true = gen_new_label();

    if (use_goto_tb(ctx, dest)) {
        tcg_gen_brcondi_i64(cond, cmp, 0, lab_true);

        tcg_gen_goto_tb(0);
        tcg_gen_movi_i64(cpu_pc, ctx->pc);
        tcg_gen_exit_tb((uintptr_t)ctx->tb);

        gen_set_label(lab_true);
        tcg_gen_goto_tb(1);
        tcg_gen_movi_i64(cpu_pc, dest);
        tcg_gen_exit_tb((uintptr_t)ctx->tb + 1);

        return EXIT_GOTO_TB;
    } else {
        TCGv_i64 z = tcg_const_i64(0);
        TCGv_i64 d = tcg_const_i64(dest);
        TCGv_i64 p = tcg_const_i64(ctx->pc);

        tcg_gen_movcond_i64(cond, cpu_pc, cmp, z, d, p);

        tcg_temp_free_i64(z);
        tcg_temp_free_i64(d);
        tcg_temp_free_i64(p);
        return EXIT_PC_UPDATED;
    }
}
static ExitStatus gen_bcond(DisasContext *ctx, TCGCond cond, int ra,
                            int32_t disp, int mask)
{
    TCGv cmp_tmp;

    if (mask) {
        cmp_tmp = tcg_temp_new();
        tcg_gen_andi_i64(cmp_tmp, load_gpr(ctx, ra), 1);
    } else {
        cmp_tmp = load_gpr(ctx, ra);
    }

    return gen_bcond_internal(ctx, cond, cmp_tmp, disp);
}
/* Fold -0.0 for comparison with COND.  */

static void gen_fold_mzero(TCGCond cond, TCGv dest, TCGv src)
{
    uint64_t mzero = 1ull << 63;

    switch (cond) {
    case TCG_COND_LE:
    case TCG_COND_GT:
        /* For <= or >, the -0.0 value directly compares the way we want.  */
        tcg_gen_mov_i64(dest, src);
        break;

    case TCG_COND_EQ:
    case TCG_COND_NE:
        /* For == or !=, we can simply mask off the sign bit and compare.  */
        tcg_gen_andi_i64(dest, src, mzero - 1);
        break;

    case TCG_COND_GE:
    case TCG_COND_LT:
        /* For >= or <, map -0.0 to +0.0 via comparison and mask.  */
        tcg_gen_setcondi_i64(TCG_COND_NE, dest, src, mzero);
        tcg_gen_neg_i64(dest, dest);
        tcg_gen_and_i64(dest, dest, src);
        break;

    default:
        abort();
    }
}
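
/* Worked example (illustrative, not from the original source): the bit
   pattern of -0.0 is mzero == 0x8000000000000000.  For EQ/NE the AND with
   (mzero - 1) == 0x7fffffffffffffff clears just the sign bit, so +0.0 and
   -0.0 both compare as zero.  For GE/LT, setcond(NE, src, mzero) yields 0
   exactly when src is -0.0; negating that gives an all-zero mask, and the
   final AND turns -0.0 into +0.0 while leaving every other value (whose
   mask is all ones) unchanged.  */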
static ExitStatus gen_fbcond(DisasContext *ctx, TCGCond cond, int ra,
                             int32_t disp)
{
    TCGv cmp_tmp = tcg_temp_new();
    gen_fold_mzero(cond, cmp_tmp, load_fpr(ctx, ra));
    return gen_bcond_internal(ctx, cond, cmp_tmp, disp);
}
static void gen_fcmov(DisasContext *ctx, TCGCond cond, int ra, int rb, int rc)
{
    TCGv va, vb, z;

    z = load_zero(ctx);
    vb = load_fpr(ctx, rb);
    va = tcg_temp_new();
    gen_fold_mzero(cond, va, load_fpr(ctx, ra));

    tcg_gen_movcond_i64(cond, dest_fpr(ctx, rc), va, z, vb, load_fpr(ctx, rc));

    tcg_temp_free(va);
}
#define QUAL_RM_N       0x080   /* Round mode nearest even */
#define QUAL_RM_C       0x000   /* Round mode chopped */
#define QUAL_RM_M       0x040   /* Round mode minus infinity */
#define QUAL_RM_D       0x0c0   /* Round mode dynamic */
#define QUAL_RM_MASK    0x0c0

#define QUAL_U          0x100   /* Underflow enable (fp output) */
#define QUAL_V          0x100   /* Overflow enable (int output) */
#define QUAL_S          0x400   /* Software completion enable */
#define QUAL_I          0x200   /* Inexact detection enable */
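
/* Illustrative example, not from the original source: the commonly seen
   ADDT/SU encoding carries fn11 = 0x5A0.  With the definitions above,
   0x5A0 & QUAL_RM_MASK == QUAL_RM_N (round to nearest), QUAL_S and QUAL_U
   are set (software completion and underflow trapping enabled), and
   QUAL_I is clear (inexact detection disabled).  */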
static void gen_qual_roundmode(DisasContext *ctx, int fn11)
{
    TCGv_i32 tmp;

    fn11 &= QUAL_RM_MASK;
    if (fn11 == ctx->tb_rm) {
        return;
    }
    ctx->tb_rm = fn11;

    tmp = tcg_temp_new_i32();
    switch (fn11) {
    case QUAL_RM_N:
        tcg_gen_movi_i32(tmp, float_round_nearest_even);
        break;
    case QUAL_RM_C:
        tcg_gen_movi_i32(tmp, float_round_to_zero);
        break;
    case QUAL_RM_M:
        tcg_gen_movi_i32(tmp, float_round_down);
        break;
    case QUAL_RM_D:
        tcg_gen_ld8u_i32(tmp, cpu_env,
                         offsetof(CPUAlphaState, fpcr_dyn_round));
        break;
    }

#if defined(CONFIG_SOFTFLOAT_INLINE)
    /* ??? The "fpu/softfloat.h" interface is to call set_float_rounding_mode.
       With CONFIG_SOFTFLOAT that expands to an out-of-line call that just
       sets the one field.  */
    tcg_gen_st8_i32(tmp, cpu_env,
                    offsetof(CPUAlphaState, fp_status.float_rounding_mode));
#else
    gen_helper_setroundmode(tmp);
#endif

    tcg_temp_free_i32(tmp);
}
static void gen_qual_flushzero(DisasContext *ctx, int fn11)
{
    TCGv_i32 tmp;

    fn11 &= QUAL_U;
    if (fn11 == ctx->tb_ftz) {
        return;
    }
    ctx->tb_ftz = fn11;

    tmp = tcg_temp_new_i32();
    if (fn11) {
        /* Underflow is enabled, use the FPCR setting.  */
        tcg_gen_ld8u_i32(tmp, cpu_env,
                         offsetof(CPUAlphaState, fpcr_flush_to_zero));
    } else {
        /* Underflow is disabled, force flush-to-zero.  */
        tcg_gen_movi_i32(tmp, 1);
    }

#if defined(CONFIG_SOFTFLOAT_INLINE)
    tcg_gen_st8_i32(tmp, cpu_env,
                    offsetof(CPUAlphaState, fp_status.flush_to_zero));
#else
    gen_helper_setflushzero(tmp);
#endif

    tcg_temp_free_i32(tmp);
}
static TCGv gen_ieee_input(DisasContext *ctx, int reg, int fn11, int is_cmp)
{
    TCGv val;

    if (unlikely(reg == 31)) {
        val = load_zero(ctx);
    } else {
        val = cpu_fir[reg];
        if ((fn11 & QUAL_S) == 0) {
            if (is_cmp) {
                gen_helper_ieee_input_cmp(cpu_env, val);
            } else {
                gen_helper_ieee_input(cpu_env, val);
            }
        }
    }
    return val;
}
static void gen_fp_exc_clear(void)
{
#if defined(CONFIG_SOFTFLOAT_INLINE)
    TCGv_i32 zero = tcg_const_i32(0);
    tcg_gen_st8_i32(zero, cpu_env,
                    offsetof(CPUAlphaState, fp_status.float_exception_flags));
    tcg_temp_free_i32(zero);
#else
    gen_helper_fp_exc_clear(cpu_env);
#endif
}
static void gen_fp_exc_raise_ignore(int rc, int fn11, int ignore)
{
    /* ??? We ought to be able to do something with imprecise exceptions.
       E.g. notice we're still in the trap shadow of something within the
       TB and do not generate the code to signal the exception; end the TB
       when an exception is forced to arrive, either by consumption of a
       register value or TRAPB or EXCB.  */
    TCGv_i32 exc = tcg_temp_new_i32();
    TCGv_i32 reg;

#if defined(CONFIG_SOFTFLOAT_INLINE)
    tcg_gen_ld8u_i32(exc, cpu_env,
                     offsetof(CPUAlphaState, fp_status.float_exception_flags));
#else
    gen_helper_fp_exc_get(exc, cpu_env);
#endif

    if (ignore) {
        tcg_gen_andi_i32(exc, exc, ~ignore);
    }

    /* ??? Pass in the regno of the destination so that the helper can
       set EXC_MASK, which contains a bitmask of destination registers
       that have caused arithmetic traps.  A simple userspace emulation
       does not require this.  We do need it for a guest kernel's entArith,
       or if we were to do something clever with imprecise exceptions.  */
    reg = tcg_const_i32(rc + 32);

    if (fn11 & QUAL_S) {
        gen_helper_fp_exc_raise_s(cpu_env, exc, reg);
    } else {
        gen_helper_fp_exc_raise(cpu_env, exc, reg);
    }

    tcg_temp_free_i32(reg);
    tcg_temp_free_i32(exc);
}
static inline void gen_fp_exc_raise(int rc, int fn11)
{
    gen_fp_exc_raise_ignore(rc, fn11, fn11 & QUAL_I ? 0 : float_flag_inexact);
}

static void gen_fcvtlq(TCGv vc, TCGv vb)
{
    TCGv tmp = tcg_temp_new();

    /* The arithmetic right shift here, plus the sign-extended mask below
       yields a sign-extended result without an explicit ext32s_i64.  */
    tcg_gen_sari_i64(tmp, vb, 32);
    tcg_gen_shri_i64(vc, vb, 29);
    tcg_gen_andi_i64(tmp, tmp, (int32_t)0xc0000000);
    tcg_gen_andi_i64(vc, vc, 0x3fffffff);
    tcg_gen_or_i64(vc, vc, tmp);

    tcg_temp_free(tmp);
}

static void gen_fcvtql(TCGv vc, TCGv vb)
{
    TCGv tmp = tcg_temp_new();

    tcg_gen_andi_i64(tmp, vb, (int32_t)0xc0000000);
    tcg_gen_andi_i64(vc, vb, 0x3FFFFFFF);
    tcg_gen_shli_i64(tmp, tmp, 32);
    tcg_gen_shli_i64(vc, vc, 29);
    tcg_gen_or_i64(vc, vc, tmp);

    tcg_temp_free(tmp);
}
static void gen_ieee_arith2(DisasContext *ctx,
                            void (*helper)(TCGv, TCGv_ptr, TCGv),
                            int rb, int rc, int fn11)
{
    TCGv vb;

    gen_qual_roundmode(ctx, fn11);
    gen_qual_flushzero(ctx, fn11);
    gen_fp_exc_clear();

    vb = gen_ieee_input(ctx, rb, fn11, 0);
    helper(dest_fpr(ctx, rc), cpu_env, vb);

    gen_fp_exc_raise(rc, fn11);
}

#define IEEE_ARITH2(name)                                       \
static inline void glue(gen_f, name)(DisasContext *ctx,         \
                                     int rb, int rc, int fn11)  \
{                                                               \
    gen_ieee_arith2(ctx, gen_helper_##name, rb, rc, fn11);      \
}
IEEE_ARITH2(sqrts)
IEEE_ARITH2(sqrtt)
IEEE_ARITH2(cvtst)
IEEE_ARITH2(cvtts)
static void gen_fcvttq(DisasContext *ctx, int rb, int rc, int fn11)
{
    TCGv vb, vc;
    int ignore = 0;

    /* No need to set flushzero, since we have an integer output.  */
    gen_fp_exc_clear();
    vb = gen_ieee_input(ctx, rb, fn11, 0);
    vc = dest_fpr(ctx, rc);

    /* Almost all integer conversions use cropped rounding, and most
       also do not have integer overflow enabled.  Special case that.  */
    switch (fn11) {
    case QUAL_RM_C:
        gen_helper_cvttq_c(vc, cpu_env, vb);
        break;
    case QUAL_V | QUAL_RM_C:
    case QUAL_S | QUAL_V | QUAL_RM_C:
        ignore = float_flag_inexact;
        /* FALLTHRU */
    case QUAL_S | QUAL_V | QUAL_I | QUAL_RM_C:
        gen_helper_cvttq_svic(vc, cpu_env, vb);
        break;
    default:
        gen_qual_roundmode(ctx, fn11);
        gen_helper_cvttq(vc, cpu_env, vb);
        ignore |= (fn11 & QUAL_V ? 0 : float_flag_overflow);
        ignore |= (fn11 & QUAL_I ? 0 : float_flag_inexact);
        break;
    }

    gen_fp_exc_raise_ignore(rc, fn11, ignore);
}
static void gen_ieee_intcvt(DisasContext *ctx,
                            void (*helper)(TCGv, TCGv_ptr, TCGv),
                            int rb, int rc, int fn11)
{
    TCGv vb, vc;

    gen_qual_roundmode(ctx, fn11);
    vb = load_fpr(ctx, rb);
    vc = dest_fpr(ctx, rc);

    /* The only exception that can be raised by integer conversion
       is inexact.  Thus we only need to worry about exceptions when
       inexact handling is requested.  */
    if (fn11 & QUAL_I) {
        gen_fp_exc_clear();
        helper(vc, cpu_env, vb);
        gen_fp_exc_raise(rc, fn11);
    } else {
        helper(vc, cpu_env, vb);
    }
}

#define IEEE_INTCVT(name)                                       \
static inline void glue(gen_f, name)(DisasContext *ctx,         \
                                     int rb, int rc, int fn11)  \
{                                                               \
    gen_ieee_intcvt(ctx, gen_helper_##name, rb, rc, fn11);      \
}
IEEE_INTCVT(cvtqs)
IEEE_INTCVT(cvtqt)
static void gen_cpy_mask(TCGv vc, TCGv va, TCGv vb, bool inv_a, uint64_t mask)
{
    TCGv vmask = tcg_const_i64(mask);
    TCGv tmp = tcg_temp_new_i64();

    if (inv_a) {
        tcg_gen_andc_i64(tmp, vmask, va);
    } else {
        tcg_gen_and_i64(tmp, va, vmask);
    }

    tcg_gen_andc_i64(vc, vb, vmask);
    tcg_gen_or_i64(vc, vc, tmp);

    tcg_temp_free(vmask);
    tcg_temp_free(tmp);
}
static void gen_ieee_arith3(DisasContext *ctx,
                            void (*helper)(TCGv, TCGv_ptr, TCGv, TCGv),
                            int ra, int rb, int rc, int fn11)
{
    TCGv va, vb, vc;

    gen_qual_roundmode(ctx, fn11);
    gen_qual_flushzero(ctx, fn11);
    gen_fp_exc_clear();

    va = gen_ieee_input(ctx, ra, fn11, 0);
    vb = gen_ieee_input(ctx, rb, fn11, 0);
    vc = dest_fpr(ctx, rc);
    helper(vc, cpu_env, va, vb);

    gen_fp_exc_raise(rc, fn11);
}

#define IEEE_ARITH3(name)                                               \
static inline void glue(gen_f, name)(DisasContext *ctx,                 \
                                     int ra, int rb, int rc, int fn11)  \
{                                                                       \
    gen_ieee_arith3(ctx, gen_helper_##name, ra, rb, rc, fn11);          \
}
IEEE_ARITH3(adds)
IEEE_ARITH3(subs)
IEEE_ARITH3(muls)
IEEE_ARITH3(divs)
IEEE_ARITH3(addt)
IEEE_ARITH3(subt)
IEEE_ARITH3(mult)
IEEE_ARITH3(divt)
static void gen_ieee_compare(DisasContext *ctx,
                             void (*helper)(TCGv, TCGv_ptr, TCGv, TCGv),
                             int ra, int rb, int rc, int fn11)
{
    TCGv va, vb, vc;

    gen_fp_exc_clear();

    va = gen_ieee_input(ctx, ra, fn11, 1);
    vb = gen_ieee_input(ctx, rb, fn11, 1);
    vc = dest_fpr(ctx, rc);
    helper(vc, cpu_env, va, vb);

    gen_fp_exc_raise(rc, fn11);
}

#define IEEE_CMP3(name)                                                 \
static inline void glue(gen_f, name)(DisasContext *ctx,                 \
                                     int ra, int rb, int rc, int fn11)  \
{                                                                       \
    gen_ieee_compare(ctx, gen_helper_##name, ra, rb, rc, fn11);         \
}
IEEE_CMP3(cmptun)
IEEE_CMP3(cmpteq)
IEEE_CMP3(cmptlt)
IEEE_CMP3(cmptle)
static inline uint64_t zapnot_mask(uint8_t lit)
{
    uint64_t mask = 0;
    int i;

    for (i = 0; i < 8; ++i) {
        if ((lit >> i) & 1) {
            mask |= 0xffull << (i * 8);
        }
    }
    return mask;
}
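
/* Worked example (illustrative): zapnot_mask(0x0f) sets a 0xff byte for
   each of the low four bits and yields 0x00000000ffffffff, which is why
   gen_zapnoti() below can special-case lit == 0x0f as a plain 32-bit zero
   extension.  Likewise zapnot_mask(0x01) == 0xff and
   zapnot_mask(0xff) == ~0ull.  */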
/* Implement zapnot with an immediate operand, which expands to some
   form of immediate AND.  This is a basic building block in the
   definition of many of the other byte manipulation instructions.  */
static void gen_zapnoti(TCGv dest, TCGv src, uint8_t lit)
{
    switch (lit) {
    case 0x00:
        tcg_gen_movi_i64(dest, 0);
        break;
    case 0x01:
        tcg_gen_ext8u_i64(dest, src);
        break;
    case 0x03:
        tcg_gen_ext16u_i64(dest, src);
        break;
    case 0x0f:
        tcg_gen_ext32u_i64(dest, src);
        break;
    case 0xff:
        tcg_gen_mov_i64(dest, src);
        break;
    default:
        tcg_gen_andi_i64(dest, src, zapnot_mask(lit));
        break;
    }
}
/* EXTWH, EXTLH, EXTQH */
static void gen_ext_h(DisasContext *ctx, TCGv vc, TCGv va, int rb, bool islit,
                      uint8_t lit, uint8_t byte_mask)
{
    if (islit) {
        tcg_gen_shli_i64(vc, va, (64 - lit * 8) & 0x3f);
    } else {
        TCGv tmp = tcg_temp_new();
        tcg_gen_shli_i64(tmp, load_gpr(ctx, rb), 3);
        tcg_gen_neg_i64(tmp, tmp);
        tcg_gen_andi_i64(tmp, tmp, 0x3f);
        tcg_gen_shl_i64(vc, va, tmp);
        tcg_temp_free(tmp);
    }
    gen_zapnoti(vc, vc, byte_mask);
}
/* EXTBL, EXTWL, EXTLL, EXTQL */
static void gen_ext_l(DisasContext *ctx, TCGv vc, TCGv va, int rb, bool islit,
                      uint8_t lit, uint8_t byte_mask)
{
    if (islit) {
        tcg_gen_shri_i64(vc, va, (lit & 7) * 8);
    } else {
        TCGv tmp = tcg_temp_new();
        tcg_gen_andi_i64(tmp, load_gpr(ctx, rb), 7);
        tcg_gen_shli_i64(tmp, tmp, 3);
        tcg_gen_shr_i64(vc, va, tmp);
        tcg_temp_free(tmp);
    }
    gen_zapnoti(vc, vc, byte_mask);
}
/* INSWH, INSLH, INSQH */
static void gen_ins_h(DisasContext *ctx, TCGv vc, TCGv va, int rb, bool islit,
                      uint8_t lit, uint8_t byte_mask)
{
    TCGv tmp = tcg_temp_new();

    /* The instruction description has us left-shift the byte mask and extract
       bits <15:8> and apply that zap at the end.  This is equivalent to simply
       performing the zap first and shifting afterward.  */
    gen_zapnoti(tmp, va, byte_mask);

    if (islit) {
        lit &= 7;
        if (unlikely(lit == 0)) {
            tcg_gen_movi_i64(vc, 0);
        } else {
            tcg_gen_shri_i64(vc, tmp, 64 - lit * 8);
        }
    } else {
        TCGv shift = tcg_temp_new();

        /* If (B & 7) == 0, we need to shift by 64 and leave a zero.  Do this
           portably by splitting the shift into two parts: shift_count-1 and 1.
           Arrange for the -1 by using ones-complement instead of
           twos-complement in the negation: ~(B * 8) & 63.  */

        tcg_gen_shli_i64(shift, load_gpr(ctx, rb), 3);
        tcg_gen_not_i64(shift, shift);
        tcg_gen_andi_i64(shift, shift, 0x3f);

        tcg_gen_shr_i64(vc, tmp, shift);
        tcg_gen_shri_i64(vc, vc, 1);
        tcg_temp_free(shift);
    }
    tcg_temp_free(tmp);
}
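
/* Worked example for the split shift above (illustrative): if register rb
   holds 5, then B*8 == 40 and ~(B*8) & 63 == 23; shifting right by 23 and
   then by 1 gives the required 24-bit (64 - 40) right shift.  If rb holds
   0 or 8, ~(B*8) & 63 == 63, and the extra shift by 1 makes the total 64,
   producing the zero that a single variable shift could not portably
   produce.  */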
/* INSBL, INSWL, INSLL, INSQL */
static void gen_ins_l(DisasContext *ctx, TCGv vc, TCGv va, int rb, bool islit,
                      uint8_t lit, uint8_t byte_mask)
{
    TCGv tmp = tcg_temp_new();

    /* The instruction description has us left-shift the byte mask
       the same number of byte slots as the data and apply the zap
       at the end.  This is equivalent to simply performing the zap
       first and shifting afterward.  */
    gen_zapnoti(tmp, va, byte_mask);

    if (islit) {
        tcg_gen_shli_i64(vc, tmp, (lit & 7) * 8);
    } else {
        TCGv shift = tcg_temp_new();
        tcg_gen_andi_i64(shift, load_gpr(ctx, rb), 7);
        tcg_gen_shli_i64(shift, shift, 3);
        tcg_gen_shl_i64(vc, tmp, shift);
        tcg_temp_free(shift);
    }
    tcg_temp_free(tmp);
}
/* MSKWH, MSKLH, MSKQH */
static void gen_msk_h(DisasContext *ctx, TCGv vc, TCGv va, int rb, bool islit,
                      uint8_t lit, uint8_t byte_mask)
{
    if (islit) {
        gen_zapnoti(vc, va, ~((byte_mask << (lit & 7)) >> 8));
    } else {
        TCGv shift = tcg_temp_new();
        TCGv mask = tcg_temp_new();

        /* The instruction description is as above, where the byte_mask
           is shifted left, and then we extract bits <15:8>.  This can be
           emulated with a right-shift on the expanded byte mask.  This
           requires extra care because for an input <2:0> == 0 we need a
           shift of 64 bits in order to generate a zero.  This is done by
           splitting the shift into two parts, the variable shift - 1
           followed by a constant 1 shift.  The code we expand below is
           equivalent to ~(B * 8) & 63.  */

        tcg_gen_shli_i64(shift, load_gpr(ctx, rb), 3);
        tcg_gen_not_i64(shift, shift);
        tcg_gen_andi_i64(shift, shift, 0x3f);
        tcg_gen_movi_i64(mask, zapnot_mask(byte_mask));
        tcg_gen_shr_i64(mask, mask, shift);
        tcg_gen_shri_i64(mask, mask, 1);

        tcg_gen_andc_i64(vc, va, mask);

        tcg_temp_free(mask);
        tcg_temp_free(shift);
    }
}
/* MSKBL, MSKWL, MSKLL, MSKQL */
static void gen_msk_l(DisasContext *ctx, TCGv vc, TCGv va, int rb, bool islit,
                      uint8_t lit, uint8_t byte_mask)
{
    if (islit) {
        gen_zapnoti(vc, va, ~(byte_mask << (lit & 7)));
    } else {
        TCGv shift = tcg_temp_new();
        TCGv mask = tcg_temp_new();

        tcg_gen_andi_i64(shift, load_gpr(ctx, rb), 7);
        tcg_gen_shli_i64(shift, shift, 3);
        tcg_gen_movi_i64(mask, zapnot_mask(byte_mask));
        tcg_gen_shl_i64(mask, mask, shift);

        tcg_gen_andc_i64(vc, va, mask);

        tcg_temp_free(mask);
        tcg_temp_free(shift);
    }
}
static void gen_rx(int ra, int set)
{
    TCGv_i32 tmp;

    if (ra != 31) {
        tcg_gen_ld8u_i64(cpu_ir[ra], cpu_env, offsetof(CPUAlphaState, intr_flag));
    }

    tmp = tcg_const_i32(set);
    tcg_gen_st8_i32(tmp, cpu_env, offsetof(CPUAlphaState, intr_flag));
    tcg_temp_free_i32(tmp);
}
static ExitStatus gen_call_pal(DisasContext *ctx, int palcode)
{
    /* We're emulating OSF/1 PALcode.  Many of these are trivial access
       to internal cpu registers.  */

    /* Unprivileged PAL call */
    if (palcode >= 0x80 && palcode < 0xC0) {
        switch (palcode) {
        case 0x86:
            /* IMB */
            /* No-op inside QEMU.  */
            break;
        case 0x9E:
            /* RDUNIQUE */
            tcg_gen_ld_i64(cpu_ir[IR_V0], cpu_env,
                           offsetof(CPUAlphaState, unique));
            break;
        case 0x9F:
            /* WRUNIQUE */
            tcg_gen_st_i64(cpu_ir[IR_A0], cpu_env,
                           offsetof(CPUAlphaState, unique));
            break;
        default:
            palcode &= 0xbf;
            goto do_call_pal;
        }
        return NO_EXIT;
    }

#ifndef CONFIG_USER_ONLY
    /* Privileged PAL code */
    if (palcode < 0x40 && (ctx->tb->flags & TB_FLAGS_USER_MODE) == 0) {
        switch (palcode) {
        case 0x01:
            /* CFLUSH */
            /* No-op inside QEMU.  */
            break;
        case 0x02:
            /* DRAINA */
            /* No-op inside QEMU.  */
            break;
        case 0x2D:
            /* WRVPTPTR */
            tcg_gen_st_i64(cpu_ir[IR_A0], cpu_env,
                           offsetof(CPUAlphaState, vptptr));
            break;
        case 0x31:
            /* WRVAL */
            tcg_gen_st_i64(cpu_ir[IR_A0], cpu_env,
                           offsetof(CPUAlphaState, sysval));
            break;
        case 0x32:
            /* RDVAL */
            tcg_gen_ld_i64(cpu_ir[IR_V0], cpu_env,
                           offsetof(CPUAlphaState, sysval));
            break;

        case 0x35: {
            /* SWPIPL */
            TCGv tmp;

            /* Note that we already know we're in kernel mode, so we know
               that PS only contains the 3 IPL bits.  */
            tcg_gen_ld8u_i64(cpu_ir[IR_V0], cpu_env,
                             offsetof(CPUAlphaState, ps));

            /* But make sure and store only the 3 IPL bits from the user.  */
            tmp = tcg_temp_new();
            tcg_gen_andi_i64(tmp, cpu_ir[IR_A0], PS_INT_MASK);
            tcg_gen_st8_i64(tmp, cpu_env, offsetof(CPUAlphaState, ps));
            tcg_temp_free(tmp);
            break;
        }

        case 0x36:
            /* RDPS */
            tcg_gen_ld8u_i64(cpu_ir[IR_V0], cpu_env,
                             offsetof(CPUAlphaState, ps));
            break;
        case 0x38:
            /* WRUSP */
            tcg_gen_st_i64(cpu_ir[IR_A0], cpu_env,
                           offsetof(CPUAlphaState, usp));
            break;
        case 0x3A:
            /* RDUSP */
            tcg_gen_ld_i64(cpu_ir[IR_V0], cpu_env,
                           offsetof(CPUAlphaState, usp));
            break;
        case 0x3C:
            /* WHAMI */
            tcg_gen_ld32s_i64(cpu_ir[IR_V0], cpu_env,
                -offsetof(AlphaCPU, env) + offsetof(CPUState, cpu_index));
            break;

        default:
            palcode &= 0x3f;
            goto do_call_pal;
        }
        return NO_EXIT;
    }
#endif
    return gen_invalid(ctx);

 do_call_pal:
#ifdef CONFIG_USER_ONLY
    return gen_excp(ctx, EXCP_CALL_PAL, palcode);
#else
    {
        TCGv pc = tcg_const_i64(ctx->pc);
        TCGv entry = tcg_const_i64(palcode & 0x80
                                   ? 0x2000 + (palcode - 0x80) * 64
                                   : 0x1000 + palcode * 64);

        gen_helper_call_pal(cpu_env, pc, entry);

        tcg_temp_free(entry);
        tcg_temp_free(pc);

        /* Since the destination is running in PALmode, we don't really
           need the page permissions check.  We'll see the existence of
           the page when we create the TB, and we'll flush all TBs if
           we change the PAL base register.  */
        if (!ctx->singlestep_enabled && !(ctx->tb->cflags & CF_LAST_IO)) {
            tcg_gen_goto_tb(0);
            tcg_gen_exit_tb((uintptr_t)ctx->tb);
            return EXIT_GOTO_TB;
        }

        return EXIT_PC_UPDATED;
    }
#endif
}
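
/* Illustrative example (not from the original source): for the
   unprivileged CALL_PAL 0x83 (callsys in the OSF/1 convention), the entry
   computation above yields 0x2000 + (0x83 - 0x80) * 64 == 0x20c0, the
   offset from the PAL base at which the emulation PALcode is entered;
   privileged calls instead land at 0x1000 + palcode * 64.  */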
#ifndef CONFIG_USER_ONLY

#define PR_BYTE         0x100000
#define PR_LONG         0x200000

static int cpu_pr_data(int pr)
{
    switch (pr) {
    case  0: return offsetof(CPUAlphaState, ps) | PR_BYTE;
    case  1: return offsetof(CPUAlphaState, fen) | PR_BYTE;
    case  2: return offsetof(CPUAlphaState, pcc_ofs) | PR_LONG;
    case  3: return offsetof(CPUAlphaState, trap_arg0);
    case  4: return offsetof(CPUAlphaState, trap_arg1);
    case  5: return offsetof(CPUAlphaState, trap_arg2);
    case  6: return offsetof(CPUAlphaState, exc_addr);
    case  7: return offsetof(CPUAlphaState, palbr);
    case  8: return offsetof(CPUAlphaState, ptbr);
    case  9: return offsetof(CPUAlphaState, vptptr);
    case 10: return offsetof(CPUAlphaState, unique);
    case 11: return offsetof(CPUAlphaState, sysval);
    case 12: return offsetof(CPUAlphaState, usp);

    case 32 ... 39:
        return offsetof(CPUAlphaState, shadow[pr - 32]);
    case 40 ... 47:
        return offsetof(CPUAlphaState, scratch[pr - 40]);

    case 251:
        return offsetof(CPUAlphaState, alarm_expire);
    }
    return 0;
}
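
/* Note (illustrative): the PR_BYTE/PR_LONG bits tag an access width on top
   of the env offset.  For example cpu_pr_data(0) returns
   offsetof(CPUAlphaState, ps) | PR_BYTE, so gen_mfpr()/gen_mtpr() below
   strip the flag and use an 8-bit load/store for the PS register, while
   untagged entries such as trap_arg0 are accessed as full 64-bit values.  */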
static ExitStatus gen_mfpr(TCGv va, int regno)
{
    int data = cpu_pr_data(regno);

    /* Special help for VMTIME and WALLTIME.  */
    if (regno == 250 || regno == 249) {
        void (*helper)(TCGv) = gen_helper_get_walltime;
        if (regno == 249) {
            helper = gen_helper_get_vmtime;
        }
        if (use_icount) {
            gen_io_start();
            helper(va);
            gen_io_end();
            return EXIT_PC_STALE;
        } else {
            helper(va);
            return NO_EXIT;
        }
    }

    /* The basic registers are data only, and unknown registers
       are read-zero, write-ignore.  */
    if (data == 0) {
        tcg_gen_movi_i64(va, 0);
    } else if (data & PR_BYTE) {
        tcg_gen_ld8u_i64(va, cpu_env, data & ~PR_BYTE);
    } else if (data & PR_LONG) {
        tcg_gen_ld32s_i64(va, cpu_env, data & ~PR_LONG);
    } else {
        tcg_gen_ld_i64(va, cpu_env, data);
    }
    return NO_EXIT;
}

static ExitStatus gen_mtpr(DisasContext *ctx, TCGv vb, int regno)
{
    TCGv tmp;
    int data;

    switch (regno) {
    case 255:
        /* TBIA */
        gen_helper_tbia(cpu_env);
        break;

    case 254:
        /* TBIS */
        gen_helper_tbis(cpu_env, vb);
        break;

    case 253:
        /* WAIT */
        tmp = tcg_const_i64(1);
        tcg_gen_st32_i64(tmp, cpu_env, -offsetof(AlphaCPU, env) +
                         offsetof(CPUState, halted));
        return gen_excp(ctx, EXCP_HLT, 0);

    case 252:
        /* HALT */
        gen_helper_halt(vb);
        return EXIT_PC_STALE;

    case 251:
        /* ALARM */
        gen_helper_set_alarm(cpu_env, vb);
        break;

    case 7:
        /* PALBR */
        tcg_gen_st_i64(vb, cpu_env, offsetof(CPUAlphaState, palbr));
        /* Changing the PAL base register implies un-chaining all of the TBs
           that ended with a CALL_PAL.  Since the base register usually only
           changes during boot, flushing everything works well.  */
        gen_helper_tb_flush(cpu_env);
        return EXIT_PC_STALE;

    default:
        /* The basic registers are data only, and unknown registers
           are read-zero, write-ignore.  */
        data = cpu_pr_data(regno);
        if (data != 0) {
            if (data & PR_BYTE) {
                tcg_gen_st8_i64(vb, cpu_env, data & ~PR_BYTE);
            } else if (data & PR_LONG) {
                tcg_gen_st32_i64(vb, cpu_env, data & ~PR_LONG);
            } else {
                tcg_gen_st_i64(vb, cpu_env, data);
            }
        }
        break;
    }

    return NO_EXIT;
}
#endif /* !CONFIG_USER_ONLY */
#define REQUIRE_TB_FLAG(FLAG)                   \
    do {                                        \
        if ((ctx->tb->flags & (FLAG)) == 0) {   \
            goto invalid_opc;                   \
        }                                       \
    } while (0)

#define REQUIRE_REG_31(WHICH)                   \
    do {                                        \
        if (WHICH != 31) {                      \
            goto invalid_opc;                   \
        }                                       \
    } while (0)
static ExitStatus translate_one(DisasContext *ctx, uint32_t insn)
{
    int32_t disp21, disp16, disp12 __attribute__((unused));
    uint16_t fn11;
    uint8_t opc, ra, rb, rc, fpfn, fn7, lit;
    bool islit;
    TCGv va, vb, vc, tmp;
    TCGv_i32 t32;
    ExitStatus ret;

    /* Decode all instruction fields */
    opc = extract32(insn, 26, 6);
    ra = extract32(insn, 21, 5);
    rb = extract32(insn, 16, 5);
    rc = extract32(insn, 0, 5);
    islit = extract32(insn, 12, 1);
    lit = extract32(insn, 13, 8);

    disp21 = sextract32(insn, 0, 21);
    disp16 = sextract32(insn, 0, 16);
    disp12 = sextract32(insn, 0, 12);

    fn11 = extract32(insn, 5, 11);
    fpfn = extract32(insn, 5, 6);
    fn7 = extract32(insn, 5, 7);
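
    /* Worked decode example (illustrative, not from the original source):
       the canonical Alpha NOP 0x47ff041f ("bis $31,$31,$31") decodes as
       opc = 0x11, ra = rb = rc = 31, islit = 0 and fn7 = 0x20, so it falls
       into the BIS special case further down and generates no code.  */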
1422 if (rb
== 31 && !islit
) {
1431 ret
= gen_call_pal(ctx
, insn
& 0x03ffffff);
1457 disp16
= (uint32_t)disp16
<< 16;
1461 va
= dest_gpr(ctx
, ra
);
1462 /* It's worth special-casing immediate loads. */
1464 tcg_gen_movi_i64(va
, disp16
);
1466 tcg_gen_addi_i64(va
, load_gpr(ctx
, rb
), disp16
);
1472 REQUIRE_TB_FLAG(TB_FLAGS_AMASK_BWX
);
1473 gen_load_mem(ctx
, &tcg_gen_qemu_ld8u
, ra
, rb
, disp16
, 0, 0);
1477 gen_load_mem(ctx
, &tcg_gen_qemu_ld64
, ra
, rb
, disp16
, 0, 1);
1481 REQUIRE_TB_FLAG(TB_FLAGS_AMASK_BWX
);
1482 gen_load_mem(ctx
, &tcg_gen_qemu_ld16u
, ra
, rb
, disp16
, 0, 0);
1486 REQUIRE_TB_FLAG(TB_FLAGS_AMASK_BWX
);
1487 gen_store_mem(ctx
, &tcg_gen_qemu_st16
, ra
, rb
, disp16
, 0, 0);
1491 REQUIRE_TB_FLAG(TB_FLAGS_AMASK_BWX
);
1492 gen_store_mem(ctx
, &tcg_gen_qemu_st8
, ra
, rb
, disp16
, 0, 0);
1496 gen_store_mem(ctx
, &tcg_gen_qemu_st64
, ra
, rb
, disp16
, 0, 1);
1500 vc
= dest_gpr(ctx
, rc
);
1501 vb
= load_gpr_lit(ctx
, rb
, lit
, islit
);
1505 /* Special case ADDL as SEXTL. */
1506 tcg_gen_ext32s_i64(vc
, vb
);
1510 /* Special case SUBQ as NEGQ. */
1511 tcg_gen_neg_i64(vc
, vb
);
1516 va
= load_gpr(ctx
, ra
);
1520 tcg_gen_add_i64(vc
, va
, vb
);
1521 tcg_gen_ext32s_i64(vc
, vc
);
1525 tmp
= tcg_temp_new();
1526 tcg_gen_shli_i64(tmp
, va
, 2);
1527 tcg_gen_add_i64(tmp
, tmp
, vb
);
1528 tcg_gen_ext32s_i64(vc
, tmp
);
1533 tcg_gen_sub_i64(vc
, va
, vb
);
1534 tcg_gen_ext32s_i64(vc
, vc
);
1538 tmp
= tcg_temp_new();
1539 tcg_gen_shli_i64(tmp
, va
, 2);
1540 tcg_gen_sub_i64(tmp
, tmp
, vb
);
1541 tcg_gen_ext32s_i64(vc
, tmp
);
1546 gen_helper_cmpbge(vc
, va
, vb
);
1550 tmp
= tcg_temp_new();
1551 tcg_gen_shli_i64(tmp
, va
, 3);
1552 tcg_gen_add_i64(tmp
, tmp
, vb
);
1553 tcg_gen_ext32s_i64(vc
, tmp
);
1558 tmp
= tcg_temp_new();
1559 tcg_gen_shli_i64(tmp
, va
, 3);
1560 tcg_gen_sub_i64(tmp
, tmp
, vb
);
1561 tcg_gen_ext32s_i64(vc
, tmp
);
1566 tcg_gen_setcond_i64(TCG_COND_LTU
, vc
, va
, vb
);
1570 tcg_gen_add_i64(vc
, va
, vb
);
1574 tmp
= tcg_temp_new();
1575 tcg_gen_shli_i64(tmp
, va
, 2);
1576 tcg_gen_add_i64(vc
, tmp
, vb
);
1581 tcg_gen_sub_i64(vc
, va
, vb
);
1585 tmp
= tcg_temp_new();
1586 tcg_gen_shli_i64(tmp
, va
, 2);
1587 tcg_gen_sub_i64(vc
, tmp
, vb
);
1592 tcg_gen_setcond_i64(TCG_COND_EQ
, vc
, va
, vb
);
1596 tmp
= tcg_temp_new();
1597 tcg_gen_shli_i64(tmp
, va
, 3);
1598 tcg_gen_add_i64(vc
, tmp
, vb
);
1603 tmp
= tcg_temp_new();
1604 tcg_gen_shli_i64(tmp
, va
, 3);
1605 tcg_gen_sub_i64(vc
, tmp
, vb
);
1610 tcg_gen_setcond_i64(TCG_COND_LEU
, vc
, va
, vb
);
1614 gen_helper_addlv(vc
, cpu_env
, va
, vb
);
1618 gen_helper_sublv(vc
, cpu_env
, va
, vb
);
1622 tcg_gen_setcond_i64(TCG_COND_LT
, vc
, va
, vb
);
1626 gen_helper_addqv(vc
, cpu_env
, va
, vb
);
1630 gen_helper_subqv(vc
, cpu_env
, va
, vb
);
1634 tcg_gen_setcond_i64(TCG_COND_LE
, vc
, va
, vb
);
1644 /* Special case BIS as NOP. */
1648 /* Special case BIS as MOV. */
1649 vc
= dest_gpr(ctx
, rc
);
1651 tcg_gen_movi_i64(vc
, lit
);
1653 tcg_gen_mov_i64(vc
, load_gpr(ctx
, rb
));
1659 vc
= dest_gpr(ctx
, rc
);
1660 vb
= load_gpr_lit(ctx
, rb
, lit
, islit
);
1662 if (fn7
== 0x28 && ra
== 31) {
1663 /* Special case ORNOT as NOT. */
1664 tcg_gen_not_i64(vc
, vb
);
1668 va
= load_gpr(ctx
, ra
);
1672 tcg_gen_and_i64(vc
, va
, vb
);
1676 tcg_gen_andc_i64(vc
, va
, vb
);
1680 tmp
= tcg_temp_new();
1681 tcg_gen_andi_i64(tmp
, va
, 1);
1682 tcg_gen_movcond_i64(TCG_COND_NE
, vc
, tmp
, load_zero(ctx
),
1683 vb
, load_gpr(ctx
, rc
));
1688 tmp
= tcg_temp_new();
1689 tcg_gen_andi_i64(tmp
, va
, 1);
1690 tcg_gen_movcond_i64(TCG_COND_EQ
, vc
, tmp
, load_zero(ctx
),
1691 vb
, load_gpr(ctx
, rc
));
1696 tcg_gen_or_i64(vc
, va
, vb
);
1700 tcg_gen_movcond_i64(TCG_COND_EQ
, vc
, va
, load_zero(ctx
),
1701 vb
, load_gpr(ctx
, rc
));
1705 tcg_gen_movcond_i64(TCG_COND_NE
, vc
, va
, load_zero(ctx
),
1706 vb
, load_gpr(ctx
, rc
));
1710 tcg_gen_orc_i64(vc
, va
, vb
);
1714 tcg_gen_xor_i64(vc
, va
, vb
);
1718 tcg_gen_movcond_i64(TCG_COND_LT
, vc
, va
, load_zero(ctx
),
1719 vb
, load_gpr(ctx
, rc
));
1723 tcg_gen_movcond_i64(TCG_COND_GE
, vc
, va
, load_zero(ctx
),
1724 vb
, load_gpr(ctx
, rc
));
1728 tcg_gen_eqv_i64(vc
, va
, vb
);
1734 uint64_t amask
= ctx
->tb
->flags
>> TB_FLAGS_AMASK_SHIFT
;
1735 tcg_gen_andi_i64(vc
, vb
, ~amask
);
1740 tcg_gen_movcond_i64(TCG_COND_LE
, vc
, va
, load_zero(ctx
),
1741 vb
, load_gpr(ctx
, rc
));
1745 tcg_gen_movcond_i64(TCG_COND_GT
, vc
, va
, load_zero(ctx
),
1746 vb
, load_gpr(ctx
, rc
));
1751 tcg_gen_movi_i64(vc
, ctx
->implver
);
1759 vc
= dest_gpr(ctx
, rc
);
1760 va
= load_gpr(ctx
, ra
);
1764 gen_msk_l(ctx
, vc
, va
, rb
, islit
, lit
, 0x01);
1768 gen_ext_l(ctx
, vc
, va
, rb
, islit
, lit
, 0x01);
1772 gen_ins_l(ctx
, vc
, va
, rb
, islit
, lit
, 0x01);
1776 gen_msk_l(ctx
, vc
, va
, rb
, islit
, lit
, 0x03);
1780 gen_ext_l(ctx
, vc
, va
, rb
, islit
, lit
, 0x03);
1784 gen_ins_l(ctx
, vc
, va
, rb
, islit
, lit
, 0x03);
1788 gen_msk_l(ctx
, vc
, va
, rb
, islit
, lit
, 0x0f);
1792 gen_ext_l(ctx
, vc
, va
, rb
, islit
, lit
, 0x0f);
1796 gen_ins_l(ctx
, vc
, va
, rb
, islit
, lit
, 0x0f);
1801 gen_zapnoti(vc
, va
, ~lit
);
1803 gen_helper_zap(vc
, va
, load_gpr(ctx
, rb
));
1809 gen_zapnoti(vc
, va
, lit
);
1811 gen_helper_zapnot(vc
, va
, load_gpr(ctx
, rb
));
1816 gen_msk_l(ctx
, vc
, va
, rb
, islit
, lit
, 0xff);
1821 tcg_gen_shri_i64(vc
, va
, lit
& 0x3f);
1823 tmp
= tcg_temp_new();
1824 vb
= load_gpr(ctx
, rb
);
1825 tcg_gen_andi_i64(tmp
, vb
, 0x3f);
1826 tcg_gen_shr_i64(vc
, va
, tmp
);
1832 gen_ext_l(ctx
, vc
, va
, rb
, islit
, lit
, 0xff);
1837 tcg_gen_shli_i64(vc
, va
, lit
& 0x3f);
1839 tmp
= tcg_temp_new();
1840 vb
= load_gpr(ctx
, rb
);
1841 tcg_gen_andi_i64(tmp
, vb
, 0x3f);
1842 tcg_gen_shl_i64(vc
, va
, tmp
);
1848 gen_ins_l(ctx
, vc
, va
, rb
, islit
, lit
, 0xff);
1853 tcg_gen_sari_i64(vc
, va
, lit
& 0x3f);
1855 tmp
= tcg_temp_new();
1856 vb
= load_gpr(ctx
, rb
);
1857 tcg_gen_andi_i64(tmp
, vb
, 0x3f);
1858 tcg_gen_sar_i64(vc
, va
, tmp
);
1864 gen_msk_h(ctx
, vc
, va
, rb
, islit
, lit
, 0x03);
1868 gen_ins_h(ctx
, vc
, va
, rb
, islit
, lit
, 0x03);
1872 gen_ext_h(ctx
, vc
, va
, rb
, islit
, lit
, 0x03);
1876 gen_msk_h(ctx
, vc
, va
, rb
, islit
, lit
, 0x0f);
1880 gen_ins_h(ctx
, vc
, va
, rb
, islit
, lit
, 0x0f);
1884 gen_ext_h(ctx
, vc
, va
, rb
, islit
, lit
, 0x0f);
1888 gen_msk_h(ctx
, vc
, va
, rb
, islit
, lit
, 0xff);
1892 gen_ins_h(ctx
, vc
, va
, rb
, islit
, lit
, 0xff);
1896 gen_ext_h(ctx
, vc
, va
, rb
, islit
, lit
, 0xff);
1904 vc
= dest_gpr(ctx
, rc
);
1905 vb
= load_gpr_lit(ctx
, rb
, lit
, islit
);
1906 va
= load_gpr(ctx
, ra
);
1910 tcg_gen_mul_i64(vc
, va
, vb
);
1911 tcg_gen_ext32s_i64(vc
, vc
);
1915 tcg_gen_mul_i64(vc
, va
, vb
);
1919 tmp
= tcg_temp_new();
1920 tcg_gen_mulu2_i64(tmp
, vc
, va
, vb
);
1925 gen_helper_mullv(vc
, cpu_env
, va
, vb
);
1929 gen_helper_mulqv(vc
, cpu_env
, va
, vb
);
1937 REQUIRE_TB_FLAG(TB_FLAGS_AMASK_FIX
);
1938 vc
= dest_fpr(ctx
, rc
);
1939 switch (fpfn
) { /* fn11 & 0x3F */
1943 t32
= tcg_temp_new_i32();
1944 va
= load_gpr(ctx
, ra
);
1945 tcg_gen_trunc_i64_i32(t32
, va
);
1946 gen_helper_memory_to_s(vc
, t32
);
1947 tcg_temp_free_i32(t32
);
1952 vb
= load_fpr(ctx
, rb
);
1953 gen_helper_sqrtf(vc
, cpu_env
, vb
);
1958 gen_fsqrts(ctx
, rb
, rc
, fn11
);
1963 t32
= tcg_temp_new_i32();
1964 va
= load_gpr(ctx
, ra
);
1965 tcg_gen_trunc_i64_i32(t32
, va
);
1966 gen_helper_memory_to_f(vc
, t32
);
1967 tcg_temp_free_i32(t32
);
1972 va
= load_gpr(ctx
, ra
);
1973 tcg_gen_mov_i64(vc
, va
);
1978 vb
= load_fpr(ctx
, rb
);
1979 gen_helper_sqrtg(vc
, cpu_env
, vb
);
1984 gen_fsqrtt(ctx
, rb
, rc
, fn11
);
1992 /* VAX floating point */
1993 /* XXX: rounding mode and trap are ignored (!) */
1994 vc
= dest_fpr(ctx
, rc
);
1995 vb
= load_fpr(ctx
, rb
);
1996 va
= load_fpr(ctx
, ra
);
1997 switch (fpfn
) { /* fn11 & 0x3F */
2000 gen_helper_addf(vc
, cpu_env
, va
, vb
);
2004 gen_helper_subf(vc
, cpu_env
, va
, vb
);
2008 gen_helper_mulf(vc
, cpu_env
, va
, vb
);
2012 gen_helper_divf(vc
, cpu_env
, va
, vb
);
2020 gen_helper_addg(vc
, cpu_env
, va
, vb
);
2024 gen_helper_subg(vc
, cpu_env
, va
, vb
);
2028 gen_helper_mulg(vc
, cpu_env
, va
, vb
);
2032 gen_helper_divg(vc
, cpu_env
, va
, vb
);
2036 gen_helper_cmpgeq(vc
, cpu_env
, va
, vb
);
2040 gen_helper_cmpglt(vc
, cpu_env
, va
, vb
);
2044 gen_helper_cmpgle(vc
, cpu_env
, va
, vb
);
2049 gen_helper_cvtgf(vc
, cpu_env
, vb
);
2058 gen_helper_cvtgq(vc
, cpu_env
, vb
);
2063 gen_helper_cvtqf(vc
, cpu_env
, vb
);
2068 gen_helper_cvtqg(vc
, cpu_env
, vb
);
2076 /* IEEE floating-point */
2077 switch (fpfn
) { /* fn11 & 0x3F */
2080 gen_fadds(ctx
, ra
, rb
, rc
, fn11
);
2084 gen_fsubs(ctx
, ra
, rb
, rc
, fn11
);
2088 gen_fmuls(ctx
, ra
, rb
, rc
, fn11
);
2092 gen_fdivs(ctx
, ra
, rb
, rc
, fn11
);
2096 gen_faddt(ctx
, ra
, rb
, rc
, fn11
);
2100 gen_fsubt(ctx
, ra
, rb
, rc
, fn11
);
2104 gen_fmult(ctx
, ra
, rb
, rc
, fn11
);
2108 gen_fdivt(ctx
, ra
, rb
, rc
, fn11
);
2112 gen_fcmptun(ctx
, ra
, rb
, rc
, fn11
);
2116 gen_fcmpteq(ctx
, ra
, rb
, rc
, fn11
);
2120 gen_fcmptlt(ctx
, ra
, rb
, rc
, fn11
);
2124 gen_fcmptle(ctx
, ra
, rb
, rc
, fn11
);
2128 if (fn11
== 0x2AC || fn11
== 0x6AC) {
2130 gen_fcvtst(ctx
, rb
, rc
, fn11
);
2133 gen_fcvtts(ctx
, rb
, rc
, fn11
);
2139 gen_fcvttq(ctx
, rb
, rc
, fn11
);
2144 gen_fcvtqs(ctx
, rb
, rc
, fn11
);
2149 gen_fcvtqt(ctx
, rb
, rc
, fn11
);
2161 vc
= dest_fpr(ctx
, rc
);
2162 vb
= load_fpr(ctx
, rb
);
2168 /* Special case CPYS as FNOP. */
2170 vc
= dest_fpr(ctx
, rc
);
2171 va
= load_fpr(ctx
, ra
);
2173 /* Special case CPYS as FMOV. */
2174 tcg_gen_mov_i64(vc
, va
);
2176 vb
= load_fpr(ctx
, rb
);
2177 gen_cpy_mask(vc
, va
, vb
, 0, 0x8000000000000000ULL
);
2183 vc
= dest_fpr(ctx
, rc
);
2184 vb
= load_fpr(ctx
, rb
);
2185 va
= load_fpr(ctx
, ra
);
2186 gen_cpy_mask(vc
, va
, vb
, 1, 0x8000000000000000ULL
);
2190 vc
= dest_fpr(ctx
, rc
);
2191 vb
= load_fpr(ctx
, rb
);
2192 va
= load_fpr(ctx
, ra
);
2193 gen_cpy_mask(vc
, va
, vb
, 0, 0xFFF0000000000000ULL
);
2197 va
= load_fpr(ctx
, ra
);
2198 gen_helper_store_fpcr(cpu_env
, va
);
2202 va
= dest_fpr(ctx
, ra
);
2203 gen_helper_load_fpcr(va
, cpu_env
);
2207 gen_fcmov(ctx
, TCG_COND_EQ
, ra
, rb
, rc
);
2211 gen_fcmov(ctx
, TCG_COND_NE
, ra
, rb
, rc
);
2215 gen_fcmov(ctx
, TCG_COND_LT
, ra
, rb
, rc
);
2219 gen_fcmov(ctx
, TCG_COND_GE
, ra
, rb
, rc
);
2223 gen_fcmov(ctx
, TCG_COND_LE
, ra
, rb
, rc
);
2227 gen_fcmov(ctx
, TCG_COND_GT
, ra
, rb
, rc
);
2232 vc
= dest_fpr(ctx
, rc
);
2233 vb
= load_fpr(ctx
, rb
);
2241 /* ??? I'm pretty sure there's nothing that /sv needs to do that
2242 /v doesn't do. The only thing I can think is that /sv is a
2243 valid instruction merely for completeness in the ISA. */
2244 vc
= dest_fpr(ctx
, rc
);
2245 vb
= load_fpr(ctx
, rb
);
2246 gen_helper_fcvtql_v_input(cpu_env
, vb
);
2255 switch ((uint16_t)disp16
) {
2282 va
= dest_gpr(ctx
, ra
);
2285 gen_helper_load_pcc(va
, cpu_env
);
2287 ret
= EXIT_PC_STALE
;
2289 gen_helper_load_pcc(va
, cpu_env
);
2313 /* HW_MFPR (PALcode) */
2314 #ifndef CONFIG_USER_ONLY
2315 REQUIRE_TB_FLAG(TB_FLAGS_PAL_MODE
);
2316 va
= dest_gpr(ctx
, ra
);
2317 ret
= gen_mfpr(va
, insn
& 0xffff);
2324 /* JMP, JSR, RET, JSR_COROUTINE. These only differ by the branch
2325 prediction stack action, which of course we don't implement. */
2326 vb
= load_gpr(ctx
, rb
);
2327 tcg_gen_andi_i64(cpu_pc
, vb
, ~3);
2329 tcg_gen_movi_i64(cpu_ir
[ra
], ctx
->pc
);
2331 ret
= EXIT_PC_UPDATED
;
2335 /* HW_LD (PALcode) */
2336 #ifndef CONFIG_USER_ONLY
2337 REQUIRE_TB_FLAG(TB_FLAGS_PAL_MODE
);
2339 TCGv addr
= tcg_temp_new();
2340 vb
= load_gpr(ctx
, rb
);
2341 va
= dest_gpr(ctx
, ra
);
2343 tcg_gen_addi_i64(addr
, vb
, disp12
);
2344 switch ((insn
>> 12) & 0xF) {
2346 /* Longword physical access (hw_ldl/p) */
2347 gen_helper_ldl_phys(va
, cpu_env
, addr
);
2350 /* Quadword physical access (hw_ldq/p) */
2351 gen_helper_ldq_phys(va
, cpu_env
, addr
);
2354 /* Longword physical access with lock (hw_ldl_l/p) */
2355 gen_helper_ldl_l_phys(va
, cpu_env
, addr
);
2358 /* Quadword physical access with lock (hw_ldq_l/p) */
2359 gen_helper_ldq_l_phys(va
, cpu_env
, addr
);
2362 /* Longword virtual PTE fetch (hw_ldl/v) */
2365 /* Quadword virtual PTE fetch (hw_ldq/v) */
            /* Invalid */
            /* Invalid */
2375 /* Longword virtual access (hw_ldl) */
2378 /* Quadword virtual access (hw_ldq) */
2381 /* Longword virtual access with protection check (hw_ldl/w) */
2382 tcg_gen_qemu_ld_i64(va
, addr
, MMU_KERNEL_IDX
, MO_LESL
);
2385 /* Quadword virtual access with protection check (hw_ldq/w) */
2386 tcg_gen_qemu_ld_i64(va
, addr
, MMU_KERNEL_IDX
, MO_LEQ
);
2389 /* Longword virtual access with alt access mode (hw_ldl/a)*/
2392 /* Quadword virtual access with alt access mode (hw_ldq/a) */
2395 /* Longword virtual access with alternate access mode and
2396 protection checks (hw_ldl/wa) */
2397 tcg_gen_qemu_ld_i64(va
, addr
, MMU_USER_IDX
, MO_LESL
);
2400 /* Quadword virtual access with alternate access mode and
2401 protection checks (hw_ldq/wa) */
2402 tcg_gen_qemu_ld_i64(va
, addr
, MMU_USER_IDX
, MO_LEQ
);
2405 tcg_temp_free(addr
);
2413 vc
= dest_gpr(ctx
, rc
);
2416 REQUIRE_TB_FLAG(TB_FLAGS_AMASK_FIX
);
2418 va
= load_fpr(ctx
, ra
);
2419 tcg_gen_mov_i64(vc
, va
);
2421 } else if (fn7
== 0x78) {
2423 REQUIRE_TB_FLAG(TB_FLAGS_AMASK_FIX
);
2425 t32
= tcg_temp_new_i32();
2426 va
= load_fpr(ctx
, ra
);
2427 gen_helper_s_to_memory(t32
, va
);
2428 tcg_gen_ext_i32_i64(vc
, t32
);
2429 tcg_temp_free_i32(t32
);
2433 vb
= load_gpr_lit(ctx
, rb
, lit
, islit
);
2437 REQUIRE_TB_FLAG(TB_FLAGS_AMASK_BWX
);
2439 tcg_gen_ext8s_i64(vc
, vb
);
2443 REQUIRE_TB_FLAG(TB_FLAGS_AMASK_BWX
);
2445 tcg_gen_ext16s_i64(vc
, vb
);
2449 REQUIRE_TB_FLAG(TB_FLAGS_AMASK_CIX
);
2451 gen_helper_ctpop(vc
, vb
);
2455 REQUIRE_TB_FLAG(TB_FLAGS_AMASK_MVI
);
2456 va
= load_gpr(ctx
, ra
);
2457 gen_helper_perr(vc
, va
, vb
);
2461 REQUIRE_TB_FLAG(TB_FLAGS_AMASK_CIX
);
2463 gen_helper_ctlz(vc
, vb
);
2467 REQUIRE_TB_FLAG(TB_FLAGS_AMASK_CIX
);
2469 gen_helper_cttz(vc
, vb
);
2473 REQUIRE_TB_FLAG(TB_FLAGS_AMASK_MVI
);
2475 gen_helper_unpkbw(vc
, vb
);
2479 REQUIRE_TB_FLAG(TB_FLAGS_AMASK_MVI
);
2481 gen_helper_unpkbl(vc
, vb
);
2485 REQUIRE_TB_FLAG(TB_FLAGS_AMASK_MVI
);
2487 gen_helper_pkwb(vc
, vb
);
2491 REQUIRE_TB_FLAG(TB_FLAGS_AMASK_MVI
);
2493 gen_helper_pklb(vc
, vb
);
2497 REQUIRE_TB_FLAG(TB_FLAGS_AMASK_MVI
);
2498 va
= load_gpr(ctx
, ra
);
2499 gen_helper_minsb8(vc
, va
, vb
);
2503 REQUIRE_TB_FLAG(TB_FLAGS_AMASK_MVI
);
2504 va
= load_gpr(ctx
, ra
);
2505 gen_helper_minsw4(vc
, va
, vb
);
2509 REQUIRE_TB_FLAG(TB_FLAGS_AMASK_MVI
);
2510 va
= load_gpr(ctx
, ra
);
2511 gen_helper_minub8(vc
, va
, vb
);
2515 REQUIRE_TB_FLAG(TB_FLAGS_AMASK_MVI
);
2516 va
= load_gpr(ctx
, ra
);
2517 gen_helper_minuw4(vc
, va
, vb
);
2521 REQUIRE_TB_FLAG(TB_FLAGS_AMASK_MVI
);
2522 va
= load_gpr(ctx
, ra
);
2523 gen_helper_maxub8(vc
, va
, vb
);
2527 REQUIRE_TB_FLAG(TB_FLAGS_AMASK_MVI
);
2528 va
= load_gpr(ctx
, ra
);
2529 gen_helper_maxuw4(vc
, va
, vb
);
2533 REQUIRE_TB_FLAG(TB_FLAGS_AMASK_MVI
);
2534 va
= load_gpr(ctx
, ra
);
2535 gen_helper_maxsb8(vc
, va
, vb
);
2539 REQUIRE_TB_FLAG(TB_FLAGS_AMASK_MVI
);
2540 va
= load_gpr(ctx
, ra
);
2541 gen_helper_maxsw4(vc
, va
, vb
);
2549 /* HW_MTPR (PALcode) */
2550 #ifndef CONFIG_USER_ONLY
2551 REQUIRE_TB_FLAG(TB_FLAGS_PAL_MODE
);
2552 vb
= load_gpr(ctx
, rb
);
2553 ret
= gen_mtpr(ctx
, vb
, insn
& 0xffff);
2560 /* HW_RET (PALcode) */
2561 #ifndef CONFIG_USER_ONLY
2562 REQUIRE_TB_FLAG(TB_FLAGS_PAL_MODE
);
2564 /* Pre-EV6 CPUs interpreted this as HW_REI, loading the return
2565 address from EXC_ADDR. This turns out to be useful for our
2566 emulation PALcode, so continue to accept it. */
2567 tmp
= tcg_temp_new();
2568 tcg_gen_ld_i64(tmp
, cpu_env
, offsetof(CPUAlphaState
, exc_addr
));
2569 gen_helper_hw_ret(cpu_env
, tmp
);
2572 gen_helper_hw_ret(cpu_env
, load_gpr(ctx
, rb
));
2574 ret
= EXIT_PC_UPDATED
;
2581 /* HW_ST (PALcode) */
2582 #ifndef CONFIG_USER_ONLY
2583 REQUIRE_TB_FLAG(TB_FLAGS_PAL_MODE
);
2585 TCGv addr
= tcg_temp_new();
2586 va
= load_gpr(ctx
, ra
);
2587 vb
= load_gpr(ctx
, rb
);
2589 tcg_gen_addi_i64(addr
, vb
, disp12
);
2590 switch ((insn
>> 12) & 0xF) {
2592 /* Longword physical access */
2593 gen_helper_stl_phys(cpu_env
, addr
, va
);
2596 /* Quadword physical access */
2597 gen_helper_stq_phys(cpu_env
, addr
, va
);
2600 /* Longword physical access with lock */
2601 gen_helper_stl_c_phys(dest_gpr(ctx
, ra
), cpu_env
, addr
, va
);
2604 /* Quadword physical access with lock */
2605 gen_helper_stq_c_phys(dest_gpr(ctx
, ra
), cpu_env
, addr
, va
);
2608 /* Longword virtual access */
2611 /* Quadword virtual access */
2632 /* Longword virtual access with alternate access mode */
2635 /* Quadword virtual access with alternate access mode */
2644 tcg_temp_free(addr
);
2652 gen_load_mem(ctx
, &gen_qemu_ldf
, ra
, rb
, disp16
, 1, 0);
2656 gen_load_mem(ctx
, &gen_qemu_ldg
, ra
, rb
, disp16
, 1, 0);
2660 gen_load_mem(ctx
, &gen_qemu_lds
, ra
, rb
, disp16
, 1, 0);
2664 gen_load_mem(ctx
, &tcg_gen_qemu_ld64
, ra
, rb
, disp16
, 1, 0);
2668 gen_store_mem(ctx
, &gen_qemu_stf
, ra
, rb
, disp16
, 1, 0);
2672 gen_store_mem(ctx
, &gen_qemu_stg
, ra
, rb
, disp16
, 1, 0);
2676 gen_store_mem(ctx
, &gen_qemu_sts
, ra
, rb
, disp16
, 1, 0);
2680 gen_store_mem(ctx
, &tcg_gen_qemu_st64
, ra
, rb
, disp16
, 1, 0);
2684 gen_load_mem(ctx
, &tcg_gen_qemu_ld32s
, ra
, rb
, disp16
, 0, 0);
2688 gen_load_mem(ctx
, &tcg_gen_qemu_ld64
, ra
, rb
, disp16
, 0, 0);
2692 gen_load_mem(ctx
, &gen_qemu_ldl_l
, ra
, rb
, disp16
, 0, 0);
2696 gen_load_mem(ctx
, &gen_qemu_ldq_l
, ra
, rb
, disp16
, 0, 0);
2700 gen_store_mem(ctx
, &tcg_gen_qemu_st32
, ra
, rb
, disp16
, 0, 0);
2704 gen_store_mem(ctx
, &tcg_gen_qemu_st64
, ra
, rb
, disp16
, 0, 0);
2708 ret
= gen_store_conditional(ctx
, ra
, rb
, disp16
, 0);
2712 ret
= gen_store_conditional(ctx
, ra
, rb
, disp16
, 1);
2716 ret
= gen_bdirect(ctx
, ra
, disp21
);
2718 case 0x31: /* FBEQ */
2719 ret
= gen_fbcond(ctx
, TCG_COND_EQ
, ra
, disp21
);
2721 case 0x32: /* FBLT */
2722 ret
= gen_fbcond(ctx
, TCG_COND_LT
, ra
, disp21
);
2724 case 0x33: /* FBLE */
2725 ret
= gen_fbcond(ctx
, TCG_COND_LE
, ra
, disp21
);
2729 ret
= gen_bdirect(ctx
, ra
, disp21
);
2731 case 0x35: /* FBNE */
2732 ret
= gen_fbcond(ctx
, TCG_COND_NE
, ra
, disp21
);
2734 case 0x36: /* FBGE */
2735 ret
= gen_fbcond(ctx
, TCG_COND_GE
, ra
, disp21
);
2737 case 0x37: /* FBGT */
2738 ret
= gen_fbcond(ctx
, TCG_COND_GT
, ra
, disp21
);
2742 ret
= gen_bcond(ctx
, TCG_COND_EQ
, ra
, disp21
, 1);
2746 ret
= gen_bcond(ctx
, TCG_COND_EQ
, ra
, disp21
, 0);
2750 ret
= gen_bcond(ctx
, TCG_COND_LT
, ra
, disp21
, 0);
2754 ret
= gen_bcond(ctx
, TCG_COND_LE
, ra
, disp21
, 0);
2758 ret
= gen_bcond(ctx
, TCG_COND_NE
, ra
, disp21
, 1);
2762 ret
= gen_bcond(ctx
, TCG_COND_NE
, ra
, disp21
, 0);
2766 ret
= gen_bcond(ctx
, TCG_COND_GE
, ra
, disp21
, 0);
2770 ret
= gen_bcond(ctx
, TCG_COND_GT
, ra
, disp21
, 0);
2773 ret
= gen_invalid(ctx
);
static inline void gen_intermediate_code_internal(AlphaCPU *cpu,
                                                  TranslationBlock *tb,
                                                  bool search_pc)
{
    CPUState *cs = CPU(cpu);
    CPUAlphaState *env = &cpu->env;
    DisasContext ctx, *ctxp = &ctx;
    target_ulong pc_start;
    target_ulong pc_mask;
    uint32_t insn;
    uint16_t *gen_opc_end;
    CPUBreakpoint *bp;
    int j, lj = -1;
    ExitStatus ret;
    int num_insns;
    int max_insns;

    pc_start = tb->pc;
    gen_opc_end = tcg_ctx.gen_opc_buf + OPC_MAX_SIZE;

    ctx.tb = tb;
    ctx.pc = pc_start;
    ctx.mem_idx = cpu_mmu_index(env);
    ctx.implver = env->implver;
    ctx.singlestep_enabled = cs->singlestep_enabled;

    /* ??? Every TB begins with unset rounding mode, to be initialized on
       the first fp insn of the TB.  Alternately we could define a proper
       default for every TB (e.g. QUAL_RM_N or QUAL_RM_D) and make sure
       to reset the FP_STATUS to that default at the end of any TB that
       changes the default.  We could even (gasp) dynamically figure out
       what default would be most efficient given the running program.  */
    ctx.tb_rm = -1;
    /* Similarly for flush-to-zero.  */
    ctx.tb_ftz = -1;

    num_insns = 0;
    max_insns = tb->cflags & CF_COUNT_MASK;
    if (max_insns == 0) {
        max_insns = CF_COUNT_MASK;
    }

    if (in_superpage(&ctx, pc_start)) {
        pc_mask = (1ULL << 41) - 1;
    } else {
        pc_mask = ~TARGET_PAGE_MASK;
    }

    gen_tb_start();
    do {
        if (unlikely(!QTAILQ_EMPTY(&cs->breakpoints))) {
            QTAILQ_FOREACH(bp, &cs->breakpoints, entry) {
                if (bp->pc == ctx.pc) {
                    gen_excp(&ctx, EXCP_DEBUG, 0);
                    break;
                }
            }
        }
        if (search_pc) {
            j = tcg_ctx.gen_opc_ptr - tcg_ctx.gen_opc_buf;
            if (lj < j) {
                lj++;
                while (lj < j) {
                    tcg_ctx.gen_opc_instr_start[lj++] = 0;
                }
            }
            tcg_ctx.gen_opc_pc[lj] = ctx.pc;
            tcg_ctx.gen_opc_instr_start[lj] = 1;
            tcg_ctx.gen_opc_icount[lj] = num_insns;
        }
        if (num_insns + 1 == max_insns && (tb->cflags & CF_LAST_IO)) {
            gen_io_start();
        }
        insn = cpu_ldl_code(env, ctx.pc);
        num_insns++;

        if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP | CPU_LOG_TB_OP_OPT))) {
            tcg_gen_debug_insn_start(ctx.pc);
        }

        TCGV_UNUSED_I64(ctx.zero);
        TCGV_UNUSED_I64(ctx.sink);
        TCGV_UNUSED_I64(ctx.lit);

        ctx.pc += 4;
        ret = translate_one(ctxp, insn);

        if (!TCGV_IS_UNUSED_I64(ctx.sink)) {
            tcg_gen_discard_i64(ctx.sink);
            tcg_temp_free(ctx.sink);
        }
        if (!TCGV_IS_UNUSED_I64(ctx.zero)) {
            tcg_temp_free(ctx.zero);
        }
        if (!TCGV_IS_UNUSED_I64(ctx.lit)) {
            tcg_temp_free(ctx.lit);
        }

        /* If we reach a page boundary, are single stepping,
           or exhaust instruction count, stop generation.  */
        if (ret == NO_EXIT
            && ((ctx.pc & pc_mask) == 0
                || tcg_ctx.gen_opc_ptr >= gen_opc_end
                || num_insns >= max_insns
                || singlestep
                || ctx.singlestep_enabled)) {
            ret = EXIT_PC_STALE;
        }
    } while (ret == NO_EXIT);

    if (tb->cflags & CF_LAST_IO) {
        gen_io_end();
    }

    switch (ret) {
    case EXIT_GOTO_TB:
    case EXIT_NORETURN:
        break;
    case EXIT_PC_STALE:
        tcg_gen_movi_i64(cpu_pc, ctx.pc);
        /* FALLTHRU */
    case EXIT_PC_UPDATED:
        if (ctx.singlestep_enabled) {
            gen_excp_1(EXCP_DEBUG, 0);
        } else {
            tcg_gen_exit_tb(0);
        }
        break;
    default:
        abort();
    }

    gen_tb_end(tb, num_insns);
    *tcg_ctx.gen_opc_ptr = INDEX_op_end;
    if (search_pc) {
        j = tcg_ctx.gen_opc_ptr - tcg_ctx.gen_opc_buf;
        lj++;
        while (lj <= j) {
            tcg_ctx.gen_opc_instr_start[lj++] = 0;
        }
    } else {
        tb->size = ctx.pc - pc_start;
        tb->icount = num_insns;
    }

    if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
        qemu_log("IN: %s\n", lookup_symbol(pc_start));
        log_target_disas(env, pc_start, ctx.pc - pc_start, 1);
        qemu_log("\n");
    }
}

void gen_intermediate_code (CPUAlphaState *env, struct TranslationBlock *tb)
{
    gen_intermediate_code_internal(alpha_env_get_cpu(env), tb, false);
}

void gen_intermediate_code_pc (CPUAlphaState *env, struct TranslationBlock *tb)
{
    gen_intermediate_code_internal(alpha_env_get_cpu(env), tb, true);
}

void restore_state_to_opc(CPUAlphaState *env, TranslationBlock *tb, int pc_pos)
{
    env->pc = tcg_ctx.gen_opc_pc[pc_pos];
}