/*
 * Alpha emulation cpu translation for qemu.
 *
 * Copyright (c) 2007 Jocelyn Mayer
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
20 #include "qemu/osdep.h"
22 #include "sysemu/cpus.h"
23 #include "disas/disas.h"
24 #include "qemu/host-utils.h"
25 #include "exec/exec-all.h"
27 #include "exec/cpu_ldst.h"
29 #include "exec/helper-proto.h"
30 #include "exec/helper-gen.h"
32 #include "trace-tcg.h"
#undef ALPHA_DEBUG_DISAS
#define CONFIG_SOFTFLOAT_INLINE

#ifdef ALPHA_DEBUG_DISAS
#  define LOG_DISAS(...) qemu_log_mask(CPU_LOG_TB_IN_ASM, ## __VA_ARGS__)
#else
#  define LOG_DISAS(...) do { } while (0)
#endif
typedef struct DisasContext DisasContext;
struct DisasContext {
    struct TranslationBlock *tb;
    uint64_t pc;
#ifndef CONFIG_USER_ONLY
    uint64_t palbr;
#endif
    uint32_t tbflags;
    int mem_idx;
    /* implver and amask values for this CPU.  */
    int implver;
    int amask;
    /* Current rounding mode for this TB.  */
    int tb_rm;
    /* Current flush-to-zero setting for this TB.  */
    int tb_ftz;
    /* The set of registers active in the current context.  */
    TCGv *ir;
    /* Temporaries for $31 and $f31 as source and destination.  */
    TCGv zero;
    TCGv sink;
    /* Temporary for immediate constants.  */
    TCGv lit;

    bool singlestep_enabled;
};
/* Return values from translate_one, indicating the state of the TB.
   Note that zero indicates that we are not exiting the TB.  */
typedef enum {
    NO_EXIT,
    /* We have emitted one or more goto_tb.  No fixup required.  */
    EXIT_GOTO_TB,
    /* We are not using a goto_tb (for whatever reason), but have updated
       the PC (for whatever reason), so there's no need to do it again on
       exiting the TB.  */
    EXIT_PC_UPDATED,
    EXIT_PC_UPDATED_NOCHAIN,
    /* We are exiting the TB, but have neither emitted a goto_tb, nor
       updated the PC for the next instruction to be executed.  */
    EXIT_PC_STALE,
    /* We are exiting the TB due to page crossing or space constraints.  */
    EXIT_FALLTHRU,
    /* We are ending the TB with a noreturn function call, e.g. longjmp.
       No following code will be executed.  */
    EXIT_NORETURN,
} ExitStatus;
/* global register indexes */
static TCGv_env cpu_env;
static TCGv cpu_std_ir[31];
static TCGv cpu_fir[31];
static TCGv cpu_pc;
static TCGv cpu_lock_addr;
static TCGv cpu_lock_value;

#ifndef CONFIG_USER_ONLY
static TCGv cpu_pal_ir[31];
#endif

#include "exec/gen-icount.h"
void alpha_translate_init(void)
{
#define DEF_VAR(V)  { &cpu_##V, #V, offsetof(CPUAlphaState, V) }

    typedef struct { TCGv *var; const char *name; int ofs; } GlobalVar;
    static const GlobalVar vars[] = {
        DEF_VAR(pc),
        DEF_VAR(lock_addr),
        DEF_VAR(lock_value),
    };

#undef DEF_VAR
    /* Use the symbolic register names that match the disassembler.  */
    static const char greg_names[31][4] = {
        "v0", "t0", "t1", "t2", "t3", "t4", "t5", "t6",
        "t7", "s0", "s1", "s2", "s3", "s4", "s5", "fp",
        "a0", "a1", "a2", "a3", "a4", "a5", "t8", "t9",
        "t10", "t11", "ra", "t12", "at", "gp", "sp"
    };
    static const char freg_names[31][4] = {
        "f0", "f1", "f2", "f3", "f4", "f5", "f6", "f7",
        "f8", "f9", "f10", "f11", "f12", "f13", "f14", "f15",
        "f16", "f17", "f18", "f19", "f20", "f21", "f22", "f23",
        "f24", "f25", "f26", "f27", "f28", "f29", "f30"
    };
#ifndef CONFIG_USER_ONLY
    static const char shadow_names[8][8] = {
        "pal_t7", "pal_s0", "pal_s1", "pal_s2",
        "pal_s3", "pal_s4", "pal_s5", "pal_t11"
    };
#endif
    static bool done_init = 0;
    int i;

    if (done_init) {
        return;
    }
    done_init = 1;

    cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");
    tcg_ctx.tcg_env = cpu_env;
    for (i = 0; i < 31; i++) {
        cpu_std_ir[i] = tcg_global_mem_new_i64(cpu_env,
                                               offsetof(CPUAlphaState, ir[i]),
                                               greg_names[i]);
    }

    for (i = 0; i < 31; i++) {
        cpu_fir[i] = tcg_global_mem_new_i64(cpu_env,
                                            offsetof(CPUAlphaState, fir[i]),
                                            freg_names[i]);
    }
#ifndef CONFIG_USER_ONLY
    memcpy(cpu_pal_ir, cpu_std_ir, sizeof(cpu_pal_ir));
    for (i = 0; i < 8; i++) {
        int r = (i == 7 ? 25 : i + 8);
        cpu_pal_ir[r] = tcg_global_mem_new_i64(cpu_env,
                                               offsetof(CPUAlphaState,
                                                        shadow[i]),
                                               shadow_names[i]);
    }
#endif
    for (i = 0; i < ARRAY_SIZE(vars); ++i) {
        const GlobalVar *v = &vars[i];
        *v->var = tcg_global_mem_new_i64(cpu_env, v->ofs, v->name);
    }
}
static TCGv load_zero(DisasContext *ctx)
{
    if (TCGV_IS_UNUSED_I64(ctx->zero)) {
        ctx->zero = tcg_const_i64(0);
    }
    return ctx->zero;
}
static TCGv dest_sink(DisasContext *ctx)
{
    if (TCGV_IS_UNUSED_I64(ctx->sink)) {
        ctx->sink = tcg_temp_new();
    }
    return ctx->sink;
}
static void free_context_temps(DisasContext *ctx)
{
    if (!TCGV_IS_UNUSED_I64(ctx->sink)) {
        tcg_gen_discard_i64(ctx->sink);
        tcg_temp_free(ctx->sink);
        TCGV_UNUSED_I64(ctx->sink);
    }
    if (!TCGV_IS_UNUSED_I64(ctx->zero)) {
        tcg_temp_free(ctx->zero);
        TCGV_UNUSED_I64(ctx->zero);
    }
    if (!TCGV_IS_UNUSED_I64(ctx->lit)) {
        tcg_temp_free(ctx->lit);
        TCGV_UNUSED_I64(ctx->lit);
    }
}
static TCGv load_gpr(DisasContext *ctx, unsigned reg)
{
    if (likely(reg < 31)) {
        return ctx->ir[reg];
    } else {
        return load_zero(ctx);
    }
}
static TCGv load_gpr_lit(DisasContext *ctx, unsigned reg,
                         uint8_t lit, bool islit)
{
    if (islit) {
        ctx->lit = tcg_const_i64(lit);
        return ctx->lit;
    } else if (likely(reg < 31)) {
        return ctx->ir[reg];
    } else {
        return load_zero(ctx);
    }
}
static TCGv dest_gpr(DisasContext *ctx, unsigned reg)
{
    if (likely(reg < 31)) {
        return ctx->ir[reg];
    } else {
        return dest_sink(ctx);
    }
}
static TCGv load_fpr(DisasContext *ctx, unsigned reg)
{
    if (likely(reg < 31)) {
        return cpu_fir[reg];
    } else {
        return load_zero(ctx);
    }
}
static TCGv dest_fpr(DisasContext *ctx, unsigned reg)
{
    if (likely(reg < 31)) {
        return cpu_fir[reg];
    } else {
        return dest_sink(ctx);
    }
}
static int get_flag_ofs(unsigned shift)
{
    int ofs = offsetof(CPUAlphaState, flags);
#ifdef HOST_WORDS_BIGENDIAN
    ofs += 3 - (shift / 8);
#else
    ofs += shift / 8;
#endif
    return ofs;
}
static void ld_flag_byte(TCGv val, unsigned shift)
{
    tcg_gen_ld8u_i64(val, cpu_env, get_flag_ofs(shift));
}

static void st_flag_byte(TCGv val, unsigned shift)
{
    tcg_gen_st8_i64(val, cpu_env, get_flag_ofs(shift));
}
static void gen_excp_1(int exception, int error_code)
{
    TCGv_i32 tmp1, tmp2;

    tmp1 = tcg_const_i32(exception);
    tmp2 = tcg_const_i32(error_code);
    gen_helper_excp(cpu_env, tmp1, tmp2);
    tcg_temp_free_i32(tmp2);
    tcg_temp_free_i32(tmp1);
}
static ExitStatus gen_excp(DisasContext *ctx, int exception, int error_code)
{
    tcg_gen_movi_i64(cpu_pc, ctx->pc);
    gen_excp_1(exception, error_code);
    return EXIT_NORETURN;
}
static inline ExitStatus gen_invalid(DisasContext *ctx)
{
    return gen_excp(ctx, EXCP_OPCDEC, 0);
}
static inline void gen_qemu_ldf(TCGv t0, TCGv t1, int flags)
{
    TCGv_i32 tmp32 = tcg_temp_new_i32();
    tcg_gen_qemu_ld_i32(tmp32, t1, flags, MO_LEUL);
    gen_helper_memory_to_f(t0, tmp32);
    tcg_temp_free_i32(tmp32);
}
static inline void gen_qemu_ldg(TCGv t0, TCGv t1, int flags)
{
    TCGv tmp = tcg_temp_new();
    tcg_gen_qemu_ld_i64(tmp, t1, flags, MO_LEQ);
    gen_helper_memory_to_g(t0, tmp);
    tcg_temp_free(tmp);
}
static inline void gen_qemu_lds(TCGv t0, TCGv t1, int flags)
{
    TCGv_i32 tmp32 = tcg_temp_new_i32();
    tcg_gen_qemu_ld_i32(tmp32, t1, flags, MO_LEUL);
    gen_helper_memory_to_s(t0, tmp32);
    tcg_temp_free_i32(tmp32);
}
static inline void gen_qemu_ldl_l(TCGv t0, TCGv t1, int flags)
{
    tcg_gen_qemu_ld_i64(t0, t1, flags, MO_LESL);
    tcg_gen_mov_i64(cpu_lock_addr, t1);
    tcg_gen_mov_i64(cpu_lock_value, t0);
}
static inline void gen_qemu_ldq_l(TCGv t0, TCGv t1, int flags)
{
    tcg_gen_qemu_ld_i64(t0, t1, flags, MO_LEQ);
    tcg_gen_mov_i64(cpu_lock_addr, t1);
    tcg_gen_mov_i64(cpu_lock_value, t0);
}
static inline void gen_load_mem(DisasContext *ctx,
                                void (*tcg_gen_qemu_load)(TCGv t0, TCGv t1,
                                                          int flags),
                                int ra, int rb, int32_t disp16, bool fp,
                                bool clear)
{
    TCGv tmp, addr, va;

    /* LDQ_U with ra $31 is UNOP.  Other various loads are forms of
       prefetches, which we can treat as nops.  No worries about
       missed exceptions here.  */
    if (unlikely(ra == 31)) {
        return;
    }

    tmp = tcg_temp_new();
    addr = load_gpr(ctx, rb);

    if (disp16) {
        tcg_gen_addi_i64(tmp, addr, disp16);
        addr = tmp;
    }
    if (clear) {
        tcg_gen_andi_i64(tmp, addr, ~0x7);
        addr = tmp;
    }

    va = (fp ? cpu_fir[ra] : ctx->ir[ra]);
    tcg_gen_qemu_load(va, addr, ctx->mem_idx);

    tcg_temp_free(tmp);
}
static inline void gen_qemu_stf(TCGv t0, TCGv t1, int flags)
{
    TCGv_i32 tmp32 = tcg_temp_new_i32();
    gen_helper_f_to_memory(tmp32, t0);
    tcg_gen_qemu_st_i32(tmp32, t1, flags, MO_LEUL);
    tcg_temp_free_i32(tmp32);
}

static inline void gen_qemu_stg(TCGv t0, TCGv t1, int flags)
{
    TCGv tmp = tcg_temp_new();
    gen_helper_g_to_memory(tmp, t0);
    tcg_gen_qemu_st_i64(tmp, t1, flags, MO_LEQ);
    tcg_temp_free(tmp);
}

static inline void gen_qemu_sts(TCGv t0, TCGv t1, int flags)
{
    TCGv_i32 tmp32 = tcg_temp_new_i32();
    gen_helper_s_to_memory(tmp32, t0);
    tcg_gen_qemu_st_i32(tmp32, t1, flags, MO_LEUL);
    tcg_temp_free_i32(tmp32);
}
static inline void gen_store_mem(DisasContext *ctx,
                                 void (*tcg_gen_qemu_store)(TCGv t0, TCGv t1,
                                                            int flags),
                                 int ra, int rb, int32_t disp16, bool fp,
                                 bool clear)
{
    TCGv tmp, addr, va;

    tmp = tcg_temp_new();
    addr = load_gpr(ctx, rb);

    if (disp16) {
        tcg_gen_addi_i64(tmp, addr, disp16);
        addr = tmp;
    }
    if (clear) {
        tcg_gen_andi_i64(tmp, addr, ~0x7);
        addr = tmp;
    }

    va = (fp ? load_fpr(ctx, ra) : load_gpr(ctx, ra));
    tcg_gen_qemu_store(va, addr, ctx->mem_idx);

    tcg_temp_free(tmp);
}
static ExitStatus gen_store_conditional(DisasContext *ctx, int ra, int rb,
                                        int32_t disp16, int mem_idx,
                                        TCGMemOp op)
{
    TCGLabel *lab_fail, *lab_done;
    TCGv addr, val;

    addr = tcg_temp_new_i64();
    tcg_gen_addi_i64(addr, load_gpr(ctx, rb), disp16);
    free_context_temps(ctx);

    lab_fail = gen_new_label();
    lab_done = gen_new_label();
    tcg_gen_brcond_i64(TCG_COND_NE, addr, cpu_lock_addr, lab_fail);
    tcg_temp_free_i64(addr);

    val = tcg_temp_new_i64();
    tcg_gen_atomic_cmpxchg_i64(val, cpu_lock_addr, cpu_lock_value,
                               load_gpr(ctx, ra), mem_idx, op);
    free_context_temps(ctx);

    if (ra != 31) {
        tcg_gen_setcond_i64(TCG_COND_EQ, ctx->ir[ra], val, cpu_lock_value);
    }
    tcg_temp_free_i64(val);
    tcg_gen_br(lab_done);

    gen_set_label(lab_fail);
    if (ra != 31) {
        tcg_gen_movi_i64(ctx->ir[ra], 0);
    }

    gen_set_label(lab_done);
    tcg_gen_movi_i64(cpu_lock_addr, -1);
    return NO_EXIT;
}
static bool in_superpage(DisasContext *ctx, int64_t addr)
{
#ifndef CONFIG_USER_ONLY
    return ((ctx->tbflags & ENV_FLAG_PS_USER) == 0
            && addr >> TARGET_VIRT_ADDR_SPACE_BITS == -1
            && ((addr >> 41) & 3) == 2);
#else
    return false;
#endif
}
static bool use_exit_tb(DisasContext *ctx)
{
    return ((ctx->tb->cflags & CF_LAST_IO)
            || ctx->singlestep_enabled
            || singlestep);
}
static bool use_goto_tb(DisasContext *ctx, uint64_t dest)
{
    /* Suppress goto_tb in the case of single-stepping and IO.  */
    if (unlikely(use_exit_tb(ctx))) {
        return false;
    }
#ifndef CONFIG_USER_ONLY
    /* If the destination is in the superpage, the page perms can't change.  */
    if (in_superpage(ctx, dest)) {
        return true;
    }
#endif
    /* Check for the dest on the same page as the start of the TB.  */
    return ((ctx->tb->pc ^ dest) & TARGET_PAGE_MASK) == 0;
}
static ExitStatus gen_bdirect(DisasContext *ctx, int ra, int32_t disp)
{
    uint64_t dest = ctx->pc + (disp << 2);

    if (ra != 31) {
        tcg_gen_movi_i64(ctx->ir[ra], ctx->pc);
    }

    /* Notice branch-to-next; used to initialize RA with the PC.  */
    if (disp == 0) {
        return 0;
    } else if (use_goto_tb(ctx, dest)) {
        tcg_gen_goto_tb(0);
        tcg_gen_movi_i64(cpu_pc, dest);
        tcg_gen_exit_tb((uintptr_t)ctx->tb);
        return EXIT_GOTO_TB;
    } else {
        tcg_gen_movi_i64(cpu_pc, dest);
        return EXIT_PC_UPDATED;
    }
}
static ExitStatus gen_bcond_internal(DisasContext *ctx, TCGCond cond,
                                     TCGv cmp, int32_t disp)
{
    uint64_t dest = ctx->pc + (disp << 2);
    TCGLabel *lab_true = gen_new_label();

    if (use_goto_tb(ctx, dest)) {
        tcg_gen_brcondi_i64(cond, cmp, 0, lab_true);

        tcg_gen_goto_tb(0);
        tcg_gen_movi_i64(cpu_pc, ctx->pc);
        tcg_gen_exit_tb((uintptr_t)ctx->tb);

        gen_set_label(lab_true);
        tcg_gen_goto_tb(1);
        tcg_gen_movi_i64(cpu_pc, dest);
        tcg_gen_exit_tb((uintptr_t)ctx->tb + 1);

        return EXIT_GOTO_TB;
    } else {
        TCGv_i64 z = tcg_const_i64(0);
        TCGv_i64 d = tcg_const_i64(dest);
        TCGv_i64 p = tcg_const_i64(ctx->pc);

        tcg_gen_movcond_i64(cond, cpu_pc, cmp, z, d, p);

        tcg_temp_free_i64(z);
        tcg_temp_free_i64(d);
        tcg_temp_free_i64(p);
        return EXIT_PC_UPDATED;
    }
}
static ExitStatus gen_bcond(DisasContext *ctx, TCGCond cond, int ra,
                            int32_t disp, int mask)
{
    if (mask) {
        TCGv tmp = tcg_temp_new();
        ExitStatus ret;

        tcg_gen_andi_i64(tmp, load_gpr(ctx, ra), 1);
        ret = gen_bcond_internal(ctx, cond, tmp, disp);
        tcg_temp_free(tmp);
        return ret;
    }
    return gen_bcond_internal(ctx, cond, load_gpr(ctx, ra), disp);
}
/* Fold -0.0 for comparison with COND.  */

static void gen_fold_mzero(TCGCond cond, TCGv dest, TCGv src)
{
    uint64_t mzero = 1ull << 63;

    switch (cond) {
    case TCG_COND_LE:
    case TCG_COND_GT:
        /* For <= or >, the -0.0 value directly compares the way we want.  */
        tcg_gen_mov_i64(dest, src);
        break;

    case TCG_COND_EQ:
    case TCG_COND_NE:
        /* For == or !=, we can simply mask off the sign bit and compare.  */
        tcg_gen_andi_i64(dest, src, mzero - 1);
        break;

    case TCG_COND_GE:
    case TCG_COND_LT:
        /* For >= or <, map -0.0 to +0.0 via comparison and mask.  */
        tcg_gen_setcondi_i64(TCG_COND_NE, dest, src, mzero);
        tcg_gen_neg_i64(dest, dest);
        tcg_gen_and_i64(dest, dest, src);
        break;

    default:
        abort();
    }
}
static ExitStatus gen_fbcond(DisasContext *ctx, TCGCond cond, int ra,
                             int32_t disp)
{
    TCGv cmp_tmp = tcg_temp_new();
    ExitStatus ret;

    gen_fold_mzero(cond, cmp_tmp, load_fpr(ctx, ra));
    ret = gen_bcond_internal(ctx, cond, cmp_tmp, disp);
    tcg_temp_free(cmp_tmp);
    return ret;
}
static void gen_fcmov(DisasContext *ctx, TCGCond cond, int ra, int rb, int rc)
{
    TCGv va, vb, z;

    z = load_zero(ctx);
    vb = load_fpr(ctx, rb);
    va = tcg_temp_new();
    gen_fold_mzero(cond, va, load_fpr(ctx, ra));

    tcg_gen_movcond_i64(cond, dest_fpr(ctx, rc), va, z, vb, load_fpr(ctx, rc));

    tcg_temp_free(va);
}
#define QUAL_RM_N       0x080   /* Round mode nearest even */
#define QUAL_RM_C       0x000   /* Round mode chopped */
#define QUAL_RM_M       0x040   /* Round mode minus infinity */
#define QUAL_RM_D       0x0c0   /* Round mode dynamic */
#define QUAL_RM_MASK    0x0c0

#define QUAL_U          0x100   /* Underflow enable (fp output) */
#define QUAL_V          0x100   /* Overflow enable (int output) */
#define QUAL_S          0x400   /* Software completion enable */
#define QUAL_I          0x200   /* Inexact detection enable */
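/* Worked example (illustrative): ADDS/SUI is encoded as fn11 == 0x780 -- the
   low six bits (fpfn == 0x00) select ADDS, QUAL_RM_N picks round-to-nearest,
   and QUAL_S|QUAL_U|QUAL_I enable software completion, underflow and inexact
   handling.  The helper below is hypothetical and unused by the translator. */
static inline void example_split_fn11(int fn11, int *fpfn, int *rm, int *quals)
{
    *fpfn = fn11 & 0x3f;
    *rm = fn11 & QUAL_RM_MASK;
    *quals = fn11 & (QUAL_S | QUAL_U | QUAL_I);
}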
static void gen_qual_roundmode(DisasContext *ctx, int fn11)
{
    TCGv_i32 tmp;

    fn11 &= QUAL_RM_MASK;
    if (fn11 == ctx->tb_rm) {
        return;
    }
    ctx->tb_rm = fn11;

    tmp = tcg_temp_new_i32();
    switch (fn11) {
    case QUAL_RM_N:
        tcg_gen_movi_i32(tmp, float_round_nearest_even);
        break;
    case QUAL_RM_C:
        tcg_gen_movi_i32(tmp, float_round_to_zero);
        break;
    case QUAL_RM_M:
        tcg_gen_movi_i32(tmp, float_round_down);
        break;
    case QUAL_RM_D:
        tcg_gen_ld8u_i32(tmp, cpu_env,
                         offsetof(CPUAlphaState, fpcr_dyn_round));
        break;
    }

#if defined(CONFIG_SOFTFLOAT_INLINE)
    /* ??? The "fpu/softfloat.h" interface is to call set_float_rounding_mode.
       With CONFIG_SOFTFLOAT that expands to an out-of-line call that just
       sets the one field.  */
    tcg_gen_st8_i32(tmp, cpu_env,
                    offsetof(CPUAlphaState, fp_status.float_rounding_mode));
#else
    gen_helper_setroundmode(tmp);
#endif

    tcg_temp_free_i32(tmp);
}
static void gen_qual_flushzero(DisasContext *ctx, int fn11)
{
    TCGv_i32 tmp;

    fn11 &= QUAL_U;
    if (fn11 == ctx->tb_ftz) {
        return;
    }
    ctx->tb_ftz = fn11;

    tmp = tcg_temp_new_i32();
    if (fn11) {
        /* Underflow is enabled, use the FPCR setting.  */
        tcg_gen_ld8u_i32(tmp, cpu_env,
                         offsetof(CPUAlphaState, fpcr_flush_to_zero));
    } else {
        /* Underflow is disabled, force flush-to-zero.  */
        tcg_gen_movi_i32(tmp, 1);
    }

#if defined(CONFIG_SOFTFLOAT_INLINE)
    tcg_gen_st8_i32(tmp, cpu_env,
                    offsetof(CPUAlphaState, fp_status.flush_to_zero));
#else
    gen_helper_setflushzero(tmp);
#endif

    tcg_temp_free_i32(tmp);
}
static TCGv gen_ieee_input(DisasContext *ctx, int reg, int fn11, int is_cmp)
{
    TCGv val;

    if (unlikely(reg == 31)) {
        val = load_zero(ctx);
    } else {
        val = cpu_fir[reg];
        if ((fn11 & QUAL_S) == 0) {
            if (is_cmp) {
                gen_helper_ieee_input_cmp(cpu_env, val);
            } else {
                gen_helper_ieee_input(cpu_env, val);
            }
        } else {
#ifndef CONFIG_USER_ONLY
            /* In system mode, raise exceptions for denormals like real
               hardware.  In user mode, proceed as if the OS completion
               handler is handling the denormal as per spec.  */
            gen_helper_ieee_input_s(cpu_env, val);
#endif
        }
    }
    return val;
}
static void gen_fp_exc_raise(int rc, int fn11)
{
    /* ??? We ought to be able to do something with imprecise exceptions.
       E.g. notice we're still in the trap shadow of something within the
       TB and do not generate the code to signal the exception; end the TB
       when an exception is forced to arrive, either by consumption of a
       register value or TRAPB or EXCB.  */

    TCGv_i32 reg, ign;
    uint32_t ignore = 0;

    if (!(fn11 & QUAL_U)) {
        /* Note that QUAL_U == QUAL_V, so ignore either.  */
        ignore |= FPCR_UNF | FPCR_IOV;
    }
    if (!(fn11 & QUAL_I)) {
        ignore |= FPCR_INE;
    }
    ign = tcg_const_i32(ignore);

    /* ??? Pass in the regno of the destination so that the helper can
       set EXC_MASK, which contains a bitmask of destination registers
       that have caused arithmetic traps.  A simple userspace emulation
       does not require this.  We do need it for a guest kernel's entArith,
       or if we were to do something clever with imprecise exceptions.  */
    reg = tcg_const_i32(rc + 32);
    if (fn11 & QUAL_S) {
        gen_helper_fp_exc_raise_s(cpu_env, ign, reg);
    } else {
        gen_helper_fp_exc_raise(cpu_env, ign, reg);
    }

    tcg_temp_free_i32(reg);
    tcg_temp_free_i32(ign);
}
static void gen_cvtlq(TCGv vc, TCGv vb)
{
    TCGv tmp = tcg_temp_new();

    /* The arithmetic right shift here, plus the sign-extended mask below
       yields a sign-extended result without an explicit ext32s_i64.  */
    tcg_gen_shri_i64(tmp, vb, 29);
    tcg_gen_sari_i64(vc, vb, 32);
    tcg_gen_deposit_i64(vc, vc, tmp, 0, 30);

    tcg_temp_free(tmp);
}
static void gen_ieee_arith2(DisasContext *ctx,
                            void (*helper)(TCGv, TCGv_ptr, TCGv),
                            int rb, int rc, int fn11)
{
    TCGv vb;

    gen_qual_roundmode(ctx, fn11);
    gen_qual_flushzero(ctx, fn11);

    vb = gen_ieee_input(ctx, rb, fn11, 0);
    helper(dest_fpr(ctx, rc), cpu_env, vb);

    gen_fp_exc_raise(rc, fn11);
}

#define IEEE_ARITH2(name)                                       \
static inline void glue(gen_, name)(DisasContext *ctx,          \
                                    int rb, int rc, int fn11)   \
{                                                               \
    gen_ieee_arith2(ctx, gen_helper_##name, rb, rc, fn11);      \
}
IEEE_ARITH2(sqrts)
IEEE_ARITH2(sqrtt)
IEEE_ARITH2(cvtst)
IEEE_ARITH2(cvtts)
static void gen_cvttq(DisasContext *ctx, int rb, int rc, int fn11)
{
    TCGv vb, vc;

    /* No need to set flushzero, since we have an integer output.  */
    vb = gen_ieee_input(ctx, rb, fn11, 0);
    vc = dest_fpr(ctx, rc);

    /* Almost all integer conversions use cropped rounding;
       special case that.  */
    if ((fn11 & QUAL_RM_MASK) == QUAL_RM_C) {
        gen_helper_cvttq_c(vc, cpu_env, vb);
    } else {
        gen_qual_roundmode(ctx, fn11);
        gen_helper_cvttq(vc, cpu_env, vb);
    }
    gen_fp_exc_raise(rc, fn11);
}
static void gen_ieee_intcvt(DisasContext *ctx,
                            void (*helper)(TCGv, TCGv_ptr, TCGv),
                            int rb, int rc, int fn11)
{
    TCGv vb, vc;

    gen_qual_roundmode(ctx, fn11);
    vb = load_fpr(ctx, rb);
    vc = dest_fpr(ctx, rc);

    /* The only exception that can be raised by integer conversion
       is inexact.  Thus we only need to worry about exceptions when
       inexact handling is requested.  */
    if (fn11 & QUAL_I) {
        helper(vc, cpu_env, vb);
        gen_fp_exc_raise(rc, fn11);
    } else {
        helper(vc, cpu_env, vb);
    }
}

#define IEEE_INTCVT(name)                                       \
static inline void glue(gen_, name)(DisasContext *ctx,          \
                                    int rb, int rc, int fn11)   \
{                                                               \
    gen_ieee_intcvt(ctx, gen_helper_##name, rb, rc, fn11);      \
}
IEEE_INTCVT(cvtqs)
IEEE_INTCVT(cvtqt)
static void gen_cpy_mask(TCGv vc, TCGv va, TCGv vb, bool inv_a, uint64_t mask)
{
    TCGv vmask = tcg_const_i64(mask);
    TCGv tmp = tcg_temp_new_i64();

    if (inv_a) {
        tcg_gen_andc_i64(tmp, vmask, va);
    } else {
        tcg_gen_and_i64(tmp, va, vmask);
    }

    tcg_gen_andc_i64(vc, vb, vmask);
    tcg_gen_or_i64(vc, vc, tmp);

    tcg_temp_free(vmask);
    tcg_temp_free(tmp);
}
static void gen_ieee_arith3(DisasContext *ctx,
                            void (*helper)(TCGv, TCGv_ptr, TCGv, TCGv),
                            int ra, int rb, int rc, int fn11)
{
    TCGv va, vb, vc;

    gen_qual_roundmode(ctx, fn11);
    gen_qual_flushzero(ctx, fn11);

    va = gen_ieee_input(ctx, ra, fn11, 0);
    vb = gen_ieee_input(ctx, rb, fn11, 0);
    vc = dest_fpr(ctx, rc);
    helper(vc, cpu_env, va, vb);

    gen_fp_exc_raise(rc, fn11);
}

#define IEEE_ARITH3(name)                                               \
static inline void glue(gen_, name)(DisasContext *ctx,                  \
                                    int ra, int rb, int rc, int fn11)   \
{                                                                       \
    gen_ieee_arith3(ctx, gen_helper_##name, ra, rb, rc, fn11);          \
}
IEEE_ARITH3(adds)
IEEE_ARITH3(subs)
IEEE_ARITH3(muls)
IEEE_ARITH3(divs)
IEEE_ARITH3(addt)
IEEE_ARITH3(subt)
IEEE_ARITH3(mult)
IEEE_ARITH3(divt)
static void gen_ieee_compare(DisasContext *ctx,
                             void (*helper)(TCGv, TCGv_ptr, TCGv, TCGv),
                             int ra, int rb, int rc, int fn11)
{
    TCGv va, vb, vc;

    va = gen_ieee_input(ctx, ra, fn11, 1);
    vb = gen_ieee_input(ctx, rb, fn11, 1);
    vc = dest_fpr(ctx, rc);
    helper(vc, cpu_env, va, vb);

    gen_fp_exc_raise(rc, fn11);
}

#define IEEE_CMP3(name)                                                 \
static inline void glue(gen_, name)(DisasContext *ctx,                  \
                                    int ra, int rb, int rc, int fn11)   \
{                                                                       \
    gen_ieee_compare(ctx, gen_helper_##name, ra, rb, rc, fn11);         \
}
IEEE_CMP3(cmptun)
IEEE_CMP3(cmpteq)
IEEE_CMP3(cmptlt)
IEEE_CMP3(cmptle)
static inline uint64_t zapnot_mask(uint8_t lit)
{
    uint64_t mask = 0;
    int i;

    for (i = 0; i < 8; ++i) {
        if ((lit >> i) & 1) {
            mask |= 0xffull << (i * 8);
        }
    }
    return mask;
}
/* Implement zapnot with an immediate operand, which expands to some
   form of immediate AND.  This is a basic building block in the
   definition of many of the other byte manipulation instructions.  */
static void gen_zapnoti(TCGv dest, TCGv src, uint8_t lit)
{
    switch (lit) {
    case 0x00:
        tcg_gen_movi_i64(dest, 0);
        break;
    case 0x01:
        tcg_gen_ext8u_i64(dest, src);
        break;
    case 0x03:
        tcg_gen_ext16u_i64(dest, src);
        break;
    case 0x0f:
        tcg_gen_ext32u_i64(dest, src);
        break;
    case 0xff:
        tcg_gen_mov_i64(dest, src);
        break;
    default:
        tcg_gen_andi_i64(dest, src, zapnot_mask(lit));
        break;
    }
}
/* EXTWH, EXTLH, EXTQH */
static void gen_ext_h(DisasContext *ctx, TCGv vc, TCGv va, int rb, bool islit,
                      uint8_t lit, uint8_t byte_mask)
{
    if (islit) {
        int pos = (64 - lit * 8) & 0x3f;
        int len = cto32(byte_mask) * 8;
        if (pos < len) {
            tcg_gen_deposit_z_i64(vc, va, pos, len - pos);
        } else {
            tcg_gen_movi_i64(vc, 0);
        }
    } else {
        TCGv tmp = tcg_temp_new();
        tcg_gen_shli_i64(tmp, load_gpr(ctx, rb), 3);
        tcg_gen_neg_i64(tmp, tmp);
        tcg_gen_andi_i64(tmp, tmp, 0x3f);
        tcg_gen_shl_i64(vc, va, tmp);
        tcg_temp_free(tmp);
    }
    gen_zapnoti(vc, vc, byte_mask);
}
/* EXTBL, EXTWL, EXTLL, EXTQL */
static void gen_ext_l(DisasContext *ctx, TCGv vc, TCGv va, int rb, bool islit,
                      uint8_t lit, uint8_t byte_mask)
{
    if (islit) {
        int pos = (lit & 7) * 8;
        int len = cto32(byte_mask) * 8;
        if (pos + len >= 64) {
            len = 64 - pos;
        }
        tcg_gen_extract_i64(vc, va, pos, len);
    } else {
        TCGv tmp = tcg_temp_new();
        tcg_gen_andi_i64(tmp, load_gpr(ctx, rb), 7);
        tcg_gen_shli_i64(tmp, tmp, 3);
        tcg_gen_shr_i64(vc, va, tmp);
        tcg_temp_free(tmp);
        gen_zapnoti(vc, vc, byte_mask);
    }
}
/* INSWH, INSLH, INSQH */
static void gen_ins_h(DisasContext *ctx, TCGv vc, TCGv va, int rb, bool islit,
                      uint8_t lit, uint8_t byte_mask)
{
    if (islit) {
        int pos = 64 - (lit & 7) * 8;
        int len = cto32(byte_mask) * 8;
        if (pos < len) {
            tcg_gen_extract_i64(vc, va, pos, len - pos);
        } else {
            tcg_gen_movi_i64(vc, 0);
        }
    } else {
        TCGv tmp = tcg_temp_new();
        TCGv shift = tcg_temp_new();

        /* The instruction description has us left-shift the byte mask
           and extract bits <15:8> and apply that zap at the end.  This
           is equivalent to simply performing the zap first and shifting
           afterward.  */
        gen_zapnoti(tmp, va, byte_mask);

        /* If (B & 7) == 0, we need to shift by 64 and leave a zero.  Do this
           portably by splitting the shift into two parts: shift_count-1 and 1.
           Arrange for the -1 by using ones-complement instead of
           twos-complement in the negation: ~(B * 8) & 63.  */
        tcg_gen_shli_i64(shift, load_gpr(ctx, rb), 3);
        tcg_gen_not_i64(shift, shift);
        tcg_gen_andi_i64(shift, shift, 0x3f);

        tcg_gen_shr_i64(vc, tmp, shift);
        tcg_gen_shri_i64(vc, vc, 1);
        tcg_temp_free(shift);
        tcg_temp_free(tmp);
    }
}
/* INSBL, INSWL, INSLL, INSQL */
static void gen_ins_l(DisasContext *ctx, TCGv vc, TCGv va, int rb, bool islit,
                      uint8_t lit, uint8_t byte_mask)
{
    if (islit) {
        int pos = (lit & 7) * 8;
        int len = cto32(byte_mask) * 8;
        if (pos + len > 64) {
            len = 64 - pos;
        }
        tcg_gen_deposit_z_i64(vc, va, pos, len);
    } else {
        TCGv tmp = tcg_temp_new();
        TCGv shift = tcg_temp_new();

        /* The instruction description has us left-shift the byte mask
           and extract bits <15:8> and apply that zap at the end.  This
           is equivalent to simply performing the zap first and shifting
           afterward.  */
        gen_zapnoti(tmp, va, byte_mask);

        tcg_gen_andi_i64(shift, load_gpr(ctx, rb), 7);
        tcg_gen_shli_i64(shift, shift, 3);
        tcg_gen_shl_i64(vc, tmp, shift);
        tcg_temp_free(shift);
        tcg_temp_free(tmp);
    }
}
/* MSKWH, MSKLH, MSKQH */
static void gen_msk_h(DisasContext *ctx, TCGv vc, TCGv va, int rb, bool islit,
                      uint8_t lit, uint8_t byte_mask)
{
    if (islit) {
        gen_zapnoti(vc, va, ~((byte_mask << (lit & 7)) >> 8));
    } else {
        TCGv shift = tcg_temp_new();
        TCGv mask = tcg_temp_new();

        /* The instruction description is as above, where the byte_mask
           is shifted left, and then we extract bits <15:8>.  This can be
           emulated with a right-shift on the expanded byte mask.  This
           requires extra care because for an input <2:0> == 0 we need a
           shift of 64 bits in order to generate a zero.  This is done by
           splitting the shift into two parts, the variable shift - 1
           followed by a constant 1 shift.  The code we expand below is
           equivalent to ~(B * 8) & 63.  */
        tcg_gen_shli_i64(shift, load_gpr(ctx, rb), 3);
        tcg_gen_not_i64(shift, shift);
        tcg_gen_andi_i64(shift, shift, 0x3f);
        tcg_gen_movi_i64(mask, zapnot_mask(byte_mask));
        tcg_gen_shr_i64(mask, mask, shift);
        tcg_gen_shri_i64(mask, mask, 1);

        tcg_gen_andc_i64(vc, va, mask);

        tcg_temp_free(mask);
        tcg_temp_free(shift);
    }
}
/* MSKBL, MSKWL, MSKLL, MSKQL */
static void gen_msk_l(DisasContext *ctx, TCGv vc, TCGv va, int rb, bool islit,
                      uint8_t lit, uint8_t byte_mask)
{
    if (islit) {
        gen_zapnoti(vc, va, ~(byte_mask << (lit & 7)));
    } else {
        TCGv shift = tcg_temp_new();
        TCGv mask = tcg_temp_new();

        tcg_gen_andi_i64(shift, load_gpr(ctx, rb), 7);
        tcg_gen_shli_i64(shift, shift, 3);
        tcg_gen_movi_i64(mask, zapnot_mask(byte_mask));
        tcg_gen_shl_i64(mask, mask, shift);

        tcg_gen_andc_i64(vc, va, mask);

        tcg_temp_free(mask);
        tcg_temp_free(shift);
    }
}
static void gen_rx(DisasContext *ctx, int ra, int set)
{
    TCGv tmp;

    if (ra != 31) {
        ld_flag_byte(ctx->ir[ra], ENV_FLAG_RX_SHIFT);
    }

    tmp = tcg_const_i64(set);
    st_flag_byte(tmp, ENV_FLAG_RX_SHIFT);
    tcg_temp_free(tmp);
}
static ExitStatus gen_call_pal(DisasContext *ctx, int palcode)
{
    /* We're emulating OSF/1 PALcode.  Many of these are trivial access
       to internal cpu registers.  */

    /* Unprivileged PAL call */
    if (palcode >= 0x80 && palcode < 0xC0) {
        switch (palcode) {
        case 0x86:
            /* IMB */
            /* No-op inside QEMU.  */
            break;
        case 0x9E:
            /* RDUNIQUE */
            tcg_gen_ld_i64(ctx->ir[IR_V0], cpu_env,
                           offsetof(CPUAlphaState, unique));
            break;
        case 0x9F:
            /* WRUNIQUE */
            tcg_gen_st_i64(ctx->ir[IR_A0], cpu_env,
                           offsetof(CPUAlphaState, unique));
            break;
        default:
            palcode &= 0xbf;
            goto do_call_pal;
        }
        return NO_EXIT;
    }

#ifndef CONFIG_USER_ONLY
    /* Privileged PAL code */
    if (palcode < 0x40 && (ctx->tbflags & ENV_FLAG_PS_USER) == 0) {
        switch (palcode) {
        case 0x01:
            /* CFLUSH */
            /* No-op inside QEMU.  */
            break;
        case 0x02:
            /* DRAINA */
            /* No-op inside QEMU.  */
            break;
        case 0x2D:
            /* WRVPTPTR */
            tcg_gen_st_i64(ctx->ir[IR_A0], cpu_env,
                           offsetof(CPUAlphaState, vptptr));
            break;
        case 0x31:
            /* WRVAL */
            tcg_gen_st_i64(ctx->ir[IR_A0], cpu_env,
                           offsetof(CPUAlphaState, sysval));
            break;
        case 0x32:
            /* RDVAL */
            tcg_gen_ld_i64(ctx->ir[IR_V0], cpu_env,
                           offsetof(CPUAlphaState, sysval));
            break;

        case 0x35:
            /* SWPIPL */
            /* Note that we already know we're in kernel mode, so we know
               that PS only contains the 3 IPL bits.  */
            ld_flag_byte(ctx->ir[IR_V0], ENV_FLAG_PS_SHIFT);

            /* But make sure and store only the 3 IPL bits from the user.  */
            {
                TCGv tmp = tcg_temp_new();
                tcg_gen_andi_i64(tmp, ctx->ir[IR_A0], PS_INT_MASK);
                st_flag_byte(tmp, ENV_FLAG_PS_SHIFT);
                tcg_temp_free(tmp);
            }

            /* Allow interrupts to be recognized right away.  */
            tcg_gen_movi_i64(cpu_pc, ctx->pc);
            return EXIT_PC_UPDATED_NOCHAIN;

        case 0x36:
            /* RDPS */
            ld_flag_byte(ctx->ir[IR_V0], ENV_FLAG_PS_SHIFT);
            break;

        case 0x38:
            /* WRUSP */
            tcg_gen_st_i64(ctx->ir[IR_A0], cpu_env,
                           offsetof(CPUAlphaState, usp));
            break;
        case 0x3A:
            /* RDUSP */
            tcg_gen_ld_i64(ctx->ir[IR_V0], cpu_env,
                           offsetof(CPUAlphaState, usp));
            break;
        case 0x3C:
            /* WHAMI */
            tcg_gen_ld32s_i64(ctx->ir[IR_V0], cpu_env,
                -offsetof(AlphaCPU, env) + offsetof(CPUState, cpu_index));
            break;

        case 0x3E:
            /* WTINT */
            {
                TCGv_i32 tmp = tcg_const_i32(1);
                tcg_gen_st_i32(tmp, cpu_env, -offsetof(AlphaCPU, env) +
                                             offsetof(CPUState, halted));
                tcg_temp_free_i32(tmp);
            }
            tcg_gen_movi_i64(ctx->ir[IR_V0], 0);
            return gen_excp(ctx, EXCP_HALTED, 0);

        default:
            palcode &= 0x3f;
            goto do_call_pal;
        }
        return NO_EXIT;
    }
#endif
    return gen_invalid(ctx);

 do_call_pal:
#ifdef CONFIG_USER_ONLY
    return gen_excp(ctx, EXCP_CALL_PAL, palcode);
#else
    {
        TCGv tmp = tcg_temp_new();
        uint64_t exc_addr = ctx->pc;
        uint64_t entry = ctx->palbr;

        if (ctx->tbflags & ENV_FLAG_PAL_MODE) {
            exc_addr |= 1;
        } else {
            tcg_gen_movi_i64(tmp, 1);
            st_flag_byte(tmp, ENV_FLAG_PAL_SHIFT);
        }

        tcg_gen_movi_i64(tmp, exc_addr);
        tcg_gen_st_i64(tmp, cpu_env, offsetof(CPUAlphaState, exc_addr));
        tcg_temp_free(tmp);

        entry += (palcode & 0x80
                  ? 0x2000 + (palcode - 0x80) * 64
                  : 0x1000 + palcode * 64);

        /* Since the destination is running in PALmode, we don't really
           need the page permissions check.  We'll see the existence of
           the page when we create the TB, and we'll flush all TBs if
           we change the PAL base register.  */
        if (!use_exit_tb(ctx)) {
            tcg_gen_goto_tb(0);
            tcg_gen_movi_i64(cpu_pc, entry);
            tcg_gen_exit_tb((uintptr_t)ctx->tb);
            return EXIT_GOTO_TB;
        } else {
            tcg_gen_movi_i64(cpu_pc, entry);
            return EXIT_PC_UPDATED;
        }
    }
#endif
}
#ifndef CONFIG_USER_ONLY

#define PR_LONG         0x200000

static int cpu_pr_data(int pr)
{
    switch (pr) {
    case  2: return offsetof(CPUAlphaState, pcc_ofs) | PR_LONG;
    case  3: return offsetof(CPUAlphaState, trap_arg0);
    case  4: return offsetof(CPUAlphaState, trap_arg1);
    case  5: return offsetof(CPUAlphaState, trap_arg2);
    case  6: return offsetof(CPUAlphaState, exc_addr);
    case  7: return offsetof(CPUAlphaState, palbr);
    case  8: return offsetof(CPUAlphaState, ptbr);
    case  9: return offsetof(CPUAlphaState, vptptr);
    case 10: return offsetof(CPUAlphaState, unique);
    case 11: return offsetof(CPUAlphaState, sysval);
    case 12: return offsetof(CPUAlphaState, usp);

    case 40 ... 63:
        return offsetof(CPUAlphaState, scratch[pr - 40]);

    case 251:
        return offsetof(CPUAlphaState, alarm_expire);
    }
    return 0;
}
static ExitStatus gen_mfpr(DisasContext *ctx, TCGv va, int regno)
{
    void (*helper)(TCGv);
    int data;

    switch (regno) {
    case 32 ... 39:
        /* Accessing the "non-shadow" general registers.  */
        regno = regno == 39 ? 25 : regno - 32 + 8;
        tcg_gen_mov_i64(va, cpu_std_ir[regno]);
        break;

    case 250: /* WALLTIME */
        helper = gen_helper_get_walltime;
        goto do_helper;
    case 249: /* VMTIME */
        helper = gen_helper_get_vmtime;
    do_helper:
        if (use_icount) {
            gen_io_start();
            helper(va);
            gen_io_end();
            return EXIT_PC_STALE;
        } else {
            helper(va);
        }
        break;

    case 0: /* PS */
        ld_flag_byte(va, ENV_FLAG_PS_SHIFT);
        break;
    case 1: /* FEN */
        ld_flag_byte(va, ENV_FLAG_FEN_SHIFT);
        break;

    default:
        /* The basic registers are data only, and unknown registers
           are read-zero, write-ignore.  */
        data = cpu_pr_data(regno);
        if (data == 0) {
            tcg_gen_movi_i64(va, 0);
        } else if (data & PR_LONG) {
            tcg_gen_ld32s_i64(va, cpu_env, data & ~PR_LONG);
        } else {
            tcg_gen_ld_i64(va, cpu_env, data);
        }
        break;
    }

    return NO_EXIT;
}
gen_mtpr(DisasContext
*ctx
, TCGv vb
, int regno
)
1406 gen_helper_tbia(cpu_env
);
1411 gen_helper_tbis(cpu_env
, vb
);
1417 TCGv_i32 tmp
= tcg_const_i32(1);
1418 tcg_gen_st_i32(tmp
, cpu_env
, -offsetof(AlphaCPU
, env
) +
1419 offsetof(CPUState
, halted
));
1420 tcg_temp_free_i32(tmp
);
1422 return gen_excp(ctx
, EXCP_HALTED
, 0);
1426 gen_helper_halt(vb
);
1427 return EXIT_PC_STALE
;
1431 gen_helper_set_alarm(cpu_env
, vb
);
1436 tcg_gen_st_i64(vb
, cpu_env
, offsetof(CPUAlphaState
, palbr
));
1437 /* Changing the PAL base register implies un-chaining all of the TBs
1438 that ended with a CALL_PAL. Since the base register usually only
1439 changes during boot, flushing everything works well. */
1440 gen_helper_tb_flush(cpu_env
);
1441 return EXIT_PC_STALE
;
1444 /* Accessing the "non-shadow" general registers. */
1445 regno
= regno
== 39 ? 25 : regno
- 32 + 8;
1446 tcg_gen_mov_i64(cpu_std_ir
[regno
], vb
);
1450 st_flag_byte(vb
, ENV_FLAG_PS_SHIFT
);
1453 st_flag_byte(vb
, ENV_FLAG_FEN_SHIFT
);
1457 /* The basic registers are data only, and unknown registers
1458 are read-zero, write-ignore. */
1459 data
= cpu_pr_data(regno
);
1461 if (data
& PR_LONG
) {
1462 tcg_gen_st32_i64(vb
, cpu_env
, data
& ~PR_LONG
);
1464 tcg_gen_st_i64(vb
, cpu_env
, data
);
1472 #endif /* !USER_ONLY*/
#define REQUIRE_NO_LIT \
    do { if (real_islit) { goto invalid_opc; } } while (0)

#define REQUIRE_AMASK(FLAG) \
    do { if ((ctx->amask & AMASK_##FLAG) == 0) { goto invalid_opc; } } while (0)

#define REQUIRE_TB_FLAG(FLAG) \
    do { if ((ctx->tbflags & (FLAG)) == 0) { goto invalid_opc; } } while (0)

#define REQUIRE_REG_31(WHICH) \
    do { if (WHICH != 31) { goto invalid_opc; } } while (0)
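/* Worked example (illustrative, not used by the translator) of the field
   decode performed at the top of translate_one: "addq $1,$2,$3" assembles to
   0x40220403, with opc in <31:26>, ra in <25:21>, rb in <20:16>, the literal
   flag in <12>, fn7 in <11:5> and rc in <4:0>. */
static inline void example_decode_addq(void)
{
    uint32_t insn = 0x40220403;
    assert(extract32(insn, 26, 6) == 0x10);  /* opc: integer arithmetic */
    assert(extract32(insn, 21, 5) == 1);     /* ra */
    assert(extract32(insn, 16, 5) == 2);     /* rb */
    assert(extract32(insn, 12, 1) == 0);     /* islit: rb is a register */
    assert(extract32(insn, 5, 7) == 0x20);   /* fn7: ADDQ */
    assert(extract32(insn, 0, 5) == 3);      /* rc */
}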
static ExitStatus translate_one(DisasContext *ctx, uint32_t insn)
{
    int32_t disp21, disp16, disp12 __attribute__((unused));
    uint16_t fn11;
    uint8_t opc, ra, rb, rc, fpfn, fn7, lit;
    bool islit, real_islit;
    TCGv va, vb, vc, tmp, tmp2;
    TCGv_i32 t32;
    ExitStatus ret;

    /* Decode all instruction fields */
    opc = extract32(insn, 26, 6);
    ra = extract32(insn, 21, 5);
    rb = extract32(insn, 16, 5);
    rc = extract32(insn, 0, 5);
    real_islit = islit = extract32(insn, 12, 1);
    lit = extract32(insn, 13, 8);

    disp21 = sextract32(insn, 0, 21);
    disp16 = sextract32(insn, 0, 16);
    disp12 = sextract32(insn, 0, 12);

    fn11 = extract32(insn, 5, 11);
    fpfn = extract32(insn, 5, 6);
    fn7 = extract32(insn, 5, 7);

    if (rb == 31 && !islit) {
        islit = true;
        lit = 0;
    }

    ret = NO_EXIT;
    switch (opc) {
    case 0x00:
        /* CALL_PAL */
        ret = gen_call_pal(ctx, insn & 0x03ffffff);
        break;
    case 0x01 ... 0x07:
        /* OPC01 ... OPC07 */
        goto invalid_opc;
    case 0x09:
        /* LDAH */
        disp16 = (uint32_t)disp16 << 16;
        /* fall through */
    case 0x08:
        /* LDA */
        va = dest_gpr(ctx, ra);
        /* It's worth special-casing immediate loads.  */
        if (rb == 31) {
            tcg_gen_movi_i64(va, disp16);
        } else {
            tcg_gen_addi_i64(va, load_gpr(ctx, rb), disp16);
        }
        break;
    case 0x0A:
        /* LDBU */
        REQUIRE_AMASK(BWX);
        gen_load_mem(ctx, &tcg_gen_qemu_ld8u, ra, rb, disp16, 0, 0);
        break;
    case 0x0B:
        /* LDQ_U */
        gen_load_mem(ctx, &tcg_gen_qemu_ld64, ra, rb, disp16, 0, 1);
        break;
    case 0x0C:
        /* LDWU */
        REQUIRE_AMASK(BWX);
        gen_load_mem(ctx, &tcg_gen_qemu_ld16u, ra, rb, disp16, 0, 0);
        break;
    case 0x0D:
        /* STW */
        REQUIRE_AMASK(BWX);
        gen_store_mem(ctx, &tcg_gen_qemu_st16, ra, rb, disp16, 0, 0);
        break;
    case 0x0E:
        /* STB */
        REQUIRE_AMASK(BWX);
        gen_store_mem(ctx, &tcg_gen_qemu_st8, ra, rb, disp16, 0, 0);
        break;
    case 0x0F:
        /* STQ_U */
        gen_store_mem(ctx, &tcg_gen_qemu_st64, ra, rb, disp16, 0, 1);
        break;
    case 0x10:
        vc = dest_gpr(ctx, rc);
        vb = load_gpr_lit(ctx, rb, lit, islit);

        if (ra == 31) {
            if (fn7 == 0x00) {
                /* Special case ADDL as SEXTL.  */
                tcg_gen_ext32s_i64(vc, vb);
                break;
            }
            if (fn7 == 0x29) {
                /* Special case SUBQ as NEGQ.  */
                tcg_gen_neg_i64(vc, vb);
                break;
            }
        }

        va = load_gpr(ctx, ra);
        switch (fn7) {
        case 0x00:
            /* ADDL */
            tcg_gen_add_i64(vc, va, vb);
            tcg_gen_ext32s_i64(vc, vc);
            break;
        case 0x02:
            /* S4ADDL */
            tmp = tcg_temp_new();
            tcg_gen_shli_i64(tmp, va, 2);
            tcg_gen_add_i64(tmp, tmp, vb);
            tcg_gen_ext32s_i64(vc, tmp);
            tcg_temp_free(tmp);
            break;
        case 0x09:
            /* SUBL */
            tcg_gen_sub_i64(vc, va, vb);
            tcg_gen_ext32s_i64(vc, vc);
            break;
        case 0x0B:
            /* S4SUBL */
            tmp = tcg_temp_new();
            tcg_gen_shli_i64(tmp, va, 2);
            tcg_gen_sub_i64(tmp, tmp, vb);
            tcg_gen_ext32s_i64(vc, tmp);
            tcg_temp_free(tmp);
            break;
        case 0x0F:
            /* CMPBGE */
            if (ra == 31) {
                /* Special case 0 >= X as X == 0.  */
                gen_helper_cmpbe0(vc, vb);
            } else {
                gen_helper_cmpbge(vc, va, vb);
            }
            break;
        case 0x12:
            /* S8ADDL */
            tmp = tcg_temp_new();
            tcg_gen_shli_i64(tmp, va, 3);
            tcg_gen_add_i64(tmp, tmp, vb);
            tcg_gen_ext32s_i64(vc, tmp);
            tcg_temp_free(tmp);
            break;
        case 0x1B:
            /* S8SUBL */
            tmp = tcg_temp_new();
            tcg_gen_shli_i64(tmp, va, 3);
            tcg_gen_sub_i64(tmp, tmp, vb);
            tcg_gen_ext32s_i64(vc, tmp);
            tcg_temp_free(tmp);
            break;
        case 0x1D:
            /* CMPULT */
            tcg_gen_setcond_i64(TCG_COND_LTU, vc, va, vb);
            break;
        case 0x20:
            /* ADDQ */
            tcg_gen_add_i64(vc, va, vb);
            break;
        case 0x22:
            /* S4ADDQ */
            tmp = tcg_temp_new();
            tcg_gen_shli_i64(tmp, va, 2);
            tcg_gen_add_i64(vc, tmp, vb);
            tcg_temp_free(tmp);
            break;
        case 0x29:
            /* SUBQ */
            tcg_gen_sub_i64(vc, va, vb);
            break;
        case 0x2B:
            /* S4SUBQ */
            tmp = tcg_temp_new();
            tcg_gen_shli_i64(tmp, va, 2);
            tcg_gen_sub_i64(vc, tmp, vb);
            tcg_temp_free(tmp);
            break;
        case 0x2D:
            /* CMPEQ */
            tcg_gen_setcond_i64(TCG_COND_EQ, vc, va, vb);
            break;
        case 0x32:
            /* S8ADDQ */
            tmp = tcg_temp_new();
            tcg_gen_shli_i64(tmp, va, 3);
            tcg_gen_add_i64(vc, tmp, vb);
            tcg_temp_free(tmp);
            break;
        case 0x3B:
            /* S8SUBQ */
            tmp = tcg_temp_new();
            tcg_gen_shli_i64(tmp, va, 3);
            tcg_gen_sub_i64(vc, tmp, vb);
            tcg_temp_free(tmp);
            break;
        case 0x3D:
            /* CMPULE */
            tcg_gen_setcond_i64(TCG_COND_LEU, vc, va, vb);
            break;
        case 0x40:
            /* ADDL/V */
            tmp = tcg_temp_new();
            tcg_gen_ext32s_i64(tmp, va);
            tcg_gen_ext32s_i64(vc, vb);
            tcg_gen_add_i64(tmp, tmp, vc);
            tcg_gen_ext32s_i64(vc, tmp);
            gen_helper_check_overflow(cpu_env, vc, tmp);
            tcg_temp_free(tmp);
            break;
        case 0x49:
            /* SUBL/V */
            tmp = tcg_temp_new();
            tcg_gen_ext32s_i64(tmp, va);
            tcg_gen_ext32s_i64(vc, vb);
            tcg_gen_sub_i64(tmp, tmp, vc);
            tcg_gen_ext32s_i64(vc, tmp);
            gen_helper_check_overflow(cpu_env, vc, tmp);
            tcg_temp_free(tmp);
            break;
        case 0x4D:
            /* CMPLT */
            tcg_gen_setcond_i64(TCG_COND_LT, vc, va, vb);
            break;
        case 0x60:
            /* ADDQ/V */
            tmp = tcg_temp_new();
            tmp2 = tcg_temp_new();
            tcg_gen_eqv_i64(tmp, va, vb);
            tcg_gen_mov_i64(tmp2, va);
            tcg_gen_add_i64(vc, va, vb);
            tcg_gen_xor_i64(tmp2, tmp2, vc);
            tcg_gen_and_i64(tmp, tmp, tmp2);
            tcg_gen_shri_i64(tmp, tmp, 63);
            tcg_gen_movi_i64(tmp2, 0);
            gen_helper_check_overflow(cpu_env, tmp, tmp2);
            tcg_temp_free(tmp);
            tcg_temp_free(tmp2);
            break;
        case 0x69:
            /* SUBQ/V */
            tmp = tcg_temp_new();
            tmp2 = tcg_temp_new();
            tcg_gen_xor_i64(tmp, va, vb);
            tcg_gen_mov_i64(tmp2, va);
            tcg_gen_sub_i64(vc, va, vb);
            tcg_gen_xor_i64(tmp2, tmp2, vc);
            tcg_gen_and_i64(tmp, tmp, tmp2);
            tcg_gen_shri_i64(tmp, tmp, 63);
            tcg_gen_movi_i64(tmp2, 0);
            gen_helper_check_overflow(cpu_env, tmp, tmp2);
            tcg_temp_free(tmp);
            tcg_temp_free(tmp2);
            break;
        case 0x6D:
            /* CMPLE */
            tcg_gen_setcond_i64(TCG_COND_LE, vc, va, vb);
            break;
        default:
            goto invalid_opc;
        }
        break;
    case 0x11:
        if (fn7 == 0x20) {
            if (rc == 31) {
                /* Special case BIS as NOP.  */
                break;
            }
            if (ra == 31) {
                /* Special case BIS as MOV.  */
                vc = dest_gpr(ctx, rc);
                if (islit) {
                    tcg_gen_movi_i64(vc, lit);
                } else {
                    tcg_gen_mov_i64(vc, load_gpr(ctx, rb));
                }
                break;
            }
        }

        vc = dest_gpr(ctx, rc);
        vb = load_gpr_lit(ctx, rb, lit, islit);

        if (fn7 == 0x28 && ra == 31) {
            /* Special case ORNOT as NOT.  */
            tcg_gen_not_i64(vc, vb);
            break;
        }

        va = load_gpr(ctx, ra);
        switch (fn7) {
        case 0x00:
            /* AND */
            tcg_gen_and_i64(vc, va, vb);
            break;
        case 0x08:
            /* BIC */
            tcg_gen_andc_i64(vc, va, vb);
            break;
        case 0x14:
            /* CMOVLBS */
            tmp = tcg_temp_new();
            tcg_gen_andi_i64(tmp, va, 1);
            tcg_gen_movcond_i64(TCG_COND_NE, vc, tmp, load_zero(ctx),
                                vb, load_gpr(ctx, rc));
            tcg_temp_free(tmp);
            break;
        case 0x16:
            /* CMOVLBC */
            tmp = tcg_temp_new();
            tcg_gen_andi_i64(tmp, va, 1);
            tcg_gen_movcond_i64(TCG_COND_EQ, vc, tmp, load_zero(ctx),
                                vb, load_gpr(ctx, rc));
            tcg_temp_free(tmp);
            break;
        case 0x20:
            /* BIS */
            tcg_gen_or_i64(vc, va, vb);
            break;
        case 0x24:
            /* CMOVEQ */
            tcg_gen_movcond_i64(TCG_COND_EQ, vc, va, load_zero(ctx),
                                vb, load_gpr(ctx, rc));
            break;
        case 0x26:
            /* CMOVNE */
            tcg_gen_movcond_i64(TCG_COND_NE, vc, va, load_zero(ctx),
                                vb, load_gpr(ctx, rc));
            break;
        case 0x28:
            /* ORNOT */
            tcg_gen_orc_i64(vc, va, vb);
            break;
        case 0x40:
            /* XOR */
            tcg_gen_xor_i64(vc, va, vb);
            break;
        case 0x44:
            /* CMOVLT */
            tcg_gen_movcond_i64(TCG_COND_LT, vc, va, load_zero(ctx),
                                vb, load_gpr(ctx, rc));
            break;
        case 0x46:
            /* CMOVGE */
            tcg_gen_movcond_i64(TCG_COND_GE, vc, va, load_zero(ctx),
                                vb, load_gpr(ctx, rc));
            break;
        case 0x48:
            /* EQV */
            tcg_gen_eqv_i64(vc, va, vb);
            break;
        case 0x61:
            /* AMASK */
            REQUIRE_REG_31(ra);
            tcg_gen_andi_i64(vc, vb, ~ctx->amask);
            break;
        case 0x64:
            /* CMOVLE */
            tcg_gen_movcond_i64(TCG_COND_LE, vc, va, load_zero(ctx),
                                vb, load_gpr(ctx, rc));
            break;
        case 0x66:
            /* CMOVGT */
            tcg_gen_movcond_i64(TCG_COND_GT, vc, va, load_zero(ctx),
                                vb, load_gpr(ctx, rc));
            break;
        case 0x6C:
            /* IMPLVER */
            REQUIRE_REG_31(ra);
            tcg_gen_movi_i64(vc, ctx->implver);
            break;
        default:
            goto invalid_opc;
        }
        break;
    case 0x12:
        vc = dest_gpr(ctx, rc);
        va = load_gpr(ctx, ra);
        switch (fn7) {
        case 0x02:
            /* MSKBL */
            gen_msk_l(ctx, vc, va, rb, islit, lit, 0x01);
            break;
        case 0x06:
            /* EXTBL */
            gen_ext_l(ctx, vc, va, rb, islit, lit, 0x01);
            break;
        case 0x0B:
            /* INSBL */
            gen_ins_l(ctx, vc, va, rb, islit, lit, 0x01);
            break;
        case 0x12:
            /* MSKWL */
            gen_msk_l(ctx, vc, va, rb, islit, lit, 0x03);
            break;
        case 0x16:
            /* EXTWL */
            gen_ext_l(ctx, vc, va, rb, islit, lit, 0x03);
            break;
        case 0x1B:
            /* INSWL */
            gen_ins_l(ctx, vc, va, rb, islit, lit, 0x03);
            break;
        case 0x22:
            /* MSKLL */
            gen_msk_l(ctx, vc, va, rb, islit, lit, 0x0f);
            break;
        case 0x26:
            /* EXTLL */
            gen_ext_l(ctx, vc, va, rb, islit, lit, 0x0f);
            break;
        case 0x2B:
            /* INSLL */
            gen_ins_l(ctx, vc, va, rb, islit, lit, 0x0f);
            break;
        case 0x30:
            /* ZAP */
            if (islit) {
                gen_zapnoti(vc, va, ~lit);
            } else {
                gen_helper_zap(vc, va, load_gpr(ctx, rb));
            }
            break;
        case 0x31:
            /* ZAPNOT */
            if (islit) {
                gen_zapnoti(vc, va, lit);
            } else {
                gen_helper_zapnot(vc, va, load_gpr(ctx, rb));
            }
            break;
        case 0x32:
            /* MSKQL */
            gen_msk_l(ctx, vc, va, rb, islit, lit, 0xff);
            break;
        case 0x34:
            /* SRL */
            if (islit) {
                tcg_gen_shri_i64(vc, va, lit & 0x3f);
            } else {
                tmp = tcg_temp_new();
                vb = load_gpr(ctx, rb);
                tcg_gen_andi_i64(tmp, vb, 0x3f);
                tcg_gen_shr_i64(vc, va, tmp);
                tcg_temp_free(tmp);
            }
            break;
        case 0x36:
            /* EXTQL */
            gen_ext_l(ctx, vc, va, rb, islit, lit, 0xff);
            break;
        case 0x39:
            /* SLL */
            if (islit) {
                tcg_gen_shli_i64(vc, va, lit & 0x3f);
            } else {
                tmp = tcg_temp_new();
                vb = load_gpr(ctx, rb);
                tcg_gen_andi_i64(tmp, vb, 0x3f);
                tcg_gen_shl_i64(vc, va, tmp);
                tcg_temp_free(tmp);
            }
            break;
        case 0x3B:
            /* INSQL */
            gen_ins_l(ctx, vc, va, rb, islit, lit, 0xff);
            break;
        case 0x3C:
            /* SRA */
            if (islit) {
                tcg_gen_sari_i64(vc, va, lit & 0x3f);
            } else {
                tmp = tcg_temp_new();
                vb = load_gpr(ctx, rb);
                tcg_gen_andi_i64(tmp, vb, 0x3f);
                tcg_gen_sar_i64(vc, va, tmp);
                tcg_temp_free(tmp);
            }
            break;
        case 0x52:
            /* MSKWH */
            gen_msk_h(ctx, vc, va, rb, islit, lit, 0x03);
            break;
        case 0x57:
            /* INSWH */
            gen_ins_h(ctx, vc, va, rb, islit, lit, 0x03);
            break;
        case 0x5A:
            /* EXTWH */
            gen_ext_h(ctx, vc, va, rb, islit, lit, 0x03);
            break;
        case 0x62:
            /* MSKLH */
            gen_msk_h(ctx, vc, va, rb, islit, lit, 0x0f);
            break;
        case 0x67:
            /* INSLH */
            gen_ins_h(ctx, vc, va, rb, islit, lit, 0x0f);
            break;
        case 0x6A:
            /* EXTLH */
            gen_ext_h(ctx, vc, va, rb, islit, lit, 0x0f);
            break;
        case 0x72:
            /* MSKQH */
            gen_msk_h(ctx, vc, va, rb, islit, lit, 0xff);
            break;
        case 0x77:
            /* INSQH */
            gen_ins_h(ctx, vc, va, rb, islit, lit, 0xff);
            break;
        case 0x7A:
            /* EXTQH */
            gen_ext_h(ctx, vc, va, rb, islit, lit, 0xff);
            break;
        default:
            goto invalid_opc;
        }
        break;
    case 0x13:
        vc = dest_gpr(ctx, rc);
        vb = load_gpr_lit(ctx, rb, lit, islit);
        va = load_gpr(ctx, ra);
        switch (fn7) {
        case 0x00:
            /* MULL */
            tcg_gen_mul_i64(vc, va, vb);
            tcg_gen_ext32s_i64(vc, vc);
            break;
        case 0x20:
            /* MULQ */
            tcg_gen_mul_i64(vc, va, vb);
            break;
        case 0x30:
            /* UMULH */
            tmp = tcg_temp_new();
            tcg_gen_mulu2_i64(tmp, vc, va, vb);
            tcg_temp_free(tmp);
            break;
        case 0x40:
            /* MULL/V */
            tmp = tcg_temp_new();
            tcg_gen_ext32s_i64(tmp, va);
            tcg_gen_ext32s_i64(vc, vb);
            tcg_gen_mul_i64(tmp, tmp, vc);
            tcg_gen_ext32s_i64(vc, tmp);
            gen_helper_check_overflow(cpu_env, vc, tmp);
            tcg_temp_free(tmp);
            break;
        case 0x60:
            /* MULQ/V */
            tmp = tcg_temp_new();
            tmp2 = tcg_temp_new();
            tcg_gen_muls2_i64(vc, tmp, va, vb);
            tcg_gen_sari_i64(tmp2, vc, 63);
            gen_helper_check_overflow(cpu_env, tmp, tmp2);
            tcg_temp_free(tmp);
            tcg_temp_free(tmp2);
            break;
        default:
            goto invalid_opc;
        }
        break;
= dest_fpr(ctx
, rc
);
2093 switch (fpfn
) { /* fn11 & 0x3F */
2097 t32
= tcg_temp_new_i32();
2098 va
= load_gpr(ctx
, ra
);
2099 tcg_gen_extrl_i64_i32(t32
, va
);
2100 gen_helper_memory_to_s(vc
, t32
);
2101 tcg_temp_free_i32(t32
);
2106 vb
= load_fpr(ctx
, rb
);
2107 gen_helper_sqrtf(vc
, cpu_env
, vb
);
2112 gen_sqrts(ctx
, rb
, rc
, fn11
);
2117 t32
= tcg_temp_new_i32();
2118 va
= load_gpr(ctx
, ra
);
2119 tcg_gen_extrl_i64_i32(t32
, va
);
2120 gen_helper_memory_to_f(vc
, t32
);
2121 tcg_temp_free_i32(t32
);
2126 va
= load_gpr(ctx
, ra
);
2127 tcg_gen_mov_i64(vc
, va
);
2132 vb
= load_fpr(ctx
, rb
);
2133 gen_helper_sqrtg(vc
, cpu_env
, vb
);
2138 gen_sqrtt(ctx
, rb
, rc
, fn11
);
2146 /* VAX floating point */
2147 /* XXX: rounding mode and trap are ignored (!) */
2148 vc
= dest_fpr(ctx
, rc
);
2149 vb
= load_fpr(ctx
, rb
);
2150 va
= load_fpr(ctx
, ra
);
2151 switch (fpfn
) { /* fn11 & 0x3F */
2154 gen_helper_addf(vc
, cpu_env
, va
, vb
);
2158 gen_helper_subf(vc
, cpu_env
, va
, vb
);
2162 gen_helper_mulf(vc
, cpu_env
, va
, vb
);
2166 gen_helper_divf(vc
, cpu_env
, va
, vb
);
2174 gen_helper_addg(vc
, cpu_env
, va
, vb
);
2178 gen_helper_subg(vc
, cpu_env
, va
, vb
);
2182 gen_helper_mulg(vc
, cpu_env
, va
, vb
);
2186 gen_helper_divg(vc
, cpu_env
, va
, vb
);
2190 gen_helper_cmpgeq(vc
, cpu_env
, va
, vb
);
2194 gen_helper_cmpglt(vc
, cpu_env
, va
, vb
);
2198 gen_helper_cmpgle(vc
, cpu_env
, va
, vb
);
2203 gen_helper_cvtgf(vc
, cpu_env
, vb
);
2212 gen_helper_cvtgq(vc
, cpu_env
, vb
);
2217 gen_helper_cvtqf(vc
, cpu_env
, vb
);
2222 gen_helper_cvtqg(vc
, cpu_env
, vb
);
    case 0x16:
        /* IEEE floating-point */
        switch (fpfn) { /* fn11 & 0x3F */
        case 0x00:
            /* ADDS */
            gen_adds(ctx, ra, rb, rc, fn11);
            break;
        case 0x01:
            /* SUBS */
            gen_subs(ctx, ra, rb, rc, fn11);
            break;
        case 0x02:
            /* MULS */
            gen_muls(ctx, ra, rb, rc, fn11);
            break;
        case 0x03:
            /* DIVS */
            gen_divs(ctx, ra, rb, rc, fn11);
            break;
        case 0x20:
            /* ADDT */
            gen_addt(ctx, ra, rb, rc, fn11);
            break;
        case 0x21:
            /* SUBT */
            gen_subt(ctx, ra, rb, rc, fn11);
            break;
        case 0x22:
            /* MULT */
            gen_mult(ctx, ra, rb, rc, fn11);
            break;
        case 0x23:
            /* DIVT */
            gen_divt(ctx, ra, rb, rc, fn11);
            break;
        case 0x24:
            /* CMPTUN */
            gen_cmptun(ctx, ra, rb, rc, fn11);
            break;
        case 0x25:
            /* CMPTEQ */
            gen_cmpteq(ctx, ra, rb, rc, fn11);
            break;
        case 0x26:
            /* CMPTLT */
            gen_cmptlt(ctx, ra, rb, rc, fn11);
            break;
        case 0x27:
            /* CMPTLE */
            gen_cmptle(ctx, ra, rb, rc, fn11);
            break;
        case 0x2C:
            REQUIRE_REG_31(ra);
            if (fn11 == 0x2AC || fn11 == 0x6AC) {
                /* CVTST */
                gen_cvtst(ctx, rb, rc, fn11);
            } else {
                /* CVTTS */
                gen_cvtts(ctx, rb, rc, fn11);
            }
            break;
        case 0x2F:
            /* CVTTQ */
            REQUIRE_REG_31(ra);
            gen_cvttq(ctx, rb, rc, fn11);
            break;
        case 0x3C:
            /* CVTQS */
            REQUIRE_REG_31(ra);
            gen_cvtqs(ctx, rb, rc, fn11);
            break;
        case 0x3E:
            /* CVTQT */
            REQUIRE_REG_31(ra);
            gen_cvtqt(ctx, rb, rc, fn11);
            break;
        default:
            goto invalid_opc;
        }
        break;
    case 0x17:
        switch (fn11) {
        case 0x010:
            /* CVTLQ */
            REQUIRE_REG_31(ra);
            vc = dest_fpr(ctx, rc);
            vb = load_fpr(ctx, rb);
            gen_cvtlq(vc, vb);
            break;
        case 0x020:
            /* CPYS */
            if (rc == 31) {
                /* Special case CPYS as FNOP.  */
            } else {
                vc = dest_fpr(ctx, rc);
                va = load_fpr(ctx, ra);
                if (ra == rb) {
                    /* Special case CPYS as FMOV.  */
                    tcg_gen_mov_i64(vc, va);
                } else {
                    vb = load_fpr(ctx, rb);
                    gen_cpy_mask(vc, va, vb, 0, 0x8000000000000000ULL);
                }
            }
            break;
        case 0x021:
            /* CPYSN */
            vc = dest_fpr(ctx, rc);
            vb = load_fpr(ctx, rb);
            va = load_fpr(ctx, ra);
            gen_cpy_mask(vc, va, vb, 1, 0x8000000000000000ULL);
            break;
        case 0x022:
            /* CPYSE */
            vc = dest_fpr(ctx, rc);
            vb = load_fpr(ctx, rb);
            va = load_fpr(ctx, ra);
            gen_cpy_mask(vc, va, vb, 0, 0xFFF0000000000000ULL);
            break;
        case 0x024:
            /* MT_FPCR */
            va = load_fpr(ctx, ra);
            gen_helper_store_fpcr(cpu_env, va);
            if (ctx->tb_rm == QUAL_RM_D) {
                /* Re-do the copy of the rounding mode to fp_status
                   the next time we use dynamic rounding.  */
                ctx->tb_rm = -1;
            }
            break;
        case 0x025:
            /* MF_FPCR */
            va = dest_fpr(ctx, ra);
            gen_helper_load_fpcr(va, cpu_env);
            break;
        case 0x02A:
            /* FCMOVEQ */
            gen_fcmov(ctx, TCG_COND_EQ, ra, rb, rc);
            break;
        case 0x02B:
            /* FCMOVNE */
            gen_fcmov(ctx, TCG_COND_NE, ra, rb, rc);
            break;
        case 0x02C:
            /* FCMOVLT */
            gen_fcmov(ctx, TCG_COND_LT, ra, rb, rc);
            break;
        case 0x02D:
            /* FCMOVGE */
            gen_fcmov(ctx, TCG_COND_GE, ra, rb, rc);
            break;
        case 0x02E:
            /* FCMOVLE */
            gen_fcmov(ctx, TCG_COND_LE, ra, rb, rc);
            break;
        case 0x02F:
            /* FCMOVGT */
            gen_fcmov(ctx, TCG_COND_GT, ra, rb, rc);
            break;
        case 0x030: /* CVTQL */
        case 0x130: /* CVTQL/V */
        case 0x530: /* CVTQL/SV */
            REQUIRE_REG_31(ra);
            vc = dest_fpr(ctx, rc);
            vb = load_fpr(ctx, rb);
            gen_helper_cvtql(vc, cpu_env, vb);
            gen_fp_exc_raise(rc, fn11);
            break;
        default:
            goto invalid_opc;
        }
        break;
    case 0x18:
        switch ((uint16_t)disp16) {
        case 0x0000:
            /* TRAPB */
            /* No-op.  */
            break;
        case 0x0400:
            /* EXCB */
            /* No-op.  */
            break;
        case 0x4000:
            /* MB */
            tcg_gen_mb(TCG_MO_ALL | TCG_BAR_SC);
            break;
        case 0x4400:
            /* WMB */
            tcg_gen_mb(TCG_MO_ST_ST | TCG_BAR_SC);
            break;
        case 0x8000:
            /* FETCH */
            /* No-op.  */
            break;
        case 0xA000:
            /* FETCH_M */
            /* No-op.  */
            break;
        case 0xC000:
            /* RPCC */
            va = dest_gpr(ctx, ra);
            if (ctx->tb->cflags & CF_USE_ICOUNT) {
                gen_io_start();
                gen_helper_load_pcc(va, cpu_env);
                gen_io_end();
                ret = EXIT_PC_STALE;
            } else {
                gen_helper_load_pcc(va, cpu_env);
            }
            break;
        case 0xE000:
            /* RC */
            gen_rx(ctx, ra, 0);
            break;
        case 0xF000:
            /* RS */
            gen_rx(ctx, ra, 1);
            break;
        default:
            goto invalid_opc;
        }
        break;
    case 0x19:
        /* HW_MFPR (PALcode) */
#ifndef CONFIG_USER_ONLY
        REQUIRE_TB_FLAG(ENV_FLAG_PAL_MODE);
        va = dest_gpr(ctx, ra);
        ret = gen_mfpr(ctx, va, insn & 0xffff);
        break;
#else
        goto invalid_opc;
#endif

    case 0x1A:
        /* JMP, JSR, RET, JSR_COROUTINE.  These only differ by the branch
           prediction stack action, which of course we don't implement.  */
        vb = load_gpr(ctx, rb);
        tcg_gen_andi_i64(cpu_pc, vb, ~3);
        if (ra != 31) {
            tcg_gen_movi_i64(ctx->ir[ra], ctx->pc);
        }
        ret = EXIT_PC_UPDATED;
        break;
    case 0x1B:
        /* HW_LD (PALcode) */
#ifndef CONFIG_USER_ONLY
        REQUIRE_TB_FLAG(ENV_FLAG_PAL_MODE);
        {
            TCGv addr = tcg_temp_new();
            vb = load_gpr(ctx, rb);
            va = dest_gpr(ctx, ra);

            tcg_gen_addi_i64(addr, vb, disp12);
            switch ((insn >> 12) & 0xF) {
            case 0x0:
                /* Longword physical access (hw_ldl/p) */
                tcg_gen_qemu_ld_i64(va, addr, MMU_PHYS_IDX, MO_LESL);
                break;
            case 0x1:
                /* Quadword physical access (hw_ldq/p) */
                tcg_gen_qemu_ld_i64(va, addr, MMU_PHYS_IDX, MO_LEQ);
                break;
            case 0x2:
                /* Longword physical access with lock (hw_ldl_l/p) */
                gen_qemu_ldl_l(va, addr, MMU_PHYS_IDX);
                break;
            case 0x3:
                /* Quadword physical access with lock (hw_ldq_l/p) */
                gen_qemu_ldq_l(va, addr, MMU_PHYS_IDX);
                break;
            case 0x4:
                /* Longword virtual PTE fetch (hw_ldl/v) */
                goto invalid_opc;
            case 0x5:
                /* Quadword virtual PTE fetch (hw_ldq/v) */
                goto invalid_opc;
            case 0x6:
                /* Invalid */
                goto invalid_opc;
            case 0x7:
                /* Invalid */
                goto invalid_opc;
            case 0x8:
                /* Longword virtual access (hw_ldl) */
                goto invalid_opc;
            case 0x9:
                /* Quadword virtual access (hw_ldq) */
                goto invalid_opc;
            case 0xA:
                /* Longword virtual access with protection check (hw_ldl/w) */
                tcg_gen_qemu_ld_i64(va, addr, MMU_KERNEL_IDX, MO_LESL);
                break;
            case 0xB:
                /* Quadword virtual access with protection check (hw_ldq/w) */
                tcg_gen_qemu_ld_i64(va, addr, MMU_KERNEL_IDX, MO_LEQ);
                break;
            case 0xC:
                /* Longword virtual access with alt access mode (hw_ldl/a) */
                goto invalid_opc;
            case 0xD:
                /* Quadword virtual access with alt access mode (hw_ldq/a) */
                goto invalid_opc;
            case 0xE:
                /* Longword virtual access with alternate access mode and
                   protection checks (hw_ldl/wa) */
                tcg_gen_qemu_ld_i64(va, addr, MMU_USER_IDX, MO_LESL);
                break;
            case 0xF:
                /* Quadword virtual access with alternate access mode and
                   protection checks (hw_ldq/wa) */
                tcg_gen_qemu_ld_i64(va, addr, MMU_USER_IDX, MO_LEQ);
                break;
            }
            tcg_temp_free(addr);
            break;
        }
#else
        goto invalid_opc;
#endif
    case 0x1C:
        vc = dest_gpr(ctx, rc);
        if (fn7 == 0x70) {
            /* FTOIT */
            REQUIRE_AMASK(FIX);
            REQUIRE_REG_31(rb);
            va = load_fpr(ctx, ra);
            tcg_gen_mov_i64(vc, va);
            break;
        } else if (fn7 == 0x78) {
            /* FTOIS */
            REQUIRE_AMASK(FIX);
            REQUIRE_REG_31(rb);
            t32 = tcg_temp_new_i32();
            va = load_fpr(ctx, ra);
            gen_helper_s_to_memory(t32, va);
            tcg_gen_ext_i32_i64(vc, t32);
            tcg_temp_free_i32(t32);
            break;
        }

        vb = load_gpr_lit(ctx, rb, lit, islit);
        switch (fn7) {
        case 0x00:
            /* SEXTB */
            REQUIRE_AMASK(BWX);
            REQUIRE_REG_31(ra);
            tcg_gen_ext8s_i64(vc, vb);
            break;
        case 0x01:
            /* SEXTW */
            REQUIRE_AMASK(BWX);
            REQUIRE_REG_31(ra);
            tcg_gen_ext16s_i64(vc, vb);
            break;
        case 0x30:
            /* CTPOP */
            REQUIRE_AMASK(CIX);
            REQUIRE_REG_31(ra);
            REQUIRE_NO_LIT;
            tcg_gen_ctpop_i64(vc, vb);
            break;
        case 0x31:
            /* PERR */
            REQUIRE_AMASK(MVI);
            REQUIRE_NO_LIT;
            va = load_gpr(ctx, ra);
            gen_helper_perr(vc, va, vb);
            break;
        case 0x32:
            /* CTLZ */
            REQUIRE_AMASK(CIX);
            REQUIRE_REG_31(ra);
            REQUIRE_NO_LIT;
            tcg_gen_clzi_i64(vc, vb, 64);
            break;
        case 0x33:
            /* CTTZ */
            REQUIRE_AMASK(CIX);
            REQUIRE_REG_31(ra);
            REQUIRE_NO_LIT;
            tcg_gen_ctzi_i64(vc, vb, 64);
            break;
        case 0x34:
            /* UNPKBW */
            REQUIRE_AMASK(MVI);
            REQUIRE_REG_31(ra);
            REQUIRE_NO_LIT;
            gen_helper_unpkbw(vc, vb);
            break;
        case 0x35:
            /* UNPKBL */
            REQUIRE_AMASK(MVI);
            REQUIRE_REG_31(ra);
            REQUIRE_NO_LIT;
            gen_helper_unpkbl(vc, vb);
            break;
        case 0x36:
            /* PKWB */
            REQUIRE_AMASK(MVI);
            REQUIRE_REG_31(ra);
            REQUIRE_NO_LIT;
            gen_helper_pkwb(vc, vb);
            break;
        case 0x37:
            /* PKLB */
            REQUIRE_AMASK(MVI);
            REQUIRE_REG_31(ra);
            REQUIRE_NO_LIT;
            gen_helper_pklb(vc, vb);
            break;
        case 0x38:
            /* MINSB8 */
            REQUIRE_AMASK(MVI);
            va = load_gpr(ctx, ra);
            gen_helper_minsb8(vc, va, vb);
            break;
        case 0x39:
            /* MINSW4 */
            REQUIRE_AMASK(MVI);
            va = load_gpr(ctx, ra);
            gen_helper_minsw4(vc, va, vb);
            break;
        case 0x3A:
            /* MINUB8 */
            REQUIRE_AMASK(MVI);
            va = load_gpr(ctx, ra);
            gen_helper_minub8(vc, va, vb);
            break;
        case 0x3B:
            /* MINUW4 */
            REQUIRE_AMASK(MVI);
            va = load_gpr(ctx, ra);
            gen_helper_minuw4(vc, va, vb);
            break;
        case 0x3C:
            /* MAXUB8 */
            REQUIRE_AMASK(MVI);
            va = load_gpr(ctx, ra);
            gen_helper_maxub8(vc, va, vb);
            break;
        case 0x3D:
            /* MAXUW4 */
            REQUIRE_AMASK(MVI);
            va = load_gpr(ctx, ra);
            gen_helper_maxuw4(vc, va, vb);
            break;
        case 0x3E:
            /* MAXSB8 */
            REQUIRE_AMASK(MVI);
            va = load_gpr(ctx, ra);
            gen_helper_maxsb8(vc, va, vb);
            break;
        case 0x3F:
            /* MAXSW4 */
            REQUIRE_AMASK(MVI);
            va = load_gpr(ctx, ra);
            gen_helper_maxsw4(vc, va, vb);
            break;
        default:
            goto invalid_opc;
        }
        break;
    case 0x1D:
        /* HW_MTPR (PALcode) */
#ifndef CONFIG_USER_ONLY
        REQUIRE_TB_FLAG(ENV_FLAG_PAL_MODE);
        vb = load_gpr(ctx, rb);
        ret = gen_mtpr(ctx, vb, insn & 0xffff);
        break;
#else
        goto invalid_opc;
#endif

    case 0x1E:
        /* HW_RET (PALcode) */
#ifndef CONFIG_USER_ONLY
        REQUIRE_TB_FLAG(ENV_FLAG_PAL_MODE);
        if (rb == 31) {
            /* Pre-EV6 CPUs interpreted this as HW_REI, loading the return
               address from EXC_ADDR.  This turns out to be useful for our
               emulation PALcode, so continue to accept it.  */
            ctx->lit = vb = tcg_temp_new();
            tcg_gen_ld_i64(vb, cpu_env, offsetof(CPUAlphaState, exc_addr));
        } else {
            vb = load_gpr(ctx, rb);
        }
        tcg_gen_movi_i64(cpu_lock_addr, -1);
        tmp = tcg_temp_new();
        tcg_gen_movi_i64(tmp, 0);
        st_flag_byte(tmp, ENV_FLAG_RX_SHIFT);
        tcg_gen_andi_i64(tmp, vb, 1);
        st_flag_byte(tmp, ENV_FLAG_PAL_SHIFT);
        tcg_temp_free(tmp);
        tcg_gen_andi_i64(cpu_pc, vb, ~3);
        /* Allow interrupts to be recognized right away.  */
        ret = EXIT_PC_UPDATED_NOCHAIN;
        break;
#else
        goto invalid_opc;
#endif
    case 0x1F:
        /* HW_ST (PALcode) */
#ifndef CONFIG_USER_ONLY
        REQUIRE_TB_FLAG(ENV_FLAG_PAL_MODE);
        switch ((insn >> 12) & 0xF) {
        case 0x0:
            /* Longword physical access */
            va = load_gpr(ctx, ra);
            vb = load_gpr(ctx, rb);
            tmp = tcg_temp_new();
            tcg_gen_addi_i64(tmp, vb, disp12);
            tcg_gen_qemu_st_i64(va, tmp, MMU_PHYS_IDX, MO_LESL);
            tcg_temp_free(tmp);
            break;
        case 0x1:
            /* Quadword physical access */
            va = load_gpr(ctx, ra);
            vb = load_gpr(ctx, rb);
            tmp = tcg_temp_new();
            tcg_gen_addi_i64(tmp, vb, disp12);
            tcg_gen_qemu_st_i64(va, tmp, MMU_PHYS_IDX, MO_LEQ);
            tcg_temp_free(tmp);
            break;
        case 0x2:
            /* Longword physical access with lock */
            ret = gen_store_conditional(ctx, ra, rb, disp12,
                                        MMU_PHYS_IDX, MO_LESL);
            break;
        case 0x3:
            /* Quadword physical access with lock */
            ret = gen_store_conditional(ctx, ra, rb, disp12,
                                        MMU_PHYS_IDX, MO_LEQ);
            break;
        case 0x4:
            /* Longword virtual access */
            goto invalid_opc;
        case 0x5:
            /* Quadword virtual access */
            goto invalid_opc;
        case 0xC:
            /* Longword virtual access with alternate access mode */
            goto invalid_opc;
        case 0xD:
            /* Quadword virtual access with alternate access mode */
            goto invalid_opc;
        default:
            goto invalid_opc;
        }
        break;
#else
        goto invalid_opc;
#endif
    case 0x20:
        /* LDF */
        gen_load_mem(ctx, &gen_qemu_ldf, ra, rb, disp16, 1, 0);
        break;
    case 0x21:
        /* LDG */
        gen_load_mem(ctx, &gen_qemu_ldg, ra, rb, disp16, 1, 0);
        break;
    case 0x22:
        /* LDS */
        gen_load_mem(ctx, &gen_qemu_lds, ra, rb, disp16, 1, 0);
        break;
    case 0x23:
        /* LDT */
        gen_load_mem(ctx, &tcg_gen_qemu_ld64, ra, rb, disp16, 1, 0);
        break;
    case 0x24:
        /* STF */
        gen_store_mem(ctx, &gen_qemu_stf, ra, rb, disp16, 1, 0);
        break;
    case 0x25:
        /* STG */
        gen_store_mem(ctx, &gen_qemu_stg, ra, rb, disp16, 1, 0);
        break;
    case 0x26:
        /* STS */
        gen_store_mem(ctx, &gen_qemu_sts, ra, rb, disp16, 1, 0);
        break;
    case 0x27:
        /* STT */
        gen_store_mem(ctx, &tcg_gen_qemu_st64, ra, rb, disp16, 1, 0);
        break;
    case 0x28:
        /* LDL */
        gen_load_mem(ctx, &tcg_gen_qemu_ld32s, ra, rb, disp16, 0, 0);
        break;
    case 0x29:
        /* LDQ */
        gen_load_mem(ctx, &tcg_gen_qemu_ld64, ra, rb, disp16, 0, 0);
        break;
    case 0x2A:
        /* LDL_L */
        gen_load_mem(ctx, &gen_qemu_ldl_l, ra, rb, disp16, 0, 0);
        break;
    case 0x2B:
        /* LDQ_L */
        gen_load_mem(ctx, &gen_qemu_ldq_l, ra, rb, disp16, 0, 0);
        break;
    case 0x2C:
        /* STL */
        gen_store_mem(ctx, &tcg_gen_qemu_st32, ra, rb, disp16, 0, 0);
        break;
    case 0x2D:
        /* STQ */
        gen_store_mem(ctx, &tcg_gen_qemu_st64, ra, rb, disp16, 0, 0);
        break;
    case 0x2E:
        /* STL_C */
        ret = gen_store_conditional(ctx, ra, rb, disp16,
                                    ctx->mem_idx, MO_LESL);
        break;
    case 0x2F:
        /* STQ_C */
        ret = gen_store_conditional(ctx, ra, rb, disp16,
                                    ctx->mem_idx, MO_LEQ);
        break;
    case 0x30:
        /* BR */
        ret = gen_bdirect(ctx, ra, disp21);
        break;
    case 0x31: /* FBEQ */
        ret = gen_fbcond(ctx, TCG_COND_EQ, ra, disp21);
        break;
    case 0x32: /* FBLT */
        ret = gen_fbcond(ctx, TCG_COND_LT, ra, disp21);
        break;
    case 0x33: /* FBLE */
        ret = gen_fbcond(ctx, TCG_COND_LE, ra, disp21);
        break;
    case 0x34:
        /* BSR */
        ret = gen_bdirect(ctx, ra, disp21);
        break;
    case 0x35: /* FBNE */
        ret = gen_fbcond(ctx, TCG_COND_NE, ra, disp21);
        break;
    case 0x36: /* FBGE */
        ret = gen_fbcond(ctx, TCG_COND_GE, ra, disp21);
        break;
    case 0x37: /* FBGT */
        ret = gen_fbcond(ctx, TCG_COND_GT, ra, disp21);
        break;
    case 0x38:
        /* BLBC */
        ret = gen_bcond(ctx, TCG_COND_EQ, ra, disp21, 1);
        break;
    case 0x39:
        /* BEQ */
        ret = gen_bcond(ctx, TCG_COND_EQ, ra, disp21, 0);
        break;
    case 0x3A:
        /* BLT */
        ret = gen_bcond(ctx, TCG_COND_LT, ra, disp21, 0);
        break;
    case 0x3B:
        /* BLE */
        ret = gen_bcond(ctx, TCG_COND_LE, ra, disp21, 0);
        break;
    case 0x3C:
        /* BLBS */
        ret = gen_bcond(ctx, TCG_COND_NE, ra, disp21, 1);
        break;
    case 0x3D:
        /* BNE */
        ret = gen_bcond(ctx, TCG_COND_NE, ra, disp21, 0);
        break;
    case 0x3E:
        /* BGE */
        ret = gen_bcond(ctx, TCG_COND_GE, ra, disp21, 0);
        break;
    case 0x3F:
        /* BGT */
        ret = gen_bcond(ctx, TCG_COND_GT, ra, disp21, 0);
        break;
    invalid_opc:
        ret = gen_invalid(ctx);
        break;
    }

    return ret;
}
void gen_intermediate_code(CPUState *cs, struct TranslationBlock *tb)
{
    CPUAlphaState *env = cs->env_ptr;
    DisasContext ctx, *ctxp = &ctx;
    target_ulong pc_start;
    target_ulong pc_mask;
    uint32_t insn;
    ExitStatus ret;
    int num_insns;
    int max_insns;

    pc_start = tb->pc;

    ctx.tb = tb;
    ctx.pc = pc_start;
    ctx.tbflags = tb->flags;
    ctx.mem_idx = cpu_mmu_index(env, false);
    ctx.implver = env->implver;
    ctx.amask = env->amask;
    ctx.singlestep_enabled = cs->singlestep_enabled;

#ifdef CONFIG_USER_ONLY
    ctx.ir = cpu_std_ir;
#else
    ctx.palbr = env->palbr;
    ctx.ir = (ctx.tbflags & ENV_FLAG_PAL_MODE ? cpu_pal_ir : cpu_std_ir);
#endif

    /* ??? Every TB begins with unset rounding mode, to be initialized on
       the first fp insn of the TB.  Alternately we could define a proper
       default for every TB (e.g. QUAL_RM_N or QUAL_RM_D) and make sure
       to reset the FP_STATUS to that default at the end of any TB that
       changes the default.  We could even (gasp) dynamically figure out
       what default would be most efficient given the running program.  */
    ctx.tb_rm = -1;
    /* Similarly for flush-to-zero.  */
    ctx.tb_ftz = -1;

    TCGV_UNUSED_I64(ctx.zero);
    TCGV_UNUSED_I64(ctx.sink);
    TCGV_UNUSED_I64(ctx.lit);

    num_insns = 0;
    max_insns = tb->cflags & CF_COUNT_MASK;
    if (max_insns == 0) {
        max_insns = CF_COUNT_MASK;
    }
    if (max_insns > TCG_MAX_INSNS) {
        max_insns = TCG_MAX_INSNS;
    }

    if (in_superpage(&ctx, pc_start)) {
        pc_mask = (1ULL << 41) - 1;
    } else {
        pc_mask = ~TARGET_PAGE_MASK;
    }

    gen_tb_start(tb);
    tcg_clear_temp_count();

    do {
        tcg_gen_insn_start(ctx.pc);
        num_insns++;

        if (unlikely(cpu_breakpoint_test(cs, ctx.pc, BP_ANY))) {
            ret = gen_excp(&ctx, EXCP_DEBUG, 0);
            /* The address covered by the breakpoint must be included in
               [tb->pc, tb->pc + tb->size) in order for it to be
               properly cleared -- thus we increment the PC here so that
               the logic setting tb->size below does the right thing.  */
            ctx.pc += 4;
            break;
        }
        if (num_insns == max_insns && (tb->cflags & CF_LAST_IO)) {
            gen_io_start();
        }
        insn = cpu_ldl_code(env, ctx.pc);

        ctx.pc += 4;
        ret = translate_one(ctxp, insn);
        free_context_temps(ctxp);

        if (tcg_check_temp_count()) {
            qemu_log("TCG temporary leak before "TARGET_FMT_lx"\n", ctx.pc);
        }

        /* If we reach a page boundary, are single stepping,
           or exhaust instruction count, stop generation.  */
        if (ret == NO_EXIT
            && ((ctx.pc & pc_mask) == 0
                || tcg_op_buf_full()
                || num_insns >= max_insns
                || singlestep
                || ctx.singlestep_enabled)) {
            ret = EXIT_FALLTHRU;
        }
    } while (ret == NO_EXIT);

    if (tb->cflags & CF_LAST_IO) {
        gen_io_end();
    }

    switch (ret) {
    case EXIT_GOTO_TB:
    case EXIT_NORETURN:
        break;
    case EXIT_FALLTHRU:
        if (use_goto_tb(&ctx, ctx.pc)) {
            tcg_gen_goto_tb(0);
            tcg_gen_movi_i64(cpu_pc, ctx.pc);
            tcg_gen_exit_tb((uintptr_t)ctx.tb);
        }
        /* FALLTHRU */
    case EXIT_PC_STALE:
        tcg_gen_movi_i64(cpu_pc, ctx.pc);
        /* FALLTHRU */
    case EXIT_PC_UPDATED:
        if (!use_exit_tb(&ctx)) {
            tcg_gen_lookup_and_goto_ptr(cpu_pc);
            break;
        }
        /* FALLTHRU */
    case EXIT_PC_UPDATED_NOCHAIN:
        if (ctx.singlestep_enabled) {
            gen_excp_1(EXCP_DEBUG, 0);
        } else {
            tcg_gen_exit_tb(0);
        }
        break;
    default:
        g_assert_not_reached();
    }

    gen_tb_end(tb, num_insns);

    tb->size = ctx.pc - pc_start;
    tb->icount = num_insns;

#ifdef DEBUG_DISAS
    if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)
        && qemu_log_in_addr_range(pc_start)) {
        qemu_log_lock();
        qemu_log("IN: %s\n", lookup_symbol(pc_start));
        log_target_disas(cs, pc_start, ctx.pc - pc_start, 1);
        qemu_log("\n");
        qemu_log_unlock();
    }
#endif
}
void restore_state_to_opc(CPUAlphaState *env, TranslationBlock *tb,
                          target_ulong *data)
{
    env->pc = data[0];
}