/*
 * Alpha emulation cpu translation for qemu.
 *
 * Copyright (c) 2007 Jocelyn Mayer
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"
#include "sysemu/cpus.h"
#include "disas/disas.h"
#include "qemu/host-utils.h"
#include "exec/exec-all.h"
#include "exec/cpu_ldst.h"
#include "exec/helper-proto.h"
#include "exec/helper-gen.h"
#include "trace-tcg.h"
#undef ALPHA_DEBUG_DISAS
#define CONFIG_SOFTFLOAT_INLINE

#ifdef ALPHA_DEBUG_DISAS
#  define LOG_DISAS(...) qemu_log_mask(CPU_LOG_TB_IN_ASM, ## __VA_ARGS__)
#else
#  define LOG_DISAS(...) do { } while (0)
#endif
typedef struct DisasContext DisasContext;
struct DisasContext {
    struct TranslationBlock *tb;
    uint64_t pc;
#ifndef CONFIG_USER_ONLY
    uint64_t palbr;
#endif
    int mem_idx;

    /* Current rounding mode for this TB. */
    int tb_rm;
    /* Current flush-to-zero setting for this TB. */
    int tb_ftz;

    /* implver value for this CPU. */
    int implver;

    /* The set of registers active in the current context. */
    TCGv *ir;

    /* Temporaries for $31 and $f31 as source and destination. */
    TCGv zero;
    TCGv sink;
    /* Temporary for immediate constants. */
    TCGv lit;

    bool singlestep_enabled;
};
/* Return values from translate_one, indicating the state of the TB.
   Note that zero indicates that we are not exiting the TB. */
typedef enum {
    NO_EXIT,
    /* We have emitted one or more goto_tb. No fixup required. */
    EXIT_GOTO_TB,
    /* We are not using a goto_tb (for whatever reason), but have updated
       the PC (for whatever reason), so there's no need to do it again on
       exiting the TB. */
    EXIT_PC_UPDATED,
    /* We are exiting the TB, but have neither emitted a goto_tb, nor
       updated the PC for the next instruction to be executed. */
    EXIT_PC_STALE,
    /* We are exiting the TB due to page crossing or space constraints. */
    EXIT_FALLTHRU,
    /* We are ending the TB with a noreturn function call, e.g. longjmp.
       No following code will be executed. */
    EXIT_NORETURN,
} ExitStatus;
/* global register indexes */
static TCGv_env cpu_env;
static TCGv cpu_std_ir[31];
static TCGv cpu_fir[31];
static TCGv cpu_pc;
static TCGv cpu_lock_addr;
static TCGv cpu_lock_value;

#ifndef CONFIG_USER_ONLY
static TCGv cpu_pal_ir[31];
#endif

#include "exec/gen-icount.h"
void alpha_translate_init(void)
{
#define DEF_VAR(V)  { &cpu_##V, #V, offsetof(CPUAlphaState, V) }

    typedef struct { TCGv *var; const char *name; int ofs; } GlobalVar;
    static const GlobalVar vars[] = {
        DEF_VAR(pc),
        DEF_VAR(lock_addr),
        DEF_VAR(lock_value),
    };

#undef DEF_VAR
    /* Use the symbolic register names that match the disassembler. */
    static const char greg_names[31][4] = {
        "v0", "t0", "t1", "t2", "t3", "t4", "t5", "t6",
        "t7", "s0", "s1", "s2", "s3", "s4", "s5", "fp",
        "a0", "a1", "a2", "a3", "a4", "a5", "t8", "t9",
        "t10", "t11", "ra", "t12", "at", "gp", "sp"
    };
    static const char freg_names[31][4] = {
        "f0", "f1", "f2", "f3", "f4", "f5", "f6", "f7",
        "f8", "f9", "f10", "f11", "f12", "f13", "f14", "f15",
        "f16", "f17", "f18", "f19", "f20", "f21", "f22", "f23",
        "f24", "f25", "f26", "f27", "f28", "f29", "f30"
    };
#ifndef CONFIG_USER_ONLY
    static const char shadow_names[8][8] = {
        "pal_t7", "pal_s0", "pal_s1", "pal_s2",
        "pal_s3", "pal_s4", "pal_s5", "pal_t11"
    };
#endif
    static bool done_init = 0;
    int i;

    if (done_init) {
        return;
    }
    done_init = 1;
    cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");
    tcg_ctx.tcg_env = cpu_env;

    for (i = 0; i < 31; i++) {
        cpu_std_ir[i] = tcg_global_mem_new_i64(cpu_env,
                                               offsetof(CPUAlphaState, ir[i]),
                                               greg_names[i]);
    }

    for (i = 0; i < 31; i++) {
        cpu_fir[i] = tcg_global_mem_new_i64(cpu_env,
                                            offsetof(CPUAlphaState, fir[i]),
                                            freg_names[i]);
    }

#ifndef CONFIG_USER_ONLY
    memcpy(cpu_pal_ir, cpu_std_ir, sizeof(cpu_pal_ir));
    for (i = 0; i < 8; i++) {
        int r = (i == 7 ? 25 : i + 8);
        cpu_pal_ir[r] = tcg_global_mem_new_i64(cpu_env,
                                               offsetof(CPUAlphaState,
                                                        shadow[i]),
                                               shadow_names[i]);
    }
#endif

    for (i = 0; i < ARRAY_SIZE(vars); ++i) {
        const GlobalVar *v = &vars[i];
        *v->var = tcg_global_mem_new_i64(cpu_env, v->ofs, v->name);
    }
}
static TCGv load_zero(DisasContext *ctx)
{
    if (TCGV_IS_UNUSED_I64(ctx->zero)) {
        ctx->zero = tcg_const_i64(0);
    }
    return ctx->zero;
}

static TCGv dest_sink(DisasContext *ctx)
{
    if (TCGV_IS_UNUSED_I64(ctx->sink)) {
        ctx->sink = tcg_temp_new();
    }
    return ctx->sink;
}

static void free_context_temps(DisasContext *ctx)
{
    if (!TCGV_IS_UNUSED_I64(ctx->sink)) {
        tcg_gen_discard_i64(ctx->sink);
        tcg_temp_free(ctx->sink);
        TCGV_UNUSED_I64(ctx->sink);
    }
    if (!TCGV_IS_UNUSED_I64(ctx->zero)) {
        tcg_temp_free(ctx->zero);
        TCGV_UNUSED_I64(ctx->zero);
    }
    if (!TCGV_IS_UNUSED_I64(ctx->lit)) {
        tcg_temp_free(ctx->lit);
        TCGV_UNUSED_I64(ctx->lit);
    }
}
static TCGv load_gpr(DisasContext *ctx, unsigned reg)
{
    if (likely(reg < 31)) {
        return ctx->ir[reg];
    } else {
        return load_zero(ctx);
    }
}

static TCGv load_gpr_lit(DisasContext *ctx, unsigned reg,
                         uint8_t lit, bool islit)
{
    if (islit) {
        ctx->lit = tcg_const_i64(lit);
        return ctx->lit;
    } else if (likely(reg < 31)) {
        return ctx->ir[reg];
    } else {
        return load_zero(ctx);
    }
}

static TCGv dest_gpr(DisasContext *ctx, unsigned reg)
{
    if (likely(reg < 31)) {
        return ctx->ir[reg];
    } else {
        return dest_sink(ctx);
    }
}

static TCGv load_fpr(DisasContext *ctx, unsigned reg)
{
    if (likely(reg < 31)) {
        return cpu_fir[reg];
    } else {
        return load_zero(ctx);
    }
}

static TCGv dest_fpr(DisasContext *ctx, unsigned reg)
{
    if (likely(reg < 31)) {
        return cpu_fir[reg];
    } else {
        return dest_sink(ctx);
    }
}
static void gen_excp_1(int exception, int error_code)
{
    TCGv_i32 tmp1, tmp2;

    tmp1 = tcg_const_i32(exception);
    tmp2 = tcg_const_i32(error_code);
    gen_helper_excp(cpu_env, tmp1, tmp2);
    tcg_temp_free_i32(tmp2);
    tcg_temp_free_i32(tmp1);
}

static ExitStatus gen_excp(DisasContext *ctx, int exception, int error_code)
{
    tcg_gen_movi_i64(cpu_pc, ctx->pc);
    gen_excp_1(exception, error_code);
    return EXIT_NORETURN;
}

static inline ExitStatus gen_invalid(DisasContext *ctx)
{
    return gen_excp(ctx, EXCP_OPCDEC, 0);
}
static inline void gen_qemu_ldf(TCGv t0, TCGv t1, int flags)
{
    TCGv_i32 tmp32 = tcg_temp_new_i32();
    tcg_gen_qemu_ld_i32(tmp32, t1, flags, MO_LEUL);
    gen_helper_memory_to_f(t0, tmp32);
    tcg_temp_free_i32(tmp32);
}

static inline void gen_qemu_ldg(TCGv t0, TCGv t1, int flags)
{
    TCGv tmp = tcg_temp_new();
    tcg_gen_qemu_ld_i64(tmp, t1, flags, MO_LEQ);
    gen_helper_memory_to_g(t0, tmp);
    tcg_temp_free(tmp);
}

static inline void gen_qemu_lds(TCGv t0, TCGv t1, int flags)
{
    TCGv_i32 tmp32 = tcg_temp_new_i32();
    tcg_gen_qemu_ld_i32(tmp32, t1, flags, MO_LEUL);
    gen_helper_memory_to_s(t0, tmp32);
    tcg_temp_free_i32(tmp32);
}

static inline void gen_qemu_ldl_l(TCGv t0, TCGv t1, int flags)
{
    tcg_gen_qemu_ld_i64(t0, t1, flags, MO_LESL);
    tcg_gen_mov_i64(cpu_lock_addr, t1);
    tcg_gen_mov_i64(cpu_lock_value, t0);
}

static inline void gen_qemu_ldq_l(TCGv t0, TCGv t1, int flags)
{
    tcg_gen_qemu_ld_i64(t0, t1, flags, MO_LEQ);
    tcg_gen_mov_i64(cpu_lock_addr, t1);
    tcg_gen_mov_i64(cpu_lock_value, t0);
}
static inline void gen_load_mem(DisasContext *ctx,
                                void (*tcg_gen_qemu_load)(TCGv t0, TCGv t1,
                                                          int flags),
                                int ra, int rb, int32_t disp16, bool fp,
                                bool clear)
{
    TCGv tmp, addr, va;

    /* LDQ_U with ra $31 is UNOP.  Other various loads are forms of
       prefetches, which we can treat as nops.  No worries about
       missed exceptions here.  */
    if (unlikely(ra == 31)) {
        return;
    }

    tmp = tcg_temp_new();
    addr = load_gpr(ctx, rb);

    if (disp16) {
        tcg_gen_addi_i64(tmp, addr, disp16);
        addr = tmp;
    }
    if (clear) {
        tcg_gen_andi_i64(tmp, addr, ~0x7);
        addr = tmp;
    }

    va = (fp ? cpu_fir[ra] : ctx->ir[ra]);
    tcg_gen_qemu_load(va, addr, ctx->mem_idx);

    tcg_temp_free(tmp);
}
static inline void gen_qemu_stf(TCGv t0, TCGv t1, int flags)
{
    TCGv_i32 tmp32 = tcg_temp_new_i32();
    gen_helper_f_to_memory(tmp32, t0);
    tcg_gen_qemu_st_i32(tmp32, t1, flags, MO_LEUL);
    tcg_temp_free_i32(tmp32);
}

static inline void gen_qemu_stg(TCGv t0, TCGv t1, int flags)
{
    TCGv tmp = tcg_temp_new();
    gen_helper_g_to_memory(tmp, t0);
    tcg_gen_qemu_st_i64(tmp, t1, flags, MO_LEQ);
    tcg_temp_free(tmp);
}

static inline void gen_qemu_sts(TCGv t0, TCGv t1, int flags)
{
    TCGv_i32 tmp32 = tcg_temp_new_i32();
    gen_helper_s_to_memory(tmp32, t0);
    tcg_gen_qemu_st_i32(tmp32, t1, flags, MO_LEUL);
    tcg_temp_free_i32(tmp32);
}

static inline void gen_store_mem(DisasContext *ctx,
                                 void (*tcg_gen_qemu_store)(TCGv t0, TCGv t1,
                                                            int flags),
                                 int ra, int rb, int32_t disp16, bool fp,
                                 bool clear)
{
    TCGv tmp, addr, va;

    tmp = tcg_temp_new();
    addr = load_gpr(ctx, rb);

    if (disp16) {
        tcg_gen_addi_i64(tmp, addr, disp16);
        addr = tmp;
    }
    if (clear) {
        tcg_gen_andi_i64(tmp, addr, ~0x7);
        addr = tmp;
    }

    va = (fp ? load_fpr(ctx, ra) : load_gpr(ctx, ra));
    tcg_gen_qemu_store(va, addr, ctx->mem_idx);

    tcg_temp_free(tmp);
}
static ExitStatus gen_store_conditional(DisasContext *ctx, int ra, int rb,
                                        int32_t disp16, int mem_idx,
                                        TCGMemOp op)
{
    TCGLabel *lab_fail, *lab_done;
    TCGv addr, val;

    addr = tcg_temp_new_i64();
    tcg_gen_addi_i64(addr, load_gpr(ctx, rb), disp16);
    free_context_temps(ctx);

    lab_fail = gen_new_label();
    lab_done = gen_new_label();
    tcg_gen_brcond_i64(TCG_COND_NE, addr, cpu_lock_addr, lab_fail);
    tcg_temp_free_i64(addr);

    val = tcg_temp_new_i64();
    tcg_gen_atomic_cmpxchg_i64(val, cpu_lock_addr, cpu_lock_value,
                               load_gpr(ctx, ra), mem_idx, op);
    free_context_temps(ctx);

    if (ra != 31) {
        tcg_gen_setcond_i64(TCG_COND_EQ, ctx->ir[ra], val, cpu_lock_value);
    }
    tcg_temp_free_i64(val);
    tcg_gen_br(lab_done);

    gen_set_label(lab_fail);
    if (ra != 31) {
        tcg_gen_movi_i64(ctx->ir[ra], 0);
    }

    gen_set_label(lab_done);
    tcg_gen_movi_i64(cpu_lock_addr, -1);
    return NO_EXIT;
}
static bool in_superpage(DisasContext *ctx, int64_t addr)
{
#ifndef CONFIG_USER_ONLY
    return ((ctx->tb->flags & TB_FLAGS_USER_MODE) == 0
            && addr >> TARGET_VIRT_ADDR_SPACE_BITS == -1
            && ((addr >> 41) & 3) == 2);
#else
    return false;
#endif
}

static bool use_goto_tb(DisasContext *ctx, uint64_t dest)
{
    /* Suppress goto_tb in the case of single-stepping and IO.  */
    if ((ctx->tb->cflags & CF_LAST_IO)
        || ctx->singlestep_enabled || singlestep) {
        return false;
    }
#ifndef CONFIG_USER_ONLY
    /* If the destination is in the superpage, the page perms can't change. */
    if (in_superpage(ctx, dest)) {
        return true;
    }
#endif
    /* Check for the dest on the same page as the start of the TB. */
    return ((ctx->tb->pc ^ dest) & TARGET_PAGE_MASK) == 0;
}
static ExitStatus gen_bdirect(DisasContext *ctx, int ra, int32_t disp)
{
    uint64_t dest = ctx->pc + (disp << 2);

    if (ra != 31) {
        tcg_gen_movi_i64(ctx->ir[ra], ctx->pc);
    }

    /* Notice branch-to-next; used to initialize RA with the PC.  */
    if (disp == 0) {
        return 0;
    } else if (use_goto_tb(ctx, dest)) {
        tcg_gen_goto_tb(0);
        tcg_gen_movi_i64(cpu_pc, dest);
        tcg_gen_exit_tb((uintptr_t)ctx->tb);
        return EXIT_GOTO_TB;
    } else {
        tcg_gen_movi_i64(cpu_pc, dest);
        return EXIT_PC_UPDATED;
    }
}
static ExitStatus gen_bcond_internal(DisasContext *ctx, TCGCond cond,
                                     TCGv cmp, int32_t disp)
{
    uint64_t dest = ctx->pc + (disp << 2);
    TCGLabel *lab_true = gen_new_label();

    if (use_goto_tb(ctx, dest)) {
        tcg_gen_brcondi_i64(cond, cmp, 0, lab_true);

        tcg_gen_goto_tb(0);
        tcg_gen_movi_i64(cpu_pc, ctx->pc);
        tcg_gen_exit_tb((uintptr_t)ctx->tb);

        gen_set_label(lab_true);
        tcg_gen_goto_tb(1);
        tcg_gen_movi_i64(cpu_pc, dest);
        tcg_gen_exit_tb((uintptr_t)ctx->tb + 1);

        return EXIT_GOTO_TB;
    } else {
        TCGv_i64 z = tcg_const_i64(0);
        TCGv_i64 d = tcg_const_i64(dest);
        TCGv_i64 p = tcg_const_i64(ctx->pc);

        tcg_gen_movcond_i64(cond, cpu_pc, cmp, z, d, p);

        tcg_temp_free_i64(z);
        tcg_temp_free_i64(d);
        tcg_temp_free_i64(p);
        return EXIT_PC_UPDATED;
    }
}
static ExitStatus gen_bcond(DisasContext *ctx, TCGCond cond, int ra,
                            int32_t disp, int mask)
{
    TCGv cmp_tmp;

    if (mask) {
        cmp_tmp = tcg_temp_new();
        tcg_gen_andi_i64(cmp_tmp, load_gpr(ctx, ra), 1);
    } else {
        cmp_tmp = load_gpr(ctx, ra);
    }

    return gen_bcond_internal(ctx, cond, cmp_tmp, disp);
}
/* Fold -0.0 for comparison with COND.  */

static void gen_fold_mzero(TCGCond cond, TCGv dest, TCGv src)
{
    uint64_t mzero = 1ull << 63;

    switch (cond) {
    case TCG_COND_LE:
    case TCG_COND_GT:
        /* For <= or >, the -0.0 value directly compares the way we want.  */
        tcg_gen_mov_i64(dest, src);
        break;

    case TCG_COND_EQ:
    case TCG_COND_NE:
        /* For == or !=, we can simply mask off the sign bit and compare.  */
        tcg_gen_andi_i64(dest, src, mzero - 1);
        break;

    case TCG_COND_GE:
    case TCG_COND_LT:
        /* For >= or <, map -0.0 to +0.0 via comparison and mask.  */
        tcg_gen_setcondi_i64(TCG_COND_NE, dest, src, mzero);
        tcg_gen_neg_i64(dest, dest);
        tcg_gen_and_i64(dest, dest, src);
        break;

    default:
        abort();
    }
}
static ExitStatus gen_fbcond(DisasContext *ctx, TCGCond cond, int ra,
                             int32_t disp)
{
    TCGv cmp_tmp = tcg_temp_new();
    gen_fold_mzero(cond, cmp_tmp, load_fpr(ctx, ra));
    return gen_bcond_internal(ctx, cond, cmp_tmp, disp);
}

static void gen_fcmov(DisasContext *ctx, TCGCond cond, int ra, int rb, int rc)
{
    TCGv va, vb, z;

    z = load_zero(ctx);
    vb = load_fpr(ctx, rb);
    va = tcg_temp_new();
    gen_fold_mzero(cond, va, load_fpr(ctx, ra));

    tcg_gen_movcond_i64(cond, dest_fpr(ctx, rc), va, z, vb, load_fpr(ctx, rc));

    tcg_temp_free(va);
}
#define QUAL_RM_N       0x080   /* Round mode nearest even */
#define QUAL_RM_C       0x000   /* Round mode chopped */
#define QUAL_RM_M       0x040   /* Round mode minus infinity */
#define QUAL_RM_D       0x0c0   /* Round mode dynamic */
#define QUAL_RM_MASK    0x0c0

#define QUAL_U          0x100   /* Underflow enable (fp output) */
#define QUAL_V          0x100   /* Overflow enable (int output) */
#define QUAL_S          0x400   /* Software completion enable */
#define QUAL_I          0x200   /* Inexact detection enable */
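
/* All of the QUAL_* values are masks over fn11, the 11-bit function field of
   the floating-point operate format decoded in translate_one below; an
   instruction's rounding-mode and trap-enable qualifiers are carried directly
   in that field and tested with these masks. */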
static void gen_qual_roundmode(DisasContext *ctx, int fn11)
{
    TCGv_i32 tmp;

    fn11 &= QUAL_RM_MASK;
    if (fn11 == ctx->tb_rm) {
        return;
    }
    ctx->tb_rm = fn11;

    tmp = tcg_temp_new_i32();
    switch (fn11) {
    case QUAL_RM_N:
        tcg_gen_movi_i32(tmp, float_round_nearest_even);
        break;
    case QUAL_RM_C:
        tcg_gen_movi_i32(tmp, float_round_to_zero);
        break;
    case QUAL_RM_M:
        tcg_gen_movi_i32(tmp, float_round_down);
        break;
    case QUAL_RM_D:
        tcg_gen_ld8u_i32(tmp, cpu_env,
                         offsetof(CPUAlphaState, fpcr_dyn_round));
        break;
    }

#if defined(CONFIG_SOFTFLOAT_INLINE)
    /* ??? The "fpu/softfloat.h" interface is to call set_float_rounding_mode.
       With CONFIG_SOFTFLOAT that expands to an out-of-line call that just
       sets the one field.  */
    tcg_gen_st8_i32(tmp, cpu_env,
                    offsetof(CPUAlphaState, fp_status.float_rounding_mode));
#else
    gen_helper_setroundmode(tmp);
#endif

    tcg_temp_free_i32(tmp);
}
static void gen_qual_flushzero(DisasContext *ctx, int fn11)
{
    TCGv_i32 tmp;

    fn11 &= QUAL_U;
    if (fn11 == ctx->tb_ftz) {
        return;
    }
    ctx->tb_ftz = fn11;

    tmp = tcg_temp_new_i32();
    if (fn11) {
        /* Underflow is enabled, use the FPCR setting.  */
        tcg_gen_ld8u_i32(tmp, cpu_env,
                         offsetof(CPUAlphaState, fpcr_flush_to_zero));
    } else {
        /* Underflow is disabled, force flush-to-zero.  */
        tcg_gen_movi_i32(tmp, 1);
    }

#if defined(CONFIG_SOFTFLOAT_INLINE)
    tcg_gen_st8_i32(tmp, cpu_env,
                    offsetof(CPUAlphaState, fp_status.flush_to_zero));
#else
    gen_helper_setflushzero(tmp);
#endif

    tcg_temp_free_i32(tmp);
}
static TCGv gen_ieee_input(DisasContext *ctx, int reg, int fn11, int is_cmp)
{
    TCGv val;

    if (unlikely(reg == 31)) {
        val = load_zero(ctx);
    } else {
        val = cpu_fir[reg];
        if ((fn11 & QUAL_S) == 0) {
            if (is_cmp) {
                gen_helper_ieee_input_cmp(cpu_env, val);
            } else {
                gen_helper_ieee_input(cpu_env, val);
            }
        } else {
#ifndef CONFIG_USER_ONLY
            /* In system mode, raise exceptions for denormals like real
               hardware.  In user mode, proceed as if the OS completion
               handler is handling the denormal as per spec.  */
            gen_helper_ieee_input_s(cpu_env, val);
#endif
        }
    }
    return val;
}
static void gen_fp_exc_raise(int rc, int fn11)
{
    /* ??? We ought to be able to do something with imprecise exceptions.
       E.g. notice we're still in the trap shadow of something within the
       TB and do not generate the code to signal the exception; end the TB
       when an exception is forced to arrive, either by consumption of a
       register value or TRAPB or EXCB.  */

    TCGv_i32 reg, ign;
    uint32_t ignore = 0;

    if (!(fn11 & QUAL_U)) {
        /* Note that QUAL_U == QUAL_V, so ignore either.  */
        ignore |= FPCR_UNF | FPCR_IOV;
    }
    if (!(fn11 & QUAL_I)) {
        ignore |= FPCR_INE;
    }
    ign = tcg_const_i32(ignore);

    /* ??? Pass in the regno of the destination so that the helper can
       set EXC_MASK, which contains a bitmask of destination registers
       that have caused arithmetic traps.  A simple userspace emulation
       does not require this.  We do need it for a guest kernel's entArith,
       or if we were to do something clever with imprecise exceptions.  */
    reg = tcg_const_i32(rc + 32);
    if (fn11 & QUAL_S) {
        gen_helper_fp_exc_raise_s(cpu_env, ign, reg);
    } else {
        gen_helper_fp_exc_raise(cpu_env, ign, reg);
    }

    tcg_temp_free_i32(reg);
    tcg_temp_free_i32(ign);
}
static void gen_cvtlq(TCGv vc, TCGv vb)
{
    TCGv tmp = tcg_temp_new();

    /* The arithmetic right shift here, plus the sign-extended mask below
       yields a sign-extended result without an explicit ext32s_i64.  */
    tcg_gen_sari_i64(tmp, vb, 32);
    tcg_gen_shri_i64(vc, vb, 29);
    tcg_gen_andi_i64(tmp, tmp, (int32_t)0xc0000000);
    tcg_gen_andi_i64(vc, vc, 0x3fffffff);
    tcg_gen_or_i64(vc, vc, tmp);

    tcg_temp_free(tmp);
}
static void gen_ieee_arith2(DisasContext *ctx,
                            void (*helper)(TCGv, TCGv_ptr, TCGv),
                            int rb, int rc, int fn11)
{
    TCGv vb;

    gen_qual_roundmode(ctx, fn11);
    gen_qual_flushzero(ctx, fn11);

    vb = gen_ieee_input(ctx, rb, fn11, 0);
    helper(dest_fpr(ctx, rc), cpu_env, vb);

    gen_fp_exc_raise(rc, fn11);
}

#define IEEE_ARITH2(name)                                       \
static inline void glue(gen_, name)(DisasContext *ctx,          \
                                    int rb, int rc, int fn11)   \
{                                                               \
    gen_ieee_arith2(ctx, gen_helper_##name, rb, rc, fn11);      \
}
IEEE_ARITH2(sqrts)
IEEE_ARITH2(sqrtt)
IEEE_ARITH2(cvtst)
IEEE_ARITH2(cvtts)
static void gen_cvttq(DisasContext *ctx, int rb, int rc, int fn11)
{
    TCGv vb, vc;

    /* No need to set flushzero, since we have an integer output.  */
    vb = gen_ieee_input(ctx, rb, fn11, 0);
    vc = dest_fpr(ctx, rc);

    /* Almost all integer conversions use cropped rounding;
       special case that.  */
    if ((fn11 & QUAL_RM_MASK) == QUAL_RM_C) {
        gen_helper_cvttq_c(vc, cpu_env, vb);
    } else {
        gen_qual_roundmode(ctx, fn11);
        gen_helper_cvttq(vc, cpu_env, vb);
    }
    gen_fp_exc_raise(rc, fn11);
}
static void gen_ieee_intcvt(DisasContext *ctx,
                            void (*helper)(TCGv, TCGv_ptr, TCGv),
                            int rb, int rc, int fn11)
{
    TCGv vb, vc;

    gen_qual_roundmode(ctx, fn11);
    vb = load_fpr(ctx, rb);
    vc = dest_fpr(ctx, rc);

    /* The only exception that can be raised by integer conversion
       is inexact.  Thus we only need to worry about exceptions when
       inexact handling is requested.  */
    if (fn11 & QUAL_I) {
        helper(vc, cpu_env, vb);
        gen_fp_exc_raise(rc, fn11);
    } else {
        helper(vc, cpu_env, vb);
    }
}

#define IEEE_INTCVT(name)                                       \
static inline void glue(gen_, name)(DisasContext *ctx,          \
                                    int rb, int rc, int fn11)   \
{                                                               \
    gen_ieee_intcvt(ctx, gen_helper_##name, rb, rc, fn11);      \
}
IEEE_INTCVT(cvtqs)
IEEE_INTCVT(cvtqt)
static void gen_cpy_mask(TCGv vc, TCGv va, TCGv vb, bool inv_a, uint64_t mask)
{
    TCGv vmask = tcg_const_i64(mask);
    TCGv tmp = tcg_temp_new_i64();

    if (inv_a) {
        tcg_gen_andc_i64(tmp, vmask, va);
    } else {
        tcg_gen_and_i64(tmp, va, vmask);
    }

    tcg_gen_andc_i64(vc, vb, vmask);
    tcg_gen_or_i64(vc, vc, tmp);

    tcg_temp_free(vmask);
    tcg_temp_free(tmp);
}
static void gen_ieee_arith3(DisasContext *ctx,
                            void (*helper)(TCGv, TCGv_ptr, TCGv, TCGv),
                            int ra, int rb, int rc, int fn11)
{
    TCGv va, vb, vc;

    gen_qual_roundmode(ctx, fn11);
    gen_qual_flushzero(ctx, fn11);

    va = gen_ieee_input(ctx, ra, fn11, 0);
    vb = gen_ieee_input(ctx, rb, fn11, 0);
    vc = dest_fpr(ctx, rc);
    helper(vc, cpu_env, va, vb);

    gen_fp_exc_raise(rc, fn11);
}

#define IEEE_ARITH3(name)                                               \
static inline void glue(gen_, name)(DisasContext *ctx,                  \
                                    int ra, int rb, int rc, int fn11)   \
{                                                                       \
    gen_ieee_arith3(ctx, gen_helper_##name, ra, rb, rc, fn11);          \
}
IEEE_ARITH3(adds)
IEEE_ARITH3(subs)
IEEE_ARITH3(muls)
IEEE_ARITH3(divs)
IEEE_ARITH3(addt)
IEEE_ARITH3(subt)
IEEE_ARITH3(mult)
IEEE_ARITH3(divt)
static void gen_ieee_compare(DisasContext *ctx,
                             void (*helper)(TCGv, TCGv_ptr, TCGv, TCGv),
                             int ra, int rb, int rc, int fn11)
{
    TCGv va, vb, vc;

    va = gen_ieee_input(ctx, ra, fn11, 1);
    vb = gen_ieee_input(ctx, rb, fn11, 1);
    vc = dest_fpr(ctx, rc);
    helper(vc, cpu_env, va, vb);

    gen_fp_exc_raise(rc, fn11);
}

#define IEEE_CMP3(name)                                                 \
static inline void glue(gen_, name)(DisasContext *ctx,                  \
                                    int ra, int rb, int rc, int fn11)   \
{                                                                       \
    gen_ieee_compare(ctx, gen_helper_##name, ra, rb, rc, fn11);         \
}
IEEE_CMP3(cmptun)
IEEE_CMP3(cmpteq)
IEEE_CMP3(cmptlt)
IEEE_CMP3(cmptle)
static inline uint64_t zapnot_mask(uint8_t lit)
{
    uint64_t mask = 0;
    int i;

    for (i = 0; i < 8; ++i) {
        if ((lit >> i) & 1) {
            mask |= 0xffull << (i * 8);
        }
    }
    return mask;
}

/* Implement zapnot with an immediate operand, which expands to some
   form of immediate AND.  This is a basic building block in the
   definition of many of the other byte manipulation instructions.  */
static void gen_zapnoti(TCGv dest, TCGv src, uint8_t lit)
{
    switch (lit) {
    case 0x00:
        tcg_gen_movi_i64(dest, 0);
        break;
    case 0x01:
        tcg_gen_ext8u_i64(dest, src);
        break;
    case 0x03:
        tcg_gen_ext16u_i64(dest, src);
        break;
    case 0x0f:
        tcg_gen_ext32u_i64(dest, src);
        break;
    case 0xff:
        tcg_gen_mov_i64(dest, src);
        break;
    default:
        tcg_gen_andi_i64(dest, src, zapnot_mask(lit));
        break;
    }
}
/* EXTWH, EXTLH, EXTQH */
static void gen_ext_h(DisasContext *ctx, TCGv vc, TCGv va, int rb, bool islit,
                      uint8_t lit, uint8_t byte_mask)
{
    if (islit) {
        int pos = (64 - lit * 8) & 0x3f;
        int len = cto32(byte_mask) * 8;
        if (pos < len) {
            tcg_gen_deposit_z_i64(vc, va, pos, len - pos);
        } else {
            tcg_gen_movi_i64(vc, 0);
        }
    } else {
        TCGv tmp = tcg_temp_new();
        tcg_gen_shli_i64(tmp, load_gpr(ctx, rb), 3);
        tcg_gen_neg_i64(tmp, tmp);
        tcg_gen_andi_i64(tmp, tmp, 0x3f);
        tcg_gen_shl_i64(vc, va, tmp);
        tcg_temp_free(tmp);
    }
    gen_zapnoti(vc, vc, byte_mask);
}
/* EXTBL, EXTWL, EXTLL, EXTQL */
static void gen_ext_l(DisasContext *ctx, TCGv vc, TCGv va, int rb, bool islit,
                      uint8_t lit, uint8_t byte_mask)
{
    if (islit) {
        int pos = (lit & 7) * 8;
        int len = cto32(byte_mask) * 8;
        if (pos + len >= 64) {
            len = 64 - pos;
        }
        tcg_gen_extract_i64(vc, va, pos, len);
    } else {
        TCGv tmp = tcg_temp_new();
        tcg_gen_andi_i64(tmp, load_gpr(ctx, rb), 7);
        tcg_gen_shli_i64(tmp, tmp, 3);
        tcg_gen_shr_i64(vc, va, tmp);
        tcg_temp_free(tmp);
        gen_zapnoti(vc, vc, byte_mask);
    }
}
/* INSWH, INSLH, INSQH */
static void gen_ins_h(DisasContext *ctx, TCGv vc, TCGv va, int rb, bool islit,
                      uint8_t lit, uint8_t byte_mask)
{
    if (islit) {
        int pos = 64 - (lit & 7) * 8;
        int len = cto32(byte_mask) * 8;
        if (pos < len) {
            tcg_gen_extract_i64(vc, va, pos, len - pos);
        } else {
            tcg_gen_movi_i64(vc, 0);
        }
    } else {
        TCGv tmp = tcg_temp_new();
        TCGv shift = tcg_temp_new();

        /* The instruction description has us left-shift the byte mask
           and extract bits <15:8> and apply that zap at the end.  This
           is equivalent to simply performing the zap first and shifting
           afterward.  */
        gen_zapnoti(tmp, va, byte_mask);

        /* If (B & 7) == 0, we need to shift by 64 and leave a zero.  Do this
           portably by splitting the shift into two parts: shift_count-1 and 1.
           Arrange for the -1 by using ones-complement instead of
           twos-complement in the negation: ~(B * 8) & 63.  */
        tcg_gen_shli_i64(shift, load_gpr(ctx, rb), 3);
        tcg_gen_not_i64(shift, shift);
        tcg_gen_andi_i64(shift, shift, 0x3f);

        tcg_gen_shr_i64(vc, tmp, shift);
        tcg_gen_shri_i64(vc, vc, 1);
        tcg_temp_free(shift);
        tcg_temp_free(tmp);
    }
}
/* INSBL, INSWL, INSLL, INSQL */
static void gen_ins_l(DisasContext *ctx, TCGv vc, TCGv va, int rb, bool islit,
                      uint8_t lit, uint8_t byte_mask)
{
    if (islit) {
        int pos = (lit & 7) * 8;
        int len = cto32(byte_mask) * 8;
        if (pos + len > 64) {
            len = 64 - pos;
        }
        tcg_gen_deposit_z_i64(vc, va, pos, len);
    } else {
        TCGv tmp = tcg_temp_new();
        TCGv shift = tcg_temp_new();

        /* The instruction description has us left-shift the byte mask
           and extract bits <15:8> and apply that zap at the end.  This
           is equivalent to simply performing the zap first and shifting
           afterward.  */
        gen_zapnoti(tmp, va, byte_mask);

        tcg_gen_andi_i64(shift, load_gpr(ctx, rb), 7);
        tcg_gen_shli_i64(shift, shift, 3);
        tcg_gen_shl_i64(vc, tmp, shift);
        tcg_temp_free(shift);
        tcg_temp_free(tmp);
    }
}
/* MSKWH, MSKLH, MSKQH */
static void gen_msk_h(DisasContext *ctx, TCGv vc, TCGv va, int rb, bool islit,
                      uint8_t lit, uint8_t byte_mask)
{
    if (islit) {
        gen_zapnoti(vc, va, ~((byte_mask << (lit & 7)) >> 8));
    } else {
        TCGv shift = tcg_temp_new();
        TCGv mask = tcg_temp_new();

        /* The instruction description is as above, where the byte_mask
           is shifted left, and then we extract bits <15:8>.  This can be
           emulated with a right-shift on the expanded byte mask.  This
           requires extra care because for an input <2:0> == 0 we need a
           shift of 64 bits in order to generate a zero.  This is done by
           splitting the shift into two parts, the variable shift - 1
           followed by a constant 1 shift.  The code we expand below is
           equivalent to ~(B * 8) & 63.  */
        tcg_gen_shli_i64(shift, load_gpr(ctx, rb), 3);
        tcg_gen_not_i64(shift, shift);
        tcg_gen_andi_i64(shift, shift, 0x3f);
        tcg_gen_movi_i64(mask, zapnot_mask(byte_mask));
        tcg_gen_shr_i64(mask, mask, shift);
        tcg_gen_shri_i64(mask, mask, 1);

        tcg_gen_andc_i64(vc, va, mask);

        tcg_temp_free(mask);
        tcg_temp_free(shift);
    }
}
/* MSKBL, MSKWL, MSKLL, MSKQL */
static void gen_msk_l(DisasContext *ctx, TCGv vc, TCGv va, int rb, bool islit,
                      uint8_t lit, uint8_t byte_mask)
{
    if (islit) {
        gen_zapnoti(vc, va, ~(byte_mask << (lit & 7)));
    } else {
        TCGv shift = tcg_temp_new();
        TCGv mask = tcg_temp_new();

        tcg_gen_andi_i64(shift, load_gpr(ctx, rb), 7);
        tcg_gen_shli_i64(shift, shift, 3);
        tcg_gen_movi_i64(mask, zapnot_mask(byte_mask));
        tcg_gen_shl_i64(mask, mask, shift);

        tcg_gen_andc_i64(vc, va, mask);

        tcg_temp_free(mask);
        tcg_temp_free(shift);
    }
}
static void gen_rx(DisasContext *ctx, int ra, int set)
{
    TCGv_i32 tmp;

    if (ra != 31) {
        tcg_gen_ld8u_i64(ctx->ir[ra], cpu_env,
                         offsetof(CPUAlphaState, intr_flag));
    }

    tmp = tcg_const_i32(set);
    tcg_gen_st8_i32(tmp, cpu_env, offsetof(CPUAlphaState, intr_flag));
    tcg_temp_free_i32(tmp);
}
static ExitStatus gen_call_pal(DisasContext *ctx, int palcode)
{
    /* We're emulating OSF/1 PALcode.  Many of these are trivial access
       to internal cpu registers.  */

    /* Unprivileged PAL call */
    if (palcode >= 0x80 && palcode < 0xC0) {
        /* No-op inside QEMU.  */
        tcg_gen_ld_i64(ctx->ir[IR_V0], cpu_env,
                       offsetof(CPUAlphaState, unique));
        tcg_gen_st_i64(ctx->ir[IR_A0], cpu_env,
                       offsetof(CPUAlphaState, unique));
    }

#ifndef CONFIG_USER_ONLY
    /* Privileged PAL code */
    if (palcode < 0x40 && (ctx->tb->flags & TB_FLAGS_USER_MODE) == 0) {
        /* No-op inside QEMU.  */
        /* No-op inside QEMU.  */
        tcg_gen_st_i64(ctx->ir[IR_A0], cpu_env,
                       offsetof(CPUAlphaState, vptptr));
        tcg_gen_st_i64(ctx->ir[IR_A0], cpu_env,
                       offsetof(CPUAlphaState, sysval));
        tcg_gen_ld_i64(ctx->ir[IR_V0], cpu_env,
                       offsetof(CPUAlphaState, sysval));

        /* Note that we already know we're in kernel mode, so we know
           that PS only contains the 3 IPL bits.  */
        tcg_gen_ld8u_i64(ctx->ir[IR_V0], cpu_env,
                         offsetof(CPUAlphaState, ps));

        /* But make sure and store only the 3 IPL bits from the user.  */
        tmp = tcg_temp_new();
        tcg_gen_andi_i64(tmp, ctx->ir[IR_A0], PS_INT_MASK);
        tcg_gen_st8_i64(tmp, cpu_env, offsetof(CPUAlphaState, ps));

        tcg_gen_ld8u_i64(ctx->ir[IR_V0], cpu_env,
                         offsetof(CPUAlphaState, ps));
        tcg_gen_st_i64(ctx->ir[IR_A0], cpu_env,
                       offsetof(CPUAlphaState, usp));
        tcg_gen_ld_i64(ctx->ir[IR_V0], cpu_env,
                       offsetof(CPUAlphaState, usp));
        tcg_gen_ld32s_i64(ctx->ir[IR_V0], cpu_env,
            -offsetof(AlphaCPU, env) + offsetof(CPUState, cpu_index));

        tmp = tcg_const_i64(1);
        tcg_gen_st32_i64(tmp, cpu_env, -offsetof(AlphaCPU, env) +
                                       offsetof(CPUState, halted));
        tcg_gen_movi_i64(ctx->ir[IR_V0], 0);
        return gen_excp(ctx, EXCP_HALTED, 0);
    }
#endif

    return gen_invalid(ctx);
#ifdef CONFIG_USER_ONLY
    return gen_excp(ctx, EXCP_CALL_PAL, palcode);
#else
    {
        TCGv tmp = tcg_temp_new();
        uint64_t exc_addr = ctx->pc;
        uint64_t entry = ctx->palbr;

        if (ctx->tb->flags & TB_FLAGS_PAL_MODE) {
            exc_addr |= 1;
        } else {
            tcg_gen_movi_i64(tmp, 1);
            tcg_gen_st8_i64(tmp, cpu_env, offsetof(CPUAlphaState, pal_mode));
        }

        tcg_gen_movi_i64(tmp, exc_addr);
        tcg_gen_st_i64(tmp, cpu_env, offsetof(CPUAlphaState, exc_addr));

        entry += (palcode & 0x80
                  ? 0x2000 + (palcode - 0x80) * 64
                  : 0x1000 + palcode * 64);

        /* Since the destination is running in PALmode, we don't really
           need the page permissions check.  We'll see the existence of
           the page when we create the TB, and we'll flush all TBs if
           we change the PAL base register.  */
        if (!ctx->singlestep_enabled && !(ctx->tb->cflags & CF_LAST_IO)) {
            tcg_gen_goto_tb(0);
            tcg_gen_movi_i64(cpu_pc, entry);
            tcg_gen_exit_tb((uintptr_t)ctx->tb);
            return EXIT_GOTO_TB;
        } else {
            tcg_gen_movi_i64(cpu_pc, entry);
            return EXIT_PC_UPDATED;
        }
    }
#endif
}
#ifndef CONFIG_USER_ONLY

#define PR_BYTE         0x100000
#define PR_LONG         0x200000

static int cpu_pr_data(int pr)
{
    switch (pr) {
    case  0: return offsetof(CPUAlphaState, ps) | PR_BYTE;
    case  1: return offsetof(CPUAlphaState, fen) | PR_BYTE;
    case  2: return offsetof(CPUAlphaState, pcc_ofs) | PR_LONG;
    case  3: return offsetof(CPUAlphaState, trap_arg0);
    case  4: return offsetof(CPUAlphaState, trap_arg1);
    case  5: return offsetof(CPUAlphaState, trap_arg2);
    case  6: return offsetof(CPUAlphaState, exc_addr);
    case  7: return offsetof(CPUAlphaState, palbr);
    case  8: return offsetof(CPUAlphaState, ptbr);
    case  9: return offsetof(CPUAlphaState, vptptr);
    case 10: return offsetof(CPUAlphaState, unique);
    case 11: return offsetof(CPUAlphaState, sysval);
    case 12: return offsetof(CPUAlphaState, usp);

        return offsetof(CPUAlphaState, scratch[pr - 40]);

        return offsetof(CPUAlphaState, alarm_expire);
    }
}
static ExitStatus gen_mfpr(DisasContext *ctx, TCGv va, int regno)
{
    void (*helper)(TCGv);
    int data;

    switch (regno) {
        /* Accessing the "non-shadow" general registers.  */
        regno = regno == 39 ? 25 : regno - 32 + 8;
        tcg_gen_mov_i64(va, cpu_std_ir[regno]);

    case 250: /* WALLTIME */
        helper = gen_helper_get_walltime;
    case 249: /* VMTIME */
        helper = gen_helper_get_vmtime;

        return EXIT_PC_STALE;

        /* The basic registers are data only, and unknown registers
           are read-zero, write-ignore.  */
        data = cpu_pr_data(regno);
        if (data == 0) {
            tcg_gen_movi_i64(va, 0);
        } else if (data & PR_BYTE) {
            tcg_gen_ld8u_i64(va, cpu_env, data & ~PR_BYTE);
        } else if (data & PR_LONG) {
            tcg_gen_ld32s_i64(va, cpu_env, data & ~PR_LONG);
        } else {
            tcg_gen_ld_i64(va, cpu_env, data);
        }
    }
}
gen_mtpr(DisasContext
*ctx
, TCGv vb
, int regno
)
1368 gen_helper_tbia(cpu_env
);
1373 gen_helper_tbis(cpu_env
, vb
);
1378 tmp
= tcg_const_i64(1);
1379 tcg_gen_st32_i64(tmp
, cpu_env
, -offsetof(AlphaCPU
, env
) +
1380 offsetof(CPUState
, halted
));
1381 return gen_excp(ctx
, EXCP_HALTED
, 0);
1385 gen_helper_halt(vb
);
1386 return EXIT_PC_STALE
;
1390 gen_helper_set_alarm(cpu_env
, vb
);
1395 tcg_gen_st_i64(vb
, cpu_env
, offsetof(CPUAlphaState
, palbr
));
1396 /* Changing the PAL base register implies un-chaining all of the TBs
1397 that ended with a CALL_PAL. Since the base register usually only
1398 changes during boot, flushing everything works well. */
1399 gen_helper_tb_flush(cpu_env
);
1400 return EXIT_PC_STALE
;
1403 /* Accessing the "non-shadow" general registers. */
1404 regno
= regno
== 39 ? 25 : regno
- 32 + 8;
1405 tcg_gen_mov_i64(cpu_std_ir
[regno
], vb
);
1409 /* The basic registers are data only, and unknown registers
1410 are read-zero, write-ignore. */
1411 data
= cpu_pr_data(regno
);
1413 if (data
& PR_BYTE
) {
1414 tcg_gen_st8_i64(vb
, cpu_env
, data
& ~PR_BYTE
);
1415 } else if (data
& PR_LONG
) {
1416 tcg_gen_st32_i64(vb
, cpu_env
, data
& ~PR_LONG
);
1418 tcg_gen_st_i64(vb
, cpu_env
, data
);
1426 #endif /* !USER_ONLY*/
#define REQUIRE_NO_LIT                          \

#define REQUIRE_TB_FLAG(FLAG)                   \
        if ((ctx->tb->flags & (FLAG)) == 0) {   \

#define REQUIRE_REG_31(WHICH)                   \
        if (WHICH != 31) {                      \
static ExitStatus translate_one(DisasContext *ctx, uint32_t insn)
{
    int32_t disp21, disp16, disp12 __attribute__((unused));
    uint16_t fn11;
    uint8_t opc, ra, rb, rc, fpfn, fn7, lit;
    bool islit, real_islit;
    TCGv va, vb, vc, tmp, tmp2;
    TCGv_i32 t32;
    ExitStatus ret;

    /* Decode all instruction fields */
    opc = extract32(insn, 26, 6);
    ra = extract32(insn, 21, 5);
    rb = extract32(insn, 16, 5);
    rc = extract32(insn, 0, 5);
    real_islit = islit = extract32(insn, 12, 1);
    lit = extract32(insn, 13, 8);

    disp21 = sextract32(insn, 0, 21);
    disp16 = sextract32(insn, 0, 16);
    disp12 = sextract32(insn, 0, 12);

    fn11 = extract32(insn, 5, 11);
    fpfn = extract32(insn, 5, 6);
    fn7 = extract32(insn, 5, 7);
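
    /* Field layout recap (taken directly from the extracts above): opcode in
       bits <31:26>, ra in <25:21>, rb in <20:16>, rc in <4:0>; bit 12
       selects an 8-bit literal in <20:13> instead of rb; the function field
       occupies the bits from 5 upward and is viewed as fn7, fpfn or fn11;
       memory-format instructions carry a signed 16-bit displacement,
       branches a signed 21-bit one, and the HW_LD/HW_ST forms a signed
       12-bit one, all in the low bits. */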
    if (rb == 31 && !islit) {

        ret = gen_call_pal(ctx, insn & 0x03ffffff);

        disp16 = (uint32_t)disp16 << 16;

        va = dest_gpr(ctx, ra);
        /* It's worth special-casing immediate loads.  */
        tcg_gen_movi_i64(va, disp16);
        tcg_gen_addi_i64(va, load_gpr(ctx, rb), disp16);

        REQUIRE_TB_FLAG(TB_FLAGS_AMASK_BWX);
        gen_load_mem(ctx, &tcg_gen_qemu_ld8u, ra, rb, disp16, 0, 0);

        gen_load_mem(ctx, &tcg_gen_qemu_ld64, ra, rb, disp16, 0, 1);

        REQUIRE_TB_FLAG(TB_FLAGS_AMASK_BWX);
        gen_load_mem(ctx, &tcg_gen_qemu_ld16u, ra, rb, disp16, 0, 0);

        REQUIRE_TB_FLAG(TB_FLAGS_AMASK_BWX);
        gen_store_mem(ctx, &tcg_gen_qemu_st16, ra, rb, disp16, 0, 0);

        REQUIRE_TB_FLAG(TB_FLAGS_AMASK_BWX);
        gen_store_mem(ctx, &tcg_gen_qemu_st8, ra, rb, disp16, 0, 0);

        gen_store_mem(ctx, &tcg_gen_qemu_st64, ra, rb, disp16, 0, 1);
        vc = dest_gpr(ctx, rc);
        vb = load_gpr_lit(ctx, rb, lit, islit);

        /* Special case ADDL as SEXTL.  */
        tcg_gen_ext32s_i64(vc, vb);

        /* Special case SUBQ as NEGQ.  */
        tcg_gen_neg_i64(vc, vb);

        va = load_gpr(ctx, ra);

        tcg_gen_add_i64(vc, va, vb);
        tcg_gen_ext32s_i64(vc, vc);

        tmp = tcg_temp_new();
        tcg_gen_shli_i64(tmp, va, 2);
        tcg_gen_add_i64(tmp, tmp, vb);
        tcg_gen_ext32s_i64(vc, tmp);

        tcg_gen_sub_i64(vc, va, vb);
        tcg_gen_ext32s_i64(vc, vc);

        tmp = tcg_temp_new();
        tcg_gen_shli_i64(tmp, va, 2);
        tcg_gen_sub_i64(tmp, tmp, vb);
        tcg_gen_ext32s_i64(vc, tmp);

        /* Special case 0 >= X as X == 0.  */
        gen_helper_cmpbe0(vc, vb);
        gen_helper_cmpbge(vc, va, vb);

        tmp = tcg_temp_new();
        tcg_gen_shli_i64(tmp, va, 3);
        tcg_gen_add_i64(tmp, tmp, vb);
        tcg_gen_ext32s_i64(vc, tmp);

        tmp = tcg_temp_new();
        tcg_gen_shli_i64(tmp, va, 3);
        tcg_gen_sub_i64(tmp, tmp, vb);
        tcg_gen_ext32s_i64(vc, tmp);

        tcg_gen_setcond_i64(TCG_COND_LTU, vc, va, vb);

        tcg_gen_add_i64(vc, va, vb);

        tmp = tcg_temp_new();
        tcg_gen_shli_i64(tmp, va, 2);
        tcg_gen_add_i64(vc, tmp, vb);

        tcg_gen_sub_i64(vc, va, vb);

        tmp = tcg_temp_new();
        tcg_gen_shli_i64(tmp, va, 2);
        tcg_gen_sub_i64(vc, tmp, vb);

        tcg_gen_setcond_i64(TCG_COND_EQ, vc, va, vb);

        tmp = tcg_temp_new();
        tcg_gen_shli_i64(tmp, va, 3);
        tcg_gen_add_i64(vc, tmp, vb);

        tmp = tcg_temp_new();
        tcg_gen_shli_i64(tmp, va, 3);
        tcg_gen_sub_i64(vc, tmp, vb);

        tcg_gen_setcond_i64(TCG_COND_LEU, vc, va, vb);
        tmp = tcg_temp_new();
        tcg_gen_ext32s_i64(tmp, va);
        tcg_gen_ext32s_i64(vc, vb);
        tcg_gen_add_i64(tmp, tmp, vc);
        tcg_gen_ext32s_i64(vc, tmp);
        gen_helper_check_overflow(cpu_env, vc, tmp);

        tmp = tcg_temp_new();
        tcg_gen_ext32s_i64(tmp, va);
        tcg_gen_ext32s_i64(vc, vb);
        tcg_gen_sub_i64(tmp, tmp, vc);
        tcg_gen_ext32s_i64(vc, tmp);
        gen_helper_check_overflow(cpu_env, vc, tmp);

        tcg_gen_setcond_i64(TCG_COND_LT, vc, va, vb);

        tmp = tcg_temp_new();
        tmp2 = tcg_temp_new();
        tcg_gen_eqv_i64(tmp, va, vb);
        tcg_gen_mov_i64(tmp2, va);
        tcg_gen_add_i64(vc, va, vb);
        tcg_gen_xor_i64(tmp2, tmp2, vc);
        tcg_gen_and_i64(tmp, tmp, tmp2);
        tcg_gen_shri_i64(tmp, tmp, 63);
        tcg_gen_movi_i64(tmp2, 0);
        gen_helper_check_overflow(cpu_env, tmp, tmp2);
        tcg_temp_free(tmp2);

        tmp = tcg_temp_new();
        tmp2 = tcg_temp_new();
        tcg_gen_xor_i64(tmp, va, vb);
        tcg_gen_mov_i64(tmp2, va);
        tcg_gen_sub_i64(vc, va, vb);
        tcg_gen_xor_i64(tmp2, tmp2, vc);
        tcg_gen_and_i64(tmp, tmp, tmp2);
        tcg_gen_shri_i64(tmp, tmp, 63);
        tcg_gen_movi_i64(tmp2, 0);
        gen_helper_check_overflow(cpu_env, tmp, tmp2);
        tcg_temp_free(tmp2);
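
        /* Overflow-detection note for the two blocks above: for the add
           form, eqv(va, vb) has bit 63 set when the operands share a sign,
           and (va ^ result) has it set when the result's sign differs from
           va's; their AND shifted down by 63 is therefore 1 exactly on
           signed overflow.  The subtract form is the same test with
           xor(va, vb), i.e. differing operand signs. */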
        tcg_gen_setcond_i64(TCG_COND_LE, vc, va, vb);

        /* Special case BIS as NOP.  */
        /* Special case BIS as MOV.  */
        vc = dest_gpr(ctx, rc);
        tcg_gen_movi_i64(vc, lit);
        tcg_gen_mov_i64(vc, load_gpr(ctx, rb));

        vc = dest_gpr(ctx, rc);
        vb = load_gpr_lit(ctx, rb, lit, islit);

        if (fn7 == 0x28 && ra == 31) {
            /* Special case ORNOT as NOT.  */
            tcg_gen_not_i64(vc, vb);
        }

        va = load_gpr(ctx, ra);

        tcg_gen_and_i64(vc, va, vb);

        tcg_gen_andc_i64(vc, va, vb);

        tmp = tcg_temp_new();
        tcg_gen_andi_i64(tmp, va, 1);
        tcg_gen_movcond_i64(TCG_COND_NE, vc, tmp, load_zero(ctx),
                            vb, load_gpr(ctx, rc));

        tmp = tcg_temp_new();
        tcg_gen_andi_i64(tmp, va, 1);
        tcg_gen_movcond_i64(TCG_COND_EQ, vc, tmp, load_zero(ctx),
                            vb, load_gpr(ctx, rc));

        tcg_gen_or_i64(vc, va, vb);

        tcg_gen_movcond_i64(TCG_COND_EQ, vc, va, load_zero(ctx),
                            vb, load_gpr(ctx, rc));

        tcg_gen_movcond_i64(TCG_COND_NE, vc, va, load_zero(ctx),
                            vb, load_gpr(ctx, rc));

        tcg_gen_orc_i64(vc, va, vb);

        tcg_gen_xor_i64(vc, va, vb);

        tcg_gen_movcond_i64(TCG_COND_LT, vc, va, load_zero(ctx),
                            vb, load_gpr(ctx, rc));

        tcg_gen_movcond_i64(TCG_COND_GE, vc, va, load_zero(ctx),
                            vb, load_gpr(ctx, rc));

        tcg_gen_eqv_i64(vc, va, vb);

        {
            uint64_t amask = ctx->tb->flags >> TB_FLAGS_AMASK_SHIFT;
            tcg_gen_andi_i64(vc, vb, ~amask);
        }

        tcg_gen_movcond_i64(TCG_COND_LE, vc, va, load_zero(ctx),
                            vb, load_gpr(ctx, rc));

        tcg_gen_movcond_i64(TCG_COND_GT, vc, va, load_zero(ctx),
                            vb, load_gpr(ctx, rc));

        tcg_gen_movi_i64(vc, ctx->implver);
        vc = dest_gpr(ctx, rc);
        va = load_gpr(ctx, ra);

        gen_msk_l(ctx, vc, va, rb, islit, lit, 0x01);
        gen_ext_l(ctx, vc, va, rb, islit, lit, 0x01);
        gen_ins_l(ctx, vc, va, rb, islit, lit, 0x01);
        gen_msk_l(ctx, vc, va, rb, islit, lit, 0x03);
        gen_ext_l(ctx, vc, va, rb, islit, lit, 0x03);
        gen_ins_l(ctx, vc, va, rb, islit, lit, 0x03);
        gen_msk_l(ctx, vc, va, rb, islit, lit, 0x0f);
        gen_ext_l(ctx, vc, va, rb, islit, lit, 0x0f);
        gen_ins_l(ctx, vc, va, rb, islit, lit, 0x0f);

        gen_zapnoti(vc, va, ~lit);
        gen_helper_zap(vc, va, load_gpr(ctx, rb));

        gen_zapnoti(vc, va, lit);
        gen_helper_zapnot(vc, va, load_gpr(ctx, rb));

        gen_msk_l(ctx, vc, va, rb, islit, lit, 0xff);

        tcg_gen_shri_i64(vc, va, lit & 0x3f);
        tmp = tcg_temp_new();
        vb = load_gpr(ctx, rb);
        tcg_gen_andi_i64(tmp, vb, 0x3f);
        tcg_gen_shr_i64(vc, va, tmp);

        gen_ext_l(ctx, vc, va, rb, islit, lit, 0xff);

        tcg_gen_shli_i64(vc, va, lit & 0x3f);
        tmp = tcg_temp_new();
        vb = load_gpr(ctx, rb);
        tcg_gen_andi_i64(tmp, vb, 0x3f);
        tcg_gen_shl_i64(vc, va, tmp);

        gen_ins_l(ctx, vc, va, rb, islit, lit, 0xff);

        tcg_gen_sari_i64(vc, va, lit & 0x3f);
        tmp = tcg_temp_new();
        vb = load_gpr(ctx, rb);
        tcg_gen_andi_i64(tmp, vb, 0x3f);
        tcg_gen_sar_i64(vc, va, tmp);

        gen_msk_h(ctx, vc, va, rb, islit, lit, 0x03);
        gen_ins_h(ctx, vc, va, rb, islit, lit, 0x03);
        gen_ext_h(ctx, vc, va, rb, islit, lit, 0x03);
        gen_msk_h(ctx, vc, va, rb, islit, lit, 0x0f);
        gen_ins_h(ctx, vc, va, rb, islit, lit, 0x0f);
        gen_ext_h(ctx, vc, va, rb, islit, lit, 0x0f);
        gen_msk_h(ctx, vc, va, rb, islit, lit, 0xff);
        gen_ins_h(ctx, vc, va, rb, islit, lit, 0xff);
        gen_ext_h(ctx, vc, va, rb, islit, lit, 0xff);
        vc = dest_gpr(ctx, rc);
        vb = load_gpr_lit(ctx, rb, lit, islit);
        va = load_gpr(ctx, ra);

        tcg_gen_mul_i64(vc, va, vb);
        tcg_gen_ext32s_i64(vc, vc);

        tcg_gen_mul_i64(vc, va, vb);

        tmp = tcg_temp_new();
        tcg_gen_mulu2_i64(tmp, vc, va, vb);

        tmp = tcg_temp_new();
        tcg_gen_ext32s_i64(tmp, va);
        tcg_gen_ext32s_i64(vc, vb);
        tcg_gen_mul_i64(tmp, tmp, vc);
        tcg_gen_ext32s_i64(vc, tmp);
        gen_helper_check_overflow(cpu_env, vc, tmp);

        tmp = tcg_temp_new();
        tmp2 = tcg_temp_new();
        tcg_gen_muls2_i64(vc, tmp, va, vb);
        tcg_gen_sari_i64(tmp2, vc, 63);
        gen_helper_check_overflow(cpu_env, tmp, tmp2);
        tcg_temp_free(tmp2);
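
        /* For the signed multiply-with-overflow block above, muls2 produces
           the full 128-bit product (low half in vc, high half in tmp); there
           is no overflow exactly when the high half equals the sign
           extension of the low half, which is what comparing tmp against
           sari(vc, 63) in the overflow helper tests. */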
        REQUIRE_TB_FLAG(TB_FLAGS_AMASK_FIX);
        vc = dest_fpr(ctx, rc);
        switch (fpfn) { /* fn11 & 0x3F */

            t32 = tcg_temp_new_i32();
            va = load_gpr(ctx, ra);
            tcg_gen_extrl_i64_i32(t32, va);
            gen_helper_memory_to_s(vc, t32);
            tcg_temp_free_i32(t32);

            vb = load_fpr(ctx, rb);
            gen_helper_sqrtf(vc, cpu_env, vb);

            gen_sqrts(ctx, rb, rc, fn11);

            t32 = tcg_temp_new_i32();
            va = load_gpr(ctx, ra);
            tcg_gen_extrl_i64_i32(t32, va);
            gen_helper_memory_to_f(vc, t32);
            tcg_temp_free_i32(t32);

            va = load_gpr(ctx, ra);
            tcg_gen_mov_i64(vc, va);

            vb = load_fpr(ctx, rb);
            gen_helper_sqrtg(vc, cpu_env, vb);

            gen_sqrtt(ctx, rb, rc, fn11);
        /* VAX floating point */
        /* XXX: rounding mode and trap are ignored (!) */
        vc = dest_fpr(ctx, rc);
        vb = load_fpr(ctx, rb);
        va = load_fpr(ctx, ra);
        switch (fpfn) { /* fn11 & 0x3F */

            gen_helper_addf(vc, cpu_env, va, vb);
            gen_helper_subf(vc, cpu_env, va, vb);
            gen_helper_mulf(vc, cpu_env, va, vb);
            gen_helper_divf(vc, cpu_env, va, vb);

            gen_helper_addg(vc, cpu_env, va, vb);
            gen_helper_subg(vc, cpu_env, va, vb);
            gen_helper_mulg(vc, cpu_env, va, vb);
            gen_helper_divg(vc, cpu_env, va, vb);

            gen_helper_cmpgeq(vc, cpu_env, va, vb);
            gen_helper_cmpglt(vc, cpu_env, va, vb);
            gen_helper_cmpgle(vc, cpu_env, va, vb);

            gen_helper_cvtgf(vc, cpu_env, vb);
            gen_helper_cvtgq(vc, cpu_env, vb);
            gen_helper_cvtqf(vc, cpu_env, vb);
            gen_helper_cvtqg(vc, cpu_env, vb);
        /* IEEE floating-point */
        switch (fpfn) { /* fn11 & 0x3F */

            gen_adds(ctx, ra, rb, rc, fn11);
            gen_subs(ctx, ra, rb, rc, fn11);
            gen_muls(ctx, ra, rb, rc, fn11);
            gen_divs(ctx, ra, rb, rc, fn11);
            gen_addt(ctx, ra, rb, rc, fn11);
            gen_subt(ctx, ra, rb, rc, fn11);
            gen_mult(ctx, ra, rb, rc, fn11);
            gen_divt(ctx, ra, rb, rc, fn11);
            gen_cmptun(ctx, ra, rb, rc, fn11);
            gen_cmpteq(ctx, ra, rb, rc, fn11);
            gen_cmptlt(ctx, ra, rb, rc, fn11);
            gen_cmptle(ctx, ra, rb, rc, fn11);

            if (fn11 == 0x2AC || fn11 == 0x6AC) {
                gen_cvtst(ctx, rb, rc, fn11);
            } else {
                gen_cvtts(ctx, rb, rc, fn11);
            }

            gen_cvttq(ctx, rb, rc, fn11);
            gen_cvtqs(ctx, rb, rc, fn11);
            gen_cvtqt(ctx, rb, rc, fn11);
        vc = dest_fpr(ctx, rc);
        vb = load_fpr(ctx, rb);

        /* Special case CPYS as FNOP.  */
        vc = dest_fpr(ctx, rc);
        va = load_fpr(ctx, ra);
        /* Special case CPYS as FMOV.  */
        tcg_gen_mov_i64(vc, va);
        vb = load_fpr(ctx, rb);
        gen_cpy_mask(vc, va, vb, 0, 0x8000000000000000ULL);

        vc = dest_fpr(ctx, rc);
        vb = load_fpr(ctx, rb);
        va = load_fpr(ctx, ra);
        gen_cpy_mask(vc, va, vb, 1, 0x8000000000000000ULL);

        vc = dest_fpr(ctx, rc);
        vb = load_fpr(ctx, rb);
        va = load_fpr(ctx, ra);
        gen_cpy_mask(vc, va, vb, 0, 0xFFF0000000000000ULL);

        va = load_fpr(ctx, ra);
        gen_helper_store_fpcr(cpu_env, va);
        if (ctx->tb_rm == QUAL_RM_D) {
            /* Re-do the copy of the rounding mode to fp_status
               the next time we use dynamic rounding.  */
        }

        va = dest_fpr(ctx, ra);
        gen_helper_load_fpcr(va, cpu_env);

        gen_fcmov(ctx, TCG_COND_EQ, ra, rb, rc);
        gen_fcmov(ctx, TCG_COND_NE, ra, rb, rc);
        gen_fcmov(ctx, TCG_COND_LT, ra, rb, rc);
        gen_fcmov(ctx, TCG_COND_GE, ra, rb, rc);
        gen_fcmov(ctx, TCG_COND_LE, ra, rb, rc);
        gen_fcmov(ctx, TCG_COND_GT, ra, rb, rc);

    case 0x030: /* CVTQL */
    case 0x130: /* CVTQL/V */
    case 0x530: /* CVTQL/SV */
        vc = dest_fpr(ctx, rc);
        vb = load_fpr(ctx, rb);
        gen_helper_cvtql(vc, cpu_env, vb);
        gen_fp_exc_raise(rc, fn11);
        switch ((uint16_t)disp16) {

            tcg_gen_mb(TCG_MO_ALL | TCG_BAR_SC);

            tcg_gen_mb(TCG_MO_ST_ST | TCG_BAR_SC);

            va = dest_gpr(ctx, ra);
            if (ctx->tb->cflags & CF_USE_ICOUNT) {
                gen_helper_load_pcc(va, cpu_env);
                ret = EXIT_PC_STALE;
            } else {
                gen_helper_load_pcc(va, cpu_env);
            }

        /* HW_MFPR (PALcode) */
#ifndef CONFIG_USER_ONLY
        REQUIRE_TB_FLAG(TB_FLAGS_PAL_MODE);
        va = dest_gpr(ctx, ra);
        ret = gen_mfpr(ctx, va, insn & 0xffff);
#endif

        /* JMP, JSR, RET, JSR_COROUTINE.  These only differ by the branch
           prediction stack action, which of course we don't implement.  */
        vb = load_gpr(ctx, rb);
        tcg_gen_andi_i64(cpu_pc, vb, ~3);
        tcg_gen_movi_i64(ctx->ir[ra], ctx->pc);
        ret = EXIT_PC_UPDATED;
        /* HW_LD (PALcode) */
#ifndef CONFIG_USER_ONLY
        REQUIRE_TB_FLAG(TB_FLAGS_PAL_MODE);
        {
            TCGv addr = tcg_temp_new();
            vb = load_gpr(ctx, rb);
            va = dest_gpr(ctx, ra);

            tcg_gen_addi_i64(addr, vb, disp12);
            switch ((insn >> 12) & 0xF) {

                /* Longword physical access (hw_ldl/p) */
                tcg_gen_qemu_ld_i64(va, addr, MMU_PHYS_IDX, MO_LESL);

                /* Quadword physical access (hw_ldq/p) */
                tcg_gen_qemu_ld_i64(va, addr, MMU_PHYS_IDX, MO_LEQ);

                /* Longword physical access with lock (hw_ldl_l/p) */
                gen_qemu_ldl_l(va, addr, MMU_PHYS_IDX);

                /* Quadword physical access with lock (hw_ldq_l/p) */
                gen_qemu_ldq_l(va, addr, MMU_PHYS_IDX);

                /* Longword virtual PTE fetch (hw_ldl/v) */
                /* Quadword virtual PTE fetch (hw_ldq/v) */

                /* Longword virtual access (hw_ldl) */
                /* Quadword virtual access (hw_ldq) */

                /* Longword virtual access with protection check (hw_ldl/w) */
                tcg_gen_qemu_ld_i64(va, addr, MMU_KERNEL_IDX, MO_LESL);

                /* Quadword virtual access with protection check (hw_ldq/w) */
                tcg_gen_qemu_ld_i64(va, addr, MMU_KERNEL_IDX, MO_LEQ);

                /* Longword virtual access with alt access mode (hw_ldl/a) */
                /* Quadword virtual access with alt access mode (hw_ldq/a) */

                /* Longword virtual access with alternate access mode and
                   protection checks (hw_ldl/wa) */
                tcg_gen_qemu_ld_i64(va, addr, MMU_USER_IDX, MO_LESL);

                /* Quadword virtual access with alternate access mode and
                   protection checks (hw_ldq/wa) */
                tcg_gen_qemu_ld_i64(va, addr, MMU_USER_IDX, MO_LEQ);
            }
            tcg_temp_free(addr);
        }
#endif
        vc = dest_gpr(ctx, rc);

        REQUIRE_TB_FLAG(TB_FLAGS_AMASK_FIX);
        va = load_fpr(ctx, ra);
        tcg_gen_mov_i64(vc, va);

        } else if (fn7 == 0x78) {
            REQUIRE_TB_FLAG(TB_FLAGS_AMASK_FIX);
            t32 = tcg_temp_new_i32();
            va = load_fpr(ctx, ra);
            gen_helper_s_to_memory(t32, va);
            tcg_gen_ext_i32_i64(vc, t32);
            tcg_temp_free_i32(t32);
        }

        vb = load_gpr_lit(ctx, rb, lit, islit);

        REQUIRE_TB_FLAG(TB_FLAGS_AMASK_BWX);
        tcg_gen_ext8s_i64(vc, vb);

        REQUIRE_TB_FLAG(TB_FLAGS_AMASK_BWX);
        tcg_gen_ext16s_i64(vc, vb);

        REQUIRE_TB_FLAG(TB_FLAGS_AMASK_CIX);
        tcg_gen_ctpop_i64(vc, vb);

        REQUIRE_TB_FLAG(TB_FLAGS_AMASK_MVI);
        va = load_gpr(ctx, ra);
        gen_helper_perr(vc, va, vb);

        REQUIRE_TB_FLAG(TB_FLAGS_AMASK_CIX);
        tcg_gen_clzi_i64(vc, vb, 64);

        REQUIRE_TB_FLAG(TB_FLAGS_AMASK_CIX);
        tcg_gen_ctzi_i64(vc, vb, 64);

        REQUIRE_TB_FLAG(TB_FLAGS_AMASK_MVI);
        gen_helper_unpkbw(vc, vb);

        REQUIRE_TB_FLAG(TB_FLAGS_AMASK_MVI);
        gen_helper_unpkbl(vc, vb);

        REQUIRE_TB_FLAG(TB_FLAGS_AMASK_MVI);
        gen_helper_pkwb(vc, vb);

        REQUIRE_TB_FLAG(TB_FLAGS_AMASK_MVI);
        gen_helper_pklb(vc, vb);

        REQUIRE_TB_FLAG(TB_FLAGS_AMASK_MVI);
        va = load_gpr(ctx, ra);
        gen_helper_minsb8(vc, va, vb);

        REQUIRE_TB_FLAG(TB_FLAGS_AMASK_MVI);
        va = load_gpr(ctx, ra);
        gen_helper_minsw4(vc, va, vb);

        REQUIRE_TB_FLAG(TB_FLAGS_AMASK_MVI);
        va = load_gpr(ctx, ra);
        gen_helper_minub8(vc, va, vb);

        REQUIRE_TB_FLAG(TB_FLAGS_AMASK_MVI);
        va = load_gpr(ctx, ra);
        gen_helper_minuw4(vc, va, vb);

        REQUIRE_TB_FLAG(TB_FLAGS_AMASK_MVI);
        va = load_gpr(ctx, ra);
        gen_helper_maxub8(vc, va, vb);

        REQUIRE_TB_FLAG(TB_FLAGS_AMASK_MVI);
        va = load_gpr(ctx, ra);
        gen_helper_maxuw4(vc, va, vb);

        REQUIRE_TB_FLAG(TB_FLAGS_AMASK_MVI);
        va = load_gpr(ctx, ra);
        gen_helper_maxsb8(vc, va, vb);

        REQUIRE_TB_FLAG(TB_FLAGS_AMASK_MVI);
        va = load_gpr(ctx, ra);
        gen_helper_maxsw4(vc, va, vb);
        /* HW_MTPR (PALcode) */
#ifndef CONFIG_USER_ONLY
        REQUIRE_TB_FLAG(TB_FLAGS_PAL_MODE);
        vb = load_gpr(ctx, rb);
        ret = gen_mtpr(ctx, vb, insn & 0xffff);
#endif

        /* HW_RET (PALcode) */
#ifndef CONFIG_USER_ONLY
        REQUIRE_TB_FLAG(TB_FLAGS_PAL_MODE);

        /* Pre-EV6 CPUs interpreted this as HW_REI, loading the return
           address from EXC_ADDR.  This turns out to be useful for our
           emulation PALcode, so continue to accept it.  */
        ctx->lit = vb = tcg_temp_new();
        tcg_gen_ld_i64(vb, cpu_env, offsetof(CPUAlphaState, exc_addr));

        vb = load_gpr(ctx, rb);

        tmp = tcg_temp_new();
        tcg_gen_movi_i64(tmp, 0);
        tcg_gen_st8_i64(tmp, cpu_env, offsetof(CPUAlphaState, intr_flag));
        tcg_gen_movi_i64(cpu_lock_addr, -1);
        tcg_gen_andi_i64(tmp, vb, 1);
        tcg_gen_st8_i64(tmp, cpu_env, offsetof(CPUAlphaState, pal_mode));
        tcg_gen_andi_i64(cpu_pc, vb, ~3);
        ret = EXIT_PC_UPDATED;
#endif
        /* HW_ST (PALcode) */
#ifndef CONFIG_USER_ONLY
        REQUIRE_TB_FLAG(TB_FLAGS_PAL_MODE);
        switch ((insn >> 12) & 0xF) {

            /* Longword physical access */
            va = load_gpr(ctx, ra);
            vb = load_gpr(ctx, rb);
            tmp = tcg_temp_new();
            tcg_gen_addi_i64(tmp, vb, disp12);
            tcg_gen_qemu_st_i64(va, tmp, MMU_PHYS_IDX, MO_LESL);

            /* Quadword physical access */
            va = load_gpr(ctx, ra);
            vb = load_gpr(ctx, rb);
            tmp = tcg_temp_new();
            tcg_gen_addi_i64(tmp, vb, disp12);
            tcg_gen_qemu_st_i64(va, tmp, MMU_PHYS_IDX, MO_LEQ);

            /* Longword physical access with lock */
            ret = gen_store_conditional(ctx, ra, rb, disp12,
                                        MMU_PHYS_IDX, MO_LESL);

            /* Quadword physical access with lock */
            ret = gen_store_conditional(ctx, ra, rb, disp12,
                                        MMU_PHYS_IDX, MO_LEQ);

            /* Longword virtual access */
            /* Quadword virtual access */

            /* Longword virtual access with alternate access mode */
            /* Quadword virtual access with alternate access mode */
        }
#endif
        gen_load_mem(ctx, &gen_qemu_ldf, ra, rb, disp16, 1, 0);
        gen_load_mem(ctx, &gen_qemu_ldg, ra, rb, disp16, 1, 0);
        gen_load_mem(ctx, &gen_qemu_lds, ra, rb, disp16, 1, 0);
        gen_load_mem(ctx, &tcg_gen_qemu_ld64, ra, rb, disp16, 1, 0);

        gen_store_mem(ctx, &gen_qemu_stf, ra, rb, disp16, 1, 0);
        gen_store_mem(ctx, &gen_qemu_stg, ra, rb, disp16, 1, 0);
        gen_store_mem(ctx, &gen_qemu_sts, ra, rb, disp16, 1, 0);
        gen_store_mem(ctx, &tcg_gen_qemu_st64, ra, rb, disp16, 1, 0);

        gen_load_mem(ctx, &tcg_gen_qemu_ld32s, ra, rb, disp16, 0, 0);
        gen_load_mem(ctx, &tcg_gen_qemu_ld64, ra, rb, disp16, 0, 0);
        gen_load_mem(ctx, &gen_qemu_ldl_l, ra, rb, disp16, 0, 0);
        gen_load_mem(ctx, &gen_qemu_ldq_l, ra, rb, disp16, 0, 0);

        gen_store_mem(ctx, &tcg_gen_qemu_st32, ra, rb, disp16, 0, 0);
        gen_store_mem(ctx, &tcg_gen_qemu_st64, ra, rb, disp16, 0, 0);

        ret = gen_store_conditional(ctx, ra, rb, disp16,
                                    ctx->mem_idx, MO_LESL);
        ret = gen_store_conditional(ctx, ra, rb, disp16,
                                    ctx->mem_idx, MO_LEQ);
        ret = gen_bdirect(ctx, ra, disp21);

    case 0x31: /* FBEQ */
        ret = gen_fbcond(ctx, TCG_COND_EQ, ra, disp21);
        break;
    case 0x32: /* FBLT */
        ret = gen_fbcond(ctx, TCG_COND_LT, ra, disp21);
        break;
    case 0x33: /* FBLE */
        ret = gen_fbcond(ctx, TCG_COND_LE, ra, disp21);
        break;

        ret = gen_bdirect(ctx, ra, disp21);

    case 0x35: /* FBNE */
        ret = gen_fbcond(ctx, TCG_COND_NE, ra, disp21);
        break;
    case 0x36: /* FBGE */
        ret = gen_fbcond(ctx, TCG_COND_GE, ra, disp21);
        break;
    case 0x37: /* FBGT */
        ret = gen_fbcond(ctx, TCG_COND_GT, ra, disp21);
        break;

        ret = gen_bcond(ctx, TCG_COND_EQ, ra, disp21, 1);
        ret = gen_bcond(ctx, TCG_COND_EQ, ra, disp21, 0);
        ret = gen_bcond(ctx, TCG_COND_LT, ra, disp21, 0);
        ret = gen_bcond(ctx, TCG_COND_LE, ra, disp21, 0);
        ret = gen_bcond(ctx, TCG_COND_NE, ra, disp21, 1);
        ret = gen_bcond(ctx, TCG_COND_NE, ra, disp21, 0);
        ret = gen_bcond(ctx, TCG_COND_GE, ra, disp21, 0);
        ret = gen_bcond(ctx, TCG_COND_GT, ra, disp21, 0);

        ret = gen_invalid(ctx);
    }

    return ret;
}
void gen_intermediate_code(CPUAlphaState *env, struct TranslationBlock *tb)
{
    AlphaCPU *cpu = alpha_env_get_cpu(env);
    CPUState *cs = CPU(cpu);
    DisasContext ctx, *ctxp = &ctx;
    target_ulong pc_start;
    target_ulong pc_mask;
    uint32_t insn;
    ExitStatus ret;
    int num_insns;
    int max_insns;

    pc_start = tb->pc;

    ctx.tb = tb;
    ctx.pc = pc_start;
    ctx.mem_idx = cpu_mmu_index(env, false);
    ctx.implver = env->implver;
    ctx.singlestep_enabled = cs->singlestep_enabled;

#ifdef CONFIG_USER_ONLY
    ctx.ir = cpu_std_ir;
#else
    ctx.palbr = env->palbr;
    ctx.ir = (tb->flags & TB_FLAGS_PAL_MODE ? cpu_pal_ir : cpu_std_ir);
#endif

    /* ??? Every TB begins with unset rounding mode, to be initialized on
       the first fp insn of the TB.  Alternately we could define a proper
       default for every TB (e.g. QUAL_RM_N or QUAL_RM_D) and make sure
       to reset the FP_STATUS to that default at the end of any TB that
       changes the default.  We could even (gasp) dynamically figure out
       what default would be most efficient given the running program.  */
    ctx.tb_rm = -1;
    /* Similarly for flush-to-zero.  */
    ctx.tb_ftz = -1;

    TCGV_UNUSED_I64(ctx.zero);
    TCGV_UNUSED_I64(ctx.sink);
    TCGV_UNUSED_I64(ctx.lit);

    num_insns = 0;
    max_insns = tb->cflags & CF_COUNT_MASK;
    if (max_insns == 0) {
        max_insns = CF_COUNT_MASK;
    }
    if (max_insns > TCG_MAX_INSNS) {
        max_insns = TCG_MAX_INSNS;
    }

    if (in_superpage(&ctx, pc_start)) {
        pc_mask = (1ULL << 41) - 1;
    } else {
        pc_mask = ~TARGET_PAGE_MASK;
    }

    do {
        tcg_gen_insn_start(ctx.pc);
        num_insns++;

        if (unlikely(cpu_breakpoint_test(cs, ctx.pc, BP_ANY))) {
            ret = gen_excp(&ctx, EXCP_DEBUG, 0);
            /* The address covered by the breakpoint must be included in
               [tb->pc, tb->pc + tb->size) in order for it to be
               properly cleared -- thus we increment the PC here so that
               the logic setting tb->size below does the right thing.  */
            ctx.pc += 4;
            break;
        }
        if (num_insns == max_insns && (tb->cflags & CF_LAST_IO)) {
            gen_io_start();
        }
        insn = cpu_ldl_code(env, ctx.pc);

        ctx.pc += 4;
        ret = translate_one(ctxp, insn);
        free_context_temps(ctxp);

        /* If we reach a page boundary, are single stepping,
           or exhaust instruction count, stop generation.  */
        if (ret == NO_EXIT
            && ((ctx.pc & pc_mask) == 0
                || tcg_op_buf_full()
                || num_insns >= max_insns
                || singlestep
                || ctx.singlestep_enabled)) {
            ret = EXIT_FALLTHRU;
        }
    } while (ret == NO_EXIT);

    if (tb->cflags & CF_LAST_IO) {
        gen_io_end();
    }

    switch (ret) {
    case EXIT_FALLTHRU:
        if (use_goto_tb(&ctx, ctx.pc)) {
            tcg_gen_goto_tb(0);
            tcg_gen_movi_i64(cpu_pc, ctx.pc);
            tcg_gen_exit_tb((uintptr_t)ctx.tb);
        }
        /* FALLTHRU */
    case EXIT_PC_STALE:
        tcg_gen_movi_i64(cpu_pc, ctx.pc);
        /* FALLTHRU */
    case EXIT_PC_UPDATED:
        if (ctx.singlestep_enabled) {
            gen_excp_1(EXCP_DEBUG, 0);
        }
        break;
    default:
        g_assert_not_reached();
    }

    gen_tb_end(tb, num_insns);

    tb->size = ctx.pc - pc_start;
    tb->icount = num_insns;

    if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)
        && qemu_log_in_addr_range(pc_start)) {
        qemu_log("IN: %s\n", lookup_symbol(pc_start));
        log_target_disas(cs, pc_start, ctx.pc - pc_start, 1);
    }
}

void restore_state_to_opc(CPUAlphaState *env, TranslationBlock *tb,