/*
 * Alpha emulation cpu translation for qemu.
 *
 * Copyright (c) 2007 Jocelyn Mayer
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"
#include "cpu.h"
#include "disas/disas.h"
#include "qemu/host-utils.h"
#include "tcg-op.h"
#include "exec/cpu_ldst.h"

#include "exec/helper-proto.h"
#include "exec/helper-gen.h"

#include "trace-tcg.h"
#undef ALPHA_DEBUG_DISAS
#define CONFIG_SOFTFLOAT_INLINE

#ifdef ALPHA_DEBUG_DISAS
#  define LOG_DISAS(...) qemu_log_mask(CPU_LOG_TB_IN_ASM, ## __VA_ARGS__)
#else
#  define LOG_DISAS(...) do { } while (0)
#endif
typedef struct DisasContext DisasContext;
struct DisasContext {
    struct TranslationBlock *tb;

    uint64_t pc;
#ifndef CONFIG_USER_ONLY
    uint64_t palbr;
#endif
    int mem_idx;

    /* Current rounding mode for this TB.  */
    int tb_rm;
    /* Current flush-to-zero setting for this TB.  */
    int tb_ftz;

    /* implver value for this CPU.  */
    int implver;

    /* The set of registers active in the current context.  */
    TCGv *ir;

    /* Temporaries for $31 and $f31 as source and destination.  */
    TCGv zero;
    TCGv sink;
    /* Temporary for immediate constants.  */
    TCGv lit;

    bool singlestep_enabled;
};
/* Return values from translate_one, indicating the state of the TB.
   Note that zero indicates that we are not exiting the TB.  */

typedef enum {
    NO_EXIT,

    /* We have emitted one or more goto_tb.  No fixup required.  */
    EXIT_GOTO_TB,

    /* We are not using a goto_tb (for whatever reason), but have updated
       the PC (for whatever reason), so there's no need to do it again on
       exiting the TB.  */
    EXIT_PC_UPDATED,

    /* We are exiting the TB, but have neither emitted a goto_tb, nor
       updated the PC for the next instruction to be executed.  */
    EXIT_PC_STALE,

    /* We are ending the TB with a noreturn function call, e.g. longjmp.
       No following code will be executed.  */
    EXIT_NORETURN,
} ExitStatus;
/* global register indexes */
static TCGv_ptr cpu_env;
static TCGv cpu_std_ir[31];
static TCGv cpu_fir[31];
static TCGv cpu_pc;
static TCGv cpu_lock_addr;
static TCGv cpu_lock_st_addr;
static TCGv cpu_lock_value;

#ifndef CONFIG_USER_ONLY
static TCGv cpu_pal_ir[31];
#endif

#include "exec/gen-icount.h"
void alpha_translate_init(void)
{
#define DEF_VAR(V)  { &cpu_##V, #V, offsetof(CPUAlphaState, V) }

    typedef struct { TCGv *var; const char *name; int ofs; } GlobalVar;
    static const GlobalVar vars[] = {
        DEF_VAR(pc),
        DEF_VAR(lock_addr),
        DEF_VAR(lock_st_addr),
        DEF_VAR(lock_value),
    };

#undef DEF_VAR

    /* Use the symbolic register names that match the disassembler.  */
    static const char greg_names[31][4] = {
        "v0", "t0", "t1", "t2", "t3", "t4", "t5", "t6",
        "t7", "s0", "s1", "s2", "s3", "s4", "s5", "fp",
        "a0", "a1", "a2", "a3", "a4", "a5", "t8", "t9",
        "t10", "t11", "ra", "t12", "at", "gp", "sp"
    };
    static const char freg_names[31][4] = {
        "f0", "f1", "f2", "f3", "f4", "f5", "f6", "f7",
        "f8", "f9", "f10", "f11", "f12", "f13", "f14", "f15",
        "f16", "f17", "f18", "f19", "f20", "f21", "f22", "f23",
        "f24", "f25", "f26", "f27", "f28", "f29", "f30"
    };
#ifndef CONFIG_USER_ONLY
    static const char shadow_names[8][8] = {
        "pal_t7", "pal_s0", "pal_s1", "pal_s2",
        "pal_s3", "pal_s4", "pal_s5", "pal_t11"
    };
#endif

    static bool done_init = 0;
    int i;

    if (done_init) {
        return;
    }
    done_init = 1;

    cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");

    for (i = 0; i < 31; i++) {
        cpu_std_ir[i] = tcg_global_mem_new_i64(TCG_AREG0,
                                               offsetof(CPUAlphaState, ir[i]),
                                               greg_names[i]);
    }

    for (i = 0; i < 31; i++) {
        cpu_fir[i] = tcg_global_mem_new_i64(TCG_AREG0,
                                            offsetof(CPUAlphaState, fir[i]),
                                            freg_names[i]);
    }

#ifndef CONFIG_USER_ONLY
    memcpy(cpu_pal_ir, cpu_std_ir, sizeof(cpu_pal_ir));
    for (i = 0; i < 8; i++) {
        int r = (i == 7 ? 25 : i + 8);
        cpu_pal_ir[r] = tcg_global_mem_new_i64(TCG_AREG0,
                                               offsetof(CPUAlphaState,
                                                        shadow[i]),
                                               shadow_names[i]);
    }
#endif

    for (i = 0; i < ARRAY_SIZE(vars); ++i) {
        const GlobalVar *v = &vars[i];
        *v->var = tcg_global_mem_new_i64(TCG_AREG0, v->ofs, v->name);
    }
}
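/* For reference, DEF_VAR(lock_st_addr) above expands to
   { &cpu_lock_st_addr, "lock_st_addr", offsetof(CPUAlphaState, lock_st_addr) },
   so the loop just above registers each listed global with TCG under the
   name of its CPUAlphaState field. */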
static TCGv load_zero(DisasContext *ctx)
{
    if (TCGV_IS_UNUSED_I64(ctx->zero)) {
        ctx->zero = tcg_const_i64(0);
    }
    return ctx->zero;
}

static TCGv dest_sink(DisasContext *ctx)
{
    if (TCGV_IS_UNUSED_I64(ctx->sink)) {
        ctx->sink = tcg_temp_new();
    }
    return ctx->sink;
}

static TCGv load_gpr(DisasContext *ctx, unsigned reg)
{
    if (likely(reg < 31)) {
        return ctx->ir[reg];
    } else {
        return load_zero(ctx);
    }
}

static TCGv load_gpr_lit(DisasContext *ctx, unsigned reg,
                         uint8_t lit, bool islit)
{
    if (islit) {
        ctx->lit = tcg_const_i64(lit);
        return ctx->lit;
    } else if (likely(reg < 31)) {
        return ctx->ir[reg];
    } else {
        return load_zero(ctx);
    }
}

static TCGv dest_gpr(DisasContext *ctx, unsigned reg)
{
    if (likely(reg < 31)) {
        return ctx->ir[reg];
    } else {
        return dest_sink(ctx);
    }
}

static TCGv load_fpr(DisasContext *ctx, unsigned reg)
{
    if (likely(reg < 31)) {
        return cpu_fir[reg];
    } else {
        return load_zero(ctx);
    }
}

static TCGv dest_fpr(DisasContext *ctx, unsigned reg)
{
    if (likely(reg < 31)) {
        return cpu_fir[reg];
    } else {
        return dest_sink(ctx);
    }
}
static void gen_excp_1(int exception, int error_code)
{
    TCGv_i32 tmp1, tmp2;

    tmp1 = tcg_const_i32(exception);
    tmp2 = tcg_const_i32(error_code);
    gen_helper_excp(cpu_env, tmp1, tmp2);
    tcg_temp_free_i32(tmp2);
    tcg_temp_free_i32(tmp1);
}

static ExitStatus gen_excp(DisasContext *ctx, int exception, int error_code)
{
    tcg_gen_movi_i64(cpu_pc, ctx->pc);
    gen_excp_1(exception, error_code);
    return EXIT_NORETURN;
}

static inline ExitStatus gen_invalid(DisasContext *ctx)
{
    return gen_excp(ctx, EXCP_OPCDEC, 0);
}
static inline void gen_qemu_ldf(TCGv t0, TCGv t1, int flags)
{
    TCGv_i32 tmp32 = tcg_temp_new_i32();
    tcg_gen_qemu_ld_i32(tmp32, t1, flags, MO_LEUL);
    gen_helper_memory_to_f(t0, tmp32);
    tcg_temp_free_i32(tmp32);
}

static inline void gen_qemu_ldg(TCGv t0, TCGv t1, int flags)
{
    TCGv tmp = tcg_temp_new();
    tcg_gen_qemu_ld_i64(tmp, t1, flags, MO_LEQ);
    gen_helper_memory_to_g(t0, tmp);
    tcg_temp_free(tmp);
}

static inline void gen_qemu_lds(TCGv t0, TCGv t1, int flags)
{
    TCGv_i32 tmp32 = tcg_temp_new_i32();
    tcg_gen_qemu_ld_i32(tmp32, t1, flags, MO_LEUL);
    gen_helper_memory_to_s(t0, tmp32);
    tcg_temp_free_i32(tmp32);
}

static inline void gen_qemu_ldl_l(TCGv t0, TCGv t1, int flags)
{
    tcg_gen_qemu_ld_i64(t0, t1, flags, MO_LESL);
    tcg_gen_mov_i64(cpu_lock_addr, t1);
    tcg_gen_mov_i64(cpu_lock_value, t0);
}

static inline void gen_qemu_ldq_l(TCGv t0, TCGv t1, int flags)
{
    tcg_gen_qemu_ld_i64(t0, t1, flags, MO_LEQ);
    tcg_gen_mov_i64(cpu_lock_addr, t1);
    tcg_gen_mov_i64(cpu_lock_value, t0);
}
static inline void gen_load_mem(DisasContext *ctx,
                                void (*tcg_gen_qemu_load)(TCGv t0, TCGv t1,
                                                          int flags),
                                int ra, int rb, int32_t disp16, bool fp,
                                bool clear)
{
    TCGv tmp, addr, va;

    /* LDQ_U with ra $31 is UNOP.  Other various loads are forms of
       prefetches, which we can treat as nops.  No worries about
       missed exceptions here.  */
    if (unlikely(ra == 31)) {
        return;
    }

    tmp = tcg_temp_new();
    addr = load_gpr(ctx, rb);

    if (disp16) {
        tcg_gen_addi_i64(tmp, addr, disp16);
        addr = tmp;
    }
    if (clear) {
        tcg_gen_andi_i64(tmp, addr, ~0x7);
        addr = tmp;
    }

    va = (fp ? cpu_fir[ra] : ctx->ir[ra]);
    tcg_gen_qemu_load(va, addr, ctx->mem_idx);

    tcg_temp_free(tmp);
}
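/* Typical usage, as seen in translate_one below: LDL becomes
   gen_load_mem(ctx, &tcg_gen_qemu_ld32s, ra, rb, disp16, 0, 0), while
   LDQ_U passes clear=1 so the low three bits of the address are masked. */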
static inline void gen_qemu_stf(TCGv t0, TCGv t1, int flags)
{
    TCGv_i32 tmp32 = tcg_temp_new_i32();
    gen_helper_f_to_memory(tmp32, t0);
    tcg_gen_qemu_st_i32(tmp32, t1, flags, MO_LEUL);
    tcg_temp_free_i32(tmp32);
}

static inline void gen_qemu_stg(TCGv t0, TCGv t1, int flags)
{
    TCGv tmp = tcg_temp_new();
    gen_helper_g_to_memory(tmp, t0);
    tcg_gen_qemu_st_i64(tmp, t1, flags, MO_LEQ);
    tcg_temp_free(tmp);
}

static inline void gen_qemu_sts(TCGv t0, TCGv t1, int flags)
{
    TCGv_i32 tmp32 = tcg_temp_new_i32();
    gen_helper_s_to_memory(tmp32, t0);
    tcg_gen_qemu_st_i32(tmp32, t1, flags, MO_LEUL);
    tcg_temp_free_i32(tmp32);
}
static inline void gen_store_mem(DisasContext *ctx,
                                 void (*tcg_gen_qemu_store)(TCGv t0, TCGv t1,
                                                            int flags),
                                 int ra, int rb, int32_t disp16, bool fp,
                                 bool clear)
{
    TCGv tmp, addr, va;

    tmp = tcg_temp_new();
    addr = load_gpr(ctx, rb);

    if (disp16) {
        tcg_gen_addi_i64(tmp, addr, disp16);
        addr = tmp;
    }
    if (clear) {
        tcg_gen_andi_i64(tmp, addr, ~0x7);
        addr = tmp;
    }

    va = (fp ? load_fpr(ctx, ra) : load_gpr(ctx, ra));
    tcg_gen_qemu_store(va, addr, ctx->mem_idx);

    tcg_temp_free(tmp);
}
static ExitStatus gen_store_conditional(DisasContext *ctx, int ra, int rb,
                                        int32_t disp16, int quad)
{
    TCGv addr;

    if (ra == 31) {
        /* ??? Don't bother storing anything.  The user can't tell
           the difference, since the zero register always reads zero.  */
        return NO_EXIT;
    }

#if defined(CONFIG_USER_ONLY)
    addr = cpu_lock_st_addr;
#else
    addr = tcg_temp_local_new();
#endif

    tcg_gen_addi_i64(addr, load_gpr(ctx, rb), disp16);

#if defined(CONFIG_USER_ONLY)
    /* ??? This is handled via a complicated version of compare-and-swap
       in the cpu_loop.  Hopefully one day we'll have a real CAS opcode
       in TCG so that this isn't necessary.  */
    return gen_excp(ctx, quad ? EXCP_STQ_C : EXCP_STL_C, ra);
#else
    /* ??? In system mode we are never multi-threaded, so CAS can be
       implemented via a non-atomic load-compare-store sequence.  */
    {
        TCGLabel *lab_fail, *lab_done;
        TCGv val;

        lab_fail = gen_new_label();
        lab_done = gen_new_label();
        tcg_gen_brcond_i64(TCG_COND_NE, addr, cpu_lock_addr, lab_fail);

        val = tcg_temp_new();
        tcg_gen_qemu_ld_i64(val, addr, ctx->mem_idx, quad ? MO_LEQ : MO_LESL);
        tcg_gen_brcond_i64(TCG_COND_NE, val, cpu_lock_value, lab_fail);

        tcg_gen_qemu_st_i64(ctx->ir[ra], addr, ctx->mem_idx,
                            quad ? MO_LEQ : MO_LEUL);
        tcg_gen_movi_i64(ctx->ir[ra], 1);
        tcg_gen_br(lab_done);

        gen_set_label(lab_fail);
        tcg_gen_movi_i64(ctx->ir[ra], 0);

        gen_set_label(lab_done);
        tcg_gen_movi_i64(cpu_lock_addr, -1);

        tcg_temp_free(addr);
        return NO_EXIT;
    }
#endif
}
static bool in_superpage(DisasContext *ctx, int64_t addr)
{
    return ((ctx->tb->flags & TB_FLAGS_USER_MODE) == 0
            && ((addr >> 41) & 3) == 2
            && addr >> TARGET_VIRT_ADDR_SPACE_BITS == addr >> 63);
}

static bool use_goto_tb(DisasContext *ctx, uint64_t dest)
{
    /* Suppress goto_tb in the case of single-stepping and IO.  */
    if ((ctx->tb->cflags & CF_LAST_IO)
        || ctx->singlestep_enabled || singlestep) {
        return false;
    }
    /* If the destination is in the superpage, the page perms can't change.  */
    if (in_superpage(ctx, dest)) {
        return true;
    }
    /* Check for the dest on the same page as the start of the TB.  */
    return ((ctx->tb->pc ^ dest) & TARGET_PAGE_MASK) == 0;
}
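/* When use_goto_tb() returns false, the branch generators below fall back
   to writing cpu_pc explicitly and returning EXIT_PC_UPDATED instead of
   chaining the TB with goto_tb/exit_tb. */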
static ExitStatus gen_bdirect(DisasContext *ctx, int ra, int32_t disp)
{
    uint64_t dest = ctx->pc + (disp << 2);

    if (ra != 31) {
        tcg_gen_movi_i64(ctx->ir[ra], ctx->pc);
    }

    /* Notice branch-to-next; used to initialize RA with the PC.  */
    if (disp == 0) {
        return 0;
    } else if (use_goto_tb(ctx, dest)) {
        tcg_gen_goto_tb(0);
        tcg_gen_movi_i64(cpu_pc, dest);
        tcg_gen_exit_tb((uintptr_t)ctx->tb);
        return EXIT_GOTO_TB;
    } else {
        tcg_gen_movi_i64(cpu_pc, dest);
        return EXIT_PC_UPDATED;
    }
}
static ExitStatus gen_bcond_internal(DisasContext *ctx, TCGCond cond,
                                     TCGv cmp, int32_t disp)
{
    uint64_t dest = ctx->pc + (disp << 2);
    TCGLabel *lab_true = gen_new_label();

    if (use_goto_tb(ctx, dest)) {
        tcg_gen_brcondi_i64(cond, cmp, 0, lab_true);

        tcg_gen_goto_tb(0);
        tcg_gen_movi_i64(cpu_pc, ctx->pc);
        tcg_gen_exit_tb((uintptr_t)ctx->tb);

        gen_set_label(lab_true);
        tcg_gen_goto_tb(1);
        tcg_gen_movi_i64(cpu_pc, dest);
        tcg_gen_exit_tb((uintptr_t)ctx->tb + 1);

        return EXIT_GOTO_TB;
    } else {
        TCGv_i64 z = tcg_const_i64(0);
        TCGv_i64 d = tcg_const_i64(dest);
        TCGv_i64 p = tcg_const_i64(ctx->pc);

        tcg_gen_movcond_i64(cond, cpu_pc, cmp, z, d, p);

        tcg_temp_free_i64(z);
        tcg_temp_free_i64(d);
        tcg_temp_free_i64(p);
        return EXIT_PC_UPDATED;
    }
}
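/* The "+ 1" in the second tcg_gen_exit_tb() call above encodes which of the
   two goto_tb slots the TB is being exited through, so the taken and
   not-taken directions can each be chained to their own successor TB. */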
static ExitStatus gen_bcond(DisasContext *ctx, TCGCond cond, int ra,
                            int32_t disp, int mask)
{
    TCGv cmp_tmp;

    if (mask) {
        cmp_tmp = tcg_temp_new();
        tcg_gen_andi_i64(cmp_tmp, load_gpr(ctx, ra), 1);
    } else {
        cmp_tmp = load_gpr(ctx, ra);
    }

    return gen_bcond_internal(ctx, cond, cmp_tmp, disp);
}
/* Fold -0.0 for comparison with COND.  */

static void gen_fold_mzero(TCGCond cond, TCGv dest, TCGv src)
{
    uint64_t mzero = 1ull << 63;

    switch (cond) {
    case TCG_COND_LE:
    case TCG_COND_GT:
        /* For <= or >, the -0.0 value directly compares the way we want.  */
        tcg_gen_mov_i64(dest, src);
        break;

    case TCG_COND_EQ:
    case TCG_COND_NE:
        /* For == or !=, we can simply mask off the sign bit and compare.  */
        tcg_gen_andi_i64(dest, src, mzero - 1);
        break;

    case TCG_COND_GE:
    case TCG_COND_LT:
        /* For >= or <, map -0.0 to +0.0 via comparison and mask.  */
        tcg_gen_setcondi_i64(TCG_COND_NE, dest, src, mzero);
        tcg_gen_neg_i64(dest, dest);
        tcg_gen_and_i64(dest, dest, src);
        break;

    default:
        abort();
    }
}
static ExitStatus gen_fbcond(DisasContext *ctx, TCGCond cond, int ra,
                             int32_t disp)
{
    TCGv cmp_tmp = tcg_temp_new();
    gen_fold_mzero(cond, cmp_tmp, load_fpr(ctx, ra));
    return gen_bcond_internal(ctx, cond, cmp_tmp, disp);
}
static void gen_fcmov(DisasContext *ctx, TCGCond cond, int ra, int rb, int rc)
{
    TCGv va, vb, z;

    z = load_zero(ctx);
    vb = load_fpr(ctx, rb);
    va = tcg_temp_new();
    gen_fold_mzero(cond, va, load_fpr(ctx, ra));

    tcg_gen_movcond_i64(cond, dest_fpr(ctx, rc), va, z, vb, load_fpr(ctx, rc));

    tcg_temp_free(va);
}
#define QUAL_RM_N       0x080   /* Round mode nearest even */
#define QUAL_RM_C       0x000   /* Round mode chopped */
#define QUAL_RM_M       0x040   /* Round mode minus infinity */
#define QUAL_RM_D       0x0c0   /* Round mode dynamic */
#define QUAL_RM_MASK    0x0c0

#define QUAL_U          0x100   /* Underflow enable (fp output) */
#define QUAL_V          0x100   /* Overflow enable (int output) */
#define QUAL_S          0x400   /* Software completion enable */
#define QUAL_I          0x200   /* Inexact detection enable */
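/* Example: an instruction with the /SUI qualifiers has
   QUAL_S | QUAL_U | QUAL_I = 0x700 set in fn11, in addition to the
   rounding-mode field selected by fn11 & QUAL_RM_MASK. */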
static void gen_qual_roundmode(DisasContext *ctx, int fn11)
{
    TCGv_i32 tmp;

    fn11 &= QUAL_RM_MASK;
    if (fn11 == ctx->tb_rm) {
        return;
    }
    ctx->tb_rm = fn11;

    tmp = tcg_temp_new_i32();
    switch (fn11) {
    case QUAL_RM_N:
        tcg_gen_movi_i32(tmp, float_round_nearest_even);
        break;
    case QUAL_RM_C:
        tcg_gen_movi_i32(tmp, float_round_to_zero);
        break;
    case QUAL_RM_M:
        tcg_gen_movi_i32(tmp, float_round_down);
        break;
    case QUAL_RM_D:
        tcg_gen_ld8u_i32(tmp, cpu_env,
                         offsetof(CPUAlphaState, fpcr_dyn_round));
        break;
    }

#if defined(CONFIG_SOFTFLOAT_INLINE)
    /* ??? The "fpu/softfloat.h" interface is to call set_float_rounding_mode.
       With CONFIG_SOFTFLOAT that expands to an out-of-line call that just
       sets the one field.  */
    tcg_gen_st8_i32(tmp, cpu_env,
                    offsetof(CPUAlphaState, fp_status.float_rounding_mode));
#else
    gen_helper_setroundmode(tmp);
#endif

    tcg_temp_free_i32(tmp);
}
static void gen_qual_flushzero(DisasContext *ctx, int fn11)
{
    TCGv_i32 tmp;

    fn11 &= QUAL_U;
    if (fn11 == ctx->tb_ftz) {
        return;
    }
    ctx->tb_ftz = fn11;

    tmp = tcg_temp_new_i32();
    if (fn11) {
        /* Underflow is enabled, use the FPCR setting.  */
        tcg_gen_ld8u_i32(tmp, cpu_env,
                         offsetof(CPUAlphaState, fpcr_flush_to_zero));
    } else {
        /* Underflow is disabled, force flush-to-zero.  */
        tcg_gen_movi_i32(tmp, 1);
    }

#if defined(CONFIG_SOFTFLOAT_INLINE)
    tcg_gen_st8_i32(tmp, cpu_env,
                    offsetof(CPUAlphaState, fp_status.flush_to_zero));
#else
    gen_helper_setflushzero(tmp);
#endif

    tcg_temp_free_i32(tmp);
}
static TCGv gen_ieee_input(DisasContext *ctx, int reg, int fn11, int is_cmp)
{
    TCGv val;

    if (unlikely(reg == 31)) {
        val = load_zero(ctx);
    } else {
        val = cpu_fir[reg];
        if ((fn11 & QUAL_S) == 0) {
            if (is_cmp) {
                gen_helper_ieee_input_cmp(cpu_env, val);
            } else {
                gen_helper_ieee_input(cpu_env, val);
            }
        } else {
#ifndef CONFIG_USER_ONLY
            /* In system mode, raise exceptions for denormals like real
               hardware.  In user mode, proceed as if the OS completion
               handler is handling the denormal as per spec.  */
            gen_helper_ieee_input_s(cpu_env, val);
#endif
        }
    }
    return val;
}
static void gen_fp_exc_raise(int rc, int fn11)
{
    /* ??? We ought to be able to do something with imprecise exceptions.
       E.g. notice we're still in the trap shadow of something within the
       TB and do not generate the code to signal the exception; end the TB
       when an exception is forced to arrive, either by consumption of a
       register value or TRAPB or EXCB.  */

    uint32_t ignore = 0;
    TCGv_i32 reg, ign;

    if (!(fn11 & QUAL_U)) {
        /* Note that QUAL_U == QUAL_V, so ignore either.  */
        ignore |= FPCR_UNF | FPCR_IOV;
    }
    if (!(fn11 & QUAL_I)) {
        ignore |= FPCR_INE;
    }
    ign = tcg_const_i32(ignore);

    /* ??? Pass in the regno of the destination so that the helper can
       set EXC_MASK, which contains a bitmask of destination registers
       that have caused arithmetic traps.  A simple userspace emulation
       does not require this.  We do need it for a guest kernel's entArith,
       or if we were to do something clever with imprecise exceptions.  */
    reg = tcg_const_i32(rc + 32);
    if (fn11 & QUAL_S) {
        gen_helper_fp_exc_raise_s(cpu_env, ign, reg);
    } else {
        gen_helper_fp_exc_raise(cpu_env, ign, reg);
    }

    tcg_temp_free_i32(reg);
    tcg_temp_free_i32(ign);
}
static void gen_cvtlq(TCGv vc, TCGv vb)
{
    TCGv tmp = tcg_temp_new();

    /* The arithmetic right shift here, plus the sign-extended mask below
       yields a sign-extended result without an explicit ext32s_i64.  */
    tcg_gen_sari_i64(tmp, vb, 32);
    tcg_gen_shri_i64(vc, vb, 29);
    tcg_gen_andi_i64(tmp, tmp, (int32_t)0xc0000000);
    tcg_gen_andi_i64(vc, vc, 0x3fffffff);
    tcg_gen_or_i64(vc, vc, tmp);

    tcg_temp_free(tmp);
}
static void gen_ieee_arith2(DisasContext *ctx,
                            void (*helper)(TCGv, TCGv_ptr, TCGv),
                            int rb, int rc, int fn11)
{
    TCGv vb;

    gen_qual_roundmode(ctx, fn11);
    gen_qual_flushzero(ctx, fn11);

    vb = gen_ieee_input(ctx, rb, fn11, 0);
    helper(dest_fpr(ctx, rc), cpu_env, vb);

    gen_fp_exc_raise(rc, fn11);
}

#define IEEE_ARITH2(name)                                       \
static inline void glue(gen_, name)(DisasContext *ctx,          \
                                    int rb, int rc, int fn11)   \
{                                                               \
    gen_ieee_arith2(ctx, gen_helper_##name, rb, rc, fn11);      \
}
IEEE_ARITH2(sqrts)
IEEE_ARITH2(sqrtt)
IEEE_ARITH2(cvtst)
IEEE_ARITH2(cvtts)
static void gen_cvttq(DisasContext *ctx, int rb, int rc, int fn11)
{
    TCGv vb, vc;

    /* No need to set flushzero, since we have an integer output.  */
    vb = gen_ieee_input(ctx, rb, fn11, 0);
    vc = dest_fpr(ctx, rc);

    /* Almost all integer conversions use cropped rounding;
       special case that.  */
    if ((fn11 & QUAL_RM_MASK) == QUAL_RM_C) {
        gen_helper_cvttq_c(vc, cpu_env, vb);
    } else {
        gen_qual_roundmode(ctx, fn11);
        gen_helper_cvttq(vc, cpu_env, vb);
    }
    gen_fp_exc_raise(rc, fn11);
}
static void gen_ieee_intcvt(DisasContext *ctx,
                            void (*helper)(TCGv, TCGv_ptr, TCGv),
                            int rb, int rc, int fn11)
{
    TCGv vb, vc;

    gen_qual_roundmode(ctx, fn11);
    vb = load_fpr(ctx, rb);
    vc = dest_fpr(ctx, rc);

    /* The only exception that can be raised by integer conversion
       is inexact.  Thus we only need to worry about exceptions when
       inexact handling is requested.  */
    if (fn11 & QUAL_I) {
        helper(vc, cpu_env, vb);
        gen_fp_exc_raise(rc, fn11);
    } else {
        helper(vc, cpu_env, vb);
    }
}

#define IEEE_INTCVT(name)                                       \
static inline void glue(gen_, name)(DisasContext *ctx,          \
                                    int rb, int rc, int fn11)   \
{                                                               \
    gen_ieee_intcvt(ctx, gen_helper_##name, rb, rc, fn11);      \
}
IEEE_INTCVT(cvtqs)
IEEE_INTCVT(cvtqt)
static void gen_cpy_mask(TCGv vc, TCGv va, TCGv vb, bool inv_a, uint64_t mask)
{
    TCGv vmask = tcg_const_i64(mask);
    TCGv tmp = tcg_temp_new_i64();

    if (inv_a) {
        tcg_gen_andc_i64(tmp, vmask, va);
    } else {
        tcg_gen_and_i64(tmp, va, vmask);
    }

    tcg_gen_andc_i64(vc, vb, vmask);
    tcg_gen_or_i64(vc, vc, tmp);

    tcg_temp_free(vmask);
    tcg_temp_free(tmp);
}
static void gen_ieee_arith3(DisasContext *ctx,
                            void (*helper)(TCGv, TCGv_ptr, TCGv, TCGv),
                            int ra, int rb, int rc, int fn11)
{
    TCGv va, vb, vc;

    gen_qual_roundmode(ctx, fn11);
    gen_qual_flushzero(ctx, fn11);

    va = gen_ieee_input(ctx, ra, fn11, 0);
    vb = gen_ieee_input(ctx, rb, fn11, 0);
    vc = dest_fpr(ctx, rc);
    helper(vc, cpu_env, va, vb);

    gen_fp_exc_raise(rc, fn11);
}

#define IEEE_ARITH3(name)                                               \
static inline void glue(gen_, name)(DisasContext *ctx,                  \
                                    int ra, int rb, int rc, int fn11)   \
{                                                                       \
    gen_ieee_arith3(ctx, gen_helper_##name, ra, rb, rc, fn11);          \
}
IEEE_ARITH3(adds)
IEEE_ARITH3(subs)
IEEE_ARITH3(muls)
IEEE_ARITH3(divs)
IEEE_ARITH3(addt)
IEEE_ARITH3(subt)
IEEE_ARITH3(mult)
IEEE_ARITH3(divt)
static void gen_ieee_compare(DisasContext *ctx,
                             void (*helper)(TCGv, TCGv_ptr, TCGv, TCGv),
                             int ra, int rb, int rc, int fn11)
{
    TCGv va, vb, vc;

    va = gen_ieee_input(ctx, ra, fn11, 1);
    vb = gen_ieee_input(ctx, rb, fn11, 1);
    vc = dest_fpr(ctx, rc);
    helper(vc, cpu_env, va, vb);

    gen_fp_exc_raise(rc, fn11);
}

#define IEEE_CMP3(name)                                                 \
static inline void glue(gen_, name)(DisasContext *ctx,                  \
                                    int ra, int rb, int rc, int fn11)   \
{                                                                       \
    gen_ieee_compare(ctx, gen_helper_##name, ra, rb, rc, fn11);         \
}
IEEE_CMP3(cmptun)
IEEE_CMP3(cmpteq)
IEEE_CMP3(cmptlt)
IEEE_CMP3(cmptle)
static inline uint64_t zapnot_mask(uint8_t lit)
{
    uint64_t mask = 0;
    int i;

    for (i = 0; i < 8; ++i) {
        if ((lit >> i) & 1) {
            mask |= 0xffull << (i * 8);
        }
    }
    return mask;
}

/* Implement zapnot with an immediate operand, which expands to some
   form of immediate AND.  This is a basic building block in the
   definition of many of the other byte manipulation instructions.  */
static void gen_zapnoti(TCGv dest, TCGv src, uint8_t lit)
{
    switch (lit) {
    case 0x00:
        tcg_gen_movi_i64(dest, 0);
        break;
    case 0x01:
        tcg_gen_ext8u_i64(dest, src);
        break;
    case 0x03:
        tcg_gen_ext16u_i64(dest, src);
        break;
    case 0x0f:
        tcg_gen_ext32u_i64(dest, src);
        break;
    case 0xff:
        tcg_gen_mov_i64(dest, src);
        break;
    default:
        tcg_gen_andi_i64(dest, src, zapnot_mask(lit));
        break;
    }
}
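/* Worked example: lit = 0x0f selects bytes 0-3, so zapnot_mask(0x0f) is
   0x00000000ffffffff and ZAPNOT with that literal is exactly the 32-bit
   zero-extension special-cased above. */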
/* EXTWH, EXTLH, EXTQH */
static void gen_ext_h(DisasContext *ctx, TCGv vc, TCGv va, int rb, bool islit,
                      uint8_t lit, uint8_t byte_mask)
{
    if (islit) {
        tcg_gen_shli_i64(vc, va, (64 - lit * 8) & 0x3f);
    } else {
        TCGv tmp = tcg_temp_new();
        tcg_gen_shli_i64(tmp, load_gpr(ctx, rb), 3);
        tcg_gen_neg_i64(tmp, tmp);
        tcg_gen_andi_i64(tmp, tmp, 0x3f);
        tcg_gen_shl_i64(vc, va, tmp);
        tcg_temp_free(tmp);
    }
    gen_zapnoti(vc, vc, byte_mask);
}
/* EXTBL, EXTWL, EXTLL, EXTQL */
static void gen_ext_l(DisasContext *ctx, TCGv vc, TCGv va, int rb, bool islit,
                      uint8_t lit, uint8_t byte_mask)
{
    if (islit) {
        tcg_gen_shri_i64(vc, va, (lit & 7) * 8);
    } else {
        TCGv tmp = tcg_temp_new();
        tcg_gen_andi_i64(tmp, load_gpr(ctx, rb), 7);
        tcg_gen_shli_i64(tmp, tmp, 3);
        tcg_gen_shr_i64(vc, va, tmp);
        tcg_temp_free(tmp);
    }
    gen_zapnoti(vc, vc, byte_mask);
}
/* INSWH, INSLH, INSQH */
static void gen_ins_h(DisasContext *ctx, TCGv vc, TCGv va, int rb, bool islit,
                      uint8_t lit, uint8_t byte_mask)
{
    TCGv tmp = tcg_temp_new();

    /* The instruction description has us left-shift the byte mask and extract
       bits <15:8> and apply that zap at the end.  This is equivalent to simply
       performing the zap first and shifting afterward.  */
    gen_zapnoti(tmp, va, byte_mask);

    if (islit) {
        lit &= 7;
        if (unlikely(lit == 0)) {
            tcg_gen_movi_i64(vc, 0);
        } else {
            tcg_gen_shri_i64(vc, tmp, 64 - lit * 8);
        }
    } else {
        TCGv shift = tcg_temp_new();

        /* If (B & 7) == 0, we need to shift by 64 and leave a zero.  Do this
           portably by splitting the shift into two parts: shift_count-1 and 1.
           Arrange for the -1 by using ones-complement instead of
           twos-complement in the negation: ~(B * 8) & 63.  */

        tcg_gen_shli_i64(shift, load_gpr(ctx, rb), 3);
        tcg_gen_not_i64(shift, shift);
        tcg_gen_andi_i64(shift, shift, 0x3f);

        tcg_gen_shr_i64(vc, tmp, shift);
        tcg_gen_shri_i64(vc, vc, 1);
        tcg_temp_free(shift);
    }
    tcg_temp_free(tmp);
}
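/* Worked example of the split shift above: when B & 7 == 0, the computed
   shift is ~(B * 8) & 63 = 63, and the extra shri by 1 brings the total to
   64, correctly producing zero without an out-of-range shift count. */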
/* INSBL, INSWL, INSLL, INSQL */
static void gen_ins_l(DisasContext *ctx, TCGv vc, TCGv va, int rb, bool islit,
                      uint8_t lit, uint8_t byte_mask)
{
    TCGv tmp = tcg_temp_new();

    /* The instruction description has us left-shift the byte mask
       the same number of byte slots as the data and apply the zap
       at the end.  This is equivalent to simply performing the zap
       first and shifting afterward.  */
    gen_zapnoti(tmp, va, byte_mask);

    if (islit) {
        tcg_gen_shli_i64(vc, tmp, (lit & 7) * 8);
    } else {
        TCGv shift = tcg_temp_new();
        tcg_gen_andi_i64(shift, load_gpr(ctx, rb), 7);
        tcg_gen_shli_i64(shift, shift, 3);
        tcg_gen_shl_i64(vc, tmp, shift);
        tcg_temp_free(shift);
    }
    tcg_temp_free(tmp);
}
/* MSKWH, MSKLH, MSKQH */
static void gen_msk_h(DisasContext *ctx, TCGv vc, TCGv va, int rb, bool islit,
                      uint8_t lit, uint8_t byte_mask)
{
    if (islit) {
        gen_zapnoti(vc, va, ~((byte_mask << (lit & 7)) >> 8));
    } else {
        TCGv shift = tcg_temp_new();
        TCGv mask = tcg_temp_new();

        /* The instruction description is as above, where the byte_mask
           is shifted left, and then we extract bits <15:8>.  This can be
           emulated with a right-shift on the expanded byte mask.  This
           requires extra care because for an input <2:0> == 0 we need a
           shift of 64 bits in order to generate a zero.  This is done by
           splitting the shift into two parts, the variable shift - 1
           followed by a constant 1 shift.  The code we expand below is
           equivalent to ~(B * 8) & 63.  */

        tcg_gen_shli_i64(shift, load_gpr(ctx, rb), 3);
        tcg_gen_not_i64(shift, shift);
        tcg_gen_andi_i64(shift, shift, 0x3f);
        tcg_gen_movi_i64(mask, zapnot_mask(byte_mask));
        tcg_gen_shr_i64(mask, mask, shift);
        tcg_gen_shri_i64(mask, mask, 1);

        tcg_gen_andc_i64(vc, va, mask);

        tcg_temp_free(mask);
        tcg_temp_free(shift);
    }
}
/* MSKBL, MSKWL, MSKLL, MSKQL */
static void gen_msk_l(DisasContext *ctx, TCGv vc, TCGv va, int rb, bool islit,
                      uint8_t lit, uint8_t byte_mask)
{
    if (islit) {
        gen_zapnoti(vc, va, ~(byte_mask << (lit & 7)));
    } else {
        TCGv shift = tcg_temp_new();
        TCGv mask = tcg_temp_new();

        tcg_gen_andi_i64(shift, load_gpr(ctx, rb), 7);
        tcg_gen_shli_i64(shift, shift, 3);
        tcg_gen_movi_i64(mask, zapnot_mask(byte_mask));
        tcg_gen_shl_i64(mask, mask, shift);

        tcg_gen_andc_i64(vc, va, mask);

        tcg_temp_free(mask);
        tcg_temp_free(shift);
    }
}
static void gen_rx(DisasContext *ctx, int ra, int set)
{
    TCGv_i32 tmp;

    if (ra != 31) {
        tcg_gen_ld8u_i64(ctx->ir[ra], cpu_env,
                         offsetof(CPUAlphaState, intr_flag));
    }

    tmp = tcg_const_i32(set);
    tcg_gen_st8_i32(tmp, cpu_env, offsetof(CPUAlphaState, intr_flag));
    tcg_temp_free_i32(tmp);
}
static ExitStatus gen_call_pal(DisasContext *ctx, int palcode)
{
    /* We're emulating OSF/1 PALcode.  Many of these are trivial access
       to internal cpu registers.  */

    /* Unprivileged PAL call */
    if (palcode >= 0x80 && palcode < 0xC0) {
        /* No-op inside QEMU.  */
        tcg_gen_ld_i64(ctx->ir[IR_V0], cpu_env,
                       offsetof(CPUAlphaState, unique));
        tcg_gen_st_i64(ctx->ir[IR_A0], cpu_env,
                       offsetof(CPUAlphaState, unique));
    }

#ifndef CONFIG_USER_ONLY
    /* Privileged PAL code */
    if (palcode < 0x40 && (ctx->tb->flags & TB_FLAGS_USER_MODE) == 0) {
        /* No-op inside QEMU.  */
        /* No-op inside QEMU.  */
        tcg_gen_st_i64(ctx->ir[IR_A0], cpu_env,
                       offsetof(CPUAlphaState, vptptr));
        tcg_gen_st_i64(ctx->ir[IR_A0], cpu_env,
                       offsetof(CPUAlphaState, sysval));
        tcg_gen_ld_i64(ctx->ir[IR_V0], cpu_env,
                       offsetof(CPUAlphaState, sysval));

        /* Note that we already know we're in kernel mode, so we know
           that PS only contains the 3 IPL bits.  */
        tcg_gen_ld8u_i64(ctx->ir[IR_V0], cpu_env,
                         offsetof(CPUAlphaState, ps));

        /* But make sure and store only the 3 IPL bits from the user.  */
        tmp = tcg_temp_new();
        tcg_gen_andi_i64(tmp, ctx->ir[IR_A0], PS_INT_MASK);
        tcg_gen_st8_i64(tmp, cpu_env, offsetof(CPUAlphaState, ps));

        tcg_gen_ld8u_i64(ctx->ir[IR_V0], cpu_env,
                         offsetof(CPUAlphaState, ps));
        tcg_gen_st_i64(ctx->ir[IR_A0], cpu_env,
                       offsetof(CPUAlphaState, usp));
        tcg_gen_ld_i64(ctx->ir[IR_V0], cpu_env,
                       offsetof(CPUAlphaState, usp));
        tcg_gen_ld32s_i64(ctx->ir[IR_V0], cpu_env,
            -offsetof(AlphaCPU, env) + offsetof(CPUState, cpu_index));
    }
#endif

    return gen_invalid(ctx);

#ifdef CONFIG_USER_ONLY
    return gen_excp(ctx, EXCP_CALL_PAL, palcode);
#else
    {
        TCGv tmp = tcg_temp_new();
        uint64_t exc_addr = ctx->pc;
        uint64_t entry = ctx->palbr;

        if (ctx->tb->flags & TB_FLAGS_PAL_MODE) {
            exc_addr |= 1;
        } else {
            tcg_gen_movi_i64(tmp, 1);
            tcg_gen_st8_i64(tmp, cpu_env, offsetof(CPUAlphaState, pal_mode));
        }

        tcg_gen_movi_i64(tmp, exc_addr);
        tcg_gen_st_i64(tmp, cpu_env, offsetof(CPUAlphaState, exc_addr));

        entry += (palcode & 0x80
                  ? 0x2000 + (palcode - 0x80) * 64
                  : 0x1000 + palcode * 64);

        /* Since the destination is running in PALmode, we don't really
           need the page permissions check.  We'll see the existence of
           the page when we create the TB, and we'll flush all TBs if
           we change the PAL base register.  */
        if (!ctx->singlestep_enabled && !(ctx->tb->cflags & CF_LAST_IO)) {
            tcg_gen_goto_tb(0);
            tcg_gen_movi_i64(cpu_pc, entry);
            tcg_gen_exit_tb((uintptr_t)ctx->tb);
            return EXIT_GOTO_TB;
        }

        tcg_gen_movi_i64(cpu_pc, entry);
        return EXIT_PC_UPDATED;
    }
#endif
}
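/* Entry-point arithmetic example: CALL_PAL 0x83 (bit 0x80 set, so an
   unprivileged entry) vectors to palbr + 0x2000 + (0x83 - 0x80) * 64,
   i.e. 0x20c0 past the PAL base, per the computation of 'entry' above. */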
#ifndef CONFIG_USER_ONLY

#define PR_BYTE         0x100000
#define PR_LONG         0x200000

static int cpu_pr_data(int pr)
{
    switch (pr) {
    case  0: return offsetof(CPUAlphaState, ps) | PR_BYTE;
    case  1: return offsetof(CPUAlphaState, fen) | PR_BYTE;
    case  2: return offsetof(CPUAlphaState, pcc_ofs) | PR_LONG;
    case  3: return offsetof(CPUAlphaState, trap_arg0);
    case  4: return offsetof(CPUAlphaState, trap_arg1);
    case  5: return offsetof(CPUAlphaState, trap_arg2);
    case  6: return offsetof(CPUAlphaState, exc_addr);
    case  7: return offsetof(CPUAlphaState, palbr);
    case  8: return offsetof(CPUAlphaState, ptbr);
    case  9: return offsetof(CPUAlphaState, vptptr);
    case 10: return offsetof(CPUAlphaState, unique);
    case 11: return offsetof(CPUAlphaState, sysval);
    case 12: return offsetof(CPUAlphaState, usp);

    case 40 ... 63:
        return offsetof(CPUAlphaState, scratch[pr - 40]);

    case 251:
        return offsetof(CPUAlphaState, alarm_expire);
    }
    return 0;
}
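/* Example: cpu_pr_data(0) yields offsetof(CPUAlphaState, ps) | PR_BYTE, so
   gen_mfpr/gen_mtpr below strip the PR_BYTE flag and use 8-bit loads and
   stores when accessing the PS processor register. */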
static ExitStatus gen_mfpr(DisasContext *ctx, TCGv va, int regno)
{
    void (*helper)(TCGv);
    int data;

    switch (regno) {
    case 32 ... 39:
        /* Accessing the "non-shadow" general registers.  */
        regno = regno == 39 ? 25 : regno - 32 + 8;
        tcg_gen_mov_i64(va, cpu_std_ir[regno]);
        break;

    case 250: /* WALLTIME */
        helper = gen_helper_get_walltime;
        goto do_helper;
    case 249: /* VMTIME */
        helper = gen_helper_get_vmtime;
    do_helper:
        helper(va);
        return EXIT_PC_STALE;

    default:
        /* The basic registers are data only, and unknown registers
           are read-zero, write-ignore.  */
        data = cpu_pr_data(regno);
        if (data == 0) {
            tcg_gen_movi_i64(va, 0);
        } else if (data & PR_BYTE) {
            tcg_gen_ld8u_i64(va, cpu_env, data & ~PR_BYTE);
        } else if (data & PR_LONG) {
            tcg_gen_ld32s_i64(va, cpu_env, data & ~PR_LONG);
        } else {
            tcg_gen_ld_i64(va, cpu_env, data);
        }
        break;
    }

    return NO_EXIT;
}
static ExitStatus gen_mtpr(DisasContext *ctx, TCGv vb, int regno)
{
    TCGv tmp;
    int data;

    switch (regno) {
    case 255:
        /* TBIA */
        gen_helper_tbia(cpu_env);
        break;

    case 254:
        /* TBIS */
        gen_helper_tbis(cpu_env, vb);
        break;

    case 253:
        /* WAIT */
        tmp = tcg_const_i64(1);
        tcg_gen_st32_i64(tmp, cpu_env, -offsetof(AlphaCPU, env) +
                                       offsetof(CPUState, halted));
        return gen_excp(ctx, EXCP_HLT, 0);

    case 252:
        /* HALT */
        gen_helper_halt(vb);
        return EXIT_PC_STALE;

    case 251:
        /* ALARM */
        gen_helper_set_alarm(cpu_env, vb);
        break;

    case 7:
        /* PALBR */
        tcg_gen_st_i64(vb, cpu_env, offsetof(CPUAlphaState, palbr));
        /* Changing the PAL base register implies un-chaining all of the TBs
           that ended with a CALL_PAL.  Since the base register usually only
           changes during boot, flushing everything works well.  */
        gen_helper_tb_flush(cpu_env);
        return EXIT_PC_STALE;

    case 32 ... 39:
        /* Accessing the "non-shadow" general registers.  */
        regno = regno == 39 ? 25 : regno - 32 + 8;
        tcg_gen_mov_i64(cpu_std_ir[regno], vb);
        break;

    default:
        /* The basic registers are data only, and unknown registers
           are read-zero, write-ignore.  */
        data = cpu_pr_data(regno);
        if (data != 0) {
            if (data & PR_BYTE) {
                tcg_gen_st8_i64(vb, cpu_env, data & ~PR_BYTE);
            } else if (data & PR_LONG) {
                tcg_gen_st32_i64(vb, cpu_env, data & ~PR_LONG);
            } else {
                tcg_gen_st_i64(vb, cpu_env, data);
            }
        }
        break;
    }

    return NO_EXIT;
}

#endif /* !USER_ONLY */
#define REQUIRE_NO_LIT                          \
    do {                                        \
        if (real_islit) {                       \
            goto invalid_opc;                   \
        }                                       \
    } while (0)

#define REQUIRE_TB_FLAG(FLAG)                   \
    do {                                        \
        if ((ctx->tb->flags & (FLAG)) == 0) {   \
            goto invalid_opc;                   \
        }                                       \
    } while (0)

#define REQUIRE_REG_31(WHICH)                   \
    do {                                        \
        if (WHICH != 31) {                      \
            goto invalid_opc;                   \
        }                                       \
    } while (0)
static ExitStatus translate_one(DisasContext *ctx, uint32_t insn)
{
    int32_t disp21, disp16, disp12 __attribute__((unused));
    uint16_t fn11;
    uint8_t opc, ra, rb, rc, fpfn, fn7, lit;
    bool islit, real_islit;
    TCGv va, vb, vc, tmp, tmp2;
    TCGv_i32 t32;
    ExitStatus ret;

    /* Decode all instruction fields */
    opc = extract32(insn, 26, 6);
    ra = extract32(insn, 21, 5);
    rb = extract32(insn, 16, 5);
    rc = extract32(insn, 0, 5);
    real_islit = islit = extract32(insn, 12, 1);
    lit = extract32(insn, 13, 8);

    disp21 = sextract32(insn, 0, 21);
    disp16 = sextract32(insn, 0, 16);
    disp12 = sextract32(insn, 0, 12);

    fn11 = extract32(insn, 5, 11);
    fpfn = extract32(insn, 5, 6);
    fn7 = extract32(insn, 5, 7);

    if (rb == 31 && !islit) {
        islit = true;
        lit = 0;
    }

        ret = gen_call_pal(ctx, insn & 0x03ffffff);
        disp16 = (uint32_t)disp16 << 16;
        va = dest_gpr(ctx, ra);
        /* It's worth special-casing immediate loads.  */
        tcg_gen_movi_i64(va, disp16);
        tcg_gen_addi_i64(va, load_gpr(ctx, rb), disp16);

        REQUIRE_TB_FLAG(TB_FLAGS_AMASK_BWX);
        gen_load_mem(ctx, &tcg_gen_qemu_ld8u, ra, rb, disp16, 0, 0);
        gen_load_mem(ctx, &tcg_gen_qemu_ld64, ra, rb, disp16, 0, 1);
        REQUIRE_TB_FLAG(TB_FLAGS_AMASK_BWX);
        gen_load_mem(ctx, &tcg_gen_qemu_ld16u, ra, rb, disp16, 0, 0);
        REQUIRE_TB_FLAG(TB_FLAGS_AMASK_BWX);
        gen_store_mem(ctx, &tcg_gen_qemu_st16, ra, rb, disp16, 0, 0);
        REQUIRE_TB_FLAG(TB_FLAGS_AMASK_BWX);
        gen_store_mem(ctx, &tcg_gen_qemu_st8, ra, rb, disp16, 0, 0);
        gen_store_mem(ctx, &tcg_gen_qemu_st64, ra, rb, disp16, 0, 1);
        vc = dest_gpr(ctx, rc);
        vb = load_gpr_lit(ctx, rb, lit, islit);

        /* Special case ADDL as SEXTL.  */
        tcg_gen_ext32s_i64(vc, vb);
        /* Special case SUBQ as NEGQ.  */
        tcg_gen_neg_i64(vc, vb);

        va = load_gpr(ctx, ra);
        tcg_gen_add_i64(vc, va, vb);
        tcg_gen_ext32s_i64(vc, vc);

        tmp = tcg_temp_new();
        tcg_gen_shli_i64(tmp, va, 2);
        tcg_gen_add_i64(tmp, tmp, vb);
        tcg_gen_ext32s_i64(vc, tmp);

        tcg_gen_sub_i64(vc, va, vb);
        tcg_gen_ext32s_i64(vc, vc);

        tmp = tcg_temp_new();
        tcg_gen_shli_i64(tmp, va, 2);
        tcg_gen_sub_i64(tmp, tmp, vb);
        tcg_gen_ext32s_i64(vc, tmp);

        /* Special case 0 >= X as X == 0.  */
        gen_helper_cmpbe0(vc, vb);
        gen_helper_cmpbge(vc, va, vb);

        tmp = tcg_temp_new();
        tcg_gen_shli_i64(tmp, va, 3);
        tcg_gen_add_i64(tmp, tmp, vb);
        tcg_gen_ext32s_i64(vc, tmp);

        tmp = tcg_temp_new();
        tcg_gen_shli_i64(tmp, va, 3);
        tcg_gen_sub_i64(tmp, tmp, vb);
        tcg_gen_ext32s_i64(vc, tmp);

        tcg_gen_setcond_i64(TCG_COND_LTU, vc, va, vb);

        tcg_gen_add_i64(vc, va, vb);

        tmp = tcg_temp_new();
        tcg_gen_shli_i64(tmp, va, 2);
        tcg_gen_add_i64(vc, tmp, vb);

        tcg_gen_sub_i64(vc, va, vb);

        tmp = tcg_temp_new();
        tcg_gen_shli_i64(tmp, va, 2);
        tcg_gen_sub_i64(vc, tmp, vb);

        tcg_gen_setcond_i64(TCG_COND_EQ, vc, va, vb);

        tmp = tcg_temp_new();
        tcg_gen_shli_i64(tmp, va, 3);
        tcg_gen_add_i64(vc, tmp, vb);

        tmp = tcg_temp_new();
        tcg_gen_shli_i64(tmp, va, 3);
        tcg_gen_sub_i64(vc, tmp, vb);

        tcg_gen_setcond_i64(TCG_COND_LEU, vc, va, vb);

        tmp = tcg_temp_new();
        tcg_gen_ext32s_i64(tmp, va);
        tcg_gen_ext32s_i64(vc, vb);
        tcg_gen_add_i64(tmp, tmp, vc);
        tcg_gen_ext32s_i64(vc, tmp);
        gen_helper_check_overflow(cpu_env, vc, tmp);

        tmp = tcg_temp_new();
        tcg_gen_ext32s_i64(tmp, va);
        tcg_gen_ext32s_i64(vc, vb);
        tcg_gen_sub_i64(tmp, tmp, vc);
        tcg_gen_ext32s_i64(vc, tmp);
        gen_helper_check_overflow(cpu_env, vc, tmp);

        tcg_gen_setcond_i64(TCG_COND_LT, vc, va, vb);

        tmp = tcg_temp_new();
        tmp2 = tcg_temp_new();
        tcg_gen_eqv_i64(tmp, va, vb);
        tcg_gen_mov_i64(tmp2, va);
        tcg_gen_add_i64(vc, va, vb);
        tcg_gen_xor_i64(tmp2, tmp2, vc);
        tcg_gen_and_i64(tmp, tmp, tmp2);
        tcg_gen_shri_i64(tmp, tmp, 63);
        tcg_gen_movi_i64(tmp2, 0);
        gen_helper_check_overflow(cpu_env, tmp, tmp2);
        tcg_temp_free(tmp2);

        tmp = tcg_temp_new();
        tmp2 = tcg_temp_new();
        tcg_gen_xor_i64(tmp, va, vb);
        tcg_gen_mov_i64(tmp2, va);
        tcg_gen_sub_i64(vc, va, vb);
        tcg_gen_xor_i64(tmp2, tmp2, vc);
        tcg_gen_and_i64(tmp, tmp, tmp2);
        tcg_gen_shri_i64(tmp, tmp, 63);
        tcg_gen_movi_i64(tmp2, 0);
        gen_helper_check_overflow(cpu_env, tmp, tmp2);
        tcg_temp_free(tmp2);

        tcg_gen_setcond_i64(TCG_COND_LE, vc, va, vb);
        /* Special case BIS as NOP.  */
        /* Special case BIS as MOV.  */
        vc = dest_gpr(ctx, rc);
        tcg_gen_movi_i64(vc, lit);
        tcg_gen_mov_i64(vc, load_gpr(ctx, rb));

        vc = dest_gpr(ctx, rc);
        vb = load_gpr_lit(ctx, rb, lit, islit);

        if (fn7 == 0x28 && ra == 31) {
            /* Special case ORNOT as NOT.  */
            tcg_gen_not_i64(vc, vb);
        }

        va = load_gpr(ctx, ra);
        tcg_gen_and_i64(vc, va, vb);
        tcg_gen_andc_i64(vc, va, vb);

        tmp = tcg_temp_new();
        tcg_gen_andi_i64(tmp, va, 1);
        tcg_gen_movcond_i64(TCG_COND_NE, vc, tmp, load_zero(ctx),
                            vb, load_gpr(ctx, rc));

        tmp = tcg_temp_new();
        tcg_gen_andi_i64(tmp, va, 1);
        tcg_gen_movcond_i64(TCG_COND_EQ, vc, tmp, load_zero(ctx),
                            vb, load_gpr(ctx, rc));

        tcg_gen_or_i64(vc, va, vb);

        tcg_gen_movcond_i64(TCG_COND_EQ, vc, va, load_zero(ctx),
                            vb, load_gpr(ctx, rc));
        tcg_gen_movcond_i64(TCG_COND_NE, vc, va, load_zero(ctx),
                            vb, load_gpr(ctx, rc));

        tcg_gen_orc_i64(vc, va, vb);
        tcg_gen_xor_i64(vc, va, vb);

        tcg_gen_movcond_i64(TCG_COND_LT, vc, va, load_zero(ctx),
                            vb, load_gpr(ctx, rc));
        tcg_gen_movcond_i64(TCG_COND_GE, vc, va, load_zero(ctx),
                            vb, load_gpr(ctx, rc));

        tcg_gen_eqv_i64(vc, va, vb);

        {
            uint64_t amask = ctx->tb->flags >> TB_FLAGS_AMASK_SHIFT;
            tcg_gen_andi_i64(vc, vb, ~amask);
        }

        tcg_gen_movcond_i64(TCG_COND_LE, vc, va, load_zero(ctx),
                            vb, load_gpr(ctx, rc));
        tcg_gen_movcond_i64(TCG_COND_GT, vc, va, load_zero(ctx),
                            vb, load_gpr(ctx, rc));

        tcg_gen_movi_i64(vc, ctx->implver);
        vc = dest_gpr(ctx, rc);
        va = load_gpr(ctx, ra);

        gen_msk_l(ctx, vc, va, rb, islit, lit, 0x01);
        gen_ext_l(ctx, vc, va, rb, islit, lit, 0x01);
        gen_ins_l(ctx, vc, va, rb, islit, lit, 0x01);
        gen_msk_l(ctx, vc, va, rb, islit, lit, 0x03);
        gen_ext_l(ctx, vc, va, rb, islit, lit, 0x03);
        gen_ins_l(ctx, vc, va, rb, islit, lit, 0x03);
        gen_msk_l(ctx, vc, va, rb, islit, lit, 0x0f);
        gen_ext_l(ctx, vc, va, rb, islit, lit, 0x0f);
        gen_ins_l(ctx, vc, va, rb, islit, lit, 0x0f);

        gen_zapnoti(vc, va, ~lit);
        gen_helper_zap(vc, va, load_gpr(ctx, rb));
        gen_zapnoti(vc, va, lit);
        gen_helper_zapnot(vc, va, load_gpr(ctx, rb));

        gen_msk_l(ctx, vc, va, rb, islit, lit, 0xff);

        tcg_gen_shri_i64(vc, va, lit & 0x3f);
        tmp = tcg_temp_new();
        vb = load_gpr(ctx, rb);
        tcg_gen_andi_i64(tmp, vb, 0x3f);
        tcg_gen_shr_i64(vc, va, tmp);

        gen_ext_l(ctx, vc, va, rb, islit, lit, 0xff);

        tcg_gen_shli_i64(vc, va, lit & 0x3f);
        tmp = tcg_temp_new();
        vb = load_gpr(ctx, rb);
        tcg_gen_andi_i64(tmp, vb, 0x3f);
        tcg_gen_shl_i64(vc, va, tmp);

        gen_ins_l(ctx, vc, va, rb, islit, lit, 0xff);

        tcg_gen_sari_i64(vc, va, lit & 0x3f);
        tmp = tcg_temp_new();
        vb = load_gpr(ctx, rb);
        tcg_gen_andi_i64(tmp, vb, 0x3f);
        tcg_gen_sar_i64(vc, va, tmp);

        gen_msk_h(ctx, vc, va, rb, islit, lit, 0x03);
        gen_ins_h(ctx, vc, va, rb, islit, lit, 0x03);
        gen_ext_h(ctx, vc, va, rb, islit, lit, 0x03);
        gen_msk_h(ctx, vc, va, rb, islit, lit, 0x0f);
        gen_ins_h(ctx, vc, va, rb, islit, lit, 0x0f);
        gen_ext_h(ctx, vc, va, rb, islit, lit, 0x0f);
        gen_msk_h(ctx, vc, va, rb, islit, lit, 0xff);
        gen_ins_h(ctx, vc, va, rb, islit, lit, 0xff);
        gen_ext_h(ctx, vc, va, rb, islit, lit, 0xff);
        vc = dest_gpr(ctx, rc);
        vb = load_gpr_lit(ctx, rb, lit, islit);
        va = load_gpr(ctx, ra);

        tcg_gen_mul_i64(vc, va, vb);
        tcg_gen_ext32s_i64(vc, vc);

        tcg_gen_mul_i64(vc, va, vb);

        tmp = tcg_temp_new();
        tcg_gen_mulu2_i64(tmp, vc, va, vb);

        tmp = tcg_temp_new();
        tcg_gen_ext32s_i64(tmp, va);
        tcg_gen_ext32s_i64(vc, vb);
        tcg_gen_mul_i64(tmp, tmp, vc);
        tcg_gen_ext32s_i64(vc, tmp);
        gen_helper_check_overflow(cpu_env, vc, tmp);

        tmp = tcg_temp_new();
        tmp2 = tcg_temp_new();
        tcg_gen_muls2_i64(vc, tmp, va, vb);
        tcg_gen_sari_i64(tmp2, vc, 63);
        gen_helper_check_overflow(cpu_env, tmp, tmp2);
        tcg_temp_free(tmp2);
        REQUIRE_TB_FLAG(TB_FLAGS_AMASK_FIX);
        vc = dest_fpr(ctx, rc);
        switch (fpfn) { /* fn11 & 0x3F */

        t32 = tcg_temp_new_i32();
        va = load_gpr(ctx, ra);
        tcg_gen_extrl_i64_i32(t32, va);
        gen_helper_memory_to_s(vc, t32);
        tcg_temp_free_i32(t32);

        vb = load_fpr(ctx, rb);
        gen_helper_sqrtf(vc, cpu_env, vb);

        gen_sqrts(ctx, rb, rc, fn11);

        t32 = tcg_temp_new_i32();
        va = load_gpr(ctx, ra);
        tcg_gen_extrl_i64_i32(t32, va);
        gen_helper_memory_to_f(vc, t32);
        tcg_temp_free_i32(t32);

        va = load_gpr(ctx, ra);
        tcg_gen_mov_i64(vc, va);

        vb = load_fpr(ctx, rb);
        gen_helper_sqrtg(vc, cpu_env, vb);

        gen_sqrtt(ctx, rb, rc, fn11);
        /* VAX floating point */
        /* XXX: rounding mode and trap are ignored (!) */
        vc = dest_fpr(ctx, rc);
        vb = load_fpr(ctx, rb);
        va = load_fpr(ctx, ra);
        switch (fpfn) { /* fn11 & 0x3F */

        gen_helper_addf(vc, cpu_env, va, vb);
        gen_helper_subf(vc, cpu_env, va, vb);
        gen_helper_mulf(vc, cpu_env, va, vb);
        gen_helper_divf(vc, cpu_env, va, vb);
        gen_helper_addg(vc, cpu_env, va, vb);
        gen_helper_subg(vc, cpu_env, va, vb);
        gen_helper_mulg(vc, cpu_env, va, vb);
        gen_helper_divg(vc, cpu_env, va, vb);
        gen_helper_cmpgeq(vc, cpu_env, va, vb);
        gen_helper_cmpglt(vc, cpu_env, va, vb);
        gen_helper_cmpgle(vc, cpu_env, va, vb);
        gen_helper_cvtgf(vc, cpu_env, vb);
        gen_helper_cvtgq(vc, cpu_env, vb);
        gen_helper_cvtqf(vc, cpu_env, vb);
        gen_helper_cvtqg(vc, cpu_env, vb);
        /* IEEE floating-point */
        switch (fpfn) { /* fn11 & 0x3F */

        gen_adds(ctx, ra, rb, rc, fn11);
        gen_subs(ctx, ra, rb, rc, fn11);
        gen_muls(ctx, ra, rb, rc, fn11);
        gen_divs(ctx, ra, rb, rc, fn11);
        gen_addt(ctx, ra, rb, rc, fn11);
        gen_subt(ctx, ra, rb, rc, fn11);
        gen_mult(ctx, ra, rb, rc, fn11);
        gen_divt(ctx, ra, rb, rc, fn11);
        gen_cmptun(ctx, ra, rb, rc, fn11);
        gen_cmpteq(ctx, ra, rb, rc, fn11);
        gen_cmptlt(ctx, ra, rb, rc, fn11);
        gen_cmptle(ctx, ra, rb, rc, fn11);

        if (fn11 == 0x2AC || fn11 == 0x6AC) {
            gen_cvtst(ctx, rb, rc, fn11);
        } else {
            gen_cvtts(ctx, rb, rc, fn11);
        }

        gen_cvttq(ctx, rb, rc, fn11);
        gen_cvtqs(ctx, rb, rc, fn11);
        gen_cvtqt(ctx, rb, rc, fn11);
        vc = dest_fpr(ctx, rc);
        vb = load_fpr(ctx, rb);

        /* Special case CPYS as FNOP.  */
        vc = dest_fpr(ctx, rc);
        va = load_fpr(ctx, ra);
        /* Special case CPYS as FMOV.  */
        tcg_gen_mov_i64(vc, va);

        vb = load_fpr(ctx, rb);
        gen_cpy_mask(vc, va, vb, 0, 0x8000000000000000ULL);

        vc = dest_fpr(ctx, rc);
        vb = load_fpr(ctx, rb);
        va = load_fpr(ctx, ra);
        gen_cpy_mask(vc, va, vb, 1, 0x8000000000000000ULL);

        vc = dest_fpr(ctx, rc);
        vb = load_fpr(ctx, rb);
        va = load_fpr(ctx, ra);
        gen_cpy_mask(vc, va, vb, 0, 0xFFF0000000000000ULL);

        va = load_fpr(ctx, ra);
        gen_helper_store_fpcr(cpu_env, va);
        if (ctx->tb_rm == QUAL_RM_D) {
            /* Re-do the copy of the rounding mode to fp_status
               the next time we use dynamic rounding.  */
            ctx->tb_rm = -1;
        }

        va = dest_fpr(ctx, ra);
        gen_helper_load_fpcr(va, cpu_env);

        gen_fcmov(ctx, TCG_COND_EQ, ra, rb, rc);
        gen_fcmov(ctx, TCG_COND_NE, ra, rb, rc);
        gen_fcmov(ctx, TCG_COND_LT, ra, rb, rc);
        gen_fcmov(ctx, TCG_COND_GE, ra, rb, rc);
        gen_fcmov(ctx, TCG_COND_LE, ra, rb, rc);
        gen_fcmov(ctx, TCG_COND_GT, ra, rb, rc);

    case 0x030: /* CVTQL */
    case 0x130: /* CVTQL/V */
    case 0x530: /* CVTQL/SV */
        vc = dest_fpr(ctx, rc);
        vb = load_fpr(ctx, rb);
        gen_helper_cvtql(vc, cpu_env, vb);
        gen_fp_exc_raise(rc, fn11);
        switch ((uint16_t)disp16) {

        va = dest_gpr(ctx, ra);
        if (ctx->tb->cflags & CF_USE_ICOUNT) {
            gen_io_start();
            gen_helper_load_pcc(va, cpu_env);
            gen_io_end();
            ret = EXIT_PC_STALE;
        } else {
            gen_helper_load_pcc(va, cpu_env);
        }
        /* HW_MFPR (PALcode) */
#ifndef CONFIG_USER_ONLY
        REQUIRE_TB_FLAG(TB_FLAGS_PAL_MODE);
        va = dest_gpr(ctx, ra);
        ret = gen_mfpr(ctx, va, insn & 0xffff);
#endif

        /* JMP, JSR, RET, JSR_COROUTINE.  These only differ by the branch
           prediction stack action, which of course we don't implement.  */
        vb = load_gpr(ctx, rb);
        tcg_gen_andi_i64(cpu_pc, vb, ~3);
        tcg_gen_movi_i64(ctx->ir[ra], ctx->pc);
        ret = EXIT_PC_UPDATED;

        /* HW_LD (PALcode) */
#ifndef CONFIG_USER_ONLY
        REQUIRE_TB_FLAG(TB_FLAGS_PAL_MODE);
        {
            TCGv addr = tcg_temp_new();
            vb = load_gpr(ctx, rb);
            va = dest_gpr(ctx, ra);

            tcg_gen_addi_i64(addr, vb, disp12);
            switch ((insn >> 12) & 0xF) {
                /* Longword physical access (hw_ldl/p) */
                gen_helper_ldl_phys(va, cpu_env, addr);
                /* Quadword physical access (hw_ldq/p) */
                gen_helper_ldq_phys(va, cpu_env, addr);
                /* Longword physical access with lock (hw_ldl_l/p) */
                gen_helper_ldl_l_phys(va, cpu_env, addr);
                /* Quadword physical access with lock (hw_ldq_l/p) */
                gen_helper_ldq_l_phys(va, cpu_env, addr);
                /* Longword virtual PTE fetch (hw_ldl/v) */
                /* Quadword virtual PTE fetch (hw_ldq/v) */
                /* Longword virtual access (hw_ldl) */
                /* Quadword virtual access (hw_ldq) */
                /* Longword virtual access with protection check (hw_ldl/w) */
                tcg_gen_qemu_ld_i64(va, addr, MMU_KERNEL_IDX, MO_LESL);
                /* Quadword virtual access with protection check (hw_ldq/w) */
                tcg_gen_qemu_ld_i64(va, addr, MMU_KERNEL_IDX, MO_LEQ);
                /* Longword virtual access with alt access mode (hw_ldl/a) */
                /* Quadword virtual access with alt access mode (hw_ldq/a) */
                /* Longword virtual access with alternate access mode and
                   protection checks (hw_ldl/wa) */
                tcg_gen_qemu_ld_i64(va, addr, MMU_USER_IDX, MO_LESL);
                /* Quadword virtual access with alternate access mode and
                   protection checks (hw_ldq/wa) */
                tcg_gen_qemu_ld_i64(va, addr, MMU_USER_IDX, MO_LEQ);
            }
            tcg_temp_free(addr);
        }
#endif
        vc = dest_gpr(ctx, rc);

        REQUIRE_TB_FLAG(TB_FLAGS_AMASK_FIX);
        va = load_fpr(ctx, ra);
        tcg_gen_mov_i64(vc, va);
        } else if (fn7 == 0x78) {
            REQUIRE_TB_FLAG(TB_FLAGS_AMASK_FIX);
            t32 = tcg_temp_new_i32();
            va = load_fpr(ctx, ra);
            gen_helper_s_to_memory(t32, va);
            tcg_gen_ext_i32_i64(vc, t32);
            tcg_temp_free_i32(t32);
        }

        vb = load_gpr_lit(ctx, rb, lit, islit);

        REQUIRE_TB_FLAG(TB_FLAGS_AMASK_BWX);
        tcg_gen_ext8s_i64(vc, vb);

        REQUIRE_TB_FLAG(TB_FLAGS_AMASK_BWX);
        tcg_gen_ext16s_i64(vc, vb);

        REQUIRE_TB_FLAG(TB_FLAGS_AMASK_CIX);
        gen_helper_ctpop(vc, vb);

        REQUIRE_TB_FLAG(TB_FLAGS_AMASK_MVI);
        va = load_gpr(ctx, ra);
        gen_helper_perr(vc, va, vb);

        REQUIRE_TB_FLAG(TB_FLAGS_AMASK_CIX);
        gen_helper_ctlz(vc, vb);

        REQUIRE_TB_FLAG(TB_FLAGS_AMASK_CIX);
        gen_helper_cttz(vc, vb);

        REQUIRE_TB_FLAG(TB_FLAGS_AMASK_MVI);
        gen_helper_unpkbw(vc, vb);

        REQUIRE_TB_FLAG(TB_FLAGS_AMASK_MVI);
        gen_helper_unpkbl(vc, vb);

        REQUIRE_TB_FLAG(TB_FLAGS_AMASK_MVI);
        gen_helper_pkwb(vc, vb);

        REQUIRE_TB_FLAG(TB_FLAGS_AMASK_MVI);
        gen_helper_pklb(vc, vb);

        REQUIRE_TB_FLAG(TB_FLAGS_AMASK_MVI);
        va = load_gpr(ctx, ra);
        gen_helper_minsb8(vc, va, vb);

        REQUIRE_TB_FLAG(TB_FLAGS_AMASK_MVI);
        va = load_gpr(ctx, ra);
        gen_helper_minsw4(vc, va, vb);

        REQUIRE_TB_FLAG(TB_FLAGS_AMASK_MVI);
        va = load_gpr(ctx, ra);
        gen_helper_minub8(vc, va, vb);

        REQUIRE_TB_FLAG(TB_FLAGS_AMASK_MVI);
        va = load_gpr(ctx, ra);
        gen_helper_minuw4(vc, va, vb);

        REQUIRE_TB_FLAG(TB_FLAGS_AMASK_MVI);
        va = load_gpr(ctx, ra);
        gen_helper_maxub8(vc, va, vb);

        REQUIRE_TB_FLAG(TB_FLAGS_AMASK_MVI);
        va = load_gpr(ctx, ra);
        gen_helper_maxuw4(vc, va, vb);

        REQUIRE_TB_FLAG(TB_FLAGS_AMASK_MVI);
        va = load_gpr(ctx, ra);
        gen_helper_maxsb8(vc, va, vb);

        REQUIRE_TB_FLAG(TB_FLAGS_AMASK_MVI);
        va = load_gpr(ctx, ra);
        gen_helper_maxsw4(vc, va, vb);
        /* HW_MTPR (PALcode) */
#ifndef CONFIG_USER_ONLY
        REQUIRE_TB_FLAG(TB_FLAGS_PAL_MODE);
        vb = load_gpr(ctx, rb);
        ret = gen_mtpr(ctx, vb, insn & 0xffff);
#endif

        /* HW_RET (PALcode) */
#ifndef CONFIG_USER_ONLY
        REQUIRE_TB_FLAG(TB_FLAGS_PAL_MODE);
        if (rb == 31) {
            /* Pre-EV6 CPUs interpreted this as HW_REI, loading the return
               address from EXC_ADDR.  This turns out to be useful for our
               emulation PALcode, so continue to accept it.  */
            ctx->lit = vb = tcg_temp_new();
            tcg_gen_ld_i64(vb, cpu_env, offsetof(CPUAlphaState, exc_addr));
        } else {
            vb = load_gpr(ctx, rb);
        }
        tmp = tcg_temp_new();
        tcg_gen_movi_i64(tmp, 0);
        tcg_gen_st8_i64(tmp, cpu_env, offsetof(CPUAlphaState, intr_flag));
        tcg_gen_movi_i64(cpu_lock_addr, -1);
        tcg_gen_andi_i64(tmp, vb, 1);
        tcg_gen_st8_i64(tmp, cpu_env, offsetof(CPUAlphaState, pal_mode));
        tcg_gen_andi_i64(cpu_pc, vb, ~3);
        ret = EXIT_PC_UPDATED;
#endif
        /* HW_ST (PALcode) */
#ifndef CONFIG_USER_ONLY
        REQUIRE_TB_FLAG(TB_FLAGS_PAL_MODE);
        {
            TCGv addr = tcg_temp_new();
            va = load_gpr(ctx, ra);
            vb = load_gpr(ctx, rb);

            tcg_gen_addi_i64(addr, vb, disp12);
            switch ((insn >> 12) & 0xF) {
                /* Longword physical access */
                gen_helper_stl_phys(cpu_env, addr, va);
                /* Quadword physical access */
                gen_helper_stq_phys(cpu_env, addr, va);
                /* Longword physical access with lock */
                gen_helper_stl_c_phys(dest_gpr(ctx, ra), cpu_env, addr, va);
                /* Quadword physical access with lock */
                gen_helper_stq_c_phys(dest_gpr(ctx, ra), cpu_env, addr, va);
                /* Longword virtual access */
                /* Quadword virtual access */
                /* Longword virtual access with alternate access mode */
                /* Quadword virtual access with alternate access mode */
            }
            tcg_temp_free(addr);
        }
#endif
        gen_load_mem(ctx, &gen_qemu_ldf, ra, rb, disp16, 1, 0);
        gen_load_mem(ctx, &gen_qemu_ldg, ra, rb, disp16, 1, 0);
        gen_load_mem(ctx, &gen_qemu_lds, ra, rb, disp16, 1, 0);
        gen_load_mem(ctx, &tcg_gen_qemu_ld64, ra, rb, disp16, 1, 0);
        gen_store_mem(ctx, &gen_qemu_stf, ra, rb, disp16, 1, 0);
        gen_store_mem(ctx, &gen_qemu_stg, ra, rb, disp16, 1, 0);
        gen_store_mem(ctx, &gen_qemu_sts, ra, rb, disp16, 1, 0);
        gen_store_mem(ctx, &tcg_gen_qemu_st64, ra, rb, disp16, 1, 0);
        gen_load_mem(ctx, &tcg_gen_qemu_ld32s, ra, rb, disp16, 0, 0);
        gen_load_mem(ctx, &tcg_gen_qemu_ld64, ra, rb, disp16, 0, 0);
        gen_load_mem(ctx, &gen_qemu_ldl_l, ra, rb, disp16, 0, 0);
        gen_load_mem(ctx, &gen_qemu_ldq_l, ra, rb, disp16, 0, 0);
        gen_store_mem(ctx, &tcg_gen_qemu_st32, ra, rb, disp16, 0, 0);
        gen_store_mem(ctx, &tcg_gen_qemu_st64, ra, rb, disp16, 0, 0);
        ret = gen_store_conditional(ctx, ra, rb, disp16, 0);
        ret = gen_store_conditional(ctx, ra, rb, disp16, 1);
        ret = gen_bdirect(ctx, ra, disp21);
    case 0x31: /* FBEQ */
        ret = gen_fbcond(ctx, TCG_COND_EQ, ra, disp21);
        break;
    case 0x32: /* FBLT */
        ret = gen_fbcond(ctx, TCG_COND_LT, ra, disp21);
        break;
    case 0x33: /* FBLE */
        ret = gen_fbcond(ctx, TCG_COND_LE, ra, disp21);
        break;
        ret = gen_bdirect(ctx, ra, disp21);
    case 0x35: /* FBNE */
        ret = gen_fbcond(ctx, TCG_COND_NE, ra, disp21);
        break;
    case 0x36: /* FBGE */
        ret = gen_fbcond(ctx, TCG_COND_GE, ra, disp21);
        break;
    case 0x37: /* FBGT */
        ret = gen_fbcond(ctx, TCG_COND_GT, ra, disp21);
        break;
        ret = gen_bcond(ctx, TCG_COND_EQ, ra, disp21, 1);
        ret = gen_bcond(ctx, TCG_COND_EQ, ra, disp21, 0);
        ret = gen_bcond(ctx, TCG_COND_LT, ra, disp21, 0);
        ret = gen_bcond(ctx, TCG_COND_LE, ra, disp21, 0);
        ret = gen_bcond(ctx, TCG_COND_NE, ra, disp21, 1);
        ret = gen_bcond(ctx, TCG_COND_NE, ra, disp21, 0);
        ret = gen_bcond(ctx, TCG_COND_GE, ra, disp21, 0);
        ret = gen_bcond(ctx, TCG_COND_GT, ra, disp21, 0);
        ret = gen_invalid(ctx);

    return ret;
}
void gen_intermediate_code(CPUAlphaState *env, struct TranslationBlock *tb)
{
    AlphaCPU *cpu = alpha_env_get_cpu(env);
    CPUState *cs = CPU(cpu);
    DisasContext ctx, *ctxp = &ctx;
    target_ulong pc_start;
    target_ulong pc_mask;
    uint32_t insn;
    ExitStatus ret;
    int num_insns;
    int max_insns;

    pc_start = tb->pc;

    ctx.tb = tb;
    ctx.pc = pc_start;
    ctx.mem_idx = cpu_mmu_index(env, false);
    ctx.implver = env->implver;
    ctx.singlestep_enabled = cs->singlestep_enabled;

#ifdef CONFIG_USER_ONLY
    ctx.ir = cpu_std_ir;
#else
    ctx.palbr = env->palbr;
    ctx.ir = (tb->flags & TB_FLAGS_PAL_MODE ? cpu_pal_ir : cpu_std_ir);
#endif

    /* ??? Every TB begins with unset rounding mode, to be initialized on
       the first fp insn of the TB.  Alternately we could define a proper
       default for every TB (e.g. QUAL_RM_N or QUAL_RM_D) and make sure
       to reset the FP_STATUS to that default at the end of any TB that
       changes the default.  We could even (gasp) dynamically figure out
       what default would be most efficient given the running program.  */
    ctx.tb_rm = -1;
    /* Similarly for flush-to-zero.  */
    ctx.tb_ftz = -1;

    num_insns = 0;
    max_insns = tb->cflags & CF_COUNT_MASK;
    if (max_insns == 0) {
        max_insns = CF_COUNT_MASK;
    }
    if (max_insns > TCG_MAX_INSNS) {
        max_insns = TCG_MAX_INSNS;
    }

    if (in_superpage(&ctx, pc_start)) {
        pc_mask = (1ULL << 41) - 1;
    } else {
        pc_mask = ~TARGET_PAGE_MASK;
    }

    gen_tb_start(tb);
    do {
        tcg_gen_insn_start(ctx.pc);
        num_insns++;

        if (unlikely(cpu_breakpoint_test(cs, ctx.pc, BP_ANY))) {
            ret = gen_excp(&ctx, EXCP_DEBUG, 0);
            /* The address covered by the breakpoint must be included in
               [tb->pc, tb->pc + tb->size) in order for it to be
               properly cleared -- thus we increment the PC here so that
               the logic setting tb->size below does the right thing.  */
            ctx.pc += 4;
            break;
        }
        if (num_insns == max_insns && (tb->cflags & CF_LAST_IO)) {
            gen_io_start();
        }
        insn = cpu_ldl_code(env, ctx.pc);

        TCGV_UNUSED_I64(ctx.zero);
        TCGV_UNUSED_I64(ctx.sink);
        TCGV_UNUSED_I64(ctx.lit);

        ctx.pc += 4;
        ret = translate_one(ctxp, insn);

        if (!TCGV_IS_UNUSED_I64(ctx.sink)) {
            tcg_gen_discard_i64(ctx.sink);
            tcg_temp_free(ctx.sink);
        }
        if (!TCGV_IS_UNUSED_I64(ctx.zero)) {
            tcg_temp_free(ctx.zero);
        }
        if (!TCGV_IS_UNUSED_I64(ctx.lit)) {
            tcg_temp_free(ctx.lit);
        }

        /* If we reach a page boundary, are single stepping,
           or exhaust instruction count, stop generation.  */
        if (ret == NO_EXIT
            && ((ctx.pc & pc_mask) == 0
                || tcg_op_buf_full()
                || num_insns >= max_insns
                || singlestep
                || ctx.singlestep_enabled)) {
            ret = EXIT_PC_STALE;
        }
    } while (ret == NO_EXIT);

    if (tb->cflags & CF_LAST_IO) {
        gen_io_end();
    }

    switch (ret) {
    case EXIT_GOTO_TB:
    case EXIT_NORETURN:
        break;
    case EXIT_PC_STALE:
        tcg_gen_movi_i64(cpu_pc, ctx.pc);
        /* FALLTHRU */
    case EXIT_PC_UPDATED:
        if (ctx.singlestep_enabled) {
            gen_excp_1(EXCP_DEBUG, 0);
        } else {
            tcg_gen_exit_tb(0);
        }
        break;
    default:
        abort();
    }

    gen_tb_end(tb, num_insns);

    tb->size = ctx.pc - pc_start;
    tb->icount = num_insns;

    if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
        qemu_log("IN: %s\n", lookup_symbol(pc_start));
        log_target_disas(cs, pc_start, ctx.pc - pc_start, 1);
    }
}

void restore_state_to_opc(CPUAlphaState *env, TranslationBlock *tb,