/*
 * Alpha emulation cpu translation for qemu.
 *
 * Copyright (c) 2007 Jocelyn Mayer
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"
#include "disas/disas.h"
#include "qemu/host-utils.h"
#include "exec/exec-all.h"
#include "exec/cpu_ldst.h"
#include "exec/helper-proto.h"
#include "exec/helper-gen.h"
#include "trace-tcg.h"

#undef ALPHA_DEBUG_DISAS
#define CONFIG_SOFTFLOAT_INLINE

#ifdef ALPHA_DEBUG_DISAS
#  define LOG_DISAS(...) qemu_log_mask(CPU_LOG_TB_IN_ASM, ## __VA_ARGS__)
#else
#  define LOG_DISAS(...) do { } while (0)
#endif
typedef struct DisasContext DisasContext;
struct DisasContext {
    struct TranslationBlock *tb;
    uint64_t pc;
#ifndef CONFIG_USER_ONLY
    uint64_t palbr;
#endif
    int mem_idx;

    /* Current rounding mode for this TB.  */
    int tb_rm;
    /* Current flush-to-zero setting for this TB.  */
    int tb_ftz;

    /* implver value for this CPU.  */
    int implver;

    /* The set of registers active in the current context.  */
    TCGv *ir;

    /* Temporaries for $31 and $f31 as source and destination.  */
    TCGv zero;
    TCGv sink;
    /* Temporary for immediate constants.  */
    TCGv lit;

    bool singlestep_enabled;
};
/* Return values from translate_one, indicating the state of the TB.
   Note that zero indicates that we are not exiting the TB.  */
typedef enum {
    NO_EXIT,
    /* We have emitted one or more goto_tb.  No fixup required.  */
    EXIT_GOTO_TB,
    /* We are not using a goto_tb (for whatever reason), but have updated
       the PC (for whatever reason), so there's no need to do it again on
       exiting the TB.  */
    EXIT_PC_UPDATED,
    /* We are exiting the TB, but have neither emitted a goto_tb, nor
       updated the PC for the next instruction to be executed.  */
    EXIT_PC_STALE,
    /* We are ending the TB with a noreturn function call, e.g. longjmp.
       No following code will be executed.  */
    EXIT_NORETURN,
} ExitStatus;
/* global register indexes */
static TCGv_env cpu_env;
static TCGv cpu_std_ir[31];
static TCGv cpu_fir[31];
static TCGv cpu_pc;
static TCGv cpu_lock_addr;
static TCGv cpu_lock_st_addr;
static TCGv cpu_lock_value;

#ifndef CONFIG_USER_ONLY
static TCGv cpu_pal_ir[31];
#endif

#include "exec/gen-icount.h"
void alpha_translate_init(void)
{
#define DEF_VAR(V)  { &cpu_##V, #V, offsetof(CPUAlphaState, V) }

    typedef struct { TCGv *var; const char *name; int ofs; } GlobalVar;
    static const GlobalVar vars[] = {
        DEF_VAR(lock_st_addr),
    };

    /* Use the symbolic register names that match the disassembler.  */
    static const char greg_names[31][4] = {
        "v0", "t0", "t1", "t2", "t3", "t4", "t5", "t6",
        "t7", "s0", "s1", "s2", "s3", "s4", "s5", "fp",
        "a0", "a1", "a2", "a3", "a4", "a5", "t8", "t9",
        "t10", "t11", "ra", "t12", "at", "gp", "sp"
    };
    static const char freg_names[31][4] = {
        "f0", "f1", "f2", "f3", "f4", "f5", "f6", "f7",
        "f8", "f9", "f10", "f11", "f12", "f13", "f14", "f15",
        "f16", "f17", "f18", "f19", "f20", "f21", "f22", "f23",
        "f24", "f25", "f26", "f27", "f28", "f29", "f30"
    };
#ifndef CONFIG_USER_ONLY
    static const char shadow_names[8][8] = {
        "pal_t7", "pal_s0", "pal_s1", "pal_s2",
        "pal_s3", "pal_s4", "pal_s5", "pal_t11"
    };
#endif

    static bool done_init = 0;
    int i;
    cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");
    tcg_ctx.tcg_env = cpu_env;

    for (i = 0; i < 31; i++) {
        cpu_std_ir[i] = tcg_global_mem_new_i64(cpu_env,
                                               offsetof(CPUAlphaState, ir[i]),
                                               greg_names[i]);
    }

    for (i = 0; i < 31; i++) {
        cpu_fir[i] = tcg_global_mem_new_i64(cpu_env,
                                            offsetof(CPUAlphaState, fir[i]),
                                            freg_names[i]);
    }

#ifndef CONFIG_USER_ONLY
    memcpy(cpu_pal_ir, cpu_std_ir, sizeof(cpu_pal_ir));
    for (i = 0; i < 8; i++) {
        int r = (i == 7 ? 25 : i + 8);
        cpu_pal_ir[r] = tcg_global_mem_new_i64(cpu_env,
                                               offsetof(CPUAlphaState,
                                                        shadow[i]),
                                               shadow_names[i]);
    }
#endif

    for (i = 0; i < ARRAY_SIZE(vars); ++i) {
        const GlobalVar *v = &vars[i];
        *v->var = tcg_global_mem_new_i64(cpu_env, v->ofs, v->name);
    }
}
static TCGv load_zero(DisasContext *ctx)
{
    if (TCGV_IS_UNUSED_I64(ctx->zero)) {
        ctx->zero = tcg_const_i64(0);
    }
    return ctx->zero;
}
static TCGv dest_sink(DisasContext *ctx)
{
    if (TCGV_IS_UNUSED_I64(ctx->sink)) {
        ctx->sink = tcg_temp_new();
    }
    return ctx->sink;
}
static TCGv load_gpr(DisasContext *ctx, unsigned reg)
{
    if (likely(reg < 31)) {
        return ctx->ir[reg];
    } else {
        return load_zero(ctx);
    }
}
static TCGv load_gpr_lit(DisasContext *ctx, unsigned reg,
                         uint8_t lit, bool islit)
{
    if (islit) {
        ctx->lit = tcg_const_i64(lit);
        return ctx->lit;
    } else if (likely(reg < 31)) {
        return ctx->ir[reg];
    } else {
        return load_zero(ctx);
    }
}
static TCGv dest_gpr(DisasContext *ctx, unsigned reg)
{
    if (likely(reg < 31)) {
        return ctx->ir[reg];
    } else {
        return dest_sink(ctx);
    }
}
static TCGv load_fpr(DisasContext *ctx, unsigned reg)
{
    if (likely(reg < 31)) {
        return cpu_fir[reg];
    } else {
        return load_zero(ctx);
    }
}
static TCGv dest_fpr(DisasContext *ctx, unsigned reg)
{
    if (likely(reg < 31)) {
        return cpu_fir[reg];
    } else {
        return dest_sink(ctx);
    }
}
static void gen_excp_1(int exception, int error_code)
{
    TCGv_i32 tmp1, tmp2;

    tmp1 = tcg_const_i32(exception);
    tmp2 = tcg_const_i32(error_code);
    gen_helper_excp(cpu_env, tmp1, tmp2);
    tcg_temp_free_i32(tmp2);
    tcg_temp_free_i32(tmp1);
}
static ExitStatus gen_excp(DisasContext *ctx, int exception, int error_code)
{
    tcg_gen_movi_i64(cpu_pc, ctx->pc);
    gen_excp_1(exception, error_code);
    return EXIT_NORETURN;
}

static inline ExitStatus gen_invalid(DisasContext *ctx)
{
    return gen_excp(ctx, EXCP_OPCDEC, 0);
}
static inline void gen_qemu_ldf(TCGv t0, TCGv t1, int flags)
{
    TCGv_i32 tmp32 = tcg_temp_new_i32();
    tcg_gen_qemu_ld_i32(tmp32, t1, flags, MO_LEUL);
    gen_helper_memory_to_f(t0, tmp32);
    tcg_temp_free_i32(tmp32);
}

static inline void gen_qemu_ldg(TCGv t0, TCGv t1, int flags)
{
    TCGv tmp = tcg_temp_new();
    tcg_gen_qemu_ld_i64(tmp, t1, flags, MO_LEQ);
    gen_helper_memory_to_g(t0, tmp);
    tcg_temp_free(tmp);
}

static inline void gen_qemu_lds(TCGv t0, TCGv t1, int flags)
{
    TCGv_i32 tmp32 = tcg_temp_new_i32();
    tcg_gen_qemu_ld_i32(tmp32, t1, flags, MO_LEUL);
    gen_helper_memory_to_s(t0, tmp32);
    tcg_temp_free_i32(tmp32);
}
static inline void gen_qemu_ldl_l(TCGv t0, TCGv t1, int flags)
{
    tcg_gen_qemu_ld_i64(t0, t1, flags, MO_LESL);
    tcg_gen_mov_i64(cpu_lock_addr, t1);
    tcg_gen_mov_i64(cpu_lock_value, t0);
}

static inline void gen_qemu_ldq_l(TCGv t0, TCGv t1, int flags)
{
    tcg_gen_qemu_ld_i64(t0, t1, flags, MO_LEQ);
    tcg_gen_mov_i64(cpu_lock_addr, t1);
    tcg_gen_mov_i64(cpu_lock_value, t0);
}
static inline void gen_load_mem(DisasContext *ctx,
                                void (*tcg_gen_qemu_load)(TCGv t0, TCGv t1,
                                                          int flags),
                                int ra, int rb, int32_t disp16, bool fp,
                                bool clear, bool quad)
{
    TCGv tmp, addr, va;

    /* LDQ_U with ra $31 is UNOP.  Other various loads are forms of
       prefetches, which we can treat as nops.  No worries about
       missed exceptions here.  */
    if (unlikely(ra == 31)) {
        return;
    }

    tmp = tcg_temp_new();
    addr = load_gpr(ctx, rb);

    if (disp16) {
        tcg_gen_addi_i64(tmp, addr, disp16);
        addr = tmp;
    }
    if (clear) {
        tcg_gen_andi_i64(tmp, addr, ~0x7);
        addr = tmp;
    }

    va = (fp ? cpu_fir[ra] : ctx->ir[ra]);
    tcg_gen_qemu_load(va, addr, ctx->mem_idx);

    tcg_temp_free(tmp);
}
static inline void gen_qemu_stf(TCGv t0, TCGv t1, int flags)
{
    TCGv_i32 tmp32 = tcg_temp_new_i32();
    gen_helper_f_to_memory(tmp32, t0);
    tcg_gen_qemu_st_i32(tmp32, t1, flags, MO_LEUL);
    tcg_temp_free_i32(tmp32);
}

static inline void gen_qemu_stg(TCGv t0, TCGv t1, int flags)
{
    TCGv tmp = tcg_temp_new();
    gen_helper_g_to_memory(tmp, t0);
    tcg_gen_qemu_st_i64(tmp, t1, flags, MO_LEQ);
    tcg_temp_free(tmp);
}

static inline void gen_qemu_sts(TCGv t0, TCGv t1, int flags)
{
    TCGv_i32 tmp32 = tcg_temp_new_i32();
    gen_helper_s_to_memory(tmp32, t0);
    tcg_gen_qemu_st_i32(tmp32, t1, flags, MO_LEUL);
    tcg_temp_free_i32(tmp32);
}
static inline void gen_store_mem(DisasContext *ctx,
                                 void (*tcg_gen_qemu_store)(TCGv t0, TCGv t1,
                                                            int flags),
                                 int ra, int rb, int32_t disp16, bool fp,
                                 bool clear)
{
    TCGv tmp, addr, va;

    tmp = tcg_temp_new();
    addr = load_gpr(ctx, rb);

    if (disp16) {
        tcg_gen_addi_i64(tmp, addr, disp16);
        addr = tmp;
    }
    if (clear) {
        tcg_gen_andi_i64(tmp, addr, ~0x7);
        addr = tmp;
    }

    va = (fp ? load_fpr(ctx, ra) : load_gpr(ctx, ra));
    tcg_gen_qemu_store(va, addr, ctx->mem_idx);

    tcg_temp_free(tmp);
}
static ExitStatus gen_store_conditional(DisasContext *ctx, int ra, int rb,
                                        int32_t disp16, int quad)
{
    TCGv addr;

    if (ra == 31) {
        /* ??? Don't bother storing anything.  The user can't tell
           the difference, since the zero register always reads zero.  */
        return NO_EXIT;
    }

#if defined(CONFIG_USER_ONLY)
    addr = cpu_lock_st_addr;
#else
    addr = tcg_temp_local_new();
#endif

    tcg_gen_addi_i64(addr, load_gpr(ctx, rb), disp16);

#if defined(CONFIG_USER_ONLY)
    /* ??? This is handled via a complicated version of compare-and-swap
       in the cpu_loop.  Hopefully one day we'll have a real CAS opcode
       in TCG so that this isn't necessary.  */
    return gen_excp(ctx, quad ? EXCP_STQ_C : EXCP_STL_C, ra);
#else
    /* ??? In system mode we are never multi-threaded, so CAS can be
       implemented via a non-atomic load-compare-store sequence.  */
    {
        TCGLabel *lab_fail, *lab_done;
        TCGv val;

        lab_fail = gen_new_label();
        lab_done = gen_new_label();
        tcg_gen_brcond_i64(TCG_COND_NE, addr, cpu_lock_addr, lab_fail);

        val = tcg_temp_new();
        tcg_gen_qemu_ld_i64(val, addr, ctx->mem_idx, quad ? MO_LEQ : MO_LESL);
        tcg_gen_brcond_i64(TCG_COND_NE, val, cpu_lock_value, lab_fail);

        tcg_gen_qemu_st_i64(ctx->ir[ra], addr, ctx->mem_idx,
                            quad ? MO_LEQ : MO_LEUL);
        tcg_gen_movi_i64(ctx->ir[ra], 1);
        tcg_gen_br(lab_done);

        gen_set_label(lab_fail);
        tcg_gen_movi_i64(ctx->ir[ra], 0);

        gen_set_label(lab_done);
        tcg_gen_movi_i64(cpu_lock_addr, -1);
    }
#endif
}
static bool in_superpage(DisasContext *ctx, int64_t addr)
{
#ifndef CONFIG_USER_ONLY
    return ((ctx->tb->flags & TB_FLAGS_USER_MODE) == 0
            && addr >> TARGET_VIRT_ADDR_SPACE_BITS == -1
            && ((addr >> 41) & 3) == 2);
#else
    return false;
#endif
}
static bool use_goto_tb(DisasContext *ctx, uint64_t dest)
{
    /* Suppress goto_tb in the case of single-stepping and IO.  */
    if ((ctx->tb->cflags & CF_LAST_IO)
        || ctx->singlestep_enabled || singlestep) {
        return false;
    }
#ifndef CONFIG_USER_ONLY
    /* If the destination is in the superpage, the page perms can't change.  */
    if (in_superpage(ctx, dest)) {
        return true;
    }
#endif
    /* Check for the dest on the same page as the start of the TB.  */
    return ((ctx->tb->pc ^ dest) & TARGET_PAGE_MASK) == 0;
}
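
/* Illustrative example (added, not from the original source): Alpha uses
   8KB pages, so TARGET_PAGE_MASK is ~0x1fff.  A TB starting at 0x12000
   may therefore chain directly to 0x13ff8 (same page), but not to 0x14000,
   since (0x12000 ^ 0x14000) & ~0x1fff is non-zero and the other page's
   permissions could differ.  */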
static ExitStatus gen_bdirect(DisasContext *ctx, int ra, int32_t disp)
{
    uint64_t dest = ctx->pc + (disp << 2);

    if (ra != 31) {
        tcg_gen_movi_i64(ctx->ir[ra], ctx->pc);
    }

    /* Notice branch-to-next; used to initialize RA with the PC.  */
    if (disp == 0) {
        return 0;
    } else if (use_goto_tb(ctx, dest)) {
        tcg_gen_goto_tb(0);
        tcg_gen_movi_i64(cpu_pc, dest);
        tcg_gen_exit_tb((uintptr_t)ctx->tb);
        return EXIT_GOTO_TB;
    } else {
        tcg_gen_movi_i64(cpu_pc, dest);
        return EXIT_PC_UPDATED;
    }
}
static ExitStatus gen_bcond_internal(DisasContext *ctx, TCGCond cond,
                                     TCGv cmp, int32_t disp)
{
    uint64_t dest = ctx->pc + (disp << 2);
    TCGLabel *lab_true = gen_new_label();

    if (use_goto_tb(ctx, dest)) {
        tcg_gen_brcondi_i64(cond, cmp, 0, lab_true);

        tcg_gen_goto_tb(0);
        tcg_gen_movi_i64(cpu_pc, ctx->pc);
        tcg_gen_exit_tb((uintptr_t)ctx->tb);

        gen_set_label(lab_true);
        tcg_gen_goto_tb(1);
        tcg_gen_movi_i64(cpu_pc, dest);
        tcg_gen_exit_tb((uintptr_t)ctx->tb + 1);

        return EXIT_GOTO_TB;
    } else {
        TCGv_i64 z = tcg_const_i64(0);
        TCGv_i64 d = tcg_const_i64(dest);
        TCGv_i64 p = tcg_const_i64(ctx->pc);

        tcg_gen_movcond_i64(cond, cpu_pc, cmp, z, d, p);

        tcg_temp_free_i64(z);
        tcg_temp_free_i64(d);
        tcg_temp_free_i64(p);
        return EXIT_PC_UPDATED;
    }
}
static ExitStatus gen_bcond(DisasContext *ctx, TCGCond cond, int ra,
                            int32_t disp, int mask)
{
    TCGv cmp_tmp;

    if (mask) {
        cmp_tmp = tcg_temp_new();
        tcg_gen_andi_i64(cmp_tmp, load_gpr(ctx, ra), 1);
    } else {
        cmp_tmp = load_gpr(ctx, ra);
    }

    return gen_bcond_internal(ctx, cond, cmp_tmp, disp);
}
/* Fold -0.0 for comparison with COND.  */

static void gen_fold_mzero(TCGCond cond, TCGv dest, TCGv src)
{
    uint64_t mzero = 1ull << 63;

    switch (cond) {
    case TCG_COND_LE:
    case TCG_COND_GT:
        /* For <= or >, the -0.0 value directly compares the way we want.  */
        tcg_gen_mov_i64(dest, src);
        break;

    case TCG_COND_EQ:
    case TCG_COND_NE:
        /* For == or !=, we can simply mask off the sign bit and compare.  */
        tcg_gen_andi_i64(dest, src, mzero - 1);
        break;

    case TCG_COND_GE:
    case TCG_COND_LT:
        /* For >= or <, map -0.0 to +0.0 via comparison and mask.  */
        tcg_gen_setcondi_i64(TCG_COND_NE, dest, src, mzero);
        tcg_gen_neg_i64(dest, dest);
        tcg_gen_and_i64(dest, dest, src);
        break;

    default:
        abort();
    }
}
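
/* Worked example (added, not in the original): for a >= or < comparison,
   a -0.0 input is the bit pattern 0x8000000000000000:
     setcond(NE, dest, src, mzero) -> 0   (src equals mzero)
     neg(dest)                     -> 0
     and(dest, src)                -> 0   (i.e. +0.0)
   so -0.0 compares equal to +0.0 as IEEE requires, while any other value
   produces an all-ones mask from the neg and passes through unchanged.  */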
static ExitStatus gen_fbcond(DisasContext *ctx, TCGCond cond, int ra,
                             int32_t disp)
{
    TCGv cmp_tmp = tcg_temp_new();
    gen_fold_mzero(cond, cmp_tmp, load_fpr(ctx, ra));
    return gen_bcond_internal(ctx, cond, cmp_tmp, disp);
}
static void gen_fcmov(DisasContext *ctx, TCGCond cond, int ra, int rb, int rc)
{
    TCGv va, vb, z;

    z = load_zero(ctx);
    vb = load_fpr(ctx, rb);
    va = tcg_temp_new();
    gen_fold_mzero(cond, va, load_fpr(ctx, ra));

    tcg_gen_movcond_i64(cond, dest_fpr(ctx, rc), va, z, vb, load_fpr(ctx, rc));

    tcg_temp_free(va);
}
#define QUAL_RM_N       0x080   /* Round mode nearest even */
#define QUAL_RM_C       0x000   /* Round mode chopped */
#define QUAL_RM_M       0x040   /* Round mode minus infinity */
#define QUAL_RM_D       0x0c0   /* Round mode dynamic */
#define QUAL_RM_MASK    0x0c0

#define QUAL_U          0x100   /* Underflow enable (fp output) */
#define QUAL_V          0x100   /* Overflow enable (int output) */
#define QUAL_S          0x400   /* Software completion enable */
#define QUAL_I          0x200   /* Inexact detection enable */
static void gen_qual_roundmode(DisasContext *ctx, int fn11)
{
    TCGv_i32 tmp;

    fn11 &= QUAL_RM_MASK;
    if (fn11 == ctx->tb_rm) {
        return;
    }
    ctx->tb_rm = fn11;

    tmp = tcg_temp_new_i32();
    switch (fn11) {
    case QUAL_RM_N:
        tcg_gen_movi_i32(tmp, float_round_nearest_even);
        break;
    case QUAL_RM_C:
        tcg_gen_movi_i32(tmp, float_round_to_zero);
        break;
    case QUAL_RM_M:
        tcg_gen_movi_i32(tmp, float_round_down);
        break;
    case QUAL_RM_D:
        tcg_gen_ld8u_i32(tmp, cpu_env,
                         offsetof(CPUAlphaState, fpcr_dyn_round));
        break;
    }

#if defined(CONFIG_SOFTFLOAT_INLINE)
    /* ??? The "fpu/softfloat.h" interface is to call set_float_rounding_mode.
       With CONFIG_SOFTFLOAT that expands to an out-of-line call that just
       sets the one field.  */
    tcg_gen_st8_i32(tmp, cpu_env,
                    offsetof(CPUAlphaState, fp_status.float_rounding_mode));
#else
    gen_helper_setroundmode(tmp);
#endif
    tcg_temp_free_i32(tmp);
}
static void gen_qual_flushzero(DisasContext *ctx, int fn11)
{
    TCGv_i32 tmp;

    fn11 &= QUAL_U;
    if (fn11 == ctx->tb_ftz) {
        return;
    }
    ctx->tb_ftz = fn11;

    tmp = tcg_temp_new_i32();
    if (fn11) {
        /* Underflow is enabled, use the FPCR setting.  */
        tcg_gen_ld8u_i32(tmp, cpu_env,
                         offsetof(CPUAlphaState, fpcr_flush_to_zero));
    } else {
        /* Underflow is disabled, force flush-to-zero.  */
        tcg_gen_movi_i32(tmp, 1);
    }

#if defined(CONFIG_SOFTFLOAT_INLINE)
    tcg_gen_st8_i32(tmp, cpu_env,
                    offsetof(CPUAlphaState, fp_status.flush_to_zero));
#else
    gen_helper_setflushzero(tmp);
#endif
    tcg_temp_free_i32(tmp);
}
static TCGv gen_ieee_input(DisasContext *ctx, int reg, int fn11, int is_cmp)
{
    TCGv val;

    if (unlikely(reg == 31)) {
        val = load_zero(ctx);
    } else {
        val = cpu_fir[reg];
        if ((fn11 & QUAL_S) == 0) {
            if (is_cmp) {
                gen_helper_ieee_input_cmp(cpu_env, val);
            } else {
                gen_helper_ieee_input(cpu_env, val);
            }
        } else {
#ifndef CONFIG_USER_ONLY
            /* In system mode, raise exceptions for denormals like real
               hardware.  In user mode, proceed as if the OS completion
               handler is handling the denormal as per spec.  */
            gen_helper_ieee_input_s(cpu_env, val);
#endif
        }
    }
    return val;
}
static void gen_fp_exc_raise(int rc, int fn11)
{
    /* ??? We ought to be able to do something with imprecise exceptions.
       E.g. notice we're still in the trap shadow of something within the
       TB and do not generate the code to signal the exception; end the TB
       when an exception is forced to arrive, either by consumption of a
       register value or TRAPB or EXCB.  */

    uint32_t ignore = 0;
    TCGv_i32 reg, ign;

    if (!(fn11 & QUAL_U)) {
        /* Note that QUAL_U == QUAL_V, so ignore either.  */
        ignore |= FPCR_UNF | FPCR_IOV;
    }
    if (!(fn11 & QUAL_I)) {
        ignore |= FPCR_INE;
    }
    ign = tcg_const_i32(ignore);

    /* ??? Pass in the regno of the destination so that the helper can
       set EXC_MASK, which contains a bitmask of destination registers
       that have caused arithmetic traps.  A simple userspace emulation
       does not require this.  We do need it for a guest kernel's entArith,
       or if we were to do something clever with imprecise exceptions.  */
    reg = tcg_const_i32(rc + 32);
    if (fn11 & QUAL_S) {
        gen_helper_fp_exc_raise_s(cpu_env, ign, reg);
    } else {
        gen_helper_fp_exc_raise(cpu_env, ign, reg);
    }

    tcg_temp_free_i32(reg);
    tcg_temp_free_i32(ign);
}
static void gen_cvtlq(TCGv vc, TCGv vb)
{
    TCGv tmp = tcg_temp_new();

    /* The arithmetic right shift here, plus the sign-extended mask below
       yields a sign-extended result without an explicit ext32s_i64.  */
    tcg_gen_sari_i64(tmp, vb, 32);
    tcg_gen_shri_i64(vc, vb, 29);
    tcg_gen_andi_i64(tmp, tmp, (int32_t)0xc0000000);
    tcg_gen_andi_i64(vc, vc, 0x3fffffff);
    tcg_gen_or_i64(vc, vc, tmp);
}
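
/* Illustrative note (added, not in the original): a longword held in an
   FP register keeps its bits <31:30> at register bits <63:62> and bits
   <29:0> at register bits <58:29>.  The two shifts above separate those
   fields:
     tmp = (vb >> 32) & 0xffffffffc0000000   -- bits <31:30>, sign-extended
     vc  = (vb >> 29) & 0x000000003fffffff   -- bits <29:0>
   and the final OR reassembles the canonical sign-extended 32-bit value.  */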
static void gen_ieee_arith2(DisasContext *ctx,
                            void (*helper)(TCGv, TCGv_ptr, TCGv),
                            int rb, int rc, int fn11)
{
    TCGv vb;

    gen_qual_roundmode(ctx, fn11);
    gen_qual_flushzero(ctx, fn11);

    vb = gen_ieee_input(ctx, rb, fn11, 0);
    helper(dest_fpr(ctx, rc), cpu_env, vb);

    gen_fp_exc_raise(rc, fn11);
}

#define IEEE_ARITH2(name)                                       \
static inline void glue(gen_, name)(DisasContext *ctx,          \
                                    int rb, int rc, int fn11)   \
{                                                               \
    gen_ieee_arith2(ctx, gen_helper_##name, rb, rc, fn11);      \
}
IEEE_ARITH2(sqrts)
IEEE_ARITH2(sqrtt)
IEEE_ARITH2(cvtts)
IEEE_ARITH2(cvtst)
static void gen_cvttq(DisasContext *ctx, int rb, int rc, int fn11)
{
    TCGv vb, vc;

    /* No need to set flushzero, since we have an integer output.  */
    vb = gen_ieee_input(ctx, rb, fn11, 0);
    vc = dest_fpr(ctx, rc);

    /* Almost all integer conversions use cropped rounding;
       special case that.  */
    if ((fn11 & QUAL_RM_MASK) == QUAL_RM_C) {
        gen_helper_cvttq_c(vc, cpu_env, vb);
    } else {
        gen_qual_roundmode(ctx, fn11);
        gen_helper_cvttq(vc, cpu_env, vb);
    }
    gen_fp_exc_raise(rc, fn11);
}
static void gen_ieee_intcvt(DisasContext *ctx,
                            void (*helper)(TCGv, TCGv_ptr, TCGv),
                            int rb, int rc, int fn11)
{
    TCGv vb, vc;

    gen_qual_roundmode(ctx, fn11);
    vb = load_fpr(ctx, rb);
    vc = dest_fpr(ctx, rc);

    /* The only exception that can be raised by integer conversion
       is inexact.  Thus we only need to worry about exceptions when
       inexact handling is requested.  */
    if (fn11 & QUAL_I) {
        helper(vc, cpu_env, vb);
        gen_fp_exc_raise(rc, fn11);
    } else {
        helper(vc, cpu_env, vb);
    }
}

#define IEEE_INTCVT(name)                                       \
static inline void glue(gen_, name)(DisasContext *ctx,          \
                                    int rb, int rc, int fn11)   \
{                                                               \
    gen_ieee_intcvt(ctx, gen_helper_##name, rb, rc, fn11);      \
}
IEEE_INTCVT(cvtqs)
IEEE_INTCVT(cvtqt)
static void gen_cpy_mask(TCGv vc, TCGv va, TCGv vb, bool inv_a, uint64_t mask)
{
    TCGv vmask = tcg_const_i64(mask);
    TCGv tmp = tcg_temp_new_i64();

    if (inv_a) {
        tcg_gen_andc_i64(tmp, vmask, va);
    } else {
        tcg_gen_and_i64(tmp, va, vmask);
    }

    tcg_gen_andc_i64(vc, vb, vmask);
    tcg_gen_or_i64(vc, vc, tmp);

    tcg_temp_free(vmask);
    tcg_temp_free(tmp);
}
static void gen_ieee_arith3(DisasContext *ctx,
                            void (*helper)(TCGv, TCGv_ptr, TCGv, TCGv),
                            int ra, int rb, int rc, int fn11)
{
    TCGv va, vb, vc;

    gen_qual_roundmode(ctx, fn11);
    gen_qual_flushzero(ctx, fn11);

    va = gen_ieee_input(ctx, ra, fn11, 0);
    vb = gen_ieee_input(ctx, rb, fn11, 0);
    vc = dest_fpr(ctx, rc);
    helper(vc, cpu_env, va, vb);

    gen_fp_exc_raise(rc, fn11);
}

#define IEEE_ARITH3(name)                                               \
static inline void glue(gen_, name)(DisasContext *ctx,                  \
                                    int ra, int rb, int rc, int fn11)   \
{                                                                       \
    gen_ieee_arith3(ctx, gen_helper_##name, ra, rb, rc, fn11);          \
}
IEEE_ARITH3(adds)
IEEE_ARITH3(subs)
IEEE_ARITH3(muls)
IEEE_ARITH3(divs)
IEEE_ARITH3(addt)
IEEE_ARITH3(subt)
IEEE_ARITH3(mult)
IEEE_ARITH3(divt)
static void gen_ieee_compare(DisasContext *ctx,
                             void (*helper)(TCGv, TCGv_ptr, TCGv, TCGv),
                             int ra, int rb, int rc, int fn11)
{
    TCGv va, vb, vc;

    va = gen_ieee_input(ctx, ra, fn11, 1);
    vb = gen_ieee_input(ctx, rb, fn11, 1);
    vc = dest_fpr(ctx, rc);
    helper(vc, cpu_env, va, vb);

    gen_fp_exc_raise(rc, fn11);
}

#define IEEE_CMP3(name)                                                 \
static inline void glue(gen_, name)(DisasContext *ctx,                  \
                                    int ra, int rb, int rc, int fn11)   \
{                                                                       \
    gen_ieee_compare(ctx, gen_helper_##name, ra, rb, rc, fn11);         \
}
IEEE_CMP3(cmptun)
IEEE_CMP3(cmpteq)
IEEE_CMP3(cmptlt)
IEEE_CMP3(cmptle)
static inline uint64_t zapnot_mask(uint8_t lit)
{
    uint64_t mask = 0;
    int i;

    for (i = 0; i < 8; ++i) {
        if ((lit >> i) & 1) {
            mask |= 0xffull << (i * 8);
        }
    }
    return mask;
}

/* Implement zapnot with an immediate operand, which expands to some
   form of immediate AND.  This is a basic building block in the
   definition of many of the other byte manipulation instructions.  */
static void gen_zapnoti(TCGv dest, TCGv src, uint8_t lit)
{
    switch (lit) {
    case 0x00:
        tcg_gen_movi_i64(dest, 0);
        break;
    case 0x01:
        tcg_gen_ext8u_i64(dest, src);
        break;
    case 0x03:
        tcg_gen_ext16u_i64(dest, src);
        break;
    case 0x0f:
        tcg_gen_ext32u_i64(dest, src);
        break;
    case 0xff:
        tcg_gen_mov_i64(dest, src);
        break;
    default:
        tcg_gen_andi_i64(dest, src, zapnot_mask(lit));
        break;
    }
}
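
/* Worked example (added, not in the original): each bit of the ZAPNOT
   literal keeps one byte lane of the source:
     zapnot_mask(0x01) == 0x00000000000000ff
     zapnot_mask(0x0f) == 0x00000000ffffffff
     zapnot_mask(0x80) == 0xff00000000000000
   which is why gen_zapnoti can special-case 0x01/0x03/0x0f/0xff as the
   ordinary 8/16/32/64-bit zero-extensions.  */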
/* EXTWH, EXTLH, EXTQH */
static void gen_ext_h(DisasContext *ctx, TCGv vc, TCGv va, int rb, bool islit,
                      uint8_t lit, uint8_t byte_mask)
{
    if (islit) {
        tcg_gen_shli_i64(vc, va, (64 - lit * 8) & 0x3f);
    } else {
        TCGv tmp = tcg_temp_new();
        tcg_gen_shli_i64(tmp, load_gpr(ctx, rb), 3);
        tcg_gen_neg_i64(tmp, tmp);
        tcg_gen_andi_i64(tmp, tmp, 0x3f);
        tcg_gen_shl_i64(vc, va, tmp);
        tcg_temp_free(tmp);
    }
    gen_zapnoti(vc, vc, byte_mask);
}
/* EXTBL, EXTWL, EXTLL, EXTQL */
static void gen_ext_l(DisasContext *ctx, TCGv vc, TCGv va, int rb, bool islit,
                      uint8_t lit, uint8_t byte_mask)
{
    if (islit) {
        tcg_gen_shri_i64(vc, va, (lit & 7) * 8);
    } else {
        TCGv tmp = tcg_temp_new();
        tcg_gen_andi_i64(tmp, load_gpr(ctx, rb), 7);
        tcg_gen_shli_i64(tmp, tmp, 3);
        tcg_gen_shr_i64(vc, va, tmp);
        tcg_temp_free(tmp);
    }
    gen_zapnoti(vc, vc, byte_mask);
}
/* INSWH, INSLH, INSQH */
static void gen_ins_h(DisasContext *ctx, TCGv vc, TCGv va, int rb, bool islit,
                      uint8_t lit, uint8_t byte_mask)
{
    TCGv tmp = tcg_temp_new();

    /* The instruction description has us left-shift the byte mask and extract
       bits <15:8> and apply that zap at the end.  This is equivalent to simply
       performing the zap first and shifting afterward.  */
    gen_zapnoti(tmp, va, byte_mask);

    if (islit) {
        lit &= 7;
        if (unlikely(lit == 0)) {
            tcg_gen_movi_i64(vc, 0);
        } else {
            tcg_gen_shri_i64(vc, tmp, 64 - lit * 8);
        }
    } else {
        TCGv shift = tcg_temp_new();

        /* If (B & 7) == 0, we need to shift by 64 and leave a zero.  Do this
           portably by splitting the shift into two parts: shift_count-1 and 1.
           Arrange for the -1 by using ones-complement instead of
           twos-complement in the negation: ~(B * 8) & 63.  */

        tcg_gen_shli_i64(shift, load_gpr(ctx, rb), 3);
        tcg_gen_not_i64(shift, shift);
        tcg_gen_andi_i64(shift, shift, 0x3f);

        tcg_gen_shr_i64(vc, tmp, shift);
        tcg_gen_shri_i64(vc, vc, 1);
        tcg_temp_free(shift);
    }
    tcg_temp_free(tmp);
}
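
/* Worked example (added, not in the original): for a register byte offset
   B, the code computes s = ~(B * 8) & 63 and then shifts right by s and
   by 1 more, i.e. by (64 - B * 8) mod 64 in total:
     B = 2:  s = ~16 & 63 = 47, total shift 48 = 64 - 16
     B = 0:  s = ~0  & 63 = 63, total shift 64, producing the required zero
             without ever using an (undefined) shift count of 64.  */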
/* INSBL, INSWL, INSLL, INSQL */
static void gen_ins_l(DisasContext *ctx, TCGv vc, TCGv va, int rb, bool islit,
                      uint8_t lit, uint8_t byte_mask)
{
    TCGv tmp = tcg_temp_new();

    /* The instruction description has us left-shift the byte mask
       the same number of byte slots as the data and apply the zap
       at the end.  This is equivalent to simply performing the zap
       first and shifting afterward.  */
    gen_zapnoti(tmp, va, byte_mask);

    if (islit) {
        tcg_gen_shli_i64(vc, tmp, (lit & 7) * 8);
    } else {
        TCGv shift = tcg_temp_new();
        tcg_gen_andi_i64(shift, load_gpr(ctx, rb), 7);
        tcg_gen_shli_i64(shift, shift, 3);
        tcg_gen_shl_i64(vc, tmp, shift);
        tcg_temp_free(shift);
    }
    tcg_temp_free(tmp);
}
/* MSKWH, MSKLH, MSKQH */
static void gen_msk_h(DisasContext *ctx, TCGv vc, TCGv va, int rb, bool islit,
                      uint8_t lit, uint8_t byte_mask)
{
    if (islit) {
        gen_zapnoti(vc, va, ~((byte_mask << (lit & 7)) >> 8));
    } else {
        TCGv shift = tcg_temp_new();
        TCGv mask = tcg_temp_new();

        /* The instruction description is as above, where the byte_mask
           is shifted left, and then we extract bits <15:8>.  This can be
           emulated with a right-shift on the expanded byte mask.  This
           requires extra care because for an input <2:0> == 0 we need a
           shift of 64 bits in order to generate a zero.  This is done by
           splitting the shift into two parts, the variable shift - 1
           followed by a constant 1 shift.  The code we expand below is
           equivalent to ~(B * 8) & 63.  */

        tcg_gen_shli_i64(shift, load_gpr(ctx, rb), 3);
        tcg_gen_not_i64(shift, shift);
        tcg_gen_andi_i64(shift, shift, 0x3f);
        tcg_gen_movi_i64(mask, zapnot_mask (byte_mask));
        tcg_gen_shr_i64(mask, mask, shift);
        tcg_gen_shri_i64(mask, mask, 1);

        tcg_gen_andc_i64(vc, va, mask);

        tcg_temp_free(mask);
        tcg_temp_free(shift);
    }
}
/* MSKBL, MSKWL, MSKLL, MSKQL */
static void gen_msk_l(DisasContext *ctx, TCGv vc, TCGv va, int rb, bool islit,
                      uint8_t lit, uint8_t byte_mask)
{
    if (islit) {
        gen_zapnoti(vc, va, ~(byte_mask << (lit & 7)));
    } else {
        TCGv shift = tcg_temp_new();
        TCGv mask = tcg_temp_new();

        tcg_gen_andi_i64(shift, load_gpr(ctx, rb), 7);
        tcg_gen_shli_i64(shift, shift, 3);
        tcg_gen_movi_i64(mask, zapnot_mask(byte_mask));
        tcg_gen_shl_i64(mask, mask, shift);

        tcg_gen_andc_i64(vc, va, mask);

        tcg_temp_free(mask);
        tcg_temp_free(shift);
    }
}
static void gen_rx(DisasContext *ctx, int ra, int set)
{
    TCGv_i32 tmp;

    if (ra != 31) {
        tcg_gen_ld8u_i64(ctx->ir[ra], cpu_env,
                         offsetof(CPUAlphaState, intr_flag));
    }

    tmp = tcg_const_i32(set);
    tcg_gen_st8_i32(tmp, cpu_env, offsetof(CPUAlphaState, intr_flag));
    tcg_temp_free_i32(tmp);
}
static ExitStatus gen_call_pal(DisasContext *ctx, int palcode)
{
    /* We're emulating OSF/1 PALcode.  Many of these are trivial access
       to internal cpu registers.  */

    /* Unprivileged PAL call */
    if (palcode >= 0x80 && palcode < 0xC0) {

        /* No-op inside QEMU.  */

        tcg_gen_ld_i64(ctx->ir[IR_V0], cpu_env,
                       offsetof(CPUAlphaState, unique));

        tcg_gen_st_i64(ctx->ir[IR_A0], cpu_env,
                       offsetof(CPUAlphaState, unique));
#ifndef CONFIG_USER_ONLY
    /* Privileged PAL code */
    if (palcode < 0x40 && (ctx->tb->flags & TB_FLAGS_USER_MODE) == 0) {

        /* No-op inside QEMU.  */

        /* No-op inside QEMU.  */

        tcg_gen_st_i64(ctx->ir[IR_A0], cpu_env,
                       offsetof(CPUAlphaState, vptptr));

        tcg_gen_st_i64(ctx->ir[IR_A0], cpu_env,
                       offsetof(CPUAlphaState, sysval));

        tcg_gen_ld_i64(ctx->ir[IR_V0], cpu_env,
                       offsetof(CPUAlphaState, sysval));

        /* Note that we already know we're in kernel mode, so we know
           that PS only contains the 3 IPL bits.  */
        tcg_gen_ld8u_i64(ctx->ir[IR_V0], cpu_env,
                         offsetof(CPUAlphaState, ps));

        /* But make sure and store only the 3 IPL bits from the user.  */
        tmp = tcg_temp_new();
        tcg_gen_andi_i64(tmp, ctx->ir[IR_A0], PS_INT_MASK);
        tcg_gen_st8_i64(tmp, cpu_env, offsetof(CPUAlphaState, ps));

        tcg_gen_ld8u_i64(ctx->ir[IR_V0], cpu_env,
                         offsetof(CPUAlphaState, ps));

        tcg_gen_st_i64(ctx->ir[IR_A0], cpu_env,
                       offsetof(CPUAlphaState, usp));

        tcg_gen_ld_i64(ctx->ir[IR_V0], cpu_env,
                       offsetof(CPUAlphaState, usp));

        tcg_gen_ld32s_i64(ctx->ir[IR_V0], cpu_env,
            -offsetof(AlphaCPU, env) + offsetof(CPUState, cpu_index));

    return gen_invalid(ctx);
#ifdef CONFIG_USER_ONLY
    return gen_excp(ctx, EXCP_CALL_PAL, palcode);
#else
    TCGv tmp = tcg_temp_new();
    uint64_t exc_addr = ctx->pc;
    uint64_t entry = ctx->palbr;

    if (ctx->tb->flags & TB_FLAGS_PAL_MODE) {

        tcg_gen_movi_i64(tmp, 1);
        tcg_gen_st8_i64(tmp, cpu_env, offsetof(CPUAlphaState, pal_mode));

    tcg_gen_movi_i64(tmp, exc_addr);
    tcg_gen_st_i64(tmp, cpu_env, offsetof(CPUAlphaState, exc_addr));

    entry += (palcode & 0x80
              ? 0x2000 + (palcode - 0x80) * 64
              : 0x1000 + palcode * 64);
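
    /* Illustrative note (added, not in the original): with this OSF/1
       layout, privileged calls enter at palbr + 0x1000 + 64 * palcode and
       unprivileged calls at palbr + 0x2000 + 64 * (palcode - 0x80); e.g.
       callsys (palcode 0x83) enters at palbr + 0x20c0.  */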
    /* Since the destination is running in PALmode, we don't really
       need the page permissions check.  We'll see the existence of
       the page when we create the TB, and we'll flush all TBs if
       we change the PAL base register.  */
    if (!ctx->singlestep_enabled && !(ctx->tb->cflags & CF_LAST_IO)) {
        tcg_gen_goto_tb(0);
        tcg_gen_movi_i64(cpu_pc, entry);
        tcg_gen_exit_tb((uintptr_t)ctx->tb);
        return EXIT_GOTO_TB;
    } else {
        tcg_gen_movi_i64(cpu_pc, entry);
        return EXIT_PC_UPDATED;
    }
#endif
}
#ifndef CONFIG_USER_ONLY

#define PR_BYTE         0x100000
#define PR_LONG         0x200000

static int cpu_pr_data(int pr)
{
    switch (pr) {
    case  0: return offsetof(CPUAlphaState, ps) | PR_BYTE;
    case  1: return offsetof(CPUAlphaState, fen) | PR_BYTE;
    case  2: return offsetof(CPUAlphaState, pcc_ofs) | PR_LONG;
    case  3: return offsetof(CPUAlphaState, trap_arg0);
    case  4: return offsetof(CPUAlphaState, trap_arg1);
    case  5: return offsetof(CPUAlphaState, trap_arg2);
    case  6: return offsetof(CPUAlphaState, exc_addr);
    case  7: return offsetof(CPUAlphaState, palbr);
    case  8: return offsetof(CPUAlphaState, ptbr);
    case  9: return offsetof(CPUAlphaState, vptptr);
    case 10: return offsetof(CPUAlphaState, unique);
    case 11: return offsetof(CPUAlphaState, sysval);
    case 12: return offsetof(CPUAlphaState, usp);

        return offsetof(CPUAlphaState, scratch[pr - 40]);

        return offsetof(CPUAlphaState, alarm_expire);
    }
    return 0;
}
gen_mfpr(DisasContext
*ctx
, TCGv va
, int regno
)
1292 void (*helper
)(TCGv
);
1297 /* Accessing the "non-shadow" general registers. */
1298 regno
= regno
== 39 ? 25 : regno
- 32 + 8;
1299 tcg_gen_mov_i64(va
, cpu_std_ir
[regno
]);
1302 case 250: /* WALLTIME */
1303 helper
= gen_helper_get_walltime
;
1305 case 249: /* VMTIME */
1306 helper
= gen_helper_get_vmtime
;
1312 return EXIT_PC_STALE
;
1319 /* The basic registers are data only, and unknown registers
1320 are read-zero, write-ignore. */
1321 data
= cpu_pr_data(regno
);
1323 tcg_gen_movi_i64(va
, 0);
1324 } else if (data
& PR_BYTE
) {
1325 tcg_gen_ld8u_i64(va
, cpu_env
, data
& ~PR_BYTE
);
1326 } else if (data
& PR_LONG
) {
1327 tcg_gen_ld32s_i64(va
, cpu_env
, data
& ~PR_LONG
);
1329 tcg_gen_ld_i64(va
, cpu_env
, data
);
1337 static ExitStatus
gen_mtpr(DisasContext
*ctx
, TCGv vb
, int regno
)
1345 gen_helper_tbia(cpu_env
);
1350 gen_helper_tbis(cpu_env
, vb
);
1355 tmp
= tcg_const_i64(1);
1356 tcg_gen_st32_i64(tmp
, cpu_env
, -offsetof(AlphaCPU
, env
) +
1357 offsetof(CPUState
, halted
));
1358 return gen_excp(ctx
, EXCP_HLT
, 0);
1362 gen_helper_halt(vb
);
1363 return EXIT_PC_STALE
;
1367 gen_helper_set_alarm(cpu_env
, vb
);
1372 tcg_gen_st_i64(vb
, cpu_env
, offsetof(CPUAlphaState
, palbr
));
1373 /* Changing the PAL base register implies un-chaining all of the TBs
1374 that ended with a CALL_PAL. Since the base register usually only
1375 changes during boot, flushing everything works well. */
1376 gen_helper_tb_flush(cpu_env
);
1377 return EXIT_PC_STALE
;
1380 /* Accessing the "non-shadow" general registers. */
1381 regno
= regno
== 39 ? 25 : regno
- 32 + 8;
1382 tcg_gen_mov_i64(cpu_std_ir
[regno
], vb
);
1386 /* The basic registers are data only, and unknown registers
1387 are read-zero, write-ignore. */
1388 data
= cpu_pr_data(regno
);
1390 if (data
& PR_BYTE
) {
1391 tcg_gen_st8_i64(vb
, cpu_env
, data
& ~PR_BYTE
);
1392 } else if (data
& PR_LONG
) {
1393 tcg_gen_st32_i64(vb
, cpu_env
, data
& ~PR_LONG
);
1395 tcg_gen_st_i64(vb
, cpu_env
, data
);
1403 #endif /* !USER_ONLY*/
#define REQUIRE_NO_LIT                          \
    do {                                        \
        if (real_islit) {                       \
            goto invalid_opc;                   \
        }                                       \
    } while (0)

#define REQUIRE_TB_FLAG(FLAG)                   \
    do {                                        \
        if ((ctx->tb->flags & (FLAG)) == 0) {   \
            goto invalid_opc;                   \
        }                                       \
    } while (0)

#define REQUIRE_REG_31(WHICH)                   \
    do {                                        \
        if (WHICH != 31) {                      \
            goto invalid_opc;                   \
        }                                       \
    } while (0)
static ExitStatus translate_one(DisasContext *ctx, uint32_t insn)
{
    int32_t disp21, disp16, disp12 __attribute__((unused));
    uint16_t fn11;
    uint8_t opc, ra, rb, rc, fpfn, fn7, lit;
    bool islit, real_islit;
    TCGv va, vb, vc, tmp, tmp2;
    TCGv_i32 t32;
    ExitStatus ret;

    /* Decode all instruction fields */
    opc = extract32(insn, 26, 6);
    ra = extract32(insn, 21, 5);
    rb = extract32(insn, 16, 5);
    rc = extract32(insn, 0, 5);
    real_islit = islit = extract32(insn, 12, 1);
    lit = extract32(insn, 13, 8);

    disp21 = sextract32(insn, 0, 21);
    disp16 = sextract32(insn, 0, 16);
    disp12 = sextract32(insn, 0, 12);

    fn11 = extract32(insn, 5, 11);
    fpfn = extract32(insn, 5, 6);
    fn7 = extract32(insn, 5, 7);
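
    /* Illustrative note (added, not in the original source): the fields
       extracted above follow the Alpha instruction formats:
         <31:26> opc     <25:21> ra      <20:16> rb      <4:0> rc
         <15:0>  disp16 (memory format)  <20:0> disp21 (branch format)
         <12>    islit   <20:13> lit     <11:5> fn7 / <15:5> fn11
       For the literal form of an operate instruction, bit 12 is set and
       the 8-bit constant lives in bits <20:13>.  */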
    if (rb == 31 && !islit) {

        ret = gen_call_pal(ctx, insn & 0x03ffffff);

        disp16 = (uint32_t)disp16 << 16;

        va = dest_gpr(ctx, ra);
        /* It's worth special-casing immediate loads.  */
        tcg_gen_movi_i64(va, disp16);
        tcg_gen_addi_i64(va, load_gpr(ctx, rb), disp16);

        REQUIRE_TB_FLAG(TB_FLAGS_AMASK_BWX);
        gen_load_mem(ctx, &tcg_gen_qemu_ld8u, ra, rb, disp16, 0, 0);

        gen_load_mem(ctx, &tcg_gen_qemu_ld64, ra, rb, disp16, 0, 1);

        REQUIRE_TB_FLAG(TB_FLAGS_AMASK_BWX);
        gen_load_mem(ctx, &tcg_gen_qemu_ld16u, ra, rb, disp16, 0, 0);

        REQUIRE_TB_FLAG(TB_FLAGS_AMASK_BWX);
        gen_store_mem(ctx, &tcg_gen_qemu_st16, ra, rb, disp16, 0, 0);

        REQUIRE_TB_FLAG(TB_FLAGS_AMASK_BWX);
        gen_store_mem(ctx, &tcg_gen_qemu_st8, ra, rb, disp16, 0, 0);

        gen_store_mem(ctx, &tcg_gen_qemu_st64, ra, rb, disp16, 0, 1);
        vc = dest_gpr(ctx, rc);
        vb = load_gpr_lit(ctx, rb, lit, islit);

        /* Special case ADDL as SEXTL.  */
        tcg_gen_ext32s_i64(vc, vb);

        /* Special case SUBQ as NEGQ.  */
        tcg_gen_neg_i64(vc, vb);

        va = load_gpr(ctx, ra);

        tcg_gen_add_i64(vc, va, vb);
        tcg_gen_ext32s_i64(vc, vc);

        tmp = tcg_temp_new();
        tcg_gen_shli_i64(tmp, va, 2);
        tcg_gen_add_i64(tmp, tmp, vb);
        tcg_gen_ext32s_i64(vc, tmp);

        tcg_gen_sub_i64(vc, va, vb);
        tcg_gen_ext32s_i64(vc, vc);

        tmp = tcg_temp_new();
        tcg_gen_shli_i64(tmp, va, 2);
        tcg_gen_sub_i64(tmp, tmp, vb);
        tcg_gen_ext32s_i64(vc, tmp);

        /* Special case 0 >= X as X == 0.  */
        gen_helper_cmpbe0(vc, vb);
        gen_helper_cmpbge(vc, va, vb);

        tmp = tcg_temp_new();
        tcg_gen_shli_i64(tmp, va, 3);
        tcg_gen_add_i64(tmp, tmp, vb);
        tcg_gen_ext32s_i64(vc, tmp);

        tmp = tcg_temp_new();
        tcg_gen_shli_i64(tmp, va, 3);
        tcg_gen_sub_i64(tmp, tmp, vb);
        tcg_gen_ext32s_i64(vc, tmp);

        tcg_gen_setcond_i64(TCG_COND_LTU, vc, va, vb);

        tcg_gen_add_i64(vc, va, vb);

        tmp = tcg_temp_new();
        tcg_gen_shli_i64(tmp, va, 2);
        tcg_gen_add_i64(vc, tmp, vb);

        tcg_gen_sub_i64(vc, va, vb);

        tmp = tcg_temp_new();
        tcg_gen_shli_i64(tmp, va, 2);
        tcg_gen_sub_i64(vc, tmp, vb);

        tcg_gen_setcond_i64(TCG_COND_EQ, vc, va, vb);

        tmp = tcg_temp_new();
        tcg_gen_shli_i64(tmp, va, 3);
        tcg_gen_add_i64(vc, tmp, vb);

        tmp = tcg_temp_new();
        tcg_gen_shli_i64(tmp, va, 3);
        tcg_gen_sub_i64(vc, tmp, vb);

        tcg_gen_setcond_i64(TCG_COND_LEU, vc, va, vb);

        tmp = tcg_temp_new();
        tcg_gen_ext32s_i64(tmp, va);
        tcg_gen_ext32s_i64(vc, vb);
        tcg_gen_add_i64(tmp, tmp, vc);
        tcg_gen_ext32s_i64(vc, tmp);
        gen_helper_check_overflow(cpu_env, vc, tmp);

        tmp = tcg_temp_new();
        tcg_gen_ext32s_i64(tmp, va);
        tcg_gen_ext32s_i64(vc, vb);
        tcg_gen_sub_i64(tmp, tmp, vc);
        tcg_gen_ext32s_i64(vc, tmp);
        gen_helper_check_overflow(cpu_env, vc, tmp);

        tcg_gen_setcond_i64(TCG_COND_LT, vc, va, vb);

        tmp = tcg_temp_new();
        tmp2 = tcg_temp_new();
        tcg_gen_eqv_i64(tmp, va, vb);
        tcg_gen_mov_i64(tmp2, va);
        tcg_gen_add_i64(vc, va, vb);
        tcg_gen_xor_i64(tmp2, tmp2, vc);
        tcg_gen_and_i64(tmp, tmp, tmp2);
        tcg_gen_shri_i64(tmp, tmp, 63);
        tcg_gen_movi_i64(tmp2, 0);
        gen_helper_check_overflow(cpu_env, tmp, tmp2);
        tcg_temp_free(tmp2);

        tmp = tcg_temp_new();
        tmp2 = tcg_temp_new();
        tcg_gen_xor_i64(tmp, va, vb);
        tcg_gen_mov_i64(tmp2, va);
        tcg_gen_sub_i64(vc, va, vb);
        tcg_gen_xor_i64(tmp2, tmp2, vc);
        tcg_gen_and_i64(tmp, tmp, tmp2);
        tcg_gen_shri_i64(tmp, tmp, 63);
        tcg_gen_movi_i64(tmp2, 0);
        gen_helper_check_overflow(cpu_env, tmp, tmp2);
        tcg_temp_free(tmp2);

        tcg_gen_setcond_i64(TCG_COND_LE, vc, va, vb);
        /* Special case BIS as NOP.  */
        /* Special case BIS as MOV.  */
        vc = dest_gpr(ctx, rc);
        tcg_gen_movi_i64(vc, lit);
        tcg_gen_mov_i64(vc, load_gpr(ctx, rb));

        vc = dest_gpr(ctx, rc);
        vb = load_gpr_lit(ctx, rb, lit, islit);

        if (fn7 == 0x28 && ra == 31) {
            /* Special case ORNOT as NOT.  */
            tcg_gen_not_i64(vc, vb);

        va = load_gpr(ctx, ra);

        tcg_gen_and_i64(vc, va, vb);

        tcg_gen_andc_i64(vc, va, vb);

        tmp = tcg_temp_new();
        tcg_gen_andi_i64(tmp, va, 1);
        tcg_gen_movcond_i64(TCG_COND_NE, vc, tmp, load_zero(ctx),
                            vb, load_gpr(ctx, rc));

        tmp = tcg_temp_new();
        tcg_gen_andi_i64(tmp, va, 1);
        tcg_gen_movcond_i64(TCG_COND_EQ, vc, tmp, load_zero(ctx),
                            vb, load_gpr(ctx, rc));

        tcg_gen_or_i64(vc, va, vb);

        tcg_gen_movcond_i64(TCG_COND_EQ, vc, va, load_zero(ctx),
                            vb, load_gpr(ctx, rc));

        tcg_gen_movcond_i64(TCG_COND_NE, vc, va, load_zero(ctx),
                            vb, load_gpr(ctx, rc));

        tcg_gen_orc_i64(vc, va, vb);

        tcg_gen_xor_i64(vc, va, vb);

        tcg_gen_movcond_i64(TCG_COND_LT, vc, va, load_zero(ctx),
                            vb, load_gpr(ctx, rc));

        tcg_gen_movcond_i64(TCG_COND_GE, vc, va, load_zero(ctx),
                            vb, load_gpr(ctx, rc));

        tcg_gen_eqv_i64(vc, va, vb);

        uint64_t amask = ctx->tb->flags >> TB_FLAGS_AMASK_SHIFT;
        tcg_gen_andi_i64(vc, vb, ~amask);

        tcg_gen_movcond_i64(TCG_COND_LE, vc, va, load_zero(ctx),
                            vb, load_gpr(ctx, rc));

        tcg_gen_movcond_i64(TCG_COND_GT, vc, va, load_zero(ctx),
                            vb, load_gpr(ctx, rc));

        tcg_gen_movi_i64(vc, ctx->implver);
= dest_gpr(ctx
, rc
);
1829 va
= load_gpr(ctx
, ra
);
1833 gen_msk_l(ctx
, vc
, va
, rb
, islit
, lit
, 0x01);
1837 gen_ext_l(ctx
, vc
, va
, rb
, islit
, lit
, 0x01);
1841 gen_ins_l(ctx
, vc
, va
, rb
, islit
, lit
, 0x01);
1845 gen_msk_l(ctx
, vc
, va
, rb
, islit
, lit
, 0x03);
1849 gen_ext_l(ctx
, vc
, va
, rb
, islit
, lit
, 0x03);
1853 gen_ins_l(ctx
, vc
, va
, rb
, islit
, lit
, 0x03);
1857 gen_msk_l(ctx
, vc
, va
, rb
, islit
, lit
, 0x0f);
1861 gen_ext_l(ctx
, vc
, va
, rb
, islit
, lit
, 0x0f);
1865 gen_ins_l(ctx
, vc
, va
, rb
, islit
, lit
, 0x0f);
1870 gen_zapnoti(vc
, va
, ~lit
);
1872 gen_helper_zap(vc
, va
, load_gpr(ctx
, rb
));
1878 gen_zapnoti(vc
, va
, lit
);
1880 gen_helper_zapnot(vc
, va
, load_gpr(ctx
, rb
));
1885 gen_msk_l(ctx
, vc
, va
, rb
, islit
, lit
, 0xff);
1890 tcg_gen_shri_i64(vc
, va
, lit
& 0x3f);
1892 tmp
= tcg_temp_new();
1893 vb
= load_gpr(ctx
, rb
);
1894 tcg_gen_andi_i64(tmp
, vb
, 0x3f);
1895 tcg_gen_shr_i64(vc
, va
, tmp
);
1901 gen_ext_l(ctx
, vc
, va
, rb
, islit
, lit
, 0xff);
1906 tcg_gen_shli_i64(vc
, va
, lit
& 0x3f);
1908 tmp
= tcg_temp_new();
1909 vb
= load_gpr(ctx
, rb
);
1910 tcg_gen_andi_i64(tmp
, vb
, 0x3f);
1911 tcg_gen_shl_i64(vc
, va
, tmp
);
1917 gen_ins_l(ctx
, vc
, va
, rb
, islit
, lit
, 0xff);
1922 tcg_gen_sari_i64(vc
, va
, lit
& 0x3f);
1924 tmp
= tcg_temp_new();
1925 vb
= load_gpr(ctx
, rb
);
1926 tcg_gen_andi_i64(tmp
, vb
, 0x3f);
1927 tcg_gen_sar_i64(vc
, va
, tmp
);
1933 gen_msk_h(ctx
, vc
, va
, rb
, islit
, lit
, 0x03);
1937 gen_ins_h(ctx
, vc
, va
, rb
, islit
, lit
, 0x03);
1941 gen_ext_h(ctx
, vc
, va
, rb
, islit
, lit
, 0x03);
1945 gen_msk_h(ctx
, vc
, va
, rb
, islit
, lit
, 0x0f);
1949 gen_ins_h(ctx
, vc
, va
, rb
, islit
, lit
, 0x0f);
1953 gen_ext_h(ctx
, vc
, va
, rb
, islit
, lit
, 0x0f);
1957 gen_msk_h(ctx
, vc
, va
, rb
, islit
, lit
, 0xff);
1961 gen_ins_h(ctx
, vc
, va
, rb
, islit
, lit
, 0xff);
1965 gen_ext_h(ctx
, vc
, va
, rb
, islit
, lit
, 0xff);
        vc = dest_gpr(ctx, rc);
        vb = load_gpr_lit(ctx, rb, lit, islit);
        va = load_gpr(ctx, ra);

        tcg_gen_mul_i64(vc, va, vb);
        tcg_gen_ext32s_i64(vc, vc);

        tcg_gen_mul_i64(vc, va, vb);

        tmp = tcg_temp_new();
        tcg_gen_mulu2_i64(tmp, vc, va, vb);

        tmp = tcg_temp_new();
        tcg_gen_ext32s_i64(tmp, va);
        tcg_gen_ext32s_i64(vc, vb);
        tcg_gen_mul_i64(tmp, tmp, vc);
        tcg_gen_ext32s_i64(vc, tmp);
        gen_helper_check_overflow(cpu_env, vc, tmp);

        tmp = tcg_temp_new();
        tmp2 = tcg_temp_new();
        tcg_gen_muls2_i64(vc, tmp, va, vb);
        tcg_gen_sari_i64(tmp2, vc, 63);
        gen_helper_check_overflow(cpu_env, tmp, tmp2);
        tcg_temp_free(tmp2);
        REQUIRE_TB_FLAG(TB_FLAGS_AMASK_FIX);
        vc = dest_fpr(ctx, rc);
        switch (fpfn) { /* fn11 & 0x3F */

            t32 = tcg_temp_new_i32();
            va = load_gpr(ctx, ra);
            tcg_gen_extrl_i64_i32(t32, va);
            gen_helper_memory_to_s(vc, t32);
            tcg_temp_free_i32(t32);

            vb = load_fpr(ctx, rb);
            gen_helper_sqrtf(vc, cpu_env, vb);

            gen_sqrts(ctx, rb, rc, fn11);

            t32 = tcg_temp_new_i32();
            va = load_gpr(ctx, ra);
            tcg_gen_extrl_i64_i32(t32, va);
            gen_helper_memory_to_f(vc, t32);
            tcg_temp_free_i32(t32);

            va = load_gpr(ctx, ra);
            tcg_gen_mov_i64(vc, va);

            vb = load_fpr(ctx, rb);
            gen_helper_sqrtg(vc, cpu_env, vb);

            gen_sqrtt(ctx, rb, rc, fn11);
        /* VAX floating point */
        /* XXX: rounding mode and trap are ignored (!) */
        vc = dest_fpr(ctx, rc);
        vb = load_fpr(ctx, rb);
        va = load_fpr(ctx, ra);
        switch (fpfn) { /* fn11 & 0x3F */

            gen_helper_addf(vc, cpu_env, va, vb);

            gen_helper_subf(vc, cpu_env, va, vb);

            gen_helper_mulf(vc, cpu_env, va, vb);

            gen_helper_divf(vc, cpu_env, va, vb);

            gen_helper_addg(vc, cpu_env, va, vb);

            gen_helper_subg(vc, cpu_env, va, vb);

            gen_helper_mulg(vc, cpu_env, va, vb);

            gen_helper_divg(vc, cpu_env, va, vb);

            gen_helper_cmpgeq(vc, cpu_env, va, vb);

            gen_helper_cmpglt(vc, cpu_env, va, vb);

            gen_helper_cmpgle(vc, cpu_env, va, vb);

            gen_helper_cvtgf(vc, cpu_env, vb);

            gen_helper_cvtgq(vc, cpu_env, vb);

            gen_helper_cvtqf(vc, cpu_env, vb);

            gen_helper_cvtqg(vc, cpu_env, vb);
        /* IEEE floating-point */
        switch (fpfn) { /* fn11 & 0x3F */

            gen_adds(ctx, ra, rb, rc, fn11);

            gen_subs(ctx, ra, rb, rc, fn11);

            gen_muls(ctx, ra, rb, rc, fn11);

            gen_divs(ctx, ra, rb, rc, fn11);

            gen_addt(ctx, ra, rb, rc, fn11);

            gen_subt(ctx, ra, rb, rc, fn11);

            gen_mult(ctx, ra, rb, rc, fn11);

            gen_divt(ctx, ra, rb, rc, fn11);

            gen_cmptun(ctx, ra, rb, rc, fn11);

            gen_cmpteq(ctx, ra, rb, rc, fn11);

            gen_cmptlt(ctx, ra, rb, rc, fn11);

            gen_cmptle(ctx, ra, rb, rc, fn11);

            if (fn11 == 0x2AC || fn11 == 0x6AC) {
                gen_cvtst(ctx, rb, rc, fn11);
                gen_cvtts(ctx, rb, rc, fn11);

            gen_cvttq(ctx, rb, rc, fn11);

            gen_cvtqs(ctx, rb, rc, fn11);

            gen_cvtqt(ctx, rb, rc, fn11);
        vc = dest_fpr(ctx, rc);
        vb = load_fpr(ctx, rb);

        /* Special case CPYS as FNOP.  */
        vc = dest_fpr(ctx, rc);
        va = load_fpr(ctx, ra);
        /* Special case CPYS as FMOV.  */
        tcg_gen_mov_i64(vc, va);
        vb = load_fpr(ctx, rb);
        gen_cpy_mask(vc, va, vb, 0, 0x8000000000000000ULL);

        vc = dest_fpr(ctx, rc);
        vb = load_fpr(ctx, rb);
        va = load_fpr(ctx, ra);
        gen_cpy_mask(vc, va, vb, 1, 0x8000000000000000ULL);

        vc = dest_fpr(ctx, rc);
        vb = load_fpr(ctx, rb);
        va = load_fpr(ctx, ra);
        gen_cpy_mask(vc, va, vb, 0, 0xFFF0000000000000ULL);

        va = load_fpr(ctx, ra);
        gen_helper_store_fpcr(cpu_env, va);
        if (ctx->tb_rm == QUAL_RM_D) {
            /* Re-do the copy of the rounding mode to fp_status
               the next time we use dynamic rounding.  */

        va = dest_fpr(ctx, ra);
        gen_helper_load_fpcr(va, cpu_env);

        gen_fcmov(ctx, TCG_COND_EQ, ra, rb, rc);

        gen_fcmov(ctx, TCG_COND_NE, ra, rb, rc);

        gen_fcmov(ctx, TCG_COND_LT, ra, rb, rc);

        gen_fcmov(ctx, TCG_COND_GE, ra, rb, rc);

        gen_fcmov(ctx, TCG_COND_LE, ra, rb, rc);

        gen_fcmov(ctx, TCG_COND_GT, ra, rb, rc);

        case 0x030: /* CVTQL */
        case 0x130: /* CVTQL/V */
        case 0x530: /* CVTQL/SV */
            vc = dest_fpr(ctx, rc);
            vb = load_fpr(ctx, rb);
            gen_helper_cvtql(vc, cpu_env, vb);
            gen_fp_exc_raise(rc, fn11);
        switch ((uint16_t)disp16) {

            tcg_gen_mb(TCG_MO_ALL | TCG_BAR_SC);

            tcg_gen_mb(TCG_MO_ST_ST | TCG_BAR_SC);

            va = dest_gpr(ctx, ra);
            if (ctx->tb->cflags & CF_USE_ICOUNT) {
                gen_helper_load_pcc(va, cpu_env);
                ret = EXIT_PC_STALE;
                gen_helper_load_pcc(va, cpu_env);
        /* HW_MFPR (PALcode) */
#ifndef CONFIG_USER_ONLY
        REQUIRE_TB_FLAG(TB_FLAGS_PAL_MODE);
        va = dest_gpr(ctx, ra);
        ret = gen_mfpr(ctx, va, insn & 0xffff);

        /* JMP, JSR, RET, JSR_COROUTINE.  These only differ by the branch
           prediction stack action, which of course we don't implement.  */
        vb = load_gpr(ctx, rb);
        tcg_gen_andi_i64(cpu_pc, vb, ~3);
        tcg_gen_movi_i64(ctx->ir[ra], ctx->pc);
        ret = EXIT_PC_UPDATED;
        /* HW_LD (PALcode) */
#ifndef CONFIG_USER_ONLY
        REQUIRE_TB_FLAG(TB_FLAGS_PAL_MODE);
        TCGv addr = tcg_temp_new();
        vb = load_gpr(ctx, rb);
        va = dest_gpr(ctx, ra);

        tcg_gen_addi_i64(addr, vb, disp12);
        switch ((insn >> 12) & 0xF) {

            /* Longword physical access (hw_ldl/p) */
            gen_helper_ldl_phys(va, cpu_env, addr);

            /* Quadword physical access (hw_ldq/p) */
            gen_helper_ldq_phys(va, cpu_env, addr);

            /* Longword physical access with lock (hw_ldl_l/p) */
            gen_helper_ldl_l_phys(va, cpu_env, addr);

            /* Quadword physical access with lock (hw_ldq_l/p) */
            gen_helper_ldq_l_phys(va, cpu_env, addr);

            /* Longword virtual PTE fetch (hw_ldl/v) */

            /* Quadword virtual PTE fetch (hw_ldq/v) */

            /* Longword virtual access (hw_ldl) */

            /* Quadword virtual access (hw_ldq) */

            /* Longword virtual access with protection check (hw_ldl/w) */
            tcg_gen_qemu_ld_i64(va, addr, MMU_KERNEL_IDX, MO_LESL);

            /* Quadword virtual access with protection check (hw_ldq/w) */
            tcg_gen_qemu_ld_i64(va, addr, MMU_KERNEL_IDX, MO_LEQ);

            /* Longword virtual access with alt access mode (hw_ldl/a) */

            /* Quadword virtual access with alt access mode (hw_ldq/a) */

            /* Longword virtual access with alternate access mode and
               protection checks (hw_ldl/wa) */
            tcg_gen_qemu_ld_i64(va, addr, MMU_USER_IDX, MO_LESL);

            /* Quadword virtual access with alternate access mode and
               protection checks (hw_ldq/wa) */
            tcg_gen_qemu_ld_i64(va, addr, MMU_USER_IDX, MO_LEQ);

        tcg_temp_free(addr);
        vc = dest_gpr(ctx, rc);

        REQUIRE_TB_FLAG(TB_FLAGS_AMASK_FIX);
        va = load_fpr(ctx, ra);
        tcg_gen_mov_i64(vc, va);

        } else if (fn7 == 0x78) {

            REQUIRE_TB_FLAG(TB_FLAGS_AMASK_FIX);
            t32 = tcg_temp_new_i32();
            va = load_fpr(ctx, ra);
            gen_helper_s_to_memory(t32, va);
            tcg_gen_ext_i32_i64(vc, t32);
            tcg_temp_free_i32(t32);

        vb = load_gpr_lit(ctx, rb, lit, islit);

        REQUIRE_TB_FLAG(TB_FLAGS_AMASK_BWX);
        tcg_gen_ext8s_i64(vc, vb);

        REQUIRE_TB_FLAG(TB_FLAGS_AMASK_BWX);
        tcg_gen_ext16s_i64(vc, vb);

        REQUIRE_TB_FLAG(TB_FLAGS_AMASK_CIX);
        gen_helper_ctpop(vc, vb);

        REQUIRE_TB_FLAG(TB_FLAGS_AMASK_MVI);
        va = load_gpr(ctx, ra);
        gen_helper_perr(vc, va, vb);

        REQUIRE_TB_FLAG(TB_FLAGS_AMASK_CIX);
        gen_helper_ctlz(vc, vb);

        REQUIRE_TB_FLAG(TB_FLAGS_AMASK_CIX);
        gen_helper_cttz(vc, vb);

        REQUIRE_TB_FLAG(TB_FLAGS_AMASK_MVI);
        gen_helper_unpkbw(vc, vb);

        REQUIRE_TB_FLAG(TB_FLAGS_AMASK_MVI);
        gen_helper_unpkbl(vc, vb);

        REQUIRE_TB_FLAG(TB_FLAGS_AMASK_MVI);
        gen_helper_pkwb(vc, vb);

        REQUIRE_TB_FLAG(TB_FLAGS_AMASK_MVI);
        gen_helper_pklb(vc, vb);

        REQUIRE_TB_FLAG(TB_FLAGS_AMASK_MVI);
        va = load_gpr(ctx, ra);
        gen_helper_minsb8(vc, va, vb);

        REQUIRE_TB_FLAG(TB_FLAGS_AMASK_MVI);
        va = load_gpr(ctx, ra);
        gen_helper_minsw4(vc, va, vb);

        REQUIRE_TB_FLAG(TB_FLAGS_AMASK_MVI);
        va = load_gpr(ctx, ra);
        gen_helper_minub8(vc, va, vb);

        REQUIRE_TB_FLAG(TB_FLAGS_AMASK_MVI);
        va = load_gpr(ctx, ra);
        gen_helper_minuw4(vc, va, vb);

        REQUIRE_TB_FLAG(TB_FLAGS_AMASK_MVI);
        va = load_gpr(ctx, ra);
        gen_helper_maxub8(vc, va, vb);

        REQUIRE_TB_FLAG(TB_FLAGS_AMASK_MVI);
        va = load_gpr(ctx, ra);
        gen_helper_maxuw4(vc, va, vb);

        REQUIRE_TB_FLAG(TB_FLAGS_AMASK_MVI);
        va = load_gpr(ctx, ra);
        gen_helper_maxsb8(vc, va, vb);

        REQUIRE_TB_FLAG(TB_FLAGS_AMASK_MVI);
        va = load_gpr(ctx, ra);
        gen_helper_maxsw4(vc, va, vb);
        /* HW_MTPR (PALcode) */
#ifndef CONFIG_USER_ONLY
        REQUIRE_TB_FLAG(TB_FLAGS_PAL_MODE);
        vb = load_gpr(ctx, rb);
        ret = gen_mtpr(ctx, vb, insn & 0xffff);

        /* HW_RET (PALcode) */
#ifndef CONFIG_USER_ONLY
        REQUIRE_TB_FLAG(TB_FLAGS_PAL_MODE);

        /* Pre-EV6 CPUs interpreted this as HW_REI, loading the return
           address from EXC_ADDR.  This turns out to be useful for our
           emulation PALcode, so continue to accept it.  */
        ctx->lit = vb = tcg_temp_new();
        tcg_gen_ld_i64(vb, cpu_env, offsetof(CPUAlphaState, exc_addr));

        vb = load_gpr(ctx, rb);

        tmp = tcg_temp_new();
        tcg_gen_movi_i64(tmp, 0);
        tcg_gen_st8_i64(tmp, cpu_env, offsetof(CPUAlphaState, intr_flag));
        tcg_gen_movi_i64(cpu_lock_addr, -1);
        tcg_gen_andi_i64(tmp, vb, 1);
        tcg_gen_st8_i64(tmp, cpu_env, offsetof(CPUAlphaState, pal_mode));
        tcg_gen_andi_i64(cpu_pc, vb, ~3);
        ret = EXIT_PC_UPDATED;
        /* HW_ST (PALcode) */
#ifndef CONFIG_USER_ONLY
        REQUIRE_TB_FLAG(TB_FLAGS_PAL_MODE);
        TCGv addr = tcg_temp_new();
        va = load_gpr(ctx, ra);
        vb = load_gpr(ctx, rb);

        tcg_gen_addi_i64(addr, vb, disp12);
        switch ((insn >> 12) & 0xF) {

            /* Longword physical access */
            gen_helper_stl_phys(cpu_env, addr, va);

            /* Quadword physical access */
            gen_helper_stq_phys(cpu_env, addr, va);

            /* Longword physical access with lock */
            gen_helper_stl_c_phys(dest_gpr(ctx, ra), cpu_env, addr, va);

            /* Quadword physical access with lock */
            gen_helper_stq_c_phys(dest_gpr(ctx, ra), cpu_env, addr, va);

            /* Longword virtual access */

            /* Quadword virtual access */

            /* Longword virtual access with alternate access mode */

            /* Quadword virtual access with alternate access mode */

        tcg_temp_free(addr);
        gen_load_mem(ctx, &gen_qemu_ldf, ra, rb, disp16, 1, 0);

        gen_load_mem(ctx, &gen_qemu_ldg, ra, rb, disp16, 1, 0);

        gen_load_mem(ctx, &gen_qemu_lds, ra, rb, disp16, 1, 0);

        gen_load_mem(ctx, &tcg_gen_qemu_ld64, ra, rb, disp16, 1, 0);

        gen_store_mem(ctx, &gen_qemu_stf, ra, rb, disp16, 1, 0);

        gen_store_mem(ctx, &gen_qemu_stg, ra, rb, disp16, 1, 0);

        gen_store_mem(ctx, &gen_qemu_sts, ra, rb, disp16, 1, 0);

        gen_store_mem(ctx, &tcg_gen_qemu_st64, ra, rb, disp16, 1, 0);

        gen_load_mem(ctx, &tcg_gen_qemu_ld32s, ra, rb, disp16, 0, 0);

        gen_load_mem(ctx, &tcg_gen_qemu_ld64, ra, rb, disp16, 0, 0);

        gen_load_mem(ctx, &gen_qemu_ldl_l, ra, rb, disp16, 0, 0);

        gen_load_mem(ctx, &gen_qemu_ldq_l, ra, rb, disp16, 0, 0);

        gen_store_mem(ctx, &tcg_gen_qemu_st32, ra, rb, disp16, 0, 0);

        gen_store_mem(ctx, &tcg_gen_qemu_st64, ra, rb, disp16, 0, 0);

        ret = gen_store_conditional(ctx, ra, rb, disp16, 0);

        ret = gen_store_conditional(ctx, ra, rb, disp16, 1);

        ret = gen_bdirect(ctx, ra, disp21);

    case 0x31: /* FBEQ */
        ret = gen_fbcond(ctx, TCG_COND_EQ, ra, disp21);

    case 0x32: /* FBLT */
        ret = gen_fbcond(ctx, TCG_COND_LT, ra, disp21);

    case 0x33: /* FBLE */
        ret = gen_fbcond(ctx, TCG_COND_LE, ra, disp21);

        ret = gen_bdirect(ctx, ra, disp21);

    case 0x35: /* FBNE */
        ret = gen_fbcond(ctx, TCG_COND_NE, ra, disp21);

    case 0x36: /* FBGE */
        ret = gen_fbcond(ctx, TCG_COND_GE, ra, disp21);

    case 0x37: /* FBGT */
        ret = gen_fbcond(ctx, TCG_COND_GT, ra, disp21);

        ret = gen_bcond(ctx, TCG_COND_EQ, ra, disp21, 1);

        ret = gen_bcond(ctx, TCG_COND_EQ, ra, disp21, 0);

        ret = gen_bcond(ctx, TCG_COND_LT, ra, disp21, 0);

        ret = gen_bcond(ctx, TCG_COND_LE, ra, disp21, 0);

        ret = gen_bcond(ctx, TCG_COND_NE, ra, disp21, 1);

        ret = gen_bcond(ctx, TCG_COND_NE, ra, disp21, 0);

        ret = gen_bcond(ctx, TCG_COND_GE, ra, disp21, 0);

        ret = gen_bcond(ctx, TCG_COND_GT, ra, disp21, 0);

        ret = gen_invalid(ctx);
void gen_intermediate_code(CPUAlphaState *env, struct TranslationBlock *tb)
{
    AlphaCPU *cpu = alpha_env_get_cpu(env);
    CPUState *cs = CPU(cpu);
    DisasContext ctx, *ctxp = &ctx;
    target_ulong pc_start;
    target_ulong pc_mask;

    ctx.mem_idx = cpu_mmu_index(env, false);
    ctx.implver = env->implver;
    ctx.singlestep_enabled = cs->singlestep_enabled;

#ifdef CONFIG_USER_ONLY
    ctx.ir = cpu_std_ir;
#else
    ctx.palbr = env->palbr;
    ctx.ir = (tb->flags & TB_FLAGS_PAL_MODE ? cpu_pal_ir : cpu_std_ir);
#endif

    /* ??? Every TB begins with unset rounding mode, to be initialized on
       the first fp insn of the TB.  Alternately we could define a proper
       default for every TB (e.g. QUAL_RM_N or QUAL_RM_D) and make sure
       to reset the FP_STATUS to that default at the end of any TB that
       changes the default.  We could even (gasp) dynamically figure out
       what default would be most efficient given the running program.  */
    /* Similarly for flush-to-zero.  */

    max_insns = tb->cflags & CF_COUNT_MASK;
    if (max_insns == 0) {
        max_insns = CF_COUNT_MASK;
    }
    if (max_insns > TCG_MAX_INSNS) {
        max_insns = TCG_MAX_INSNS;
    }

    if (in_superpage(&ctx, pc_start)) {
        pc_mask = (1ULL << 41) - 1;
    } else {
        pc_mask = ~TARGET_PAGE_MASK;
    }
        tcg_gen_insn_start(ctx.pc);

        if (unlikely(cpu_breakpoint_test(cs, ctx.pc, BP_ANY))) {
            ret = gen_excp(&ctx, EXCP_DEBUG, 0);
            /* The address covered by the breakpoint must be included in
               [tb->pc, tb->pc + tb->size) in order for it to be properly
               cleared -- thus we increment the PC here so that the logic
               setting tb->size below does the right thing.  */
        }

        if (num_insns == max_insns && (tb->cflags & CF_LAST_IO)) {

        insn = cpu_ldl_code(env, ctx.pc);

        TCGV_UNUSED_I64(ctx.zero);
        TCGV_UNUSED_I64(ctx.sink);
        TCGV_UNUSED_I64(ctx.lit);

        ret = translate_one(ctxp, insn);

        if (!TCGV_IS_UNUSED_I64(ctx.sink)) {
            tcg_gen_discard_i64(ctx.sink);
            tcg_temp_free(ctx.sink);
        }
        if (!TCGV_IS_UNUSED_I64(ctx.zero)) {
            tcg_temp_free(ctx.zero);
        }
        if (!TCGV_IS_UNUSED_I64(ctx.lit)) {
            tcg_temp_free(ctx.lit);
        }

        /* If we reach a page boundary, are single stepping,
           or exhaust instruction count, stop generation.  */
        if (ret == NO_EXIT
            && ((ctx.pc & pc_mask) == 0
                || tcg_op_buf_full()
                || num_insns >= max_insns
                || ctx.singlestep_enabled)) {
            ret = EXIT_PC_STALE;
        }
    } while (ret == NO_EXIT);
    if (tb->cflags & CF_LAST_IO) {

        tcg_gen_movi_i64(cpu_pc, ctx.pc);
    case EXIT_PC_UPDATED:
        if (ctx.singlestep_enabled) {
            gen_excp_1(EXCP_DEBUG, 0);

    gen_tb_end(tb, num_insns);

    tb->size = ctx.pc - pc_start;
    tb->icount = num_insns;

    if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)
        && qemu_log_in_addr_range(pc_start)) {
        qemu_log("IN: %s\n", lookup_symbol(pc_start));
        log_target_disas(cs, pc_start, ctx.pc - pc_start, 1);
void restore_state_to_opc(CPUAlphaState *env, TranslationBlock *tb,