/*
 *  Alpha emulation cpu translation for qemu.
 *
 *  Copyright (c) 2007 Jocelyn Mayer
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "cpu.h"
#include "disas/disas.h"
#include "qemu/host-utils.h"
#include "tcg-op.h"
#include "exec/cpu_ldst.h"

#include "exec/helper-proto.h"
#include "exec/helper-gen.h"

#include "trace-tcg.h"

#undef ALPHA_DEBUG_DISAS
#define CONFIG_SOFTFLOAT_INLINE

#ifdef ALPHA_DEBUG_DISAS
#  define LOG_DISAS(...) qemu_log_mask(CPU_LOG_TB_IN_ASM, ## __VA_ARGS__)
#else
#  define LOG_DISAS(...) do { } while (0)
#endif

typedef struct DisasContext DisasContext;
struct DisasContext {
    struct TranslationBlock *tb;

    uint64_t pc;
#ifndef CONFIG_USER_ONLY
    uint64_t palbr;
#endif
    int mem_idx;

    /* Current rounding mode for this TB.  */
    int tb_rm;
    /* Current flush-to-zero setting for this TB.  */
    int tb_ftz;

    /* implver value for this CPU.  */
    int implver;

    /* The set of registers active in the current context.  */
    TCGv *ir;

    /* Temporaries for $31 and $f31 as source and destination.  */
    TCGv zero;
    TCGv sink;
    /* Temporary for immediate constants.  */
    TCGv lit;

    bool singlestep_enabled;
};

/* Return values from translate_one, indicating the state of the TB.
   Note that zero indicates that we are not exiting the TB.  */

typedef enum {
    NO_EXIT,

    /* We have emitted one or more goto_tb.  No fixup required.  */
    EXIT_GOTO_TB,

    /* We are not using a goto_tb (for whatever reason), but have updated
       the PC (for whatever reason), so there's no need to do it again on
       exiting the TB.  */
    EXIT_PC_UPDATED,

    /* We are exiting the TB, but have neither emitted a goto_tb, nor
       updated the PC for the next instruction to be executed.  */
    EXIT_PC_STALE,

    /* We are ending the TB with a noreturn function call, e.g. longjmp.
       No following code will be executed.  */
    EXIT_NORETURN,
} ExitStatus;

/* global register indexes */
static TCGv_ptr cpu_env;
static TCGv cpu_std_ir[31];
static TCGv cpu_fir[31];
static TCGv cpu_pc;
static TCGv cpu_lock_addr;
static TCGv cpu_lock_st_addr;
static TCGv cpu_lock_value;

#ifndef CONFIG_USER_ONLY
static TCGv cpu_pal_ir[31];
#endif

#include "exec/gen-icount.h"

void alpha_translate_init(void)
{
#define DEF_VAR(V)  { &cpu_##V, #V, offsetof(CPUAlphaState, V) }

    typedef struct { TCGv *var; const char *name; int ofs; } GlobalVar;
    static const GlobalVar vars[] = {
        DEF_VAR(pc),
        DEF_VAR(lock_addr),
        DEF_VAR(lock_st_addr),
        DEF_VAR(lock_value),
    };

#undef DEF_VAR

    /* Use the symbolic register names that match the disassembler.  */
    static const char greg_names[31][4] = {
        "v0", "t0", "t1", "t2", "t3", "t4", "t5", "t6",
        "t7", "s0", "s1", "s2", "s3", "s4", "s5", "fp",
        "a0", "a1", "a2", "a3", "a4", "a5", "t8", "t9",
        "t10", "t11", "ra", "t12", "at", "gp", "sp"
    };
    static const char freg_names[31][4] = {
        "f0", "f1", "f2", "f3", "f4", "f5", "f6", "f7",
        "f8", "f9", "f10", "f11", "f12", "f13", "f14", "f15",
        "f16", "f17", "f18", "f19", "f20", "f21", "f22", "f23",
        "f24", "f25", "f26", "f27", "f28", "f29", "f30"
    };
#ifndef CONFIG_USER_ONLY
    static const char shadow_names[8][8] = {
        "pal_t7", "pal_s0", "pal_s1", "pal_s2",
        "pal_s3", "pal_s4", "pal_s5", "pal_t11"
    };
#endif

    static bool done_init = 0;
    int i;

    if (done_init) {
        return;
    }
    done_init = 1;

    cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");

    for (i = 0; i < 31; i++) {
        cpu_std_ir[i] = tcg_global_mem_new_i64(TCG_AREG0,
                                               offsetof(CPUAlphaState, ir[i]),
                                               greg_names[i]);
    }

    for (i = 0; i < 31; i++) {
        cpu_fir[i] = tcg_global_mem_new_i64(TCG_AREG0,
                                            offsetof(CPUAlphaState, fir[i]),
                                            freg_names[i]);
    }

#ifndef CONFIG_USER_ONLY
    memcpy(cpu_pal_ir, cpu_std_ir, sizeof(cpu_pal_ir));
    for (i = 0; i < 8; i++) {
        int r = (i == 7 ? 25 : i + 8);
        cpu_pal_ir[r] = tcg_global_mem_new_i64(TCG_AREG0,
                                               offsetof(CPUAlphaState,
                                                        shadow[i]),
                                               shadow_names[i]);
    }
#endif

    for (i = 0; i < ARRAY_SIZE(vars); ++i) {
        const GlobalVar *v = &vars[i];
        *v->var = tcg_global_mem_new_i64(TCG_AREG0, v->ofs, v->name);
    }
}

static TCGv load_zero(DisasContext *ctx)
{
    if (TCGV_IS_UNUSED_I64(ctx->zero)) {
        ctx->zero = tcg_const_i64(0);
    }
    return ctx->zero;
}

static TCGv dest_sink(DisasContext *ctx)
{
    if (TCGV_IS_UNUSED_I64(ctx->sink)) {
        ctx->sink = tcg_temp_new();
    }
    return ctx->sink;
}

static TCGv load_gpr(DisasContext *ctx, unsigned reg)
{
    if (likely(reg < 31)) {
        return ctx->ir[reg];
    } else {
        return load_zero(ctx);
    }
}

static TCGv load_gpr_lit(DisasContext *ctx, unsigned reg,
                         uint8_t lit, bool islit)
{
    if (islit) {
        ctx->lit = tcg_const_i64(lit);
        return ctx->lit;
    } else if (likely(reg < 31)) {
        return ctx->ir[reg];
    } else {
        return load_zero(ctx);
    }
}

static TCGv dest_gpr(DisasContext *ctx, unsigned reg)
{
    if (likely(reg < 31)) {
        return ctx->ir[reg];
    } else {
        return dest_sink(ctx);
    }
}

static TCGv load_fpr(DisasContext *ctx, unsigned reg)
{
    if (likely(reg < 31)) {
        return cpu_fir[reg];
    } else {
        return load_zero(ctx);
    }
}

static TCGv dest_fpr(DisasContext *ctx, unsigned reg)
{
    if (likely(reg < 31)) {
        return cpu_fir[reg];
    } else {
        return dest_sink(ctx);
    }
}

static void gen_excp_1(int exception, int error_code)
{
    TCGv_i32 tmp1, tmp2;

    tmp1 = tcg_const_i32(exception);
    tmp2 = tcg_const_i32(error_code);
    gen_helper_excp(cpu_env, tmp1, tmp2);
    tcg_temp_free_i32(tmp2);
    tcg_temp_free_i32(tmp1);
}

static ExitStatus gen_excp(DisasContext *ctx, int exception, int error_code)
{
    tcg_gen_movi_i64(cpu_pc, ctx->pc);
    gen_excp_1(exception, error_code);
    return EXIT_NORETURN;
}

static inline ExitStatus gen_invalid(DisasContext *ctx)
{
    return gen_excp(ctx, EXCP_OPCDEC, 0);
}

static inline void gen_qemu_ldf(TCGv t0, TCGv t1, int flags)
{
    TCGv_i32 tmp32 = tcg_temp_new_i32();
    tcg_gen_qemu_ld_i32(tmp32, t1, flags, MO_LEUL);
    gen_helper_memory_to_f(t0, tmp32);
    tcg_temp_free_i32(tmp32);
}

static inline void gen_qemu_ldg(TCGv t0, TCGv t1, int flags)
{
    TCGv tmp = tcg_temp_new();
    tcg_gen_qemu_ld_i64(tmp, t1, flags, MO_LEQ);
    gen_helper_memory_to_g(t0, tmp);
    tcg_temp_free(tmp);
}

static inline void gen_qemu_lds(TCGv t0, TCGv t1, int flags)
{
    TCGv_i32 tmp32 = tcg_temp_new_i32();
    tcg_gen_qemu_ld_i32(tmp32, t1, flags, MO_LEUL);
    gen_helper_memory_to_s(t0, tmp32);
    tcg_temp_free_i32(tmp32);
}

static inline void gen_qemu_ldl_l(TCGv t0, TCGv t1, int flags)
{
    tcg_gen_qemu_ld_i64(t0, t1, flags, MO_LESL);
    tcg_gen_mov_i64(cpu_lock_addr, t1);
    tcg_gen_mov_i64(cpu_lock_value, t0);
}

static inline void gen_qemu_ldq_l(TCGv t0, TCGv t1, int flags)
{
    tcg_gen_qemu_ld_i64(t0, t1, flags, MO_LEQ);
    tcg_gen_mov_i64(cpu_lock_addr, t1);
    tcg_gen_mov_i64(cpu_lock_value, t0);
}

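/* A note on the load-locked helpers above (illustrative, not from the
   original source): LDL_L/LDQ_L record both the address and the loaded
   value in cpu_lock_addr/cpu_lock_value.  gen_store_conditional() below
   compares both before allowing the store, so a guest sequence such as

       ldq_l  t0, 0(a0)
       addq   t0, 1, t0
       stq_c  t0, 0(a0)

   succeeds only if the value reloaded at store time still matches
   cpu_lock_value.  */
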
static inline void gen_load_mem(DisasContext *ctx,
                                void (*tcg_gen_qemu_load)(TCGv t0, TCGv t1,
                                                          int flags),
                                int ra, int rb, int32_t disp16, bool fp,
                                bool clear)
{
    TCGv tmp, addr, va;

    /* LDQ_U with ra $31 is UNOP.  Other various loads are forms of
       prefetches, which we can treat as nops.  No worries about
       missed exceptions here.  */
    if (unlikely(ra == 31)) {
        return;
    }

    tmp = tcg_temp_new();
    addr = load_gpr(ctx, rb);

    if (disp16) {
        tcg_gen_addi_i64(tmp, addr, disp16);
        addr = tmp;
    }
    if (clear) {
        tcg_gen_andi_i64(tmp, addr, ~0x7);
        addr = tmp;
    }

    va = (fp ? cpu_fir[ra] : ctx->ir[ra]);
    tcg_gen_qemu_load(va, addr, ctx->mem_idx);

    tcg_temp_free(tmp);
}

static inline void gen_qemu_stf(TCGv t0, TCGv t1, int flags)
{
    TCGv_i32 tmp32 = tcg_temp_new_i32();
    gen_helper_f_to_memory(tmp32, t0);
    tcg_gen_qemu_st_i32(tmp32, t1, flags, MO_LEUL);
    tcg_temp_free_i32(tmp32);
}

static inline void gen_qemu_stg(TCGv t0, TCGv t1, int flags)
{
    TCGv tmp = tcg_temp_new();
    gen_helper_g_to_memory(tmp, t0);
    tcg_gen_qemu_st_i64(tmp, t1, flags, MO_LEQ);
    tcg_temp_free(tmp);
}

static inline void gen_qemu_sts(TCGv t0, TCGv t1, int flags)
{
    TCGv_i32 tmp32 = tcg_temp_new_i32();
    gen_helper_s_to_memory(tmp32, t0);
    tcg_gen_qemu_st_i32(tmp32, t1, flags, MO_LEUL);
    tcg_temp_free_i32(tmp32);
}

static inline void gen_store_mem(DisasContext *ctx,
                                 void (*tcg_gen_qemu_store)(TCGv t0, TCGv t1,
                                                            int flags),
                                 int ra, int rb, int32_t disp16, bool fp,
                                 bool clear)
{
    TCGv tmp, addr, va;

    tmp = tcg_temp_new();
    addr = load_gpr(ctx, rb);

    if (disp16) {
        tcg_gen_addi_i64(tmp, addr, disp16);
        addr = tmp;
    }
    if (clear) {
        tcg_gen_andi_i64(tmp, addr, ~0x7);
        addr = tmp;
    }

    va = (fp ? load_fpr(ctx, ra) : load_gpr(ctx, ra));
    tcg_gen_qemu_store(va, addr, ctx->mem_idx);

    tcg_temp_free(tmp);
}

static ExitStatus gen_store_conditional(DisasContext *ctx, int ra, int rb,
                                        int32_t disp16, int quad)
{
    TCGv addr;

    if (ra == 31) {
        /* ??? Don't bother storing anything.  The user can't tell
           the difference, since the zero register always reads zero.  */
        return NO_EXIT;
    }

#if defined(CONFIG_USER_ONLY)
    addr = cpu_lock_st_addr;
#else
    addr = tcg_temp_local_new();
#endif

    tcg_gen_addi_i64(addr, load_gpr(ctx, rb), disp16);

#if defined(CONFIG_USER_ONLY)
    /* ??? This is handled via a complicated version of compare-and-swap
       in the cpu_loop.  Hopefully one day we'll have a real CAS opcode
       in TCG so that this isn't necessary.  */
    return gen_excp(ctx, quad ? EXCP_STQ_C : EXCP_STL_C, ra);
#else
    /* ??? In system mode we are never multi-threaded, so CAS can be
       implemented via a non-atomic load-compare-store sequence.  */
    {
        TCGLabel *lab_fail, *lab_done;
        TCGv val;

        lab_fail = gen_new_label();
        lab_done = gen_new_label();
        tcg_gen_brcond_i64(TCG_COND_NE, addr, cpu_lock_addr, lab_fail);

        val = tcg_temp_new();
        tcg_gen_qemu_ld_i64(val, addr, ctx->mem_idx, quad ? MO_LEQ : MO_LESL);
        tcg_gen_brcond_i64(TCG_COND_NE, val, cpu_lock_value, lab_fail);

        tcg_gen_qemu_st_i64(ctx->ir[ra], addr, ctx->mem_idx,
                            quad ? MO_LEQ : MO_LEUL);
        tcg_gen_movi_i64(ctx->ir[ra], 1);
        tcg_gen_br(lab_done);

        gen_set_label(lab_fail);
        tcg_gen_movi_i64(ctx->ir[ra], 0);

        gen_set_label(lab_done);
        tcg_gen_movi_i64(cpu_lock_addr, -1);

        tcg_temp_free(addr);
        return NO_EXIT;
    }
#endif
}

static bool in_superpage(DisasContext *ctx, int64_t addr)
{
    return ((ctx->tb->flags & TB_FLAGS_USER_MODE) == 0
            && addr < 0
            && ((addr >> 41) & 3) == 2
            && addr >> TARGET_VIRT_ADDR_SPACE_BITS == addr >> 63);
}

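/* Worked example for the check above (illustrative): a kernel superpage
   address such as 0xfffffc0000001000 is negative, has bits <42:41> == 2,
   and is properly sign-extended above TARGET_VIRT_ADDR_SPACE_BITS, so
   in_superpage() accepts it.  A user-space address like 0x0000000120001000
   fails the ((addr >> 41) & 3) == 2 test and is rejected.  */
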
static bool use_goto_tb(DisasContext *ctx, uint64_t dest)
{
    /* Suppress goto_tb in the case of single-stepping and IO.  */
    if ((ctx->tb->cflags & CF_LAST_IO)
        || ctx->singlestep_enabled || singlestep) {
        return false;
    }
    /* If the destination is in the superpage, the page perms can't change.  */
    if (in_superpage(ctx, dest)) {
        return true;
    }
    /* Check for the dest on the same page as the start of the TB.  */
    return ((ctx->tb->pc ^ dest) & TARGET_PAGE_MASK) == 0;
}

static ExitStatus gen_bdirect(DisasContext *ctx, int ra, int32_t disp)
{
    uint64_t dest = ctx->pc + (disp << 2);

    if (ra != 31) {
        tcg_gen_movi_i64(ctx->ir[ra], ctx->pc);
    }

    /* Notice branch-to-next; used to initialize RA with the PC.  */
    if (disp == 0) {
        return 0;
    } else if (use_goto_tb(ctx, dest)) {
        tcg_gen_goto_tb(0);
        tcg_gen_movi_i64(cpu_pc, dest);
        tcg_gen_exit_tb((uintptr_t)ctx->tb);
        return EXIT_GOTO_TB;
    } else {
        tcg_gen_movi_i64(cpu_pc, dest);
        return EXIT_PC_UPDATED;
    }
}

static ExitStatus gen_bcond_internal(DisasContext *ctx, TCGCond cond,
                                     TCGv cmp, int32_t disp)
{
    uint64_t dest = ctx->pc + (disp << 2);
    TCGLabel *lab_true = gen_new_label();

    if (use_goto_tb(ctx, dest)) {
        tcg_gen_brcondi_i64(cond, cmp, 0, lab_true);

        tcg_gen_goto_tb(0);
        tcg_gen_movi_i64(cpu_pc, ctx->pc);
        tcg_gen_exit_tb((uintptr_t)ctx->tb);

        gen_set_label(lab_true);
        tcg_gen_goto_tb(1);
        tcg_gen_movi_i64(cpu_pc, dest);
        tcg_gen_exit_tb((uintptr_t)ctx->tb + 1);

        return EXIT_GOTO_TB;
    } else {
        TCGv_i64 z = tcg_const_i64(0);
        TCGv_i64 d = tcg_const_i64(dest);
        TCGv_i64 p = tcg_const_i64(ctx->pc);

        tcg_gen_movcond_i64(cond, cpu_pc, cmp, z, d, p);

        tcg_temp_free_i64(z);
        tcg_temp_free_i64(d);
        tcg_temp_free_i64(p);
        return EXIT_PC_UPDATED;
    }
}

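/* Illustrative note (not part of the original source): the two exit paths
   above pass (uintptr_t)ctx->tb and (uintptr_t)ctx->tb + 1 to
   tcg_gen_exit_tb().  The low bits of that value tell the execution loop
   which goto_tb slot (0 or 1) to patch when chaining this TB to its
   successor, matching the tcg_gen_goto_tb(0)/tcg_gen_goto_tb(1) pair
   emitted for the fall-through and taken targets.  */
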
static ExitStatus gen_bcond(DisasContext *ctx, TCGCond cond, int ra,
                            int32_t disp, int mask)
{
    TCGv cmp_tmp;

    if (mask) {
        cmp_tmp = tcg_temp_new();
        tcg_gen_andi_i64(cmp_tmp, load_gpr(ctx, ra), 1);
    } else {
        cmp_tmp = load_gpr(ctx, ra);
    }

    return gen_bcond_internal(ctx, cond, cmp_tmp, disp);
}

/* Fold -0.0 for comparison with COND.  */

static void gen_fold_mzero(TCGCond cond, TCGv dest, TCGv src)
{
    uint64_t mzero = 1ull << 63;

    switch (cond) {
    case TCG_COND_LE:
    case TCG_COND_GT:
        /* For <= or >, the -0.0 value directly compares the way we want.  */
        tcg_gen_mov_i64(dest, src);
        break;

    case TCG_COND_EQ:
    case TCG_COND_NE:
        /* For == or !=, we can simply mask off the sign bit and compare.  */
        tcg_gen_andi_i64(dest, src, mzero - 1);
        break;

    case TCG_COND_GE:
    case TCG_COND_LT:
        /* For >= or <, map -0.0 to +0.0 via comparison and mask.  */
        tcg_gen_setcondi_i64(TCG_COND_NE, dest, src, mzero);
        tcg_gen_neg_i64(dest, dest);
        tcg_gen_and_i64(dest, dest, src);
        break;

    default:
        abort();
    }
}

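/* Worked example (illustrative): with mzero == 0x8000000000000000, an input
   of -0.0 (bit pattern 0x8000000000000000) is handled as
     EQ/NE:  src & (mzero - 1)      -> 0, i.e. it compares equal to +0.0;
     GE/LT:  -(src != mzero) & src  -> 0, i.e. it is treated as +0.0;
     LE/GT:  src is passed through unchanged, since 0x8000000000000000 is
             already the most negative two's-complement value and therefore
             orders below +0.0 as required.  */
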
static ExitStatus gen_fbcond(DisasContext *ctx, TCGCond cond, int ra,
                             int32_t disp)
{
    TCGv cmp_tmp = tcg_temp_new();
    gen_fold_mzero(cond, cmp_tmp, load_fpr(ctx, ra));
    return gen_bcond_internal(ctx, cond, cmp_tmp, disp);
}

static void gen_fcmov(DisasContext *ctx, TCGCond cond, int ra, int rb, int rc)
{
    TCGv va, vb, z;

    z = load_zero(ctx);
    vb = load_fpr(ctx, rb);
    va = tcg_temp_new();
    gen_fold_mzero(cond, va, load_fpr(ctx, ra));

    tcg_gen_movcond_i64(cond, dest_fpr(ctx, rc), va, z, vb, load_fpr(ctx, rc));

    tcg_temp_free(va);
}

#define QUAL_RM_N       0x080   /* Round mode nearest even */
#define QUAL_RM_C       0x000   /* Round mode chopped */
#define QUAL_RM_M       0x040   /* Round mode minus infinity */
#define QUAL_RM_D       0x0c0   /* Round mode dynamic */
#define QUAL_RM_MASK    0x0c0

#define QUAL_U          0x100   /* Underflow enable (fp output) */
#define QUAL_V          0x100   /* Overflow enable (int output) */
#define QUAL_S          0x400   /* Software completion enable */
#define QUAL_I          0x200   /* Inexact detection enable */
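
/* Example (illustrative): an FP operate instruction whose fn11 qualifier
   bits are 0x7c0 requests QUAL_S | QUAL_I | QUAL_U | QUAL_RM_D, i.e.
   software completion, inexact detection, underflow enable and dynamic
   rounding (0x400 | 0x200 | 0x100 | 0x0c0).  gen_qual_roundmode() and
   gen_qual_flushzero() below each look only at their own bits of fn11.  */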

static void gen_qual_roundmode(DisasContext *ctx, int fn11)
{
    TCGv_i32 tmp;

    fn11 &= QUAL_RM_MASK;
    if (fn11 == ctx->tb_rm) {
        return;
    }
    ctx->tb_rm = fn11;

    tmp = tcg_temp_new_i32();
    switch (fn11) {
    case QUAL_RM_N:
        tcg_gen_movi_i32(tmp, float_round_nearest_even);
        break;
    case QUAL_RM_C:
        tcg_gen_movi_i32(tmp, float_round_to_zero);
        break;
    case QUAL_RM_M:
        tcg_gen_movi_i32(tmp, float_round_down);
        break;
    case QUAL_RM_D:
        tcg_gen_ld8u_i32(tmp, cpu_env,
                         offsetof(CPUAlphaState, fpcr_dyn_round));
        break;
    }

#if defined(CONFIG_SOFTFLOAT_INLINE)
    /* ??? The "fpu/softfloat.h" interface is to call set_float_rounding_mode.
       With CONFIG_SOFTFLOAT that expands to an out-of-line call that just
       sets the one field.  */
    tcg_gen_st8_i32(tmp, cpu_env,
                    offsetof(CPUAlphaState, fp_status.float_rounding_mode));
#else
    gen_helper_setroundmode(tmp);
#endif

    tcg_temp_free_i32(tmp);
}

static void gen_qual_flushzero(DisasContext *ctx, int fn11)
{
    TCGv_i32 tmp;

    fn11 &= QUAL_U;
    if (fn11 == ctx->tb_ftz) {
        return;
    }
    ctx->tb_ftz = fn11;

    tmp = tcg_temp_new_i32();
    if (fn11) {
        /* Underflow is enabled, use the FPCR setting.  */
        tcg_gen_ld8u_i32(tmp, cpu_env,
                         offsetof(CPUAlphaState, fpcr_flush_to_zero));
    } else {
        /* Underflow is disabled, force flush-to-zero.  */
        tcg_gen_movi_i32(tmp, 1);
    }

#if defined(CONFIG_SOFTFLOAT_INLINE)
    tcg_gen_st8_i32(tmp, cpu_env,
                    offsetof(CPUAlphaState, fp_status.flush_to_zero));
#else
    gen_helper_setflushzero(tmp);
#endif

    tcg_temp_free_i32(tmp);
}

static TCGv gen_ieee_input(DisasContext *ctx, int reg, int fn11, int is_cmp)
{
    TCGv val;

    if (unlikely(reg == 31)) {
        val = load_zero(ctx);
    } else {
        val = cpu_fir[reg];
        if ((fn11 & QUAL_S) == 0) {
            if (is_cmp) {
                gen_helper_ieee_input_cmp(cpu_env, val);
            } else {
                gen_helper_ieee_input(cpu_env, val);
            }
        } else {
#ifndef CONFIG_USER_ONLY
            /* In system mode, raise exceptions for denormals like real
               hardware.  In user mode, proceed as if the OS completion
               handler is handling the denormal as per spec.  */
            gen_helper_ieee_input_s(cpu_env, val);
#endif
        }
    }
    return val;
}

static void gen_fp_exc_raise(int rc, int fn11)
{
    /* ??? We ought to be able to do something with imprecise exceptions.
       E.g. notice we're still in the trap shadow of something within the
       TB and do not generate the code to signal the exception; end the TB
       when an exception is forced to arrive, either by consumption of a
       register value or TRAPB or EXCB.  */
    TCGv_i32 reg, ign;
    uint32_t ignore = 0;

    if (!(fn11 & QUAL_U)) {
        /* Note that QUAL_U == QUAL_V, so ignore either.  */
        ignore |= FPCR_UNF | FPCR_IOV;
    }
    if (!(fn11 & QUAL_I)) {
        ignore |= FPCR_INE;
    }
    ign = tcg_const_i32(ignore);

    /* ??? Pass in the regno of the destination so that the helper can
       set EXC_MASK, which contains a bitmask of destination registers
       that have caused arithmetic traps.  A simple userspace emulation
       does not require this.  We do need it for a guest kernel's entArith,
       or if we were to do something clever with imprecise exceptions.  */
    reg = tcg_const_i32(rc + 32);
    if (fn11 & QUAL_S) {
        gen_helper_fp_exc_raise_s(cpu_env, ign, reg);
    } else {
        gen_helper_fp_exc_raise(cpu_env, ign, reg);
    }

    tcg_temp_free_i32(reg);
    tcg_temp_free_i32(ign);
}

static void gen_cvtlq(TCGv vc, TCGv vb)
{
    TCGv tmp = tcg_temp_new();

    /* The arithmetic right shift here, plus the sign-extended mask below
       yields a sign-extended result without an explicit ext32s_i64.  */
    tcg_gen_sari_i64(tmp, vb, 32);
    tcg_gen_shri_i64(vc, vb, 29);
    tcg_gen_andi_i64(tmp, tmp, (int32_t)0xc0000000);
    tcg_gen_andi_i64(vc, vc, 0x3fffffff);
    tcg_gen_or_i64(vc, vc, tmp);

    tcg_temp_free(tmp);
}

static void gen_ieee_arith2(DisasContext *ctx,
                            void (*helper)(TCGv, TCGv_ptr, TCGv),
                            int rb, int rc, int fn11)
{
    TCGv vb;

    gen_qual_roundmode(ctx, fn11);
    gen_qual_flushzero(ctx, fn11);

    vb = gen_ieee_input(ctx, rb, fn11, 0);
    helper(dest_fpr(ctx, rc), cpu_env, vb);

    gen_fp_exc_raise(rc, fn11);
}

#define IEEE_ARITH2(name)                                       \
static inline void glue(gen_, name)(DisasContext *ctx,          \
                                    int rb, int rc, int fn11)   \
{                                                               \
    gen_ieee_arith2(ctx, gen_helper_##name, rb, rc, fn11);      \
}
IEEE_ARITH2(sqrts)
IEEE_ARITH2(sqrtt)
IEEE_ARITH2(cvtst)
IEEE_ARITH2(cvtts)

static void gen_cvttq(DisasContext *ctx, int rb, int rc, int fn11)
{
    TCGv vb, vc;

    /* No need to set flushzero, since we have an integer output.  */
    vb = gen_ieee_input(ctx, rb, fn11, 0);
    vc = dest_fpr(ctx, rc);

    /* Almost all integer conversions use cropped rounding;
       special case that.  */
    if ((fn11 & QUAL_RM_MASK) == QUAL_RM_C) {
        gen_helper_cvttq_c(vc, cpu_env, vb);
    } else {
        gen_qual_roundmode(ctx, fn11);
        gen_helper_cvttq(vc, cpu_env, vb);
    }
    gen_fp_exc_raise(rc, fn11);
}

static void gen_ieee_intcvt(DisasContext *ctx,
                            void (*helper)(TCGv, TCGv_ptr, TCGv),
                            int rb, int rc, int fn11)
{
    TCGv vb, vc;

    gen_qual_roundmode(ctx, fn11);
    vb = load_fpr(ctx, rb);
    vc = dest_fpr(ctx, rc);

    /* The only exception that can be raised by integer conversion
       is inexact.  Thus we only need to worry about exceptions when
       inexact handling is requested.  */
    if (fn11 & QUAL_I) {
        helper(vc, cpu_env, vb);
        gen_fp_exc_raise(rc, fn11);
    } else {
        helper(vc, cpu_env, vb);
    }
}

#define IEEE_INTCVT(name)                                       \
static inline void glue(gen_, name)(DisasContext *ctx,          \
                                    int rb, int rc, int fn11)   \
{                                                               \
    gen_ieee_intcvt(ctx, gen_helper_##name, rb, rc, fn11);      \
}
IEEE_INTCVT(cvtqs)
IEEE_INTCVT(cvtqt)

static void gen_cpy_mask(TCGv vc, TCGv va, TCGv vb, bool inv_a, uint64_t mask)
{
    TCGv vmask = tcg_const_i64(mask);
    TCGv tmp = tcg_temp_new_i64();

    if (inv_a) {
        tcg_gen_andc_i64(tmp, vmask, va);
    } else {
        tcg_gen_and_i64(tmp, va, vmask);
    }

    tcg_gen_andc_i64(vc, vb, vmask);
    tcg_gen_or_i64(vc, vc, tmp);

    tcg_temp_free(vmask);
    tcg_temp_free(tmp);
}

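/* Illustrative usage (the callers appear later in translate_one): CPYS and
   CPYSN copy just the sign bit, so they pass mask == 0x8000000000000000ULL;
   CPYSE copies sign and exponent with mask == 0xFFF0000000000000ULL.  With
   inv_a set, the masked bits taken from va are complemented, which is how
   CPYSN inverts the sign.  */
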
static void gen_ieee_arith3(DisasContext *ctx,
                            void (*helper)(TCGv, TCGv_ptr, TCGv, TCGv),
                            int ra, int rb, int rc, int fn11)
{
    TCGv va, vb, vc;

    gen_qual_roundmode(ctx, fn11);
    gen_qual_flushzero(ctx, fn11);

    va = gen_ieee_input(ctx, ra, fn11, 0);
    vb = gen_ieee_input(ctx, rb, fn11, 0);
    vc = dest_fpr(ctx, rc);
    helper(vc, cpu_env, va, vb);

    gen_fp_exc_raise(rc, fn11);
}

#define IEEE_ARITH3(name)                                               \
static inline void glue(gen_, name)(DisasContext *ctx,                  \
                                    int ra, int rb, int rc, int fn11)   \
{                                                                       \
    gen_ieee_arith3(ctx, gen_helper_##name, ra, rb, rc, fn11);          \
}
IEEE_ARITH3(adds)
IEEE_ARITH3(subs)
IEEE_ARITH3(muls)
IEEE_ARITH3(divs)
IEEE_ARITH3(addt)
IEEE_ARITH3(subt)
IEEE_ARITH3(mult)
IEEE_ARITH3(divt)

static void gen_ieee_compare(DisasContext *ctx,
                             void (*helper)(TCGv, TCGv_ptr, TCGv, TCGv),
                             int ra, int rb, int rc, int fn11)
{
    TCGv va, vb, vc;

    va = gen_ieee_input(ctx, ra, fn11, 1);
    vb = gen_ieee_input(ctx, rb, fn11, 1);
    vc = dest_fpr(ctx, rc);
    helper(vc, cpu_env, va, vb);

    gen_fp_exc_raise(rc, fn11);
}

#define IEEE_CMP3(name)                                                 \
static inline void glue(gen_, name)(DisasContext *ctx,                  \
                                    int ra, int rb, int rc, int fn11)   \
{                                                                       \
    gen_ieee_compare(ctx, gen_helper_##name, ra, rb, rc, fn11);         \
}
IEEE_CMP3(cmptun)
IEEE_CMP3(cmpteq)
IEEE_CMP3(cmptlt)
IEEE_CMP3(cmptle)

static inline uint64_t zapnot_mask(uint8_t lit)
{
    uint64_t mask = 0;
    int i;

    for (i = 0; i < 8; ++i) {
        if ((lit >> i) & 1) {
            mask |= 0xffull << (i * 8);
        }
    }
    return mask;
}

/* Implement zapnot with an immediate operand, which expands to some
   form of immediate AND.  This is a basic building block in the
   definition of many of the other byte manipulation instructions.  */
static void gen_zapnoti(TCGv dest, TCGv src, uint8_t lit)
{
    switch (lit) {
    case 0x00:
        tcg_gen_movi_i64(dest, 0);
        break;
    case 0x01:
        tcg_gen_ext8u_i64(dest, src);
        break;
    case 0x03:
        tcg_gen_ext16u_i64(dest, src);
        break;
    case 0x0f:
        tcg_gen_ext32u_i64(dest, src);
        break;
    case 0xff:
        tcg_gen_mov_i64(dest, src);
        break;
    default:
        tcg_gen_andi_i64(dest, src, zapnot_mask(lit));
        break;
    }
}
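
/* Worked example (illustrative): zapnot_mask(0x0f) == 0x00000000ffffffffull,
   so gen_zapnoti(vc, va, 0x0f) is simply a 32-bit zero-extension, which is
   why that value is special-cased with tcg_gen_ext32u_i64 above.  The byte
   manipulation expanders that follow (EXTxx/INSxx/MSKxx) all funnel through
   this helper with byte_mask values such as 0x01, 0x03, 0x0f and 0xff.  */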
940 /* EXTWH, EXTLH, EXTQH */
941 static void gen_ext_h(DisasContext
*ctx
, TCGv vc
, TCGv va
, int rb
, bool islit
,
942 uint8_t lit
, uint8_t byte_mask
)
945 tcg_gen_shli_i64(vc
, va
, (64 - lit
* 8) & 0x3f);
947 TCGv tmp
= tcg_temp_new();
948 tcg_gen_shli_i64(tmp
, load_gpr(ctx
, rb
), 3);
949 tcg_gen_neg_i64(tmp
, tmp
);
950 tcg_gen_andi_i64(tmp
, tmp
, 0x3f);
951 tcg_gen_shl_i64(vc
, va
, tmp
);
954 gen_zapnoti(vc
, vc
, byte_mask
);
957 /* EXTBL, EXTWL, EXTLL, EXTQL */
958 static void gen_ext_l(DisasContext
*ctx
, TCGv vc
, TCGv va
, int rb
, bool islit
,
959 uint8_t lit
, uint8_t byte_mask
)
962 tcg_gen_shri_i64(vc
, va
, (lit
& 7) * 8);
964 TCGv tmp
= tcg_temp_new();
965 tcg_gen_andi_i64(tmp
, load_gpr(ctx
, rb
), 7);
966 tcg_gen_shli_i64(tmp
, tmp
, 3);
967 tcg_gen_shr_i64(vc
, va
, tmp
);
970 gen_zapnoti(vc
, vc
, byte_mask
);
973 /* INSWH, INSLH, INSQH */
974 static void gen_ins_h(DisasContext
*ctx
, TCGv vc
, TCGv va
, int rb
, bool islit
,
975 uint8_t lit
, uint8_t byte_mask
)
977 TCGv tmp
= tcg_temp_new();
979 /* The instruction description has us left-shift the byte mask and extract
980 bits <15:8> and apply that zap at the end. This is equivalent to simply
981 performing the zap first and shifting afterward. */
982 gen_zapnoti(tmp
, va
, byte_mask
);
986 if (unlikely(lit
== 0)) {
987 tcg_gen_movi_i64(vc
, 0);
989 tcg_gen_shri_i64(vc
, tmp
, 64 - lit
* 8);
992 TCGv shift
= tcg_temp_new();
994 /* If (B & 7) == 0, we need to shift by 64 and leave a zero. Do this
995 portably by splitting the shift into two parts: shift_count-1 and 1.
996 Arrange for the -1 by using ones-complement instead of
997 twos-complement in the negation: ~(B * 8) & 63. */
999 tcg_gen_shli_i64(shift
, load_gpr(ctx
, rb
), 3);
1000 tcg_gen_not_i64(shift
, shift
);
1001 tcg_gen_andi_i64(shift
, shift
, 0x3f);
1003 tcg_gen_shr_i64(vc
, tmp
, shift
);
1004 tcg_gen_shri_i64(vc
, vc
, 1);
1005 tcg_temp_free(shift
);
1010 /* INSBL, INSWL, INSLL, INSQL */
1011 static void gen_ins_l(DisasContext
*ctx
, TCGv vc
, TCGv va
, int rb
, bool islit
,
1012 uint8_t lit
, uint8_t byte_mask
)
1014 TCGv tmp
= tcg_temp_new();
1016 /* The instruction description has us left-shift the byte mask
1017 the same number of byte slots as the data and apply the zap
1018 at the end. This is equivalent to simply performing the zap
1019 first and shifting afterward. */
1020 gen_zapnoti(tmp
, va
, byte_mask
);
1023 tcg_gen_shli_i64(vc
, tmp
, (lit
& 7) * 8);
1025 TCGv shift
= tcg_temp_new();
1026 tcg_gen_andi_i64(shift
, load_gpr(ctx
, rb
), 7);
1027 tcg_gen_shli_i64(shift
, shift
, 3);
1028 tcg_gen_shl_i64(vc
, tmp
, shift
);
1029 tcg_temp_free(shift
);
1034 /* MSKWH, MSKLH, MSKQH */
1035 static void gen_msk_h(DisasContext
*ctx
, TCGv vc
, TCGv va
, int rb
, bool islit
,
1036 uint8_t lit
, uint8_t byte_mask
)
1039 gen_zapnoti(vc
, va
, ~((byte_mask
<< (lit
& 7)) >> 8));
1041 TCGv shift
= tcg_temp_new();
1042 TCGv mask
= tcg_temp_new();
1044 /* The instruction description is as above, where the byte_mask
1045 is shifted left, and then we extract bits <15:8>. This can be
1046 emulated with a right-shift on the expanded byte mask. This
1047 requires extra care because for an input <2:0> == 0 we need a
1048 shift of 64 bits in order to generate a zero. This is done by
1049 splitting the shift into two parts, the variable shift - 1
1050 followed by a constant 1 shift. The code we expand below is
1051 equivalent to ~(B * 8) & 63. */
1053 tcg_gen_shli_i64(shift
, load_gpr(ctx
, rb
), 3);
1054 tcg_gen_not_i64(shift
, shift
);
1055 tcg_gen_andi_i64(shift
, shift
, 0x3f);
1056 tcg_gen_movi_i64(mask
, zapnot_mask (byte_mask
));
1057 tcg_gen_shr_i64(mask
, mask
, shift
);
1058 tcg_gen_shri_i64(mask
, mask
, 1);
1060 tcg_gen_andc_i64(vc
, va
, mask
);
1062 tcg_temp_free(mask
);
1063 tcg_temp_free(shift
);
1067 /* MSKBL, MSKWL, MSKLL, MSKQL */
1068 static void gen_msk_l(DisasContext
*ctx
, TCGv vc
, TCGv va
, int rb
, bool islit
,
1069 uint8_t lit
, uint8_t byte_mask
)
1072 gen_zapnoti(vc
, va
, ~(byte_mask
<< (lit
& 7)));
1074 TCGv shift
= tcg_temp_new();
1075 TCGv mask
= tcg_temp_new();
1077 tcg_gen_andi_i64(shift
, load_gpr(ctx
, rb
), 7);
1078 tcg_gen_shli_i64(shift
, shift
, 3);
1079 tcg_gen_movi_i64(mask
, zapnot_mask(byte_mask
));
1080 tcg_gen_shl_i64(mask
, mask
, shift
);
1082 tcg_gen_andc_i64(vc
, va
, mask
);
1084 tcg_temp_free(mask
);
1085 tcg_temp_free(shift
);
1089 static void gen_rx(DisasContext
*ctx
, int ra
, int set
)
1094 tcg_gen_ld8u_i64(ctx
->ir
[ra
], cpu_env
,
1095 offsetof(CPUAlphaState
, intr_flag
));
1098 tmp
= tcg_const_i32(set
);
1099 tcg_gen_st8_i32(tmp
, cpu_env
, offsetof(CPUAlphaState
, intr_flag
));
1100 tcg_temp_free_i32(tmp
);
1103 static ExitStatus
gen_call_pal(DisasContext
*ctx
, int palcode
)
1105 /* We're emulating OSF/1 PALcode. Many of these are trivial access
1106 to internal cpu registers. */
1108 /* Unprivileged PAL call */
1109 if (palcode
>= 0x80 && palcode
< 0xC0) {
1113 /* No-op inside QEMU. */
1117 tcg_gen_ld_i64(ctx
->ir
[IR_V0
], cpu_env
,
1118 offsetof(CPUAlphaState
, unique
));
1122 tcg_gen_st_i64(ctx
->ir
[IR_A0
], cpu_env
,
1123 offsetof(CPUAlphaState
, unique
));
1132 #ifndef CONFIG_USER_ONLY
1133 /* Privileged PAL code */
1134 if (palcode
< 0x40 && (ctx
->tb
->flags
& TB_FLAGS_USER_MODE
) == 0) {
1138 /* No-op inside QEMU. */
1142 /* No-op inside QEMU. */
1146 tcg_gen_st_i64(ctx
->ir
[IR_A0
], cpu_env
,
1147 offsetof(CPUAlphaState
, vptptr
));
1151 tcg_gen_st_i64(ctx
->ir
[IR_A0
], cpu_env
,
1152 offsetof(CPUAlphaState
, sysval
));
1156 tcg_gen_ld_i64(ctx
->ir
[IR_V0
], cpu_env
,
1157 offsetof(CPUAlphaState
, sysval
));
1164 /* Note that we already know we're in kernel mode, so we know
1165 that PS only contains the 3 IPL bits. */
1166 tcg_gen_ld8u_i64(ctx
->ir
[IR_V0
], cpu_env
,
1167 offsetof(CPUAlphaState
, ps
));
1169 /* But make sure and store only the 3 IPL bits from the user. */
1170 tmp
= tcg_temp_new();
1171 tcg_gen_andi_i64(tmp
, ctx
->ir
[IR_A0
], PS_INT_MASK
);
1172 tcg_gen_st8_i64(tmp
, cpu_env
, offsetof(CPUAlphaState
, ps
));
1179 tcg_gen_ld8u_i64(ctx
->ir
[IR_V0
], cpu_env
,
1180 offsetof(CPUAlphaState
, ps
));
1184 tcg_gen_st_i64(ctx
->ir
[IR_A0
], cpu_env
,
1185 offsetof(CPUAlphaState
, usp
));
1189 tcg_gen_ld_i64(ctx
->ir
[IR_V0
], cpu_env
,
1190 offsetof(CPUAlphaState
, usp
));
1194 tcg_gen_ld32s_i64(ctx
->ir
[IR_V0
], cpu_env
,
1195 -offsetof(AlphaCPU
, env
) + offsetof(CPUState
, cpu_index
));
1205 return gen_invalid(ctx
);
1208 #ifdef CONFIG_USER_ONLY
1209 return gen_excp(ctx
, EXCP_CALL_PAL
, palcode
);
1212 TCGv tmp
= tcg_temp_new();
1213 uint64_t exc_addr
= ctx
->pc
;
1214 uint64_t entry
= ctx
->palbr
;
1216 if (ctx
->tb
->flags
& TB_FLAGS_PAL_MODE
) {
1219 tcg_gen_movi_i64(tmp
, 1);
1220 tcg_gen_st8_i64(tmp
, cpu_env
, offsetof(CPUAlphaState
, pal_mode
));
1223 tcg_gen_movi_i64(tmp
, exc_addr
);
1224 tcg_gen_st_i64(tmp
, cpu_env
, offsetof(CPUAlphaState
, exc_addr
));
1227 entry
+= (palcode
& 0x80
1228 ? 0x2000 + (palcode
- 0x80) * 64
1229 : 0x1000 + palcode
* 64);
1231 /* Since the destination is running in PALmode, we don't really
1232 need the page permissions check. We'll see the existence of
1233 the page when we create the TB, and we'll flush all TBs if
1234 we change the PAL base register. */
1235 if (!ctx
->singlestep_enabled
&& !(ctx
->tb
->cflags
& CF_LAST_IO
)) {
1237 tcg_gen_movi_i64(cpu_pc
, entry
);
1238 tcg_gen_exit_tb((uintptr_t)ctx
->tb
);
1239 return EXIT_GOTO_TB
;
1241 tcg_gen_movi_i64(cpu_pc
, entry
);
1242 return EXIT_PC_UPDATED
;
1248 #ifndef CONFIG_USER_ONLY
1250 #define PR_BYTE 0x100000
1251 #define PR_LONG 0x200000
1253 static int cpu_pr_data(int pr
)
1256 case 0: return offsetof(CPUAlphaState
, ps
) | PR_BYTE
;
1257 case 1: return offsetof(CPUAlphaState
, fen
) | PR_BYTE
;
1258 case 2: return offsetof(CPUAlphaState
, pcc_ofs
) | PR_LONG
;
1259 case 3: return offsetof(CPUAlphaState
, trap_arg0
);
1260 case 4: return offsetof(CPUAlphaState
, trap_arg1
);
1261 case 5: return offsetof(CPUAlphaState
, trap_arg2
);
1262 case 6: return offsetof(CPUAlphaState
, exc_addr
);
1263 case 7: return offsetof(CPUAlphaState
, palbr
);
1264 case 8: return offsetof(CPUAlphaState
, ptbr
);
1265 case 9: return offsetof(CPUAlphaState
, vptptr
);
1266 case 10: return offsetof(CPUAlphaState
, unique
);
1267 case 11: return offsetof(CPUAlphaState
, sysval
);
1268 case 12: return offsetof(CPUAlphaState
, usp
);
1271 return offsetof(CPUAlphaState
, scratch
[pr
- 40]);
1274 return offsetof(CPUAlphaState
, alarm_expire
);
1279 static ExitStatus
gen_mfpr(DisasContext
*ctx
, TCGv va
, int regno
)
1281 void (*helper
)(TCGv
);
1286 /* Accessing the "non-shadow" general registers. */
1287 regno
= regno
== 39 ? 25 : regno
- 32 + 8;
1288 tcg_gen_mov_i64(va
, cpu_std_ir
[regno
]);
1291 case 250: /* WALLTIME */
1292 helper
= gen_helper_get_walltime
;
1294 case 249: /* VMTIME */
1295 helper
= gen_helper_get_vmtime
;
1301 return EXIT_PC_STALE
;
1308 /* The basic registers are data only, and unknown registers
1309 are read-zero, write-ignore. */
1310 data
= cpu_pr_data(regno
);
1312 tcg_gen_movi_i64(va
, 0);
1313 } else if (data
& PR_BYTE
) {
1314 tcg_gen_ld8u_i64(va
, cpu_env
, data
& ~PR_BYTE
);
1315 } else if (data
& PR_LONG
) {
1316 tcg_gen_ld32s_i64(va
, cpu_env
, data
& ~PR_LONG
);
1318 tcg_gen_ld_i64(va
, cpu_env
, data
);
1326 static ExitStatus
gen_mtpr(DisasContext
*ctx
, TCGv vb
, int regno
)
1334 gen_helper_tbia(cpu_env
);
1339 gen_helper_tbis(cpu_env
, vb
);
1344 tmp
= tcg_const_i64(1);
1345 tcg_gen_st32_i64(tmp
, cpu_env
, -offsetof(AlphaCPU
, env
) +
1346 offsetof(CPUState
, halted
));
1347 return gen_excp(ctx
, EXCP_HLT
, 0);
1351 gen_helper_halt(vb
);
1352 return EXIT_PC_STALE
;
1356 gen_helper_set_alarm(cpu_env
, vb
);
1361 tcg_gen_st_i64(vb
, cpu_env
, offsetof(CPUAlphaState
, palbr
));
1362 /* Changing the PAL base register implies un-chaining all of the TBs
1363 that ended with a CALL_PAL. Since the base register usually only
1364 changes during boot, flushing everything works well. */
1365 gen_helper_tb_flush(cpu_env
);
1366 return EXIT_PC_STALE
;
1369 /* Accessing the "non-shadow" general registers. */
1370 regno
= regno
== 39 ? 25 : regno
- 32 + 8;
1371 tcg_gen_mov_i64(cpu_std_ir
[regno
], vb
);
1375 /* The basic registers are data only, and unknown registers
1376 are read-zero, write-ignore. */
1377 data
= cpu_pr_data(regno
);
1379 if (data
& PR_BYTE
) {
1380 tcg_gen_st8_i64(vb
, cpu_env
, data
& ~PR_BYTE
);
1381 } else if (data
& PR_LONG
) {
1382 tcg_gen_st32_i64(vb
, cpu_env
, data
& ~PR_LONG
);
1384 tcg_gen_st_i64(vb
, cpu_env
, data
);
1392 #endif /* !USER_ONLY*/
1394 #define REQUIRE_NO_LIT \
1401 #define REQUIRE_TB_FLAG(FLAG) \
1403 if ((ctx->tb->flags & (FLAG)) == 0) { \
1408 #define REQUIRE_REG_31(WHICH) \
1410 if (WHICH != 31) { \
1415 static ExitStatus
translate_one(DisasContext
*ctx
, uint32_t insn
)
1417 int32_t disp21
, disp16
, disp12
__attribute__((unused
));
1419 uint8_t opc
, ra
, rb
, rc
, fpfn
, fn7
, lit
;
1420 bool islit
, real_islit
;
1421 TCGv va
, vb
, vc
, tmp
, tmp2
;
1425 /* Decode all instruction fields */
1426 opc
= extract32(insn
, 26, 6);
1427 ra
= extract32(insn
, 21, 5);
1428 rb
= extract32(insn
, 16, 5);
1429 rc
= extract32(insn
, 0, 5);
1430 real_islit
= islit
= extract32(insn
, 12, 1);
1431 lit
= extract32(insn
, 13, 8);
1433 disp21
= sextract32(insn
, 0, 21);
1434 disp16
= sextract32(insn
, 0, 16);
1435 disp12
= sextract32(insn
, 0, 12);
1437 fn11
= extract32(insn
, 5, 11);
1438 fpfn
= extract32(insn
, 5, 6);
1439 fn7
= extract32(insn
, 5, 7);
1441 if (rb
== 31 && !islit
) {
1450 ret
= gen_call_pal(ctx
, insn
& 0x03ffffff);
1476 disp16
= (uint32_t)disp16
<< 16;
1480 va
= dest_gpr(ctx
, ra
);
1481 /* It's worth special-casing immediate loads. */
1483 tcg_gen_movi_i64(va
, disp16
);
1485 tcg_gen_addi_i64(va
, load_gpr(ctx
, rb
), disp16
);
1491 REQUIRE_TB_FLAG(TB_FLAGS_AMASK_BWX
);
1492 gen_load_mem(ctx
, &tcg_gen_qemu_ld8u
, ra
, rb
, disp16
, 0, 0);
1496 gen_load_mem(ctx
, &tcg_gen_qemu_ld64
, ra
, rb
, disp16
, 0, 1);
1500 REQUIRE_TB_FLAG(TB_FLAGS_AMASK_BWX
);
1501 gen_load_mem(ctx
, &tcg_gen_qemu_ld16u
, ra
, rb
, disp16
, 0, 0);
1505 REQUIRE_TB_FLAG(TB_FLAGS_AMASK_BWX
);
1506 gen_store_mem(ctx
, &tcg_gen_qemu_st16
, ra
, rb
, disp16
, 0, 0);
1510 REQUIRE_TB_FLAG(TB_FLAGS_AMASK_BWX
);
1511 gen_store_mem(ctx
, &tcg_gen_qemu_st8
, ra
, rb
, disp16
, 0, 0);
1515 gen_store_mem(ctx
, &tcg_gen_qemu_st64
, ra
, rb
, disp16
, 0, 1);
1519 vc
= dest_gpr(ctx
, rc
);
1520 vb
= load_gpr_lit(ctx
, rb
, lit
, islit
);
1524 /* Special case ADDL as SEXTL. */
1525 tcg_gen_ext32s_i64(vc
, vb
);
1529 /* Special case SUBQ as NEGQ. */
1530 tcg_gen_neg_i64(vc
, vb
);
1535 va
= load_gpr(ctx
, ra
);
1539 tcg_gen_add_i64(vc
, va
, vb
);
1540 tcg_gen_ext32s_i64(vc
, vc
);
1544 tmp
= tcg_temp_new();
1545 tcg_gen_shli_i64(tmp
, va
, 2);
1546 tcg_gen_add_i64(tmp
, tmp
, vb
);
1547 tcg_gen_ext32s_i64(vc
, tmp
);
1552 tcg_gen_sub_i64(vc
, va
, vb
);
1553 tcg_gen_ext32s_i64(vc
, vc
);
1557 tmp
= tcg_temp_new();
1558 tcg_gen_shli_i64(tmp
, va
, 2);
1559 tcg_gen_sub_i64(tmp
, tmp
, vb
);
1560 tcg_gen_ext32s_i64(vc
, tmp
);
1565 gen_helper_cmpbge(vc
, va
, vb
);
1569 tmp
= tcg_temp_new();
1570 tcg_gen_shli_i64(tmp
, va
, 3);
1571 tcg_gen_add_i64(tmp
, tmp
, vb
);
1572 tcg_gen_ext32s_i64(vc
, tmp
);
1577 tmp
= tcg_temp_new();
1578 tcg_gen_shli_i64(tmp
, va
, 3);
1579 tcg_gen_sub_i64(tmp
, tmp
, vb
);
1580 tcg_gen_ext32s_i64(vc
, tmp
);
1585 tcg_gen_setcond_i64(TCG_COND_LTU
, vc
, va
, vb
);
1589 tcg_gen_add_i64(vc
, va
, vb
);
1593 tmp
= tcg_temp_new();
1594 tcg_gen_shli_i64(tmp
, va
, 2);
1595 tcg_gen_add_i64(vc
, tmp
, vb
);
1600 tcg_gen_sub_i64(vc
, va
, vb
);
1604 tmp
= tcg_temp_new();
1605 tcg_gen_shli_i64(tmp
, va
, 2);
1606 tcg_gen_sub_i64(vc
, tmp
, vb
);
1611 tcg_gen_setcond_i64(TCG_COND_EQ
, vc
, va
, vb
);
1615 tmp
= tcg_temp_new();
1616 tcg_gen_shli_i64(tmp
, va
, 3);
1617 tcg_gen_add_i64(vc
, tmp
, vb
);
1622 tmp
= tcg_temp_new();
1623 tcg_gen_shli_i64(tmp
, va
, 3);
1624 tcg_gen_sub_i64(vc
, tmp
, vb
);
1629 tcg_gen_setcond_i64(TCG_COND_LEU
, vc
, va
, vb
);
1633 tmp
= tcg_temp_new();
1634 tcg_gen_ext32s_i64(tmp
, va
);
1635 tcg_gen_ext32s_i64(vc
, vb
);
1636 tcg_gen_add_i64(tmp
, tmp
, vc
);
1637 tcg_gen_ext32s_i64(vc
, tmp
);
1638 gen_helper_check_overflow(cpu_env
, vc
, tmp
);
1643 tmp
= tcg_temp_new();
1644 tcg_gen_ext32s_i64(tmp
, va
);
1645 tcg_gen_ext32s_i64(vc
, vb
);
1646 tcg_gen_sub_i64(tmp
, tmp
, vc
);
1647 tcg_gen_ext32s_i64(vc
, tmp
);
1648 gen_helper_check_overflow(cpu_env
, vc
, tmp
);
1653 tcg_gen_setcond_i64(TCG_COND_LT
, vc
, va
, vb
);
1657 tmp
= tcg_temp_new();
1658 tmp2
= tcg_temp_new();
1659 tcg_gen_eqv_i64(tmp
, va
, vb
);
1660 tcg_gen_mov_i64(tmp2
, va
);
1661 tcg_gen_add_i64(vc
, va
, vb
);
1662 tcg_gen_xor_i64(tmp2
, tmp2
, vc
);
1663 tcg_gen_and_i64(tmp
, tmp
, tmp2
);
1664 tcg_gen_shri_i64(tmp
, tmp
, 63);
1665 tcg_gen_movi_i64(tmp2
, 0);
1666 gen_helper_check_overflow(cpu_env
, tmp
, tmp2
);
1668 tcg_temp_free(tmp2
);
1672 tmp
= tcg_temp_new();
1673 tmp2
= tcg_temp_new();
1674 tcg_gen_xor_i64(tmp
, va
, vb
);
1675 tcg_gen_mov_i64(tmp2
, va
);
1676 tcg_gen_sub_i64(vc
, va
, vb
);
1677 tcg_gen_xor_i64(tmp2
, tmp2
, vc
);
1678 tcg_gen_and_i64(tmp
, tmp
, tmp2
);
1679 tcg_gen_shri_i64(tmp
, tmp
, 63);
1680 tcg_gen_movi_i64(tmp2
, 0);
1681 gen_helper_check_overflow(cpu_env
, tmp
, tmp2
);
1683 tcg_temp_free(tmp2
);
1687 tcg_gen_setcond_i64(TCG_COND_LE
, vc
, va
, vb
);
1697 /* Special case BIS as NOP. */
1701 /* Special case BIS as MOV. */
1702 vc
= dest_gpr(ctx
, rc
);
1704 tcg_gen_movi_i64(vc
, lit
);
1706 tcg_gen_mov_i64(vc
, load_gpr(ctx
, rb
));
1712 vc
= dest_gpr(ctx
, rc
);
1713 vb
= load_gpr_lit(ctx
, rb
, lit
, islit
);
1715 if (fn7
== 0x28 && ra
== 31) {
1716 /* Special case ORNOT as NOT. */
1717 tcg_gen_not_i64(vc
, vb
);
1721 va
= load_gpr(ctx
, ra
);
1725 tcg_gen_and_i64(vc
, va
, vb
);
1729 tcg_gen_andc_i64(vc
, va
, vb
);
1733 tmp
= tcg_temp_new();
1734 tcg_gen_andi_i64(tmp
, va
, 1);
1735 tcg_gen_movcond_i64(TCG_COND_NE
, vc
, tmp
, load_zero(ctx
),
1736 vb
, load_gpr(ctx
, rc
));
1741 tmp
= tcg_temp_new();
1742 tcg_gen_andi_i64(tmp
, va
, 1);
1743 tcg_gen_movcond_i64(TCG_COND_EQ
, vc
, tmp
, load_zero(ctx
),
1744 vb
, load_gpr(ctx
, rc
));
1749 tcg_gen_or_i64(vc
, va
, vb
);
1753 tcg_gen_movcond_i64(TCG_COND_EQ
, vc
, va
, load_zero(ctx
),
1754 vb
, load_gpr(ctx
, rc
));
1758 tcg_gen_movcond_i64(TCG_COND_NE
, vc
, va
, load_zero(ctx
),
1759 vb
, load_gpr(ctx
, rc
));
1763 tcg_gen_orc_i64(vc
, va
, vb
);
1767 tcg_gen_xor_i64(vc
, va
, vb
);
1771 tcg_gen_movcond_i64(TCG_COND_LT
, vc
, va
, load_zero(ctx
),
1772 vb
, load_gpr(ctx
, rc
));
1776 tcg_gen_movcond_i64(TCG_COND_GE
, vc
, va
, load_zero(ctx
),
1777 vb
, load_gpr(ctx
, rc
));
1781 tcg_gen_eqv_i64(vc
, va
, vb
);
1787 uint64_t amask
= ctx
->tb
->flags
>> TB_FLAGS_AMASK_SHIFT
;
1788 tcg_gen_andi_i64(vc
, vb
, ~amask
);
1793 tcg_gen_movcond_i64(TCG_COND_LE
, vc
, va
, load_zero(ctx
),
1794 vb
, load_gpr(ctx
, rc
));
1798 tcg_gen_movcond_i64(TCG_COND_GT
, vc
, va
, load_zero(ctx
),
1799 vb
, load_gpr(ctx
, rc
));
1804 tcg_gen_movi_i64(vc
, ctx
->implver
);
1812 vc
= dest_gpr(ctx
, rc
);
1813 va
= load_gpr(ctx
, ra
);
1817 gen_msk_l(ctx
, vc
, va
, rb
, islit
, lit
, 0x01);
1821 gen_ext_l(ctx
, vc
, va
, rb
, islit
, lit
, 0x01);
1825 gen_ins_l(ctx
, vc
, va
, rb
, islit
, lit
, 0x01);
1829 gen_msk_l(ctx
, vc
, va
, rb
, islit
, lit
, 0x03);
1833 gen_ext_l(ctx
, vc
, va
, rb
, islit
, lit
, 0x03);
1837 gen_ins_l(ctx
, vc
, va
, rb
, islit
, lit
, 0x03);
1841 gen_msk_l(ctx
, vc
, va
, rb
, islit
, lit
, 0x0f);
1845 gen_ext_l(ctx
, vc
, va
, rb
, islit
, lit
, 0x0f);
1849 gen_ins_l(ctx
, vc
, va
, rb
, islit
, lit
, 0x0f);
1854 gen_zapnoti(vc
, va
, ~lit
);
1856 gen_helper_zap(vc
, va
, load_gpr(ctx
, rb
));
1862 gen_zapnoti(vc
, va
, lit
);
1864 gen_helper_zapnot(vc
, va
, load_gpr(ctx
, rb
));
1869 gen_msk_l(ctx
, vc
, va
, rb
, islit
, lit
, 0xff);
1874 tcg_gen_shri_i64(vc
, va
, lit
& 0x3f);
1876 tmp
= tcg_temp_new();
1877 vb
= load_gpr(ctx
, rb
);
1878 tcg_gen_andi_i64(tmp
, vb
, 0x3f);
1879 tcg_gen_shr_i64(vc
, va
, tmp
);
1885 gen_ext_l(ctx
, vc
, va
, rb
, islit
, lit
, 0xff);
1890 tcg_gen_shli_i64(vc
, va
, lit
& 0x3f);
1892 tmp
= tcg_temp_new();
1893 vb
= load_gpr(ctx
, rb
);
1894 tcg_gen_andi_i64(tmp
, vb
, 0x3f);
1895 tcg_gen_shl_i64(vc
, va
, tmp
);
1901 gen_ins_l(ctx
, vc
, va
, rb
, islit
, lit
, 0xff);
1906 tcg_gen_sari_i64(vc
, va
, lit
& 0x3f);
1908 tmp
= tcg_temp_new();
1909 vb
= load_gpr(ctx
, rb
);
1910 tcg_gen_andi_i64(tmp
, vb
, 0x3f);
1911 tcg_gen_sar_i64(vc
, va
, tmp
);
1917 gen_msk_h(ctx
, vc
, va
, rb
, islit
, lit
, 0x03);
1921 gen_ins_h(ctx
, vc
, va
, rb
, islit
, lit
, 0x03);
1925 gen_ext_h(ctx
, vc
, va
, rb
, islit
, lit
, 0x03);
1929 gen_msk_h(ctx
, vc
, va
, rb
, islit
, lit
, 0x0f);
1933 gen_ins_h(ctx
, vc
, va
, rb
, islit
, lit
, 0x0f);
1937 gen_ext_h(ctx
, vc
, va
, rb
, islit
, lit
, 0x0f);
1941 gen_msk_h(ctx
, vc
, va
, rb
, islit
, lit
, 0xff);
1945 gen_ins_h(ctx
, vc
, va
, rb
, islit
, lit
, 0xff);
1949 gen_ext_h(ctx
, vc
, va
, rb
, islit
, lit
, 0xff);
1957 vc
= dest_gpr(ctx
, rc
);
1958 vb
= load_gpr_lit(ctx
, rb
, lit
, islit
);
1959 va
= load_gpr(ctx
, ra
);
1963 tcg_gen_mul_i64(vc
, va
, vb
);
1964 tcg_gen_ext32s_i64(vc
, vc
);
1968 tcg_gen_mul_i64(vc
, va
, vb
);
1972 tmp
= tcg_temp_new();
1973 tcg_gen_mulu2_i64(tmp
, vc
, va
, vb
);
1978 tmp
= tcg_temp_new();
1979 tcg_gen_ext32s_i64(tmp
, va
);
1980 tcg_gen_ext32s_i64(vc
, vb
);
1981 tcg_gen_mul_i64(tmp
, tmp
, vc
);
1982 tcg_gen_ext32s_i64(vc
, tmp
);
1983 gen_helper_check_overflow(cpu_env
, vc
, tmp
);
1988 tmp
= tcg_temp_new();
1989 tmp2
= tcg_temp_new();
1990 tcg_gen_muls2_i64(vc
, tmp
, va
, vb
);
1991 tcg_gen_sari_i64(tmp2
, vc
, 63);
1992 gen_helper_check_overflow(cpu_env
, tmp
, tmp2
);
1994 tcg_temp_free(tmp2
);
2002 REQUIRE_TB_FLAG(TB_FLAGS_AMASK_FIX
);
2003 vc
= dest_fpr(ctx
, rc
);
2004 switch (fpfn
) { /* fn11 & 0x3F */
2008 t32
= tcg_temp_new_i32();
2009 va
= load_gpr(ctx
, ra
);
2010 tcg_gen_extrl_i64_i32(t32
, va
);
2011 gen_helper_memory_to_s(vc
, t32
);
2012 tcg_temp_free_i32(t32
);
2017 vb
= load_fpr(ctx
, rb
);
2018 gen_helper_sqrtf(vc
, cpu_env
, vb
);
2023 gen_sqrts(ctx
, rb
, rc
, fn11
);
2028 t32
= tcg_temp_new_i32();
2029 va
= load_gpr(ctx
, ra
);
2030 tcg_gen_extrl_i64_i32(t32
, va
);
2031 gen_helper_memory_to_f(vc
, t32
);
2032 tcg_temp_free_i32(t32
);
2037 va
= load_gpr(ctx
, ra
);
2038 tcg_gen_mov_i64(vc
, va
);
2043 vb
= load_fpr(ctx
, rb
);
2044 gen_helper_sqrtg(vc
, cpu_env
, vb
);
2049 gen_sqrtt(ctx
, rb
, rc
, fn11
);
2057 /* VAX floating point */
2058 /* XXX: rounding mode and trap are ignored (!) */
2059 vc
= dest_fpr(ctx
, rc
);
2060 vb
= load_fpr(ctx
, rb
);
2061 va
= load_fpr(ctx
, ra
);
2062 switch (fpfn
) { /* fn11 & 0x3F */
2065 gen_helper_addf(vc
, cpu_env
, va
, vb
);
2069 gen_helper_subf(vc
, cpu_env
, va
, vb
);
2073 gen_helper_mulf(vc
, cpu_env
, va
, vb
);
2077 gen_helper_divf(vc
, cpu_env
, va
, vb
);
2085 gen_helper_addg(vc
, cpu_env
, va
, vb
);
2089 gen_helper_subg(vc
, cpu_env
, va
, vb
);
2093 gen_helper_mulg(vc
, cpu_env
, va
, vb
);
2097 gen_helper_divg(vc
, cpu_env
, va
, vb
);
2101 gen_helper_cmpgeq(vc
, cpu_env
, va
, vb
);
2105 gen_helper_cmpglt(vc
, cpu_env
, va
, vb
);
2109 gen_helper_cmpgle(vc
, cpu_env
, va
, vb
);
2114 gen_helper_cvtgf(vc
, cpu_env
, vb
);
2123 gen_helper_cvtgq(vc
, cpu_env
, vb
);
2128 gen_helper_cvtqf(vc
, cpu_env
, vb
);
2133 gen_helper_cvtqg(vc
, cpu_env
, vb
);
2141 /* IEEE floating-point */
2142 switch (fpfn
) { /* fn11 & 0x3F */
2145 gen_adds(ctx
, ra
, rb
, rc
, fn11
);
2149 gen_subs(ctx
, ra
, rb
, rc
, fn11
);
2153 gen_muls(ctx
, ra
, rb
, rc
, fn11
);
2157 gen_divs(ctx
, ra
, rb
, rc
, fn11
);
2161 gen_addt(ctx
, ra
, rb
, rc
, fn11
);
2165 gen_subt(ctx
, ra
, rb
, rc
, fn11
);
2169 gen_mult(ctx
, ra
, rb
, rc
, fn11
);
2173 gen_divt(ctx
, ra
, rb
, rc
, fn11
);
2177 gen_cmptun(ctx
, ra
, rb
, rc
, fn11
);
2181 gen_cmpteq(ctx
, ra
, rb
, rc
, fn11
);
2185 gen_cmptlt(ctx
, ra
, rb
, rc
, fn11
);
2189 gen_cmptle(ctx
, ra
, rb
, rc
, fn11
);
2193 if (fn11
== 0x2AC || fn11
== 0x6AC) {
2195 gen_cvtst(ctx
, rb
, rc
, fn11
);
2198 gen_cvtts(ctx
, rb
, rc
, fn11
);
2204 gen_cvttq(ctx
, rb
, rc
, fn11
);
2209 gen_cvtqs(ctx
, rb
, rc
, fn11
);
2214 gen_cvtqt(ctx
, rb
, rc
, fn11
);
2226 vc
= dest_fpr(ctx
, rc
);
2227 vb
= load_fpr(ctx
, rb
);
2233 /* Special case CPYS as FNOP. */
2235 vc
= dest_fpr(ctx
, rc
);
2236 va
= load_fpr(ctx
, ra
);
2238 /* Special case CPYS as FMOV. */
2239 tcg_gen_mov_i64(vc
, va
);
2241 vb
= load_fpr(ctx
, rb
);
2242 gen_cpy_mask(vc
, va
, vb
, 0, 0x8000000000000000ULL
);
2248 vc
= dest_fpr(ctx
, rc
);
2249 vb
= load_fpr(ctx
, rb
);
2250 va
= load_fpr(ctx
, ra
);
2251 gen_cpy_mask(vc
, va
, vb
, 1, 0x8000000000000000ULL
);
2255 vc
= dest_fpr(ctx
, rc
);
2256 vb
= load_fpr(ctx
, rb
);
2257 va
= load_fpr(ctx
, ra
);
2258 gen_cpy_mask(vc
, va
, vb
, 0, 0xFFF0000000000000ULL
);
2262 va
= load_fpr(ctx
, ra
);
2263 gen_helper_store_fpcr(cpu_env
, va
);
2264 if (ctx
->tb_rm
== QUAL_RM_D
) {
2265 /* Re-do the copy of the rounding mode to fp_status
2266 the next time we use dynamic rounding. */
2272 va
= dest_fpr(ctx
, ra
);
2273 gen_helper_load_fpcr(va
, cpu_env
);
2277 gen_fcmov(ctx
, TCG_COND_EQ
, ra
, rb
, rc
);
2281 gen_fcmov(ctx
, TCG_COND_NE
, ra
, rb
, rc
);
2285 gen_fcmov(ctx
, TCG_COND_LT
, ra
, rb
, rc
);
2289 gen_fcmov(ctx
, TCG_COND_GE
, ra
, rb
, rc
);
2293 gen_fcmov(ctx
, TCG_COND_LE
, ra
, rb
, rc
);
2297 gen_fcmov(ctx
, TCG_COND_GT
, ra
, rb
, rc
);
2299 case 0x030: /* CVTQL */
2300 case 0x130: /* CVTQL/V */
2301 case 0x530: /* CVTQL/SV */
2303 vc
= dest_fpr(ctx
, rc
);
2304 vb
= load_fpr(ctx
, rb
);
2305 gen_helper_cvtql(vc
, cpu_env
, vb
);
2306 gen_fp_exc_raise(rc
, fn11
);
2314 switch ((uint16_t)disp16
) {
2341 va
= dest_gpr(ctx
, ra
);
2342 if (ctx
->tb
->cflags
& CF_USE_ICOUNT
) {
2344 gen_helper_load_pcc(va
, cpu_env
);
2346 ret
= EXIT_PC_STALE
;
2348 gen_helper_load_pcc(va
, cpu_env
);
2376 /* HW_MFPR (PALcode) */
2377 #ifndef CONFIG_USER_ONLY
2378 REQUIRE_TB_FLAG(TB_FLAGS_PAL_MODE
);
2379 va
= dest_gpr(ctx
, ra
);
2380 ret
= gen_mfpr(ctx
, va
, insn
& 0xffff);
2387 /* JMP, JSR, RET, JSR_COROUTINE. These only differ by the branch
2388 prediction stack action, which of course we don't implement. */
2389 vb
= load_gpr(ctx
, rb
);
2390 tcg_gen_andi_i64(cpu_pc
, vb
, ~3);
2392 tcg_gen_movi_i64(ctx
->ir
[ra
], ctx
->pc
);
2394 ret
= EXIT_PC_UPDATED
;
2398 /* HW_LD (PALcode) */
2399 #ifndef CONFIG_USER_ONLY
2400 REQUIRE_TB_FLAG(TB_FLAGS_PAL_MODE
);
2402 TCGv addr
= tcg_temp_new();
2403 vb
= load_gpr(ctx
, rb
);
2404 va
= dest_gpr(ctx
, ra
);
2406 tcg_gen_addi_i64(addr
, vb
, disp12
);
2407 switch ((insn
>> 12) & 0xF) {
2409 /* Longword physical access (hw_ldl/p) */
2410 gen_helper_ldl_phys(va
, cpu_env
, addr
);
2413 /* Quadword physical access (hw_ldq/p) */
2414 gen_helper_ldq_phys(va
, cpu_env
, addr
);
2417 /* Longword physical access with lock (hw_ldl_l/p) */
2418 gen_helper_ldl_l_phys(va
, cpu_env
, addr
);
2421 /* Quadword physical access with lock (hw_ldq_l/p) */
2422 gen_helper_ldq_l_phys(va
, cpu_env
, addr
);
2425 /* Longword virtual PTE fetch (hw_ldl/v) */
2428 /* Quadword virtual PTE fetch (hw_ldq/v) */
2438 /* Longword virtual access (hw_ldl) */
2441 /* Quadword virtual access (hw_ldq) */
2444 /* Longword virtual access with protection check (hw_ldl/w) */
2445 tcg_gen_qemu_ld_i64(va
, addr
, MMU_KERNEL_IDX
, MO_LESL
);
2448 /* Quadword virtual access with protection check (hw_ldq/w) */
2449 tcg_gen_qemu_ld_i64(va
, addr
, MMU_KERNEL_IDX
, MO_LEQ
);
2452 /* Longword virtual access with alt access mode (hw_ldl/a)*/
2455 /* Quadword virtual access with alt access mode (hw_ldq/a) */
2458 /* Longword virtual access with alternate access mode and
2459 protection checks (hw_ldl/wa) */
2460 tcg_gen_qemu_ld_i64(va
, addr
, MMU_USER_IDX
, MO_LESL
);
2463 /* Quadword virtual access with alternate access mode and
2464 protection checks (hw_ldq/wa) */
2465 tcg_gen_qemu_ld_i64(va
, addr
, MMU_USER_IDX
, MO_LEQ
);
2468 tcg_temp_free(addr
);
2476 vc
= dest_gpr(ctx
, rc
);
2479 REQUIRE_TB_FLAG(TB_FLAGS_AMASK_FIX
);
2481 va
= load_fpr(ctx
, ra
);
2482 tcg_gen_mov_i64(vc
, va
);
2484 } else if (fn7
== 0x78) {
2486 REQUIRE_TB_FLAG(TB_FLAGS_AMASK_FIX
);
2488 t32
= tcg_temp_new_i32();
2489 va
= load_fpr(ctx
, ra
);
2490 gen_helper_s_to_memory(t32
, va
);
2491 tcg_gen_ext_i32_i64(vc
, t32
);
2492 tcg_temp_free_i32(t32
);
2496 vb
= load_gpr_lit(ctx
, rb
, lit
, islit
);
2500 REQUIRE_TB_FLAG(TB_FLAGS_AMASK_BWX
);
2502 tcg_gen_ext8s_i64(vc
, vb
);
2506 REQUIRE_TB_FLAG(TB_FLAGS_AMASK_BWX
);
2508 tcg_gen_ext16s_i64(vc
, vb
);
2512 REQUIRE_TB_FLAG(TB_FLAGS_AMASK_CIX
);
2515 gen_helper_ctpop(vc
, vb
);
2519 REQUIRE_TB_FLAG(TB_FLAGS_AMASK_MVI
);
2521 va
= load_gpr(ctx
, ra
);
2522 gen_helper_perr(vc
, va
, vb
);
2526 REQUIRE_TB_FLAG(TB_FLAGS_AMASK_CIX
);
2529 gen_helper_ctlz(vc
, vb
);
2533 REQUIRE_TB_FLAG(TB_FLAGS_AMASK_CIX
);
2536 gen_helper_cttz(vc
, vb
);
2540 REQUIRE_TB_FLAG(TB_FLAGS_AMASK_MVI
);
2543 gen_helper_unpkbw(vc
, vb
);
2547 REQUIRE_TB_FLAG(TB_FLAGS_AMASK_MVI
);
2550 gen_helper_unpkbl(vc
, vb
);
2554 REQUIRE_TB_FLAG(TB_FLAGS_AMASK_MVI
);
2557 gen_helper_pkwb(vc
, vb
);
2561 REQUIRE_TB_FLAG(TB_FLAGS_AMASK_MVI
);
2564 gen_helper_pklb(vc
, vb
);
2568 REQUIRE_TB_FLAG(TB_FLAGS_AMASK_MVI
);
2569 va
= load_gpr(ctx
, ra
);
2570 gen_helper_minsb8(vc
, va
, vb
);
2574 REQUIRE_TB_FLAG(TB_FLAGS_AMASK_MVI
);
2575 va
= load_gpr(ctx
, ra
);
2576 gen_helper_minsw4(vc
, va
, vb
);
2580 REQUIRE_TB_FLAG(TB_FLAGS_AMASK_MVI
);
2581 va
= load_gpr(ctx
, ra
);
2582 gen_helper_minub8(vc
, va
, vb
);
2586 REQUIRE_TB_FLAG(TB_FLAGS_AMASK_MVI
);
2587 va
= load_gpr(ctx
, ra
);
2588 gen_helper_minuw4(vc
, va
, vb
);
2592 REQUIRE_TB_FLAG(TB_FLAGS_AMASK_MVI
);
2593 va
= load_gpr(ctx
, ra
);
2594 gen_helper_maxub8(vc
, va
, vb
);
2598 REQUIRE_TB_FLAG(TB_FLAGS_AMASK_MVI
);
2599 va
= load_gpr(ctx
, ra
);
2600 gen_helper_maxuw4(vc
, va
, vb
);
2604 REQUIRE_TB_FLAG(TB_FLAGS_AMASK_MVI
);
2605 va
= load_gpr(ctx
, ra
);
2606 gen_helper_maxsb8(vc
, va
, vb
);
2610 REQUIRE_TB_FLAG(TB_FLAGS_AMASK_MVI
);
2611 va
= load_gpr(ctx
, ra
);
2612 gen_helper_maxsw4(vc
, va
, vb
);
2620 /* HW_MTPR (PALcode) */
2621 #ifndef CONFIG_USER_ONLY
2622 REQUIRE_TB_FLAG(TB_FLAGS_PAL_MODE
);
2623 vb
= load_gpr(ctx
, rb
);
2624 ret
= gen_mtpr(ctx
, vb
, insn
& 0xffff);
2631 /* HW_RET (PALcode) */
2632 #ifndef CONFIG_USER_ONLY
2633 REQUIRE_TB_FLAG(TB_FLAGS_PAL_MODE
);
2635 /* Pre-EV6 CPUs interpreted this as HW_REI, loading the return
2636 address from EXC_ADDR. This turns out to be useful for our
2637 emulation PALcode, so continue to accept it. */
2638 ctx
->lit
= vb
= tcg_temp_new();
2639 tcg_gen_ld_i64(vb
, cpu_env
, offsetof(CPUAlphaState
, exc_addr
));
2641 vb
= load_gpr(ctx
, rb
);
2643 tmp
= tcg_temp_new();
2644 tcg_gen_movi_i64(tmp
, 0);
2645 tcg_gen_st8_i64(tmp
, cpu_env
, offsetof(CPUAlphaState
, intr_flag
));
2646 tcg_gen_movi_i64(cpu_lock_addr
, -1);
2647 tcg_gen_andi_i64(tmp
, vb
, 1);
2648 tcg_gen_st8_i64(tmp
, cpu_env
, offsetof(CPUAlphaState
, pal_mode
));
2649 tcg_gen_andi_i64(cpu_pc
, vb
, ~3);
2650 ret
= EXIT_PC_UPDATED
;
2657 /* HW_ST (PALcode) */
2658 #ifndef CONFIG_USER_ONLY
2659 REQUIRE_TB_FLAG(TB_FLAGS_PAL_MODE
);
2661 TCGv addr
= tcg_temp_new();
2662 va
= load_gpr(ctx
, ra
);
2663 vb
= load_gpr(ctx
, rb
);
2665 tcg_gen_addi_i64(addr
, vb
, disp12
);
2666 switch ((insn
>> 12) & 0xF) {
2668 /* Longword physical access */
2669 gen_helper_stl_phys(cpu_env
, addr
, va
);
2672 /* Quadword physical access */
2673 gen_helper_stq_phys(cpu_env
, addr
, va
);
2676 /* Longword physical access with lock */
2677 gen_helper_stl_c_phys(dest_gpr(ctx
, ra
), cpu_env
, addr
, va
);
2680 /* Quadword physical access with lock */
2681 gen_helper_stq_c_phys(dest_gpr(ctx
, ra
), cpu_env
, addr
, va
);
2684 /* Longword virtual access */
2687 /* Quadword virtual access */
2708 /* Longword virtual access with alternate access mode */
2711 /* Quadword virtual access with alternate access mode */
2720 tcg_temp_free(addr
);
2728 gen_load_mem(ctx
, &gen_qemu_ldf
, ra
, rb
, disp16
, 1, 0);
2732 gen_load_mem(ctx
, &gen_qemu_ldg
, ra
, rb
, disp16
, 1, 0);
2736 gen_load_mem(ctx
, &gen_qemu_lds
, ra
, rb
, disp16
, 1, 0);
2740 gen_load_mem(ctx
, &tcg_gen_qemu_ld64
, ra
, rb
, disp16
, 1, 0);
2744 gen_store_mem(ctx
, &gen_qemu_stf
, ra
, rb
, disp16
, 1, 0);
2748 gen_store_mem(ctx
, &gen_qemu_stg
, ra
, rb
, disp16
, 1, 0);
2752 gen_store_mem(ctx
, &gen_qemu_sts
, ra
, rb
, disp16
, 1, 0);
2756 gen_store_mem(ctx
, &tcg_gen_qemu_st64
, ra
, rb
, disp16
, 1, 0);
2760 gen_load_mem(ctx
, &tcg_gen_qemu_ld32s
, ra
, rb
, disp16
, 0, 0);
2764 gen_load_mem(ctx
, &tcg_gen_qemu_ld64
, ra
, rb
, disp16
, 0, 0);
2768 gen_load_mem(ctx
, &gen_qemu_ldl_l
, ra
, rb
, disp16
, 0, 0);
2772 gen_load_mem(ctx
, &gen_qemu_ldq_l
, ra
, rb
, disp16
, 0, 0);
2776 gen_store_mem(ctx
, &tcg_gen_qemu_st32
, ra
, rb
, disp16
, 0, 0);
2780 gen_store_mem(ctx
, &tcg_gen_qemu_st64
, ra
, rb
, disp16
, 0, 0);
2784 ret
= gen_store_conditional(ctx
, ra
, rb
, disp16
, 0);
2788 ret
= gen_store_conditional(ctx
, ra
, rb
, disp16
, 1);
2792 ret
= gen_bdirect(ctx
, ra
, disp21
);
2794 case 0x31: /* FBEQ */
2795 ret
= gen_fbcond(ctx
, TCG_COND_EQ
, ra
, disp21
);
2797 case 0x32: /* FBLT */
2798 ret
= gen_fbcond(ctx
, TCG_COND_LT
, ra
, disp21
);
2800 case 0x33: /* FBLE */
2801 ret
= gen_fbcond(ctx
, TCG_COND_LE
, ra
, disp21
);
2805 ret
= gen_bdirect(ctx
, ra
, disp21
);
2807 case 0x35: /* FBNE */
2808 ret
= gen_fbcond(ctx
, TCG_COND_NE
, ra
, disp21
);
2810 case 0x36: /* FBGE */
2811 ret
= gen_fbcond(ctx
, TCG_COND_GE
, ra
, disp21
);
2813 case 0x37: /* FBGT */
2814 ret
= gen_fbcond(ctx
, TCG_COND_GT
, ra
, disp21
);
2818 ret
= gen_bcond(ctx
, TCG_COND_EQ
, ra
, disp21
, 1);
2822 ret
= gen_bcond(ctx
, TCG_COND_EQ
, ra
, disp21
, 0);
2826 ret
= gen_bcond(ctx
, TCG_COND_LT
, ra
, disp21
, 0);
2830 ret
= gen_bcond(ctx
, TCG_COND_LE
, ra
, disp21
, 0);
2834 ret
= gen_bcond(ctx
, TCG_COND_NE
, ra
, disp21
, 1);
2838 ret
= gen_bcond(ctx
, TCG_COND_NE
, ra
, disp21
, 0);
2842 ret
= gen_bcond(ctx
, TCG_COND_GE
, ra
, disp21
, 0);
2846 ret
= gen_bcond(ctx
, TCG_COND_GT
, ra
, disp21
, 0);
2849 ret
= gen_invalid(ctx
);
2856 static inline void gen_intermediate_code_internal(AlphaCPU
*cpu
,
2857 TranslationBlock
*tb
,
2860 CPUState
*cs
= CPU(cpu
);
2861 CPUAlphaState
*env
= &cpu
->env
;
2862 DisasContext ctx
, *ctxp
= &ctx
;
2863 target_ulong pc_start
;
2864 target_ulong pc_mask
;
2876 ctx
.mem_idx
= cpu_mmu_index(env
);
2877 ctx
.implver
= env
->implver
;
2878 ctx
.singlestep_enabled
= cs
->singlestep_enabled
;
2880 #ifdef CONFIG_USER_ONLY
2881 ctx
.ir
= cpu_std_ir
;
2883 ctx
.palbr
= env
->palbr
;
2884 ctx
.ir
= (tb
->flags
& TB_FLAGS_PAL_MODE
? cpu_pal_ir
: cpu_std_ir
);
2887 /* ??? Every TB begins with unset rounding mode, to be initialized on
2888 the first fp insn of the TB. Alternately we could define a proper
2889 default for every TB (e.g. QUAL_RM_N or QUAL_RM_D) and make sure
2890 to reset the FP_STATUS to that default at the end of any TB that
changes the default.  We could even (gasp) dynamically figure out
2892 what default would be most efficient given the running program. */
2894 /* Similarly for flush-to-zero. */
2898 max_insns
= tb
->cflags
& CF_COUNT_MASK
;
2899 if (max_insns
== 0) {
2900 max_insns
= CF_COUNT_MASK
;
2903 if (in_superpage(&ctx
, pc_start
)) {
2904 pc_mask
= (1ULL << 41) - 1;
2906 pc_mask
= ~TARGET_PAGE_MASK
;
2911 if (unlikely(!QTAILQ_EMPTY(&cs
->breakpoints
))) {
2912 QTAILQ_FOREACH(bp
, &cs
->breakpoints
, entry
) {
2913 if (bp
->pc
== ctx
.pc
) {
2914 gen_excp(&ctx
, EXCP_DEBUG
, 0);
2920 j
= tcg_op_buf_count();
2924 tcg_ctx
.gen_opc_instr_start
[lj
++] = 0;
2927 tcg_ctx
.gen_opc_pc
[lj
] = ctx
.pc
;
2928 tcg_ctx
.gen_opc_instr_start
[lj
] = 1;
2929 tcg_ctx
.gen_opc_icount
[lj
] = num_insns
;
2931 if (num_insns
+ 1 == max_insns
&& (tb
->cflags
& CF_LAST_IO
)) {
2934 insn
= cpu_ldl_code(env
, ctx
.pc
);
2937 if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP
| CPU_LOG_TB_OP_OPT
))) {
2938 tcg_gen_debug_insn_start(ctx
.pc
);
2941 TCGV_UNUSED_I64(ctx
.zero
);
2942 TCGV_UNUSED_I64(ctx
.sink
);
2943 TCGV_UNUSED_I64(ctx
.lit
);
2946 ret
= translate_one(ctxp
, insn
);
2948 if (!TCGV_IS_UNUSED_I64(ctx
.sink
)) {
2949 tcg_gen_discard_i64(ctx
.sink
);
2950 tcg_temp_free(ctx
.sink
);
2952 if (!TCGV_IS_UNUSED_I64(ctx
.zero
)) {
2953 tcg_temp_free(ctx
.zero
);
2955 if (!TCGV_IS_UNUSED_I64(ctx
.lit
)) {
2956 tcg_temp_free(ctx
.lit
);
2959 /* If we reach a page boundary, are single stepping,
2960 or exhaust instruction count, stop generation. */
2962 && ((ctx
.pc
& pc_mask
) == 0
2963 || tcg_op_buf_full()
2964 || num_insns
>= max_insns
2966 || ctx
.singlestep_enabled
)) {
2967 ret
= EXIT_PC_STALE
;
2969 } while (ret
== NO_EXIT
);
2971 if (tb
->cflags
& CF_LAST_IO
) {
2980 tcg_gen_movi_i64(cpu_pc
, ctx
.pc
);
2982 case EXIT_PC_UPDATED
:
2983 if (ctx
.singlestep_enabled
) {
2984 gen_excp_1(EXCP_DEBUG
, 0);
2993 gen_tb_end(tb
, num_insns
);
2996 j
= tcg_op_buf_count();
2999 tcg_ctx
.gen_opc_instr_start
[lj
++] = 0;
3002 tb
->size
= ctx
.pc
- pc_start
;
3003 tb
->icount
= num_insns
;
3007 if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM
)) {
3008 qemu_log("IN: %s\n", lookup_symbol(pc_start
));
3009 log_target_disas(cs
, pc_start
, ctx
.pc
- pc_start
, 1);
3015 void gen_intermediate_code (CPUAlphaState
*env
, struct TranslationBlock
*tb
)
3017 gen_intermediate_code_internal(alpha_env_get_cpu(env
), tb
, false);
3020 void gen_intermediate_code_pc (CPUAlphaState
*env
, struct TranslationBlock
*tb
)
3022 gen_intermediate_code_internal(alpha_env_get_cpu(env
), tb
, true);
3025 void restore_state_to_opc(CPUAlphaState
*env
, TranslationBlock
*tb
, int pc_pos
)
3027 env
->pc
= tcg_ctx
.gen_opc_pc
[pc_pos
];