2 * Alpha emulation cpu translation for qemu.
4 * Copyright (c) 2007 Jocelyn Mayer
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
21 #include "disas/disas.h"
22 #include "qemu/host-utils.h"
24 #include "exec/cpu_ldst.h"
26 #include "exec/helper-proto.h"
27 #include "exec/helper-gen.h"
29 #include "trace-tcg.h"
32 #undef ALPHA_DEBUG_DISAS
33 #define CONFIG_SOFTFLOAT_INLINE
35 #ifdef ALPHA_DEBUG_DISAS
36 # define LOG_DISAS(...) qemu_log_mask(CPU_LOG_TB_IN_ASM, ## __VA_ARGS__)
38 # define LOG_DISAS(...) do { } while (0)
41 typedef struct DisasContext DisasContext
;
43 struct TranslationBlock
*tb
;
45 #ifndef CONFIG_USER_ONLY
50 /* Current rounding mode for this TB. */
52 /* Current flush-to-zero setting for this TB. */
55 /* implver value for this CPU. */
58 /* The set of registers active in the current context. */
61 /* Temporaries for $31 and $f31 as source and destination. */
64 /* Temporary for immediate constants. */
67 bool singlestep_enabled
;
70 /* Return values from translate_one, indicating the state of the TB.
71 Note that zero indicates that we are not exiting the TB. */
76 /* We have emitted one or more goto_tb. No fixup required. */
79 /* We are not using a goto_tb (for whatever reason), but have updated
80 the PC (for whatever reason), so there's no need to do it again on
84 /* We are exiting the TB, but have neither emitted a goto_tb, nor
85 updated the PC for the next instruction to be executed. */
88 /* We are ending the TB with a noreturn function call, e.g. longjmp.
89 No following code will be executed. */
93 /* global register indexes */
94 static TCGv_ptr cpu_env
;
95 static TCGv cpu_std_ir
[31];
96 static TCGv cpu_fir
[31];
98 static TCGv cpu_lock_addr
;
99 static TCGv cpu_lock_st_addr
;
100 static TCGv cpu_lock_value
;
102 #ifndef CONFIG_USER_ONLY
103 static TCGv cpu_pal_ir
[31];
106 #include "exec/gen-icount.h"
108 void alpha_translate_init(void)
110 #define DEF_VAR(V) { &cpu_##V, #V, offsetof(CPUAlphaState, V) }
112 typedef struct { TCGv
*var
; const char *name
; int ofs
; } GlobalVar
;
113 static const GlobalVar vars
[] = {
116 DEF_VAR(lock_st_addr
),
122 /* Use the symbolic register names that match the disassembler. */
123 static const char greg_names
[31][4] = {
124 "v0", "t0", "t1", "t2", "t3", "t4", "t5", "t6",
125 "t7", "s0", "s1", "s2", "s3", "s4", "s5", "fp",
126 "a0", "a1", "a2", "a3", "a4", "a5", "t8", "t9",
127 "t10", "t11", "ra", "t12", "at", "gp", "sp"
129 static const char freg_names
[31][4] = {
130 "f0", "f1", "f2", "f3", "f4", "f5", "f6", "f7",
131 "f8", "f9", "f10", "f11", "f12", "f13", "f14", "f15",
132 "f16", "f17", "f18", "f19", "f20", "f21", "f22", "f23",
133 "f24", "f25", "f26", "f27", "f28", "f29", "f30"
135 #ifndef CONFIG_USER_ONLY
136 static const char shadow_names
[8][8] = {
137 "pal_t7", "pal_s0", "pal_s1", "pal_s2",
138 "pal_s3", "pal_s4", "pal_s5", "pal_t11"
142 static bool done_init
= 0;
150 cpu_env
= tcg_global_reg_new_ptr(TCG_AREG0
, "env");
152 for (i
= 0; i
< 31; i
++) {
153 cpu_std_ir
[i
] = tcg_global_mem_new_i64(TCG_AREG0
,
154 offsetof(CPUAlphaState
, ir
[i
]),
158 for (i
= 0; i
< 31; i
++) {
159 cpu_fir
[i
] = tcg_global_mem_new_i64(TCG_AREG0
,
160 offsetof(CPUAlphaState
, fir
[i
]),
164 #ifndef CONFIG_USER_ONLY
165 memcpy(cpu_pal_ir
, cpu_std_ir
, sizeof(cpu_pal_ir
));
166 for (i
= 0; i
< 8; i
++) {
167 int r
= (i
== 7 ? 25 : i
+ 8);
168 cpu_pal_ir
[r
] = tcg_global_mem_new_i64(TCG_AREG0
,
169 offsetof(CPUAlphaState
,
175 for (i
= 0; i
< ARRAY_SIZE(vars
); ++i
) {
176 const GlobalVar
*v
= &vars
[i
];
177 *v
->var
= tcg_global_mem_new_i64(TCG_AREG0
, v
->ofs
, v
->name
);
181 static TCGv
load_zero(DisasContext
*ctx
)
183 if (TCGV_IS_UNUSED_I64(ctx
->zero
)) {
184 ctx
->zero
= tcg_const_i64(0);
189 static TCGv
dest_sink(DisasContext
*ctx
)
191 if (TCGV_IS_UNUSED_I64(ctx
->sink
)) {
192 ctx
->sink
= tcg_temp_new();
197 static TCGv
load_gpr(DisasContext
*ctx
, unsigned reg
)
199 if (likely(reg
< 31)) {
202 return load_zero(ctx
);
206 static TCGv
load_gpr_lit(DisasContext
*ctx
, unsigned reg
,
207 uint8_t lit
, bool islit
)
210 ctx
->lit
= tcg_const_i64(lit
);
212 } else if (likely(reg
< 31)) {
215 return load_zero(ctx
);
219 static TCGv
dest_gpr(DisasContext
*ctx
, unsigned reg
)
221 if (likely(reg
< 31)) {
224 return dest_sink(ctx
);
228 static TCGv
load_fpr(DisasContext
*ctx
, unsigned reg
)
230 if (likely(reg
< 31)) {
233 return load_zero(ctx
);
237 static TCGv
dest_fpr(DisasContext
*ctx
, unsigned reg
)
239 if (likely(reg
< 31)) {
242 return dest_sink(ctx
);
246 static void gen_excp_1(int exception
, int error_code
)
250 tmp1
= tcg_const_i32(exception
);
251 tmp2
= tcg_const_i32(error_code
);
252 gen_helper_excp(cpu_env
, tmp1
, tmp2
);
253 tcg_temp_free_i32(tmp2
);
254 tcg_temp_free_i32(tmp1
);
257 static ExitStatus
gen_excp(DisasContext
*ctx
, int exception
, int error_code
)
259 tcg_gen_movi_i64(cpu_pc
, ctx
->pc
);
260 gen_excp_1(exception
, error_code
);
261 return EXIT_NORETURN
;
264 static inline ExitStatus
gen_invalid(DisasContext
*ctx
)
266 return gen_excp(ctx
, EXCP_OPCDEC
, 0);
269 static inline void gen_qemu_ldf(TCGv t0
, TCGv t1
, int flags
)
271 TCGv_i32 tmp32
= tcg_temp_new_i32();
272 tcg_gen_qemu_ld_i32(tmp32
, t1
, flags
, MO_LEUL
);
273 gen_helper_memory_to_f(t0
, tmp32
);
274 tcg_temp_free_i32(tmp32
);
277 static inline void gen_qemu_ldg(TCGv t0
, TCGv t1
, int flags
)
279 TCGv tmp
= tcg_temp_new();
280 tcg_gen_qemu_ld_i64(tmp
, t1
, flags
, MO_LEQ
);
281 gen_helper_memory_to_g(t0
, tmp
);
285 static inline void gen_qemu_lds(TCGv t0
, TCGv t1
, int flags
)
287 TCGv_i32 tmp32
= tcg_temp_new_i32();
288 tcg_gen_qemu_ld_i32(tmp32
, t1
, flags
, MO_LEUL
);
289 gen_helper_memory_to_s(t0
, tmp32
);
290 tcg_temp_free_i32(tmp32
);
293 static inline void gen_qemu_ldl_l(TCGv t0
, TCGv t1
, int flags
)
295 tcg_gen_qemu_ld_i64(t0
, t1
, flags
, MO_LESL
);
296 tcg_gen_mov_i64(cpu_lock_addr
, t1
);
297 tcg_gen_mov_i64(cpu_lock_value
, t0
);
300 static inline void gen_qemu_ldq_l(TCGv t0
, TCGv t1
, int flags
)
302 tcg_gen_qemu_ld_i64(t0
, t1
, flags
, MO_LEQ
);
303 tcg_gen_mov_i64(cpu_lock_addr
, t1
);
304 tcg_gen_mov_i64(cpu_lock_value
, t0
);
307 static inline void gen_load_mem(DisasContext
*ctx
,
308 void (*tcg_gen_qemu_load
)(TCGv t0
, TCGv t1
,
310 int ra
, int rb
, int32_t disp16
, bool fp
,
315 /* LDQ_U with ra $31 is UNOP. Other various loads are forms of
316 prefetches, which we can treat as nops. No worries about
317 missed exceptions here. */
318 if (unlikely(ra
== 31)) {
322 tmp
= tcg_temp_new();
323 addr
= load_gpr(ctx
, rb
);
326 tcg_gen_addi_i64(tmp
, addr
, disp16
);
330 tcg_gen_andi_i64(tmp
, addr
, ~0x7);
334 va
= (fp
? cpu_fir
[ra
] : ctx
->ir
[ra
]);
335 tcg_gen_qemu_load(va
, addr
, ctx
->mem_idx
);
340 static inline void gen_qemu_stf(TCGv t0
, TCGv t1
, int flags
)
342 TCGv_i32 tmp32
= tcg_temp_new_i32();
343 gen_helper_f_to_memory(tmp32
, t0
);
344 tcg_gen_qemu_st_i32(tmp32
, t1
, flags
, MO_LEUL
);
345 tcg_temp_free_i32(tmp32
);
348 static inline void gen_qemu_stg(TCGv t0
, TCGv t1
, int flags
)
350 TCGv tmp
= tcg_temp_new();
351 gen_helper_g_to_memory(tmp
, t0
);
352 tcg_gen_qemu_st_i64(tmp
, t1
, flags
, MO_LEQ
);
356 static inline void gen_qemu_sts(TCGv t0
, TCGv t1
, int flags
)
358 TCGv_i32 tmp32
= tcg_temp_new_i32();
359 gen_helper_s_to_memory(tmp32
, t0
);
360 tcg_gen_qemu_st_i32(tmp32
, t1
, flags
, MO_LEUL
);
361 tcg_temp_free_i32(tmp32
);
364 static inline void gen_store_mem(DisasContext
*ctx
,
365 void (*tcg_gen_qemu_store
)(TCGv t0
, TCGv t1
,
367 int ra
, int rb
, int32_t disp16
, bool fp
,
372 tmp
= tcg_temp_new();
373 addr
= load_gpr(ctx
, rb
);
376 tcg_gen_addi_i64(tmp
, addr
, disp16
);
380 tcg_gen_andi_i64(tmp
, addr
, ~0x7);
384 va
= (fp
? load_fpr(ctx
, ra
) : load_gpr(ctx
, ra
));
385 tcg_gen_qemu_store(va
, addr
, ctx
->mem_idx
);
390 static ExitStatus
gen_store_conditional(DisasContext
*ctx
, int ra
, int rb
,
391 int32_t disp16
, int quad
)
396 /* ??? Don't bother storing anything. The user can't tell
397 the difference, since the zero register always reads zero. */
401 #if defined(CONFIG_USER_ONLY)
402 addr
= cpu_lock_st_addr
;
404 addr
= tcg_temp_local_new();
407 tcg_gen_addi_i64(addr
, load_gpr(ctx
, rb
), disp16
);
409 #if defined(CONFIG_USER_ONLY)
410 /* ??? This is handled via a complicated version of compare-and-swap
411 in the cpu_loop. Hopefully one day we'll have a real CAS opcode
412 in TCG so that this isn't necessary. */
413 return gen_excp(ctx
, quad
? EXCP_STQ_C
: EXCP_STL_C
, ra
);
415 /* ??? In system mode we are never multi-threaded, so CAS can be
416 implemented via a non-atomic load-compare-store sequence. */
418 TCGLabel
*lab_fail
, *lab_done
;
421 lab_fail
= gen_new_label();
422 lab_done
= gen_new_label();
423 tcg_gen_brcond_i64(TCG_COND_NE
, addr
, cpu_lock_addr
, lab_fail
);
425 val
= tcg_temp_new();
426 tcg_gen_qemu_ld_i64(val
, addr
, ctx
->mem_idx
, quad
? MO_LEQ
: MO_LESL
);
427 tcg_gen_brcond_i64(TCG_COND_NE
, val
, cpu_lock_value
, lab_fail
);
429 tcg_gen_qemu_st_i64(ctx
->ir
[ra
], addr
, ctx
->mem_idx
,
430 quad
? MO_LEQ
: MO_LEUL
);
431 tcg_gen_movi_i64(ctx
->ir
[ra
], 1);
432 tcg_gen_br(lab_done
);
434 gen_set_label(lab_fail
);
435 tcg_gen_movi_i64(ctx
->ir
[ra
], 0);
437 gen_set_label(lab_done
);
438 tcg_gen_movi_i64(cpu_lock_addr
, -1);
446 static bool in_superpage(DisasContext
*ctx
, int64_t addr
)
448 return ((ctx
->tb
->flags
& TB_FLAGS_USER_MODE
) == 0
450 && ((addr
>> 41) & 3) == 2
451 && addr
>> TARGET_VIRT_ADDR_SPACE_BITS
== addr
>> 63);
454 static bool use_goto_tb(DisasContext
*ctx
, uint64_t dest
)
456 /* Suppress goto_tb in the case of single-steping and IO. */
457 if ((ctx
->tb
->cflags
& CF_LAST_IO
)
458 || ctx
->singlestep_enabled
|| singlestep
) {
461 /* If the destination is in the superpage, the page perms can't change. */
462 if (in_superpage(ctx
, dest
)) {
465 /* Check for the dest on the same page as the start of the TB. */
466 return ((ctx
->tb
->pc
^ dest
) & TARGET_PAGE_MASK
) == 0;
469 static ExitStatus
gen_bdirect(DisasContext
*ctx
, int ra
, int32_t disp
)
471 uint64_t dest
= ctx
->pc
+ (disp
<< 2);
474 tcg_gen_movi_i64(ctx
->ir
[ra
], ctx
->pc
);
477 /* Notice branch-to-next; used to initialize RA with the PC. */
480 } else if (use_goto_tb(ctx
, dest
)) {
482 tcg_gen_movi_i64(cpu_pc
, dest
);
483 tcg_gen_exit_tb((uintptr_t)ctx
->tb
);
486 tcg_gen_movi_i64(cpu_pc
, dest
);
487 return EXIT_PC_UPDATED
;
491 static ExitStatus
gen_bcond_internal(DisasContext
*ctx
, TCGCond cond
,
492 TCGv cmp
, int32_t disp
)
494 uint64_t dest
= ctx
->pc
+ (disp
<< 2);
495 TCGLabel
*lab_true
= gen_new_label();
497 if (use_goto_tb(ctx
, dest
)) {
498 tcg_gen_brcondi_i64(cond
, cmp
, 0, lab_true
);
501 tcg_gen_movi_i64(cpu_pc
, ctx
->pc
);
502 tcg_gen_exit_tb((uintptr_t)ctx
->tb
);
504 gen_set_label(lab_true
);
506 tcg_gen_movi_i64(cpu_pc
, dest
);
507 tcg_gen_exit_tb((uintptr_t)ctx
->tb
+ 1);
511 TCGv_i64 z
= tcg_const_i64(0);
512 TCGv_i64 d
= tcg_const_i64(dest
);
513 TCGv_i64 p
= tcg_const_i64(ctx
->pc
);
515 tcg_gen_movcond_i64(cond
, cpu_pc
, cmp
, z
, d
, p
);
517 tcg_temp_free_i64(z
);
518 tcg_temp_free_i64(d
);
519 tcg_temp_free_i64(p
);
520 return EXIT_PC_UPDATED
;
524 static ExitStatus
gen_bcond(DisasContext
*ctx
, TCGCond cond
, int ra
,
525 int32_t disp
, int mask
)
530 cmp_tmp
= tcg_temp_new();
531 tcg_gen_andi_i64(cmp_tmp
, load_gpr(ctx
, ra
), 1);
533 cmp_tmp
= load_gpr(ctx
, ra
);
536 return gen_bcond_internal(ctx
, cond
, cmp_tmp
, disp
);
539 /* Fold -0.0 for comparison with COND. */
541 static void gen_fold_mzero(TCGCond cond
, TCGv dest
, TCGv src
)
543 uint64_t mzero
= 1ull << 63;
548 /* For <= or >, the -0.0 value directly compares the way we want. */
549 tcg_gen_mov_i64(dest
, src
);
554 /* For == or !=, we can simply mask off the sign bit and compare. */
555 tcg_gen_andi_i64(dest
, src
, mzero
- 1);
560 /* For >= or <, map -0.0 to +0.0 via comparison and mask. */
561 tcg_gen_setcondi_i64(TCG_COND_NE
, dest
, src
, mzero
);
562 tcg_gen_neg_i64(dest
, dest
);
563 tcg_gen_and_i64(dest
, dest
, src
);
571 static ExitStatus
gen_fbcond(DisasContext
*ctx
, TCGCond cond
, int ra
,
574 TCGv cmp_tmp
= tcg_temp_new();
575 gen_fold_mzero(cond
, cmp_tmp
, load_fpr(ctx
, ra
));
576 return gen_bcond_internal(ctx
, cond
, cmp_tmp
, disp
);
579 static void gen_fcmov(DisasContext
*ctx
, TCGCond cond
, int ra
, int rb
, int rc
)
584 vb
= load_fpr(ctx
, rb
);
586 gen_fold_mzero(cond
, va
, load_fpr(ctx
, ra
));
588 tcg_gen_movcond_i64(cond
, dest_fpr(ctx
, rc
), va
, z
, vb
, load_fpr(ctx
, rc
));
593 #define QUAL_RM_N 0x080 /* Round mode nearest even */
594 #define QUAL_RM_C 0x000 /* Round mode chopped */
595 #define QUAL_RM_M 0x040 /* Round mode minus infinity */
596 #define QUAL_RM_D 0x0c0 /* Round mode dynamic */
597 #define QUAL_RM_MASK 0x0c0
599 #define QUAL_U 0x100 /* Underflow enable (fp output) */
600 #define QUAL_V 0x100 /* Overflow enable (int output) */
601 #define QUAL_S 0x400 /* Software completion enable */
602 #define QUAL_I 0x200 /* Inexact detection enable */
604 static void gen_qual_roundmode(DisasContext
*ctx
, int fn11
)
608 fn11
&= QUAL_RM_MASK
;
609 if (fn11
== ctx
->tb_rm
) {
614 tmp
= tcg_temp_new_i32();
617 tcg_gen_movi_i32(tmp
, float_round_nearest_even
);
620 tcg_gen_movi_i32(tmp
, float_round_to_zero
);
623 tcg_gen_movi_i32(tmp
, float_round_down
);
626 tcg_gen_ld8u_i32(tmp
, cpu_env
,
627 offsetof(CPUAlphaState
, fpcr_dyn_round
));
631 #if defined(CONFIG_SOFTFLOAT_INLINE)
632 /* ??? The "fpu/softfloat.h" interface is to call set_float_rounding_mode.
633 With CONFIG_SOFTFLOAT that expands to an out-of-line call that just
634 sets the one field. */
635 tcg_gen_st8_i32(tmp
, cpu_env
,
636 offsetof(CPUAlphaState
, fp_status
.float_rounding_mode
));
638 gen_helper_setroundmode(tmp
);
641 tcg_temp_free_i32(tmp
);
644 static void gen_qual_flushzero(DisasContext
*ctx
, int fn11
)
649 if (fn11
== ctx
->tb_ftz
) {
654 tmp
= tcg_temp_new_i32();
656 /* Underflow is enabled, use the FPCR setting. */
657 tcg_gen_ld8u_i32(tmp
, cpu_env
,
658 offsetof(CPUAlphaState
, fpcr_flush_to_zero
));
660 /* Underflow is disabled, force flush-to-zero. */
661 tcg_gen_movi_i32(tmp
, 1);
664 #if defined(CONFIG_SOFTFLOAT_INLINE)
665 tcg_gen_st8_i32(tmp
, cpu_env
,
666 offsetof(CPUAlphaState
, fp_status
.flush_to_zero
));
668 gen_helper_setflushzero(tmp
);
671 tcg_temp_free_i32(tmp
);
674 static TCGv
gen_ieee_input(DisasContext
*ctx
, int reg
, int fn11
, int is_cmp
)
678 if (unlikely(reg
== 31)) {
679 val
= load_zero(ctx
);
682 if ((fn11
& QUAL_S
) == 0) {
684 gen_helper_ieee_input_cmp(cpu_env
, val
);
686 gen_helper_ieee_input(cpu_env
, val
);
689 #ifndef CONFIG_USER_ONLY
690 /* In system mode, raise exceptions for denormals like real
691 hardware. In user mode, proceed as if the OS completion
692 handler is handling the denormal as per spec. */
693 gen_helper_ieee_input_s(cpu_env
, val
);
700 static void gen_fp_exc_raise(int rc
, int fn11
)
702 /* ??? We ought to be able to do something with imprecise exceptions.
703 E.g. notice we're still in the trap shadow of something within the
704 TB and do not generate the code to signal the exception; end the TB
705 when an exception is forced to arrive, either by consumption of a
706 register value or TRAPB or EXCB. */
710 if (!(fn11
& QUAL_U
)) {
711 /* Note that QUAL_U == QUAL_V, so ignore either. */
712 ignore
|= FPCR_UNF
| FPCR_IOV
;
714 if (!(fn11
& QUAL_I
)) {
717 ign
= tcg_const_i32(ignore
);
719 /* ??? Pass in the regno of the destination so that the helper can
720 set EXC_MASK, which contains a bitmask of destination registers
721 that have caused arithmetic traps. A simple userspace emulation
722 does not require this. We do need it for a guest kernel's entArith,
723 or if we were to do something clever with imprecise exceptions. */
724 reg
= tcg_const_i32(rc
+ 32);
726 gen_helper_fp_exc_raise_s(cpu_env
, ign
, reg
);
728 gen_helper_fp_exc_raise(cpu_env
, ign
, reg
);
731 tcg_temp_free_i32(reg
);
732 tcg_temp_free_i32(ign
);
735 static void gen_cvtlq(TCGv vc
, TCGv vb
)
737 TCGv tmp
= tcg_temp_new();
739 /* The arithmetic right shift here, plus the sign-extended mask below
740 yields a sign-extended result without an explicit ext32s_i64. */
741 tcg_gen_sari_i64(tmp
, vb
, 32);
742 tcg_gen_shri_i64(vc
, vb
, 29);
743 tcg_gen_andi_i64(tmp
, tmp
, (int32_t)0xc0000000);
744 tcg_gen_andi_i64(vc
, vc
, 0x3fffffff);
745 tcg_gen_or_i64(vc
, vc
, tmp
);
750 static void gen_ieee_arith2(DisasContext
*ctx
,
751 void (*helper
)(TCGv
, TCGv_ptr
, TCGv
),
752 int rb
, int rc
, int fn11
)
756 gen_qual_roundmode(ctx
, fn11
);
757 gen_qual_flushzero(ctx
, fn11
);
759 vb
= gen_ieee_input(ctx
, rb
, fn11
, 0);
760 helper(dest_fpr(ctx
, rc
), cpu_env
, vb
);
762 gen_fp_exc_raise(rc
, fn11
);
765 #define IEEE_ARITH2(name) \
766 static inline void glue(gen_, name)(DisasContext *ctx, \
767 int rb, int rc, int fn11) \
769 gen_ieee_arith2(ctx, gen_helper_##name, rb, rc, fn11); \
776 static void gen_cvttq(DisasContext
*ctx
, int rb
, int rc
, int fn11
)
780 /* No need to set flushzero, since we have an integer output. */
781 vb
= gen_ieee_input(ctx
, rb
, fn11
, 0);
782 vc
= dest_fpr(ctx
, rc
);
784 /* Almost all integer conversions use cropped rounding;
785 special case that. */
786 if ((fn11
& QUAL_RM_MASK
) == QUAL_RM_C
) {
787 gen_helper_cvttq_c(vc
, cpu_env
, vb
);
789 gen_qual_roundmode(ctx
, fn11
);
790 gen_helper_cvttq(vc
, cpu_env
, vb
);
792 gen_fp_exc_raise(rc
, fn11
);
795 static void gen_ieee_intcvt(DisasContext
*ctx
,
796 void (*helper
)(TCGv
, TCGv_ptr
, TCGv
),
797 int rb
, int rc
, int fn11
)
801 gen_qual_roundmode(ctx
, fn11
);
802 vb
= load_fpr(ctx
, rb
);
803 vc
= dest_fpr(ctx
, rc
);
805 /* The only exception that can be raised by integer conversion
806 is inexact. Thus we only need to worry about exceptions when
807 inexact handling is requested. */
809 helper(vc
, cpu_env
, vb
);
810 gen_fp_exc_raise(rc
, fn11
);
812 helper(vc
, cpu_env
, vb
);
816 #define IEEE_INTCVT(name) \
817 static inline void glue(gen_, name)(DisasContext *ctx, \
818 int rb, int rc, int fn11) \
820 gen_ieee_intcvt(ctx, gen_helper_##name, rb, rc, fn11); \
825 static void gen_cpy_mask(TCGv vc
, TCGv va
, TCGv vb
, bool inv_a
, uint64_t mask
)
827 TCGv vmask
= tcg_const_i64(mask
);
828 TCGv tmp
= tcg_temp_new_i64();
831 tcg_gen_andc_i64(tmp
, vmask
, va
);
833 tcg_gen_and_i64(tmp
, va
, vmask
);
836 tcg_gen_andc_i64(vc
, vb
, vmask
);
837 tcg_gen_or_i64(vc
, vc
, tmp
);
839 tcg_temp_free(vmask
);
843 static void gen_ieee_arith3(DisasContext
*ctx
,
844 void (*helper
)(TCGv
, TCGv_ptr
, TCGv
, TCGv
),
845 int ra
, int rb
, int rc
, int fn11
)
849 gen_qual_roundmode(ctx
, fn11
);
850 gen_qual_flushzero(ctx
, fn11
);
852 va
= gen_ieee_input(ctx
, ra
, fn11
, 0);
853 vb
= gen_ieee_input(ctx
, rb
, fn11
, 0);
854 vc
= dest_fpr(ctx
, rc
);
855 helper(vc
, cpu_env
, va
, vb
);
857 gen_fp_exc_raise(rc
, fn11
);
860 #define IEEE_ARITH3(name) \
861 static inline void glue(gen_, name)(DisasContext *ctx, \
862 int ra, int rb, int rc, int fn11) \
864 gen_ieee_arith3(ctx, gen_helper_##name, ra, rb, rc, fn11); \
875 static void gen_ieee_compare(DisasContext
*ctx
,
876 void (*helper
)(TCGv
, TCGv_ptr
, TCGv
, TCGv
),
877 int ra
, int rb
, int rc
, int fn11
)
881 va
= gen_ieee_input(ctx
, ra
, fn11
, 1);
882 vb
= gen_ieee_input(ctx
, rb
, fn11
, 1);
883 vc
= dest_fpr(ctx
, rc
);
884 helper(vc
, cpu_env
, va
, vb
);
886 gen_fp_exc_raise(rc
, fn11
);
889 #define IEEE_CMP3(name) \
890 static inline void glue(gen_, name)(DisasContext *ctx, \
891 int ra, int rb, int rc, int fn11) \
893 gen_ieee_compare(ctx, gen_helper_##name, ra, rb, rc, fn11); \
/* Expand an 8-bit ZAPNOT byte-selector into a 64-bit mask: each set bit
   of LIT becomes a byte of all-ones at the corresponding position.  */
static inline uint64_t zapnot_mask(uint8_t lit)
{
    uint64_t mask = 0;
    int byte;

    /* Build the mask from the most significant byte down, shifting the
       accumulator one byte per step.  */
    for (byte = 7; byte >= 0; --byte) {
        mask <<= 8;
        if (lit & (1 << byte)) {
            mask |= 0xff;
        }
    }
    return mask;
}
913 /* Implement zapnot with an immediate operand, which expands to some
914 form of immediate AND. This is a basic building block in the
915 definition of many of the other byte manipulation instructions. */
916 static void gen_zapnoti(TCGv dest
, TCGv src
, uint8_t lit
)
920 tcg_gen_movi_i64(dest
, 0);
923 tcg_gen_ext8u_i64(dest
, src
);
926 tcg_gen_ext16u_i64(dest
, src
);
929 tcg_gen_ext32u_i64(dest
, src
);
932 tcg_gen_mov_i64(dest
, src
);
935 tcg_gen_andi_i64(dest
, src
, zapnot_mask(lit
));
940 /* EXTWH, EXTLH, EXTQH */
941 static void gen_ext_h(DisasContext
*ctx
, TCGv vc
, TCGv va
, int rb
, bool islit
,
942 uint8_t lit
, uint8_t byte_mask
)
945 tcg_gen_shli_i64(vc
, va
, (64 - lit
* 8) & 0x3f);
947 TCGv tmp
= tcg_temp_new();
948 tcg_gen_shli_i64(tmp
, load_gpr(ctx
, rb
), 3);
949 tcg_gen_neg_i64(tmp
, tmp
);
950 tcg_gen_andi_i64(tmp
, tmp
, 0x3f);
951 tcg_gen_shl_i64(vc
, va
, tmp
);
954 gen_zapnoti(vc
, vc
, byte_mask
);
957 /* EXTBL, EXTWL, EXTLL, EXTQL */
958 static void gen_ext_l(DisasContext
*ctx
, TCGv vc
, TCGv va
, int rb
, bool islit
,
959 uint8_t lit
, uint8_t byte_mask
)
962 tcg_gen_shri_i64(vc
, va
, (lit
& 7) * 8);
964 TCGv tmp
= tcg_temp_new();
965 tcg_gen_andi_i64(tmp
, load_gpr(ctx
, rb
), 7);
966 tcg_gen_shli_i64(tmp
, tmp
, 3);
967 tcg_gen_shr_i64(vc
, va
, tmp
);
970 gen_zapnoti(vc
, vc
, byte_mask
);
973 /* INSWH, INSLH, INSQH */
974 static void gen_ins_h(DisasContext
*ctx
, TCGv vc
, TCGv va
, int rb
, bool islit
,
975 uint8_t lit
, uint8_t byte_mask
)
977 TCGv tmp
= tcg_temp_new();
979 /* The instruction description has us left-shift the byte mask and extract
980 bits <15:8> and apply that zap at the end. This is equivalent to simply
981 performing the zap first and shifting afterward. */
982 gen_zapnoti(tmp
, va
, byte_mask
);
986 if (unlikely(lit
== 0)) {
987 tcg_gen_movi_i64(vc
, 0);
989 tcg_gen_shri_i64(vc
, tmp
, 64 - lit
* 8);
992 TCGv shift
= tcg_temp_new();
994 /* If (B & 7) == 0, we need to shift by 64 and leave a zero. Do this
995 portably by splitting the shift into two parts: shift_count-1 and 1.
996 Arrange for the -1 by using ones-complement instead of
997 twos-complement in the negation: ~(B * 8) & 63. */
999 tcg_gen_shli_i64(shift
, load_gpr(ctx
, rb
), 3);
1000 tcg_gen_not_i64(shift
, shift
);
1001 tcg_gen_andi_i64(shift
, shift
, 0x3f);
1003 tcg_gen_shr_i64(vc
, tmp
, shift
);
1004 tcg_gen_shri_i64(vc
, vc
, 1);
1005 tcg_temp_free(shift
);
1010 /* INSBL, INSWL, INSLL, INSQL */
1011 static void gen_ins_l(DisasContext
*ctx
, TCGv vc
, TCGv va
, int rb
, bool islit
,
1012 uint8_t lit
, uint8_t byte_mask
)
1014 TCGv tmp
= tcg_temp_new();
1016 /* The instruction description has us left-shift the byte mask
1017 the same number of byte slots as the data and apply the zap
1018 at the end. This is equivalent to simply performing the zap
1019 first and shifting afterward. */
1020 gen_zapnoti(tmp
, va
, byte_mask
);
1023 tcg_gen_shli_i64(vc
, tmp
, (lit
& 7) * 8);
1025 TCGv shift
= tcg_temp_new();
1026 tcg_gen_andi_i64(shift
, load_gpr(ctx
, rb
), 7);
1027 tcg_gen_shli_i64(shift
, shift
, 3);
1028 tcg_gen_shl_i64(vc
, tmp
, shift
);
1029 tcg_temp_free(shift
);
1034 /* MSKWH, MSKLH, MSKQH */
1035 static void gen_msk_h(DisasContext
*ctx
, TCGv vc
, TCGv va
, int rb
, bool islit
,
1036 uint8_t lit
, uint8_t byte_mask
)
1039 gen_zapnoti(vc
, va
, ~((byte_mask
<< (lit
& 7)) >> 8));
1041 TCGv shift
= tcg_temp_new();
1042 TCGv mask
= tcg_temp_new();
1044 /* The instruction description is as above, where the byte_mask
1045 is shifted left, and then we extract bits <15:8>. This can be
1046 emulated with a right-shift on the expanded byte mask. This
1047 requires extra care because for an input <2:0> == 0 we need a
1048 shift of 64 bits in order to generate a zero. This is done by
1049 splitting the shift into two parts, the variable shift - 1
1050 followed by a constant 1 shift. The code we expand below is
1051 equivalent to ~(B * 8) & 63. */
1053 tcg_gen_shli_i64(shift
, load_gpr(ctx
, rb
), 3);
1054 tcg_gen_not_i64(shift
, shift
);
1055 tcg_gen_andi_i64(shift
, shift
, 0x3f);
1056 tcg_gen_movi_i64(mask
, zapnot_mask (byte_mask
));
1057 tcg_gen_shr_i64(mask
, mask
, shift
);
1058 tcg_gen_shri_i64(mask
, mask
, 1);
1060 tcg_gen_andc_i64(vc
, va
, mask
);
1062 tcg_temp_free(mask
);
1063 tcg_temp_free(shift
);
1067 /* MSKBL, MSKWL, MSKLL, MSKQL */
1068 static void gen_msk_l(DisasContext
*ctx
, TCGv vc
, TCGv va
, int rb
, bool islit
,
1069 uint8_t lit
, uint8_t byte_mask
)
1072 gen_zapnoti(vc
, va
, ~(byte_mask
<< (lit
& 7)));
1074 TCGv shift
= tcg_temp_new();
1075 TCGv mask
= tcg_temp_new();
1077 tcg_gen_andi_i64(shift
, load_gpr(ctx
, rb
), 7);
1078 tcg_gen_shli_i64(shift
, shift
, 3);
1079 tcg_gen_movi_i64(mask
, zapnot_mask(byte_mask
));
1080 tcg_gen_shl_i64(mask
, mask
, shift
);
1082 tcg_gen_andc_i64(vc
, va
, mask
);
1084 tcg_temp_free(mask
);
1085 tcg_temp_free(shift
);
1089 static void gen_rx(DisasContext
*ctx
, int ra
, int set
)
1094 tcg_gen_ld8u_i64(ctx
->ir
[ra
], cpu_env
,
1095 offsetof(CPUAlphaState
, intr_flag
));
1098 tmp
= tcg_const_i32(set
);
1099 tcg_gen_st8_i32(tmp
, cpu_env
, offsetof(CPUAlphaState
, intr_flag
));
1100 tcg_temp_free_i32(tmp
);
1103 static ExitStatus
gen_call_pal(DisasContext
*ctx
, int palcode
)
1105 /* We're emulating OSF/1 PALcode. Many of these are trivial access
1106 to internal cpu registers. */
1108 /* Unprivileged PAL call */
1109 if (palcode
>= 0x80 && palcode
< 0xC0) {
1113 /* No-op inside QEMU. */
1117 tcg_gen_ld_i64(ctx
->ir
[IR_V0
], cpu_env
,
1118 offsetof(CPUAlphaState
, unique
));
1122 tcg_gen_st_i64(ctx
->ir
[IR_A0
], cpu_env
,
1123 offsetof(CPUAlphaState
, unique
));
1132 #ifndef CONFIG_USER_ONLY
1133 /* Privileged PAL code */
1134 if (palcode
< 0x40 && (ctx
->tb
->flags
& TB_FLAGS_USER_MODE
) == 0) {
1138 /* No-op inside QEMU. */
1142 /* No-op inside QEMU. */
1146 tcg_gen_st_i64(ctx
->ir
[IR_A0
], cpu_env
,
1147 offsetof(CPUAlphaState
, vptptr
));
1151 tcg_gen_st_i64(ctx
->ir
[IR_A0
], cpu_env
,
1152 offsetof(CPUAlphaState
, sysval
));
1156 tcg_gen_ld_i64(ctx
->ir
[IR_V0
], cpu_env
,
1157 offsetof(CPUAlphaState
, sysval
));
1164 /* Note that we already know we're in kernel mode, so we know
1165 that PS only contains the 3 IPL bits. */
1166 tcg_gen_ld8u_i64(ctx
->ir
[IR_V0
], cpu_env
,
1167 offsetof(CPUAlphaState
, ps
));
1169 /* But make sure and store only the 3 IPL bits from the user. */
1170 tmp
= tcg_temp_new();
1171 tcg_gen_andi_i64(tmp
, ctx
->ir
[IR_A0
], PS_INT_MASK
);
1172 tcg_gen_st8_i64(tmp
, cpu_env
, offsetof(CPUAlphaState
, ps
));
1179 tcg_gen_ld8u_i64(ctx
->ir
[IR_V0
], cpu_env
,
1180 offsetof(CPUAlphaState
, ps
));
1184 tcg_gen_st_i64(ctx
->ir
[IR_A0
], cpu_env
,
1185 offsetof(CPUAlphaState
, usp
));
1189 tcg_gen_ld_i64(ctx
->ir
[IR_V0
], cpu_env
,
1190 offsetof(CPUAlphaState
, usp
));
1194 tcg_gen_ld32s_i64(ctx
->ir
[IR_V0
], cpu_env
,
1195 -offsetof(AlphaCPU
, env
) + offsetof(CPUState
, cpu_index
));
1205 return gen_invalid(ctx
);
1208 #ifdef CONFIG_USER_ONLY
1209 return gen_excp(ctx
, EXCP_CALL_PAL
, palcode
);
1212 TCGv tmp
= tcg_temp_new();
1213 uint64_t exc_addr
= ctx
->pc
;
1214 uint64_t entry
= ctx
->palbr
;
1216 if (ctx
->tb
->flags
& TB_FLAGS_PAL_MODE
) {
1219 tcg_gen_movi_i64(tmp
, 1);
1220 tcg_gen_st8_i64(tmp
, cpu_env
, offsetof(CPUAlphaState
, pal_mode
));
1223 tcg_gen_movi_i64(tmp
, exc_addr
);
1224 tcg_gen_st_i64(tmp
, cpu_env
, offsetof(CPUAlphaState
, exc_addr
));
1227 entry
+= (palcode
& 0x80
1228 ? 0x2000 + (palcode
- 0x80) * 64
1229 : 0x1000 + palcode
* 64);
1231 /* Since the destination is running in PALmode, we don't really
1232 need the page permissions check. We'll see the existence of
1233 the page when we create the TB, and we'll flush all TBs if
1234 we change the PAL base register. */
1235 if (!ctx
->singlestep_enabled
&& !(ctx
->tb
->cflags
& CF_LAST_IO
)) {
1237 tcg_gen_movi_i64(cpu_pc
, entry
);
1238 tcg_gen_exit_tb((uintptr_t)ctx
->tb
);
1239 return EXIT_GOTO_TB
;
1241 tcg_gen_movi_i64(cpu_pc
, entry
);
1242 return EXIT_PC_UPDATED
;
1248 #ifndef CONFIG_USER_ONLY
1250 #define PR_BYTE 0x100000
1251 #define PR_LONG 0x200000
1253 static int cpu_pr_data(int pr
)
1256 case 0: return offsetof(CPUAlphaState
, ps
) | PR_BYTE
;
1257 case 1: return offsetof(CPUAlphaState
, fen
) | PR_BYTE
;
1258 case 2: return offsetof(CPUAlphaState
, pcc_ofs
) | PR_LONG
;
1259 case 3: return offsetof(CPUAlphaState
, trap_arg0
);
1260 case 4: return offsetof(CPUAlphaState
, trap_arg1
);
1261 case 5: return offsetof(CPUAlphaState
, trap_arg2
);
1262 case 6: return offsetof(CPUAlphaState
, exc_addr
);
1263 case 7: return offsetof(CPUAlphaState
, palbr
);
1264 case 8: return offsetof(CPUAlphaState
, ptbr
);
1265 case 9: return offsetof(CPUAlphaState
, vptptr
);
1266 case 10: return offsetof(CPUAlphaState
, unique
);
1267 case 11: return offsetof(CPUAlphaState
, sysval
);
1268 case 12: return offsetof(CPUAlphaState
, usp
);
1271 return offsetof(CPUAlphaState
, scratch
[pr
- 40]);
1274 return offsetof(CPUAlphaState
, alarm_expire
);
1279 static ExitStatus
gen_mfpr(DisasContext
*ctx
, TCGv va
, int regno
)
1281 void (*helper
)(TCGv
);
1286 /* Accessing the "non-shadow" general registers. */
1287 regno
= regno
== 39 ? 25 : regno
- 32 + 8;
1288 tcg_gen_mov_i64(va
, cpu_std_ir
[regno
]);
1291 case 250: /* WALLTIME */
1292 helper
= gen_helper_get_walltime
;
1294 case 249: /* VMTIME */
1295 helper
= gen_helper_get_vmtime
;
1301 return EXIT_PC_STALE
;
1308 /* The basic registers are data only, and unknown registers
1309 are read-zero, write-ignore. */
1310 data
= cpu_pr_data(regno
);
1312 tcg_gen_movi_i64(va
, 0);
1313 } else if (data
& PR_BYTE
) {
1314 tcg_gen_ld8u_i64(va
, cpu_env
, data
& ~PR_BYTE
);
1315 } else if (data
& PR_LONG
) {
1316 tcg_gen_ld32s_i64(va
, cpu_env
, data
& ~PR_LONG
);
1318 tcg_gen_ld_i64(va
, cpu_env
, data
);
1326 static ExitStatus
gen_mtpr(DisasContext
*ctx
, TCGv vb
, int regno
)
1334 gen_helper_tbia(cpu_env
);
1339 gen_helper_tbis(cpu_env
, vb
);
1344 tmp
= tcg_const_i64(1);
1345 tcg_gen_st32_i64(tmp
, cpu_env
, -offsetof(AlphaCPU
, env
) +
1346 offsetof(CPUState
, halted
));
1347 return gen_excp(ctx
, EXCP_HLT
, 0);
1351 gen_helper_halt(vb
);
1352 return EXIT_PC_STALE
;
1356 gen_helper_set_alarm(cpu_env
, vb
);
1361 tcg_gen_st_i64(vb
, cpu_env
, offsetof(CPUAlphaState
, palbr
));
1362 /* Changing the PAL base register implies un-chaining all of the TBs
1363 that ended with a CALL_PAL. Since the base register usually only
1364 changes during boot, flushing everything works well. */
1365 gen_helper_tb_flush(cpu_env
);
1366 return EXIT_PC_STALE
;
1369 /* Accessing the "non-shadow" general registers. */
1370 regno
= regno
== 39 ? 25 : regno
- 32 + 8;
1371 tcg_gen_mov_i64(cpu_std_ir
[regno
], vb
);
1375 /* The basic registers are data only, and unknown registers
1376 are read-zero, write-ignore. */
1377 data
= cpu_pr_data(regno
);
1379 if (data
& PR_BYTE
) {
1380 tcg_gen_st8_i64(vb
, cpu_env
, data
& ~PR_BYTE
);
1381 } else if (data
& PR_LONG
) {
1382 tcg_gen_st32_i64(vb
, cpu_env
, data
& ~PR_LONG
);
1384 tcg_gen_st_i64(vb
, cpu_env
, data
);
1392 #endif /* !USER_ONLY*/
1394 #define REQUIRE_NO_LIT \
1401 #define REQUIRE_TB_FLAG(FLAG) \
1403 if ((ctx->tb->flags & (FLAG)) == 0) { \
1408 #define REQUIRE_REG_31(WHICH) \
1410 if (WHICH != 31) { \
1415 static ExitStatus
translate_one(DisasContext
*ctx
, uint32_t insn
)
1417 int32_t disp21
, disp16
, disp12
__attribute__((unused
));
1419 uint8_t opc
, ra
, rb
, rc
, fpfn
, fn7
, lit
;
1420 bool islit
, real_islit
;
1421 TCGv va
, vb
, vc
, tmp
, tmp2
;
1425 /* Decode all instruction fields */
1426 opc
= extract32(insn
, 26, 6);
1427 ra
= extract32(insn
, 21, 5);
1428 rb
= extract32(insn
, 16, 5);
1429 rc
= extract32(insn
, 0, 5);
1430 real_islit
= islit
= extract32(insn
, 12, 1);
1431 lit
= extract32(insn
, 13, 8);
1433 disp21
= sextract32(insn
, 0, 21);
1434 disp16
= sextract32(insn
, 0, 16);
1435 disp12
= sextract32(insn
, 0, 12);
1437 fn11
= extract32(insn
, 5, 11);
1438 fpfn
= extract32(insn
, 5, 6);
1439 fn7
= extract32(insn
, 5, 7);
1441 if (rb
== 31 && !islit
) {
1450 ret
= gen_call_pal(ctx
, insn
& 0x03ffffff);
1476 disp16
= (uint32_t)disp16
<< 16;
1480 va
= dest_gpr(ctx
, ra
);
1481 /* It's worth special-casing immediate loads. */
1483 tcg_gen_movi_i64(va
, disp16
);
1485 tcg_gen_addi_i64(va
, load_gpr(ctx
, rb
), disp16
);
1491 REQUIRE_TB_FLAG(TB_FLAGS_AMASK_BWX
);
1492 gen_load_mem(ctx
, &tcg_gen_qemu_ld8u
, ra
, rb
, disp16
, 0, 0);
1496 gen_load_mem(ctx
, &tcg_gen_qemu_ld64
, ra
, rb
, disp16
, 0, 1);
1500 REQUIRE_TB_FLAG(TB_FLAGS_AMASK_BWX
);
1501 gen_load_mem(ctx
, &tcg_gen_qemu_ld16u
, ra
, rb
, disp16
, 0, 0);
1505 REQUIRE_TB_FLAG(TB_FLAGS_AMASK_BWX
);
1506 gen_store_mem(ctx
, &tcg_gen_qemu_st16
, ra
, rb
, disp16
, 0, 0);
1510 REQUIRE_TB_FLAG(TB_FLAGS_AMASK_BWX
);
1511 gen_store_mem(ctx
, &tcg_gen_qemu_st8
, ra
, rb
, disp16
, 0, 0);
1515 gen_store_mem(ctx
, &tcg_gen_qemu_st64
, ra
, rb
, disp16
, 0, 1);
1519 vc
= dest_gpr(ctx
, rc
);
1520 vb
= load_gpr_lit(ctx
, rb
, lit
, islit
);
1524 /* Special case ADDL as SEXTL. */
1525 tcg_gen_ext32s_i64(vc
, vb
);
1529 /* Special case SUBQ as NEGQ. */
1530 tcg_gen_neg_i64(vc
, vb
);
1535 va
= load_gpr(ctx
, ra
);
1539 tcg_gen_add_i64(vc
, va
, vb
);
1540 tcg_gen_ext32s_i64(vc
, vc
);
1544 tmp
= tcg_temp_new();
1545 tcg_gen_shli_i64(tmp
, va
, 2);
1546 tcg_gen_add_i64(tmp
, tmp
, vb
);
1547 tcg_gen_ext32s_i64(vc
, tmp
);
1552 tcg_gen_sub_i64(vc
, va
, vb
);
1553 tcg_gen_ext32s_i64(vc
, vc
);
1557 tmp
= tcg_temp_new();
1558 tcg_gen_shli_i64(tmp
, va
, 2);
1559 tcg_gen_sub_i64(tmp
, tmp
, vb
);
1560 tcg_gen_ext32s_i64(vc
, tmp
);
1566 /* Special case 0 >= X as X == 0. */
1567 gen_helper_cmpbe0(vc
, vb
);
1569 gen_helper_cmpbge(vc
, va
, vb
);
1574 tmp
= tcg_temp_new();
1575 tcg_gen_shli_i64(tmp
, va
, 3);
1576 tcg_gen_add_i64(tmp
, tmp
, vb
);
1577 tcg_gen_ext32s_i64(vc
, tmp
);
1582 tmp
= tcg_temp_new();
1583 tcg_gen_shli_i64(tmp
, va
, 3);
1584 tcg_gen_sub_i64(tmp
, tmp
, vb
);
1585 tcg_gen_ext32s_i64(vc
, tmp
);
1590 tcg_gen_setcond_i64(TCG_COND_LTU
, vc
, va
, vb
);
1594 tcg_gen_add_i64(vc
, va
, vb
);
1598 tmp
= tcg_temp_new();
1599 tcg_gen_shli_i64(tmp
, va
, 2);
1600 tcg_gen_add_i64(vc
, tmp
, vb
);
1605 tcg_gen_sub_i64(vc
, va
, vb
);
1609 tmp
= tcg_temp_new();
1610 tcg_gen_shli_i64(tmp
, va
, 2);
1611 tcg_gen_sub_i64(vc
, tmp
, vb
);
1616 tcg_gen_setcond_i64(TCG_COND_EQ
, vc
, va
, vb
);
1620 tmp
= tcg_temp_new();
1621 tcg_gen_shli_i64(tmp
, va
, 3);
1622 tcg_gen_add_i64(vc
, tmp
, vb
);
1627 tmp
= tcg_temp_new();
1628 tcg_gen_shli_i64(tmp
, va
, 3);
1629 tcg_gen_sub_i64(vc
, tmp
, vb
);
1634 tcg_gen_setcond_i64(TCG_COND_LEU
, vc
, va
, vb
);
1638 tmp
= tcg_temp_new();
1639 tcg_gen_ext32s_i64(tmp
, va
);
1640 tcg_gen_ext32s_i64(vc
, vb
);
1641 tcg_gen_add_i64(tmp
, tmp
, vc
);
1642 tcg_gen_ext32s_i64(vc
, tmp
);
1643 gen_helper_check_overflow(cpu_env
, vc
, tmp
);
1648 tmp
= tcg_temp_new();
1649 tcg_gen_ext32s_i64(tmp
, va
);
1650 tcg_gen_ext32s_i64(vc
, vb
);
1651 tcg_gen_sub_i64(tmp
, tmp
, vc
);
1652 tcg_gen_ext32s_i64(vc
, tmp
);
1653 gen_helper_check_overflow(cpu_env
, vc
, tmp
);
1658 tcg_gen_setcond_i64(TCG_COND_LT
, vc
, va
, vb
);
1662 tmp
= tcg_temp_new();
1663 tmp2
= tcg_temp_new();
1664 tcg_gen_eqv_i64(tmp
, va
, vb
);
1665 tcg_gen_mov_i64(tmp2
, va
);
1666 tcg_gen_add_i64(vc
, va
, vb
);
1667 tcg_gen_xor_i64(tmp2
, tmp2
, vc
);
1668 tcg_gen_and_i64(tmp
, tmp
, tmp2
);
1669 tcg_gen_shri_i64(tmp
, tmp
, 63);
1670 tcg_gen_movi_i64(tmp2
, 0);
1671 gen_helper_check_overflow(cpu_env
, tmp
, tmp2
);
1673 tcg_temp_free(tmp2
);
1677 tmp
= tcg_temp_new();
1678 tmp2
= tcg_temp_new();
1679 tcg_gen_xor_i64(tmp
, va
, vb
);
1680 tcg_gen_mov_i64(tmp2
, va
);
1681 tcg_gen_sub_i64(vc
, va
, vb
);
1682 tcg_gen_xor_i64(tmp2
, tmp2
, vc
);
1683 tcg_gen_and_i64(tmp
, tmp
, tmp2
);
1684 tcg_gen_shri_i64(tmp
, tmp
, 63);
1685 tcg_gen_movi_i64(tmp2
, 0);
1686 gen_helper_check_overflow(cpu_env
, tmp
, tmp2
);
1688 tcg_temp_free(tmp2
);
1692 tcg_gen_setcond_i64(TCG_COND_LE
, vc
, va
, vb
);
1702 /* Special case BIS as NOP. */
1706 /* Special case BIS as MOV. */
1707 vc
= dest_gpr(ctx
, rc
);
1709 tcg_gen_movi_i64(vc
, lit
);
1711 tcg_gen_mov_i64(vc
, load_gpr(ctx
, rb
));
1717 vc
= dest_gpr(ctx
, rc
);
1718 vb
= load_gpr_lit(ctx
, rb
, lit
, islit
);
1720 if (fn7
== 0x28 && ra
== 31) {
1721 /* Special case ORNOT as NOT. */
1722 tcg_gen_not_i64(vc
, vb
);
1726 va
= load_gpr(ctx
, ra
);
1730 tcg_gen_and_i64(vc
, va
, vb
);
1734 tcg_gen_andc_i64(vc
, va
, vb
);
1738 tmp
= tcg_temp_new();
1739 tcg_gen_andi_i64(tmp
, va
, 1);
1740 tcg_gen_movcond_i64(TCG_COND_NE
, vc
, tmp
, load_zero(ctx
),
1741 vb
, load_gpr(ctx
, rc
));
1746 tmp
= tcg_temp_new();
1747 tcg_gen_andi_i64(tmp
, va
, 1);
1748 tcg_gen_movcond_i64(TCG_COND_EQ
, vc
, tmp
, load_zero(ctx
),
1749 vb
, load_gpr(ctx
, rc
));
1754 tcg_gen_or_i64(vc
, va
, vb
);
1758 tcg_gen_movcond_i64(TCG_COND_EQ
, vc
, va
, load_zero(ctx
),
1759 vb
, load_gpr(ctx
, rc
));
1763 tcg_gen_movcond_i64(TCG_COND_NE
, vc
, va
, load_zero(ctx
),
1764 vb
, load_gpr(ctx
, rc
));
1768 tcg_gen_orc_i64(vc
, va
, vb
);
1772 tcg_gen_xor_i64(vc
, va
, vb
);
1776 tcg_gen_movcond_i64(TCG_COND_LT
, vc
, va
, load_zero(ctx
),
1777 vb
, load_gpr(ctx
, rc
));
1781 tcg_gen_movcond_i64(TCG_COND_GE
, vc
, va
, load_zero(ctx
),
1782 vb
, load_gpr(ctx
, rc
));
1786 tcg_gen_eqv_i64(vc
, va
, vb
);
1792 uint64_t amask
= ctx
->tb
->flags
>> TB_FLAGS_AMASK_SHIFT
;
1793 tcg_gen_andi_i64(vc
, vb
, ~amask
);
1798 tcg_gen_movcond_i64(TCG_COND_LE
, vc
, va
, load_zero(ctx
),
1799 vb
, load_gpr(ctx
, rc
));
1803 tcg_gen_movcond_i64(TCG_COND_GT
, vc
, va
, load_zero(ctx
),
1804 vb
, load_gpr(ctx
, rc
));
1809 tcg_gen_movi_i64(vc
, ctx
->implver
);
1817 vc
= dest_gpr(ctx
, rc
);
1818 va
= load_gpr(ctx
, ra
);
1822 gen_msk_l(ctx
, vc
, va
, rb
, islit
, lit
, 0x01);
1826 gen_ext_l(ctx
, vc
, va
, rb
, islit
, lit
, 0x01);
1830 gen_ins_l(ctx
, vc
, va
, rb
, islit
, lit
, 0x01);
1834 gen_msk_l(ctx
, vc
, va
, rb
, islit
, lit
, 0x03);
1838 gen_ext_l(ctx
, vc
, va
, rb
, islit
, lit
, 0x03);
1842 gen_ins_l(ctx
, vc
, va
, rb
, islit
, lit
, 0x03);
1846 gen_msk_l(ctx
, vc
, va
, rb
, islit
, lit
, 0x0f);
1850 gen_ext_l(ctx
, vc
, va
, rb
, islit
, lit
, 0x0f);
1854 gen_ins_l(ctx
, vc
, va
, rb
, islit
, lit
, 0x0f);
1859 gen_zapnoti(vc
, va
, ~lit
);
1861 gen_helper_zap(vc
, va
, load_gpr(ctx
, rb
));
1867 gen_zapnoti(vc
, va
, lit
);
1869 gen_helper_zapnot(vc
, va
, load_gpr(ctx
, rb
));
1874 gen_msk_l(ctx
, vc
, va
, rb
, islit
, lit
, 0xff);
1879 tcg_gen_shri_i64(vc
, va
, lit
& 0x3f);
1881 tmp
= tcg_temp_new();
1882 vb
= load_gpr(ctx
, rb
);
1883 tcg_gen_andi_i64(tmp
, vb
, 0x3f);
1884 tcg_gen_shr_i64(vc
, va
, tmp
);
1890 gen_ext_l(ctx
, vc
, va
, rb
, islit
, lit
, 0xff);
1895 tcg_gen_shli_i64(vc
, va
, lit
& 0x3f);
1897 tmp
= tcg_temp_new();
1898 vb
= load_gpr(ctx
, rb
);
1899 tcg_gen_andi_i64(tmp
, vb
, 0x3f);
1900 tcg_gen_shl_i64(vc
, va
, tmp
);
1906 gen_ins_l(ctx
, vc
, va
, rb
, islit
, lit
, 0xff);
1911 tcg_gen_sari_i64(vc
, va
, lit
& 0x3f);
1913 tmp
= tcg_temp_new();
1914 vb
= load_gpr(ctx
, rb
);
1915 tcg_gen_andi_i64(tmp
, vb
, 0x3f);
1916 tcg_gen_sar_i64(vc
, va
, tmp
);
1922 gen_msk_h(ctx
, vc
, va
, rb
, islit
, lit
, 0x03);
1926 gen_ins_h(ctx
, vc
, va
, rb
, islit
, lit
, 0x03);
1930 gen_ext_h(ctx
, vc
, va
, rb
, islit
, lit
, 0x03);
1934 gen_msk_h(ctx
, vc
, va
, rb
, islit
, lit
, 0x0f);
1938 gen_ins_h(ctx
, vc
, va
, rb
, islit
, lit
, 0x0f);
1942 gen_ext_h(ctx
, vc
, va
, rb
, islit
, lit
, 0x0f);
1946 gen_msk_h(ctx
, vc
, va
, rb
, islit
, lit
, 0xff);
1950 gen_ins_h(ctx
, vc
, va
, rb
, islit
, lit
, 0xff);
1954 gen_ext_h(ctx
, vc
, va
, rb
, islit
, lit
, 0xff);
1962 vc
= dest_gpr(ctx
, rc
);
1963 vb
= load_gpr_lit(ctx
, rb
, lit
, islit
);
1964 va
= load_gpr(ctx
, ra
);
1968 tcg_gen_mul_i64(vc
, va
, vb
);
1969 tcg_gen_ext32s_i64(vc
, vc
);
1973 tcg_gen_mul_i64(vc
, va
, vb
);
1977 tmp
= tcg_temp_new();
1978 tcg_gen_mulu2_i64(tmp
, vc
, va
, vb
);
1983 tmp
= tcg_temp_new();
1984 tcg_gen_ext32s_i64(tmp
, va
);
1985 tcg_gen_ext32s_i64(vc
, vb
);
1986 tcg_gen_mul_i64(tmp
, tmp
, vc
);
1987 tcg_gen_ext32s_i64(vc
, tmp
);
1988 gen_helper_check_overflow(cpu_env
, vc
, tmp
);
1993 tmp
= tcg_temp_new();
1994 tmp2
= tcg_temp_new();
1995 tcg_gen_muls2_i64(vc
, tmp
, va
, vb
);
1996 tcg_gen_sari_i64(tmp2
, vc
, 63);
1997 gen_helper_check_overflow(cpu_env
, tmp
, tmp2
);
1999 tcg_temp_free(tmp2
);
2007 REQUIRE_TB_FLAG(TB_FLAGS_AMASK_FIX
);
2008 vc
= dest_fpr(ctx
, rc
);
2009 switch (fpfn
) { /* fn11 & 0x3F */
2013 t32
= tcg_temp_new_i32();
2014 va
= load_gpr(ctx
, ra
);
2015 tcg_gen_extrl_i64_i32(t32
, va
);
2016 gen_helper_memory_to_s(vc
, t32
);
2017 tcg_temp_free_i32(t32
);
2022 vb
= load_fpr(ctx
, rb
);
2023 gen_helper_sqrtf(vc
, cpu_env
, vb
);
2028 gen_sqrts(ctx
, rb
, rc
, fn11
);
2033 t32
= tcg_temp_new_i32();
2034 va
= load_gpr(ctx
, ra
);
2035 tcg_gen_extrl_i64_i32(t32
, va
);
2036 gen_helper_memory_to_f(vc
, t32
);
2037 tcg_temp_free_i32(t32
);
2042 va
= load_gpr(ctx
, ra
);
2043 tcg_gen_mov_i64(vc
, va
);
2048 vb
= load_fpr(ctx
, rb
);
2049 gen_helper_sqrtg(vc
, cpu_env
, vb
);
2054 gen_sqrtt(ctx
, rb
, rc
, fn11
);
2062 /* VAX floating point */
2063 /* XXX: rounding mode and trap are ignored (!) */
2064 vc
= dest_fpr(ctx
, rc
);
2065 vb
= load_fpr(ctx
, rb
);
2066 va
= load_fpr(ctx
, ra
);
2067 switch (fpfn
) { /* fn11 & 0x3F */
2070 gen_helper_addf(vc
, cpu_env
, va
, vb
);
2074 gen_helper_subf(vc
, cpu_env
, va
, vb
);
2078 gen_helper_mulf(vc
, cpu_env
, va
, vb
);
2082 gen_helper_divf(vc
, cpu_env
, va
, vb
);
2090 gen_helper_addg(vc
, cpu_env
, va
, vb
);
2094 gen_helper_subg(vc
, cpu_env
, va
, vb
);
2098 gen_helper_mulg(vc
, cpu_env
, va
, vb
);
2102 gen_helper_divg(vc
, cpu_env
, va
, vb
);
2106 gen_helper_cmpgeq(vc
, cpu_env
, va
, vb
);
2110 gen_helper_cmpglt(vc
, cpu_env
, va
, vb
);
2114 gen_helper_cmpgle(vc
, cpu_env
, va
, vb
);
2119 gen_helper_cvtgf(vc
, cpu_env
, vb
);
2128 gen_helper_cvtgq(vc
, cpu_env
, vb
);
2133 gen_helper_cvtqf(vc
, cpu_env
, vb
);
2138 gen_helper_cvtqg(vc
, cpu_env
, vb
);
2146 /* IEEE floating-point */
2147 switch (fpfn
) { /* fn11 & 0x3F */
2150 gen_adds(ctx
, ra
, rb
, rc
, fn11
);
2154 gen_subs(ctx
, ra
, rb
, rc
, fn11
);
2158 gen_muls(ctx
, ra
, rb
, rc
, fn11
);
2162 gen_divs(ctx
, ra
, rb
, rc
, fn11
);
2166 gen_addt(ctx
, ra
, rb
, rc
, fn11
);
2170 gen_subt(ctx
, ra
, rb
, rc
, fn11
);
2174 gen_mult(ctx
, ra
, rb
, rc
, fn11
);
2178 gen_divt(ctx
, ra
, rb
, rc
, fn11
);
2182 gen_cmptun(ctx
, ra
, rb
, rc
, fn11
);
2186 gen_cmpteq(ctx
, ra
, rb
, rc
, fn11
);
2190 gen_cmptlt(ctx
, ra
, rb
, rc
, fn11
);
2194 gen_cmptle(ctx
, ra
, rb
, rc
, fn11
);
2198 if (fn11
== 0x2AC || fn11
== 0x6AC) {
2200 gen_cvtst(ctx
, rb
, rc
, fn11
);
2203 gen_cvtts(ctx
, rb
, rc
, fn11
);
2209 gen_cvttq(ctx
, rb
, rc
, fn11
);
2214 gen_cvtqs(ctx
, rb
, rc
, fn11
);
2219 gen_cvtqt(ctx
, rb
, rc
, fn11
);
2231 vc
= dest_fpr(ctx
, rc
);
2232 vb
= load_fpr(ctx
, rb
);
2238 /* Special case CPYS as FNOP. */
2240 vc
= dest_fpr(ctx
, rc
);
2241 va
= load_fpr(ctx
, ra
);
2243 /* Special case CPYS as FMOV. */
2244 tcg_gen_mov_i64(vc
, va
);
2246 vb
= load_fpr(ctx
, rb
);
2247 gen_cpy_mask(vc
, va
, vb
, 0, 0x8000000000000000ULL
);
2253 vc
= dest_fpr(ctx
, rc
);
2254 vb
= load_fpr(ctx
, rb
);
2255 va
= load_fpr(ctx
, ra
);
2256 gen_cpy_mask(vc
, va
, vb
, 1, 0x8000000000000000ULL
);
2260 vc
= dest_fpr(ctx
, rc
);
2261 vb
= load_fpr(ctx
, rb
);
2262 va
= load_fpr(ctx
, ra
);
2263 gen_cpy_mask(vc
, va
, vb
, 0, 0xFFF0000000000000ULL
);
2267 va
= load_fpr(ctx
, ra
);
2268 gen_helper_store_fpcr(cpu_env
, va
);
2269 if (ctx
->tb_rm
== QUAL_RM_D
) {
2270 /* Re-do the copy of the rounding mode to fp_status
2271 the next time we use dynamic rounding. */
2277 va
= dest_fpr(ctx
, ra
);
2278 gen_helper_load_fpcr(va
, cpu_env
);
2282 gen_fcmov(ctx
, TCG_COND_EQ
, ra
, rb
, rc
);
2286 gen_fcmov(ctx
, TCG_COND_NE
, ra
, rb
, rc
);
2290 gen_fcmov(ctx
, TCG_COND_LT
, ra
, rb
, rc
);
2294 gen_fcmov(ctx
, TCG_COND_GE
, ra
, rb
, rc
);
2298 gen_fcmov(ctx
, TCG_COND_LE
, ra
, rb
, rc
);
2302 gen_fcmov(ctx
, TCG_COND_GT
, ra
, rb
, rc
);
2304 case 0x030: /* CVTQL */
2305 case 0x130: /* CVTQL/V */
2306 case 0x530: /* CVTQL/SV */
2308 vc
= dest_fpr(ctx
, rc
);
2309 vb
= load_fpr(ctx
, rb
);
2310 gen_helper_cvtql(vc
, cpu_env
, vb
);
2311 gen_fp_exc_raise(rc
, fn11
);
2319 switch ((uint16_t)disp16
) {
2346 va
= dest_gpr(ctx
, ra
);
2347 if (ctx
->tb
->cflags
& CF_USE_ICOUNT
) {
2349 gen_helper_load_pcc(va
, cpu_env
);
2351 ret
= EXIT_PC_STALE
;
2353 gen_helper_load_pcc(va
, cpu_env
);
2381 /* HW_MFPR (PALcode) */
2382 #ifndef CONFIG_USER_ONLY
2383 REQUIRE_TB_FLAG(TB_FLAGS_PAL_MODE
);
2384 va
= dest_gpr(ctx
, ra
);
2385 ret
= gen_mfpr(ctx
, va
, insn
& 0xffff);
2392 /* JMP, JSR, RET, JSR_COROUTINE. These only differ by the branch
2393 prediction stack action, which of course we don't implement. */
2394 vb
= load_gpr(ctx
, rb
);
2395 tcg_gen_andi_i64(cpu_pc
, vb
, ~3);
2397 tcg_gen_movi_i64(ctx
->ir
[ra
], ctx
->pc
);
2399 ret
= EXIT_PC_UPDATED
;
2403 /* HW_LD (PALcode) */
2404 #ifndef CONFIG_USER_ONLY
2405 REQUIRE_TB_FLAG(TB_FLAGS_PAL_MODE
);
2407 TCGv addr
= tcg_temp_new();
2408 vb
= load_gpr(ctx
, rb
);
2409 va
= dest_gpr(ctx
, ra
);
2411 tcg_gen_addi_i64(addr
, vb
, disp12
);
2412 switch ((insn
>> 12) & 0xF) {
2414 /* Longword physical access (hw_ldl/p) */
2415 gen_helper_ldl_phys(va
, cpu_env
, addr
);
2418 /* Quadword physical access (hw_ldq/p) */
2419 gen_helper_ldq_phys(va
, cpu_env
, addr
);
2422 /* Longword physical access with lock (hw_ldl_l/p) */
2423 gen_helper_ldl_l_phys(va
, cpu_env
, addr
);
2426 /* Quadword physical access with lock (hw_ldq_l/p) */
2427 gen_helper_ldq_l_phys(va
, cpu_env
, addr
);
2430 /* Longword virtual PTE fetch (hw_ldl/v) */
2433 /* Quadword virtual PTE fetch (hw_ldq/v) */
2443 /* Longword virtual access (hw_ldl) */
2446 /* Quadword virtual access (hw_ldq) */
2449 /* Longword virtual access with protection check (hw_ldl/w) */
2450 tcg_gen_qemu_ld_i64(va
, addr
, MMU_KERNEL_IDX
, MO_LESL
);
2453 /* Quadword virtual access with protection check (hw_ldq/w) */
2454 tcg_gen_qemu_ld_i64(va
, addr
, MMU_KERNEL_IDX
, MO_LEQ
);
2457 /* Longword virtual access with alt access mode (hw_ldl/a)*/
2460 /* Quadword virtual access with alt access mode (hw_ldq/a) */
2463 /* Longword virtual access with alternate access mode and
2464 protection checks (hw_ldl/wa) */
2465 tcg_gen_qemu_ld_i64(va
, addr
, MMU_USER_IDX
, MO_LESL
);
2468 /* Quadword virtual access with alternate access mode and
2469 protection checks (hw_ldq/wa) */
2470 tcg_gen_qemu_ld_i64(va
, addr
, MMU_USER_IDX
, MO_LEQ
);
2473 tcg_temp_free(addr
);
2481 vc
= dest_gpr(ctx
, rc
);
2484 REQUIRE_TB_FLAG(TB_FLAGS_AMASK_FIX
);
2486 va
= load_fpr(ctx
, ra
);
2487 tcg_gen_mov_i64(vc
, va
);
2489 } else if (fn7
== 0x78) {
2491 REQUIRE_TB_FLAG(TB_FLAGS_AMASK_FIX
);
2493 t32
= tcg_temp_new_i32();
2494 va
= load_fpr(ctx
, ra
);
2495 gen_helper_s_to_memory(t32
, va
);
2496 tcg_gen_ext_i32_i64(vc
, t32
);
2497 tcg_temp_free_i32(t32
);
2501 vb
= load_gpr_lit(ctx
, rb
, lit
, islit
);
2505 REQUIRE_TB_FLAG(TB_FLAGS_AMASK_BWX
);
2507 tcg_gen_ext8s_i64(vc
, vb
);
2511 REQUIRE_TB_FLAG(TB_FLAGS_AMASK_BWX
);
2513 tcg_gen_ext16s_i64(vc
, vb
);
2517 REQUIRE_TB_FLAG(TB_FLAGS_AMASK_CIX
);
2520 gen_helper_ctpop(vc
, vb
);
2524 REQUIRE_TB_FLAG(TB_FLAGS_AMASK_MVI
);
2526 va
= load_gpr(ctx
, ra
);
2527 gen_helper_perr(vc
, va
, vb
);
2531 REQUIRE_TB_FLAG(TB_FLAGS_AMASK_CIX
);
2534 gen_helper_ctlz(vc
, vb
);
2538 REQUIRE_TB_FLAG(TB_FLAGS_AMASK_CIX
);
2541 gen_helper_cttz(vc
, vb
);
2545 REQUIRE_TB_FLAG(TB_FLAGS_AMASK_MVI
);
2548 gen_helper_unpkbw(vc
, vb
);
2552 REQUIRE_TB_FLAG(TB_FLAGS_AMASK_MVI
);
2555 gen_helper_unpkbl(vc
, vb
);
2559 REQUIRE_TB_FLAG(TB_FLAGS_AMASK_MVI
);
2562 gen_helper_pkwb(vc
, vb
);
2566 REQUIRE_TB_FLAG(TB_FLAGS_AMASK_MVI
);
2569 gen_helper_pklb(vc
, vb
);
2573 REQUIRE_TB_FLAG(TB_FLAGS_AMASK_MVI
);
2574 va
= load_gpr(ctx
, ra
);
2575 gen_helper_minsb8(vc
, va
, vb
);
2579 REQUIRE_TB_FLAG(TB_FLAGS_AMASK_MVI
);
2580 va
= load_gpr(ctx
, ra
);
2581 gen_helper_minsw4(vc
, va
, vb
);
2585 REQUIRE_TB_FLAG(TB_FLAGS_AMASK_MVI
);
2586 va
= load_gpr(ctx
, ra
);
2587 gen_helper_minub8(vc
, va
, vb
);
2591 REQUIRE_TB_FLAG(TB_FLAGS_AMASK_MVI
);
2592 va
= load_gpr(ctx
, ra
);
2593 gen_helper_minuw4(vc
, va
, vb
);
2597 REQUIRE_TB_FLAG(TB_FLAGS_AMASK_MVI
);
2598 va
= load_gpr(ctx
, ra
);
2599 gen_helper_maxub8(vc
, va
, vb
);
2603 REQUIRE_TB_FLAG(TB_FLAGS_AMASK_MVI
);
2604 va
= load_gpr(ctx
, ra
);
2605 gen_helper_maxuw4(vc
, va
, vb
);
2609 REQUIRE_TB_FLAG(TB_FLAGS_AMASK_MVI
);
2610 va
= load_gpr(ctx
, ra
);
2611 gen_helper_maxsb8(vc
, va
, vb
);
2615 REQUIRE_TB_FLAG(TB_FLAGS_AMASK_MVI
);
2616 va
= load_gpr(ctx
, ra
);
2617 gen_helper_maxsw4(vc
, va
, vb
);
2625 /* HW_MTPR (PALcode) */
2626 #ifndef CONFIG_USER_ONLY
2627 REQUIRE_TB_FLAG(TB_FLAGS_PAL_MODE
);
2628 vb
= load_gpr(ctx
, rb
);
2629 ret
= gen_mtpr(ctx
, vb
, insn
& 0xffff);
2636 /* HW_RET (PALcode) */
2637 #ifndef CONFIG_USER_ONLY
2638 REQUIRE_TB_FLAG(TB_FLAGS_PAL_MODE
);
2640 /* Pre-EV6 CPUs interpreted this as HW_REI, loading the return
2641 address from EXC_ADDR. This turns out to be useful for our
2642 emulation PALcode, so continue to accept it. */
2643 ctx
->lit
= vb
= tcg_temp_new();
2644 tcg_gen_ld_i64(vb
, cpu_env
, offsetof(CPUAlphaState
, exc_addr
));
2646 vb
= load_gpr(ctx
, rb
);
2648 tmp
= tcg_temp_new();
2649 tcg_gen_movi_i64(tmp
, 0);
2650 tcg_gen_st8_i64(tmp
, cpu_env
, offsetof(CPUAlphaState
, intr_flag
));
2651 tcg_gen_movi_i64(cpu_lock_addr
, -1);
2652 tcg_gen_andi_i64(tmp
, vb
, 1);
2653 tcg_gen_st8_i64(tmp
, cpu_env
, offsetof(CPUAlphaState
, pal_mode
));
2654 tcg_gen_andi_i64(cpu_pc
, vb
, ~3);
2655 ret
= EXIT_PC_UPDATED
;
2662 /* HW_ST (PALcode) */
2663 #ifndef CONFIG_USER_ONLY
2664 REQUIRE_TB_FLAG(TB_FLAGS_PAL_MODE
);
2666 TCGv addr
= tcg_temp_new();
2667 va
= load_gpr(ctx
, ra
);
2668 vb
= load_gpr(ctx
, rb
);
2670 tcg_gen_addi_i64(addr
, vb
, disp12
);
2671 switch ((insn
>> 12) & 0xF) {
2673 /* Longword physical access */
2674 gen_helper_stl_phys(cpu_env
, addr
, va
);
2677 /* Quadword physical access */
2678 gen_helper_stq_phys(cpu_env
, addr
, va
);
2681 /* Longword physical access with lock */
2682 gen_helper_stl_c_phys(dest_gpr(ctx
, ra
), cpu_env
, addr
, va
);
2685 /* Quadword physical access with lock */
2686 gen_helper_stq_c_phys(dest_gpr(ctx
, ra
), cpu_env
, addr
, va
);
2689 /* Longword virtual access */
2692 /* Quadword virtual access */
2713 /* Longword virtual access with alternate access mode */
2716 /* Quadword virtual access with alternate access mode */
2725 tcg_temp_free(addr
);
2733 gen_load_mem(ctx
, &gen_qemu_ldf
, ra
, rb
, disp16
, 1, 0);
2737 gen_load_mem(ctx
, &gen_qemu_ldg
, ra
, rb
, disp16
, 1, 0);
2741 gen_load_mem(ctx
, &gen_qemu_lds
, ra
, rb
, disp16
, 1, 0);
2745 gen_load_mem(ctx
, &tcg_gen_qemu_ld64
, ra
, rb
, disp16
, 1, 0);
2749 gen_store_mem(ctx
, &gen_qemu_stf
, ra
, rb
, disp16
, 1, 0);
2753 gen_store_mem(ctx
, &gen_qemu_stg
, ra
, rb
, disp16
, 1, 0);
2757 gen_store_mem(ctx
, &gen_qemu_sts
, ra
, rb
, disp16
, 1, 0);
2761 gen_store_mem(ctx
, &tcg_gen_qemu_st64
, ra
, rb
, disp16
, 1, 0);
2765 gen_load_mem(ctx
, &tcg_gen_qemu_ld32s
, ra
, rb
, disp16
, 0, 0);
2769 gen_load_mem(ctx
, &tcg_gen_qemu_ld64
, ra
, rb
, disp16
, 0, 0);
2773 gen_load_mem(ctx
, &gen_qemu_ldl_l
, ra
, rb
, disp16
, 0, 0);
2777 gen_load_mem(ctx
, &gen_qemu_ldq_l
, ra
, rb
, disp16
, 0, 0);
2781 gen_store_mem(ctx
, &tcg_gen_qemu_st32
, ra
, rb
, disp16
, 0, 0);
2785 gen_store_mem(ctx
, &tcg_gen_qemu_st64
, ra
, rb
, disp16
, 0, 0);
2789 ret
= gen_store_conditional(ctx
, ra
, rb
, disp16
, 0);
2793 ret
= gen_store_conditional(ctx
, ra
, rb
, disp16
, 1);
2797 ret
= gen_bdirect(ctx
, ra
, disp21
);
2799 case 0x31: /* FBEQ */
2800 ret
= gen_fbcond(ctx
, TCG_COND_EQ
, ra
, disp21
);
2802 case 0x32: /* FBLT */
2803 ret
= gen_fbcond(ctx
, TCG_COND_LT
, ra
, disp21
);
2805 case 0x33: /* FBLE */
2806 ret
= gen_fbcond(ctx
, TCG_COND_LE
, ra
, disp21
);
2810 ret
= gen_bdirect(ctx
, ra
, disp21
);
2812 case 0x35: /* FBNE */
2813 ret
= gen_fbcond(ctx
, TCG_COND_NE
, ra
, disp21
);
2815 case 0x36: /* FBGE */
2816 ret
= gen_fbcond(ctx
, TCG_COND_GE
, ra
, disp21
);
2818 case 0x37: /* FBGT */
2819 ret
= gen_fbcond(ctx
, TCG_COND_GT
, ra
, disp21
);
2823 ret
= gen_bcond(ctx
, TCG_COND_EQ
, ra
, disp21
, 1);
2827 ret
= gen_bcond(ctx
, TCG_COND_EQ
, ra
, disp21
, 0);
2831 ret
= gen_bcond(ctx
, TCG_COND_LT
, ra
, disp21
, 0);
2835 ret
= gen_bcond(ctx
, TCG_COND_LE
, ra
, disp21
, 0);
2839 ret
= gen_bcond(ctx
, TCG_COND_NE
, ra
, disp21
, 1);
2843 ret
= gen_bcond(ctx
, TCG_COND_NE
, ra
, disp21
, 0);
2847 ret
= gen_bcond(ctx
, TCG_COND_GE
, ra
, disp21
, 0);
2851 ret
= gen_bcond(ctx
, TCG_COND_GT
, ra
, disp21
, 0);
2854 ret
= gen_invalid(ctx
);
2861 static inline void gen_intermediate_code_internal(AlphaCPU
*cpu
,
2862 TranslationBlock
*tb
,
2865 CPUState
*cs
= CPU(cpu
);
2866 CPUAlphaState
*env
= &cpu
->env
;
2867 DisasContext ctx
, *ctxp
= &ctx
;
2868 target_ulong pc_start
;
2869 target_ulong pc_mask
;
2881 ctx
.mem_idx
= cpu_mmu_index(env
);
2882 ctx
.implver
= env
->implver
;
2883 ctx
.singlestep_enabled
= cs
->singlestep_enabled
;
2885 #ifdef CONFIG_USER_ONLY
2886 ctx
.ir
= cpu_std_ir
;
2888 ctx
.palbr
= env
->palbr
;
2889 ctx
.ir
= (tb
->flags
& TB_FLAGS_PAL_MODE
? cpu_pal_ir
: cpu_std_ir
);
2892 /* ??? Every TB begins with unset rounding mode, to be initialized on
2893 the first fp insn of the TB. Alternately we could define a proper
2894 default for every TB (e.g. QUAL_RM_N or QUAL_RM_D) and make sure
2895 to reset the FP_STATUS to that default at the end of any TB that
2896 changes the default. We could even (gasp) dynamiclly figure out
2897 what default would be most efficient given the running program. */
2899 /* Similarly for flush-to-zero. */
2903 max_insns
= tb
->cflags
& CF_COUNT_MASK
;
2904 if (max_insns
== 0) {
2905 max_insns
= CF_COUNT_MASK
;
2908 if (in_superpage(&ctx
, pc_start
)) {
2909 pc_mask
= (1ULL << 41) - 1;
2911 pc_mask
= ~TARGET_PAGE_MASK
;
2916 if (unlikely(!QTAILQ_EMPTY(&cs
->breakpoints
))) {
2917 QTAILQ_FOREACH(bp
, &cs
->breakpoints
, entry
) {
2918 if (bp
->pc
== ctx
.pc
) {
2919 gen_excp(&ctx
, EXCP_DEBUG
, 0);
2925 j
= tcg_op_buf_count();
2929 tcg_ctx
.gen_opc_instr_start
[lj
++] = 0;
2932 tcg_ctx
.gen_opc_pc
[lj
] = ctx
.pc
;
2933 tcg_ctx
.gen_opc_instr_start
[lj
] = 1;
2934 tcg_ctx
.gen_opc_icount
[lj
] = num_insns
;
2936 if (num_insns
+ 1 == max_insns
&& (tb
->cflags
& CF_LAST_IO
)) {
2939 insn
= cpu_ldl_code(env
, ctx
.pc
);
2942 if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP
| CPU_LOG_TB_OP_OPT
))) {
2943 tcg_gen_debug_insn_start(ctx
.pc
);
2946 TCGV_UNUSED_I64(ctx
.zero
);
2947 TCGV_UNUSED_I64(ctx
.sink
);
2948 TCGV_UNUSED_I64(ctx
.lit
);
2951 ret
= translate_one(ctxp
, insn
);
2953 if (!TCGV_IS_UNUSED_I64(ctx
.sink
)) {
2954 tcg_gen_discard_i64(ctx
.sink
);
2955 tcg_temp_free(ctx
.sink
);
2957 if (!TCGV_IS_UNUSED_I64(ctx
.zero
)) {
2958 tcg_temp_free(ctx
.zero
);
2960 if (!TCGV_IS_UNUSED_I64(ctx
.lit
)) {
2961 tcg_temp_free(ctx
.lit
);
2964 /* If we reach a page boundary, are single stepping,
2965 or exhaust instruction count, stop generation. */
2967 && ((ctx
.pc
& pc_mask
) == 0
2968 || tcg_op_buf_full()
2969 || num_insns
>= max_insns
2971 || ctx
.singlestep_enabled
)) {
2972 ret
= EXIT_PC_STALE
;
2974 } while (ret
== NO_EXIT
);
2976 if (tb
->cflags
& CF_LAST_IO
) {
2985 tcg_gen_movi_i64(cpu_pc
, ctx
.pc
);
2987 case EXIT_PC_UPDATED
:
2988 if (ctx
.singlestep_enabled
) {
2989 gen_excp_1(EXCP_DEBUG
, 0);
2998 gen_tb_end(tb
, num_insns
);
3001 j
= tcg_op_buf_count();
3004 tcg_ctx
.gen_opc_instr_start
[lj
++] = 0;
3007 tb
->size
= ctx
.pc
- pc_start
;
3008 tb
->icount
= num_insns
;
3012 if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM
)) {
3013 qemu_log("IN: %s\n", lookup_symbol(pc_start
));
3014 log_target_disas(cs
, pc_start
, ctx
.pc
- pc_start
, 1);
3020 void gen_intermediate_code (CPUAlphaState
*env
, struct TranslationBlock
*tb
)
3022 gen_intermediate_code_internal(alpha_env_get_cpu(env
), tb
, false);
3025 void gen_intermediate_code_pc (CPUAlphaState
*env
, struct TranslationBlock
*tb
)
3027 gen_intermediate_code_internal(alpha_env_get_cpu(env
), tb
, true);
3030 void restore_state_to_opc(CPUAlphaState
*env
, TranslationBlock
*tb
, int pc_pos
)
3032 env
->pc
= tcg_ctx
.gen_opc_pc
[pc_pos
];