/*
 * Alpha emulation cpu translation for qemu.
 *
 * Copyright (c) 2007 Jocelyn Mayer
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
21 #include "disas/disas.h"
22 #include "qemu/host-utils.h"
24 #include "exec/cpu_ldst.h"
26 #include "exec/helper-proto.h"
27 #include "exec/helper-gen.h"
29 #include "trace-tcg.h"
32 #undef ALPHA_DEBUG_DISAS
33 #define CONFIG_SOFTFLOAT_INLINE
35 #ifdef ALPHA_DEBUG_DISAS
36 # define LOG_DISAS(...) qemu_log_mask(CPU_LOG_TB_IN_ASM, ## __VA_ARGS__)
38 # define LOG_DISAS(...) do { } while (0)
typedef struct DisasContext DisasContext;
struct DisasContext {
    struct TranslationBlock *tb;
    uint64_t pc;
    int mem_idx;

    /* Current rounding mode for this TB.  */
    int tb_rm;
    /* Current flush-to-zero setting for this TB.  */
    int tb_ftz;

    /* implver value for this CPU.  */
    int implver;

    /* Temporaries for $31 and $f31 as source and destination.  */
    TCGv zero;
    TCGv sink;
    /* Temporary for immediate constants.  */
    TCGv lit;

    bool singlestep_enabled;
};

/* Return values from translate_one, indicating the state of the TB.
   Note that zero indicates that we are not exiting the TB.  */
typedef enum {
    NO_EXIT,

    /* We have emitted one or more goto_tb.  No fixup required.  */
    EXIT_GOTO_TB,

    /* We are not using a goto_tb (for whatever reason), but have updated
       the PC (for whatever reason), so there's no need to do it again on
       exiting the TB.  */
    EXIT_PC_UPDATED,

    /* We are exiting the TB, but have neither emitted a goto_tb, nor
       updated the PC for the next instruction to be executed.  */
    EXIT_PC_STALE,

    /* We are ending the TB with a noreturn function call, e.g. longjmp.
       No following code will be executed.  */
    EXIT_NORETURN,
} ExitStatus;

/* global register indexes */
static TCGv_ptr cpu_env;
static TCGv cpu_ir[31];
static TCGv cpu_fir[31];
static TCGv cpu_pc;
static TCGv cpu_lock_addr;
static TCGv cpu_lock_st_addr;
static TCGv cpu_lock_value;

#include "exec/gen-icount.h"
void alpha_translate_init(void)
{
#define DEF_VAR(V)  { &cpu_##V, #V, offsetof(CPUAlphaState, V) }

    typedef struct { TCGv *var; const char *name; int ofs; } GlobalVar;
    static const GlobalVar vars[] = {
        DEF_VAR(lock_st_addr),
    };

#undef DEF_VAR

    /* Use the symbolic register names that match the disassembler.  */
    static const char greg_names[31][4] = {
        "v0", "t0", "t1", "t2", "t3", "t4", "t5", "t6",
        "t7", "s0", "s1", "s2", "s3", "s4", "s5", "fp",
        "a0", "a1", "a2", "a3", "a4", "a5", "t8", "t9",
        "t10", "t11", "ra", "t12", "at", "gp", "sp"
    };
    static const char freg_names[31][4] = {
        "f0", "f1", "f2", "f3", "f4", "f5", "f6", "f7",
        "f8", "f9", "f10", "f11", "f12", "f13", "f14", "f15",
        "f16", "f17", "f18", "f19", "f20", "f21", "f22", "f23",
        "f24", "f25", "f26", "f27", "f28", "f29", "f30"
    };

    static bool done_init = 0;
    int i;

    if (done_init) {
        return;
    }
    done_init = 1;

    cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");

    for (i = 0; i < 31; i++) {
        cpu_ir[i] = tcg_global_mem_new_i64(TCG_AREG0,
                                           offsetof(CPUAlphaState, ir[i]),
                                           greg_names[i]);
    }

    for (i = 0; i < 31; i++) {
        cpu_fir[i] = tcg_global_mem_new_i64(TCG_AREG0,
                                            offsetof(CPUAlphaState, fir[i]),
                                            freg_names[i]);
    }

    for (i = 0; i < ARRAY_SIZE(vars); ++i) {
        const GlobalVar *v = &vars[i];
        *v->var = tcg_global_mem_new_i64(TCG_AREG0, v->ofs, v->name);
    }
}
static TCGv load_zero(DisasContext *ctx)
{
    if (TCGV_IS_UNUSED_I64(ctx->zero)) {
        ctx->zero = tcg_const_i64(0);
    }
    return ctx->zero;
}

static TCGv dest_sink(DisasContext *ctx)
{
    if (TCGV_IS_UNUSED_I64(ctx->sink)) {
        ctx->sink = tcg_temp_new();
    }
    return ctx->sink;
}

static TCGv load_gpr(DisasContext *ctx, unsigned reg)
{
    if (likely(reg < 31)) {
        return cpu_ir[reg];
    } else {
        return load_zero(ctx);
    }
}

static TCGv load_gpr_lit(DisasContext *ctx, unsigned reg,
                         uint8_t lit, bool islit)
{
    if (islit) {
        ctx->lit = tcg_const_i64(lit);
        return ctx->lit;
    } else if (likely(reg < 31)) {
        return cpu_ir[reg];
    } else {
        return load_zero(ctx);
    }
}

static TCGv dest_gpr(DisasContext *ctx, unsigned reg)
{
    if (likely(reg < 31)) {
        return cpu_ir[reg];
    } else {
        return dest_sink(ctx);
    }
}

static TCGv load_fpr(DisasContext *ctx, unsigned reg)
{
    if (likely(reg < 31)) {
        return cpu_fir[reg];
    } else {
        return load_zero(ctx);
    }
}

static TCGv dest_fpr(DisasContext *ctx, unsigned reg)
{
    if (likely(reg < 31)) {
        return cpu_fir[reg];
    } else {
        return dest_sink(ctx);
    }
}
static void gen_excp_1(int exception, int error_code)
{
    TCGv_i32 tmp1, tmp2;

    tmp1 = tcg_const_i32(exception);
    tmp2 = tcg_const_i32(error_code);
    gen_helper_excp(cpu_env, tmp1, tmp2);
    tcg_temp_free_i32(tmp2);
    tcg_temp_free_i32(tmp1);
}

static ExitStatus gen_excp(DisasContext *ctx, int exception, int error_code)
{
    tcg_gen_movi_i64(cpu_pc, ctx->pc);
    gen_excp_1(exception, error_code);
    return EXIT_NORETURN;
}

static inline ExitStatus gen_invalid(DisasContext *ctx)
{
    return gen_excp(ctx, EXCP_OPCDEC, 0);
}
static inline void gen_qemu_ldf(TCGv t0, TCGv t1, int flags)
{
    TCGv_i32 tmp32 = tcg_temp_new_i32();
    tcg_gen_qemu_ld_i32(tmp32, t1, flags, MO_LEUL);
    gen_helper_memory_to_f(t0, tmp32);
    tcg_temp_free_i32(tmp32);
}

static inline void gen_qemu_ldg(TCGv t0, TCGv t1, int flags)
{
    TCGv tmp = tcg_temp_new();
    tcg_gen_qemu_ld_i64(tmp, t1, flags, MO_LEQ);
    gen_helper_memory_to_g(t0, tmp);
    tcg_temp_free(tmp);
}

static inline void gen_qemu_lds(TCGv t0, TCGv t1, int flags)
{
    TCGv_i32 tmp32 = tcg_temp_new_i32();
    tcg_gen_qemu_ld_i32(tmp32, t1, flags, MO_LEUL);
    gen_helper_memory_to_s(t0, tmp32);
    tcg_temp_free_i32(tmp32);
}

static inline void gen_qemu_ldl_l(TCGv t0, TCGv t1, int flags)
{
    tcg_gen_qemu_ld_i64(t0, t1, flags, MO_LESL);
    tcg_gen_mov_i64(cpu_lock_addr, t1);
    tcg_gen_mov_i64(cpu_lock_value, t0);
}

static inline void gen_qemu_ldq_l(TCGv t0, TCGv t1, int flags)
{
    tcg_gen_qemu_ld_i64(t0, t1, flags, MO_LEQ);
    tcg_gen_mov_i64(cpu_lock_addr, t1);
    tcg_gen_mov_i64(cpu_lock_value, t0);
}

static inline void gen_load_mem(DisasContext *ctx,
                                void (*tcg_gen_qemu_load)(TCGv t0, TCGv t1,
                                                          int flags),
                                int ra, int rb, int32_t disp16, bool fp,
                                bool clear)
{
    TCGv tmp, addr, va;

    /* LDQ_U with ra $31 is UNOP.  Other various loads are forms of
       prefetches, which we can treat as nops.  No worries about
       missed exceptions here.  */
    if (unlikely(ra == 31)) {
        return;
    }

    tmp = tcg_temp_new();
    addr = load_gpr(ctx, rb);

    if (disp16) {
        tcg_gen_addi_i64(tmp, addr, disp16);
        addr = tmp;
    }
    if (clear) {
        tcg_gen_andi_i64(tmp, addr, ~0x7);
        addr = tmp;
    }

    va = (fp ? cpu_fir[ra] : cpu_ir[ra]);
    tcg_gen_qemu_load(va, addr, ctx->mem_idx);

    tcg_temp_free(tmp);
}
static inline void gen_qemu_stf(TCGv t0, TCGv t1, int flags)
{
    TCGv_i32 tmp32 = tcg_temp_new_i32();
    gen_helper_f_to_memory(tmp32, t0);
    tcg_gen_qemu_st_i32(tmp32, t1, flags, MO_LEUL);
    tcg_temp_free_i32(tmp32);
}

static inline void gen_qemu_stg(TCGv t0, TCGv t1, int flags)
{
    TCGv tmp = tcg_temp_new();
    gen_helper_g_to_memory(tmp, t0);
    tcg_gen_qemu_st_i64(tmp, t1, flags, MO_LEQ);
    tcg_temp_free(tmp);
}

static inline void gen_qemu_sts(TCGv t0, TCGv t1, int flags)
{
    TCGv_i32 tmp32 = tcg_temp_new_i32();
    gen_helper_s_to_memory(tmp32, t0);
    tcg_gen_qemu_st_i32(tmp32, t1, flags, MO_LEUL);
    tcg_temp_free_i32(tmp32);
}

static inline void gen_store_mem(DisasContext *ctx,
                                 void (*tcg_gen_qemu_store)(TCGv t0, TCGv t1,
                                                            int flags),
                                 int ra, int rb, int32_t disp16, bool fp,
                                 bool clear)
{
    TCGv tmp, addr, va;

    tmp = tcg_temp_new();
    addr = load_gpr(ctx, rb);

    if (disp16) {
        tcg_gen_addi_i64(tmp, addr, disp16);
        addr = tmp;
    }
    if (clear) {
        tcg_gen_andi_i64(tmp, addr, ~0x7);
        addr = tmp;
    }

    va = (fp ? load_fpr(ctx, ra) : load_gpr(ctx, ra));
    tcg_gen_qemu_store(va, addr, ctx->mem_idx);

    tcg_temp_free(tmp);
}
static ExitStatus gen_store_conditional(DisasContext *ctx, int ra, int rb,
                                        int32_t disp16, int quad)
{
    TCGv addr;

    if (ra == 31) {
        /* ??? Don't bother storing anything.  The user can't tell
           the difference, since the zero register always reads zero.  */
        return NO_EXIT;
    }

#if defined(CONFIG_USER_ONLY)
    addr = cpu_lock_st_addr;
#else
    addr = tcg_temp_local_new();
#endif

    tcg_gen_addi_i64(addr, load_gpr(ctx, rb), disp16);

#if defined(CONFIG_USER_ONLY)
    /* ??? This is handled via a complicated version of compare-and-swap
       in the cpu_loop.  Hopefully one day we'll have a real CAS opcode
       in TCG so that this isn't necessary.  */
    return gen_excp(ctx, quad ? EXCP_STQ_C : EXCP_STL_C, ra);
#else
    /* ??? In system mode we are never multi-threaded, so CAS can be
       implemented via a non-atomic load-compare-store sequence.  */
    {
        TCGLabel *lab_fail, *lab_done;
        TCGv val;

        lab_fail = gen_new_label();
        lab_done = gen_new_label();
        tcg_gen_brcond_i64(TCG_COND_NE, addr, cpu_lock_addr, lab_fail);

        val = tcg_temp_new();
        tcg_gen_qemu_ld_i64(val, addr, ctx->mem_idx, quad ? MO_LEQ : MO_LESL);
        tcg_gen_brcond_i64(TCG_COND_NE, val, cpu_lock_value, lab_fail);

        tcg_gen_qemu_st_i64(cpu_ir[ra], addr, ctx->mem_idx,
                            quad ? MO_LEQ : MO_LEUL);
        tcg_gen_movi_i64(cpu_ir[ra], 1);
        tcg_gen_br(lab_done);

        gen_set_label(lab_fail);
        tcg_gen_movi_i64(cpu_ir[ra], 0);

        gen_set_label(lab_done);
        tcg_gen_movi_i64(cpu_lock_addr, -1);

        tcg_temp_free(addr);
        return NO_EXIT;
    }
#endif
}
static bool in_superpage(DisasContext *ctx, int64_t addr)
{
    return ((ctx->tb->flags & TB_FLAGS_USER_MODE) == 0
            && ((addr >> 41) & 3) == 2
            && addr >> TARGET_VIRT_ADDR_SPACE_BITS == addr >> 63);
}
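
/* Illustrative note (not in the original source): the test above accepts a
   kernel-mode address such as 0xfffffc0000000000 -- bits <42:41> equal 2 and
   every bit above TARGET_VIRT_ADDR_SPACE_BITS equals the sign bit -- while a
   user-space address like 0x0000000120000000 fails the <42:41> check.  */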
static bool use_goto_tb(DisasContext *ctx, uint64_t dest)
{
    /* Suppress goto_tb in the case of single-stepping and IO.  */
    if ((ctx->tb->cflags & CF_LAST_IO)
        || ctx->singlestep_enabled || singlestep) {
        return false;
    }
    /* If the destination is in the superpage, the page perms can't change.  */
    if (in_superpage(ctx, dest)) {
        return true;
    }
    /* Check for the dest on the same page as the start of the TB.  */
    return ((ctx->tb->pc ^ dest) & TARGET_PAGE_MASK) == 0;
}
static ExitStatus gen_bdirect(DisasContext *ctx, int ra, int32_t disp)
{
    uint64_t dest = ctx->pc + (disp << 2);

    if (ra != 31) {
        tcg_gen_movi_i64(cpu_ir[ra], ctx->pc);
    }

    /* Notice branch-to-next; used to initialize RA with the PC.  */
    if (disp == 0) {
        return 0;
    } else if (use_goto_tb(ctx, dest)) {
        tcg_gen_goto_tb(0);
        tcg_gen_movi_i64(cpu_pc, dest);
        tcg_gen_exit_tb((uintptr_t)ctx->tb);
        return EXIT_GOTO_TB;
    } else {
        tcg_gen_movi_i64(cpu_pc, dest);
        return EXIT_PC_UPDATED;
    }
}
static ExitStatus gen_bcond_internal(DisasContext *ctx, TCGCond cond,
                                     TCGv cmp, int32_t disp)
{
    uint64_t dest = ctx->pc + (disp << 2);
    TCGLabel *lab_true = gen_new_label();

    if (use_goto_tb(ctx, dest)) {
        tcg_gen_brcondi_i64(cond, cmp, 0, lab_true);

        tcg_gen_goto_tb(0);
        tcg_gen_movi_i64(cpu_pc, ctx->pc);
        tcg_gen_exit_tb((uintptr_t)ctx->tb);

        gen_set_label(lab_true);
        tcg_gen_goto_tb(1);
        tcg_gen_movi_i64(cpu_pc, dest);
        tcg_gen_exit_tb((uintptr_t)ctx->tb + 1);

        return EXIT_GOTO_TB;
    } else {
        TCGv_i64 z = tcg_const_i64(0);
        TCGv_i64 d = tcg_const_i64(dest);
        TCGv_i64 p = tcg_const_i64(ctx->pc);

        tcg_gen_movcond_i64(cond, cpu_pc, cmp, z, d, p);

        tcg_temp_free_i64(z);
        tcg_temp_free_i64(d);
        tcg_temp_free_i64(p);
        return EXIT_PC_UPDATED;
    }
}
static ExitStatus gen_bcond(DisasContext *ctx, TCGCond cond, int ra,
                            int32_t disp, int mask)
{
    TCGv cmp_tmp;

    if (mask) {
        cmp_tmp = tcg_temp_new();
        tcg_gen_andi_i64(cmp_tmp, load_gpr(ctx, ra), 1);
    } else {
        cmp_tmp = load_gpr(ctx, ra);
    }

    return gen_bcond_internal(ctx, cond, cmp_tmp, disp);
}
/* Fold -0.0 for comparison with COND.  */

static void gen_fold_mzero(TCGCond cond, TCGv dest, TCGv src)
{
    uint64_t mzero = 1ull << 63;

    switch (cond) {
    case TCG_COND_LE:
    case TCG_COND_GT:
        /* For <= or >, the -0.0 value directly compares the way we want.  */
        tcg_gen_mov_i64(dest, src);
        break;

    case TCG_COND_EQ:
    case TCG_COND_NE:
        /* For == or !=, we can simply mask off the sign bit and compare.  */
        tcg_gen_andi_i64(dest, src, mzero - 1);
        break;

    case TCG_COND_GE:
    case TCG_COND_LT:
        /* For >= or <, map -0.0 to +0.0 via comparison and mask.  */
        tcg_gen_setcondi_i64(TCG_COND_NE, dest, src, mzero);
        tcg_gen_neg_i64(dest, dest);
        tcg_gen_and_i64(dest, dest, src);
        break;

    default:
        abort();
    }
}
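
/* Illustrative note (not in the original source): in the >= / < case above,
   src == 0x8000000000000000 (-0.0) makes the setcond produce 0, the negation
   produce 0, and the final AND deliver +0.0; any other src yields an all-ones
   mask and passes through unchanged.  */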
static ExitStatus gen_fbcond(DisasContext *ctx, TCGCond cond, int ra,
                             int32_t disp)
{
    TCGv cmp_tmp = tcg_temp_new();
    gen_fold_mzero(cond, cmp_tmp, load_fpr(ctx, ra));
    return gen_bcond_internal(ctx, cond, cmp_tmp, disp);
}

static void gen_fcmov(DisasContext *ctx, TCGCond cond, int ra, int rb, int rc)
{
    TCGv va, vb, z;

    z = load_zero(ctx);
    vb = load_fpr(ctx, rb);
    va = tcg_temp_new();
    gen_fold_mzero(cond, va, load_fpr(ctx, ra));

    tcg_gen_movcond_i64(cond, dest_fpr(ctx, rc), va, z, vb, load_fpr(ctx, rc));

    tcg_temp_free(va);
}
#define QUAL_RM_N       0x080   /* Round mode nearest even */
#define QUAL_RM_C       0x000   /* Round mode chopped */
#define QUAL_RM_M       0x040   /* Round mode minus infinity */
#define QUAL_RM_D       0x0c0   /* Round mode dynamic */
#define QUAL_RM_MASK    0x0c0

#define QUAL_U          0x100   /* Underflow enable (fp output) */
#define QUAL_V          0x100   /* Overflow enable (int output) */
#define QUAL_S          0x400   /* Software completion enable */
#define QUAL_I          0x200   /* Inexact detection enable */
static void gen_qual_roundmode(DisasContext *ctx, int fn11)
{
    TCGv_i32 tmp;

    fn11 &= QUAL_RM_MASK;
    if (fn11 == ctx->tb_rm) {
        return;
    }
    ctx->tb_rm = fn11;

    tmp = tcg_temp_new_i32();
    switch (fn11) {
    case QUAL_RM_N:
        tcg_gen_movi_i32(tmp, float_round_nearest_even);
        break;
    case QUAL_RM_C:
        tcg_gen_movi_i32(tmp, float_round_to_zero);
        break;
    case QUAL_RM_M:
        tcg_gen_movi_i32(tmp, float_round_down);
        break;
    case QUAL_RM_D:
        tcg_gen_ld8u_i32(tmp, cpu_env,
                         offsetof(CPUAlphaState, fpcr_dyn_round));
        break;
    }

#if defined(CONFIG_SOFTFLOAT_INLINE)
    /* ??? The "fpu/softfloat.h" interface is to call set_float_rounding_mode.
       With CONFIG_SOFTFLOAT that expands to an out-of-line call that just
       sets the one field.  */
    tcg_gen_st8_i32(tmp, cpu_env,
                    offsetof(CPUAlphaState, fp_status.float_rounding_mode));
#else
    gen_helper_setroundmode(tmp);
#endif

    tcg_temp_free_i32(tmp);
}
static void gen_qual_flushzero(DisasContext *ctx, int fn11)
{
    TCGv_i32 tmp;

    fn11 &= QUAL_U;
    if (fn11 == ctx->tb_ftz) {
        return;
    }
    ctx->tb_ftz = fn11;

    tmp = tcg_temp_new_i32();
    if (fn11) {
        /* Underflow is enabled, use the FPCR setting.  */
        tcg_gen_ld8u_i32(tmp, cpu_env,
                         offsetof(CPUAlphaState, fpcr_flush_to_zero));
    } else {
        /* Underflow is disabled, force flush-to-zero.  */
        tcg_gen_movi_i32(tmp, 1);
    }

#if defined(CONFIG_SOFTFLOAT_INLINE)
    tcg_gen_st8_i32(tmp, cpu_env,
                    offsetof(CPUAlphaState, fp_status.flush_to_zero));
#else
    gen_helper_setflushzero(tmp);
#endif

    tcg_temp_free_i32(tmp);
}
static TCGv gen_ieee_input(DisasContext *ctx, int reg, int fn11, int is_cmp)
{
    TCGv val;

    if (unlikely(reg == 31)) {
        val = load_zero(ctx);
    } else {
        val = cpu_fir[reg];
        if ((fn11 & QUAL_S) == 0) {
            if (is_cmp) {
                gen_helper_ieee_input_cmp(cpu_env, val);
            } else {
                gen_helper_ieee_input(cpu_env, val);
            }
        } else {
#ifndef CONFIG_USER_ONLY
            /* In system mode, raise exceptions for denormals like real
               hardware.  In user mode, proceed as if the OS completion
               handler is handling the denormal as per spec.  */
            gen_helper_ieee_input_s(cpu_env, val);
#endif
        }
    }
    return val;
}
static void gen_fp_exc_raise(int rc, int fn11)
{
    /* ??? We ought to be able to do something with imprecise exceptions.
       E.g. notice we're still in the trap shadow of something within the
       TB and do not generate the code to signal the exception; end the TB
       when an exception is forced to arrive, either by consumption of a
       register value or TRAPB or EXCB.  */

    uint32_t ignore = 0;
    TCGv_i32 reg, ign;

    if (!(fn11 & QUAL_U)) {
        /* Note that QUAL_U == QUAL_V, so ignore either.  */
        ignore |= FPCR_UNF | FPCR_IOV;
    }
    if (!(fn11 & QUAL_I)) {
        ignore |= FPCR_INE;
    }
    ign = tcg_const_i32(ignore);

    /* ??? Pass in the regno of the destination so that the helper can
       set EXC_MASK, which contains a bitmask of destination registers
       that have caused arithmetic traps.  A simple userspace emulation
       does not require this.  We do need it for a guest kernel's entArith,
       or if we were to do something clever with imprecise exceptions.  */
    reg = tcg_const_i32(rc + 32);
    if (fn11 & QUAL_S) {
        gen_helper_fp_exc_raise_s(cpu_env, ign, reg);
    } else {
        gen_helper_fp_exc_raise(cpu_env, ign, reg);
    }

    tcg_temp_free_i32(reg);
    tcg_temp_free_i32(ign);
}
static void gen_cvtlq(TCGv vc, TCGv vb)
{
    TCGv tmp = tcg_temp_new();

    /* The arithmetic right shift here, plus the sign-extended mask below
       yields a sign-extended result without an explicit ext32s_i64.  */
    tcg_gen_sari_i64(tmp, vb, 32);
    tcg_gen_shri_i64(vc, vb, 29);
    tcg_gen_andi_i64(tmp, tmp, (int32_t)0xc0000000);
    tcg_gen_andi_i64(vc, vc, 0x3fffffff);
    tcg_gen_or_i64(vc, vc, tmp);

    tcg_temp_free(tmp);
}
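
/* Illustrative note (not in the original source): the shifts above unpack the
   longword-in-FP-register layout -- result bits <31:30> come from vb bits
   <63:62>, bits <29:0> from vb bits <58:29>, and the sign-extended 0xc0000000
   mask replicates the sign bit into the high 32 bits of the result.  */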
static void gen_ieee_arith2(DisasContext *ctx,
                            void (*helper)(TCGv, TCGv_ptr, TCGv),
                            int rb, int rc, int fn11)
{
    TCGv vb;

    gen_qual_roundmode(ctx, fn11);
    gen_qual_flushzero(ctx, fn11);

    vb = gen_ieee_input(ctx, rb, fn11, 0);
    helper(dest_fpr(ctx, rc), cpu_env, vb);

    gen_fp_exc_raise(rc, fn11);
}

#define IEEE_ARITH2(name)                                       \
static inline void glue(gen_, name)(DisasContext *ctx,          \
                                    int rb, int rc, int fn11)   \
{                                                               \
    gen_ieee_arith2(ctx, gen_helper_##name, rb, rc, fn11);      \
}
static void gen_cvttq(DisasContext *ctx, int rb, int rc, int fn11)
{
    TCGv vb, vc;

    /* No need to set flushzero, since we have an integer output.  */
    vb = gen_ieee_input(ctx, rb, fn11, 0);
    vc = dest_fpr(ctx, rc);

    /* Almost all integer conversions use cropped rounding;
       special case that.  */
    if ((fn11 & QUAL_RM_MASK) == QUAL_RM_C) {
        gen_helper_cvttq_c(vc, cpu_env, vb);
    } else {
        gen_qual_roundmode(ctx, fn11);
        gen_helper_cvttq(vc, cpu_env, vb);
    }
    gen_fp_exc_raise(rc, fn11);
}

static void gen_ieee_intcvt(DisasContext *ctx,
                            void (*helper)(TCGv, TCGv_ptr, TCGv),
                            int rb, int rc, int fn11)
{
    TCGv vb, vc;

    gen_qual_roundmode(ctx, fn11);
    vb = load_fpr(ctx, rb);
    vc = dest_fpr(ctx, rc);

    /* The only exception that can be raised by integer conversion
       is inexact.  Thus we only need to worry about exceptions when
       inexact handling is requested.  */
    if (fn11 & QUAL_I) {
        helper(vc, cpu_env, vb);
        gen_fp_exc_raise(rc, fn11);
    } else {
        helper(vc, cpu_env, vb);
    }
}

#define IEEE_INTCVT(name)                                       \
static inline void glue(gen_, name)(DisasContext *ctx,          \
                                    int rb, int rc, int fn11)   \
{                                                               \
    gen_ieee_intcvt(ctx, gen_helper_##name, rb, rc, fn11);      \
}
static void gen_cpy_mask(TCGv vc, TCGv va, TCGv vb, bool inv_a, uint64_t mask)
{
    TCGv vmask = tcg_const_i64(mask);
    TCGv tmp = tcg_temp_new_i64();

    if (inv_a) {
        tcg_gen_andc_i64(tmp, vmask, va);
    } else {
        tcg_gen_and_i64(tmp, va, vmask);
    }

    tcg_gen_andc_i64(vc, vb, vmask);
    tcg_gen_or_i64(vc, vc, tmp);

    tcg_temp_free(vmask);
    tcg_temp_free(tmp);
}

static void gen_ieee_arith3(DisasContext *ctx,
                            void (*helper)(TCGv, TCGv_ptr, TCGv, TCGv),
                            int ra, int rb, int rc, int fn11)
{
    TCGv va, vb, vc;

    gen_qual_roundmode(ctx, fn11);
    gen_qual_flushzero(ctx, fn11);

    va = gen_ieee_input(ctx, ra, fn11, 0);
    vb = gen_ieee_input(ctx, rb, fn11, 0);
    vc = dest_fpr(ctx, rc);
    helper(vc, cpu_env, va, vb);

    gen_fp_exc_raise(rc, fn11);
}

#define IEEE_ARITH3(name)                                               \
static inline void glue(gen_, name)(DisasContext *ctx,                  \
                                    int ra, int rb, int rc, int fn11)   \
{                                                                       \
    gen_ieee_arith3(ctx, gen_helper_##name, ra, rb, rc, fn11);          \
}
static void gen_ieee_compare(DisasContext *ctx,
                             void (*helper)(TCGv, TCGv_ptr, TCGv, TCGv),
                             int ra, int rb, int rc, int fn11)
{
    TCGv va, vb, vc;

    va = gen_ieee_input(ctx, ra, fn11, 1);
    vb = gen_ieee_input(ctx, rb, fn11, 1);
    vc = dest_fpr(ctx, rc);
    helper(vc, cpu_env, va, vb);

    gen_fp_exc_raise(rc, fn11);
}

#define IEEE_CMP3(name)                                                 \
static inline void glue(gen_, name)(DisasContext *ctx,                  \
                                    int ra, int rb, int rc, int fn11)   \
{                                                                       \
    gen_ieee_compare(ctx, gen_helper_##name, ra, rb, rc, fn11);         \
}
static inline uint64_t zapnot_mask(uint8_t lit)
{
    uint64_t mask = 0;
    int i;

    for (i = 0; i < 8; ++i) {
        if ((lit >> i) & 1) {
            mask |= 0xffull << (i * 8);
        }
    }
    return mask;
}
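
/* Illustrative note (not in the original source): each set bit of LIT keeps
   the corresponding byte, so zapnot_mask(0x01) == 0x00000000000000ffull and
   zapnot_mask(0x0f) == 0x00000000ffffffffull.  */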
/* Implement zapnot with an immediate operand, which expands to some
   form of immediate AND.  This is a basic building block in the
   definition of many of the other byte manipulation instructions.  */
static void gen_zapnoti(TCGv dest, TCGv src, uint8_t lit)
{
    switch (lit) {
    case 0x00:
        tcg_gen_movi_i64(dest, 0);
        break;
    case 0x01:
        tcg_gen_ext8u_i64(dest, src);
        break;
    case 0x03:
        tcg_gen_ext16u_i64(dest, src);
        break;
    case 0x0f:
        tcg_gen_ext32u_i64(dest, src);
        break;
    case 0xff:
        tcg_gen_mov_i64(dest, src);
        break;
    default:
        tcg_gen_andi_i64(dest, src, zapnot_mask(lit));
        break;
    }
}
/* EXTWH, EXTLH, EXTQH */
static void gen_ext_h(DisasContext *ctx, TCGv vc, TCGv va, int rb, bool islit,
                      uint8_t lit, uint8_t byte_mask)
{
    if (islit) {
        tcg_gen_shli_i64(vc, va, (64 - lit * 8) & 0x3f);
    } else {
        TCGv tmp = tcg_temp_new();
        tcg_gen_shli_i64(tmp, load_gpr(ctx, rb), 3);
        tcg_gen_neg_i64(tmp, tmp);
        tcg_gen_andi_i64(tmp, tmp, 0x3f);
        tcg_gen_shl_i64(vc, va, tmp);
        tcg_temp_free(tmp);
    }
    gen_zapnoti(vc, vc, byte_mask);
}
/* EXTBL, EXTWL, EXTLL, EXTQL */
static void gen_ext_l(DisasContext *ctx, TCGv vc, TCGv va, int rb, bool islit,
                      uint8_t lit, uint8_t byte_mask)
{
    if (islit) {
        tcg_gen_shri_i64(vc, va, (lit & 7) * 8);
    } else {
        TCGv tmp = tcg_temp_new();
        tcg_gen_andi_i64(tmp, load_gpr(ctx, rb), 7);
        tcg_gen_shli_i64(tmp, tmp, 3);
        tcg_gen_shr_i64(vc, va, tmp);
        tcg_temp_free(tmp);
    }
    gen_zapnoti(vc, vc, byte_mask);
}
/* INSWH, INSLH, INSQH */
static void gen_ins_h(DisasContext *ctx, TCGv vc, TCGv va, int rb, bool islit,
                      uint8_t lit, uint8_t byte_mask)
{
    TCGv tmp = tcg_temp_new();

    /* The instruction description has us left-shift the byte mask and extract
       bits <15:8> and apply that zap at the end.  This is equivalent to simply
       performing the zap first and shifting afterward.  */
    gen_zapnoti(tmp, va, byte_mask);

    if (islit) {
        lit &= 7;
        if (unlikely(lit == 0)) {
            tcg_gen_movi_i64(vc, 0);
        } else {
            tcg_gen_shri_i64(vc, tmp, 64 - lit * 8);
        }
    } else {
        TCGv shift = tcg_temp_new();

        /* If (B & 7) == 0, we need to shift by 64 and leave a zero.  Do this
           portably by splitting the shift into two parts: shift_count-1 and 1.
           Arrange for the -1 by using ones-complement instead of
           twos-complement in the negation: ~(B * 8) & 63.  */
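        /* Illustrative note (not in the original source): for B & 7 == 2 the
           expression gives ~(2 * 8) & 63 == 47, and 47 + 1 == 48 == 64 - 16;
           for B & 7 == 0 it gives 63, so the extra 1-bit shift below brings
           the total to 64 and produces the required zero.  */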
        tcg_gen_shli_i64(shift, load_gpr(ctx, rb), 3);
        tcg_gen_not_i64(shift, shift);
        tcg_gen_andi_i64(shift, shift, 0x3f);

        tcg_gen_shr_i64(vc, tmp, shift);
        tcg_gen_shri_i64(vc, vc, 1);
        tcg_temp_free(shift);
    }
    tcg_temp_free(tmp);
}
/* INSBL, INSWL, INSLL, INSQL */
static void gen_ins_l(DisasContext *ctx, TCGv vc, TCGv va, int rb, bool islit,
                      uint8_t lit, uint8_t byte_mask)
{
    TCGv tmp = tcg_temp_new();

    /* The instruction description has us left-shift the byte mask
       the same number of byte slots as the data and apply the zap
       at the end.  This is equivalent to simply performing the zap
       first and shifting afterward.  */
    gen_zapnoti(tmp, va, byte_mask);

    if (islit) {
        tcg_gen_shli_i64(vc, tmp, (lit & 7) * 8);
    } else {
        TCGv shift = tcg_temp_new();
        tcg_gen_andi_i64(shift, load_gpr(ctx, rb), 7);
        tcg_gen_shli_i64(shift, shift, 3);
        tcg_gen_shl_i64(vc, tmp, shift);
        tcg_temp_free(shift);
    }
    tcg_temp_free(tmp);
}
/* MSKWH, MSKLH, MSKQH */
static void gen_msk_h(DisasContext *ctx, TCGv vc, TCGv va, int rb, bool islit,
                      uint8_t lit, uint8_t byte_mask)
{
    if (islit) {
        gen_zapnoti(vc, va, ~((byte_mask << (lit & 7)) >> 8));
    } else {
        TCGv shift = tcg_temp_new();
        TCGv mask = tcg_temp_new();

        /* The instruction description is as above, where the byte_mask
           is shifted left, and then we extract bits <15:8>.  This can be
           emulated with a right-shift on the expanded byte mask.  This
           requires extra care because for an input <2:0> == 0 we need a
           shift of 64 bits in order to generate a zero.  This is done by
           splitting the shift into two parts, the variable shift - 1
           followed by a constant 1 shift.  The code we expand below is
           equivalent to ~(B * 8) & 63.  */
        tcg_gen_shli_i64(shift, load_gpr(ctx, rb), 3);
        tcg_gen_not_i64(shift, shift);
        tcg_gen_andi_i64(shift, shift, 0x3f);
        tcg_gen_movi_i64(mask, zapnot_mask(byte_mask));
        tcg_gen_shr_i64(mask, mask, shift);
        tcg_gen_shri_i64(mask, mask, 1);

        tcg_gen_andc_i64(vc, va, mask);

        tcg_temp_free(mask);
        tcg_temp_free(shift);
    }
}
/* MSKBL, MSKWL, MSKLL, MSKQL */
static void gen_msk_l(DisasContext *ctx, TCGv vc, TCGv va, int rb, bool islit,
                      uint8_t lit, uint8_t byte_mask)
{
    if (islit) {
        gen_zapnoti(vc, va, ~(byte_mask << (lit & 7)));
    } else {
        TCGv shift = tcg_temp_new();
        TCGv mask = tcg_temp_new();

        tcg_gen_andi_i64(shift, load_gpr(ctx, rb), 7);
        tcg_gen_shli_i64(shift, shift, 3);
        tcg_gen_movi_i64(mask, zapnot_mask(byte_mask));
        tcg_gen_shl_i64(mask, mask, shift);

        tcg_gen_andc_i64(vc, va, mask);

        tcg_temp_free(mask);
        tcg_temp_free(shift);
    }
}
static void gen_rx(int ra, int set)
{
    TCGv_i32 tmp;

    if (ra != 31) {
        tcg_gen_ld8u_i64(cpu_ir[ra], cpu_env, offsetof(CPUAlphaState, intr_flag));
    }

    tmp = tcg_const_i32(set);
    tcg_gen_st8_i32(tmp, cpu_env, offsetof(CPUAlphaState, intr_flag));
    tcg_temp_free_i32(tmp);
}
static ExitStatus gen_call_pal(DisasContext *ctx, int palcode)
{
    /* We're emulating OSF/1 PALcode.  Many of these are trivial access
       to internal cpu registers.  */

    /* Unprivileged PAL call */
    if (palcode >= 0x80 && palcode < 0xC0) {

        /* No-op inside QEMU.  */

        tcg_gen_ld_i64(cpu_ir[IR_V0], cpu_env,
                       offsetof(CPUAlphaState, unique));

        tcg_gen_st_i64(cpu_ir[IR_A0], cpu_env,
                       offsetof(CPUAlphaState, unique));
    }

#ifndef CONFIG_USER_ONLY
    /* Privileged PAL code */
    if (palcode < 0x40 && (ctx->tb->flags & TB_FLAGS_USER_MODE) == 0) {

        /* No-op inside QEMU.  */

        /* No-op inside QEMU.  */

        tcg_gen_st_i64(cpu_ir[IR_A0], cpu_env,
                       offsetof(CPUAlphaState, vptptr));

        tcg_gen_st_i64(cpu_ir[IR_A0], cpu_env,
                       offsetof(CPUAlphaState, sysval));

        tcg_gen_ld_i64(cpu_ir[IR_V0], cpu_env,
                       offsetof(CPUAlphaState, sysval));

        /* Note that we already know we're in kernel mode, so we know
           that PS only contains the 3 IPL bits.  */
        tcg_gen_ld8u_i64(cpu_ir[IR_V0], cpu_env,
                         offsetof(CPUAlphaState, ps));

        /* But make sure and store only the 3 IPL bits from the user.  */
        tmp = tcg_temp_new();
        tcg_gen_andi_i64(tmp, cpu_ir[IR_A0], PS_INT_MASK);
        tcg_gen_st8_i64(tmp, cpu_env, offsetof(CPUAlphaState, ps));

        tcg_gen_ld8u_i64(cpu_ir[IR_V0], cpu_env,
                         offsetof(CPUAlphaState, ps));

        tcg_gen_st_i64(cpu_ir[IR_A0], cpu_env,
                       offsetof(CPUAlphaState, usp));

        tcg_gen_ld_i64(cpu_ir[IR_V0], cpu_env,
                       offsetof(CPUAlphaState, usp));

        tcg_gen_ld32s_i64(cpu_ir[IR_V0], cpu_env,
                          -offsetof(AlphaCPU, env) + offsetof(CPUState, cpu_index));
    }
#endif

    return gen_invalid(ctx);

#ifdef CONFIG_USER_ONLY
    return gen_excp(ctx, EXCP_CALL_PAL, palcode);
#else
    {
        TCGv pc = tcg_const_i64(ctx->pc);
        TCGv entry = tcg_const_i64(palcode & 0x80
                                   ? 0x2000 + (palcode - 0x80) * 64
                                   : 0x1000 + palcode * 64);
, pc
, entry
);
1191 tcg_temp_free(entry
);
1194 /* Since the destination is running in PALmode, we don't really
1195 need the page permissions check. We'll see the existence of
1196 the page when we create the TB, and we'll flush all TBs if
1197 we change the PAL base register. */
1198 if (!ctx
->singlestep_enabled
&& !(ctx
->tb
->cflags
& CF_LAST_IO
)) {
1200 tcg_gen_exit_tb((uintptr_t)ctx
->tb
);
1201 return EXIT_GOTO_TB
;
1204 return EXIT_PC_UPDATED
;
1209 #ifndef CONFIG_USER_ONLY
1211 #define PR_BYTE 0x100000
1212 #define PR_LONG 0x200000
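
/* Illustrative note (not in the original source): cpu_pr_data() below returns
   an offset into CPUAlphaState with PR_BYTE or PR_LONG OR-ed in to indicate
   the access width; e.g. processor register 0 yields
   offsetof(CPUAlphaState, ps) | PR_BYTE, and gen_mfpr() masks the flag back
   off with ~PR_BYTE before emitting an 8-bit load.  */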
static int cpu_pr_data(int pr)
{
    switch (pr) {
    case  0: return offsetof(CPUAlphaState, ps) | PR_BYTE;
    case  1: return offsetof(CPUAlphaState, fen) | PR_BYTE;
    case  2: return offsetof(CPUAlphaState, pcc_ofs) | PR_LONG;
    case  3: return offsetof(CPUAlphaState, trap_arg0);
    case  4: return offsetof(CPUAlphaState, trap_arg1);
    case  5: return offsetof(CPUAlphaState, trap_arg2);
    case  6: return offsetof(CPUAlphaState, exc_addr);
    case  7: return offsetof(CPUAlphaState, palbr);
    case  8: return offsetof(CPUAlphaState, ptbr);
    case  9: return offsetof(CPUAlphaState, vptptr);
    case 10: return offsetof(CPUAlphaState, unique);
    case 11: return offsetof(CPUAlphaState, sysval);
    case 12: return offsetof(CPUAlphaState, usp);

        return offsetof(CPUAlphaState, shadow[pr - 32]);

        return offsetof(CPUAlphaState, scratch[pr - 40]);

        return offsetof(CPUAlphaState, alarm_expire);
    }
    return 0;
}
static ExitStatus gen_mfpr(DisasContext *ctx, TCGv va, int regno)
{
    int data = cpu_pr_data(regno);

    /* Special help for VMTIME and WALLTIME.  */
    if (regno == 250 || regno == 249) {
        void (*helper)(TCGv) = gen_helper_get_walltime;
        if (regno == 249) {
            helper = gen_helper_get_vmtime;
        }
        if (ctx->tb->cflags & CF_USE_ICOUNT) {
            gen_io_start();
            helper(va);
            gen_io_end();
            return EXIT_PC_STALE;
        } else {
            helper(va);
            return NO_EXIT;
        }
    }

    /* The basic registers are data only, and unknown registers
       are read-zero, write-ignore.  */
    if (data == 0) {
        tcg_gen_movi_i64(va, 0);
    } else if (data & PR_BYTE) {
        tcg_gen_ld8u_i64(va, cpu_env, data & ~PR_BYTE);
    } else if (data & PR_LONG) {
        tcg_gen_ld32s_i64(va, cpu_env, data & ~PR_LONG);
    } else {
        tcg_gen_ld_i64(va, cpu_env, data);
    }
    return NO_EXIT;
}
static ExitStatus gen_mtpr(DisasContext *ctx, TCGv vb, int regno)
{
    TCGv tmp;
    int data;

        gen_helper_tbia(cpu_env);

        gen_helper_tbis(cpu_env, vb);

        tmp = tcg_const_i64(1);
        tcg_gen_st32_i64(tmp, cpu_env, -offsetof(AlphaCPU, env) +
                                       offsetof(CPUState, halted));
        return gen_excp(ctx, EXCP_HLT, 0);

        gen_helper_halt(vb);
        return EXIT_PC_STALE;

        gen_helper_set_alarm(cpu_env, vb);

        tcg_gen_st_i64(vb, cpu_env, offsetof(CPUAlphaState, palbr));
        /* Changing the PAL base register implies un-chaining all of the TBs
           that ended with a CALL_PAL.  Since the base register usually only
           changes during boot, flushing everything works well.  */
        gen_helper_tb_flush(cpu_env);
        return EXIT_PC_STALE;

        /* The basic registers are data only, and unknown registers
           are read-zero, write-ignore.  */
        data = cpu_pr_data(regno);
        if (data & PR_BYTE) {
            tcg_gen_st8_i64(vb, cpu_env, data & ~PR_BYTE);
        } else if (data & PR_LONG) {
            tcg_gen_st32_i64(vb, cpu_env, data & ~PR_LONG);
        } else {
            tcg_gen_st_i64(vb, cpu_env, data);
        }

    return NO_EXIT;
}

#endif /* !USER_ONLY */

#define REQUIRE_NO_LIT                          \
    do {                                        \
        if (islit) {                            \
            goto invalid_opc;                   \
        }                                       \
    } while (0)

#define REQUIRE_TB_FLAG(FLAG)                   \
    do {                                        \
        if ((ctx->tb->flags & (FLAG)) == 0) {   \
            goto invalid_opc;                   \
        }                                       \
    } while (0)

#define REQUIRE_REG_31(WHICH)                   \
    do {                                        \
        if (WHICH != 31) {                      \
            goto invalid_opc;                   \
        }                                       \
    } while (0)
static ExitStatus translate_one(DisasContext *ctx, uint32_t insn)
{
    int32_t disp21, disp16, disp12 __attribute__((unused));
    uint16_t fn11;
    uint8_t opc, ra, rb, rc, fpfn, fn7, lit;
    bool islit, real_islit;
    TCGv va, vb, vc, tmp, tmp2;
    TCGv_i32 t32;
    ExitStatus ret;

    /* Decode all instruction fields */
    opc = extract32(insn, 26, 6);
    ra = extract32(insn, 21, 5);
    rb = extract32(insn, 16, 5);
    rc = extract32(insn, 0, 5);
    real_islit = islit = extract32(insn, 12, 1);
    lit = extract32(insn, 13, 8);

    disp21 = sextract32(insn, 0, 21);
    disp16 = sextract32(insn, 0, 16);
    disp12 = sextract32(insn, 0, 12);

    fn11 = extract32(insn, 5, 11);
    fpfn = extract32(insn, 5, 6);
    fn7 = extract32(insn, 5, 7);
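
    /* Illustrative note (not in the original source): these extracts follow
       the Alpha instruction layout -- opcode in bits <31:26>, ra in <25:21>,
       rb in <20:16>, rc in <4:0>; bit 12 selects a literal operand whose
       8-bit value sits in <20:13>; function codes occupy <15:5> (fn11),
       <10:5> (fpfn) and <11:5> (fn7); branch and memory displacements are the
       sign-extended low 21, 16, and 12 bits.  */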
    if (rb == 31 && !islit) {
        islit = true;
        lit = 0;
    }

        ret = gen_call_pal(ctx, insn & 0x03ffffff);

        disp16 = (uint32_t)disp16 << 16;

        va = dest_gpr(ctx, ra);
        /* It's worth special-casing immediate loads.  */
        if (rb == 31) {
            tcg_gen_movi_i64(va, disp16);
        } else {
            tcg_gen_addi_i64(va, load_gpr(ctx, rb), disp16);
        }

        REQUIRE_TB_FLAG(TB_FLAGS_AMASK_BWX);
        gen_load_mem(ctx, &tcg_gen_qemu_ld8u, ra, rb, disp16, 0, 0);

        gen_load_mem(ctx, &tcg_gen_qemu_ld64, ra, rb, disp16, 0, 1);

        REQUIRE_TB_FLAG(TB_FLAGS_AMASK_BWX);
        gen_load_mem(ctx, &tcg_gen_qemu_ld16u, ra, rb, disp16, 0, 0);

        REQUIRE_TB_FLAG(TB_FLAGS_AMASK_BWX);
        gen_store_mem(ctx, &tcg_gen_qemu_st16, ra, rb, disp16, 0, 0);

        REQUIRE_TB_FLAG(TB_FLAGS_AMASK_BWX);
        gen_store_mem(ctx, &tcg_gen_qemu_st8, ra, rb, disp16, 0, 0);

        gen_store_mem(ctx, &tcg_gen_qemu_st64, ra, rb, disp16, 0, 1);

        vc = dest_gpr(ctx, rc);
        vb = load_gpr_lit(ctx, rb, lit, islit);

            /* Special case ADDL as SEXTL.  */
            tcg_gen_ext32s_i64(vc, vb);

            /* Special case SUBQ as NEGQ.  */
            tcg_gen_neg_i64(vc, vb);

        va = load_gpr(ctx, ra);

            tcg_gen_add_i64(vc, va, vb);
            tcg_gen_ext32s_i64(vc, vc);

            tmp = tcg_temp_new();
            tcg_gen_shli_i64(tmp, va, 2);
            tcg_gen_add_i64(tmp, tmp, vb);
            tcg_gen_ext32s_i64(vc, tmp);

            tcg_gen_sub_i64(vc, va, vb);
            tcg_gen_ext32s_i64(vc, vc);

            tmp = tcg_temp_new();
            tcg_gen_shli_i64(tmp, va, 2);
            tcg_gen_sub_i64(tmp, tmp, vb);
            tcg_gen_ext32s_i64(vc, tmp);

            gen_helper_cmpbge(vc, va, vb);

            tmp = tcg_temp_new();
            tcg_gen_shli_i64(tmp, va, 3);
            tcg_gen_add_i64(tmp, tmp, vb);
            tcg_gen_ext32s_i64(vc, tmp);

            tmp = tcg_temp_new();
            tcg_gen_shli_i64(tmp, va, 3);
            tcg_gen_sub_i64(tmp, tmp, vb);
            tcg_gen_ext32s_i64(vc, tmp);

            tcg_gen_setcond_i64(TCG_COND_LTU, vc, va, vb);

            tcg_gen_add_i64(vc, va, vb);

            tmp = tcg_temp_new();
            tcg_gen_shli_i64(tmp, va, 2);
            tcg_gen_add_i64(vc, tmp, vb);

            tcg_gen_sub_i64(vc, va, vb);

            tmp = tcg_temp_new();
            tcg_gen_shli_i64(tmp, va, 2);
            tcg_gen_sub_i64(vc, tmp, vb);

            tcg_gen_setcond_i64(TCG_COND_EQ, vc, va, vb);

            tmp = tcg_temp_new();
            tcg_gen_shli_i64(tmp, va, 3);
            tcg_gen_add_i64(vc, tmp, vb);

            tmp = tcg_temp_new();
            tcg_gen_shli_i64(tmp, va, 3);
            tcg_gen_sub_i64(vc, tmp, vb);

            tcg_gen_setcond_i64(TCG_COND_LEU, vc, va, vb);

            tmp = tcg_temp_new();
            tcg_gen_ext32s_i64(tmp, va);
            tcg_gen_ext32s_i64(vc, vb);
            tcg_gen_add_i64(tmp, tmp, vc);
            tcg_gen_ext32s_i64(vc, tmp);
            gen_helper_check_overflow(cpu_env, vc, tmp);

            tmp = tcg_temp_new();
            tcg_gen_ext32s_i64(tmp, va);
            tcg_gen_ext32s_i64(vc, vb);
            tcg_gen_sub_i64(tmp, tmp, vc);
            tcg_gen_ext32s_i64(vc, tmp);
            gen_helper_check_overflow(cpu_env, vc, tmp);

            tcg_gen_setcond_i64(TCG_COND_LT, vc, va, vb);

            tmp = tcg_temp_new();
            tmp2 = tcg_temp_new();
            tcg_gen_eqv_i64(tmp, va, vb);
            tcg_gen_mov_i64(tmp2, va);
            tcg_gen_add_i64(vc, va, vb);
            tcg_gen_xor_i64(tmp2, tmp2, vc);
            tcg_gen_and_i64(tmp, tmp, tmp2);
            tcg_gen_shri_i64(tmp, tmp, 63);
            tcg_gen_movi_i64(tmp2, 0);
            gen_helper_check_overflow(cpu_env, tmp, tmp2);
            tcg_temp_free(tmp2);

            tmp = tcg_temp_new();
            tmp2 = tcg_temp_new();
            tcg_gen_xor_i64(tmp, va, vb);
            tcg_gen_mov_i64(tmp2, va);
            tcg_gen_sub_i64(vc, va, vb);
            tcg_gen_xor_i64(tmp2, tmp2, vc);
            tcg_gen_and_i64(tmp, tmp, tmp2);
            tcg_gen_shri_i64(tmp, tmp, 63);
            tcg_gen_movi_i64(tmp2, 0);
            gen_helper_check_overflow(cpu_env, tmp, tmp2);
            tcg_temp_free(tmp2);

            tcg_gen_setcond_i64(TCG_COND_LE, vc, va, vb);

            /* Special case BIS as NOP.  */

            /* Special case BIS as MOV.  */
            vc = dest_gpr(ctx, rc);
            if (islit) {
                tcg_gen_movi_i64(vc, lit);
            } else {
                tcg_gen_mov_i64(vc, load_gpr(ctx, rb));
            }

        vc = dest_gpr(ctx, rc);
        vb = load_gpr_lit(ctx, rb, lit, islit);

        if (fn7 == 0x28 && ra == 31) {
            /* Special case ORNOT as NOT.  */
            tcg_gen_not_i64(vc, vb);
        }

        va = load_gpr(ctx, ra);

            tcg_gen_and_i64(vc, va, vb);

            tcg_gen_andc_i64(vc, va, vb);

            tmp = tcg_temp_new();
            tcg_gen_andi_i64(tmp, va, 1);
            tcg_gen_movcond_i64(TCG_COND_NE, vc, tmp, load_zero(ctx),
                                vb, load_gpr(ctx, rc));

            tmp = tcg_temp_new();
            tcg_gen_andi_i64(tmp, va, 1);
            tcg_gen_movcond_i64(TCG_COND_EQ, vc, tmp, load_zero(ctx),
                                vb, load_gpr(ctx, rc));

            tcg_gen_or_i64(vc, va, vb);

            tcg_gen_movcond_i64(TCG_COND_EQ, vc, va, load_zero(ctx),
                                vb, load_gpr(ctx, rc));

            tcg_gen_movcond_i64(TCG_COND_NE, vc, va, load_zero(ctx),
                                vb, load_gpr(ctx, rc));

            tcg_gen_orc_i64(vc, va, vb);

            tcg_gen_xor_i64(vc, va, vb);

            tcg_gen_movcond_i64(TCG_COND_LT, vc, va, load_zero(ctx),
                                vb, load_gpr(ctx, rc));

            tcg_gen_movcond_i64(TCG_COND_GE, vc, va, load_zero(ctx),
                                vb, load_gpr(ctx, rc));

            tcg_gen_eqv_i64(vc, va, vb);

            uint64_t amask = ctx->tb->flags >> TB_FLAGS_AMASK_SHIFT;
            tcg_gen_andi_i64(vc, vb, ~amask);

            tcg_gen_movcond_i64(TCG_COND_LE, vc, va, load_zero(ctx),
                                vb, load_gpr(ctx, rc));

            tcg_gen_movcond_i64(TCG_COND_GT, vc, va, load_zero(ctx),
                                vb, load_gpr(ctx, rc));

            tcg_gen_movi_i64(vc, ctx->implver);
= dest_gpr(ctx
, rc
);
1758 va
= load_gpr(ctx
, ra
);
1762 gen_msk_l(ctx
, vc
, va
, rb
, islit
, lit
, 0x01);
1766 gen_ext_l(ctx
, vc
, va
, rb
, islit
, lit
, 0x01);
1770 gen_ins_l(ctx
, vc
, va
, rb
, islit
, lit
, 0x01);
1774 gen_msk_l(ctx
, vc
, va
, rb
, islit
, lit
, 0x03);
1778 gen_ext_l(ctx
, vc
, va
, rb
, islit
, lit
, 0x03);
1782 gen_ins_l(ctx
, vc
, va
, rb
, islit
, lit
, 0x03);
1786 gen_msk_l(ctx
, vc
, va
, rb
, islit
, lit
, 0x0f);
1790 gen_ext_l(ctx
, vc
, va
, rb
, islit
, lit
, 0x0f);
1794 gen_ins_l(ctx
, vc
, va
, rb
, islit
, lit
, 0x0f);
1799 gen_zapnoti(vc
, va
, ~lit
);
1801 gen_helper_zap(vc
, va
, load_gpr(ctx
, rb
));
1807 gen_zapnoti(vc
, va
, lit
);
1809 gen_helper_zapnot(vc
, va
, load_gpr(ctx
, rb
));
1814 gen_msk_l(ctx
, vc
, va
, rb
, islit
, lit
, 0xff);
1819 tcg_gen_shri_i64(vc
, va
, lit
& 0x3f);
1821 tmp
= tcg_temp_new();
1822 vb
= load_gpr(ctx
, rb
);
1823 tcg_gen_andi_i64(tmp
, vb
, 0x3f);
1824 tcg_gen_shr_i64(vc
, va
, tmp
);
1830 gen_ext_l(ctx
, vc
, va
, rb
, islit
, lit
, 0xff);
1835 tcg_gen_shli_i64(vc
, va
, lit
& 0x3f);
1837 tmp
= tcg_temp_new();
1838 vb
= load_gpr(ctx
, rb
);
1839 tcg_gen_andi_i64(tmp
, vb
, 0x3f);
1840 tcg_gen_shl_i64(vc
, va
, tmp
);
1846 gen_ins_l(ctx
, vc
, va
, rb
, islit
, lit
, 0xff);
1851 tcg_gen_sari_i64(vc
, va
, lit
& 0x3f);
1853 tmp
= tcg_temp_new();
1854 vb
= load_gpr(ctx
, rb
);
1855 tcg_gen_andi_i64(tmp
, vb
, 0x3f);
1856 tcg_gen_sar_i64(vc
, va
, tmp
);
1862 gen_msk_h(ctx
, vc
, va
, rb
, islit
, lit
, 0x03);
1866 gen_ins_h(ctx
, vc
, va
, rb
, islit
, lit
, 0x03);
1870 gen_ext_h(ctx
, vc
, va
, rb
, islit
, lit
, 0x03);
1874 gen_msk_h(ctx
, vc
, va
, rb
, islit
, lit
, 0x0f);
1878 gen_ins_h(ctx
, vc
, va
, rb
, islit
, lit
, 0x0f);
1882 gen_ext_h(ctx
, vc
, va
, rb
, islit
, lit
, 0x0f);
1886 gen_msk_h(ctx
, vc
, va
, rb
, islit
, lit
, 0xff);
1890 gen_ins_h(ctx
, vc
, va
, rb
, islit
, lit
, 0xff);
1894 gen_ext_h(ctx
, vc
, va
, rb
, islit
, lit
, 0xff);
        vc = dest_gpr(ctx, rc);
        vb = load_gpr_lit(ctx, rb, lit, islit);
        va = load_gpr(ctx, ra);

            tcg_gen_mul_i64(vc, va, vb);
            tcg_gen_ext32s_i64(vc, vc);

            tcg_gen_mul_i64(vc, va, vb);

            tmp = tcg_temp_new();
            tcg_gen_mulu2_i64(tmp, vc, va, vb);

            tmp = tcg_temp_new();
            tcg_gen_ext32s_i64(tmp, va);
            tcg_gen_ext32s_i64(vc, vb);
            tcg_gen_mul_i64(tmp, tmp, vc);
            tcg_gen_ext32s_i64(vc, tmp);
            gen_helper_check_overflow(cpu_env, vc, tmp);

            tmp = tcg_temp_new();
            tmp2 = tcg_temp_new();
            tcg_gen_muls2_i64(vc, tmp, va, vb);
            tcg_gen_sari_i64(tmp2, vc, 63);
            gen_helper_check_overflow(cpu_env, tmp, tmp2);
            tcg_temp_free(tmp2);
        REQUIRE_TB_FLAG(TB_FLAGS_AMASK_FIX);
        vc = dest_fpr(ctx, rc);
        switch (fpfn) { /* fn11 & 0x3F */

            t32 = tcg_temp_new_i32();
            va = load_gpr(ctx, ra);
            tcg_gen_trunc_i64_i32(t32, va);
            gen_helper_memory_to_s(vc, t32);
            tcg_temp_free_i32(t32);

            vb = load_fpr(ctx, rb);
            gen_helper_sqrtf(vc, cpu_env, vb);

            gen_sqrts(ctx, rb, rc, fn11);

            t32 = tcg_temp_new_i32();
            va = load_gpr(ctx, ra);
            tcg_gen_trunc_i64_i32(t32, va);
            gen_helper_memory_to_f(vc, t32);
            tcg_temp_free_i32(t32);

            va = load_gpr(ctx, ra);
            tcg_gen_mov_i64(vc, va);

            vb = load_fpr(ctx, rb);
            gen_helper_sqrtg(vc, cpu_env, vb);

            gen_sqrtt(ctx, rb, rc, fn11);

        /* VAX floating point */
        /* XXX: rounding mode and trap are ignored (!) */
        vc = dest_fpr(ctx, rc);
        vb = load_fpr(ctx, rb);
        va = load_fpr(ctx, ra);
        switch (fpfn) { /* fn11 & 0x3F */

            gen_helper_addf(vc, cpu_env, va, vb);

            gen_helper_subf(vc, cpu_env, va, vb);

            gen_helper_mulf(vc, cpu_env, va, vb);

            gen_helper_divf(vc, cpu_env, va, vb);

            gen_helper_addg(vc, cpu_env, va, vb);

            gen_helper_subg(vc, cpu_env, va, vb);

            gen_helper_mulg(vc, cpu_env, va, vb);

            gen_helper_divg(vc, cpu_env, va, vb);

            gen_helper_cmpgeq(vc, cpu_env, va, vb);

            gen_helper_cmpglt(vc, cpu_env, va, vb);

            gen_helper_cmpgle(vc, cpu_env, va, vb);

            gen_helper_cvtgf(vc, cpu_env, vb);

            gen_helper_cvtgq(vc, cpu_env, vb);

            gen_helper_cvtqf(vc, cpu_env, vb);

            gen_helper_cvtqg(vc, cpu_env, vb);
        /* IEEE floating-point */
        switch (fpfn) { /* fn11 & 0x3F */

            gen_adds(ctx, ra, rb, rc, fn11);

            gen_subs(ctx, ra, rb, rc, fn11);

            gen_muls(ctx, ra, rb, rc, fn11);

            gen_divs(ctx, ra, rb, rc, fn11);

            gen_addt(ctx, ra, rb, rc, fn11);

            gen_subt(ctx, ra, rb, rc, fn11);

            gen_mult(ctx, ra, rb, rc, fn11);

            gen_divt(ctx, ra, rb, rc, fn11);

            gen_cmptun(ctx, ra, rb, rc, fn11);

            gen_cmpteq(ctx, ra, rb, rc, fn11);

            gen_cmptlt(ctx, ra, rb, rc, fn11);

            gen_cmptle(ctx, ra, rb, rc, fn11);

            if (fn11 == 0x2AC || fn11 == 0x6AC) {
                gen_cvtst(ctx, rb, rc, fn11);
            } else {
                gen_cvtts(ctx, rb, rc, fn11);
            }

            gen_cvttq(ctx, rb, rc, fn11);

            gen_cvtqs(ctx, rb, rc, fn11);

            gen_cvtqt(ctx, rb, rc, fn11);
            vc = dest_fpr(ctx, rc);
            vb = load_fpr(ctx, rb);

            /* Special case CPYS as FNOP.  */

            vc = dest_fpr(ctx, rc);
            va = load_fpr(ctx, ra);
            if (ra == rb) {
                /* Special case CPYS as FMOV.  */
                tcg_gen_mov_i64(vc, va);
            } else {
                vb = load_fpr(ctx, rb);
                gen_cpy_mask(vc, va, vb, 0, 0x8000000000000000ULL);
            }

            vc = dest_fpr(ctx, rc);
            vb = load_fpr(ctx, rb);
            va = load_fpr(ctx, ra);
            gen_cpy_mask(vc, va, vb, 1, 0x8000000000000000ULL);

            vc = dest_fpr(ctx, rc);
            vb = load_fpr(ctx, rb);
            va = load_fpr(ctx, ra);
            gen_cpy_mask(vc, va, vb, 0, 0xFFF0000000000000ULL);

            va = load_fpr(ctx, ra);
            gen_helper_store_fpcr(cpu_env, va);
            if (ctx->tb_rm == QUAL_RM_D) {
                /* Re-do the copy of the rounding mode to fp_status
                   the next time we use dynamic rounding.  */
                ctx->tb_rm = -1;
            }

            va = dest_fpr(ctx, ra);
            gen_helper_load_fpcr(va, cpu_env);

            gen_fcmov(ctx, TCG_COND_EQ, ra, rb, rc);

            gen_fcmov(ctx, TCG_COND_NE, ra, rb, rc);

            gen_fcmov(ctx, TCG_COND_LT, ra, rb, rc);

            gen_fcmov(ctx, TCG_COND_GE, ra, rb, rc);

            gen_fcmov(ctx, TCG_COND_LE, ra, rb, rc);

            gen_fcmov(ctx, TCG_COND_GT, ra, rb, rc);

        case 0x030: /* CVTQL */
        case 0x130: /* CVTQL/V */
        case 0x530: /* CVTQL/SV */
            vc = dest_fpr(ctx, rc);
            vb = load_fpr(ctx, rb);
            gen_helper_cvtql(vc, cpu_env, vb);
            gen_fp_exc_raise(rc, fn11);
            break;
        switch ((uint16_t)disp16) {

            va = dest_gpr(ctx, ra);
            if (ctx->tb->cflags & CF_USE_ICOUNT) {
                gen_io_start();
                gen_helper_load_pcc(va, cpu_env);
                gen_io_end();
                ret = EXIT_PC_STALE;
            } else {
                gen_helper_load_pcc(va, cpu_env);
            }

        /* HW_MFPR (PALcode) */
#ifndef CONFIG_USER_ONLY
        REQUIRE_TB_FLAG(TB_FLAGS_PAL_MODE);
        va = dest_gpr(ctx, ra);
        ret = gen_mfpr(ctx, va, insn & 0xffff);
#endif

        /* JMP, JSR, RET, JSR_COROUTINE.  These only differ by the branch
           prediction stack action, which of course we don't implement.  */
        vb = load_gpr(ctx, rb);
        tcg_gen_andi_i64(cpu_pc, vb, ~3);
        if (ra != 31) {
            tcg_gen_movi_i64(cpu_ir[ra], ctx->pc);
        }
        ret = EXIT_PC_UPDATED;
        /* HW_LD (PALcode) */
#ifndef CONFIG_USER_ONLY
        REQUIRE_TB_FLAG(TB_FLAGS_PAL_MODE);
        {
            TCGv addr = tcg_temp_new();
            vb = load_gpr(ctx, rb);
            va = dest_gpr(ctx, ra);

            tcg_gen_addi_i64(addr, vb, disp12);
            switch ((insn >> 12) & 0xF) {

                /* Longword physical access (hw_ldl/p) */
                gen_helper_ldl_phys(va, cpu_env, addr);

                /* Quadword physical access (hw_ldq/p) */
                gen_helper_ldq_phys(va, cpu_env, addr);

                /* Longword physical access with lock (hw_ldl_l/p) */
                gen_helper_ldl_l_phys(va, cpu_env, addr);

                /* Quadword physical access with lock (hw_ldq_l/p) */
                gen_helper_ldq_l_phys(va, cpu_env, addr);

                /* Longword virtual PTE fetch (hw_ldl/v) */
                /* Quadword virtual PTE fetch (hw_ldq/v) */
                /* Invalid */
                /* Invalid */
                /* Longword virtual access (hw_ldl) */
                /* Quadword virtual access (hw_ldq) */

                /* Longword virtual access with protection check (hw_ldl/w) */
                tcg_gen_qemu_ld_i64(va, addr, MMU_KERNEL_IDX, MO_LESL);

                /* Quadword virtual access with protection check (hw_ldq/w) */
                tcg_gen_qemu_ld_i64(va, addr, MMU_KERNEL_IDX, MO_LEQ);

                /* Longword virtual access with alt access mode (hw_ldl/a) */
                /* Quadword virtual access with alt access mode (hw_ldq/a) */

                /* Longword virtual access with alternate access mode and
                   protection checks (hw_ldl/wa) */
                tcg_gen_qemu_ld_i64(va, addr, MMU_USER_IDX, MO_LESL);

                /* Quadword virtual access with alternate access mode and
                   protection checks (hw_ldq/wa) */
                tcg_gen_qemu_ld_i64(va, addr, MMU_USER_IDX, MO_LEQ);
            }
            tcg_temp_free(addr);
        }
#endif
        vc = dest_gpr(ctx, rc);

        if (fn7 == 0x70) {
            REQUIRE_TB_FLAG(TB_FLAGS_AMASK_FIX);
            va = load_fpr(ctx, ra);
            tcg_gen_mov_i64(vc, va);
        } else if (fn7 == 0x78) {
            REQUIRE_TB_FLAG(TB_FLAGS_AMASK_FIX);
            t32 = tcg_temp_new_i32();
            va = load_fpr(ctx, ra);
            gen_helper_s_to_memory(t32, va);
            tcg_gen_ext_i32_i64(vc, t32);
            tcg_temp_free_i32(t32);
        }

        vb = load_gpr_lit(ctx, rb, lit, islit);

            REQUIRE_TB_FLAG(TB_FLAGS_AMASK_BWX);
            tcg_gen_ext8s_i64(vc, vb);

            REQUIRE_TB_FLAG(TB_FLAGS_AMASK_BWX);
            tcg_gen_ext16s_i64(vc, vb);

            REQUIRE_TB_FLAG(TB_FLAGS_AMASK_CIX);
            gen_helper_ctpop(vc, vb);

            REQUIRE_TB_FLAG(TB_FLAGS_AMASK_MVI);
            va = load_gpr(ctx, ra);
            gen_helper_perr(vc, va, vb);

            REQUIRE_TB_FLAG(TB_FLAGS_AMASK_CIX);
            gen_helper_ctlz(vc, vb);

            REQUIRE_TB_FLAG(TB_FLAGS_AMASK_CIX);
            gen_helper_cttz(vc, vb);

            REQUIRE_TB_FLAG(TB_FLAGS_AMASK_MVI);
            gen_helper_unpkbw(vc, vb);

            REQUIRE_TB_FLAG(TB_FLAGS_AMASK_MVI);
            gen_helper_unpkbl(vc, vb);

            REQUIRE_TB_FLAG(TB_FLAGS_AMASK_MVI);
            gen_helper_pkwb(vc, vb);

            REQUIRE_TB_FLAG(TB_FLAGS_AMASK_MVI);
            gen_helper_pklb(vc, vb);

            REQUIRE_TB_FLAG(TB_FLAGS_AMASK_MVI);
            va = load_gpr(ctx, ra);
            gen_helper_minsb8(vc, va, vb);

            REQUIRE_TB_FLAG(TB_FLAGS_AMASK_MVI);
            va = load_gpr(ctx, ra);
            gen_helper_minsw4(vc, va, vb);

            REQUIRE_TB_FLAG(TB_FLAGS_AMASK_MVI);
            va = load_gpr(ctx, ra);
            gen_helper_minub8(vc, va, vb);

            REQUIRE_TB_FLAG(TB_FLAGS_AMASK_MVI);
            va = load_gpr(ctx, ra);
            gen_helper_minuw4(vc, va, vb);

            REQUIRE_TB_FLAG(TB_FLAGS_AMASK_MVI);
            va = load_gpr(ctx, ra);
            gen_helper_maxub8(vc, va, vb);

            REQUIRE_TB_FLAG(TB_FLAGS_AMASK_MVI);
            va = load_gpr(ctx, ra);
            gen_helper_maxuw4(vc, va, vb);

            REQUIRE_TB_FLAG(TB_FLAGS_AMASK_MVI);
            va = load_gpr(ctx, ra);
            gen_helper_maxsb8(vc, va, vb);

            REQUIRE_TB_FLAG(TB_FLAGS_AMASK_MVI);
            va = load_gpr(ctx, ra);
            gen_helper_maxsw4(vc, va, vb);
        /* HW_MTPR (PALcode) */
#ifndef CONFIG_USER_ONLY
        REQUIRE_TB_FLAG(TB_FLAGS_PAL_MODE);
        vb = load_gpr(ctx, rb);
        ret = gen_mtpr(ctx, vb, insn & 0xffff);
#endif

        /* HW_RET (PALcode) */
#ifndef CONFIG_USER_ONLY
        REQUIRE_TB_FLAG(TB_FLAGS_PAL_MODE);
        if (rb == 31) {
            /* Pre-EV6 CPUs interpreted this as HW_REI, loading the return
               address from EXC_ADDR.  This turns out to be useful for our
               emulation PALcode, so continue to accept it.  */
            tmp = tcg_temp_new();
            tcg_gen_ld_i64(tmp, cpu_env, offsetof(CPUAlphaState, exc_addr));
            gen_helper_hw_ret(cpu_env, tmp);
            tcg_temp_free(tmp);
        } else {
            gen_helper_hw_ret(cpu_env, load_gpr(ctx, rb));
        }
        ret = EXIT_PC_UPDATED;
#endif

        /* HW_ST (PALcode) */
#ifndef CONFIG_USER_ONLY
        REQUIRE_TB_FLAG(TB_FLAGS_PAL_MODE);
        {
            TCGv addr = tcg_temp_new();
            va = load_gpr(ctx, ra);
            vb = load_gpr(ctx, rb);

            tcg_gen_addi_i64(addr, vb, disp12);
            switch ((insn >> 12) & 0xF) {

                /* Longword physical access */
                gen_helper_stl_phys(cpu_env, addr, va);

                /* Quadword physical access */
                gen_helper_stq_phys(cpu_env, addr, va);

                /* Longword physical access with lock */
                gen_helper_stl_c_phys(dest_gpr(ctx, ra), cpu_env, addr, va);

                /* Quadword physical access with lock */
                gen_helper_stq_c_phys(dest_gpr(ctx, ra), cpu_env, addr, va);

                /* Longword virtual access */
                /* Quadword virtual access */
                /* Longword virtual access with alternate access mode */
                /* Quadword virtual access with alternate access mode */
            }
            tcg_temp_free(addr);
        }
#endif
        gen_load_mem(ctx, &gen_qemu_ldf, ra, rb, disp16, 1, 0);

        gen_load_mem(ctx, &gen_qemu_ldg, ra, rb, disp16, 1, 0);

        gen_load_mem(ctx, &gen_qemu_lds, ra, rb, disp16, 1, 0);

        gen_load_mem(ctx, &tcg_gen_qemu_ld64, ra, rb, disp16, 1, 0);

        gen_store_mem(ctx, &gen_qemu_stf, ra, rb, disp16, 1, 0);

        gen_store_mem(ctx, &gen_qemu_stg, ra, rb, disp16, 1, 0);

        gen_store_mem(ctx, &gen_qemu_sts, ra, rb, disp16, 1, 0);

        gen_store_mem(ctx, &tcg_gen_qemu_st64, ra, rb, disp16, 1, 0);

        gen_load_mem(ctx, &tcg_gen_qemu_ld32s, ra, rb, disp16, 0, 0);

        gen_load_mem(ctx, &tcg_gen_qemu_ld64, ra, rb, disp16, 0, 0);

        gen_load_mem(ctx, &gen_qemu_ldl_l, ra, rb, disp16, 0, 0);

        gen_load_mem(ctx, &gen_qemu_ldq_l, ra, rb, disp16, 0, 0);

        gen_store_mem(ctx, &tcg_gen_qemu_st32, ra, rb, disp16, 0, 0);

        gen_store_mem(ctx, &tcg_gen_qemu_st64, ra, rb, disp16, 0, 0);

        ret = gen_store_conditional(ctx, ra, rb, disp16, 0);

        ret = gen_store_conditional(ctx, ra, rb, disp16, 1);
        ret = gen_bdirect(ctx, ra, disp21);
        break;
    case 0x31: /* FBEQ */
        ret = gen_fbcond(ctx, TCG_COND_EQ, ra, disp21);
        break;
    case 0x32: /* FBLT */
        ret = gen_fbcond(ctx, TCG_COND_LT, ra, disp21);
        break;
    case 0x33: /* FBLE */
        ret = gen_fbcond(ctx, TCG_COND_LE, ra, disp21);
        break;

        ret = gen_bdirect(ctx, ra, disp21);
        break;
    case 0x35: /* FBNE */
        ret = gen_fbcond(ctx, TCG_COND_NE, ra, disp21);
        break;
    case 0x36: /* FBGE */
        ret = gen_fbcond(ctx, TCG_COND_GE, ra, disp21);
        break;
    case 0x37: /* FBGT */
        ret = gen_fbcond(ctx, TCG_COND_GT, ra, disp21);
        break;

        ret = gen_bcond(ctx, TCG_COND_EQ, ra, disp21, 1);
        break;

        ret = gen_bcond(ctx, TCG_COND_EQ, ra, disp21, 0);
        break;

        ret = gen_bcond(ctx, TCG_COND_LT, ra, disp21, 0);
        break;

        ret = gen_bcond(ctx, TCG_COND_LE, ra, disp21, 0);
        break;

        ret = gen_bcond(ctx, TCG_COND_NE, ra, disp21, 1);
        break;

        ret = gen_bcond(ctx, TCG_COND_NE, ra, disp21, 0);
        break;

        ret = gen_bcond(ctx, TCG_COND_GE, ra, disp21, 0);
        break;

        ret = gen_bcond(ctx, TCG_COND_GT, ra, disp21, 0);
        break;

        ret = gen_invalid(ctx);
        break;
    }

    return ret;
}
static inline void gen_intermediate_code_internal(AlphaCPU *cpu,
                                                  TranslationBlock *tb,
                                                  bool search_pc)
{
    CPUState *cs = CPU(cpu);
    CPUAlphaState *env = &cpu->env;
    DisasContext ctx, *ctxp = &ctx;
    target_ulong pc_start;
    target_ulong pc_mask;
    uint32_t insn;
    CPUBreakpoint *bp;
    int j, lj = -1;
    ExitStatus ret;
    int num_insns;
    int max_insns;

    pc_start = tb->pc;

    ctx.tb = tb;
    ctx.pc = pc_start;
    ctx.mem_idx = cpu_mmu_index(env);
    ctx.implver = env->implver;
    ctx.singlestep_enabled = cs->singlestep_enabled;

    /* ??? Every TB begins with unset rounding mode, to be initialized on
       the first fp insn of the TB.  Alternately we could define a proper
       default for every TB (e.g. QUAL_RM_N or QUAL_RM_D) and make sure
       to reset the FP_STATUS to that default at the end of any TB that
       changes the default.  We could even (gasp) dynamically figure out
       what default would be most efficient given the running program.  */
    ctx.tb_rm = -1;
    /* Similarly for flush-to-zero.  */
    ctx.tb_ftz = -1;

    num_insns = 0;
    max_insns = tb->cflags & CF_COUNT_MASK;
    if (max_insns == 0) {
        max_insns = CF_COUNT_MASK;
    }

    if (in_superpage(&ctx, pc_start)) {
        pc_mask = (1ULL << 41) - 1;
    } else {
        pc_mask = ~TARGET_PAGE_MASK;
    }

    do {
        if (unlikely(!QTAILQ_EMPTY(&cs->breakpoints))) {
            QTAILQ_FOREACH(bp, &cs->breakpoints, entry) {
                if (bp->pc == ctx.pc) {
                    gen_excp(&ctx, EXCP_DEBUG, 0);
                    break;
                }
            }
        }
        if (search_pc) {
            j = tcg_op_buf_count();
            if (lj < j) {
                lj++;
                while (lj < j) {
                    tcg_ctx.gen_opc_instr_start[lj++] = 0;
                }
            }
            tcg_ctx.gen_opc_pc[lj] = ctx.pc;
            tcg_ctx.gen_opc_instr_start[lj] = 1;
            tcg_ctx.gen_opc_icount[lj] = num_insns;
        }
        if (num_insns + 1 == max_insns && (tb->cflags & CF_LAST_IO)) {
            gen_io_start();
        }
        insn = cpu_ldl_code(env, ctx.pc);
        num_insns++;

        if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP | CPU_LOG_TB_OP_OPT))) {
            tcg_gen_debug_insn_start(ctx.pc);
        }

        TCGV_UNUSED_I64(ctx.zero);
        TCGV_UNUSED_I64(ctx.sink);
        TCGV_UNUSED_I64(ctx.lit);

        ctx.pc += 4;
        ret = translate_one(ctxp, insn);

        if (!TCGV_IS_UNUSED_I64(ctx.sink)) {
            tcg_gen_discard_i64(ctx.sink);
            tcg_temp_free(ctx.sink);
        }
        if (!TCGV_IS_UNUSED_I64(ctx.zero)) {
            tcg_temp_free(ctx.zero);
        }
        if (!TCGV_IS_UNUSED_I64(ctx.lit)) {
            tcg_temp_free(ctx.lit);
        }

        /* If we reach a page boundary, are single stepping,
           or exhaust instruction count, stop generation.  */
        if (ret == NO_EXIT
            && ((ctx.pc & pc_mask) == 0
                || tcg_op_buf_full()
                || num_insns >= max_insns
                || singlestep
                || ctx.singlestep_enabled)) {
            ret = EXIT_PC_STALE;
        }
    } while (ret == NO_EXIT);

    if (tb->cflags & CF_LAST_IO) {
        gen_io_end();
    }

    switch (ret) {
    case EXIT_PC_STALE:
        tcg_gen_movi_i64(cpu_pc, ctx.pc);
        /* FALLTHRU */
    case EXIT_PC_UPDATED:
        if (ctx.singlestep_enabled) {
            gen_excp_1(EXCP_DEBUG, 0);
        } else {
            tcg_gen_exit_tb(0);
        }
        break;
    }

    gen_tb_end(tb, num_insns);

    if (search_pc) {
        j = tcg_op_buf_count();
        lj++;
        while (lj <= j) {
            tcg_ctx.gen_opc_instr_start[lj++] = 0;
        }
    } else {
        tb->size = ctx.pc - pc_start;
        tb->icount = num_insns;
    }

    if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
        qemu_log("IN: %s\n", lookup_symbol(pc_start));
        log_target_disas(env, pc_start, ctx.pc - pc_start, 1);
    }
}
void gen_intermediate_code (CPUAlphaState *env, struct TranslationBlock *tb)
{
    gen_intermediate_code_internal(alpha_env_get_cpu(env), tb, false);
}

void gen_intermediate_code_pc (CPUAlphaState *env, struct TranslationBlock *tb)
{
    gen_intermediate_code_internal(alpha_env_get_cpu(env), tb, true);
}

void restore_state_to_opc(CPUAlphaState *env, TranslationBlock *tb, int pc_pos)
{
    env->pc = tcg_ctx.gen_opc_pc[pc_pos];
}