2 * Alpha emulation cpu translation for qemu.
4 * Copyright (c) 2007 Jocelyn Mayer
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2.1 of the License, or (at your option) any later version.
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
20 #include "qemu/osdep.h"
22 #include "sysemu/cpus.h"
23 #include "disas/disas.h"
24 #include "qemu/host-utils.h"
25 #include "exec/exec-all.h"
26 #include "tcg/tcg-op.h"
27 #include "exec/cpu_ldst.h"
28 #include "exec/helper-proto.h"
29 #include "exec/helper-gen.h"
30 #include "exec/translator.h"
34 #undef ALPHA_DEBUG_DISAS
35 #define CONFIG_SOFTFLOAT_INLINE
37 #ifdef ALPHA_DEBUG_DISAS
38 # define LOG_DISAS(...) qemu_log_mask(CPU_LOG_TB_IN_ASM, ## __VA_ARGS__)
40 # define LOG_DISAS(...) do { } while (0)
43 typedef struct DisasContext DisasContext
;
45 DisasContextBase base
;
47 #ifdef CONFIG_USER_ONLY
55 /* implver and amask values for this CPU. */
59 /* Current rounding mode for this TB. */
61 /* Current flush-to-zero setting for this TB. */
64 /* The set of registers active in the current context. */
67 /* Temporaries for $31 and $f31 as source and destination. */
72 #ifdef CONFIG_USER_ONLY
73 #define UNALIGN(C) (C)->unalign
78 /* Target-specific return values from translate_one, indicating the
79 state of the TB. Note that DISAS_NEXT indicates that we are not
81 #define DISAS_PC_UPDATED_NOCHAIN DISAS_TARGET_0
82 #define DISAS_PC_UPDATED DISAS_TARGET_1
83 #define DISAS_PC_STALE DISAS_TARGET_2
85 /* global register indexes */
86 static TCGv cpu_std_ir
[31];
87 static TCGv cpu_fir
[31];
89 static TCGv cpu_lock_addr
;
90 static TCGv cpu_lock_value
;
92 #ifndef CONFIG_USER_ONLY
93 static TCGv cpu_pal_ir
[31];
96 #include "exec/gen-icount.h"
98 void alpha_translate_init(void)
100 #define DEF_VAR(V) { &cpu_##V, #V, offsetof(CPUAlphaState, V) }
102 typedef struct { TCGv
*var
; const char *name
; int ofs
; } GlobalVar
;
103 static const GlobalVar vars
[] = {
111 /* Use the symbolic register names that match the disassembler. */
112 static const char greg_names
[31][4] = {
113 "v0", "t0", "t1", "t2", "t3", "t4", "t5", "t6",
114 "t7", "s0", "s1", "s2", "s3", "s4", "s5", "fp",
115 "a0", "a1", "a2", "a3", "a4", "a5", "t8", "t9",
116 "t10", "t11", "ra", "t12", "at", "gp", "sp"
118 static const char freg_names
[31][4] = {
119 "f0", "f1", "f2", "f3", "f4", "f5", "f6", "f7",
120 "f8", "f9", "f10", "f11", "f12", "f13", "f14", "f15",
121 "f16", "f17", "f18", "f19", "f20", "f21", "f22", "f23",
122 "f24", "f25", "f26", "f27", "f28", "f29", "f30"
124 #ifndef CONFIG_USER_ONLY
125 static const char shadow_names
[8][8] = {
126 "pal_t7", "pal_s0", "pal_s1", "pal_s2",
127 "pal_s3", "pal_s4", "pal_s5", "pal_t11"
133 for (i
= 0; i
< 31; i
++) {
134 cpu_std_ir
[i
] = tcg_global_mem_new_i64(cpu_env
,
135 offsetof(CPUAlphaState
, ir
[i
]),
139 for (i
= 0; i
< 31; i
++) {
140 cpu_fir
[i
] = tcg_global_mem_new_i64(cpu_env
,
141 offsetof(CPUAlphaState
, fir
[i
]),
145 #ifndef CONFIG_USER_ONLY
146 memcpy(cpu_pal_ir
, cpu_std_ir
, sizeof(cpu_pal_ir
));
147 for (i
= 0; i
< 8; i
++) {
148 int r
= (i
== 7 ? 25 : i
+ 8);
149 cpu_pal_ir
[r
] = tcg_global_mem_new_i64(cpu_env
,
150 offsetof(CPUAlphaState
,
156 for (i
= 0; i
< ARRAY_SIZE(vars
); ++i
) {
157 const GlobalVar
*v
= &vars
[i
];
158 *v
->var
= tcg_global_mem_new_i64(cpu_env
, v
->ofs
, v
->name
);
/* NOTE(review): this extraction is missing interior lines (braces, early
   returns); comments below describe only the visible fragments. */
/* load_zero: return a cached constant-zero TCG value, creating it on first
   use and stashing it in ctx->zero.  Used as the read value of $31/$f31. */
162 static TCGv
load_zero(DisasContext
*ctx
)
165 ctx
->zero
= tcg_constant_i64(0);
/* dest_sink: return a throwaway TCG temporary cached in ctx->sink, used as
   the write target when the destination register is $31/$f31. */
170 static TCGv
dest_sink(DisasContext
*ctx
)
173 ctx
->sink
= tcg_temp_new();
/* free_context_temps: discard and free the per-instruction sink temporary
   (ctx->sink) so it is not live across a branch/label. */
178 static void free_context_temps(DisasContext
*ctx
)
181 tcg_gen_discard_i64(ctx
->sink
);
182 tcg_temp_free(ctx
->sink
);
/* NOTE(review): this extraction is missing interior lines (the reg < 31
   fast-path returns are not visible); comments describe the visible
   fragments only. */
/* load_gpr: source operand for integer register REG; registers 0..30 map to
   real globals, $31 reads as zero via load_zero(). */
187 static TCGv
load_gpr(DisasContext
*ctx
, unsigned reg
)
189 if (likely(reg
< 31)) {
192 return load_zero(ctx
);
/* load_gpr_lit: like load_gpr, but when ISLIT is set the 8-bit literal LIT
   is used instead of a register (returned as a constant i64). */
196 static TCGv
load_gpr_lit(DisasContext
*ctx
, unsigned reg
,
197 uint8_t lit
, bool islit
)
200 return tcg_constant_i64(lit
);
201 } else if (likely(reg
< 31)) {
204 return load_zero(ctx
);
/* dest_gpr: destination operand for integer register REG; writes to $31 are
   redirected to the discard sink. */
208 static TCGv
dest_gpr(DisasContext
*ctx
, unsigned reg
)
210 if (likely(reg
< 31)) {
213 return dest_sink(ctx
);
/* load_fpr: source operand for FP register REG; $f31 reads as zero. */
217 static TCGv
load_fpr(DisasContext
*ctx
, unsigned reg
)
219 if (likely(reg
< 31)) {
222 return load_zero(ctx
);
/* dest_fpr: destination operand for FP register REG; writes to $f31 go to
   the discard sink. */
226 static TCGv
dest_fpr(DisasContext
*ctx
, unsigned reg
)
228 if (likely(reg
< 31)) {
231 return dest_sink(ctx
);
235 static int get_flag_ofs(unsigned shift
)
237 int ofs
= offsetof(CPUAlphaState
, flags
);
239 ofs
+= 3 - (shift
/ 8);
/* ld_flag_byte: zero-extended load of one byte of env->flags, selected by
   SHIFT via get_flag_ofs(). */
246 static void ld_flag_byte(TCGv val
, unsigned shift
)
248 tcg_gen_ld8u_i64(val
, cpu_env
, get_flag_ofs(shift
));
/* st_flag_byte: store the low byte of VAL into the env->flags byte selected
   by SHIFT. */
251 static void st_flag_byte(TCGv val
, unsigned shift
)
253 tcg_gen_st8_i64(val
, cpu_env
, get_flag_ofs(shift
));
/* NOTE(review): this extraction is missing interior lines (temporary
   declarations, braces); comments describe the visible fragments only. */
/* gen_excp_1: emit a call to the exception helper with EXCEPTION and
   ERROR_CODE as i32 constants. */
256 static void gen_excp_1(int exception
, int error_code
)
260 tmp1
= tcg_constant_i32(exception
)
;
261 tmp2
= tcg_constant_i32(error_code
);
262 gen_helper_excp(cpu_env
, tmp1
, tmp2
);
/* gen_excp: flush the current PC to cpu_pc, raise the exception, and end
   the TB (the helper does not return to generated code). */
265 static DisasJumpType
gen_excp(DisasContext
*ctx
, int exception
, int error_code
)
267 tcg_gen_movi_i64(cpu_pc
, ctx
->base
.pc_next
);
268 gen_excp_1(exception
, error_code
);
269 return DISAS_NORETURN
;
/* gen_invalid: raise OPCDEC (illegal instruction) for the current insn. */
272 static inline DisasJumpType
gen_invalid(DisasContext
*ctx
)
274 return gen_excp(ctx
, EXCP_OPCDEC
, 0);
277 static void gen_ldf(DisasContext
*ctx
, TCGv dest
, TCGv addr
)
279 TCGv_i32 tmp32
= tcg_temp_new_i32();
280 tcg_gen_qemu_ld_i32(tmp32
, addr
, ctx
->mem_idx
, MO_LEUL
| UNALIGN(ctx
));
281 gen_helper_memory_to_f(dest
, tmp32
);
282 tcg_temp_free_i32(tmp32
);
285 static void gen_ldg(DisasContext
*ctx
, TCGv dest
, TCGv addr
)
287 TCGv tmp
= tcg_temp_new();
288 tcg_gen_qemu_ld_i64(tmp
, addr
, ctx
->mem_idx
, MO_LEUQ
| UNALIGN(ctx
));
289 gen_helper_memory_to_g(dest
, tmp
);
293 static void gen_lds(DisasContext
*ctx
, TCGv dest
, TCGv addr
)
295 TCGv_i32 tmp32
= tcg_temp_new_i32();
296 tcg_gen_qemu_ld_i32(tmp32
, addr
, ctx
->mem_idx
, MO_LEUL
| UNALIGN(ctx
));
297 gen_helper_memory_to_s(dest
, tmp32
);
298 tcg_temp_free_i32(tmp32
);
301 static void gen_ldt(DisasContext
*ctx
, TCGv dest
, TCGv addr
)
303 tcg_gen_qemu_ld_i64(dest
, addr
, ctx
->mem_idx
, MO_LEUQ
| UNALIGN(ctx
));
306 static void gen_load_fp(DisasContext
*ctx
, int ra
, int rb
, int32_t disp16
,
307 void (*func
)(DisasContext
*, TCGv
, TCGv
))
309 /* Loads to $f31 are prefetches, which we can treat as nops. */
310 if (likely(ra
!= 31)) {
311 TCGv addr
= tcg_temp_new();
312 tcg_gen_addi_i64(addr
, load_gpr(ctx
, rb
), disp16
);
313 func(ctx
, cpu_fir
[ra
], addr
);
318 static void gen_load_int(DisasContext
*ctx
, int ra
, int rb
, int32_t disp16
,
319 MemOp op
, bool clear
, bool locked
)
323 /* LDQ_U with ra $31 is UNOP. Other various loads are forms of
324 prefetches, which we can treat as nops. No worries about
325 missed exceptions here. */
326 if (unlikely(ra
== 31)) {
330 addr
= tcg_temp_new();
331 tcg_gen_addi_i64(addr
, load_gpr(ctx
, rb
), disp16
);
333 tcg_gen_andi_i64(addr
, addr
, ~0x7);
334 } else if (!locked
) {
339 tcg_gen_qemu_ld_i64(dest
, addr
, ctx
->mem_idx
, op
);
342 tcg_gen_mov_i64(cpu_lock_addr
, addr
);
343 tcg_gen_mov_i64(cpu_lock_value
, dest
);
348 static void gen_stf(DisasContext
*ctx
, TCGv src
, TCGv addr
)
350 TCGv_i32 tmp32
= tcg_temp_new_i32();
351 gen_helper_f_to_memory(tmp32
, addr
);
352 tcg_gen_qemu_st_i32(tmp32
, addr
, ctx
->mem_idx
, MO_LEUL
| UNALIGN(ctx
));
353 tcg_temp_free_i32(tmp32
);
356 static void gen_stg(DisasContext
*ctx
, TCGv src
, TCGv addr
)
358 TCGv tmp
= tcg_temp_new();
359 gen_helper_g_to_memory(tmp
, src
);
360 tcg_gen_qemu_st_i64(tmp
, addr
, ctx
->mem_idx
, MO_LEUQ
| UNALIGN(ctx
));
364 static void gen_sts(DisasContext
*ctx
, TCGv src
, TCGv addr
)
366 TCGv_i32 tmp32
= tcg_temp_new_i32();
367 gen_helper_s_to_memory(tmp32
, src
);
368 tcg_gen_qemu_st_i32(tmp32
, addr
, ctx
->mem_idx
, MO_LEUL
| UNALIGN(ctx
));
369 tcg_temp_free_i32(tmp32
);
372 static void gen_stt(DisasContext
*ctx
, TCGv src
, TCGv addr
)
374 tcg_gen_qemu_st_i64(src
, addr
, ctx
->mem_idx
, MO_LEUQ
| UNALIGN(ctx
));
377 static void gen_store_fp(DisasContext
*ctx
, int ra
, int rb
, int32_t disp16
,
378 void (*func
)(DisasContext
*, TCGv
, TCGv
))
380 TCGv addr
= tcg_temp_new();
381 tcg_gen_addi_i64(addr
, load_gpr(ctx
, rb
), disp16
);
382 func(ctx
, load_fpr(ctx
, ra
), addr
);
386 static void gen_store_int(DisasContext
*ctx
, int ra
, int rb
, int32_t disp16
,
387 MemOp op
, bool clear
)
391 addr
= tcg_temp_new();
392 tcg_gen_addi_i64(addr
, load_gpr(ctx
, rb
), disp16
);
394 tcg_gen_andi_i64(addr
, addr
, ~0x7);
399 src
= load_gpr(ctx
, ra
);
400 tcg_gen_qemu_st_i64(src
, addr
, ctx
->mem_idx
, op
);
/* NOTE(review): this extraction is missing interior lines (declarations of
   addr/val, braces, the final return); comments describe the visible
   fragments only. */
/* gen_store_conditional: emit STL_C/STQ_C.  The store succeeds only when
   the computed address still matches cpu_lock_addr (set by the paired
   load-locked); the actual memory update is a cmpxchg against the
   remembered cpu_lock_value.  ra receives 1 on success, 0 on failure, and
   the lock address is invalidated (-1) on both paths. */
405 static DisasJumpType
gen_store_conditional(DisasContext
*ctx
, int ra
, int rb
,
406 int32_t disp16
, int mem_idx
,
409 TCGLabel
*lab_fail
, *lab_done
;
412 addr
= tcg_temp_new_i64();
413 tcg_gen_addi_i64(addr
, load_gpr(ctx
, rb
), disp16
);
/* Sink temp must not be live across the branches below. */
414 free_context_temps(ctx
);
416 lab_fail
= gen_new_label();
417 lab_done
= gen_new_label();
418 tcg_gen_brcond_i64(TCG_COND_NE
, addr
, cpu_lock_addr
, lab_fail
);
419 tcg_temp_free_i64(addr
);
421 val
= tcg_temp_new_i64();
422 tcg_gen_atomic_cmpxchg_i64(val
, cpu_lock_addr
, cpu_lock_value
,
423 load_gpr(ctx
, ra
), mem_idx
, op
);
424 free_context_temps(ctx
);
/* ra = (old value == expected) i.e. 1 iff the cmpxchg stored. */
427 tcg_gen_setcond_i64(TCG_COND_EQ
, ctx
->ir
[ra
], val
, cpu_lock_value
);
429 tcg_temp_free_i64(val
);
430 tcg_gen_br(lab_done
);
432 gen_set_label(lab_fail
);
434 tcg_gen_movi_i64(ctx
->ir
[ra
], 0);
437 gen_set_label(lab_done
);
/* Invalidate the lock so a second conditional store fails. */
438 tcg_gen_movi_i64(cpu_lock_addr
, -1);
/* use_goto_tb: delegate to the generic translator check for whether a
   direct TB-to-TB chain to DEST is permitted. */
442 static bool use_goto_tb(DisasContext
*ctx
, uint64_t dest
)
444 return translator_use_goto_tb(&ctx
->base
, dest
);
/* NOTE(review): this extraction is missing interior lines (the ra != 31
   guard, goto_tb emission, braces); comments describe the visible
   fragments only. */
/* gen_bdirect: emit BR/BSR.  Write the return PC into ra, then either
   chain directly to the target TB or fall back to an indirect jump via
   cpu_pc. */
447 static DisasJumpType
gen_bdirect(DisasContext
*ctx
, int ra
, int32_t disp
)
/* Branch displacement is in longwords. */
449 uint64_t dest
= ctx
->base
.pc_next
+ (disp
<< 2);
452 tcg_gen_movi_i64(ctx
->ir
[ra
], ctx
->base
.pc_next
);
455 /* Notice branch-to-next; used to initialize RA with the PC. */
458 } else if (use_goto_tb(ctx
, dest
)) {
460 tcg_gen_movi_i64(cpu_pc
, dest
);
461 tcg_gen_exit_tb(ctx
->base
.tb
, 0);
462 return DISAS_NORETURN
;
/* Chaining not possible: set PC and return to the main loop. */
464 tcg_gen_movi_i64(cpu_pc
, dest
);
465 return DISAS_PC_UPDATED
;
/* NOTE(review): this extraction is missing interior lines (goto_tb setup,
   braces, the else arm opening); comments describe the visible fragments
   only. */
/* gen_bcond_internal: emit a conditional branch on (CMP cond 0) to
   pc_next + disp*4.  When direct chaining is allowed, emit a two-way
   exit_tb; otherwise select the new PC with a movcond and return to the
   main loop. */
469 static DisasJumpType
gen_bcond_internal(DisasContext
*ctx
, TCGCond cond
,
470 TCGv cmp
, int32_t disp
)
472 uint64_t dest
= ctx
->base
.pc_next
+ (disp
<< 2);
473 TCGLabel
*lab_true
= gen_new_label();
475 if (use_goto_tb(ctx
, dest
)) {
476 tcg_gen_brcondi_i64(cond
, cmp
, 0, lab_true
);
/* Fall-through (branch not taken) exit. */
479 tcg_gen_movi_i64(cpu_pc
, ctx
->base
.pc_next
);
480 tcg_gen_exit_tb(ctx
->base
.tb
, 0);
482 gen_set_label(lab_true
);
/* Branch-taken exit. */
484 tcg_gen_movi_i64(cpu_pc
, dest
);
485 tcg_gen_exit_tb(ctx
->base
.tb
, 1);
487 return DISAS_NORETURN
;
/* No chaining: cpu_pc = (cmp cond 0) ? dest : pc_next. */
489 TCGv_i64 z
= load_zero(ctx
);
490 TCGv_i64 d
= tcg_constant_i64(dest
);
491 TCGv_i64 p
= tcg_constant_i64(ctx
->base
.pc_next
);
493 tcg_gen_movcond_i64(cond
, cpu_pc
, cmp
, z
, d
, p
);
494 return DISAS_PC_UPDATED
;
498 static DisasJumpType
gen_bcond(DisasContext
*ctx
, TCGCond cond
, int ra
,
499 int32_t disp
, int mask
)
502 TCGv tmp
= tcg_temp_new();
505 tcg_gen_andi_i64(tmp
, load_gpr(ctx
, ra
), 1);
506 ret
= gen_bcond_internal(ctx
, cond
, tmp
, disp
);
510 return gen_bcond_internal(ctx
, cond
, load_gpr(ctx
, ra
), disp
);
513 /* Fold -0.0 for comparison with COND. */
/* NOTE(review): this extraction is missing the switch statement's case
   labels and braces; the three visible arms presumably correspond to the
   LE/GT, EQ/NE, and GE/LT condition groups — confirm against the full
   source. */
515 static void gen_fold_mzero(TCGCond cond
, TCGv dest
, TCGv src
)
/* mzero is the IEEE bit pattern of -0.0 (sign bit only). */
517 uint64_t mzero
= 1ull << 63;
522 /* For <= or >, the -0.0 value directly compares the way we want. */
523 tcg_gen_mov_i64(dest
, src
);
528 /* For == or !=, we can simply mask off the sign bit and compare. */
529 tcg_gen_andi_i64(dest
, src
, mzero
- 1);
534 /* For >= or <, map -0.0 to +0.0 via comparison and mask. */
535 tcg_gen_setcondi_i64(TCG_COND_NE
, dest
, src
, mzero
);
536 tcg_gen_neg_i64(dest
, dest
);
537 tcg_gen_and_i64(dest
, dest
, src
);
545 static DisasJumpType
gen_fbcond(DisasContext
*ctx
, TCGCond cond
, int ra
,
548 TCGv cmp_tmp
= tcg_temp_new();
551 gen_fold_mzero(cond
, cmp_tmp
, load_fpr(ctx
, ra
));
552 ret
= gen_bcond_internal(ctx
, cond
, cmp_tmp
, disp
);
553 tcg_temp_free(cmp_tmp
);
557 static void gen_fcmov(DisasContext
*ctx
, TCGCond cond
, int ra
, int rb
, int rc
)
562 vb
= load_fpr(ctx
, rb
);
564 gen_fold_mzero(cond
, va
, load_fpr(ctx
, ra
));
566 tcg_gen_movcond_i64(cond
, dest_fpr(ctx
, rc
), va
, z
, vb
, load_fpr(ctx
, rc
));
571 #define QUAL_RM_N 0x080 /* Round mode nearest even */
572 #define QUAL_RM_C 0x000 /* Round mode chopped */
573 #define QUAL_RM_M 0x040 /* Round mode minus infinity */
574 #define QUAL_RM_D 0x0c0 /* Round mode dynamic */
575 #define QUAL_RM_MASK 0x0c0
577 #define QUAL_U 0x100 /* Underflow enable (fp output) */
578 #define QUAL_V 0x100 /* Overflow enable (int output) */
579 #define QUAL_S 0x400 /* Software completion enable */
580 #define QUAL_I 0x200 /* Inexact detection enable */
582 static void gen_qual_roundmode(DisasContext
*ctx
, int fn11
)
586 fn11
&= QUAL_RM_MASK
;
587 if (fn11
== ctx
->tb_rm
) {
592 tmp
= tcg_temp_new_i32();
595 tcg_gen_movi_i32(tmp
, float_round_nearest_even
);
598 tcg_gen_movi_i32(tmp
, float_round_to_zero
);
601 tcg_gen_movi_i32(tmp
, float_round_down
);
604 tcg_gen_ld8u_i32(tmp
, cpu_env
,
605 offsetof(CPUAlphaState
, fpcr_dyn_round
));
609 #if defined(CONFIG_SOFTFLOAT_INLINE)
610 /* ??? The "fpu/softfloat.h" interface is to call set_float_rounding_mode.
611 With CONFIG_SOFTFLOAT that expands to an out-of-line call that just
612 sets the one field. */
613 tcg_gen_st8_i32(tmp
, cpu_env
,
614 offsetof(CPUAlphaState
, fp_status
.float_rounding_mode
));
616 gen_helper_setroundmode(tmp
);
619 tcg_temp_free_i32(tmp
);
622 static void gen_qual_flushzero(DisasContext
*ctx
, int fn11
)
627 if (fn11
== ctx
->tb_ftz
) {
632 tmp
= tcg_temp_new_i32();
634 /* Underflow is enabled, use the FPCR setting. */
635 tcg_gen_ld8u_i32(tmp
, cpu_env
,
636 offsetof(CPUAlphaState
, fpcr_flush_to_zero
));
638 /* Underflow is disabled, force flush-to-zero. */
639 tcg_gen_movi_i32(tmp
, 1);
642 #if defined(CONFIG_SOFTFLOAT_INLINE)
643 tcg_gen_st8_i32(tmp
, cpu_env
,
644 offsetof(CPUAlphaState
, fp_status
.flush_to_zero
));
646 gen_helper_setflushzero(tmp
);
649 tcg_temp_free_i32(tmp
);
652 static TCGv
gen_ieee_input(DisasContext
*ctx
, int reg
, int fn11
, int is_cmp
)
656 if (unlikely(reg
== 31)) {
657 val
= load_zero(ctx
);
660 if ((fn11
& QUAL_S
) == 0) {
662 gen_helper_ieee_input_cmp(cpu_env
, val
);
664 gen_helper_ieee_input(cpu_env
, val
);
667 #ifndef CONFIG_USER_ONLY
668 /* In system mode, raise exceptions for denormals like real
669 hardware. In user mode, proceed as if the OS completion
670 handler is handling the denormal as per spec. */
671 gen_helper_ieee_input_s(cpu_env
, val
);
678 static void gen_fp_exc_raise(int rc
, int fn11
)
680 /* ??? We ought to be able to do something with imprecise exceptions.
681 E.g. notice we're still in the trap shadow of something within the
682 TB and do not generate the code to signal the exception; end the TB
683 when an exception is forced to arrive, either by consumption of a
684 register value or TRAPB or EXCB. */
688 if (!(fn11
& QUAL_U
)) {
689 /* Note that QUAL_U == QUAL_V, so ignore either. */
690 ignore
|= FPCR_UNF
| FPCR_IOV
;
692 if (!(fn11
& QUAL_I
)) {
695 ign
= tcg_constant_i32(ignore
);
697 /* ??? Pass in the regno of the destination so that the helper can
698 set EXC_MASK, which contains a bitmask of destination registers
699 that have caused arithmetic traps. A simple userspace emulation
700 does not require this. We do need it for a guest kernel's entArith,
701 or if we were to do something clever with imprecise exceptions. */
702 reg
= tcg_constant_i32(rc
+ 32);
704 gen_helper_fp_exc_raise_s(cpu_env
, ign
, reg
);
706 gen_helper_fp_exc_raise(cpu_env
, ign
, reg
);
/* gen_cvtlq: emit CVTLQ — convert the longword held in FP-register format
   (bits scattered per the Alpha F/G layout) in VB back to a canonical
   sign-extended 64-bit integer in VC. */
710 static void gen_cvtlq(TCGv vc
, TCGv vb
)
712 TCGv tmp
= tcg_temp_new();
714 /* The arithmetic right shift here, plus the sign-extended mask below
715 yields a sign-extended result without an explicit ext32s_i64. */
/* tmp = low 30 payload bits; vc = sign-extended high bits; then merge. */
716 tcg_gen_shri_i64(tmp
, vb
, 29);
717 tcg_gen_sari_i64(vc
, vb
, 32);
718 tcg_gen_deposit_i64(vc
, vc
, tmp
, 0, 30);
723 static void gen_ieee_arith2(DisasContext
*ctx
,
724 void (*helper
)(TCGv
, TCGv_ptr
, TCGv
),
725 int rb
, int rc
, int fn11
)
729 gen_qual_roundmode(ctx
, fn11
);
730 gen_qual_flushzero(ctx
, fn11
);
732 vb
= gen_ieee_input(ctx
, rb
, fn11
, 0);
733 helper(dest_fpr(ctx
, rc
), cpu_env
, vb
);
735 gen_fp_exc_raise(rc
, fn11
);
738 #define IEEE_ARITH2(name) \
739 static inline void glue(gen_, name)(DisasContext *ctx, \
740 int rb, int rc, int fn11) \
742 gen_ieee_arith2(ctx, gen_helper_##name, rb, rc, fn11); \
749 static void gen_cvttq(DisasContext
*ctx
, int rb
, int rc
, int fn11
)
753 /* No need to set flushzero, since we have an integer output. */
754 vb
= gen_ieee_input(ctx
, rb
, fn11
, 0);
755 vc
= dest_fpr(ctx
, rc
);
757 /* Almost all integer conversions use cropped rounding;
758 special case that. */
759 if ((fn11
& QUAL_RM_MASK
) == QUAL_RM_C
) {
760 gen_helper_cvttq_c(vc
, cpu_env
, vb
);
762 gen_qual_roundmode(ctx
, fn11
);
763 gen_helper_cvttq(vc
, cpu_env
, vb
);
765 gen_fp_exc_raise(rc
, fn11
);
768 static void gen_ieee_intcvt(DisasContext
*ctx
,
769 void (*helper
)(TCGv
, TCGv_ptr
, TCGv
),
770 int rb
, int rc
, int fn11
)
774 gen_qual_roundmode(ctx
, fn11
);
775 vb
= load_fpr(ctx
, rb
);
776 vc
= dest_fpr(ctx
, rc
);
778 /* The only exception that can be raised by integer conversion
779 is inexact. Thus we only need to worry about exceptions when
780 inexact handling is requested. */
782 helper(vc
, cpu_env
, vb
);
783 gen_fp_exc_raise(rc
, fn11
);
785 helper(vc
, cpu_env
, vb
);
789 #define IEEE_INTCVT(name) \
790 static inline void glue(gen_, name)(DisasContext *ctx, \
791 int rb, int rc, int fn11) \
793 gen_ieee_intcvt(ctx, gen_helper_##name, rb, rc, fn11); \
798 static void gen_cpy_mask(TCGv vc
, TCGv va
, TCGv vb
, bool inv_a
, uint64_t mask
)
800 TCGv vmask
= tcg_constant_i64(mask
);
801 TCGv tmp
= tcg_temp_new_i64();
804 tcg_gen_andc_i64(tmp
, vmask
, va
);
806 tcg_gen_and_i64(tmp
, va
, vmask
);
809 tcg_gen_andc_i64(vc
, vb
, vmask
);
810 tcg_gen_or_i64(vc
, vc
, tmp
);
815 static void gen_ieee_arith3(DisasContext
*ctx
,
816 void (*helper
)(TCGv
, TCGv_ptr
, TCGv
, TCGv
),
817 int ra
, int rb
, int rc
, int fn11
)
821 gen_qual_roundmode(ctx
, fn11
);
822 gen_qual_flushzero(ctx
, fn11
);
824 va
= gen_ieee_input(ctx
, ra
, fn11
, 0);
825 vb
= gen_ieee_input(ctx
, rb
, fn11
, 0);
826 vc
= dest_fpr(ctx
, rc
);
827 helper(vc
, cpu_env
, va
, vb
);
829 gen_fp_exc_raise(rc
, fn11
);
832 #define IEEE_ARITH3(name) \
833 static inline void glue(gen_, name)(DisasContext *ctx, \
834 int ra, int rb, int rc, int fn11) \
836 gen_ieee_arith3(ctx, gen_helper_##name, ra, rb, rc, fn11); \
847 static void gen_ieee_compare(DisasContext
*ctx
,
848 void (*helper
)(TCGv
, TCGv_ptr
, TCGv
, TCGv
),
849 int ra
, int rb
, int rc
, int fn11
)
853 va
= gen_ieee_input(ctx
, ra
, fn11
, 1);
854 vb
= gen_ieee_input(ctx
, rb
, fn11
, 1);
855 vc
= dest_fpr(ctx
, rc
);
856 helper(vc
, cpu_env
, va
, vb
);
858 gen_fp_exc_raise(rc
, fn11
);
861 #define IEEE_CMP3(name) \
862 static inline void glue(gen_, name)(DisasContext *ctx, \
863 int ra, int rb, int rc, int fn11) \
865 gen_ieee_compare(ctx, gen_helper_##name, ra, rb, rc, fn11); \
/*
 * zapnot_mask: expand an 8-bit ZAPNOT byte-select literal into a 64-bit
 * byte mask.  Bit i of LIT selects byte i of the result: when set, byte i
 * of the mask is 0xff; when clear, it is 0x00.  ZAPNOT then reduces to an
 * AND with this mask.
 *
 * NOTE(review): reconstructed from a garbled extraction that had dropped
 * the accumulator initialization and the return statement; the loop body
 * visible in the extraction is preserved unchanged.
 */
static inline uint64_t zapnot_mask(uint8_t lit)
{
    uint64_t mask = 0;
    int i;

    for (i = 0; i < 8; ++i) {
        if ((lit >> i) & 1) {
            mask |= 0xffull << (i * 8);
        }
    }
    return mask;
}
885 /* Implement zapnot with an immediate operand, which expands to some
886 form of immediate AND. This is a basic building block in the
887 definition of many of the other byte manipulation instructions. */
/* NOTE(review): this extraction is missing the switch's case labels and
   break statements; the visible arms presumably special-case LIT values
   0x00, 0x01, 0x03, 0x0f and 0xff before the generic andi fallback —
   confirm against the full source. */
888 static void gen_zapnoti(TCGv dest
, TCGv src
, uint8_t lit
)
/* No bytes selected: result is zero. */
892 tcg_gen_movi_i64(dest
, 0);
/* Low byte only: zero-extend from 8 bits. */
895 tcg_gen_ext8u_i64(dest
, src
);
/* Low two bytes: zero-extend from 16 bits. */
898 tcg_gen_ext16u_i64(dest
, src
);
/* Low four bytes: zero-extend from 32 bits. */
901 tcg_gen_ext32u_i64(dest
, src
);
/* All bytes selected: plain move. */
904 tcg_gen_mov_i64(dest
, src
);
/* Generic case: AND with the expanded byte mask. */
907 tcg_gen_andi_i64(dest
, src
, zapnot_mask(lit
));
912 /* EXTWH, EXTLH, EXTQH */
913 static void gen_ext_h(DisasContext
*ctx
, TCGv vc
, TCGv va
, int rb
, bool islit
,
914 uint8_t lit
, uint8_t byte_mask
)
917 int pos
= (64 - lit
* 8) & 0x3f;
918 int len
= cto32(byte_mask
) * 8;
920 tcg_gen_deposit_z_i64(vc
, va
, pos
, len
- pos
);
922 tcg_gen_movi_i64(vc
, 0);
925 TCGv tmp
= tcg_temp_new();
926 tcg_gen_shli_i64(tmp
, load_gpr(ctx
, rb
), 3);
927 tcg_gen_neg_i64(tmp
, tmp
);
928 tcg_gen_andi_i64(tmp
, tmp
, 0x3f);
929 tcg_gen_shl_i64(vc
, va
, tmp
);
932 gen_zapnoti(vc
, vc
, byte_mask
);
935 /* EXTBL, EXTWL, EXTLL, EXTQL */
936 static void gen_ext_l(DisasContext
*ctx
, TCGv vc
, TCGv va
, int rb
, bool islit
,
937 uint8_t lit
, uint8_t byte_mask
)
940 int pos
= (lit
& 7) * 8;
941 int len
= cto32(byte_mask
) * 8;
942 if (pos
+ len
>= 64) {
945 tcg_gen_extract_i64(vc
, va
, pos
, len
);
947 TCGv tmp
= tcg_temp_new();
948 tcg_gen_andi_i64(tmp
, load_gpr(ctx
, rb
), 7);
949 tcg_gen_shli_i64(tmp
, tmp
, 3);
950 tcg_gen_shr_i64(vc
, va
, tmp
);
952 gen_zapnoti(vc
, vc
, byte_mask
);
956 /* INSWH, INSLH, INSQH */
957 static void gen_ins_h(DisasContext
*ctx
, TCGv vc
, TCGv va
, int rb
, bool islit
,
958 uint8_t lit
, uint8_t byte_mask
)
961 int pos
= 64 - (lit
& 7) * 8;
962 int len
= cto32(byte_mask
) * 8;
964 tcg_gen_extract_i64(vc
, va
, pos
, len
- pos
);
966 tcg_gen_movi_i64(vc
, 0);
969 TCGv tmp
= tcg_temp_new();
970 TCGv shift
= tcg_temp_new();
972 /* The instruction description has us left-shift the byte mask
973 and extract bits <15:8> and apply that zap at the end. This
974 is equivalent to simply performing the zap first and shifting
976 gen_zapnoti(tmp
, va
, byte_mask
);
978 /* If (B & 7) == 0, we need to shift by 64 and leave a zero. Do this
979 portably by splitting the shift into two parts: shift_count-1 and 1.
980 Arrange for the -1 by using ones-complement instead of
981 twos-complement in the negation: ~(B * 8) & 63. */
983 tcg_gen_shli_i64(shift
, load_gpr(ctx
, rb
), 3);
984 tcg_gen_not_i64(shift
, shift
);
985 tcg_gen_andi_i64(shift
, shift
, 0x3f);
987 tcg_gen_shr_i64(vc
, tmp
, shift
);
988 tcg_gen_shri_i64(vc
, vc
, 1);
989 tcg_temp_free(shift
);
994 /* INSBL, INSWL, INSLL, INSQL */
995 static void gen_ins_l(DisasContext
*ctx
, TCGv vc
, TCGv va
, int rb
, bool islit
,
996 uint8_t lit
, uint8_t byte_mask
)
999 int pos
= (lit
& 7) * 8;
1000 int len
= cto32(byte_mask
) * 8;
1001 if (pos
+ len
> 64) {
1004 tcg_gen_deposit_z_i64(vc
, va
, pos
, len
);
1006 TCGv tmp
= tcg_temp_new();
1007 TCGv shift
= tcg_temp_new();
1009 /* The instruction description has us left-shift the byte mask
1010 and extract bits <15:8> and apply that zap at the end. This
1011 is equivalent to simply performing the zap first and shifting
1013 gen_zapnoti(tmp
, va
, byte_mask
);
1015 tcg_gen_andi_i64(shift
, load_gpr(ctx
, rb
), 7);
1016 tcg_gen_shli_i64(shift
, shift
, 3);
1017 tcg_gen_shl_i64(vc
, tmp
, shift
);
1018 tcg_temp_free(shift
);
1023 /* MSKWH, MSKLH, MSKQH */
1024 static void gen_msk_h(DisasContext
*ctx
, TCGv vc
, TCGv va
, int rb
, bool islit
,
1025 uint8_t lit
, uint8_t byte_mask
)
1028 gen_zapnoti(vc
, va
, ~((byte_mask
<< (lit
& 7)) >> 8));
1030 TCGv shift
= tcg_temp_new();
1031 TCGv mask
= tcg_temp_new();
1033 /* The instruction description is as above, where the byte_mask
1034 is shifted left, and then we extract bits <15:8>. This can be
1035 emulated with a right-shift on the expanded byte mask. This
1036 requires extra care because for an input <2:0> == 0 we need a
1037 shift of 64 bits in order to generate a zero. This is done by
1038 splitting the shift into two parts, the variable shift - 1
1039 followed by a constant 1 shift. The code we expand below is
1040 equivalent to ~(B * 8) & 63. */
1042 tcg_gen_shli_i64(shift
, load_gpr(ctx
, rb
), 3);
1043 tcg_gen_not_i64(shift
, shift
);
1044 tcg_gen_andi_i64(shift
, shift
, 0x3f);
1045 tcg_gen_movi_i64(mask
, zapnot_mask (byte_mask
));
1046 tcg_gen_shr_i64(mask
, mask
, shift
);
1047 tcg_gen_shri_i64(mask
, mask
, 1);
1049 tcg_gen_andc_i64(vc
, va
, mask
);
1051 tcg_temp_free(mask
);
1052 tcg_temp_free(shift
);
1056 /* MSKBL, MSKWL, MSKLL, MSKQL */
1057 static void gen_msk_l(DisasContext
*ctx
, TCGv vc
, TCGv va
, int rb
, bool islit
,
1058 uint8_t lit
, uint8_t byte_mask
)
1061 gen_zapnoti(vc
, va
, ~(byte_mask
<< (lit
& 7)));
1063 TCGv shift
= tcg_temp_new();
1064 TCGv mask
= tcg_temp_new();
1066 tcg_gen_andi_i64(shift
, load_gpr(ctx
, rb
), 7);
1067 tcg_gen_shli_i64(shift
, shift
, 3);
1068 tcg_gen_movi_i64(mask
, zapnot_mask(byte_mask
));
1069 tcg_gen_shl_i64(mask
, mask
, shift
);
1071 tcg_gen_andc_i64(vc
, va
, mask
);
1073 tcg_temp_free(mask
);
1074 tcg_temp_free(shift
);
1078 static void gen_rx(DisasContext
*ctx
, int ra
, int set
)
1081 ld_flag_byte(ctx
->ir
[ra
], ENV_FLAG_RX_SHIFT
);
1084 st_flag_byte(tcg_constant_i64(set
), ENV_FLAG_RX_SHIFT
);
1087 static DisasJumpType
gen_call_pal(DisasContext
*ctx
, int palcode
)
1089 /* We're emulating OSF/1 PALcode. Many of these are trivial access
1090 to internal cpu registers. */
1092 /* Unprivileged PAL call */
1093 if (palcode
>= 0x80 && palcode
< 0xC0) {
1097 /* No-op inside QEMU. */
1101 tcg_gen_ld_i64(ctx
->ir
[IR_V0
], cpu_env
,
1102 offsetof(CPUAlphaState
, unique
));
1106 tcg_gen_st_i64(ctx
->ir
[IR_A0
], cpu_env
,
1107 offsetof(CPUAlphaState
, unique
));
1116 #ifndef CONFIG_USER_ONLY
1117 /* Privileged PAL code */
1118 if (palcode
< 0x40 && (ctx
->tbflags
& ENV_FLAG_PS_USER
) == 0) {
1122 /* No-op inside QEMU. */
1126 /* No-op inside QEMU. */
1130 tcg_gen_st_i64(ctx
->ir
[IR_A0
], cpu_env
,
1131 offsetof(CPUAlphaState
, vptptr
));
1135 tcg_gen_st_i64(ctx
->ir
[IR_A0
], cpu_env
,
1136 offsetof(CPUAlphaState
, sysval
));
1140 tcg_gen_ld_i64(ctx
->ir
[IR_V0
], cpu_env
,
1141 offsetof(CPUAlphaState
, sysval
));
1146 /* Note that we already know we're in kernel mode, so we know
1147 that PS only contains the 3 IPL bits. */
1148 ld_flag_byte(ctx
->ir
[IR_V0
], ENV_FLAG_PS_SHIFT
);
1150 /* But make sure and store only the 3 IPL bits from the user. */
1152 TCGv tmp
= tcg_temp_new();
1153 tcg_gen_andi_i64(tmp
, ctx
->ir
[IR_A0
], PS_INT_MASK
);
1154 st_flag_byte(tmp
, ENV_FLAG_PS_SHIFT
);
1158 /* Allow interrupts to be recognized right away. */
1159 tcg_gen_movi_i64(cpu_pc
, ctx
->base
.pc_next
);
1160 return DISAS_PC_UPDATED_NOCHAIN
;
1164 ld_flag_byte(ctx
->ir
[IR_V0
], ENV_FLAG_PS_SHIFT
);
1169 tcg_gen_st_i64(ctx
->ir
[IR_A0
], cpu_env
,
1170 offsetof(CPUAlphaState
, usp
));
1174 tcg_gen_ld_i64(ctx
->ir
[IR_V0
], cpu_env
,
1175 offsetof(CPUAlphaState
, usp
));
1179 tcg_gen_ld32s_i64(ctx
->ir
[IR_V0
], cpu_env
,
1180 -offsetof(AlphaCPU
, env
) + offsetof(CPUState
, cpu_index
));
1185 tcg_gen_st_i32(tcg_constant_i32(1), cpu_env
,
1186 -offsetof(AlphaCPU
, env
) +
1187 offsetof(CPUState
, halted
));
1188 tcg_gen_movi_i64(ctx
->ir
[IR_V0
], 0);
1189 return gen_excp(ctx
, EXCP_HALTED
, 0);
1198 return gen_invalid(ctx
);
1201 #ifdef CONFIG_USER_ONLY
1202 return gen_excp(ctx
, EXCP_CALL_PAL
, palcode
);
1205 TCGv tmp
= tcg_temp_new();
1206 uint64_t exc_addr
= ctx
->base
.pc_next
;
1207 uint64_t entry
= ctx
->palbr
;
1209 if (ctx
->tbflags
& ENV_FLAG_PAL_MODE
) {
1212 tcg_gen_movi_i64(tmp
, 1);
1213 st_flag_byte(tmp
, ENV_FLAG_PAL_SHIFT
);
1216 tcg_gen_movi_i64(tmp
, exc_addr
);
1217 tcg_gen_st_i64(tmp
, cpu_env
, offsetof(CPUAlphaState
, exc_addr
));
1220 entry
+= (palcode
& 0x80
1221 ? 0x2000 + (palcode
- 0x80) * 64
1222 : 0x1000 + palcode
* 64);
1224 tcg_gen_movi_i64(cpu_pc
, entry
);
1225 return DISAS_PC_UPDATED
;
1230 #ifndef CONFIG_USER_ONLY
1232 #define PR_LONG 0x200000
1234 static int cpu_pr_data(int pr
)
1237 case 2: return offsetof(CPUAlphaState
, pcc_ofs
) | PR_LONG
;
1238 case 3: return offsetof(CPUAlphaState
, trap_arg0
);
1239 case 4: return offsetof(CPUAlphaState
, trap_arg1
);
1240 case 5: return offsetof(CPUAlphaState
, trap_arg2
);
1241 case 6: return offsetof(CPUAlphaState
, exc_addr
);
1242 case 7: return offsetof(CPUAlphaState
, palbr
);
1243 case 8: return offsetof(CPUAlphaState
, ptbr
);
1244 case 9: return offsetof(CPUAlphaState
, vptptr
);
1245 case 10: return offsetof(CPUAlphaState
, unique
);
1246 case 11: return offsetof(CPUAlphaState
, sysval
);
1247 case 12: return offsetof(CPUAlphaState
, usp
);
1250 return offsetof(CPUAlphaState
, scratch
[pr
- 40]);
1253 return offsetof(CPUAlphaState
, alarm_expire
);
1258 static DisasJumpType
gen_mfpr(DisasContext
*ctx
, TCGv va
, int regno
)
1260 void (*helper
)(TCGv
);
1265 /* Accessing the "non-shadow" general registers. */
1266 regno
= regno
== 39 ? 25 : regno
- 32 + 8;
1267 tcg_gen_mov_i64(va
, cpu_std_ir
[regno
]);
1270 case 250: /* WALLTIME */
1271 helper
= gen_helper_get_walltime
;
1273 case 249: /* VMTIME */
1274 helper
= gen_helper_get_vmtime
;
1276 if (tb_cflags(ctx
->base
.tb
) & CF_USE_ICOUNT
) {
1279 return DISAS_PC_STALE
;
1286 ld_flag_byte(va
, ENV_FLAG_PS_SHIFT
);
1289 ld_flag_byte(va
, ENV_FLAG_FEN_SHIFT
);
1293 /* The basic registers are data only, and unknown registers
1294 are read-zero, write-ignore. */
1295 data
= cpu_pr_data(regno
);
1297 tcg_gen_movi_i64(va
, 0);
1298 } else if (data
& PR_LONG
) {
1299 tcg_gen_ld32s_i64(va
, cpu_env
, data
& ~PR_LONG
);
1301 tcg_gen_ld_i64(va
, cpu_env
, data
);
1309 static DisasJumpType
gen_mtpr(DisasContext
*ctx
, TCGv vb
, int regno
)
1312 DisasJumpType ret
= DISAS_NEXT
;
1317 gen_helper_tbia(cpu_env
);
1322 gen_helper_tbis(cpu_env
, vb
);
1327 tcg_gen_st_i32(tcg_constant_i32(1), cpu_env
,
1328 -offsetof(AlphaCPU
, env
) + offsetof(CPUState
, halted
));
1329 return gen_excp(ctx
, EXCP_HALTED
, 0);
1333 gen_helper_halt(vb
);
1334 return DISAS_PC_STALE
;
1338 if (tb_cflags(ctx
->base
.tb
) & CF_USE_ICOUNT
) {
1340 ret
= DISAS_PC_STALE
;
1342 gen_helper_set_alarm(cpu_env
, vb
);
1347 tcg_gen_st_i64(vb
, cpu_env
, offsetof(CPUAlphaState
, palbr
));
1348 /* Changing the PAL base register implies un-chaining all of the TBs
1349 that ended with a CALL_PAL. Since the base register usually only
1350 changes during boot, flushing everything works well. */
1351 gen_helper_tb_flush(cpu_env
);
1352 return DISAS_PC_STALE
;
1355 /* Accessing the "non-shadow" general registers. */
1356 regno
= regno
== 39 ? 25 : regno
- 32 + 8;
1357 tcg_gen_mov_i64(cpu_std_ir
[regno
], vb
);
1361 st_flag_byte(vb
, ENV_FLAG_PS_SHIFT
);
1364 st_flag_byte(vb
, ENV_FLAG_FEN_SHIFT
);
1368 /* The basic registers are data only, and unknown registers
1369 are read-zero, write-ignore. */
1370 data
= cpu_pr_data(regno
);
1372 if (data
& PR_LONG
) {
1373 tcg_gen_st32_i64(vb
, cpu_env
, data
& ~PR_LONG
);
1375 tcg_gen_st_i64(vb
, cpu_env
, data
);
1383 #endif /* !USER_ONLY*/
1385 #define REQUIRE_NO_LIT \
1392 #define REQUIRE_AMASK(FLAG) \
1394 if ((ctx->amask & AMASK_##FLAG) == 0) { \
1399 #define REQUIRE_TB_FLAG(FLAG) \
1401 if ((ctx->tbflags & (FLAG)) == 0) { \
1406 #define REQUIRE_REG_31(WHICH) \
1408 if (WHICH != 31) { \
1413 #define REQUIRE_FEN \
1415 if (!(ctx->tbflags & ENV_FLAG_FEN)) { \
1420 static DisasJumpType
translate_one(DisasContext
*ctx
, uint32_t insn
)
1422 int32_t disp21
, disp16
, disp12
__attribute__((unused
));
1424 uint8_t opc
, ra
, rb
, rc
, fpfn
, fn7
, lit
;
1425 bool islit
, real_islit
;
1426 TCGv va
, vb
, vc
, tmp
, tmp2
;
1430 /* Decode all instruction fields */
1431 opc
= extract32(insn
, 26, 6);
1432 ra
= extract32(insn
, 21, 5);
1433 rb
= extract32(insn
, 16, 5);
1434 rc
= extract32(insn
, 0, 5);
1435 real_islit
= islit
= extract32(insn
, 12, 1);
1436 lit
= extract32(insn
, 13, 8);
1438 disp21
= sextract32(insn
, 0, 21);
1439 disp16
= sextract32(insn
, 0, 16);
1440 disp12
= sextract32(insn
, 0, 12);
1442 fn11
= extract32(insn
, 5, 11);
1443 fpfn
= extract32(insn
, 5, 6);
1444 fn7
= extract32(insn
, 5, 7);
1446 if (rb
== 31 && !islit
) {
1455 ret
= gen_call_pal(ctx
, insn
& 0x03ffffff);
1481 disp16
= (uint32_t)disp16
<< 16;
1485 va
= dest_gpr(ctx
, ra
);
1486 /* It's worth special-casing immediate loads. */
1488 tcg_gen_movi_i64(va
, disp16
);
1490 tcg_gen_addi_i64(va
, load_gpr(ctx
, rb
), disp16
);
1497 gen_load_int(ctx
, ra
, rb
, disp16
, MO_UB
, 0, 0);
1501 gen_load_int(ctx
, ra
, rb
, disp16
, MO_LEUQ
, 1, 0);
1506 gen_load_int(ctx
, ra
, rb
, disp16
, MO_LEUW
, 0, 0);
1511 gen_store_int(ctx
, ra
, rb
, disp16
, MO_LEUW
, 0);
1516 gen_store_int(ctx
, ra
, rb
, disp16
, MO_UB
, 0);
1520 gen_store_int(ctx
, ra
, rb
, disp16
, MO_LEUQ
, 1);
1524 vc
= dest_gpr(ctx
, rc
);
1525 vb
= load_gpr_lit(ctx
, rb
, lit
, islit
);
1529 /* Special case ADDL as SEXTL. */
1530 tcg_gen_ext32s_i64(vc
, vb
);
1534 /* Special case SUBQ as NEGQ. */
1535 tcg_gen_neg_i64(vc
, vb
);
1540 va
= load_gpr(ctx
, ra
);
1544 tcg_gen_add_i64(vc
, va
, vb
);
1545 tcg_gen_ext32s_i64(vc
, vc
);
1549 tmp
= tcg_temp_new();
1550 tcg_gen_shli_i64(tmp
, va
, 2);
1551 tcg_gen_add_i64(tmp
, tmp
, vb
);
1552 tcg_gen_ext32s_i64(vc
, tmp
);
1557 tcg_gen_sub_i64(vc
, va
, vb
);
1558 tcg_gen_ext32s_i64(vc
, vc
);
1562 tmp
= tcg_temp_new();
1563 tcg_gen_shli_i64(tmp
, va
, 2);
1564 tcg_gen_sub_i64(tmp
, tmp
, vb
);
1565 tcg_gen_ext32s_i64(vc
, tmp
);
1571 /* Special case 0 >= X as X == 0. */
1572 gen_helper_cmpbe0(vc
, vb
);
1574 gen_helper_cmpbge(vc
, va
, vb
);
1579 tmp
= tcg_temp_new();
1580 tcg_gen_shli_i64(tmp
, va
, 3);
1581 tcg_gen_add_i64(tmp
, tmp
, vb
);
1582 tcg_gen_ext32s_i64(vc
, tmp
);
1587 tmp
= tcg_temp_new();
1588 tcg_gen_shli_i64(tmp
, va
, 3);
1589 tcg_gen_sub_i64(tmp
, tmp
, vb
);
1590 tcg_gen_ext32s_i64(vc
, tmp
);
1595 tcg_gen_setcond_i64(TCG_COND_LTU
, vc
, va
, vb
);
1599 tcg_gen_add_i64(vc
, va
, vb
);
1603 tmp
= tcg_temp_new();
1604 tcg_gen_shli_i64(tmp
, va
, 2);
1605 tcg_gen_add_i64(vc
, tmp
, vb
);
1610 tcg_gen_sub_i64(vc
, va
, vb
);
1614 tmp
= tcg_temp_new();
1615 tcg_gen_shli_i64(tmp
, va
, 2);
1616 tcg_gen_sub_i64(vc
, tmp
, vb
);
1621 tcg_gen_setcond_i64(TCG_COND_EQ
, vc
, va
, vb
);
1625 tmp
= tcg_temp_new();
1626 tcg_gen_shli_i64(tmp
, va
, 3);
1627 tcg_gen_add_i64(vc
, tmp
, vb
);
1632 tmp
= tcg_temp_new();
1633 tcg_gen_shli_i64(tmp
, va
, 3);
1634 tcg_gen_sub_i64(vc
, tmp
, vb
);
1639 tcg_gen_setcond_i64(TCG_COND_LEU
, vc
, va
, vb
);
1643 tmp
= tcg_temp_new();
1644 tcg_gen_ext32s_i64(tmp
, va
);
1645 tcg_gen_ext32s_i64(vc
, vb
);
1646 tcg_gen_add_i64(tmp
, tmp
, vc
);
1647 tcg_gen_ext32s_i64(vc
, tmp
);
1648 gen_helper_check_overflow(cpu_env
, vc
, tmp
);
1653 tmp
= tcg_temp_new();
1654 tcg_gen_ext32s_i64(tmp
, va
);
1655 tcg_gen_ext32s_i64(vc
, vb
);
1656 tcg_gen_sub_i64(tmp
, tmp
, vc
);
1657 tcg_gen_ext32s_i64(vc
, tmp
);
1658 gen_helper_check_overflow(cpu_env
, vc
, tmp
);
1663 tcg_gen_setcond_i64(TCG_COND_LT
, vc
, va
, vb
);
1667 tmp
= tcg_temp_new();
1668 tmp2
= tcg_temp_new();
1669 tcg_gen_eqv_i64(tmp
, va
, vb
);
1670 tcg_gen_mov_i64(tmp2
, va
);
1671 tcg_gen_add_i64(vc
, va
, vb
);
1672 tcg_gen_xor_i64(tmp2
, tmp2
, vc
);
1673 tcg_gen_and_i64(tmp
, tmp
, tmp2
);
1674 tcg_gen_shri_i64(tmp
, tmp
, 63);
1675 tcg_gen_movi_i64(tmp2
, 0);
1676 gen_helper_check_overflow(cpu_env
, tmp
, tmp2
);
1678 tcg_temp_free(tmp2
);
1682 tmp
= tcg_temp_new();
1683 tmp2
= tcg_temp_new();
1684 tcg_gen_xor_i64(tmp
, va
, vb
);
1685 tcg_gen_mov_i64(tmp2
, va
);
1686 tcg_gen_sub_i64(vc
, va
, vb
);
1687 tcg_gen_xor_i64(tmp2
, tmp2
, vc
);
1688 tcg_gen_and_i64(tmp
, tmp
, tmp2
);
1689 tcg_gen_shri_i64(tmp
, tmp
, 63);
1690 tcg_gen_movi_i64(tmp2
, 0);
1691 gen_helper_check_overflow(cpu_env
, tmp
, tmp2
);
1693 tcg_temp_free(tmp2
);
1697 tcg_gen_setcond_i64(TCG_COND_LE
, vc
, va
, vb
);
1707 /* Special case BIS as NOP. */
1711 /* Special case BIS as MOV. */
1712 vc
= dest_gpr(ctx
, rc
);
1714 tcg_gen_movi_i64(vc
, lit
);
1716 tcg_gen_mov_i64(vc
, load_gpr(ctx
, rb
));
1722 vc
= dest_gpr(ctx
, rc
);
1723 vb
= load_gpr_lit(ctx
, rb
, lit
, islit
);
1725 if (fn7
== 0x28 && ra
== 31) {
1726 /* Special case ORNOT as NOT. */
1727 tcg_gen_not_i64(vc
, vb
);
1731 va
= load_gpr(ctx
, ra
);
1735 tcg_gen_and_i64(vc
, va
, vb
);
1739 tcg_gen_andc_i64(vc
, va
, vb
);
1743 tmp
= tcg_temp_new();
1744 tcg_gen_andi_i64(tmp
, va
, 1);
1745 tcg_gen_movcond_i64(TCG_COND_NE
, vc
, tmp
, load_zero(ctx
),
1746 vb
, load_gpr(ctx
, rc
));
1751 tmp
= tcg_temp_new();
1752 tcg_gen_andi_i64(tmp
, va
, 1);
1753 tcg_gen_movcond_i64(TCG_COND_EQ
, vc
, tmp
, load_zero(ctx
),
1754 vb
, load_gpr(ctx
, rc
));
1759 tcg_gen_or_i64(vc
, va
, vb
);
1763 tcg_gen_movcond_i64(TCG_COND_EQ
, vc
, va
, load_zero(ctx
),
1764 vb
, load_gpr(ctx
, rc
));
1768 tcg_gen_movcond_i64(TCG_COND_NE
, vc
, va
, load_zero(ctx
),
1769 vb
, load_gpr(ctx
, rc
));
1773 tcg_gen_orc_i64(vc
, va
, vb
);
1777 tcg_gen_xor_i64(vc
, va
, vb
);
1781 tcg_gen_movcond_i64(TCG_COND_LT
, vc
, va
, load_zero(ctx
),
1782 vb
, load_gpr(ctx
, rc
));
1786 tcg_gen_movcond_i64(TCG_COND_GE
, vc
, va
, load_zero(ctx
),
1787 vb
, load_gpr(ctx
, rc
));
1791 tcg_gen_eqv_i64(vc
, va
, vb
);
1796 tcg_gen_andi_i64(vc
, vb
, ~ctx
->amask
);
1800 tcg_gen_movcond_i64(TCG_COND_LE
, vc
, va
, load_zero(ctx
),
1801 vb
, load_gpr(ctx
, rc
));
1805 tcg_gen_movcond_i64(TCG_COND_GT
, vc
, va
, load_zero(ctx
),
1806 vb
, load_gpr(ctx
, rc
));
1811 tcg_gen_movi_i64(vc
, ctx
->implver
);
1819 vc
= dest_gpr(ctx
, rc
);
1820 va
= load_gpr(ctx
, ra
);
1824 gen_msk_l(ctx
, vc
, va
, rb
, islit
, lit
, 0x01);
1828 gen_ext_l(ctx
, vc
, va
, rb
, islit
, lit
, 0x01);
1832 gen_ins_l(ctx
, vc
, va
, rb
, islit
, lit
, 0x01);
1836 gen_msk_l(ctx
, vc
, va
, rb
, islit
, lit
, 0x03);
1840 gen_ext_l(ctx
, vc
, va
, rb
, islit
, lit
, 0x03);
1844 gen_ins_l(ctx
, vc
, va
, rb
, islit
, lit
, 0x03);
1848 gen_msk_l(ctx
, vc
, va
, rb
, islit
, lit
, 0x0f);
1852 gen_ext_l(ctx
, vc
, va
, rb
, islit
, lit
, 0x0f);
1856 gen_ins_l(ctx
, vc
, va
, rb
, islit
, lit
, 0x0f);
1861 gen_zapnoti(vc
, va
, ~lit
);
1863 gen_helper_zap(vc
, va
, load_gpr(ctx
, rb
));
1869 gen_zapnoti(vc
, va
, lit
);
1871 gen_helper_zapnot(vc
, va
, load_gpr(ctx
, rb
));
1876 gen_msk_l(ctx
, vc
, va
, rb
, islit
, lit
, 0xff);
1881 tcg_gen_shri_i64(vc
, va
, lit
& 0x3f);
1883 tmp
= tcg_temp_new();
1884 vb
= load_gpr(ctx
, rb
);
1885 tcg_gen_andi_i64(tmp
, vb
, 0x3f);
1886 tcg_gen_shr_i64(vc
, va
, tmp
);
1892 gen_ext_l(ctx
, vc
, va
, rb
, islit
, lit
, 0xff);
1897 tcg_gen_shli_i64(vc
, va
, lit
& 0x3f);
1899 tmp
= tcg_temp_new();
1900 vb
= load_gpr(ctx
, rb
);
1901 tcg_gen_andi_i64(tmp
, vb
, 0x3f);
1902 tcg_gen_shl_i64(vc
, va
, tmp
);
1908 gen_ins_l(ctx
, vc
, va
, rb
, islit
, lit
, 0xff);
1913 tcg_gen_sari_i64(vc
, va
, lit
& 0x3f);
1915 tmp
= tcg_temp_new();
1916 vb
= load_gpr(ctx
, rb
);
1917 tcg_gen_andi_i64(tmp
, vb
, 0x3f);
1918 tcg_gen_sar_i64(vc
, va
, tmp
);
1924 gen_msk_h(ctx
, vc
, va
, rb
, islit
, lit
, 0x03);
1928 gen_ins_h(ctx
, vc
, va
, rb
, islit
, lit
, 0x03);
1932 gen_ext_h(ctx
, vc
, va
, rb
, islit
, lit
, 0x03);
1936 gen_msk_h(ctx
, vc
, va
, rb
, islit
, lit
, 0x0f);
1940 gen_ins_h(ctx
, vc
, va
, rb
, islit
, lit
, 0x0f);
1944 gen_ext_h(ctx
, vc
, va
, rb
, islit
, lit
, 0x0f);
1948 gen_msk_h(ctx
, vc
, va
, rb
, islit
, lit
, 0xff);
1952 gen_ins_h(ctx
, vc
, va
, rb
, islit
, lit
, 0xff);
1956 gen_ext_h(ctx
, vc
, va
, rb
, islit
, lit
, 0xff);
1964 vc
= dest_gpr(ctx
, rc
);
1965 vb
= load_gpr_lit(ctx
, rb
, lit
, islit
);
1966 va
= load_gpr(ctx
, ra
);
1970 tcg_gen_mul_i64(vc
, va
, vb
);
1971 tcg_gen_ext32s_i64(vc
, vc
);
1975 tcg_gen_mul_i64(vc
, va
, vb
);
1979 tmp
= tcg_temp_new();
1980 tcg_gen_mulu2_i64(tmp
, vc
, va
, vb
);
1985 tmp
= tcg_temp_new();
1986 tcg_gen_ext32s_i64(tmp
, va
);
1987 tcg_gen_ext32s_i64(vc
, vb
);
1988 tcg_gen_mul_i64(tmp
, tmp
, vc
);
1989 tcg_gen_ext32s_i64(vc
, tmp
);
1990 gen_helper_check_overflow(cpu_env
, vc
, tmp
);
1995 tmp
= tcg_temp_new();
1996 tmp2
= tcg_temp_new();
1997 tcg_gen_muls2_i64(vc
, tmp
, va
, vb
);
1998 tcg_gen_sari_i64(tmp2
, vc
, 63);
1999 gen_helper_check_overflow(cpu_env
, tmp
, tmp2
);
2001 tcg_temp_free(tmp2
);
2010 vc
= dest_fpr(ctx
, rc
);
2011 switch (fpfn
) { /* fn11 & 0x3F */
2016 t32
= tcg_temp_new_i32();
2017 va
= load_gpr(ctx
, ra
);
2018 tcg_gen_extrl_i64_i32(t32
, va
);
2019 gen_helper_memory_to_s(vc
, t32
);
2020 tcg_temp_free_i32(t32
);
2026 vb
= load_fpr(ctx
, rb
);
2027 gen_helper_sqrtf(vc
, cpu_env
, vb
);
2033 gen_sqrts(ctx
, rb
, rc
, fn11
);
2039 t32
= tcg_temp_new_i32();
2040 va
= load_gpr(ctx
, ra
);
2041 tcg_gen_extrl_i64_i32(t32
, va
);
2042 gen_helper_memory_to_f(vc
, t32
);
2043 tcg_temp_free_i32(t32
);
2049 va
= load_gpr(ctx
, ra
);
2050 tcg_gen_mov_i64(vc
, va
);
2056 vb
= load_fpr(ctx
, rb
);
2057 gen_helper_sqrtg(vc
, cpu_env
, vb
);
2063 gen_sqrtt(ctx
, rb
, rc
, fn11
);
2071 /* VAX floating point */
2072 /* XXX: rounding mode and trap are ignored (!) */
2073 vc
= dest_fpr(ctx
, rc
);
2074 vb
= load_fpr(ctx
, rb
);
2075 va
= load_fpr(ctx
, ra
);
2076 switch (fpfn
) { /* fn11 & 0x3F */
2080 gen_helper_addf(vc
, cpu_env
, va
, vb
);
2085 gen_helper_subf(vc
, cpu_env
, va
, vb
);
2090 gen_helper_mulf(vc
, cpu_env
, va
, vb
);
2095 gen_helper_divf(vc
, cpu_env
, va
, vb
);
2104 gen_helper_addg(vc
, cpu_env
, va
, vb
);
2109 gen_helper_subg(vc
, cpu_env
, va
, vb
);
2114 gen_helper_mulg(vc
, cpu_env
, va
, vb
);
2119 gen_helper_divg(vc
, cpu_env
, va
, vb
);
2124 gen_helper_cmpgeq(vc
, cpu_env
, va
, vb
);
2129 gen_helper_cmpglt(vc
, cpu_env
, va
, vb
);
2134 gen_helper_cmpgle(vc
, cpu_env
, va
, vb
);
2140 gen_helper_cvtgf(vc
, cpu_env
, vb
);
2150 gen_helper_cvtgq(vc
, cpu_env
, vb
);
2156 gen_helper_cvtqf(vc
, cpu_env
, vb
);
2162 gen_helper_cvtqg(vc
, cpu_env
, vb
);
2170 /* IEEE floating-point */
2171 switch (fpfn
) { /* fn11 & 0x3F */
2175 gen_adds(ctx
, ra
, rb
, rc
, fn11
);
2180 gen_subs(ctx
, ra
, rb
, rc
, fn11
);
2185 gen_muls(ctx
, ra
, rb
, rc
, fn11
);
2190 gen_divs(ctx
, ra
, rb
, rc
, fn11
);
2195 gen_addt(ctx
, ra
, rb
, rc
, fn11
);
2200 gen_subt(ctx
, ra
, rb
, rc
, fn11
);
2205 gen_mult(ctx
, ra
, rb
, rc
, fn11
);
2210 gen_divt(ctx
, ra
, rb
, rc
, fn11
);
2215 gen_cmptun(ctx
, ra
, rb
, rc
, fn11
);
2220 gen_cmpteq(ctx
, ra
, rb
, rc
, fn11
);
2225 gen_cmptlt(ctx
, ra
, rb
, rc
, fn11
);
2230 gen_cmptle(ctx
, ra
, rb
, rc
, fn11
);
2235 if (fn11
== 0x2AC || fn11
== 0x6AC) {
2237 gen_cvtst(ctx
, rb
, rc
, fn11
);
2240 gen_cvtts(ctx
, rb
, rc
, fn11
);
2247 gen_cvttq(ctx
, rb
, rc
, fn11
);
2253 gen_cvtqs(ctx
, rb
, rc
, fn11
);
2259 gen_cvtqt(ctx
, rb
, rc
, fn11
);
2272 vc
= dest_fpr(ctx
, rc
);
2273 vb
= load_fpr(ctx
, rb
);
2280 /* Special case CPYS as FNOP. */
2282 vc
= dest_fpr(ctx
, rc
);
2283 va
= load_fpr(ctx
, ra
);
2285 /* Special case CPYS as FMOV. */
2286 tcg_gen_mov_i64(vc
, va
);
2288 vb
= load_fpr(ctx
, rb
);
2289 gen_cpy_mask(vc
, va
, vb
, 0, 0x8000000000000000ULL
);
2296 vc
= dest_fpr(ctx
, rc
);
2297 vb
= load_fpr(ctx
, rb
);
2298 va
= load_fpr(ctx
, ra
);
2299 gen_cpy_mask(vc
, va
, vb
, 1, 0x8000000000000000ULL
);
2304 vc
= dest_fpr(ctx
, rc
);
2305 vb
= load_fpr(ctx
, rb
);
2306 va
= load_fpr(ctx
, ra
);
2307 gen_cpy_mask(vc
, va
, vb
, 0, 0xFFF0000000000000ULL
);
2312 va
= load_fpr(ctx
, ra
);
2313 gen_helper_store_fpcr(cpu_env
, va
);
2314 if (ctx
->tb_rm
== QUAL_RM_D
) {
2315 /* Re-do the copy of the rounding mode to fp_status
2316 the next time we use dynamic rounding. */
2323 va
= dest_fpr(ctx
, ra
);
2324 gen_helper_load_fpcr(va
, cpu_env
);
2329 gen_fcmov(ctx
, TCG_COND_EQ
, ra
, rb
, rc
);
2334 gen_fcmov(ctx
, TCG_COND_NE
, ra
, rb
, rc
);
2339 gen_fcmov(ctx
, TCG_COND_LT
, ra
, rb
, rc
);
2344 gen_fcmov(ctx
, TCG_COND_GE
, ra
, rb
, rc
);
2349 gen_fcmov(ctx
, TCG_COND_LE
, ra
, rb
, rc
);
2354 gen_fcmov(ctx
, TCG_COND_GT
, ra
, rb
, rc
);
2356 case 0x030: /* CVTQL */
2357 case 0x130: /* CVTQL/V */
2358 case 0x530: /* CVTQL/SV */
2361 vc
= dest_fpr(ctx
, rc
);
2362 vb
= load_fpr(ctx
, rb
);
2363 gen_helper_cvtql(vc
, cpu_env
, vb
);
2364 gen_fp_exc_raise(rc
, fn11
);
2372 switch ((uint16_t)disp16
) {
2383 tcg_gen_mb(TCG_MO_ALL
| TCG_BAR_SC
);
2387 tcg_gen_mb(TCG_MO_ST_ST
| TCG_BAR_SC
);
2399 va
= dest_gpr(ctx
, ra
);
2400 if (tb_cflags(ctx
->base
.tb
) & CF_USE_ICOUNT
) {
2402 gen_helper_load_pcc(va
, cpu_env
);
2403 ret
= DISAS_PC_STALE
;
2405 gen_helper_load_pcc(va
, cpu_env
);
2433 /* HW_MFPR (PALcode) */
2434 #ifndef CONFIG_USER_ONLY
2435 REQUIRE_TB_FLAG(ENV_FLAG_PAL_MODE
);
2436 va
= dest_gpr(ctx
, ra
);
2437 ret
= gen_mfpr(ctx
, va
, insn
& 0xffff);
2444 /* JMP, JSR, RET, JSR_COROUTINE. These only differ by the branch
2445 prediction stack action, which of course we don't implement. */
2446 vb
= load_gpr(ctx
, rb
);
2447 tcg_gen_andi_i64(cpu_pc
, vb
, ~3);
2449 tcg_gen_movi_i64(ctx
->ir
[ra
], ctx
->base
.pc_next
);
2451 ret
= DISAS_PC_UPDATED
;
2455 /* HW_LD (PALcode) */
2456 #ifndef CONFIG_USER_ONLY
2457 REQUIRE_TB_FLAG(ENV_FLAG_PAL_MODE
);
2459 TCGv addr
= tcg_temp_new();
2460 vb
= load_gpr(ctx
, rb
);
2461 va
= dest_gpr(ctx
, ra
);
2463 tcg_gen_addi_i64(addr
, vb
, disp12
);
2464 switch ((insn
>> 12) & 0xF) {
2466 /* Longword physical access (hw_ldl/p) */
2467 tcg_gen_qemu_ld_i64(va
, addr
, MMU_PHYS_IDX
, MO_LESL
);
2470 /* Quadword physical access (hw_ldq/p) */
2471 tcg_gen_qemu_ld_i64(va
, addr
, MMU_PHYS_IDX
, MO_LEUQ
);
2474 /* Longword physical access with lock (hw_ldl_l/p) */
2475 tcg_gen_qemu_ld_i64(va
, addr
, MMU_PHYS_IDX
, MO_LESL
);
2476 tcg_gen_mov_i64(cpu_lock_addr
, addr
);
2477 tcg_gen_mov_i64(cpu_lock_value
, va
);
2480 /* Quadword physical access with lock (hw_ldq_l/p) */
2481 tcg_gen_qemu_ld_i64(va
, addr
, MMU_PHYS_IDX
, MO_LEUQ
);
2482 tcg_gen_mov_i64(cpu_lock_addr
, addr
);
2483 tcg_gen_mov_i64(cpu_lock_value
, va
);
2486 /* Longword virtual PTE fetch (hw_ldl/v) */
2489 /* Quadword virtual PTE fetch (hw_ldq/v) */
2499 /* Longword virtual access (hw_ldl) */
2502 /* Quadword virtual access (hw_ldq) */
2505 /* Longword virtual access with protection check (hw_ldl/w) */
2506 tcg_gen_qemu_ld_i64(va
, addr
, MMU_KERNEL_IDX
, MO_LESL
);
2509 /* Quadword virtual access with protection check (hw_ldq/w) */
2510 tcg_gen_qemu_ld_i64(va
, addr
, MMU_KERNEL_IDX
, MO_LEUQ
);
2513 /* Longword virtual access with alt access mode (hw_ldl/a)*/
2516 /* Quadword virtual access with alt access mode (hw_ldq/a) */
2519 /* Longword virtual access with alternate access mode and
2520 protection checks (hw_ldl/wa) */
2521 tcg_gen_qemu_ld_i64(va
, addr
, MMU_USER_IDX
, MO_LESL
);
2524 /* Quadword virtual access with alternate access mode and
2525 protection checks (hw_ldq/wa) */
2526 tcg_gen_qemu_ld_i64(va
, addr
, MMU_USER_IDX
, MO_LEUQ
);
2529 tcg_temp_free(addr
);
2537 vc
= dest_gpr(ctx
, rc
);
2542 va
= load_fpr(ctx
, ra
);
2543 tcg_gen_mov_i64(vc
, va
);
2545 } else if (fn7
== 0x78) {
2549 t32
= tcg_temp_new_i32();
2550 va
= load_fpr(ctx
, ra
);
2551 gen_helper_s_to_memory(t32
, va
);
2552 tcg_gen_ext_i32_i64(vc
, t32
);
2553 tcg_temp_free_i32(t32
);
2557 vb
= load_gpr_lit(ctx
, rb
, lit
, islit
);
2563 tcg_gen_ext8s_i64(vc
, vb
);
2569 tcg_gen_ext16s_i64(vc
, vb
);
2576 tcg_gen_ctpop_i64(vc
, vb
);
2582 va
= load_gpr(ctx
, ra
);
2583 gen_helper_perr(vc
, va
, vb
);
2590 tcg_gen_clzi_i64(vc
, vb
, 64);
2597 tcg_gen_ctzi_i64(vc
, vb
, 64);
2604 gen_helper_unpkbw(vc
, vb
);
2611 gen_helper_unpkbl(vc
, vb
);
2618 gen_helper_pkwb(vc
, vb
);
2625 gen_helper_pklb(vc
, vb
);
2630 va
= load_gpr(ctx
, ra
);
2631 gen_helper_minsb8(vc
, va
, vb
);
2636 va
= load_gpr(ctx
, ra
);
2637 gen_helper_minsw4(vc
, va
, vb
);
2642 va
= load_gpr(ctx
, ra
);
2643 gen_helper_minub8(vc
, va
, vb
);
2648 va
= load_gpr(ctx
, ra
);
2649 gen_helper_minuw4(vc
, va
, vb
);
2654 va
= load_gpr(ctx
, ra
);
2655 gen_helper_maxub8(vc
, va
, vb
);
2660 va
= load_gpr(ctx
, ra
);
2661 gen_helper_maxuw4(vc
, va
, vb
);
2666 va
= load_gpr(ctx
, ra
);
2667 gen_helper_maxsb8(vc
, va
, vb
);
2672 va
= load_gpr(ctx
, ra
);
2673 gen_helper_maxsw4(vc
, va
, vb
);
2681 /* HW_MTPR (PALcode) */
2682 #ifndef CONFIG_USER_ONLY
2683 REQUIRE_TB_FLAG(ENV_FLAG_PAL_MODE
);
2684 vb
= load_gpr(ctx
, rb
);
2685 ret
= gen_mtpr(ctx
, vb
, insn
& 0xffff);
2692 /* HW_RET (PALcode) */
2693 #ifndef CONFIG_USER_ONLY
2694 REQUIRE_TB_FLAG(ENV_FLAG_PAL_MODE
);
2696 /* Pre-EV6 CPUs interpreted this as HW_REI, loading the return
2697 address from EXC_ADDR. This turns out to be useful for our
2698 emulation PALcode, so continue to accept it. */
2699 vb
= dest_sink(ctx
);
2700 tcg_gen_ld_i64(vb
, cpu_env
, offsetof(CPUAlphaState
, exc_addr
));
2702 vb
= load_gpr(ctx
, rb
);
2704 tcg_gen_movi_i64(cpu_lock_addr
, -1);
2705 st_flag_byte(load_zero(ctx
), ENV_FLAG_RX_SHIFT
);
2706 tmp
= tcg_temp_new();
2707 tcg_gen_andi_i64(tmp
, vb
, 1);
2708 st_flag_byte(tmp
, ENV_FLAG_PAL_SHIFT
);
2710 tcg_gen_andi_i64(cpu_pc
, vb
, ~3);
2711 /* Allow interrupts to be recognized right away. */
2712 ret
= DISAS_PC_UPDATED_NOCHAIN
;
2719 /* HW_ST (PALcode) */
2720 #ifndef CONFIG_USER_ONLY
2721 REQUIRE_TB_FLAG(ENV_FLAG_PAL_MODE
);
2723 switch ((insn
>> 12) & 0xF) {
2725 /* Longword physical access */
2726 va
= load_gpr(ctx
, ra
);
2727 vb
= load_gpr(ctx
, rb
);
2728 tmp
= tcg_temp_new();
2729 tcg_gen_addi_i64(tmp
, vb
, disp12
);
2730 tcg_gen_qemu_st_i64(va
, tmp
, MMU_PHYS_IDX
, MO_LESL
);
2734 /* Quadword physical access */
2735 va
= load_gpr(ctx
, ra
);
2736 vb
= load_gpr(ctx
, rb
);
2737 tmp
= tcg_temp_new();
2738 tcg_gen_addi_i64(tmp
, vb
, disp12
);
2739 tcg_gen_qemu_st_i64(va
, tmp
, MMU_PHYS_IDX
, MO_LEUQ
);
2743 /* Longword physical access with lock */
2744 ret
= gen_store_conditional(ctx
, ra
, rb
, disp12
,
2745 MMU_PHYS_IDX
, MO_LESL
);
2748 /* Quadword physical access with lock */
2749 ret
= gen_store_conditional(ctx
, ra
, rb
, disp12
,
2750 MMU_PHYS_IDX
, MO_LEUQ
);
2753 /* Longword virtual access */
2756 /* Quadword virtual access */
2777 /* Longword virtual access with alternate access mode */
2780 /* Quadword virtual access with alternate access mode */
2797 gen_load_fp(ctx
, ra
, rb
, disp16
, gen_ldf
);
2802 gen_load_fp(ctx
, ra
, rb
, disp16
, gen_ldg
);
2807 gen_load_fp(ctx
, ra
, rb
, disp16
, gen_lds
);
2812 gen_load_fp(ctx
, ra
, rb
, disp16
, gen_ldt
);
2817 gen_store_fp(ctx
, ra
, rb
, disp16
, gen_stf
);
2822 gen_store_fp(ctx
, ra
, rb
, disp16
, gen_stg
);
2827 gen_store_fp(ctx
, ra
, rb
, disp16
, gen_sts
);
2832 gen_store_fp(ctx
, ra
, rb
, disp16
, gen_stt
);
2836 gen_load_int(ctx
, ra
, rb
, disp16
, MO_LESL
, 0, 0);
2840 gen_load_int(ctx
, ra
, rb
, disp16
, MO_LEUQ
, 0, 0);
2844 gen_load_int(ctx
, ra
, rb
, disp16
, MO_LESL
, 0, 1);
2848 gen_load_int(ctx
, ra
, rb
, disp16
, MO_LEUQ
, 0, 1);
2852 gen_store_int(ctx
, ra
, rb
, disp16
, MO_LEUL
, 0);
2856 gen_store_int(ctx
, ra
, rb
, disp16
, MO_LEUQ
, 0);
2860 ret
= gen_store_conditional(ctx
, ra
, rb
, disp16
,
2861 ctx
->mem_idx
, MO_LESL
);
2865 ret
= gen_store_conditional(ctx
, ra
, rb
, disp16
,
2866 ctx
->mem_idx
, MO_LEUQ
);
2870 ret
= gen_bdirect(ctx
, ra
, disp21
);
2872 case 0x31: /* FBEQ */
2874 ret
= gen_fbcond(ctx
, TCG_COND_EQ
, ra
, disp21
);
2876 case 0x32: /* FBLT */
2878 ret
= gen_fbcond(ctx
, TCG_COND_LT
, ra
, disp21
);
2880 case 0x33: /* FBLE */
2882 ret
= gen_fbcond(ctx
, TCG_COND_LE
, ra
, disp21
);
2886 ret
= gen_bdirect(ctx
, ra
, disp21
);
2888 case 0x35: /* FBNE */
2890 ret
= gen_fbcond(ctx
, TCG_COND_NE
, ra
, disp21
);
2892 case 0x36: /* FBGE */
2894 ret
= gen_fbcond(ctx
, TCG_COND_GE
, ra
, disp21
);
2896 case 0x37: /* FBGT */
2898 ret
= gen_fbcond(ctx
, TCG_COND_GT
, ra
, disp21
);
2902 ret
= gen_bcond(ctx
, TCG_COND_EQ
, ra
, disp21
, 1);
2906 ret
= gen_bcond(ctx
, TCG_COND_EQ
, ra
, disp21
, 0);
2910 ret
= gen_bcond(ctx
, TCG_COND_LT
, ra
, disp21
, 0);
2914 ret
= gen_bcond(ctx
, TCG_COND_LE
, ra
, disp21
, 0);
2918 ret
= gen_bcond(ctx
, TCG_COND_NE
, ra
, disp21
, 1);
2922 ret
= gen_bcond(ctx
, TCG_COND_NE
, ra
, disp21
, 0);
2926 ret
= gen_bcond(ctx
, TCG_COND_GE
, ra
, disp21
, 0);
2930 ret
= gen_bcond(ctx
, TCG_COND_GT
, ra
, disp21
, 0);
2933 ret
= gen_invalid(ctx
);
2936 ret
= gen_excp(ctx
, EXCP_FEN
, 0);
2943 static void alpha_tr_init_disas_context(DisasContextBase
*dcbase
, CPUState
*cpu
)
2945 DisasContext
*ctx
= container_of(dcbase
, DisasContext
, base
);
2946 CPUAlphaState
*env
= cpu
->env_ptr
;
2949 ctx
->tbflags
= ctx
->base
.tb
->flags
;
2950 ctx
->mem_idx
= cpu_mmu_index(env
, false);
2951 ctx
->implver
= env
->implver
;
2952 ctx
->amask
= env
->amask
;
2954 #ifdef CONFIG_USER_ONLY
2955 ctx
->ir
= cpu_std_ir
;
2956 ctx
->unalign
= (ctx
->tbflags
& TB_FLAG_UNALIGN
? MO_UNALN
: MO_ALIGN
);
2958 ctx
->palbr
= env
->palbr
;
2959 ctx
->ir
= (ctx
->tbflags
& ENV_FLAG_PAL_MODE
? cpu_pal_ir
: cpu_std_ir
);
2962 /* ??? Every TB begins with unset rounding mode, to be initialized on
2963 the first fp insn of the TB. Alternately we could define a proper
2964 default for every TB (e.g. QUAL_RM_N or QUAL_RM_D) and make sure
2965 to reset the FP_STATUS to that default at the end of any TB that
2966 changes the default. We could even (gasp) dynamiclly figure out
2967 what default would be most efficient given the running program. */
2969 /* Similarly for flush-to-zero. */
2975 /* Bound the number of insns to execute to those left on the page. */
2976 bound
= -(ctx
->base
.pc_first
| TARGET_PAGE_MASK
) / 4;
2977 ctx
->base
.max_insns
= MIN(ctx
->base
.max_insns
, bound
);
2980 static void alpha_tr_tb_start(DisasContextBase
*db
, CPUState
*cpu
)
2984 static void alpha_tr_insn_start(DisasContextBase
*dcbase
, CPUState
*cpu
)
2986 tcg_gen_insn_start(dcbase
->pc_next
);
2989 static void alpha_tr_translate_insn(DisasContextBase
*dcbase
, CPUState
*cpu
)
2991 DisasContext
*ctx
= container_of(dcbase
, DisasContext
, base
);
2992 CPUAlphaState
*env
= cpu
->env_ptr
;
2993 uint32_t insn
= translator_ldl(env
, &ctx
->base
, ctx
->base
.pc_next
);
2995 ctx
->base
.pc_next
+= 4;
2996 ctx
->base
.is_jmp
= translate_one(ctx
, insn
);
2998 free_context_temps(ctx
);
2999 translator_loop_temp_check(&ctx
->base
);
3002 static void alpha_tr_tb_stop(DisasContextBase
*dcbase
, CPUState
*cpu
)
3004 DisasContext
*ctx
= container_of(dcbase
, DisasContext
, base
);
3006 switch (ctx
->base
.is_jmp
) {
3007 case DISAS_NORETURN
:
3009 case DISAS_TOO_MANY
:
3010 if (use_goto_tb(ctx
, ctx
->base
.pc_next
)) {
3012 tcg_gen_movi_i64(cpu_pc
, ctx
->base
.pc_next
);
3013 tcg_gen_exit_tb(ctx
->base
.tb
, 0);
3016 case DISAS_PC_STALE
:
3017 tcg_gen_movi_i64(cpu_pc
, ctx
->base
.pc_next
);
3019 case DISAS_PC_UPDATED
:
3020 tcg_gen_lookup_and_goto_ptr();
3022 case DISAS_PC_UPDATED_NOCHAIN
:
3023 tcg_gen_exit_tb(NULL
, 0);
3026 g_assert_not_reached();
3030 static void alpha_tr_disas_log(const DisasContextBase
*dcbase
,
3031 CPUState
*cpu
, FILE *logfile
)
3033 fprintf(logfile
, "IN: %s\n", lookup_symbol(dcbase
->pc_first
));
3034 target_disas(logfile
, cpu
, dcbase
->pc_first
, dcbase
->tb
->size
);
3037 static const TranslatorOps alpha_tr_ops
= {
3038 .init_disas_context
= alpha_tr_init_disas_context
,
3039 .tb_start
= alpha_tr_tb_start
,
3040 .insn_start
= alpha_tr_insn_start
,
3041 .translate_insn
= alpha_tr_translate_insn
,
3042 .tb_stop
= alpha_tr_tb_stop
,
3043 .disas_log
= alpha_tr_disas_log
,
3046 void gen_intermediate_code(CPUState
*cpu
, TranslationBlock
*tb
, int max_insns
)
3049 translator_loop(&alpha_tr_ops
, &dc
.base
, cpu
, tb
, max_insns
);
3052 void restore_state_to_opc(CPUAlphaState
*env
, TranslationBlock
*tb
,