1 /*
2 * Alpha emulation cpu translation for qemu.
4 * Copyright (c) 2007 Jocelyn Mayer
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
20 #include "qemu/osdep.h"
21 #include "cpu.h"
22 #include "sysemu/cpus.h"
23 #include "disas/disas.h"
24 #include "qemu/host-utils.h"
25 #include "exec/exec-all.h"
26 #include "tcg-op.h"
27 #include "exec/cpu_ldst.h"
28 #include "exec/helper-proto.h"
29 #include "exec/helper-gen.h"
30 #include "trace-tcg.h"
31 #include "exec/translator.h"
32 #include "exec/log.h"
35 #undef ALPHA_DEBUG_DISAS
36 #define CONFIG_SOFTFLOAT_INLINE
38 #ifdef ALPHA_DEBUG_DISAS
39 # define LOG_DISAS(...) qemu_log_mask(CPU_LOG_TB_IN_ASM, ## __VA_ARGS__)
40 #else
41 # define LOG_DISAS(...) do { } while (0)
42 #endif
44 typedef struct DisasContext DisasContext;
45 struct DisasContext {
46 DisasContextBase base;
48 #ifndef CONFIG_USER_ONLY
49 uint64_t palbr;
50 #endif
51 uint32_t tbflags;
52 int mem_idx;
54 /* implver and amask values for this CPU. */
55 int implver;
56 int amask;
58 /* Current rounding mode for this TB. */
59 int tb_rm;
60 /* Current flush-to-zero setting for this TB. */
61 int tb_ftz;
63 /* The set of registers active in the current context. */
64 TCGv *ir;
66 /* Temporaries for $31 and $f31 as source and destination. */
67 TCGv zero;
68 TCGv sink;
69 /* Temporary for immediate constants. */
70 TCGv lit;
73 /* Target-specific return values from translate_one, indicating the
74 state of the TB. Note that DISAS_NEXT indicates that we are not
75 exiting the TB. */
76 #define DISAS_PC_UPDATED_NOCHAIN DISAS_TARGET_0
77 #define DISAS_PC_UPDATED DISAS_TARGET_1
78 #define DISAS_PC_STALE DISAS_TARGET_2
80 /* global register indexes */
81 static TCGv_env cpu_env;
82 static TCGv cpu_std_ir[31];
83 static TCGv cpu_fir[31];
84 static TCGv cpu_pc;
85 static TCGv cpu_lock_addr;
86 static TCGv cpu_lock_value;
88 #ifndef CONFIG_USER_ONLY
89 static TCGv cpu_pal_ir[31];
90 #endif
92 #include "exec/gen-icount.h"
94 void alpha_translate_init(void)
96 #define DEF_VAR(V) { &cpu_##V, #V, offsetof(CPUAlphaState, V) }
98 typedef struct { TCGv *var; const char *name; int ofs; } GlobalVar;
99 static const GlobalVar vars[] = {
100 DEF_VAR(pc),
101 DEF_VAR(lock_addr),
102 DEF_VAR(lock_value),
105 #undef DEF_VAR
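/* For reference, DEF_VAR(pc) above expands to the initializer
 *   { &cpu_pc, "pc", offsetof(CPUAlphaState, pc) }
 * which is what lets the loop at the end of this function register
 * every global with a single tcg_global_mem_new_i64() call. */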
107 /* Use the symbolic register names that match the disassembler. */
108 static const char greg_names[31][4] = {
109 "v0", "t0", "t1", "t2", "t3", "t4", "t5", "t6",
110 "t7", "s0", "s1", "s2", "s3", "s4", "s5", "fp",
111 "a0", "a1", "a2", "a3", "a4", "a5", "t8", "t9",
112 "t10", "t11", "ra", "t12", "at", "gp", "sp"
114 static const char freg_names[31][4] = {
115 "f0", "f1", "f2", "f3", "f4", "f5", "f6", "f7",
116 "f8", "f9", "f10", "f11", "f12", "f13", "f14", "f15",
117 "f16", "f17", "f18", "f19", "f20", "f21", "f22", "f23",
118 "f24", "f25", "f26", "f27", "f28", "f29", "f30"
120 #ifndef CONFIG_USER_ONLY
121 static const char shadow_names[8][8] = {
122 "pal_t7", "pal_s0", "pal_s1", "pal_s2",
123 "pal_s3", "pal_s4", "pal_s5", "pal_t11"
125 #endif
127 int i;
129 cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");
130 tcg_ctx.tcg_env = cpu_env;
132 for (i = 0; i < 31; i++) {
133 cpu_std_ir[i] = tcg_global_mem_new_i64(cpu_env,
134 offsetof(CPUAlphaState, ir[i]),
135 greg_names[i]);
138 for (i = 0; i < 31; i++) {
139 cpu_fir[i] = tcg_global_mem_new_i64(cpu_env,
140 offsetof(CPUAlphaState, fir[i]),
141 freg_names[i]);
144 #ifndef CONFIG_USER_ONLY
145 memcpy(cpu_pal_ir, cpu_std_ir, sizeof(cpu_pal_ir));
146 for (i = 0; i < 8; i++) {
147 int r = (i == 7 ? 25 : i + 8);
148 cpu_pal_ir[r] = tcg_global_mem_new_i64(cpu_env,
149 offsetof(CPUAlphaState,
150 shadow[i]),
151 shadow_names[i]);
153 #endif
155 for (i = 0; i < ARRAY_SIZE(vars); ++i) {
156 const GlobalVar *v = &vars[i];
157 *v->var = tcg_global_mem_new_i64(cpu_env, v->ofs, v->name);
161 static TCGv load_zero(DisasContext *ctx)
163 if (TCGV_IS_UNUSED_I64(ctx->zero)) {
164 ctx->zero = tcg_const_i64(0);
166 return ctx->zero;
169 static TCGv dest_sink(DisasContext *ctx)
171 if (TCGV_IS_UNUSED_I64(ctx->sink)) {
172 ctx->sink = tcg_temp_new();
174 return ctx->sink;
177 static void free_context_temps(DisasContext *ctx)
179 if (!TCGV_IS_UNUSED_I64(ctx->sink)) {
180 tcg_gen_discard_i64(ctx->sink);
181 tcg_temp_free(ctx->sink);
182 TCGV_UNUSED_I64(ctx->sink);
184 if (!TCGV_IS_UNUSED_I64(ctx->zero)) {
185 tcg_temp_free(ctx->zero);
186 TCGV_UNUSED_I64(ctx->zero);
188 if (!TCGV_IS_UNUSED_I64(ctx->lit)) {
189 tcg_temp_free(ctx->lit);
190 TCGV_UNUSED_I64(ctx->lit);
194 static TCGv load_gpr(DisasContext *ctx, unsigned reg)
196 if (likely(reg < 31)) {
197 return ctx->ir[reg];
198 } else {
199 return load_zero(ctx);
203 static TCGv load_gpr_lit(DisasContext *ctx, unsigned reg,
204 uint8_t lit, bool islit)
206 if (islit) {
207 ctx->lit = tcg_const_i64(lit);
208 return ctx->lit;
209 } else if (likely(reg < 31)) {
210 return ctx->ir[reg];
211 } else {
212 return load_zero(ctx);
216 static TCGv dest_gpr(DisasContext *ctx, unsigned reg)
218 if (likely(reg < 31)) {
219 return ctx->ir[reg];
220 } else {
221 return dest_sink(ctx);
225 static TCGv load_fpr(DisasContext *ctx, unsigned reg)
227 if (likely(reg < 31)) {
228 return cpu_fir[reg];
229 } else {
230 return load_zero(ctx);
234 static TCGv dest_fpr(DisasContext *ctx, unsigned reg)
236 if (likely(reg < 31)) {
237 return cpu_fir[reg];
238 } else {
239 return dest_sink(ctx);
243 static int get_flag_ofs(unsigned shift)
245 int ofs = offsetof(CPUAlphaState, flags);
246 #ifdef HOST_WORDS_BIGENDIAN
247 ofs += 3 - (shift / 8);
248 #else
249 ofs += shift / 8;
250 #endif
251 return ofs;
254 static void ld_flag_byte(TCGv val, unsigned shift)
256 tcg_gen_ld8u_i64(val, cpu_env, get_flag_ofs(shift));
259 static void st_flag_byte(TCGv val, unsigned shift)
261 tcg_gen_st8_i64(val, cpu_env, get_flag_ofs(shift));
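/* Worked example of the byte addressing above (assuming, as in cpu.h,
 * that the flag shifts are multiples of 8, e.g. a PS shift of 8): on a
 * little-endian host that flag byte sits at offsetof(flags) + 1, while
 * on a big-endian host it sits at offsetof(flags) + (3 - 1) == + 2.
 * Touching a single byte avoids a read-modify-write of the whole
 * 32-bit flags word. */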
264 static void gen_excp_1(int exception, int error_code)
266 TCGv_i32 tmp1, tmp2;
268 tmp1 = tcg_const_i32(exception);
269 tmp2 = tcg_const_i32(error_code);
270 gen_helper_excp(cpu_env, tmp1, tmp2);
271 tcg_temp_free_i32(tmp2);
272 tcg_temp_free_i32(tmp1);
275 static DisasJumpType gen_excp(DisasContext *ctx, int exception, int error_code)
277 tcg_gen_movi_i64(cpu_pc, ctx->base.pc_next);
278 gen_excp_1(exception, error_code);
279 return DISAS_NORETURN;
282 static inline DisasJumpType gen_invalid(DisasContext *ctx)
284 return gen_excp(ctx, EXCP_OPCDEC, 0);
287 static inline void gen_qemu_ldf(TCGv t0, TCGv t1, int flags)
289 TCGv_i32 tmp32 = tcg_temp_new_i32();
290 tcg_gen_qemu_ld_i32(tmp32, t1, flags, MO_LEUL);
291 gen_helper_memory_to_f(t0, tmp32);
292 tcg_temp_free_i32(tmp32);
295 static inline void gen_qemu_ldg(TCGv t0, TCGv t1, int flags)
297 TCGv tmp = tcg_temp_new();
298 tcg_gen_qemu_ld_i64(tmp, t1, flags, MO_LEQ);
299 gen_helper_memory_to_g(t0, tmp);
300 tcg_temp_free(tmp);
303 static inline void gen_qemu_lds(TCGv t0, TCGv t1, int flags)
305 TCGv_i32 tmp32 = tcg_temp_new_i32();
306 tcg_gen_qemu_ld_i32(tmp32, t1, flags, MO_LEUL);
307 gen_helper_memory_to_s(t0, tmp32);
308 tcg_temp_free_i32(tmp32);
311 static inline void gen_qemu_ldl_l(TCGv t0, TCGv t1, int flags)
313 tcg_gen_qemu_ld_i64(t0, t1, flags, MO_LESL);
314 tcg_gen_mov_i64(cpu_lock_addr, t1);
315 tcg_gen_mov_i64(cpu_lock_value, t0);
318 static inline void gen_qemu_ldq_l(TCGv t0, TCGv t1, int flags)
320 tcg_gen_qemu_ld_i64(t0, t1, flags, MO_LEQ);
321 tcg_gen_mov_i64(cpu_lock_addr, t1);
322 tcg_gen_mov_i64(cpu_lock_value, t0);
325 static inline void gen_load_mem(DisasContext *ctx,
326 void (*tcg_gen_qemu_load)(TCGv t0, TCGv t1,
327 int flags),
328 int ra, int rb, int32_t disp16, bool fp,
329 bool clear)
331 TCGv tmp, addr, va;
333 /* LDQ_U with ra $31 is UNOP. Various other loads are forms of
334 prefetch, which we can treat as nops. No exceptions can be
335 missed by doing so. */
336 if (unlikely(ra == 31)) {
337 return;
340 tmp = tcg_temp_new();
341 addr = load_gpr(ctx, rb);
343 if (disp16) {
344 tcg_gen_addi_i64(tmp, addr, disp16);
345 addr = tmp;
347 if (clear) {
348 tcg_gen_andi_i64(tmp, addr, ~0x7);
349 addr = tmp;
352 va = (fp ? cpu_fir[ra] : ctx->ir[ra]);
353 tcg_gen_qemu_load(va, addr, ctx->mem_idx);
355 tcg_temp_free(tmp);
358 static inline void gen_qemu_stf(TCGv t0, TCGv t1, int flags)
360 TCGv_i32 tmp32 = tcg_temp_new_i32();
361 gen_helper_f_to_memory(tmp32, t0);
362 tcg_gen_qemu_st_i32(tmp32, t1, flags, MO_LEUL);
363 tcg_temp_free_i32(tmp32);
366 static inline void gen_qemu_stg(TCGv t0, TCGv t1, int flags)
368 TCGv tmp = tcg_temp_new();
369 gen_helper_g_to_memory(tmp, t0);
370 tcg_gen_qemu_st_i64(tmp, t1, flags, MO_LEQ);
371 tcg_temp_free(tmp);
374 static inline void gen_qemu_sts(TCGv t0, TCGv t1, int flags)
376 TCGv_i32 tmp32 = tcg_temp_new_i32();
377 gen_helper_s_to_memory(tmp32, t0);
378 tcg_gen_qemu_st_i32(tmp32, t1, flags, MO_LEUL);
379 tcg_temp_free_i32(tmp32);
382 static inline void gen_store_mem(DisasContext *ctx,
383 void (*tcg_gen_qemu_store)(TCGv t0, TCGv t1,
384 int flags),
385 int ra, int rb, int32_t disp16, bool fp,
386 bool clear)
388 TCGv tmp, addr, va;
390 tmp = tcg_temp_new();
391 addr = load_gpr(ctx, rb);
393 if (disp16) {
394 tcg_gen_addi_i64(tmp, addr, disp16);
395 addr = tmp;
397 if (clear) {
398 tcg_gen_andi_i64(tmp, addr, ~0x7);
399 addr = tmp;
402 va = (fp ? load_fpr(ctx, ra) : load_gpr(ctx, ra));
403 tcg_gen_qemu_store(va, addr, ctx->mem_idx);
405 tcg_temp_free(tmp);
408 static DisasJumpType gen_store_conditional(DisasContext *ctx, int ra, int rb,
409 int32_t disp16, int mem_idx,
410 TCGMemOp op)
412 TCGLabel *lab_fail, *lab_done;
413 TCGv addr, val;
415 addr = tcg_temp_new_i64();
416 tcg_gen_addi_i64(addr, load_gpr(ctx, rb), disp16);
417 free_context_temps(ctx);
419 lab_fail = gen_new_label();
420 lab_done = gen_new_label();
421 tcg_gen_brcond_i64(TCG_COND_NE, addr, cpu_lock_addr, lab_fail);
422 tcg_temp_free_i64(addr);
424 val = tcg_temp_new_i64();
425 tcg_gen_atomic_cmpxchg_i64(val, cpu_lock_addr, cpu_lock_value,
426 load_gpr(ctx, ra), mem_idx, op);
427 free_context_temps(ctx);
429 if (ra != 31) {
430 tcg_gen_setcond_i64(TCG_COND_EQ, ctx->ir[ra], val, cpu_lock_value);
432 tcg_temp_free_i64(val);
433 tcg_gen_br(lab_done);
435 gen_set_label(lab_fail);
436 if (ra != 31) {
437 tcg_gen_movi_i64(ctx->ir[ra], 0);
440 gen_set_label(lab_done);
441 tcg_gen_movi_i64(cpu_lock_addr, -1);
442 return DISAS_NEXT;
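/* A sketch of the emulation strategy above (illustrative, assuming the
 * usual TCG cmpxchg semantics):
 *
 *   if (addr != cpu_lock_addr) goto fail;           // reservation moved
 *   val = atomic_cmpxchg(*lock_addr, lock_value, new_val);
 *   ra  = (val == lock_value);                      // 1 = success
 *   cpu_lock_addr = -1;                             // consume reservation
 *
 * i.e. LL/SC is approximated by a compare-and-swap against the value
 * observed by the earlier load-locked; a racing store of the same value
 * is indistinguishable, which is an acceptable approximation here. */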
445 static bool in_superpage(DisasContext *ctx, int64_t addr)
447 #ifndef CONFIG_USER_ONLY
448 return ((ctx->tbflags & ENV_FLAG_PS_USER) == 0
449 && addr >> TARGET_VIRT_ADDR_SPACE_BITS == -1
450 && ((addr >> 41) & 3) == 2);
451 #else
452 return false;
453 #endif
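/* Concretely (assuming TARGET_VIRT_ADDR_SPACE_BITS == 43, as on this
 * target): addr >> 43 == -1 requires the top 21 bits to be all ones,
 * and bits <42:41> == 2 then pins the address to the KSEG range
 * 0xfffffc00_00000000 ... 0xfffffdff_ffffffff, which is identity-mapped
 * and whose page permissions can never change. */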
456 static bool use_exit_tb(DisasContext *ctx)
458 return ((ctx->base.tb->cflags & CF_LAST_IO)
459 || ctx->base.singlestep_enabled
460 || singlestep);
463 static bool use_goto_tb(DisasContext *ctx, uint64_t dest)
465 /* Suppress goto_tb in the case of single-stepping and IO. */
466 if (unlikely(use_exit_tb(ctx))) {
467 return false;
469 #ifndef CONFIG_USER_ONLY
470 /* If the destination is in the superpage, the page perms can't change. */
471 if (in_superpage(ctx, dest)) {
472 return true;
474 /* Check for the dest on the same page as the start of the TB. */
475 return ((ctx->base.tb->pc ^ dest) & TARGET_PAGE_MASK) == 0;
476 #else
477 return true;
478 #endif
481 static DisasJumpType gen_bdirect(DisasContext *ctx, int ra, int32_t disp)
483 uint64_t dest = ctx->base.pc_next + (disp << 2);
485 if (ra != 31) {
486 tcg_gen_movi_i64(ctx->ir[ra], ctx->base.pc_next);
489 /* Notice branch-to-next; used to initialize RA with the PC. */
490 if (disp == 0) {
491 return DISAS_NEXT;
492 } else if (use_goto_tb(ctx, dest)) {
493 tcg_gen_goto_tb(0);
494 tcg_gen_movi_i64(cpu_pc, dest);
495 tcg_gen_exit_tb((uintptr_t)ctx->base.tb);
496 return DISAS_NORETURN;
497 } else {
498 tcg_gen_movi_i64(cpu_pc, dest);
499 return DISAS_PC_UPDATED;
503 static DisasJumpType gen_bcond_internal(DisasContext *ctx, TCGCond cond,
504 TCGv cmp, int32_t disp)
506 uint64_t dest = ctx->base.pc_next + (disp << 2);
507 TCGLabel *lab_true = gen_new_label();
509 if (use_goto_tb(ctx, dest)) {
510 tcg_gen_brcondi_i64(cond, cmp, 0, lab_true);
512 tcg_gen_goto_tb(0);
513 tcg_gen_movi_i64(cpu_pc, ctx->base.pc_next);
514 tcg_gen_exit_tb((uintptr_t)ctx->base.tb);
516 gen_set_label(lab_true);
517 tcg_gen_goto_tb(1);
518 tcg_gen_movi_i64(cpu_pc, dest);
519 tcg_gen_exit_tb((uintptr_t)ctx->base.tb + 1);
521 return DISAS_NORETURN;
522 } else {
523 TCGv_i64 z = tcg_const_i64(0);
524 TCGv_i64 d = tcg_const_i64(dest);
525 TCGv_i64 p = tcg_const_i64(ctx->base.pc_next);
527 tcg_gen_movcond_i64(cond, cpu_pc, cmp, z, d, p);
529 tcg_temp_free_i64(z);
530 tcg_temp_free_i64(d);
531 tcg_temp_free_i64(p);
532 return DISAS_PC_UPDATED;
536 static DisasJumpType gen_bcond(DisasContext *ctx, TCGCond cond, int ra,
537 int32_t disp, int mask)
539 if (mask) {
540 TCGv tmp = tcg_temp_new();
541 DisasJumpType ret;
543 tcg_gen_andi_i64(tmp, load_gpr(ctx, ra), 1);
544 ret = gen_bcond_internal(ctx, cond, tmp, disp);
545 tcg_temp_free(tmp);
546 return ret;
548 return gen_bcond_internal(ctx, cond, load_gpr(ctx, ra), disp);
551 /* Fold -0.0 for comparison with COND. */
553 static void gen_fold_mzero(TCGCond cond, TCGv dest, TCGv src)
555 uint64_t mzero = 1ull << 63;
557 switch (cond) {
558 case TCG_COND_LE:
559 case TCG_COND_GT:
560 /* For <= or >, the -0.0 value directly compares the way we want. */
561 tcg_gen_mov_i64(dest, src);
562 break;
564 case TCG_COND_EQ:
565 case TCG_COND_NE:
566 /* For == or !=, we can simply mask off the sign bit and compare. */
567 tcg_gen_andi_i64(dest, src, mzero - 1);
568 break;
570 case TCG_COND_GE:
571 case TCG_COND_LT:
572 /* For >= or <, map -0.0 to +0.0 via comparison and mask. */
573 tcg_gen_setcondi_i64(TCG_COND_NE, dest, src, mzero);
574 tcg_gen_neg_i64(dest, dest);
575 tcg_gen_and_i64(dest, dest, src);
576 break;
578 default:
579 abort();
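/* E.g. in the >= / < case, src == -0.0 (0x8000000000000000) makes the
 * setcond produce 0, the neg leaves 0, and the AND yields +0.0, so
 * -0.0 and +0.0 compare identically; any other src turns the
 * setcond/neg pair into all-ones and the AND passes src through
 * unchanged. */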
583 static DisasJumpType gen_fbcond(DisasContext *ctx, TCGCond cond, int ra,
584 int32_t disp)
586 TCGv cmp_tmp = tcg_temp_new();
587 DisasJumpType ret;
589 gen_fold_mzero(cond, cmp_tmp, load_fpr(ctx, ra));
590 ret = gen_bcond_internal(ctx, cond, cmp_tmp, disp);
591 tcg_temp_free(cmp_tmp);
592 return ret;
595 static void gen_fcmov(DisasContext *ctx, TCGCond cond, int ra, int rb, int rc)
597 TCGv_i64 va, vb, z;
599 z = load_zero(ctx);
600 vb = load_fpr(ctx, rb);
601 va = tcg_temp_new();
602 gen_fold_mzero(cond, va, load_fpr(ctx, ra));
604 tcg_gen_movcond_i64(cond, dest_fpr(ctx, rc), va, z, vb, load_fpr(ctx, rc));
606 tcg_temp_free(va);
609 #define QUAL_RM_N 0x080 /* Round mode nearest even */
610 #define QUAL_RM_C 0x000 /* Round mode chopped */
611 #define QUAL_RM_M 0x040 /* Round mode minus infinity */
612 #define QUAL_RM_D 0x0c0 /* Round mode dynamic */
613 #define QUAL_RM_MASK 0x0c0
615 #define QUAL_U 0x100 /* Underflow enable (fp output) */
616 #define QUAL_V 0x100 /* Overflow enable (int output) */
617 #define QUAL_S 0x400 /* Software completion enable */
618 #define QUAL_I 0x200 /* Inexact detection enable */
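/* As an illustration of how the qualifier bits compose (a sketch, not
 * an encoding taken from the manual): an IEEE operation with the /SUI
 * qualifiers and dynamic rounding carries
 *   (fn11 & ~0x3f) == (QUAL_S | QUAL_U | QUAL_I | QUAL_RM_D) == 0x7c0
 * so gen_qual_roundmode() below fetches the mode from the FPCR and
 * gen_fp_exc_raise() suppresses none of the trap bits. */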
620 static void gen_qual_roundmode(DisasContext *ctx, int fn11)
622 TCGv_i32 tmp;
624 fn11 &= QUAL_RM_MASK;
625 if (fn11 == ctx->tb_rm) {
626 return;
628 ctx->tb_rm = fn11;
630 tmp = tcg_temp_new_i32();
631 switch (fn11) {
632 case QUAL_RM_N:
633 tcg_gen_movi_i32(tmp, float_round_nearest_even);
634 break;
635 case QUAL_RM_C:
636 tcg_gen_movi_i32(tmp, float_round_to_zero);
637 break;
638 case QUAL_RM_M:
639 tcg_gen_movi_i32(tmp, float_round_down);
640 break;
641 case QUAL_RM_D:
642 tcg_gen_ld8u_i32(tmp, cpu_env,
643 offsetof(CPUAlphaState, fpcr_dyn_round));
644 break;
647 #if defined(CONFIG_SOFTFLOAT_INLINE)
648 /* ??? The "fpu/softfloat.h" interface is to call set_float_rounding_mode.
649 With CONFIG_SOFTFLOAT that expands to an out-of-line call that just
650 sets the one field. */
651 tcg_gen_st8_i32(tmp, cpu_env,
652 offsetof(CPUAlphaState, fp_status.float_rounding_mode));
653 #else
654 gen_helper_setroundmode(tmp);
655 #endif
657 tcg_temp_free_i32(tmp);
660 static void gen_qual_flushzero(DisasContext *ctx, int fn11)
662 TCGv_i32 tmp;
664 fn11 &= QUAL_U;
665 if (fn11 == ctx->tb_ftz) {
666 return;
668 ctx->tb_ftz = fn11;
670 tmp = tcg_temp_new_i32();
671 if (fn11) {
672 /* Underflow is enabled, use the FPCR setting. */
673 tcg_gen_ld8u_i32(tmp, cpu_env,
674 offsetof(CPUAlphaState, fpcr_flush_to_zero));
675 } else {
676 /* Underflow is disabled, force flush-to-zero. */
677 tcg_gen_movi_i32(tmp, 1);
680 #if defined(CONFIG_SOFTFLOAT_INLINE)
681 tcg_gen_st8_i32(tmp, cpu_env,
682 offsetof(CPUAlphaState, fp_status.flush_to_zero));
683 #else
684 gen_helper_setflushzero(tmp);
685 #endif
687 tcg_temp_free_i32(tmp);
690 static TCGv gen_ieee_input(DisasContext *ctx, int reg, int fn11, int is_cmp)
692 TCGv val;
694 if (unlikely(reg == 31)) {
695 val = load_zero(ctx);
696 } else {
697 val = cpu_fir[reg];
698 if ((fn11 & QUAL_S) == 0) {
699 if (is_cmp) {
700 gen_helper_ieee_input_cmp(cpu_env, val);
701 } else {
702 gen_helper_ieee_input(cpu_env, val);
704 } else {
705 #ifndef CONFIG_USER_ONLY
706 /* In system mode, raise exceptions for denormals like real
707 hardware. In user mode, proceed as if the OS completion
708 handler is handling the denormal as per spec. */
709 gen_helper_ieee_input_s(cpu_env, val);
710 #endif
713 return val;
716 static void gen_fp_exc_raise(int rc, int fn11)
718 /* ??? We ought to be able to do something with imprecise exceptions.
719 E.g. notice we're still in the trap shadow of something within the
720 TB and do not generate the code to signal the exception; end the TB
721 when an exception is forced to arrive, either by consumption of a
722 register value or TRAPB or EXCB. */
723 TCGv_i32 reg, ign;
724 uint32_t ignore = 0;
726 if (!(fn11 & QUAL_U)) {
727 /* Note that QUAL_U == QUAL_V, so ignore either. */
728 ignore |= FPCR_UNF | FPCR_IOV;
730 if (!(fn11 & QUAL_I)) {
731 ignore |= FPCR_INE;
733 ign = tcg_const_i32(ignore);
735 /* ??? Pass in the regno of the destination so that the helper can
736 set EXC_MASK, which contains a bitmask of destination registers
737 that have caused arithmetic traps. A simple userspace emulation
738 does not require this. We do need it for a guest kernel's entArith,
739 or if we were to do something clever with imprecise exceptions. */
740 reg = tcg_const_i32(rc + 32);
741 if (fn11 & QUAL_S) {
742 gen_helper_fp_exc_raise_s(cpu_env, ign, reg);
743 } else {
744 gen_helper_fp_exc_raise(cpu_env, ign, reg);
747 tcg_temp_free_i32(reg);
748 tcg_temp_free_i32(ign);
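/* For example, a plain ADDT (neither /U nor /I) reaches this point
 * with ignore == FPCR_UNF | FPCR_IOV | FPCR_INE, so only the invalid,
 * division-by-zero and overflow traps can actually be delivered;
 * ADDT/SUI ignores nothing and additionally takes the
 * software-completion (_s) helper path. */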
751 static void gen_cvtlq(TCGv vc, TCGv vb)
753 TCGv tmp = tcg_temp_new();
755 /* The arithmetic right shift here, plus the sign-extended mask below
756 yields a sign-extended result without an explicit ext32s_i64. */
757 tcg_gen_shri_i64(tmp, vb, 29);
758 tcg_gen_sari_i64(vc, vb, 32);
759 tcg_gen_deposit_i64(vc, vc, tmp, 0, 30);
761 tcg_temp_free(tmp);
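/* The register format unpacked here stores a longword x in an FPR as
 * bits <63:62> = x<31:30> and bits <58:29> = x<29:0> (the form a
 * memory-format longword assumes when loaded into an FP register).
 * The shri/sari/deposit sequence reassembles x already sign-extended
 * to 64 bits, with no separate ext32s needed. */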
764 static void gen_ieee_arith2(DisasContext *ctx,
765 void (*helper)(TCGv, TCGv_ptr, TCGv),
766 int rb, int rc, int fn11)
768 TCGv vb;
770 gen_qual_roundmode(ctx, fn11);
771 gen_qual_flushzero(ctx, fn11);
773 vb = gen_ieee_input(ctx, rb, fn11, 0);
774 helper(dest_fpr(ctx, rc), cpu_env, vb);
776 gen_fp_exc_raise(rc, fn11);
779 #define IEEE_ARITH2(name) \
780 static inline void glue(gen_, name)(DisasContext *ctx, \
781 int rb, int rc, int fn11) \
783 gen_ieee_arith2(ctx, gen_helper_##name, rb, rc, fn11); \
785 IEEE_ARITH2(sqrts)
786 IEEE_ARITH2(sqrtt)
787 IEEE_ARITH2(cvtst)
788 IEEE_ARITH2(cvtts)
790 static void gen_cvttq(DisasContext *ctx, int rb, int rc, int fn11)
792 TCGv vb, vc;
794 /* No need to set flushzero, since we have an integer output. */
795 vb = gen_ieee_input(ctx, rb, fn11, 0);
796 vc = dest_fpr(ctx, rc);
798 /* Almost all integer conversions use chopped rounding;
799 special case that. */
800 if ((fn11 & QUAL_RM_MASK) == QUAL_RM_C) {
801 gen_helper_cvttq_c(vc, cpu_env, vb);
802 } else {
803 gen_qual_roundmode(ctx, fn11);
804 gen_helper_cvttq(vc, cpu_env, vb);
806 gen_fp_exc_raise(rc, fn11);
809 static void gen_ieee_intcvt(DisasContext *ctx,
810 void (*helper)(TCGv, TCGv_ptr, TCGv),
811 int rb, int rc, int fn11)
813 TCGv vb, vc;
815 gen_qual_roundmode(ctx, fn11);
816 vb = load_fpr(ctx, rb);
817 vc = dest_fpr(ctx, rc);
819 /* The only exception that can be raised by integer conversion
820 is inexact. Thus we only need to worry about exceptions when
821 inexact handling is requested. */
822 if (fn11 & QUAL_I) {
823 helper(vc, cpu_env, vb);
824 gen_fp_exc_raise(rc, fn11);
825 } else {
826 helper(vc, cpu_env, vb);
830 #define IEEE_INTCVT(name) \
831 static inline void glue(gen_, name)(DisasContext *ctx, \
832 int rb, int rc, int fn11) \
834 gen_ieee_intcvt(ctx, gen_helper_##name, rb, rc, fn11); \
836 IEEE_INTCVT(cvtqs)
837 IEEE_INTCVT(cvtqt)
839 static void gen_cpy_mask(TCGv vc, TCGv va, TCGv vb, bool inv_a, uint64_t mask)
841 TCGv vmask = tcg_const_i64(mask);
842 TCGv tmp = tcg_temp_new_i64();
844 if (inv_a) {
845 tcg_gen_andc_i64(tmp, vmask, va);
846 } else {
847 tcg_gen_and_i64(tmp, va, vmask);
850 tcg_gen_andc_i64(vc, vb, vmask);
851 tcg_gen_or_i64(vc, vc, tmp);
853 tcg_temp_free(vmask);
854 tcg_temp_free(tmp);
857 static void gen_ieee_arith3(DisasContext *ctx,
858 void (*helper)(TCGv, TCGv_ptr, TCGv, TCGv),
859 int ra, int rb, int rc, int fn11)
861 TCGv va, vb, vc;
863 gen_qual_roundmode(ctx, fn11);
864 gen_qual_flushzero(ctx, fn11);
866 va = gen_ieee_input(ctx, ra, fn11, 0);
867 vb = gen_ieee_input(ctx, rb, fn11, 0);
868 vc = dest_fpr(ctx, rc);
869 helper(vc, cpu_env, va, vb);
871 gen_fp_exc_raise(rc, fn11);
874 #define IEEE_ARITH3(name) \
875 static inline void glue(gen_, name)(DisasContext *ctx, \
876 int ra, int rb, int rc, int fn11) \
878 gen_ieee_arith3(ctx, gen_helper_##name, ra, rb, rc, fn11); \
880 IEEE_ARITH3(adds)
881 IEEE_ARITH3(subs)
882 IEEE_ARITH3(muls)
883 IEEE_ARITH3(divs)
884 IEEE_ARITH3(addt)
885 IEEE_ARITH3(subt)
886 IEEE_ARITH3(mult)
887 IEEE_ARITH3(divt)
889 static void gen_ieee_compare(DisasContext *ctx,
890 void (*helper)(TCGv, TCGv_ptr, TCGv, TCGv),
891 int ra, int rb, int rc, int fn11)
893 TCGv va, vb, vc;
895 va = gen_ieee_input(ctx, ra, fn11, 1);
896 vb = gen_ieee_input(ctx, rb, fn11, 1);
897 vc = dest_fpr(ctx, rc);
898 helper(vc, cpu_env, va, vb);
900 gen_fp_exc_raise(rc, fn11);
903 #define IEEE_CMP3(name) \
904 static inline void glue(gen_, name)(DisasContext *ctx, \
905 int ra, int rb, int rc, int fn11) \
907 gen_ieee_compare(ctx, gen_helper_##name, ra, rb, rc, fn11); \
909 IEEE_CMP3(cmptun)
910 IEEE_CMP3(cmpteq)
911 IEEE_CMP3(cmptlt)
912 IEEE_CMP3(cmptle)
914 static inline uint64_t zapnot_mask(uint8_t lit)
916 uint64_t mask = 0;
917 int i;
919 for (i = 0; i < 8; ++i) {
920 if ((lit >> i) & 1) {
921 mask |= 0xffull << (i * 8);
924 return mask;
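/* Sample expansions:
 *   zapnot_mask(0x01) == 0x00000000000000ff
 *   zapnot_mask(0x0f) == 0x00000000ffffffff
 *   zapnot_mask(0x81) == 0xff000000000000ff
 * which is why gen_zapnoti() below can special-case 0x01, 0x03, 0x0f
 * and 0xff as plain zero-extension (or move) operations. */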
927 /* Implement zapnot with an immediate operand, which expands to some
928 form of immediate AND. This is a basic building block in the
929 definition of many of the other byte manipulation instructions. */
930 static void gen_zapnoti(TCGv dest, TCGv src, uint8_t lit)
932 switch (lit) {
933 case 0x00:
934 tcg_gen_movi_i64(dest, 0);
935 break;
936 case 0x01:
937 tcg_gen_ext8u_i64(dest, src);
938 break;
939 case 0x03:
940 tcg_gen_ext16u_i64(dest, src);
941 break;
942 case 0x0f:
943 tcg_gen_ext32u_i64(dest, src);
944 break;
945 case 0xff:
946 tcg_gen_mov_i64(dest, src);
947 break;
948 default:
949 tcg_gen_andi_i64(dest, src, zapnot_mask(lit));
950 break;
954 /* EXTWH, EXTLH, EXTQH */
955 static void gen_ext_h(DisasContext *ctx, TCGv vc, TCGv va, int rb, bool islit,
956 uint8_t lit, uint8_t byte_mask)
958 if (islit) {
959 int pos = (64 - lit * 8) & 0x3f;
960 int len = cto32(byte_mask) * 8;
961 if (pos < len) {
962 tcg_gen_deposit_z_i64(vc, va, pos, len - pos);
963 } else {
964 tcg_gen_movi_i64(vc, 0);
966 } else {
967 TCGv tmp = tcg_temp_new();
968 tcg_gen_shli_i64(tmp, load_gpr(ctx, rb), 3);
969 tcg_gen_neg_i64(tmp, tmp);
970 tcg_gen_andi_i64(tmp, tmp, 0x3f);
971 tcg_gen_shl_i64(vc, va, tmp);
972 tcg_temp_free(tmp);
974 gen_zapnoti(vc, vc, byte_mask);
977 /* EXTBL, EXTWL, EXTLL, EXTQL */
978 static void gen_ext_l(DisasContext *ctx, TCGv vc, TCGv va, int rb, bool islit,
979 uint8_t lit, uint8_t byte_mask)
981 if (islit) {
982 int pos = (lit & 7) * 8;
983 int len = cto32(byte_mask) * 8;
984 if (pos + len >= 64) {
985 len = 64 - pos;
987 tcg_gen_extract_i64(vc, va, pos, len);
988 } else {
989 TCGv tmp = tcg_temp_new();
990 tcg_gen_andi_i64(tmp, load_gpr(ctx, rb), 7);
991 tcg_gen_shli_i64(tmp, tmp, 3);
992 tcg_gen_shr_i64(vc, va, tmp);
993 tcg_temp_free(tmp);
994 gen_zapnoti(vc, vc, byte_mask);
998 /* INSWH, INSLH, INSQH */
999 static void gen_ins_h(DisasContext *ctx, TCGv vc, TCGv va, int rb, bool islit,
1000 uint8_t lit, uint8_t byte_mask)
1002 if (islit) {
1003 int pos = 64 - (lit & 7) * 8;
1004 int len = cto32(byte_mask) * 8;
1005 if (pos < len) {
1006 tcg_gen_extract_i64(vc, va, pos, len - pos);
1007 } else {
1008 tcg_gen_movi_i64(vc, 0);
1010 } else {
1011 TCGv tmp = tcg_temp_new();
1012 TCGv shift = tcg_temp_new();
1014 /* The instruction description has us left-shift the byte mask
1015 and extract bits <15:8> and apply that zap at the end. This
1016 is equivalent to simply performing the zap first and shifting
1017 afterward. */
1018 gen_zapnoti(tmp, va, byte_mask);
1020 /* If (B & 7) == 0, we need to shift by 64 and leave a zero. Do this
1021 portably by splitting the shift into two parts: shift_count-1 and 1.
1022 Arrange for the -1 by using ones-complement instead of
1023 twos-complement in the negation: ~(B * 8) & 63. */
1025 tcg_gen_shli_i64(shift, load_gpr(ctx, rb), 3);
1026 tcg_gen_not_i64(shift, shift);
1027 tcg_gen_andi_i64(shift, shift, 0x3f);
1029 tcg_gen_shr_i64(vc, tmp, shift);
1030 tcg_gen_shri_i64(vc, vc, 1);
1031 tcg_temp_free(shift);
1032 tcg_temp_free(tmp);
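/* Worked example of the split shift: for rb & 7 == 2 the insert needs
 * a right shift of 64 - 16 == 48; ~(2 * 8) & 63 == 47 and the extra
 * constant shift of 1 makes 48. For rb & 7 == 0 this totals 63 + 1 ==
 * 64, producing the required zero even though a single TCG shift of
 * 64 would be undefined behaviour. */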
1036 /* INSBL, INSWL, INSLL, INSQL */
1037 static void gen_ins_l(DisasContext *ctx, TCGv vc, TCGv va, int rb, bool islit,
1038 uint8_t lit, uint8_t byte_mask)
1040 if (islit) {
1041 int pos = (lit & 7) * 8;
1042 int len = cto32(byte_mask) * 8;
1043 if (pos + len > 64) {
1044 len = 64 - pos;
1046 tcg_gen_deposit_z_i64(vc, va, pos, len);
1047 } else {
1048 TCGv tmp = tcg_temp_new();
1049 TCGv shift = tcg_temp_new();
1051 /* The instruction description has us left-shift the byte mask
1052 and extract bits <15:8> and apply that zap at the end. This
1053 is equivalent to simply performing the zap first and shifting
1054 afterward. */
1055 gen_zapnoti(tmp, va, byte_mask);
1057 tcg_gen_andi_i64(shift, load_gpr(ctx, rb), 7);
1058 tcg_gen_shli_i64(shift, shift, 3);
1059 tcg_gen_shl_i64(vc, tmp, shift);
1060 tcg_temp_free(shift);
1061 tcg_temp_free(tmp);
1065 /* MSKWH, MSKLH, MSKQH */
1066 static void gen_msk_h(DisasContext *ctx, TCGv vc, TCGv va, int rb, bool islit,
1067 uint8_t lit, uint8_t byte_mask)
1069 if (islit) {
1070 gen_zapnoti(vc, va, ~((byte_mask << (lit & 7)) >> 8));
1071 } else {
1072 TCGv shift = tcg_temp_new();
1073 TCGv mask = tcg_temp_new();
1075 /* The instruction description is as above, where the byte_mask
1076 is shifted left, and then we extract bits <15:8>. This can be
1077 emulated with a right-shift on the expanded byte mask. This
1078 requires extra care because for an input <2:0> == 0 we need a
1079 shift of 64 bits in order to generate a zero. This is done by
1080 splitting the shift into two parts, the variable shift - 1
1081 followed by a constant 1 shift. The code we expand below is
1082 equivalent to ~(B * 8) & 63. */
1084 tcg_gen_shli_i64(shift, load_gpr(ctx, rb), 3);
1085 tcg_gen_not_i64(shift, shift);
1086 tcg_gen_andi_i64(shift, shift, 0x3f);
1087 tcg_gen_movi_i64(mask, zapnot_mask(byte_mask));
1088 tcg_gen_shr_i64(mask, mask, shift);
1089 tcg_gen_shri_i64(mask, mask, 1);
1091 tcg_gen_andc_i64(vc, va, mask);
1093 tcg_temp_free(mask);
1094 tcg_temp_free(shift);
1098 /* MSKBL, MSKWL, MSKLL, MSKQL */
1099 static void gen_msk_l(DisasContext *ctx, TCGv vc, TCGv va, int rb, bool islit,
1100 uint8_t lit, uint8_t byte_mask)
1102 if (islit) {
1103 gen_zapnoti(vc, va, ~(byte_mask << (lit & 7)));
1104 } else {
1105 TCGv shift = tcg_temp_new();
1106 TCGv mask = tcg_temp_new();
1108 tcg_gen_andi_i64(shift, load_gpr(ctx, rb), 7);
1109 tcg_gen_shli_i64(shift, shift, 3);
1110 tcg_gen_movi_i64(mask, zapnot_mask(byte_mask));
1111 tcg_gen_shl_i64(mask, mask, shift);
1113 tcg_gen_andc_i64(vc, va, mask);
1115 tcg_temp_free(mask);
1116 tcg_temp_free(shift);
1120 static void gen_rx(DisasContext *ctx, int ra, int set)
1122 TCGv tmp;
1124 if (ra != 31) {
1125 ld_flag_byte(ctx->ir[ra], ENV_FLAG_RX_SHIFT);
1128 tmp = tcg_const_i64(set);
1129 st_flag_byte(tmp, ENV_FLAG_RX_SHIFT);
1130 tcg_temp_free(tmp);
1133 static DisasJumpType gen_call_pal(DisasContext *ctx, int palcode)
1135 /* We're emulating OSF/1 PALcode. Many of these are trivial accesses
1136 to internal cpu registers. */
1138 /* Unprivileged PAL call */
1139 if (palcode >= 0x80 && palcode < 0xC0) {
1140 switch (palcode) {
1141 case 0x86:
1142 /* IMB */
1143 /* No-op inside QEMU. */
1144 break;
1145 case 0x9E:
1146 /* RDUNIQUE */
1147 tcg_gen_ld_i64(ctx->ir[IR_V0], cpu_env,
1148 offsetof(CPUAlphaState, unique));
1149 break;
1150 case 0x9F:
1151 /* WRUNIQUE */
1152 tcg_gen_st_i64(ctx->ir[IR_A0], cpu_env,
1153 offsetof(CPUAlphaState, unique));
1154 break;
1155 default:
1156 palcode &= 0xbf;
1157 goto do_call_pal;
1159 return DISAS_NEXT;
1162 #ifndef CONFIG_USER_ONLY
1163 /* Privileged PAL code */
1164 if (palcode < 0x40 && (ctx->tbflags & ENV_FLAG_PS_USER) == 0) {
1165 switch (palcode) {
1166 case 0x01:
1167 /* CFLUSH */
1168 /* No-op inside QEMU. */
1169 break;
1170 case 0x02:
1171 /* DRAINA */
1172 /* No-op inside QEMU. */
1173 break;
1174 case 0x2D:
1175 /* WRVPTPTR */
1176 tcg_gen_st_i64(ctx->ir[IR_A0], cpu_env,
1177 offsetof(CPUAlphaState, vptptr));
1178 break;
1179 case 0x31:
1180 /* WRVAL */
1181 tcg_gen_st_i64(ctx->ir[IR_A0], cpu_env,
1182 offsetof(CPUAlphaState, sysval));
1183 break;
1184 case 0x32:
1185 /* RDVAL */
1186 tcg_gen_ld_i64(ctx->ir[IR_V0], cpu_env,
1187 offsetof(CPUAlphaState, sysval));
1188 break;
1190 case 0x35:
1191 /* SWPIPL */
1192 /* Note that we already know we're in kernel mode, so we know
1193 that PS only contains the 3 IPL bits. */
1194 ld_flag_byte(ctx->ir[IR_V0], ENV_FLAG_PS_SHIFT);
1196 /* But make sure to store only the 3 IPL bits from the user. */
1198 TCGv tmp = tcg_temp_new();
1199 tcg_gen_andi_i64(tmp, ctx->ir[IR_A0], PS_INT_MASK);
1200 st_flag_byte(tmp, ENV_FLAG_PS_SHIFT);
1201 tcg_temp_free(tmp);
1204 /* Allow interrupts to be recognized right away. */
1205 tcg_gen_movi_i64(cpu_pc, ctx->base.pc_next);
1206 return DISAS_PC_UPDATED_NOCHAIN;
1208 case 0x36:
1209 /* RDPS */
1210 ld_flag_byte(ctx->ir[IR_V0], ENV_FLAG_PS_SHIFT);
1211 break;
1213 case 0x38:
1214 /* WRUSP */
1215 tcg_gen_st_i64(ctx->ir[IR_A0], cpu_env,
1216 offsetof(CPUAlphaState, usp));
1217 break;
1218 case 0x3A:
1219 /* RDUSP */
1220 tcg_gen_ld_i64(ctx->ir[IR_V0], cpu_env,
1221 offsetof(CPUAlphaState, usp));
1222 break;
1223 case 0x3C:
1224 /* WHAMI */
1225 tcg_gen_ld32s_i64(ctx->ir[IR_V0], cpu_env,
1226 -offsetof(AlphaCPU, env) + offsetof(CPUState, cpu_index));
1227 break;
1229 case 0x3E:
1230 /* WTINT */
1232 TCGv_i32 tmp = tcg_const_i32(1);
1233 tcg_gen_st_i32(tmp, cpu_env, -offsetof(AlphaCPU, env) +
1234 offsetof(CPUState, halted));
1235 tcg_temp_free_i32(tmp);
1237 tcg_gen_movi_i64(ctx->ir[IR_V0], 0);
1238 return gen_excp(ctx, EXCP_HALTED, 0);
1240 default:
1241 palcode &= 0x3f;
1242 goto do_call_pal;
1244 return DISAS_NEXT;
1246 #endif
1247 return gen_invalid(ctx);
1249 do_call_pal:
1250 #ifdef CONFIG_USER_ONLY
1251 return gen_excp(ctx, EXCP_CALL_PAL, palcode);
1252 #else
1254 TCGv tmp = tcg_temp_new();
1255 uint64_t exc_addr = ctx->base.pc_next;
1256 uint64_t entry = ctx->palbr;
1258 if (ctx->tbflags & ENV_FLAG_PAL_MODE) {
1259 exc_addr |= 1;
1260 } else {
1261 tcg_gen_movi_i64(tmp, 1);
1262 st_flag_byte(tmp, ENV_FLAG_PAL_SHIFT);
1265 tcg_gen_movi_i64(tmp, exc_addr);
1266 tcg_gen_st_i64(tmp, cpu_env, offsetof(CPUAlphaState, exc_addr));
1267 tcg_temp_free(tmp);
1269 entry += (palcode & 0x80
1270 ? 0x2000 + (palcode - 0x80) * 64
1271 : 0x1000 + palcode * 64);
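/* E.g. CALL_PAL 0x83 (callsys in the OSF/1 PALcode) is unprivileged
 * and not special-cased above, so it arrives here with
 *   entry = palbr + 0x2000 + (0x83 - 0x80) * 64 == palbr + 0x20c0
 * while privileged codes below 0x40 dispatch into the 0x1000 bank. */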
1273 /* Since the destination is running in PALmode, we don't really
1274 need the page permissions check. We'll see the existence of
1275 the page when we create the TB, and we'll flush all TBs if
1276 we change the PAL base register. */
1277 if (!use_exit_tb(ctx)) {
1278 tcg_gen_goto_tb(0);
1279 tcg_gen_movi_i64(cpu_pc, entry);
1280 tcg_gen_exit_tb((uintptr_t)ctx->base.tb);
1281 return DISAS_NORETURN;
1282 } else {
1283 tcg_gen_movi_i64(cpu_pc, entry);
1284 return DISAS_PC_UPDATED;
1287 #endif
1290 #ifndef CONFIG_USER_ONLY
1292 #define PR_LONG 0x200000
1294 static int cpu_pr_data(int pr)
1296 switch (pr) {
1297 case 2: return offsetof(CPUAlphaState, pcc_ofs) | PR_LONG;
1298 case 3: return offsetof(CPUAlphaState, trap_arg0);
1299 case 4: return offsetof(CPUAlphaState, trap_arg1);
1300 case 5: return offsetof(CPUAlphaState, trap_arg2);
1301 case 6: return offsetof(CPUAlphaState, exc_addr);
1302 case 7: return offsetof(CPUAlphaState, palbr);
1303 case 8: return offsetof(CPUAlphaState, ptbr);
1304 case 9: return offsetof(CPUAlphaState, vptptr);
1305 case 10: return offsetof(CPUAlphaState, unique);
1306 case 11: return offsetof(CPUAlphaState, sysval);
1307 case 12: return offsetof(CPUAlphaState, usp);
1309 case 40 ... 63:
1310 return offsetof(CPUAlphaState, scratch[pr - 40]);
1312 case 251:
1313 return offsetof(CPUAlphaState, alarm_expire);
1315 return 0;
1318 static DisasJumpType gen_mfpr(DisasContext *ctx, TCGv va, int regno)
1320 void (*helper)(TCGv);
1321 int data;
1323 switch (regno) {
1324 case 32 ... 39:
1325 /* Accessing the "non-shadow" general registers. */
1326 regno = regno == 39 ? 25 : regno - 32 + 8;
1327 tcg_gen_mov_i64(va, cpu_std_ir[regno]);
1328 break;
1330 case 250: /* WALLTIME */
1331 helper = gen_helper_get_walltime;
1332 goto do_helper;
1333 case 249: /* VMTIME */
1334 helper = gen_helper_get_vmtime;
1335 do_helper:
1336 if (use_icount) {
1337 gen_io_start();
1338 helper(va);
1339 gen_io_end();
1340 return DISAS_PC_STALE;
1341 } else {
1342 helper(va);
1344 break;
1346 case 0: /* PS */
1347 ld_flag_byte(va, ENV_FLAG_PS_SHIFT);
1348 break;
1349 case 1: /* FEN */
1350 ld_flag_byte(va, ENV_FLAG_FEN_SHIFT);
1351 break;
1353 default:
1354 /* The basic registers are data only, and unknown registers
1355 are read-zero, write-ignore. */
1356 data = cpu_pr_data(regno);
1357 if (data == 0) {
1358 tcg_gen_movi_i64(va, 0);
1359 } else if (data & PR_LONG) {
1360 tcg_gen_ld32s_i64(va, cpu_env, data & ~PR_LONG);
1361 } else {
1362 tcg_gen_ld_i64(va, cpu_env, data);
1364 break;
1367 return DISAS_NEXT;
1370 static DisasJumpType gen_mtpr(DisasContext *ctx, TCGv vb, int regno)
1372 int data;
1374 switch (regno) {
1375 case 255:
1376 /* TBIA */
1377 gen_helper_tbia(cpu_env);
1378 break;
1380 case 254:
1381 /* TBIS */
1382 gen_helper_tbis(cpu_env, vb);
1383 break;
1385 case 253:
1386 /* WAIT */
1388 TCGv_i32 tmp = tcg_const_i32(1);
1389 tcg_gen_st_i32(tmp, cpu_env, -offsetof(AlphaCPU, env) +
1390 offsetof(CPUState, halted));
1391 tcg_temp_free_i32(tmp);
1393 return gen_excp(ctx, EXCP_HALTED, 0);
1395 case 252:
1396 /* HALT */
1397 gen_helper_halt(vb);
1398 return DISAS_PC_STALE;
1400 case 251:
1401 /* ALARM */
1402 gen_helper_set_alarm(cpu_env, vb);
1403 break;
1405 case 7:
1406 /* PALBR */
1407 tcg_gen_st_i64(vb, cpu_env, offsetof(CPUAlphaState, palbr));
1408 /* Changing the PAL base register implies un-chaining all of the TBs
1409 that ended with a CALL_PAL. Since the base register usually only
1410 changes during boot, flushing everything works well. */
1411 gen_helper_tb_flush(cpu_env);
1412 return DISAS_PC_STALE;
1414 case 32 ... 39:
1415 /* Accessing the "non-shadow" general registers. */
1416 regno = regno == 39 ? 25 : regno - 32 + 8;
1417 tcg_gen_mov_i64(cpu_std_ir[regno], vb);
1418 break;
1420 case 0: /* PS */
1421 st_flag_byte(vb, ENV_FLAG_PS_SHIFT);
1422 break;
1423 case 1: /* FEN */
1424 st_flag_byte(vb, ENV_FLAG_FEN_SHIFT);
1425 break;
1427 default:
1428 /* The basic registers are data only, and unknown registers
1429 are read-zero, write-ignore. */
1430 data = cpu_pr_data(regno);
1431 if (data != 0) {
1432 if (data & PR_LONG) {
1433 tcg_gen_st32_i64(vb, cpu_env, data & ~PR_LONG);
1434 } else {
1435 tcg_gen_st_i64(vb, cpu_env, data);
1438 break;
1441 return DISAS_NEXT;
1443 #endif /* !CONFIG_USER_ONLY */
1445 #define REQUIRE_NO_LIT \
1446 do { \
1447 if (real_islit) { \
1448 goto invalid_opc; \
1450 } while (0)
1452 #define REQUIRE_AMASK(FLAG) \
1453 do { \
1454 if ((ctx->amask & AMASK_##FLAG) == 0) { \
1455 goto invalid_opc; \
1457 } while (0)
1459 #define REQUIRE_TB_FLAG(FLAG) \
1460 do { \
1461 if ((ctx->tbflags & (FLAG)) == 0) { \
1462 goto invalid_opc; \
1464 } while (0)
1466 #define REQUIRE_REG_31(WHICH) \
1467 do { \
1468 if (WHICH != 31) { \
1469 goto invalid_opc; \
1471 } while (0)
1473 static DisasJumpType translate_one(DisasContext *ctx, uint32_t insn)
1475 int32_t disp21, disp16, disp12 __attribute__((unused));
1476 uint16_t fn11;
1477 uint8_t opc, ra, rb, rc, fpfn, fn7, lit;
1478 bool islit, real_islit;
1479 TCGv va, vb, vc, tmp, tmp2;
1480 TCGv_i32 t32;
1481 DisasJumpType ret;
1483 /* Decode all instruction fields */
1484 opc = extract32(insn, 26, 6);
1485 ra = extract32(insn, 21, 5);
1486 rb = extract32(insn, 16, 5);
1487 rc = extract32(insn, 0, 5);
1488 real_islit = islit = extract32(insn, 12, 1);
1489 lit = extract32(insn, 13, 8);
1491 disp21 = sextract32(insn, 0, 21);
1492 disp16 = sextract32(insn, 0, 16);
1493 disp12 = sextract32(insn, 0, 12);
1495 fn11 = extract32(insn, 5, 11);
1496 fpfn = extract32(insn, 5, 6);
1497 fn7 = extract32(insn, 5, 7);
1499 if (rb == 31 && !islit) {
1500 islit = true;
1501 lit = 0;
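/* A decoding example (an illustrative encoding, not taken from a
 * binary): insn == 0x4020b403 splits into opc 0x10, ra 1, islit 1,
 * lit 5, fn7 0x20, rc 3, i.e. "addq r1, 5, r3". When rb == 31 with
 * no literal, the fixup above synthesizes a literal 0 instead, since
 * $31 always reads as zero. */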
1504 ret = DISAS_NEXT;
1505 switch (opc) {
1506 case 0x00:
1507 /* CALL_PAL */
1508 ret = gen_call_pal(ctx, insn & 0x03ffffff);
1509 break;
1510 case 0x01:
1511 /* OPC01 */
1512 goto invalid_opc;
1513 case 0x02:
1514 /* OPC02 */
1515 goto invalid_opc;
1516 case 0x03:
1517 /* OPC03 */
1518 goto invalid_opc;
1519 case 0x04:
1520 /* OPC04 */
1521 goto invalid_opc;
1522 case 0x05:
1523 /* OPC05 */
1524 goto invalid_opc;
1525 case 0x06:
1526 /* OPC06 */
1527 goto invalid_opc;
1528 case 0x07:
1529 /* OPC07 */
1530 goto invalid_opc;
1532 case 0x09:
1533 /* LDAH */
1534 disp16 = (uint32_t)disp16 << 16;
1535 /* fall through */
1536 case 0x08:
1537 /* LDA */
1538 va = dest_gpr(ctx, ra);
1539 /* It's worth special-casing immediate loads. */
1540 if (rb == 31) {
1541 tcg_gen_movi_i64(va, disp16);
1542 } else {
1543 tcg_gen_addi_i64(va, load_gpr(ctx, rb), disp16);
1545 break;
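/* E.g. an LDAH with displacement field 0x8000 and rb == 31 takes the
 * immediate fast path: disp16 becomes 0x8000 << 16 == INT32_MIN, and
 * the sign-extending movi leaves ra == 0xffffffff80000000 -- the
 * first half of the usual LDAH + LDA idiom for building 32-bit
 * constants. */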
1547 case 0x0A:
1548 /* LDBU */
1549 REQUIRE_AMASK(BWX);
1550 gen_load_mem(ctx, &tcg_gen_qemu_ld8u, ra, rb, disp16, 0, 0);
1551 break;
1552 case 0x0B:
1553 /* LDQ_U */
1554 gen_load_mem(ctx, &tcg_gen_qemu_ld64, ra, rb, disp16, 0, 1);
1555 break;
1556 case 0x0C:
1557 /* LDWU */
1558 REQUIRE_AMASK(BWX);
1559 gen_load_mem(ctx, &tcg_gen_qemu_ld16u, ra, rb, disp16, 0, 0);
1560 break;
1561 case 0x0D:
1562 /* STW */
1563 REQUIRE_AMASK(BWX);
1564 gen_store_mem(ctx, &tcg_gen_qemu_st16, ra, rb, disp16, 0, 0);
1565 break;
1566 case 0x0E:
1567 /* STB */
1568 REQUIRE_AMASK(BWX);
1569 gen_store_mem(ctx, &tcg_gen_qemu_st8, ra, rb, disp16, 0, 0);
1570 break;
1571 case 0x0F:
1572 /* STQ_U */
1573 gen_store_mem(ctx, &tcg_gen_qemu_st64, ra, rb, disp16, 0, 1);
1574 break;
1576 case 0x10:
1577 vc = dest_gpr(ctx, rc);
1578 vb = load_gpr_lit(ctx, rb, lit, islit);
1580 if (ra == 31) {
1581 if (fn7 == 0x00) {
1582 /* Special case ADDL as SEXTL. */
1583 tcg_gen_ext32s_i64(vc, vb);
1584 break;
1586 if (fn7 == 0x29) {
1587 /* Special case SUBQ as NEGQ. */
1588 tcg_gen_neg_i64(vc, vb);
1589 break;
1593 va = load_gpr(ctx, ra);
1594 switch (fn7) {
1595 case 0x00:
1596 /* ADDL */
1597 tcg_gen_add_i64(vc, va, vb);
1598 tcg_gen_ext32s_i64(vc, vc);
1599 break;
1600 case 0x02:
1601 /* S4ADDL */
1602 tmp = tcg_temp_new();
1603 tcg_gen_shli_i64(tmp, va, 2);
1604 tcg_gen_add_i64(tmp, tmp, vb);
1605 tcg_gen_ext32s_i64(vc, tmp);
1606 tcg_temp_free(tmp);
1607 break;
1608 case 0x09:
1609 /* SUBL */
1610 tcg_gen_sub_i64(vc, va, vb);
1611 tcg_gen_ext32s_i64(vc, vc);
1612 break;
1613 case 0x0B:
1614 /* S4SUBL */
1615 tmp = tcg_temp_new();
1616 tcg_gen_shli_i64(tmp, va, 2);
1617 tcg_gen_sub_i64(tmp, tmp, vb);
1618 tcg_gen_ext32s_i64(vc, tmp);
1619 tcg_temp_free(tmp);
1620 break;
1621 case 0x0F:
1622 /* CMPBGE */
1623 if (ra == 31) {
1624 /* Special case 0 >= X as X == 0. */
1625 gen_helper_cmpbe0(vc, vb);
1626 } else {
1627 gen_helper_cmpbge(vc, va, vb);
1629 break;
1630 case 0x12:
1631 /* S8ADDL */
1632 tmp = tcg_temp_new();
1633 tcg_gen_shli_i64(tmp, va, 3);
1634 tcg_gen_add_i64(tmp, tmp, vb);
1635 tcg_gen_ext32s_i64(vc, tmp);
1636 tcg_temp_free(tmp);
1637 break;
1638 case 0x1B:
1639 /* S8SUBL */
1640 tmp = tcg_temp_new();
1641 tcg_gen_shli_i64(tmp, va, 3);
1642 tcg_gen_sub_i64(tmp, tmp, vb);
1643 tcg_gen_ext32s_i64(vc, tmp);
1644 tcg_temp_free(tmp);
1645 break;
1646 case 0x1D:
1647 /* CMPULT */
1648 tcg_gen_setcond_i64(TCG_COND_LTU, vc, va, vb);
1649 break;
1650 case 0x20:
1651 /* ADDQ */
1652 tcg_gen_add_i64(vc, va, vb);
1653 break;
1654 case 0x22:
1655 /* S4ADDQ */
1656 tmp = tcg_temp_new();
1657 tcg_gen_shli_i64(tmp, va, 2);
1658 tcg_gen_add_i64(vc, tmp, vb);
1659 tcg_temp_free(tmp);
1660 break;
1661 case 0x29:
1662 /* SUBQ */
1663 tcg_gen_sub_i64(vc, va, vb);
1664 break;
1665 case 0x2B:
1666 /* S4SUBQ */
1667 tmp = tcg_temp_new();
1668 tcg_gen_shli_i64(tmp, va, 2);
1669 tcg_gen_sub_i64(vc, tmp, vb);
1670 tcg_temp_free(tmp);
1671 break;
1672 case 0x2D:
1673 /* CMPEQ */
1674 tcg_gen_setcond_i64(TCG_COND_EQ, vc, va, vb);
1675 break;
1676 case 0x32:
1677 /* S8ADDQ */
1678 tmp = tcg_temp_new();
1679 tcg_gen_shli_i64(tmp, va, 3);
1680 tcg_gen_add_i64(vc, tmp, vb);
1681 tcg_temp_free(tmp);
1682 break;
1683 case 0x3B:
1684 /* S8SUBQ */
1685 tmp = tcg_temp_new();
1686 tcg_gen_shli_i64(tmp, va, 3);
1687 tcg_gen_sub_i64(vc, tmp, vb);
1688 tcg_temp_free(tmp);
1689 break;
1690 case 0x3D:
1691 /* CMPULE */
1692 tcg_gen_setcond_i64(TCG_COND_LEU, vc, va, vb);
1693 break;
1694 case 0x40:
1695 /* ADDL/V */
1696 tmp = tcg_temp_new();
1697 tcg_gen_ext32s_i64(tmp, va);
1698 tcg_gen_ext32s_i64(vc, vb);
1699 tcg_gen_add_i64(tmp, tmp, vc);
1700 tcg_gen_ext32s_i64(vc, tmp);
1701 gen_helper_check_overflow(cpu_env, vc, tmp);
1702 tcg_temp_free(tmp);
1703 break;
1704 case 0x49:
1705 /* SUBL/V */
1706 tmp = tcg_temp_new();
1707 tcg_gen_ext32s_i64(tmp, va);
1708 tcg_gen_ext32s_i64(vc, vb);
1709 tcg_gen_sub_i64(tmp, tmp, vc);
1710 tcg_gen_ext32s_i64(vc, tmp);
1711 gen_helper_check_overflow(cpu_env, vc, tmp);
1712 tcg_temp_free(tmp);
1713 break;
1714 case 0x4D:
1715 /* CMPLT */
1716 tcg_gen_setcond_i64(TCG_COND_LT, vc, va, vb);
1717 break;
1718 case 0x60:
1719 /* ADDQ/V */
1720 tmp = tcg_temp_new();
1721 tmp2 = tcg_temp_new();
1722 tcg_gen_eqv_i64(tmp, va, vb);
1723 tcg_gen_mov_i64(tmp2, va);
1724 tcg_gen_add_i64(vc, va, vb);
1725 tcg_gen_xor_i64(tmp2, tmp2, vc);
1726 tcg_gen_and_i64(tmp, tmp, tmp2);
1727 tcg_gen_shri_i64(tmp, tmp, 63);
1728 tcg_gen_movi_i64(tmp2, 0);
1729 gen_helper_check_overflow(cpu_env, tmp, tmp2);
1730 tcg_temp_free(tmp);
1731 tcg_temp_free(tmp2);
1732 break;
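/* The predicate built above is the standard two's-complement rule:
 * signed addition overflows iff both operands have the same sign and
 * the result's sign differs, i.e. bit 63 of (~(va ^ vb)) & (va ^ vc)
 * (the eqv computes ~(va ^ vb)). check_overflow() traps when its two
 * arguments differ, so comparing the extracted bit with the zero in
 * tmp2 raises the integer-overflow trap exactly on overflow. */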
1733 case 0x69:
1734 /* SUBQ/V */
1735 tmp = tcg_temp_new();
1736 tmp2 = tcg_temp_new();
1737 tcg_gen_xor_i64(tmp, va, vb);
1738 tcg_gen_mov_i64(tmp2, va);
1739 tcg_gen_sub_i64(vc, va, vb);
1740 tcg_gen_xor_i64(tmp2, tmp2, vc);
1741 tcg_gen_and_i64(tmp, tmp, tmp2);
1742 tcg_gen_shri_i64(tmp, tmp, 63);
1743 tcg_gen_movi_i64(tmp2, 0);
1744 gen_helper_check_overflow(cpu_env, tmp, tmp2);
1745 tcg_temp_free(tmp);
1746 tcg_temp_free(tmp2);
1747 break;
1748 case 0x6D:
1749 /* CMPLE */
1750 tcg_gen_setcond_i64(TCG_COND_LE, vc, va, vb);
1751 break;
1752 default:
1753 goto invalid_opc;
1755 break;
1757 case 0x11:
1758 if (fn7 == 0x20) {
1759 if (rc == 31) {
1760 /* Special case BIS as NOP. */
1761 break;
1763 if (ra == 31) {
1764 /* Special case BIS as MOV. */
1765 vc = dest_gpr(ctx, rc);
1766 if (islit) {
1767 tcg_gen_movi_i64(vc, lit);
1768 } else {
1769 tcg_gen_mov_i64(vc, load_gpr(ctx, rb));
1771 break;
1775 vc = dest_gpr(ctx, rc);
1776 vb = load_gpr_lit(ctx, rb, lit, islit);
1778 if (fn7 == 0x28 && ra == 31) {
1779 /* Special case ORNOT as NOT. */
1780 tcg_gen_not_i64(vc, vb);
1781 break;
1784 va = load_gpr(ctx, ra);
1785 switch (fn7) {
1786 case 0x00:
1787 /* AND */
1788 tcg_gen_and_i64(vc, va, vb);
1789 break;
1790 case 0x08:
1791 /* BIC */
1792 tcg_gen_andc_i64(vc, va, vb);
1793 break;
1794 case 0x14:
1795 /* CMOVLBS */
1796 tmp = tcg_temp_new();
1797 tcg_gen_andi_i64(tmp, va, 1);
1798 tcg_gen_movcond_i64(TCG_COND_NE, vc, tmp, load_zero(ctx),
1799 vb, load_gpr(ctx, rc));
1800 tcg_temp_free(tmp);
1801 break;
1802 case 0x16:
1803 /* CMOVLBC */
1804 tmp = tcg_temp_new();
1805 tcg_gen_andi_i64(tmp, va, 1);
1806 tcg_gen_movcond_i64(TCG_COND_EQ, vc, tmp, load_zero(ctx),
1807 vb, load_gpr(ctx, rc));
1808 tcg_temp_free(tmp);
1809 break;
1810 case 0x20:
1811 /* BIS */
1812 tcg_gen_or_i64(vc, va, vb);
1813 break;
1814 case 0x24:
1815 /* CMOVEQ */
1816 tcg_gen_movcond_i64(TCG_COND_EQ, vc, va, load_zero(ctx),
1817 vb, load_gpr(ctx, rc));
1818 break;
1819 case 0x26:
1820 /* CMOVNE */
1821 tcg_gen_movcond_i64(TCG_COND_NE, vc, va, load_zero(ctx),
1822 vb, load_gpr(ctx, rc));
1823 break;
1824 case 0x28:
1825 /* ORNOT */
1826 tcg_gen_orc_i64(vc, va, vb);
1827 break;
1828 case 0x40:
1829 /* XOR */
1830 tcg_gen_xor_i64(vc, va, vb);
1831 break;
1832 case 0x44:
1833 /* CMOVLT */
1834 tcg_gen_movcond_i64(TCG_COND_LT, vc, va, load_zero(ctx),
1835 vb, load_gpr(ctx, rc));
1836 break;
1837 case 0x46:
1838 /* CMOVGE */
1839 tcg_gen_movcond_i64(TCG_COND_GE, vc, va, load_zero(ctx),
1840 vb, load_gpr(ctx, rc));
1841 break;
1842 case 0x48:
1843 /* EQV */
1844 tcg_gen_eqv_i64(vc, va, vb);
1845 break;
1846 case 0x61:
1847 /* AMASK */
1848 REQUIRE_REG_31(ra);
1849 tcg_gen_andi_i64(vc, vb, ~ctx->amask);
1850 break;
1851 case 0x64:
1852 /* CMOVLE */
1853 tcg_gen_movcond_i64(TCG_COND_LE, vc, va, load_zero(ctx),
1854 vb, load_gpr(ctx, rc));
1855 break;
1856 case 0x66:
1857 /* CMOVGT */
1858 tcg_gen_movcond_i64(TCG_COND_GT, vc, va, load_zero(ctx),
1859 vb, load_gpr(ctx, rc));
1860 break;
1861 case 0x6C:
1862 /* IMPLVER */
1863 REQUIRE_REG_31(ra);
1864 tcg_gen_movi_i64(vc, ctx->implver);
1865 break;
1866 default:
1867 goto invalid_opc;
1869 break;
1871 case 0x12:
1872 vc = dest_gpr(ctx, rc);
1873 va = load_gpr(ctx, ra);
1874 switch (fn7) {
1875 case 0x02:
1876 /* MSKBL */
1877 gen_msk_l(ctx, vc, va, rb, islit, lit, 0x01);
1878 break;
1879 case 0x06:
1880 /* EXTBL */
1881 gen_ext_l(ctx, vc, va, rb, islit, lit, 0x01);
1882 break;
1883 case 0x0B:
1884 /* INSBL */
1885 gen_ins_l(ctx, vc, va, rb, islit, lit, 0x01);
1886 break;
1887 case 0x12:
1888 /* MSKWL */
1889 gen_msk_l(ctx, vc, va, rb, islit, lit, 0x03);
1890 break;
1891 case 0x16:
1892 /* EXTWL */
1893 gen_ext_l(ctx, vc, va, rb, islit, lit, 0x03);
1894 break;
1895 case 0x1B:
1896 /* INSWL */
1897 gen_ins_l(ctx, vc, va, rb, islit, lit, 0x03);
1898 break;
1899 case 0x22:
1900 /* MSKLL */
1901 gen_msk_l(ctx, vc, va, rb, islit, lit, 0x0f);
1902 break;
1903 case 0x26:
1904 /* EXTLL */
1905 gen_ext_l(ctx, vc, va, rb, islit, lit, 0x0f);
1906 break;
1907 case 0x2B:
1908 /* INSLL */
1909 gen_ins_l(ctx, vc, va, rb, islit, lit, 0x0f);
1910 break;
1911 case 0x30:
1912 /* ZAP */
1913 if (islit) {
1914 gen_zapnoti(vc, va, ~lit);
1915 } else {
1916 gen_helper_zap(vc, va, load_gpr(ctx, rb));
1918 break;
1919 case 0x31:
1920 /* ZAPNOT */
1921 if (islit) {
1922 gen_zapnoti(vc, va, lit);
1923 } else {
1924 gen_helper_zapnot(vc, va, load_gpr(ctx, rb));
1926 break;
1927 case 0x32:
1928 /* MSKQL */
1929 gen_msk_l(ctx, vc, va, rb, islit, lit, 0xff);
1930 break;
1931 case 0x34:
1932 /* SRL */
1933 if (islit) {
1934 tcg_gen_shri_i64(vc, va, lit & 0x3f);
1935 } else {
1936 tmp = tcg_temp_new();
1937 vb = load_gpr(ctx, rb);
1938 tcg_gen_andi_i64(tmp, vb, 0x3f);
1939 tcg_gen_shr_i64(vc, va, tmp);
1940 tcg_temp_free(tmp);
1942 break;
1943 case 0x36:
1944 /* EXTQL */
1945 gen_ext_l(ctx, vc, va, rb, islit, lit, 0xff);
1946 break;
1947 case 0x39:
1948 /* SLL */
1949 if (islit) {
1950 tcg_gen_shli_i64(vc, va, lit & 0x3f);
1951 } else {
1952 tmp = tcg_temp_new();
1953 vb = load_gpr(ctx, rb);
1954 tcg_gen_andi_i64(tmp, vb, 0x3f);
1955 tcg_gen_shl_i64(vc, va, tmp);
1956 tcg_temp_free(tmp);
1958 break;
1959 case 0x3B:
1960 /* INSQL */
1961 gen_ins_l(ctx, vc, va, rb, islit, lit, 0xff);
1962 break;
1963 case 0x3C:
1964 /* SRA */
1965 if (islit) {
1966 tcg_gen_sari_i64(vc, va, lit & 0x3f);
1967 } else {
1968 tmp = tcg_temp_new();
1969 vb = load_gpr(ctx, rb);
1970 tcg_gen_andi_i64(tmp, vb, 0x3f);
1971 tcg_gen_sar_i64(vc, va, tmp);
1972 tcg_temp_free(tmp);
1974 break;
1975 case 0x52:
1976 /* MSKWH */
1977 gen_msk_h(ctx, vc, va, rb, islit, lit, 0x03);
1978 break;
1979 case 0x57:
1980 /* INSWH */
1981 gen_ins_h(ctx, vc, va, rb, islit, lit, 0x03);
1982 break;
1983 case 0x5A:
1984 /* EXTWH */
1985 gen_ext_h(ctx, vc, va, rb, islit, lit, 0x03);
1986 break;
1987 case 0x62:
1988 /* MSKLH */
1989 gen_msk_h(ctx, vc, va, rb, islit, lit, 0x0f);
1990 break;
1991 case 0x67:
1992 /* INSLH */
1993 gen_ins_h(ctx, vc, va, rb, islit, lit, 0x0f);
1994 break;
1995 case 0x6A:
1996 /* EXTLH */
1997 gen_ext_h(ctx, vc, va, rb, islit, lit, 0x0f);
1998 break;
1999 case 0x72:
2000 /* MSKQH */
2001 gen_msk_h(ctx, vc, va, rb, islit, lit, 0xff);
2002 break;
2003 case 0x77:
2004 /* INSQH */
2005 gen_ins_h(ctx, vc, va, rb, islit, lit, 0xff);
2006 break;
2007 case 0x7A:
2008 /* EXTQH */
2009 gen_ext_h(ctx, vc, va, rb, islit, lit, 0xff);
2010 break;
2011 default:
2012 goto invalid_opc;
2014 break;
2016 case 0x13:
2017 vc = dest_gpr(ctx, rc);
2018 vb = load_gpr_lit(ctx, rb, lit, islit);
2019 va = load_gpr(ctx, ra);
2020 switch (fn7) {
2021 case 0x00:
2022 /* MULL */
2023 tcg_gen_mul_i64(vc, va, vb);
2024 tcg_gen_ext32s_i64(vc, vc);
2025 break;
2026 case 0x20:
2027 /* MULQ */
2028 tcg_gen_mul_i64(vc, va, vb);
2029 break;
2030 case 0x30:
2031 /* UMULH */
2032 tmp = tcg_temp_new();
2033 tcg_gen_mulu2_i64(tmp, vc, va, vb);
2034 tcg_temp_free(tmp);
2035 break;
2036 case 0x40:
2037 /* MULL/V */
2038 tmp = tcg_temp_new();
2039 tcg_gen_ext32s_i64(tmp, va);
2040 tcg_gen_ext32s_i64(vc, vb);
2041 tcg_gen_mul_i64(tmp, tmp, vc);
2042 tcg_gen_ext32s_i64(vc, tmp);
2043 gen_helper_check_overflow(cpu_env, vc, tmp);
2044 tcg_temp_free(tmp);
2045 break;
2046 case 0x60:
2047 /* MULQ/V */
2048 tmp = tcg_temp_new();
2049 tmp2 = tcg_temp_new();
2050 tcg_gen_muls2_i64(vc, tmp, va, vb);
2051 tcg_gen_sari_i64(tmp2, vc, 63);
2052 gen_helper_check_overflow(cpu_env, tmp, tmp2);
2053 tcg_temp_free(tmp);
2054 tcg_temp_free(tmp2);
2055 break;
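/* For MULQ/V, muls2 forms the full 128-bit signed product (low half
 * in vc, high half in tmp). The product fits in 64 bits exactly when
 * the high half equals the sign-extension of the low half, so tmp2 is
 * set to vc >> 63 (arithmetic) and check_overflow() traps on any
 * mismatch between tmp and tmp2. */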
2056 default:
2057 goto invalid_opc;
2059 break;
2061 case 0x14:
2062 REQUIRE_AMASK(FIX);
2063 vc = dest_fpr(ctx, rc);
2064 switch (fpfn) { /* fn11 & 0x3F */
2065 case 0x04:
2066 /* ITOFS */
2067 REQUIRE_REG_31(rb);
2068 t32 = tcg_temp_new_i32();
2069 va = load_gpr(ctx, ra);
2070 tcg_gen_extrl_i64_i32(t32, va);
2071 gen_helper_memory_to_s(vc, t32);
2072 tcg_temp_free_i32(t32);
2073 break;
2074 case 0x0A:
2075 /* SQRTF */
2076 REQUIRE_REG_31(ra);
2077 vb = load_fpr(ctx, rb);
2078 gen_helper_sqrtf(vc, cpu_env, vb);
2079 break;
2080 case 0x0B:
2081 /* SQRTS */
2082 REQUIRE_REG_31(ra);
2083 gen_sqrts(ctx, rb, rc, fn11);
2084 break;
2085 case 0x14:
2086 /* ITOFF */
2087 REQUIRE_REG_31(rb);
2088 t32 = tcg_temp_new_i32();
2089 va = load_gpr(ctx, ra);
2090 tcg_gen_extrl_i64_i32(t32, va);
2091 gen_helper_memory_to_f(vc, t32);
2092 tcg_temp_free_i32(t32);
2093 break;
2094 case 0x24:
2095 /* ITOFT */
2096 REQUIRE_REG_31(rb);
2097 va = load_gpr(ctx, ra);
2098 tcg_gen_mov_i64(vc, va);
2099 break;
2100 case 0x2A:
2101 /* SQRTG */
2102 REQUIRE_REG_31(ra);
2103 vb = load_fpr(ctx, rb);
2104 gen_helper_sqrtg(vc, cpu_env, vb);
2105 break;
2106 case 0x02B:
2107 /* SQRTT */
2108 REQUIRE_REG_31(ra);
2109 gen_sqrtt(ctx, rb, rc, fn11);
2110 break;
2111 default:
2112 goto invalid_opc;
2114 break;
2116 case 0x15:
2117 /* VAX floating point */
2118 /* XXX: rounding mode and trap are ignored (!) */
2119 vc = dest_fpr(ctx, rc);
2120 vb = load_fpr(ctx, rb);
2121 va = load_fpr(ctx, ra);
2122 switch (fpfn) { /* fn11 & 0x3F */
2123 case 0x00:
2124 /* ADDF */
2125 gen_helper_addf(vc, cpu_env, va, vb);
2126 break;
2127 case 0x01:
2128 /* SUBF */
2129 gen_helper_subf(vc, cpu_env, va, vb);
2130 break;
2131 case 0x02:
2132 /* MULF */
2133 gen_helper_mulf(vc, cpu_env, va, vb);
2134 break;
2135 case 0x03:
2136 /* DIVF */
2137 gen_helper_divf(vc, cpu_env, va, vb);
2138 break;
2139 case 0x1E:
2140 /* CVTDG -- TODO */
2141 REQUIRE_REG_31(ra);
2142 goto invalid_opc;
2143 case 0x20:
2144 /* ADDG */
2145 gen_helper_addg(vc, cpu_env, va, vb);
2146 break;
2147 case 0x21:
2148 /* SUBG */
2149 gen_helper_subg(vc, cpu_env, va, vb);
2150 break;
2151 case 0x22:
2152 /* MULG */
2153 gen_helper_mulg(vc, cpu_env, va, vb);
2154 break;
2155 case 0x23:
2156 /* DIVG */
2157 gen_helper_divg(vc, cpu_env, va, vb);
2158 break;
2159 case 0x25:
2160 /* CMPGEQ */
2161 gen_helper_cmpgeq(vc, cpu_env, va, vb);
2162 break;
2163 case 0x26:
2164 /* CMPGLT */
2165 gen_helper_cmpglt(vc, cpu_env, va, vb);
2166 break;
2167 case 0x27:
2168 /* CMPGLE */
2169 gen_helper_cmpgle(vc, cpu_env, va, vb);
2170 break;
2171 case 0x2C:
2172 /* CVTGF */
2173 REQUIRE_REG_31(ra);
2174 gen_helper_cvtgf(vc, cpu_env, vb);
2175 break;
2176 case 0x2D:
2177 /* CVTGD -- TODO */
2178 REQUIRE_REG_31(ra);
2179 goto invalid_opc;
2180 case 0x2F:
2181 /* CVTGQ */
2182 REQUIRE_REG_31(ra);
2183 gen_helper_cvtgq(vc, cpu_env, vb);
2184 break;
2185 case 0x3C:
2186 /* CVTQF */
2187 REQUIRE_REG_31(ra);
2188 gen_helper_cvtqf(vc, cpu_env, vb);
2189 break;
2190 case 0x3E:
2191 /* CVTQG */
2192 REQUIRE_REG_31(ra);
2193 gen_helper_cvtqg(vc, cpu_env, vb);
2194 break;
2195 default:
2196 goto invalid_opc;
2198 break;
2200 case 0x16:
2201 /* IEEE floating-point */
2202 switch (fpfn) { /* fn11 & 0x3F */
2203 case 0x00:
2204 /* ADDS */
2205 gen_adds(ctx, ra, rb, rc, fn11);
2206 break;
2207 case 0x01:
2208 /* SUBS */
2209 gen_subs(ctx, ra, rb, rc, fn11);
2210 break;
2211 case 0x02:
2212 /* MULS */
2213 gen_muls(ctx, ra, rb, rc, fn11);
2214 break;
2215 case 0x03:
2216 /* DIVS */
2217 gen_divs(ctx, ra, rb, rc, fn11);
2218 break;
2219 case 0x20:
2220 /* ADDT */
2221 gen_addt(ctx, ra, rb, rc, fn11);
2222 break;
2223 case 0x21:
2224 /* SUBT */
2225 gen_subt(ctx, ra, rb, rc, fn11);
2226 break;
2227 case 0x22:
2228 /* MULT */
2229 gen_mult(ctx, ra, rb, rc, fn11);
2230 break;
2231 case 0x23:
2232 /* DIVT */
2233 gen_divt(ctx, ra, rb, rc, fn11);
2234 break;
2235 case 0x24:
2236 /* CMPTUN */
2237 gen_cmptun(ctx, ra, rb, rc, fn11);
2238 break;
2239 case 0x25:
2240 /* CMPTEQ */
2241 gen_cmpteq(ctx, ra, rb, rc, fn11);
2242 break;
2243 case 0x26:
2244 /* CMPTLT */
2245 gen_cmptlt(ctx, ra, rb, rc, fn11);
2246 break;
2247 case 0x27:
2248 /* CMPTLE */
2249 gen_cmptle(ctx, ra, rb, rc, fn11);
2250 break;
2251 case 0x2C:
2252 REQUIRE_REG_31(ra);
2253 if (fn11 == 0x2AC || fn11 == 0x6AC) {
2254 /* CVTST */
2255 gen_cvtst(ctx, rb, rc, fn11);
2256 } else {
2257 /* CVTTS */
2258 gen_cvtts(ctx, rb, rc, fn11);
2260 break;
2261 case 0x2F:
2262 /* CVTTQ */
2263 REQUIRE_REG_31(ra);
2264 gen_cvttq(ctx, rb, rc, fn11);
2265 break;
2266 case 0x3C:
2267 /* CVTQS */
2268 REQUIRE_REG_31(ra);
2269 gen_cvtqs(ctx, rb, rc, fn11);
2270 break;
2271 case 0x3E:
2272 /* CVTQT */
2273 REQUIRE_REG_31(ra);
2274 gen_cvtqt(ctx, rb, rc, fn11);
2275 break;
2276 default:
2277 goto invalid_opc;
2279 break;
2281 case 0x17:
2282 switch (fn11) {
2283 case 0x010:
2284 /* CVTLQ */
2285 REQUIRE_REG_31(ra);
2286 vc = dest_fpr(ctx, rc);
2287 vb = load_fpr(ctx, rb);
2288 gen_cvtlq(vc, vb);
2289 break;
2290 case 0x020:
2291 /* CPYS */
2292 if (rc == 31) {
2293 /* Special case CPYS as FNOP. */
2294 } else {
2295 vc = dest_fpr(ctx, rc);
2296 va = load_fpr(ctx, ra);
2297 if (ra == rb) {
2298 /* Special case CPYS as FMOV. */
2299 tcg_gen_mov_i64(vc, va);
2300 } else {
2301 vb = load_fpr(ctx, rb);
2302 gen_cpy_mask(vc, va, vb, 0, 0x8000000000000000ULL);
2305 break;
2306 case 0x021:
2307 /* CPYSN */
2308 vc = dest_fpr(ctx, rc);
2309 vb = load_fpr(ctx, rb);
2310 va = load_fpr(ctx, ra);
2311 gen_cpy_mask(vc, va, vb, 1, 0x8000000000000000ULL);
2312 break;
2313 case 0x022:
2314 /* CPYSE */
2315 vc = dest_fpr(ctx, rc);
2316 vb = load_fpr(ctx, rb);
2317 va = load_fpr(ctx, ra);
2318 gen_cpy_mask(vc, va, vb, 0, 0xFFF0000000000000ULL);
2319 break;
2320 case 0x024:
2321 /* MT_FPCR */
2322 va = load_fpr(ctx, ra);
2323 gen_helper_store_fpcr(cpu_env, va);
2324 if (ctx->tb_rm == QUAL_RM_D) {
2325 /* Re-do the copy of the rounding mode to fp_status
2326 the next time we use dynamic rounding. */
2327 ctx->tb_rm = -1;
2328 }
2329 break;
2330 case 0x025:
2331 /* MF_FPCR */
2332 va = dest_fpr(ctx, ra);
2333 gen_helper_load_fpcr(va, cpu_env);
2334 break;
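/* FCMOVxx: if fa compares xx against zero, copy fb into fc;
   otherwise fc is left unchanged.  */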
2335 case 0x02A:
2336 /* FCMOVEQ */
2337 gen_fcmov(ctx, TCG_COND_EQ, ra, rb, rc);
2338 break;
2339 case 0x02B:
2340 /* FCMOVNE */
2341 gen_fcmov(ctx, TCG_COND_NE, ra, rb, rc);
2342 break;
2343 case 0x02C:
2344 /* FCMOVLT */
2345 gen_fcmov(ctx, TCG_COND_LT, ra, rb, rc);
2346 break;
2347 case 0x02D:
2348 /* FCMOVGE */
2349 gen_fcmov(ctx, TCG_COND_GE, ra, rb, rc);
2350 break;
2351 case 0x02E:
2352 /* FCMOVLE */
2353 gen_fcmov(ctx, TCG_COND_LE, ra, rb, rc);
2354 break;
2355 case 0x02F:
2356 /* FCMOVGT */
2357 gen_fcmov(ctx, TCG_COND_GT, ra, rb, rc);
2358 break;
2359 case 0x030: /* CVTQL */
2360 case 0x130: /* CVTQL/V */
2361 case 0x530: /* CVTQL/SV */
2362 REQUIRE_REG_31(ra);
2363 vc = dest_fpr(ctx, rc);
2364 vb = load_fpr(ctx, rb);
2365 gen_helper_cvtql(vc, cpu_env, vb);
2366 gen_fp_exc_raise(rc, fn11);
2367 break;
2368 default:
2369 goto invalid_opc;
2370 }
2371 break;
2373 case 0x18:
2374 switch ((uint16_t)disp16) {
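/* For the misc instructions of opcode 0x18, the function code is
   encoded in the 16-bit displacement field rather than in fn7/fn11.  */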
2375 case 0x0000:
2376 /* TRAPB */
2377 /* No-op. */
2378 break;
2379 case 0x0400:
2380 /* EXCB */
2381 /* No-op. */
2382 break;
2383 case 0x4000:
2384 /* MB */
2385 tcg_gen_mb(TCG_MO_ALL | TCG_BAR_SC);
2386 break;
2387 case 0x4400:
2388 /* WMB */
2389 tcg_gen_mb(TCG_MO_ST_ST | TCG_BAR_SC);
2390 break;
2391 case 0x8000:
2392 /* FETCH */
2393 /* No-op */
2394 break;
2395 case 0xA000:
2396 /* FETCH_M */
2397 /* No-op */
2398 break;
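/* RPCC returns the processor cycle counter.  When icount is in use
   the result depends on the exact instruction count, so the helper
   call is bracketed with gen_io_start/gen_io_end and the TB is ended
   via DISAS_PC_STALE to keep the count precise.  */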
2399 case 0xC000:
2400 /* RPCC */
2401 va = dest_gpr(ctx, ra);
2402 if (ctx->base.tb->cflags & CF_USE_ICOUNT) {
2403 gen_io_start();
2404 gen_helper_load_pcc(va, cpu_env);
2405 gen_io_end();
2406 ret = DISAS_PC_STALE;
2407 } else {
2408 gen_helper_load_pcc(va, cpu_env);
2409 }
2410 break;
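/* RC and RS read the per-CPU flag kept at ENV_FLAG_RX_SHIFT into ra
   and leave it cleared or set respectively; the last argument of
   gen_rx is the value the flag is left holding.  */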
2411 case 0xE000:
2412 /* RC */
2413 gen_rx(ctx, ra, 0);
2414 break;
2415 case 0xE800:
2416 /* ECB */
2417 break;
2418 case 0xF000:
2419 /* RS */
2420 gen_rx(ctx, ra, 1);
2421 break;
2422 case 0xF800:
2423 /* WH64 */
2424 /* No-op */
2425 break;
2426 case 0xFC00:
2427 /* WH64EN */
2428 /* No-op */
2429 break;
2430 default:
2431 goto invalid_opc;
2432 }
2433 break;
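/* HW_MFPR and HW_MTPR (below) access internal processor registers;
   the IPR index is taken from the low 16 bits of the instruction.  */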
2435 case 0x19:
2436 /* HW_MFPR (PALcode) */
2437 #ifndef CONFIG_USER_ONLY
2438 REQUIRE_TB_FLAG(ENV_FLAG_PAL_MODE);
2439 va = dest_gpr(ctx, ra);
2440 ret = gen_mfpr(ctx, va, insn & 0xffff);
2441 break;
2442 #else
2443 goto invalid_opc;
2444 #endif
2446 case 0x1A:
2447 /* JMP, JSR, RET, JSR_COROUTINE. These only differ by the branch
2448 prediction stack action, which of course we don't implement. */
2449 vb = load_gpr(ctx, rb);
2450 tcg_gen_andi_i64(cpu_pc, vb, ~3);
2451 if (ra != 31) {
2452 tcg_gen_movi_i64(ctx->ir[ra], ctx->base.pc_next);
2453 }
2454 ret = DISAS_PC_UPDATED;
2455 break;
2457 case 0x1B:
2458 /* HW_LD (PALcode) */
2459 #ifndef CONFIG_USER_ONLY
2460 REQUIRE_TB_FLAG(ENV_FLAG_PAL_MODE);
2461 {
2462 TCGv addr = tcg_temp_new();
2463 vb = load_gpr(ctx, rb);
2464 va = dest_gpr(ctx, ra);
2466 tcg_gen_addi_i64(addr, vb, disp12);
2467 switch ((insn >> 12) & 0xF) {
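/* Bits <15:12> of the instruction select the HW_LD variant:
   physical vs virtual, locked, and alternate-mode forms.  Only the
   variants needed by the emulation PALcode are implemented; the
   others fault as invalid.  */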
2468 case 0x0:
2469 /* Longword physical access (hw_ldl/p) */
2470 tcg_gen_qemu_ld_i64(va, addr, MMU_PHYS_IDX, MO_LESL);
2471 break;
2472 case 0x1:
2473 /* Quadword physical access (hw_ldq/p) */
2474 tcg_gen_qemu_ld_i64(va, addr, MMU_PHYS_IDX, MO_LEQ);
2475 break;
2476 case 0x2:
2477 /* Longword physical access with lock (hw_ldl_l/p) */
2478 gen_qemu_ldl_l(va, addr, MMU_PHYS_IDX);
2479 break;
2480 case 0x3:
2481 /* Quadword physical access with lock (hw_ldq_l/p) */
2482 gen_qemu_ldq_l(va, addr, MMU_PHYS_IDX);
2483 break;
2484 case 0x4:
2485 /* Longword virtual PTE fetch (hw_ldl/v) */
2486 goto invalid_opc;
2487 case 0x5:
2488 /* Quadword virtual PTE fetch (hw_ldq/v) */
2489 goto invalid_opc;
2491 case 0x6:
2492 /* Invalid */
2493 goto invalid_opc;
2494 case 0x7:
2495 /* Invalid */
2496 goto invalid_opc;
2497 case 0x8:
2498 /* Longword virtual access (hw_ldl) */
2499 goto invalid_opc;
2500 case 0x9:
2501 /* Quadword virtual access (hw_ldq) */
2502 goto invalid_opc;
2503 case 0xA:
2504 /* Longword virtual access with protection check (hw_ldl/w) */
2505 tcg_gen_qemu_ld_i64(va, addr, MMU_KERNEL_IDX, MO_LESL);
2506 break;
2507 case 0xB:
2508 /* Quadword virtual access with protection check (hw_ldq/w) */
2509 tcg_gen_qemu_ld_i64(va, addr, MMU_KERNEL_IDX, MO_LEQ);
2510 break;
2511 case 0xC:
2512 /* Longword virtual access with alt access mode (hw_ldl/a)*/
2513 goto invalid_opc;
2514 case 0xD:
2515 /* Quadword virtual access with alt access mode (hw_ldq/a) */
2516 goto invalid_opc;
2517 case 0xE:
2518 /* Longword virtual access with alternate access mode and
2519 protection checks (hw_ldl/wa) */
2520 tcg_gen_qemu_ld_i64(va, addr, MMU_USER_IDX, MO_LESL);
2521 break;
2522 case 0xF:
2523 /* Quadword virtual access with alternate access mode and
2524 protection checks (hw_ldq/wa) */
2525 tcg_gen_qemu_ld_i64(va, addr, MMU_USER_IDX, MO_LEQ);
2526 break;
2527 }
2528 tcg_temp_free(addr);
2529 break;
2530 }
2531 #else
2532 goto invalid_opc;
2533 #endif
2535 case 0x1C:
2536 vc = dest_gpr(ctx, rc);
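/* FTOIT/FTOIS move raw bits from an FP register to an integer
   register and must be handled before the common rb/literal load
   below, since their source is the FP register file.  */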
2537 if (fn7 == 0x70) {
2538 /* FTOIT */
2539 REQUIRE_AMASK(FIX);
2540 REQUIRE_REG_31(rb);
2541 va = load_fpr(ctx, ra);
2542 tcg_gen_mov_i64(vc, va);
2543 break;
2544 } else if (fn7 == 0x78) {
2545 /* FTOIS */
2546 REQUIRE_AMASK(FIX);
2547 REQUIRE_REG_31(rb);
2548 t32 = tcg_temp_new_i32();
2549 va = load_fpr(ctx, ra);
2550 gen_helper_s_to_memory(t32, va);
2551 tcg_gen_ext_i32_i64(vc, t32);
2552 tcg_temp_free_i32(t32);
2553 break;
2554 }
2556 vb = load_gpr_lit(ctx, rb, lit, islit);
2557 switch (fn7) {
2558 case 0x00:
2559 /* SEXTB */
2560 REQUIRE_AMASK(BWX);
2561 REQUIRE_REG_31(ra);
2562 tcg_gen_ext8s_i64(vc, vb);
2563 break;
2564 case 0x01:
2565 /* SEXTW */
2566 REQUIRE_AMASK(BWX);
2567 REQUIRE_REG_31(ra);
2568 tcg_gen_ext16s_i64(vc, vb);
2569 break;
2570 case 0x30:
2571 /* CTPOP */
2572 REQUIRE_AMASK(CIX);
2573 REQUIRE_REG_31(ra);
2574 REQUIRE_NO_LIT;
2575 tcg_gen_ctpop_i64(vc, vb);
2576 break;
2577 case 0x31:
2578 /* PERR */
2579 REQUIRE_AMASK(MVI);
2580 REQUIRE_NO_LIT;
2581 va = load_gpr(ctx, ra);
2582 gen_helper_perr(vc, va, vb);
2583 break;
2584 case 0x32:
2585 /* CTLZ */
2586 REQUIRE_AMASK(CIX);
2587 REQUIRE_REG_31(ra);
2588 REQUIRE_NO_LIT;
2589 tcg_gen_clzi_i64(vc, vb, 64);
2590 break;
2591 case 0x33:
2592 /* CTTZ */
2593 REQUIRE_AMASK(CIX);
2594 REQUIRE_REG_31(ra);
2595 REQUIRE_NO_LIT;
2596 tcg_gen_ctzi_i64(vc, vb, 64);
2597 break;
2598 case 0x34:
2599 /* UNPKBW */
2600 REQUIRE_AMASK(MVI);
2601 REQUIRE_REG_31(ra);
2602 REQUIRE_NO_LIT;
2603 gen_helper_unpkbw(vc, vb);
2604 break;
2605 case 0x35:
2606 /* UNPKBL */
2607 REQUIRE_AMASK(MVI);
2608 REQUIRE_REG_31(ra);
2609 REQUIRE_NO_LIT;
2610 gen_helper_unpkbl(vc, vb);
2611 break;
2612 case 0x36:
2613 /* PKWB */
2614 REQUIRE_AMASK(MVI);
2615 REQUIRE_REG_31(ra);
2616 REQUIRE_NO_LIT;
2617 gen_helper_pkwb(vc, vb);
2618 break;
2619 case 0x37:
2620 /* PKLB */
2621 REQUIRE_AMASK(MVI);
2622 REQUIRE_REG_31(ra);
2623 REQUIRE_NO_LIT;
2624 gen_helper_pklb(vc, vb);
2625 break;
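/* The MVI min/max operations work lane-wise on a 64-bit register:
   the *B8 forms on eight bytes, the *W4 forms on four 16-bit words.  */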
2626 case 0x38:
2627 /* MINSB8 */
2628 REQUIRE_AMASK(MVI);
2629 va = load_gpr(ctx, ra);
2630 gen_helper_minsb8(vc, va, vb);
2631 break;
2632 case 0x39:
2633 /* MINSW4 */
2634 REQUIRE_AMASK(MVI);
2635 va = load_gpr(ctx, ra);
2636 gen_helper_minsw4(vc, va, vb);
2637 break;
2638 case 0x3A:
2639 /* MINUB8 */
2640 REQUIRE_AMASK(MVI);
2641 va = load_gpr(ctx, ra);
2642 gen_helper_minub8(vc, va, vb);
2643 break;
2644 case 0x3B:
2645 /* MINUW4 */
2646 REQUIRE_AMASK(MVI);
2647 va = load_gpr(ctx, ra);
2648 gen_helper_minuw4(vc, va, vb);
2649 break;
2650 case 0x3C:
2651 /* MAXUB8 */
2652 REQUIRE_AMASK(MVI);
2653 va = load_gpr(ctx, ra);
2654 gen_helper_maxub8(vc, va, vb);
2655 break;
2656 case 0x3D:
2657 /* MAXUW4 */
2658 REQUIRE_AMASK(MVI);
2659 va = load_gpr(ctx, ra);
2660 gen_helper_maxuw4(vc, va, vb);
2661 break;
2662 case 0x3E:
2663 /* MAXSB8 */
2664 REQUIRE_AMASK(MVI);
2665 va = load_gpr(ctx, ra);
2666 gen_helper_maxsb8(vc, va, vb);
2667 break;
2668 case 0x3F:
2669 /* MAXSW4 */
2670 REQUIRE_AMASK(MVI);
2671 va = load_gpr(ctx, ra);
2672 gen_helper_maxsw4(vc, va, vb);
2673 break;
2674 default:
2675 goto invalid_opc;
2676 }
2677 break;
2679 case 0x1D:
2680 /* HW_MTPR (PALcode) */
2681 #ifndef CONFIG_USER_ONLY
2682 REQUIRE_TB_FLAG(ENV_FLAG_PAL_MODE);
2683 vb = load_gpr(ctx, rb);
2684 ret = gen_mtpr(ctx, vb, insn & 0xffff);
2685 break;
2686 #else
2687 goto invalid_opc;
2688 #endif
2690 case 0x1E:
2691 /* HW_RET (PALcode) */
2692 #ifndef CONFIG_USER_ONLY
2693 REQUIRE_TB_FLAG(ENV_FLAG_PAL_MODE);
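/* The return target in vb encodes more than a PC: bit 0 becomes the
   new PALmode flag and bits <1:0> are stripped from the PC itself;
   the lock and RX flags are cleared on the way out.  */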
2694 if (rb == 31) {
2695 /* Pre-EV6 CPUs interpreted this as HW_REI, loading the return
2696 address from EXC_ADDR. This turns out to be useful for our
2697 emulation PALcode, so continue to accept it. */
2698 ctx->lit = vb = tcg_temp_new();
2699 tcg_gen_ld_i64(vb, cpu_env, offsetof(CPUAlphaState, exc_addr));
2700 } else {
2701 vb = load_gpr(ctx, rb);
2702 }
2703 tcg_gen_movi_i64(cpu_lock_addr, -1);
2704 tmp = tcg_temp_new();
2705 tcg_gen_movi_i64(tmp, 0);
2706 st_flag_byte(tmp, ENV_FLAG_RX_SHIFT);
2707 tcg_gen_andi_i64(tmp, vb, 1);
2708 st_flag_byte(tmp, ENV_FLAG_PAL_SHIFT);
2709 tcg_temp_free(tmp);
2710 tcg_gen_andi_i64(cpu_pc, vb, ~3);
2711 /* Allow interrupts to be recognized right away. */
2712 ret = DISAS_PC_UPDATED_NOCHAIN;
2713 break;
2714 #else
2715 goto invalid_opc;
2716 #endif
2718 case 0x1F:
2719 /* HW_ST (PALcode) */
2720 #ifndef CONFIG_USER_ONLY
2721 REQUIRE_TB_FLAG(ENV_FLAG_PAL_MODE);
2722 {
2723 switch ((insn >> 12) & 0xF) {
2724 case 0x0:
2725 /* Longword physical access */
2726 va = load_gpr(ctx, ra);
2727 vb = load_gpr(ctx, rb);
2728 tmp = tcg_temp_new();
2729 tcg_gen_addi_i64(tmp, vb, disp12);
2730 tcg_gen_qemu_st_i64(va, tmp, MMU_PHYS_IDX, MO_LESL);
2731 tcg_temp_free(tmp);
2732 break;
2733 case 0x1:
2734 /* Quadword physical access */
2735 va = load_gpr(ctx, ra);
2736 vb = load_gpr(ctx, rb);
2737 tmp = tcg_temp_new();
2738 tcg_gen_addi_i64(tmp, vb, disp12);
2739 tcg_gen_qemu_st_i64(va, tmp, MMU_PHYS_IDX, MO_LEQ);
2740 tcg_temp_free(tmp);
2741 break;
2742 case 0x2:
2743 /* Longword physical access with lock */
2744 ret = gen_store_conditional(ctx, ra, rb, disp12,
2745 MMU_PHYS_IDX, MO_LESL);
2746 break;
2747 case 0x3:
2748 /* Quadword physical access with lock */
2749 ret = gen_store_conditional(ctx, ra, rb, disp12,
2750 MMU_PHYS_IDX, MO_LEQ);
2751 break;
2752 case 0x4:
2753 /* Longword virtual access */
2754 goto invalid_opc;
2755 case 0x5:
2756 /* Quadword virtual access */
2757 goto invalid_opc;
2758 case 0x6:
2759 /* Invalid */
2760 goto invalid_opc;
2761 case 0x7:
2762 /* Invalid */
2763 goto invalid_opc;
2764 case 0x8:
2765 /* Invalid */
2766 goto invalid_opc;
2767 case 0x9:
2768 /* Invalid */
2769 goto invalid_opc;
2770 case 0xA:
2771 /* Invalid */
2772 goto invalid_opc;
2773 case 0xB:
2774 /* Invalid */
2775 goto invalid_opc;
2776 case 0xC:
2777 /* Longword virtual access with alternate access mode */
2778 goto invalid_opc;
2779 case 0xD:
2780 /* Quadword virtual access with alternate access mode */
2781 goto invalid_opc;
2782 case 0xE:
2783 /* Invalid */
2784 goto invalid_opc;
2785 case 0xF:
2786 /* Invalid */
2787 goto invalid_opc;
2788 }
2789 break;
2790 }
2791 #else
2792 goto invalid_opc;
2793 #endif
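/* Opcodes 0x20-0x27 are the FP loads and stores: F and G (VAX) and
   S (IEEE single) formats are reformatted to and from the in-register
   representation by the gen_qemu_* helpers, while T (IEEE double) is
   a raw 64-bit access.  */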
2794 case 0x20:
2795 /* LDF */
2796 gen_load_mem(ctx, &gen_qemu_ldf, ra, rb, disp16, 1, 0);
2797 break;
2798 case 0x21:
2799 /* LDG */
2800 gen_load_mem(ctx, &gen_qemu_ldg, ra, rb, disp16, 1, 0);
2801 break;
2802 case 0x22:
2803 /* LDS */
2804 gen_load_mem(ctx, &gen_qemu_lds, ra, rb, disp16, 1, 0);
2805 break;
2806 case 0x23:
2807 /* LDT */
2808 gen_load_mem(ctx, &tcg_gen_qemu_ld64, ra, rb, disp16, 1, 0);
2809 break;
2810 case 0x24:
2811 /* STF */
2812 gen_store_mem(ctx, &gen_qemu_stf, ra, rb, disp16, 1, 0);
2813 break;
2814 case 0x25:
2815 /* STG */
2816 gen_store_mem(ctx, &gen_qemu_stg, ra, rb, disp16, 1, 0);
2817 break;
2818 case 0x26:
2819 /* STS */
2820 gen_store_mem(ctx, &gen_qemu_sts, ra, rb, disp16, 1, 0);
2821 break;
2822 case 0x27:
2823 /* STT */
2824 gen_store_mem(ctx, &tcg_gen_qemu_st64, ra, rb, disp16, 1, 0);
2825 break;
2826 case 0x28:
2827 /* LDL */
2828 gen_load_mem(ctx, &tcg_gen_qemu_ld32s, ra, rb, disp16, 0, 0);
2829 break;
2830 case 0x29:
2831 /* LDQ */
2832 gen_load_mem(ctx, &tcg_gen_qemu_ld64, ra, rb, disp16, 0, 0);
2833 break;
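/* LDx_L records the address and loaded value in cpu_lock_addr and
   cpu_lock_value; the matching STx_C is implemented by
   gen_store_conditional, essentially as a compare-and-swap against
   the recorded value.  */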
2834 case 0x2A:
2835 /* LDL_L */
2836 gen_load_mem(ctx, &gen_qemu_ldl_l, ra, rb, disp16, 0, 0);
2837 break;
2838 case 0x2B:
2839 /* LDQ_L */
2840 gen_load_mem(ctx, &gen_qemu_ldq_l, ra, rb, disp16, 0, 0);
2841 break;
2842 case 0x2C:
2843 /* STL */
2844 gen_store_mem(ctx, &tcg_gen_qemu_st32, ra, rb, disp16, 0, 0);
2845 break;
2846 case 0x2D:
2847 /* STQ */
2848 gen_store_mem(ctx, &tcg_gen_qemu_st64, ra, rb, disp16, 0, 0);
2849 break;
2850 case 0x2E:
2851 /* STL_C */
2852 ret = gen_store_conditional(ctx, ra, rb, disp16,
2853 ctx->mem_idx, MO_LESL);
2854 break;
2855 case 0x2F:
2856 /* STQ_C */
2857 ret = gen_store_conditional(ctx, ra, rb, disp16,
2858 ctx->mem_idx, MO_LEQ);
2859 break;
2860 case 0x30:
2861 /* BR */
2862 ret = gen_bdirect(ctx, ra, disp21);
2863 break;
2864 case 0x31: /* FBEQ */
2865 ret = gen_fbcond(ctx, TCG_COND_EQ, ra, disp21);
2866 break;
2867 case 0x32: /* FBLT */
2868 ret = gen_fbcond(ctx, TCG_COND_LT, ra, disp21);
2869 break;
2870 case 0x33: /* FBLE */
2871 ret = gen_fbcond(ctx, TCG_COND_LE, ra, disp21);
2872 break;
2873 case 0x34:
2874 /* BSR */
2875 ret = gen_bdirect(ctx, ra, disp21);
2876 break;
2877 case 0x35: /* FBNE */
2878 ret = gen_fbcond(ctx, TCG_COND_NE, ra, disp21);
2879 break;
2880 case 0x36: /* FBGE */
2881 ret = gen_fbcond(ctx, TCG_COND_GE, ra, disp21);
2882 break;
2883 case 0x37: /* FBGT */
2884 ret = gen_fbcond(ctx, TCG_COND_GT, ra, disp21);
2885 break;
2886 case 0x38:
2887 /* BLBC */
2888 ret = gen_bcond(ctx, TCG_COND_EQ, ra, disp21, 1);
2889 break;
2890 case 0x39:
2891 /* BEQ */
2892 ret = gen_bcond(ctx, TCG_COND_EQ, ra, disp21, 0);
2893 break;
2894 case 0x3A:
2895 /* BLT */
2896 ret = gen_bcond(ctx, TCG_COND_LT, ra, disp21, 0);
2897 break;
2898 case 0x3B:
2899 /* BLE */
2900 ret = gen_bcond(ctx, TCG_COND_LE, ra, disp21, 0);
2901 break;
2902 case 0x3C:
2903 /* BLBS */
2904 ret = gen_bcond(ctx, TCG_COND_NE, ra, disp21, 1);
2905 break;
2906 case 0x3D:
2907 /* BNE */
2908 ret = gen_bcond(ctx, TCG_COND_NE, ra, disp21, 0);
2909 break;
2910 case 0x3E:
2911 /* BGE */
2912 ret = gen_bcond(ctx, TCG_COND_GE, ra, disp21, 0);
2913 break;
2914 case 0x3F:
2915 /* BGT */
2916 ret = gen_bcond(ctx, TCG_COND_GT, ra, disp21, 0);
2917 break;
2918 invalid_opc:
2919 ret = gen_invalid(ctx);
2920 break;
2921 }
2923 return ret;
2924 }
2926 static int alpha_tr_init_disas_context(DisasContextBase *dcbase,
2927 CPUState *cpu, int max_insns)
2928 {
2929 DisasContext *ctx = container_of(dcbase, DisasContext, base);
2930 CPUAlphaState *env = cpu->env_ptr;
2931 int64_t bound, mask;
2933 ctx->tbflags = ctx->base.tb->flags;
2934 ctx->mem_idx = cpu_mmu_index(env, false);
2935 ctx->implver = env->implver;
2936 ctx->amask = env->amask;
2938 #ifdef CONFIG_USER_ONLY
2939 ctx->ir = cpu_std_ir;
2940 #else
2941 ctx->palbr = env->palbr;
2942 ctx->ir = (ctx->tbflags & ENV_FLAG_PAL_MODE ? cpu_pal_ir : cpu_std_ir);
2943 #endif
2945 /* ??? Every TB begins with an unset rounding mode, to be initialized on
2946 the first fp insn of the TB. Alternatively we could define a proper
2947 default for every TB (e.g. QUAL_RM_N or QUAL_RM_D) and make sure
2948 to reset the FP_STATUS to that default at the end of any TB that
2949 changes the default. We could even (gasp) dynamically figure out
2950 what default would be most efficient given the running program. */
2951 ctx->tb_rm = -1;
2952 /* Similarly for flush-to-zero. */
2953 ctx->tb_ftz = -1;
2955 TCGV_UNUSED_I64(ctx->zero);
2956 TCGV_UNUSED_I64(ctx->sink);
2957 TCGV_UNUSED_I64(ctx->lit);
2959 /* Bound the number of insns to execute to those left on the page. */
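/* ORing the PC with the mask leaves the offset below the page (or
   superpage) boundary intact and sets all higher bits, so negating
   yields the bytes remaining and dividing by 4 the instructions.
   E.g. with 8 KiB target pages, a pc_first 0x1000 bytes into a page
   gives -(pc_first | mask) = 0x1000, i.e. a bound of 1024 insns.  */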
2960 if (in_superpage(ctx, ctx->base.pc_first)) {
2961 mask = -1ULL << 41;
2962 } else {
2963 mask = TARGET_PAGE_MASK;
2964 }
2965 bound = -(ctx->base.pc_first | mask) / 4;
2967 return MIN(max_insns, bound);
2968 }
2970 static void alpha_tr_tb_start(DisasContextBase *db, CPUState *cpu)
2971 {
2972 }
2974 static void alpha_tr_insn_start(DisasContextBase *dcbase, CPUState *cpu)
2975 {
2976 tcg_gen_insn_start(dcbase->pc_next);
2977 }
2979 static bool alpha_tr_breakpoint_check(DisasContextBase *dcbase, CPUState *cpu,
2980 const CPUBreakpoint *bp)
2981 {
2982 DisasContext *ctx = container_of(dcbase, DisasContext, base);
2984 ctx->base.is_jmp = gen_excp(ctx, EXCP_DEBUG, 0);
2986 /* The address covered by the breakpoint must be included in
2987 [tb->pc, tb->pc + tb->size) in order for it to be
2988 properly cleared -- thus we increment the PC here so that
2989 the logic setting tb->size below does the right thing. */
2990 ctx->base.pc_next += 4;
2991 return true;
2992 }
2994 static void alpha_tr_translate_insn(DisasContextBase *dcbase, CPUState *cpu)
2995 {
2996 DisasContext *ctx = container_of(dcbase, DisasContext, base);
2997 CPUAlphaState *env = cpu->env_ptr;
2998 uint32_t insn = cpu_ldl_code(env, ctx->base.pc_next);
3000 ctx->base.pc_next += 4;
3001 ctx->base.is_jmp = translate_one(ctx, insn);
3003 free_context_temps(ctx);
3004 translator_loop_temp_check(&ctx->base);
3005 }
3007 static void alpha_tr_tb_stop(DisasContextBase *dcbase, CPUState *cpu)
3008 {
3009 DisasContext *ctx = container_of(dcbase, DisasContext, base);
3011 switch (ctx->base.is_jmp) {
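/* Exit strategies, cheapest first: DISAS_TOO_MANY chains directly to
   the next TB when it is reachable via goto_tb; DISAS_PC_STALE must
   update cpu_pc first; DISAS_PC_UPDATED tries a TB lookup instead of
   a full exit; DISAS_PC_UPDATED_NOCHAIN always exits so that pending
   interrupts are recognized; single-stepping raises EXCP_DEBUG.  */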
3012 case DISAS_NORETURN:
3013 break;
3014 case DISAS_TOO_MANY:
3015 if (use_goto_tb(ctx, ctx->base.pc_next)) {
3016 tcg_gen_goto_tb(0);
3017 tcg_gen_movi_i64(cpu_pc, ctx->base.pc_next);
3018 tcg_gen_exit_tb((uintptr_t)ctx->base.tb);
3019 }
3020 /* FALLTHRU */
3021 case DISAS_PC_STALE:
3022 tcg_gen_movi_i64(cpu_pc, ctx->base.pc_next);
3023 /* FALLTHRU */
3024 case DISAS_PC_UPDATED:
3025 if (!use_exit_tb(ctx)) {
3026 tcg_gen_lookup_and_goto_ptr();
3027 break;
3028 }
3029 /* FALLTHRU */
3030 case DISAS_PC_UPDATED_NOCHAIN:
3031 if (ctx->base.singlestep_enabled) {
3032 gen_excp_1(EXCP_DEBUG, 0);
3033 } else {
3034 tcg_gen_exit_tb(0);
3035 }
3036 break;
3037 default:
3038 g_assert_not_reached();
3039 }
3040 }
3042 static void alpha_tr_disas_log(const DisasContextBase *dcbase, CPUState *cpu)
3043 {
3044 qemu_log("IN: %s\n", lookup_symbol(dcbase->pc_first));
3045 log_target_disas(cpu, dcbase->pc_first, dcbase->tb->size, 1);
3046 }
3048 static const TranslatorOps alpha_tr_ops = {
3049 .init_disas_context = alpha_tr_init_disas_context,
3050 .tb_start = alpha_tr_tb_start,
3051 .insn_start = alpha_tr_insn_start,
3052 .breakpoint_check = alpha_tr_breakpoint_check,
3053 .translate_insn = alpha_tr_translate_insn,
3054 .tb_stop = alpha_tr_tb_stop,
3055 .disas_log = alpha_tr_disas_log,
3056 };
3058 void gen_intermediate_code(CPUState *cpu, TranslationBlock *tb)
3059 {
3060 DisasContext dc;
3061 translator_loop(&alpha_tr_ops, &dc.base, cpu, tb);
3062 }
3064 void restore_state_to_opc(CPUAlphaState *env, TranslationBlock *tb,
3065 target_ulong *data)
3066 {
3067 env->pc = data[0];
3068 }