1 /*
2 * Alpha emulation cpu translation for qemu.
4 * Copyright (c) 2007 Jocelyn Mayer
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2.1 of the License, or (at your option) any later version.
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
20 #include "qemu/osdep.h"
21 #include "cpu.h"
22 #include "sysemu/cpus.h"
23 #include "sysemu/cpu-timers.h"
24 #include "disas/disas.h"
25 #include "qemu/host-utils.h"
26 #include "exec/exec-all.h"
27 #include "tcg/tcg-op.h"
28 #include "exec/cpu_ldst.h"
29 #include "exec/helper-proto.h"
30 #include "exec/helper-gen.h"
31 #include "exec/translator.h"
32 #include "exec/log.h"
35 #undef ALPHA_DEBUG_DISAS
36 #define CONFIG_SOFTFLOAT_INLINE
38 #ifdef ALPHA_DEBUG_DISAS
39 # define LOG_DISAS(...) qemu_log_mask(CPU_LOG_TB_IN_ASM, ## __VA_ARGS__)
40 #else
41 # define LOG_DISAS(...) do { } while (0)
42 #endif
44 typedef struct DisasContext DisasContext;
45 struct DisasContext {
46 DisasContextBase base;
48 #ifndef CONFIG_USER_ONLY
49 uint64_t palbr;
50 #endif
51 uint32_t tbflags;
52 int mem_idx;
54 /* implver and amask values for this CPU. */
55 int implver;
56 int amask;
58 /* Current rounding mode for this TB. */
59 int tb_rm;
60 /* Current flush-to-zero setting for this TB. */
61 int tb_ftz;
63 /* The set of registers active in the current context. */
64 TCGv *ir;
66 /* Temporaries for $31 and $f31 as source and destination. */
67 TCGv zero;
68 TCGv sink;
71 /* Target-specific return values from translate_one, indicating the
72 state of the TB. Note that DISAS_NEXT indicates that we are not
73 exiting the TB. */
74 #define DISAS_PC_UPDATED_NOCHAIN DISAS_TARGET_0
75 #define DISAS_PC_UPDATED DISAS_TARGET_1
76 #define DISAS_PC_STALE DISAS_TARGET_2
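/* Roughly: _UPDATED_NOCHAIN means cpu_pc already holds the next PC but
   the TB must exit without goto_tb chaining (e.g. so that pending
   interrupts are noticed); _UPDATED means cpu_pc holds the next PC and
   any exit, chained or indirect, is fine; _STALE means cpu_pc has not
   been written yet and must be stored before leaving the TB. */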
78 /* global register indexes */
79 static TCGv cpu_std_ir[31];
80 static TCGv cpu_fir[31];
81 static TCGv cpu_pc;
82 static TCGv cpu_lock_addr;
83 static TCGv cpu_lock_value;
85 #ifndef CONFIG_USER_ONLY
86 static TCGv cpu_pal_ir[31];
87 #endif
89 #include "exec/gen-icount.h"
91 void alpha_translate_init(void)
93 #define DEF_VAR(V) { &cpu_##V, #V, offsetof(CPUAlphaState, V) }
95 typedef struct { TCGv *var; const char *name; int ofs; } GlobalVar;
96 static const GlobalVar vars[] = {
97 DEF_VAR(pc),
98 DEF_VAR(lock_addr),
99 DEF_VAR(lock_value),
102 #undef DEF_VAR
104 /* Use the symbolic register names that match the disassembler. */
105 static const char greg_names[31][4] = {
106 "v0", "t0", "t1", "t2", "t3", "t4", "t5", "t6",
107 "t7", "s0", "s1", "s2", "s3", "s4", "s5", "fp",
108 "a0", "a1", "a2", "a3", "a4", "a5", "t8", "t9",
109 "t10", "t11", "ra", "t12", "at", "gp", "sp"
111 static const char freg_names[31][4] = {
112 "f0", "f1", "f2", "f3", "f4", "f5", "f6", "f7",
113 "f8", "f9", "f10", "f11", "f12", "f13", "f14", "f15",
114 "f16", "f17", "f18", "f19", "f20", "f21", "f22", "f23",
115 "f24", "f25", "f26", "f27", "f28", "f29", "f30"
117 #ifndef CONFIG_USER_ONLY
118 static const char shadow_names[8][8] = {
119 "pal_t7", "pal_s0", "pal_s1", "pal_s2",
120 "pal_s3", "pal_s4", "pal_s5", "pal_t11"
122 #endif
124 int i;
126 for (i = 0; i < 31; i++) {
127 cpu_std_ir[i] = tcg_global_mem_new_i64(cpu_env,
128 offsetof(CPUAlphaState, ir[i]),
129 greg_names[i]);
132 for (i = 0; i < 31; i++) {
133 cpu_fir[i] = tcg_global_mem_new_i64(cpu_env,
134 offsetof(CPUAlphaState, fir[i]),
135 freg_names[i]);
138 #ifndef CONFIG_USER_ONLY
139 memcpy(cpu_pal_ir, cpu_std_ir, sizeof(cpu_pal_ir));
140 for (i = 0; i < 8; i++) {
141 int r = (i == 7 ? 25 : i + 8);
142 cpu_pal_ir[r] = tcg_global_mem_new_i64(cpu_env,
143 offsetof(CPUAlphaState,
144 shadow[i]),
145 shadow_names[i]);
147 #endif
149 for (i = 0; i < ARRAY_SIZE(vars); ++i) {
150 const GlobalVar *v = &vars[i];
151 *v->var = tcg_global_mem_new_i64(cpu_env, v->ofs, v->name);
155 static TCGv load_zero(DisasContext *ctx)
157 if (!ctx->zero) {
158 ctx->zero = tcg_constant_i64(0);
160 return ctx->zero;
163 static TCGv dest_sink(DisasContext *ctx)
165 if (!ctx->sink) {
166 ctx->sink = tcg_temp_new();
168 return ctx->sink;
171 static void free_context_temps(DisasContext *ctx)
173 if (ctx->sink) {
174 tcg_gen_discard_i64(ctx->sink);
175 tcg_temp_free(ctx->sink);
176 ctx->sink = NULL;
180 static TCGv load_gpr(DisasContext *ctx, unsigned reg)
182 if (likely(reg < 31)) {
183 return ctx->ir[reg];
184 } else {
185 return load_zero(ctx);
189 static TCGv load_gpr_lit(DisasContext *ctx, unsigned reg,
190 uint8_t lit, bool islit)
192 if (islit) {
193 return tcg_constant_i64(lit);
194 } else if (likely(reg < 31)) {
195 return ctx->ir[reg];
196 } else {
197 return load_zero(ctx);
201 static TCGv dest_gpr(DisasContext *ctx, unsigned reg)
203 if (likely(reg < 31)) {
204 return ctx->ir[reg];
205 } else {
206 return dest_sink(ctx);
210 static TCGv load_fpr(DisasContext *ctx, unsigned reg)
212 if (likely(reg < 31)) {
213 return cpu_fir[reg];
214 } else {
215 return load_zero(ctx);
219 static TCGv dest_fpr(DisasContext *ctx, unsigned reg)
221 if (likely(reg < 31)) {
222 return cpu_fir[reg];
223 } else {
224 return dest_sink(ctx);
228 static int get_flag_ofs(unsigned shift)
230 int ofs = offsetof(CPUAlphaState, flags);
231 #ifdef HOST_WORDS_BIGENDIAN
232 ofs += 3 - (shift / 8);
233 #else
234 ofs += shift / 8;
235 #endif
236 return ofs;
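/* E.g. a flag kept in bits <15:8> of env->flags (shift 8) lives at
   byte offset 1 on a little-endian host and 3 - 1 = 2 on a big-endian
   host, so the byte loads/stores below touch the same flag either way. */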
239 static void ld_flag_byte(TCGv val, unsigned shift)
241 tcg_gen_ld8u_i64(val, cpu_env, get_flag_ofs(shift));
244 static void st_flag_byte(TCGv val, unsigned shift)
246 tcg_gen_st8_i64(val, cpu_env, get_flag_ofs(shift));
249 static void gen_excp_1(int exception, int error_code)
251 TCGv_i32 tmp1, tmp2;
253 tmp1 = tcg_constant_i32(exception);
254 tmp2 = tcg_constant_i32(error_code);
255 gen_helper_excp(cpu_env, tmp1, tmp2);
258 static DisasJumpType gen_excp(DisasContext *ctx, int exception, int error_code)
260 tcg_gen_movi_i64(cpu_pc, ctx->base.pc_next);
261 gen_excp_1(exception, error_code);
262 return DISAS_NORETURN;
265 static inline DisasJumpType gen_invalid(DisasContext *ctx)
267 return gen_excp(ctx, EXCP_OPCDEC, 0);
270 static inline void gen_qemu_ldf(TCGv t0, TCGv t1, int flags)
272 TCGv_i32 tmp32 = tcg_temp_new_i32();
273 tcg_gen_qemu_ld_i32(tmp32, t1, flags, MO_LEUL);
274 gen_helper_memory_to_f(t0, tmp32);
275 tcg_temp_free_i32(tmp32);
278 static inline void gen_qemu_ldg(TCGv t0, TCGv t1, int flags)
280 TCGv tmp = tcg_temp_new();
281 tcg_gen_qemu_ld_i64(tmp, t1, flags, MO_LEQ);
282 gen_helper_memory_to_g(t0, tmp);
283 tcg_temp_free(tmp);
286 static inline void gen_qemu_lds(TCGv t0, TCGv t1, int flags)
288 TCGv_i32 tmp32 = tcg_temp_new_i32();
289 tcg_gen_qemu_ld_i32(tmp32, t1, flags, MO_LEUL);
290 gen_helper_memory_to_s(t0, tmp32);
291 tcg_temp_free_i32(tmp32);
294 static inline void gen_qemu_ldl_l(TCGv t0, TCGv t1, int flags)
296 tcg_gen_qemu_ld_i64(t0, t1, flags, MO_LESL);
297 tcg_gen_mov_i64(cpu_lock_addr, t1);
298 tcg_gen_mov_i64(cpu_lock_value, t0);
301 static inline void gen_qemu_ldq_l(TCGv t0, TCGv t1, int flags)
303 tcg_gen_qemu_ld_i64(t0, t1, flags, MO_LEQ);
304 tcg_gen_mov_i64(cpu_lock_addr, t1);
305 tcg_gen_mov_i64(cpu_lock_value, t0);
308 static inline void gen_load_mem(DisasContext *ctx,
309 void (*tcg_gen_qemu_load)(TCGv t0, TCGv t1,
310 int flags),
311 int ra, int rb, int32_t disp16, bool fp,
312 bool clear)
314 TCGv tmp, addr, va;
316 /* LDQ_U with ra $31 is UNOP. Various other loads are forms of
317 prefetches, which we can treat as nops. No worries about
318 missed exceptions here. */
319 if (unlikely(ra == 31)) {
320 return;
323 tmp = tcg_temp_new();
324 addr = load_gpr(ctx, rb);
326 if (disp16) {
327 tcg_gen_addi_i64(tmp, addr, disp16);
328 addr = tmp;
330 if (clear) {
331 tcg_gen_andi_i64(tmp, addr, ~0x7);
332 addr = tmp;
335 va = (fp ? cpu_fir[ra] : ctx->ir[ra]);
336 tcg_gen_qemu_load(va, addr, ctx->mem_idx);
338 tcg_temp_free(tmp);
341 static inline void gen_qemu_stf(TCGv t0, TCGv t1, int flags)
343 TCGv_i32 tmp32 = tcg_temp_new_i32();
344 gen_helper_f_to_memory(tmp32, t0);
345 tcg_gen_qemu_st_i32(tmp32, t1, flags, MO_LEUL);
346 tcg_temp_free_i32(tmp32);
349 static inline void gen_qemu_stg(TCGv t0, TCGv t1, int flags)
351 TCGv tmp = tcg_temp_new();
352 gen_helper_g_to_memory(tmp, t0);
353 tcg_gen_qemu_st_i64(tmp, t1, flags, MO_LEQ);
354 tcg_temp_free(tmp);
357 static inline void gen_qemu_sts(TCGv t0, TCGv t1, int flags)
359 TCGv_i32 tmp32 = tcg_temp_new_i32();
360 gen_helper_s_to_memory(tmp32, t0);
361 tcg_gen_qemu_st_i32(tmp32, t1, flags, MO_LEUL);
362 tcg_temp_free_i32(tmp32);
365 static inline void gen_store_mem(DisasContext *ctx,
366 void (*tcg_gen_qemu_store)(TCGv t0, TCGv t1,
367 int flags),
368 int ra, int rb, int32_t disp16, bool fp,
369 bool clear)
371 TCGv tmp, addr, va;
373 tmp = tcg_temp_new();
374 addr = load_gpr(ctx, rb);
376 if (disp16) {
377 tcg_gen_addi_i64(tmp, addr, disp16);
378 addr = tmp;
380 if (clear) {
381 tcg_gen_andi_i64(tmp, addr, ~0x7);
382 addr = tmp;
385 va = (fp ? load_fpr(ctx, ra) : load_gpr(ctx, ra));
386 tcg_gen_qemu_store(va, addr, ctx->mem_idx);
388 tcg_temp_free(tmp);
391 static DisasJumpType gen_store_conditional(DisasContext *ctx, int ra, int rb,
392 int32_t disp16, int mem_idx,
393 MemOp op)
395 TCGLabel *lab_fail, *lab_done;
396 TCGv addr, val;
398 addr = tcg_temp_new_i64();
399 tcg_gen_addi_i64(addr, load_gpr(ctx, rb), disp16);
400 free_context_temps(ctx);
402 lab_fail = gen_new_label();
403 lab_done = gen_new_label();
404 tcg_gen_brcond_i64(TCG_COND_NE, addr, cpu_lock_addr, lab_fail);
405 tcg_temp_free_i64(addr);
407 val = tcg_temp_new_i64();
408 tcg_gen_atomic_cmpxchg_i64(val, cpu_lock_addr, cpu_lock_value,
409 load_gpr(ctx, ra), mem_idx, op);
410 free_context_temps(ctx);
412 if (ra != 31) {
413 tcg_gen_setcond_i64(TCG_COND_EQ, ctx->ir[ra], val, cpu_lock_value);
415 tcg_temp_free_i64(val);
416 tcg_gen_br(lab_done);
418 gen_set_label(lab_fail);
419 if (ra != 31) {
420 tcg_gen_movi_i64(ctx->ir[ra], 0);
423 gen_set_label(lab_done);
424 tcg_gen_movi_i64(cpu_lock_addr, -1);
425 return DISAS_NEXT;
428 static bool use_goto_tb(DisasContext *ctx, uint64_t dest)
430 return translator_use_goto_tb(&ctx->base, dest);
433 static DisasJumpType gen_bdirect(DisasContext *ctx, int ra, int32_t disp)
435 uint64_t dest = ctx->base.pc_next + (disp << 2);
437 if (ra != 31) {
438 tcg_gen_movi_i64(ctx->ir[ra], ctx->base.pc_next);
441 /* Notice branch-to-next; used to initialize RA with the PC. */
442 if (disp == 0) {
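/* DISAS_NEXT is the first DisasJumpType enumerator, i.e. 0. */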
443 return 0;
444 } else if (use_goto_tb(ctx, dest)) {
445 tcg_gen_goto_tb(0);
446 tcg_gen_movi_i64(cpu_pc, dest);
447 tcg_gen_exit_tb(ctx->base.tb, 0);
448 return DISAS_NORETURN;
449 } else {
450 tcg_gen_movi_i64(cpu_pc, dest);
451 return DISAS_PC_UPDATED;
455 static DisasJumpType gen_bcond_internal(DisasContext *ctx, TCGCond cond,
456 TCGv cmp, int32_t disp)
458 uint64_t dest = ctx->base.pc_next + (disp << 2);
459 TCGLabel *lab_true = gen_new_label();
461 if (use_goto_tb(ctx, dest)) {
462 tcg_gen_brcondi_i64(cond, cmp, 0, lab_true);
464 tcg_gen_goto_tb(0);
465 tcg_gen_movi_i64(cpu_pc, ctx->base.pc_next);
466 tcg_gen_exit_tb(ctx->base.tb, 0);
468 gen_set_label(lab_true);
469 tcg_gen_goto_tb(1);
470 tcg_gen_movi_i64(cpu_pc, dest);
471 tcg_gen_exit_tb(ctx->base.tb, 1);
473 return DISAS_NORETURN;
474 } else {
475 TCGv_i64 z = load_zero(ctx);
476 TCGv_i64 d = tcg_constant_i64(dest);
477 TCGv_i64 p = tcg_constant_i64(ctx->base.pc_next);
479 tcg_gen_movcond_i64(cond, cpu_pc, cmp, z, d, p);
480 return DISAS_PC_UPDATED;
484 static DisasJumpType gen_bcond(DisasContext *ctx, TCGCond cond, int ra,
485 int32_t disp, int mask)
487 if (mask) {
488 TCGv tmp = tcg_temp_new();
489 DisasJumpType ret;
491 tcg_gen_andi_i64(tmp, load_gpr(ctx, ra), 1);
492 ret = gen_bcond_internal(ctx, cond, tmp, disp);
493 tcg_temp_free(tmp);
494 return ret;
496 return gen_bcond_internal(ctx, cond, load_gpr(ctx, ra), disp);
499 /* Fold -0.0 for comparison with COND. */
501 static void gen_fold_mzero(TCGCond cond, TCGv dest, TCGv src)
503 uint64_t mzero = 1ull << 63;
505 switch (cond) {
506 case TCG_COND_LE:
507 case TCG_COND_GT:
508 /* For <= or >, the -0.0 value directly compares the way we want. */
509 tcg_gen_mov_i64(dest, src);
510 break;
512 case TCG_COND_EQ:
513 case TCG_COND_NE:
514 /* For == or !=, we can simply mask off the sign bit and compare. */
515 tcg_gen_andi_i64(dest, src, mzero - 1);
516 break;
518 case TCG_COND_GE:
519 case TCG_COND_LT:
520 /* For >= or <, map -0.0 to +0.0 via comparison and mask. */
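/* If src == -0.0 (0x8000000000000000) the setcond yields 0 and the
   negated mask is all zeros, so the AND produces +0.0; otherwise the
   mask is all ones and src passes through unchanged. */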
521 tcg_gen_setcondi_i64(TCG_COND_NE, dest, src, mzero);
522 tcg_gen_neg_i64(dest, dest);
523 tcg_gen_and_i64(dest, dest, src);
524 break;
526 default:
527 abort();
531 static DisasJumpType gen_fbcond(DisasContext *ctx, TCGCond cond, int ra,
532 int32_t disp)
534 TCGv cmp_tmp = tcg_temp_new();
535 DisasJumpType ret;
537 gen_fold_mzero(cond, cmp_tmp, load_fpr(ctx, ra));
538 ret = gen_bcond_internal(ctx, cond, cmp_tmp, disp);
539 tcg_temp_free(cmp_tmp);
540 return ret;
543 static void gen_fcmov(DisasContext *ctx, TCGCond cond, int ra, int rb, int rc)
545 TCGv_i64 va, vb, z;
547 z = load_zero(ctx);
548 vb = load_fpr(ctx, rb);
549 va = tcg_temp_new();
550 gen_fold_mzero(cond, va, load_fpr(ctx, ra));
552 tcg_gen_movcond_i64(cond, dest_fpr(ctx, rc), va, z, vb, load_fpr(ctx, rc));
554 tcg_temp_free(va);
557 #define QUAL_RM_N 0x080 /* Round mode nearest even */
558 #define QUAL_RM_C 0x000 /* Round mode chopped */
559 #define QUAL_RM_M 0x040 /* Round mode minus infinity */
560 #define QUAL_RM_D 0x0c0 /* Round mode dynamic */
561 #define QUAL_RM_MASK 0x0c0
563 #define QUAL_U 0x100 /* Underflow enable (fp output) */
564 #define QUAL_V 0x100 /* Overflow enable (int output) */
565 #define QUAL_S 0x400 /* Software completion enable */
566 #define QUAL_I 0x200 /* Inexact detection enable */
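/* E.g. (fn11 & QUAL_RM_MASK) == QUAL_RM_D selects the dynamic rounding
   mode from the FPCR, and a /SUI-qualified instruction has QUAL_S,
   QUAL_U and QUAL_I all set. */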
568 static void gen_qual_roundmode(DisasContext *ctx, int fn11)
570 TCGv_i32 tmp;
572 fn11 &= QUAL_RM_MASK;
573 if (fn11 == ctx->tb_rm) {
574 return;
576 ctx->tb_rm = fn11;
578 tmp = tcg_temp_new_i32();
579 switch (fn11) {
580 case QUAL_RM_N:
581 tcg_gen_movi_i32(tmp, float_round_nearest_even);
582 break;
583 case QUAL_RM_C:
584 tcg_gen_movi_i32(tmp, float_round_to_zero);
585 break;
586 case QUAL_RM_M:
587 tcg_gen_movi_i32(tmp, float_round_down);
588 break;
589 case QUAL_RM_D:
590 tcg_gen_ld8u_i32(tmp, cpu_env,
591 offsetof(CPUAlphaState, fpcr_dyn_round));
592 break;
595 #if defined(CONFIG_SOFTFLOAT_INLINE)
596 /* ??? The "fpu/softfloat.h" interface is to call set_float_rounding_mode.
597 With CONFIG_SOFTFLOAT that expands to an out-of-line call that just
598 sets the one field. */
599 tcg_gen_st8_i32(tmp, cpu_env,
600 offsetof(CPUAlphaState, fp_status.float_rounding_mode));
601 #else
602 gen_helper_setroundmode(tmp);
603 #endif
605 tcg_temp_free_i32(tmp);
608 static void gen_qual_flushzero(DisasContext *ctx, int fn11)
610 TCGv_i32 tmp;
612 fn11 &= QUAL_U;
613 if (fn11 == ctx->tb_ftz) {
614 return;
616 ctx->tb_ftz = fn11;
618 tmp = tcg_temp_new_i32();
619 if (fn11) {
620 /* Underflow is enabled, use the FPCR setting. */
621 tcg_gen_ld8u_i32(tmp, cpu_env,
622 offsetof(CPUAlphaState, fpcr_flush_to_zero));
623 } else {
624 /* Underflow is disabled, force flush-to-zero. */
625 tcg_gen_movi_i32(tmp, 1);
628 #if defined(CONFIG_SOFTFLOAT_INLINE)
629 tcg_gen_st8_i32(tmp, cpu_env,
630 offsetof(CPUAlphaState, fp_status.flush_to_zero));
631 #else
632 gen_helper_setflushzero(tmp);
633 #endif
635 tcg_temp_free_i32(tmp);
638 static TCGv gen_ieee_input(DisasContext *ctx, int reg, int fn11, int is_cmp)
640 TCGv val;
642 if (unlikely(reg == 31)) {
643 val = load_zero(ctx);
644 } else {
645 val = cpu_fir[reg];
646 if ((fn11 & QUAL_S) == 0) {
647 if (is_cmp) {
648 gen_helper_ieee_input_cmp(cpu_env, val);
649 } else {
650 gen_helper_ieee_input(cpu_env, val);
652 } else {
653 #ifndef CONFIG_USER_ONLY
654 /* In system mode, raise exceptions for denormals like real
655 hardware. In user mode, proceed as if the OS completion
656 handler is handling the denormal as per spec. */
657 gen_helper_ieee_input_s(cpu_env, val);
658 #endif
661 return val;
664 static void gen_fp_exc_raise(int rc, int fn11)
666 /* ??? We ought to be able to do something with imprecise exceptions.
667 E.g. notice we're still in the trap shadow of something within the
668 TB and do not generate the code to signal the exception; end the TB
669 when an exception is forced to arrive, either by consumption of a
670 register value or TRAPB or EXCB. */
671 TCGv_i32 reg, ign;
672 uint32_t ignore = 0;
674 if (!(fn11 & QUAL_U)) {
675 /* Note that QUAL_U == QUAL_V, so ignore either. */
676 ignore |= FPCR_UNF | FPCR_IOV;
678 if (!(fn11 & QUAL_I)) {
679 ignore |= FPCR_INE;
681 ign = tcg_constant_i32(ignore);
683 /* ??? Pass in the regno of the destination so that the helper can
684 set EXC_MASK, which contains a bitmask of destination registers
685 that have caused arithmetic traps. A simple userspace emulation
686 does not require this. We do need it for a guest kernel's entArith,
687 or if we were to do something clever with imprecise exceptions. */
688 reg = tcg_constant_i32(rc + 32);
689 if (fn11 & QUAL_S) {
690 gen_helper_fp_exc_raise_s(cpu_env, ign, reg);
691 } else {
692 gen_helper_fp_exc_raise(cpu_env, ign, reg);
696 static void gen_cvtlq(TCGv vc, TCGv vb)
698 TCGv tmp = tcg_temp_new();
700 /* The arithmetic right shift here, plus the sign-extended mask below
701 yields a sign-extended result without an explicit ext32s_i64. */
702 tcg_gen_shri_i64(tmp, vb, 29);
703 tcg_gen_sari_i64(vc, vb, 32);
704 tcg_gen_deposit_i64(vc, vc, tmp, 0, 30);
706 tcg_temp_free(tmp);
709 static void gen_ieee_arith2(DisasContext *ctx,
710 void (*helper)(TCGv, TCGv_ptr, TCGv),
711 int rb, int rc, int fn11)
713 TCGv vb;
715 gen_qual_roundmode(ctx, fn11);
716 gen_qual_flushzero(ctx, fn11);
718 vb = gen_ieee_input(ctx, rb, fn11, 0);
719 helper(dest_fpr(ctx, rc), cpu_env, vb);
721 gen_fp_exc_raise(rc, fn11);
724 #define IEEE_ARITH2(name) \
725 static inline void glue(gen_, name)(DisasContext *ctx, \
726 int rb, int rc, int fn11) \
728 gen_ieee_arith2(ctx, gen_helper_##name, rb, rc, fn11); \
730 IEEE_ARITH2(sqrts)
731 IEEE_ARITH2(sqrtt)
732 IEEE_ARITH2(cvtst)
733 IEEE_ARITH2(cvtts)
735 static void gen_cvttq(DisasContext *ctx, int rb, int rc, int fn11)
737 TCGv vb, vc;
739 /* No need to set flushzero, since we have an integer output. */
740 vb = gen_ieee_input(ctx, rb, fn11, 0);
741 vc = dest_fpr(ctx, rc);
743 /* Almost all integer conversions use cropped rounding;
744 special case that. */
745 if ((fn11 & QUAL_RM_MASK) == QUAL_RM_C) {
746 gen_helper_cvttq_c(vc, cpu_env, vb);
747 } else {
748 gen_qual_roundmode(ctx, fn11);
749 gen_helper_cvttq(vc, cpu_env, vb);
751 gen_fp_exc_raise(rc, fn11);
754 static void gen_ieee_intcvt(DisasContext *ctx,
755 void (*helper)(TCGv, TCGv_ptr, TCGv),
756 int rb, int rc, int fn11)
758 TCGv vb, vc;
760 gen_qual_roundmode(ctx, fn11);
761 vb = load_fpr(ctx, rb);
762 vc = dest_fpr(ctx, rc);
764 /* The only exception that can be raised by integer conversion
765 is inexact. Thus we only need to worry about exceptions when
766 inexact handling is requested. */
767 if (fn11 & QUAL_I) {
768 helper(vc, cpu_env, vb);
769 gen_fp_exc_raise(rc, fn11);
770 } else {
771 helper(vc, cpu_env, vb);
775 #define IEEE_INTCVT(name) \
776 static inline void glue(gen_, name)(DisasContext *ctx, \
777 int rb, int rc, int fn11) \
779 gen_ieee_intcvt(ctx, gen_helper_##name, rb, rc, fn11); \
781 IEEE_INTCVT(cvtqs)
782 IEEE_INTCVT(cvtqt)
784 static void gen_cpy_mask(TCGv vc, TCGv va, TCGv vb, bool inv_a, uint64_t mask)
786 TCGv vmask = tcg_constant_i64(mask);
787 TCGv tmp = tcg_temp_new_i64();
789 if (inv_a) {
790 tcg_gen_andc_i64(tmp, vmask, va);
791 } else {
792 tcg_gen_and_i64(tmp, va, vmask);
795 tcg_gen_andc_i64(vc, vb, vmask);
796 tcg_gen_or_i64(vc, vc, tmp);
798 tcg_temp_free(tmp);
801 static void gen_ieee_arith3(DisasContext *ctx,
802 void (*helper)(TCGv, TCGv_ptr, TCGv, TCGv),
803 int ra, int rb, int rc, int fn11)
805 TCGv va, vb, vc;
807 gen_qual_roundmode(ctx, fn11);
808 gen_qual_flushzero(ctx, fn11);
810 va = gen_ieee_input(ctx, ra, fn11, 0);
811 vb = gen_ieee_input(ctx, rb, fn11, 0);
812 vc = dest_fpr(ctx, rc);
813 helper(vc, cpu_env, va, vb);
815 gen_fp_exc_raise(rc, fn11);
818 #define IEEE_ARITH3(name) \
819 static inline void glue(gen_, name)(DisasContext *ctx, \
820 int ra, int rb, int rc, int fn11) \
822 gen_ieee_arith3(ctx, gen_helper_##name, ra, rb, rc, fn11); \
824 IEEE_ARITH3(adds)
825 IEEE_ARITH3(subs)
826 IEEE_ARITH3(muls)
827 IEEE_ARITH3(divs)
828 IEEE_ARITH3(addt)
829 IEEE_ARITH3(subt)
830 IEEE_ARITH3(mult)
831 IEEE_ARITH3(divt)
833 static void gen_ieee_compare(DisasContext *ctx,
834 void (*helper)(TCGv, TCGv_ptr, TCGv, TCGv),
835 int ra, int rb, int rc, int fn11)
837 TCGv va, vb, vc;
839 va = gen_ieee_input(ctx, ra, fn11, 1);
840 vb = gen_ieee_input(ctx, rb, fn11, 1);
841 vc = dest_fpr(ctx, rc);
842 helper(vc, cpu_env, va, vb);
844 gen_fp_exc_raise(rc, fn11);
847 #define IEEE_CMP3(name) \
848 static inline void glue(gen_, name)(DisasContext *ctx, \
849 int ra, int rb, int rc, int fn11) \
851 gen_ieee_compare(ctx, gen_helper_##name, ra, rb, rc, fn11); \
853 IEEE_CMP3(cmptun)
854 IEEE_CMP3(cmpteq)
855 IEEE_CMP3(cmptlt)
856 IEEE_CMP3(cmptle)
858 static inline uint64_t zapnot_mask(uint8_t lit)
860 uint64_t mask = 0;
861 int i;
863 for (i = 0; i < 8; ++i) {
864 if ((lit >> i) & 1) {
865 mask |= 0xffull << (i * 8);
868 return mask;
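/* For instance zapnot_mask(0x01) == 0xff, zapnot_mask(0x0f) ==
   0xffffffff, and zapnot_mask(0xff) == ~0ull -- exactly the cases that
   gen_zapnoti below turns into extensions or a plain move.  A small
   illustrative self-test (not built; the function name is ad hoc): */
#if 0
static void zapnot_mask_selftest(void)
{
    assert(zapnot_mask(0x01) == 0x00000000000000ffull);
    assert(zapnot_mask(0x03) == 0x000000000000ffffull);
    assert(zapnot_mask(0x0f) == 0x00000000ffffffffull);
    assert(zapnot_mask(0xff) == ~0ull);
}
#endif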
871 /* Implement zapnot with an immediate operand, which expands to some
872 form of immediate AND. This is a basic building block in the
873 definition of many of the other byte manipulation instructions. */
874 static void gen_zapnoti(TCGv dest, TCGv src, uint8_t lit)
876 switch (lit) {
877 case 0x00:
878 tcg_gen_movi_i64(dest, 0);
879 break;
880 case 0x01:
881 tcg_gen_ext8u_i64(dest, src);
882 break;
883 case 0x03:
884 tcg_gen_ext16u_i64(dest, src);
885 break;
886 case 0x0f:
887 tcg_gen_ext32u_i64(dest, src);
888 break;
889 case 0xff:
890 tcg_gen_mov_i64(dest, src);
891 break;
892 default:
893 tcg_gen_andi_i64(dest, src, zapnot_mask(lit));
894 break;
898 /* EXTWH, EXTLH, EXTQH */
899 static void gen_ext_h(DisasContext *ctx, TCGv vc, TCGv va, int rb, bool islit,
900 uint8_t lit, uint8_t byte_mask)
902 if (islit) {
903 int pos = (64 - lit * 8) & 0x3f;
904 int len = cto32(byte_mask) * 8;
905 if (pos < len) {
906 tcg_gen_deposit_z_i64(vc, va, pos, len - pos);
907 } else {
908 tcg_gen_movi_i64(vc, 0);
910 } else {
911 TCGv tmp = tcg_temp_new();
912 tcg_gen_shli_i64(tmp, load_gpr(ctx, rb), 3);
913 tcg_gen_neg_i64(tmp, tmp);
914 tcg_gen_andi_i64(tmp, tmp, 0x3f);
915 tcg_gen_shl_i64(vc, va, tmp);
916 tcg_temp_free(tmp);
918 gen_zapnoti(vc, vc, byte_mask);
921 /* EXTBL, EXTWL, EXTLL, EXTQL */
922 static void gen_ext_l(DisasContext *ctx, TCGv vc, TCGv va, int rb, bool islit,
923 uint8_t lit, uint8_t byte_mask)
925 if (islit) {
926 int pos = (lit & 7) * 8;
927 int len = cto32(byte_mask) * 8;
928 if (pos + len >= 64) {
929 len = 64 - pos;
931 tcg_gen_extract_i64(vc, va, pos, len);
932 } else {
933 TCGv tmp = tcg_temp_new();
934 tcg_gen_andi_i64(tmp, load_gpr(ctx, rb), 7);
935 tcg_gen_shli_i64(tmp, tmp, 3);
936 tcg_gen_shr_i64(vc, va, tmp);
937 tcg_temp_free(tmp);
938 gen_zapnoti(vc, vc, byte_mask);
942 /* INSWH, INSLH, INSQH */
943 static void gen_ins_h(DisasContext *ctx, TCGv vc, TCGv va, int rb, bool islit,
944 uint8_t lit, uint8_t byte_mask)
946 if (islit) {
947 int pos = 64 - (lit & 7) * 8;
948 int len = cto32(byte_mask) * 8;
949 if (pos < len) {
950 tcg_gen_extract_i64(vc, va, pos, len - pos);
951 } else {
952 tcg_gen_movi_i64(vc, 0);
954 } else {
955 TCGv tmp = tcg_temp_new();
956 TCGv shift = tcg_temp_new();
958 /* The instruction description has us left-shift the byte mask
959 and extract bits <15:8> and apply that zap at the end. This
960 is equivalent to simply performing the zap first and shifting
961 afterward. */
962 gen_zapnoti(tmp, va, byte_mask);
964 /* If (B & 7) == 0, we need to shift by 64 and leave a zero. Do this
965 portably by splitting the shift into two parts: shift_count-1 and 1.
966 Arrange for the -1 by using ones-complement instead of
967 twos-complement in the negation: ~(B * 8) & 63. */
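/* Concretely: B & 7 == 0 gives ~0 & 63 = 63, and 63 + 1 = 64 shifts
   everything out; B & 7 == 1 gives ~8 & 63 = 55, and 55 + 1 = 56 =
   64 - 8, the shift the insert-high forms actually need. */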
969 tcg_gen_shli_i64(shift, load_gpr(ctx, rb), 3);
970 tcg_gen_not_i64(shift, shift);
971 tcg_gen_andi_i64(shift, shift, 0x3f);
973 tcg_gen_shr_i64(vc, tmp, shift);
974 tcg_gen_shri_i64(vc, vc, 1);
975 tcg_temp_free(shift);
976 tcg_temp_free(tmp);
980 /* INSBL, INSWL, INSLL, INSQL */
981 static void gen_ins_l(DisasContext *ctx, TCGv vc, TCGv va, int rb, bool islit,
982 uint8_t lit, uint8_t byte_mask)
984 if (islit) {
985 int pos = (lit & 7) * 8;
986 int len = cto32(byte_mask) * 8;
987 if (pos + len > 64) {
988 len = 64 - pos;
990 tcg_gen_deposit_z_i64(vc, va, pos, len);
991 } else {
992 TCGv tmp = tcg_temp_new();
993 TCGv shift = tcg_temp_new();
995 /* The instruction description has us left-shift the byte mask
996 and extract bits <15:8> and apply that zap at the end. This
997 is equivalent to simply performing the zap first and shifting
998 afterward. */
999 gen_zapnoti(tmp, va, byte_mask);
1001 tcg_gen_andi_i64(shift, load_gpr(ctx, rb), 7);
1002 tcg_gen_shli_i64(shift, shift, 3);
1003 tcg_gen_shl_i64(vc, tmp, shift);
1004 tcg_temp_free(shift);
1005 tcg_temp_free(tmp);
1009 /* MSKWH, MSKLH, MSKQH */
1010 static void gen_msk_h(DisasContext *ctx, TCGv vc, TCGv va, int rb, bool islit,
1011 uint8_t lit, uint8_t byte_mask)
1013 if (islit) {
1014 gen_zapnoti(vc, va, ~((byte_mask << (lit & 7)) >> 8));
1015 } else {
1016 TCGv shift = tcg_temp_new();
1017 TCGv mask = tcg_temp_new();
1019 /* The instruction description is as above, where the byte_mask
1020 is shifted left, and then we extract bits <15:8>. This can be
1021 emulated with a right-shift on the expanded byte mask. This
1022 requires extra care because for an input <2:0> == 0 we need a
1023 shift of 64 bits in order to generate a zero. This is done by
1024 splitting the shift into two parts, the variable shift - 1
1025 followed by a constant 1 shift. The code we expand below is
1026 equivalent to ~(B * 8) & 63. */
1028 tcg_gen_shli_i64(shift, load_gpr(ctx, rb), 3);
1029 tcg_gen_not_i64(shift, shift);
1030 tcg_gen_andi_i64(shift, shift, 0x3f);
1031 tcg_gen_movi_i64(mask, zapnot_mask(byte_mask));
1032 tcg_gen_shr_i64(mask, mask, shift);
1033 tcg_gen_shri_i64(mask, mask, 1);
1035 tcg_gen_andc_i64(vc, va, mask);
1037 tcg_temp_free(mask);
1038 tcg_temp_free(shift);
1042 /* MSKBL, MSKWL, MSKLL, MSKQL */
1043 static void gen_msk_l(DisasContext *ctx, TCGv vc, TCGv va, int rb, bool islit,
1044 uint8_t lit, uint8_t byte_mask)
1046 if (islit) {
1047 gen_zapnoti(vc, va, ~(byte_mask << (lit & 7)));
1048 } else {
1049 TCGv shift = tcg_temp_new();
1050 TCGv mask = tcg_temp_new();
1052 tcg_gen_andi_i64(shift, load_gpr(ctx, rb), 7);
1053 tcg_gen_shli_i64(shift, shift, 3);
1054 tcg_gen_movi_i64(mask, zapnot_mask(byte_mask));
1055 tcg_gen_shl_i64(mask, mask, shift);
1057 tcg_gen_andc_i64(vc, va, mask);
1059 tcg_temp_free(mask);
1060 tcg_temp_free(shift);
1064 static void gen_rx(DisasContext *ctx, int ra, int set)
1066 if (ra != 31) {
1067 ld_flag_byte(ctx->ir[ra], ENV_FLAG_RX_SHIFT);
1070 st_flag_byte(tcg_constant_i64(set), ENV_FLAG_RX_SHIFT);
1073 static DisasJumpType gen_call_pal(DisasContext *ctx, int palcode)
1075 /* We're emulating OSF/1 PALcode. Many of these are trivial accesses
1076 to internal cpu registers. */
1078 /* Unprivileged PAL call */
1079 if (palcode >= 0x80 && palcode < 0xC0) {
1080 switch (palcode) {
1081 case 0x86:
1082 /* IMB */
1083 /* No-op inside QEMU. */
1084 break;
1085 case 0x9E:
1086 /* RDUNIQUE */
1087 tcg_gen_ld_i64(ctx->ir[IR_V0], cpu_env,
1088 offsetof(CPUAlphaState, unique));
1089 break;
1090 case 0x9F:
1091 /* WRUNIQUE */
1092 tcg_gen_st_i64(ctx->ir[IR_A0], cpu_env,
1093 offsetof(CPUAlphaState, unique));
1094 break;
1095 default:
1096 palcode &= 0xbf;
1097 goto do_call_pal;
1099 return DISAS_NEXT;
1102 #ifndef CONFIG_USER_ONLY
1103 /* Privileged PAL code */
1104 if (palcode < 0x40 && (ctx->tbflags & ENV_FLAG_PS_USER) == 0) {
1105 switch (palcode) {
1106 case 0x01:
1107 /* CFLUSH */
1108 /* No-op inside QEMU. */
1109 break;
1110 case 0x02:
1111 /* DRAINA */
1112 /* No-op inside QEMU. */
1113 break;
1114 case 0x2D:
1115 /* WRVPTPTR */
1116 tcg_gen_st_i64(ctx->ir[IR_A0], cpu_env,
1117 offsetof(CPUAlphaState, vptptr));
1118 break;
1119 case 0x31:
1120 /* WRVAL */
1121 tcg_gen_st_i64(ctx->ir[IR_A0], cpu_env,
1122 offsetof(CPUAlphaState, sysval));
1123 break;
1124 case 0x32:
1125 /* RDVAL */
1126 tcg_gen_ld_i64(ctx->ir[IR_V0], cpu_env,
1127 offsetof(CPUAlphaState, sysval));
1128 break;
1130 case 0x35:
1131 /* SWPIPL */
1132 /* Note that we already know we're in kernel mode, so we know
1133 that PS only contains the 3 IPL bits. */
1134 ld_flag_byte(ctx->ir[IR_V0], ENV_FLAG_PS_SHIFT);
1136 /* But make sure to store only the 3 IPL bits from the user. */
1138 TCGv tmp = tcg_temp_new();
1139 tcg_gen_andi_i64(tmp, ctx->ir[IR_A0], PS_INT_MASK);
1140 st_flag_byte(tmp, ENV_FLAG_PS_SHIFT);
1141 tcg_temp_free(tmp);
1144 /* Allow interrupts to be recognized right away. */
1145 tcg_gen_movi_i64(cpu_pc, ctx->base.pc_next);
1146 return DISAS_PC_UPDATED_NOCHAIN;
1148 case 0x36:
1149 /* RDPS */
1150 ld_flag_byte(ctx->ir[IR_V0], ENV_FLAG_PS_SHIFT);
1151 break;
1153 case 0x38:
1154 /* WRUSP */
1155 tcg_gen_st_i64(ctx->ir[IR_A0], cpu_env,
1156 offsetof(CPUAlphaState, usp));
1157 break;
1158 case 0x3A:
1159 /* RDUSP */
1160 tcg_gen_ld_i64(ctx->ir[IR_V0], cpu_env,
1161 offsetof(CPUAlphaState, usp));
1162 break;
1163 case 0x3C:
1164 /* WHAMI */
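/* env is not the start of AlphaCPU: subtracting offsetof(AlphaCPU,
   env) steps from cpu_env back to the containing CPUState (the
   first member), whose cpu_index is then loaded. */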
1165 tcg_gen_ld32s_i64(ctx->ir[IR_V0], cpu_env,
1166 -offsetof(AlphaCPU, env) + offsetof(CPUState, cpu_index));
1167 break;
1169 case 0x3E:
1170 /* WTINT */
1171 tcg_gen_st_i32(tcg_constant_i32(1), cpu_env,
1172 -offsetof(AlphaCPU, env) +
1173 offsetof(CPUState, halted));
1174 tcg_gen_movi_i64(ctx->ir[IR_V0], 0);
1175 return gen_excp(ctx, EXCP_HALTED, 0);
1177 default:
1178 palcode &= 0x3f;
1179 goto do_call_pal;
1181 return DISAS_NEXT;
1183 #endif
1184 return gen_invalid(ctx);
1186 do_call_pal:
1187 #ifdef CONFIG_USER_ONLY
1188 return gen_excp(ctx, EXCP_CALL_PAL, palcode);
1189 #else
1191 TCGv tmp = tcg_temp_new();
1192 uint64_t exc_addr = ctx->base.pc_next;
1193 uint64_t entry = ctx->palbr;
1195 if (ctx->tbflags & ENV_FLAG_PAL_MODE) {
1196 exc_addr |= 1;
1197 } else {
1198 tcg_gen_movi_i64(tmp, 1);
1199 st_flag_byte(tmp, ENV_FLAG_PAL_SHIFT);
1202 tcg_gen_movi_i64(tmp, exc_addr);
1203 tcg_gen_st_i64(tmp, cpu_env, offsetof(CPUAlphaState, exc_addr));
1204 tcg_temp_free(tmp);
1206 entry += (palcode & 0x80
1207 ? 0x2000 + (palcode - 0x80) * 64
1208 : 0x1000 + palcode * 64);
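/* E.g. the unprivileged IMB (0x86) enters at palbr + 0x2180, and the
   privileged CFLUSH (0x01) at palbr + 0x1040. */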
1210 tcg_gen_movi_i64(cpu_pc, entry);
1211 return DISAS_PC_UPDATED;
1213 #endif
1216 #ifndef CONFIG_USER_ONLY
1218 #define PR_LONG 0x200000
1220 static int cpu_pr_data(int pr)
1222 switch (pr) {
1223 case 2: return offsetof(CPUAlphaState, pcc_ofs) | PR_LONG;
1224 case 3: return offsetof(CPUAlphaState, trap_arg0);
1225 case 4: return offsetof(CPUAlphaState, trap_arg1);
1226 case 5: return offsetof(CPUAlphaState, trap_arg2);
1227 case 6: return offsetof(CPUAlphaState, exc_addr);
1228 case 7: return offsetof(CPUAlphaState, palbr);
1229 case 8: return offsetof(CPUAlphaState, ptbr);
1230 case 9: return offsetof(CPUAlphaState, vptptr);
1231 case 10: return offsetof(CPUAlphaState, unique);
1232 case 11: return offsetof(CPUAlphaState, sysval);
1233 case 12: return offsetof(CPUAlphaState, usp);
1235 case 40 ... 63:
1236 return offsetof(CPUAlphaState, scratch[pr - 40]);
1238 case 251:
1239 return offsetof(CPUAlphaState, alarm_expire);
1241 return 0;
1244 static DisasJumpType gen_mfpr(DisasContext *ctx, TCGv va, int regno)
1246 void (*helper)(TCGv);
1247 int data;
1249 switch (regno) {
1250 case 32 ... 39:
1251 /* Accessing the "non-shadow" general registers. */
1252 regno = regno == 39 ? 25 : regno - 32 + 8;
1253 tcg_gen_mov_i64(va, cpu_std_ir[regno]);
1254 break;
1256 case 250: /* WALLTIME */
1257 helper = gen_helper_get_walltime;
1258 goto do_helper;
1259 case 249: /* VMTIME */
1260 helper = gen_helper_get_vmtime;
1261 do_helper:
1262 if (tb_cflags(ctx->base.tb) & CF_USE_ICOUNT) {
1263 gen_io_start();
1264 helper(va);
1265 return DISAS_PC_STALE;
1266 } else {
1267 helper(va);
1269 break;
1271 case 0: /* PS */
1272 ld_flag_byte(va, ENV_FLAG_PS_SHIFT);
1273 break;
1274 case 1: /* FEN */
1275 ld_flag_byte(va, ENV_FLAG_FEN_SHIFT);
1276 break;
1278 default:
1279 /* The basic registers are data only, and unknown registers
1280 are read-zero, write-ignore. */
1281 data = cpu_pr_data(regno);
1282 if (data == 0) {
1283 tcg_gen_movi_i64(va, 0);
1284 } else if (data & PR_LONG) {
1285 tcg_gen_ld32s_i64(va, cpu_env, data & ~PR_LONG);
1286 } else {
1287 tcg_gen_ld_i64(va, cpu_env, data);
1289 break;
1292 return DISAS_NEXT;
1295 static DisasJumpType gen_mtpr(DisasContext *ctx, TCGv vb, int regno)
1297 int data;
1298 DisasJumpType ret = DISAS_NEXT;
1300 switch (regno) {
1301 case 255:
1302 /* TBIA */
1303 gen_helper_tbia(cpu_env);
1304 break;
1306 case 254:
1307 /* TBIS */
1308 gen_helper_tbis(cpu_env, vb);
1309 break;
1311 case 253:
1312 /* WAIT */
1313 tcg_gen_st_i32(tcg_constant_i32(1), cpu_env,
1314 -offsetof(AlphaCPU, env) + offsetof(CPUState, halted));
1315 return gen_excp(ctx, EXCP_HALTED, 0);
1317 case 252:
1318 /* HALT */
1319 gen_helper_halt(vb);
1320 return DISAS_PC_STALE;
1322 case 251:
1323 /* ALARM */
1324 if (tb_cflags(ctx->base.tb) & CF_USE_ICOUNT) {
1325 gen_io_start();
1326 ret = DISAS_PC_STALE;
1328 gen_helper_set_alarm(cpu_env, vb);
1329 break;
1331 case 7:
1332 /* PALBR */
1333 tcg_gen_st_i64(vb, cpu_env, offsetof(CPUAlphaState, palbr));
1334 /* Changing the PAL base register implies un-chaining all of the TBs
1335 that ended with a CALL_PAL. Since the base register usually only
1336 changes during boot, flushing everything works well. */
1337 gen_helper_tb_flush(cpu_env);
1338 return DISAS_PC_STALE;
1340 case 32 ... 39:
1341 /* Accessing the "non-shadow" general registers. */
1342 regno = regno == 39 ? 25 : regno - 32 + 8;
1343 tcg_gen_mov_i64(cpu_std_ir[regno], vb);
1344 break;
1346 case 0: /* PS */
1347 st_flag_byte(vb, ENV_FLAG_PS_SHIFT);
1348 break;
1349 case 1: /* FEN */
1350 st_flag_byte(vb, ENV_FLAG_FEN_SHIFT);
1351 break;
1353 default:
1354 /* The basic registers are data only, and unknown registers
1355 are read-zero, write-ignore. */
1356 data = cpu_pr_data(regno);
1357 if (data != 0) {
1358 if (data & PR_LONG) {
1359 tcg_gen_st32_i64(vb, cpu_env, data & ~PR_LONG);
1360 } else {
1361 tcg_gen_st_i64(vb, cpu_env, data);
1364 break;
1367 return ret;
1369 #endif /* !CONFIG_USER_ONLY */
1371 #define REQUIRE_NO_LIT \
1372 do { \
1373 if (real_islit) { \
1374 goto invalid_opc; \
1376 } while (0)
1378 #define REQUIRE_AMASK(FLAG) \
1379 do { \
1380 if ((ctx->amask & AMASK_##FLAG) == 0) { \
1381 goto invalid_opc; \
1383 } while (0)
1385 #define REQUIRE_TB_FLAG(FLAG) \
1386 do { \
1387 if ((ctx->tbflags & (FLAG)) == 0) { \
1388 goto invalid_opc; \
1390 } while (0)
1392 #define REQUIRE_REG_31(WHICH) \
1393 do { \
1394 if (WHICH != 31) { \
1395 goto invalid_opc; \
1397 } while (0)
1399 #define REQUIRE_FEN \
1400 do { \
1401 if (!(ctx->tbflags & ENV_FLAG_FEN)) { \
1402 goto raise_fen; \
1404 } while (0)
1406 static DisasJumpType translate_one(DisasContext *ctx, uint32_t insn)
1408 int32_t disp21, disp16, disp12 __attribute__((unused));
1409 uint16_t fn11;
1410 uint8_t opc, ra, rb, rc, fpfn, fn7, lit;
1411 bool islit, real_islit;
1412 TCGv va, vb, vc, tmp, tmp2;
1413 TCGv_i32 t32;
1414 DisasJumpType ret;
1416 /* Decode all instruction fields */
1417 opc = extract32(insn, 26, 6);
1418 ra = extract32(insn, 21, 5);
1419 rb = extract32(insn, 16, 5);
1420 rc = extract32(insn, 0, 5);
1421 real_islit = islit = extract32(insn, 12, 1);
1422 lit = extract32(insn, 13, 8);
1424 disp21 = sextract32(insn, 0, 21);
1425 disp16 = sextract32(insn, 0, 16);
1426 disp12 = sextract32(insn, 0, 12);
1428 fn11 = extract32(insn, 5, 11);
1429 fpfn = extract32(insn, 5, 6);
1430 fn7 = extract32(insn, 5, 7);
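/* For operate-format insns this is: opcode <31:26>, ra <25:21>,
   rb <20:16> (or an 8-bit literal in <20:13> when bit 12 is set),
   function code <11:5>, rc <4:0>. */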
1432 if (rb == 31 && !islit) {
1433 islit = true;
1434 lit = 0;
1437 ret = DISAS_NEXT;
1438 switch (opc) {
1439 case 0x00:
1440 /* CALL_PAL */
1441 ret = gen_call_pal(ctx, insn & 0x03ffffff);
1442 break;
1443 case 0x01:
1444 /* OPC01 */
1445 goto invalid_opc;
1446 case 0x02:
1447 /* OPC02 */
1448 goto invalid_opc;
1449 case 0x03:
1450 /* OPC03 */
1451 goto invalid_opc;
1452 case 0x04:
1453 /* OPC04 */
1454 goto invalid_opc;
1455 case 0x05:
1456 /* OPC05 */
1457 goto invalid_opc;
1458 case 0x06:
1459 /* OPC06 */
1460 goto invalid_opc;
1461 case 0x07:
1462 /* OPC07 */
1463 goto invalid_opc;
1465 case 0x09:
1466 /* LDAH */
1467 disp16 = (uint32_t)disp16 << 16;
1468 /* fall through */
1469 case 0x08:
1470 /* LDA */
1471 va = dest_gpr(ctx, ra);
1472 /* It's worth special-casing immediate loads. */
1473 if (rb == 31) {
1474 tcg_gen_movi_i64(va, disp16);
1475 } else {
1476 tcg_gen_addi_i64(va, load_gpr(ctx, rb), disp16);
1478 break;
1480 case 0x0A:
1481 /* LDBU */
1482 REQUIRE_AMASK(BWX);
1483 gen_load_mem(ctx, &tcg_gen_qemu_ld8u, ra, rb, disp16, 0, 0);
1484 break;
1485 case 0x0B:
1486 /* LDQ_U */
1487 gen_load_mem(ctx, &tcg_gen_qemu_ld64, ra, rb, disp16, 0, 1);
1488 break;
1489 case 0x0C:
1490 /* LDWU */
1491 REQUIRE_AMASK(BWX);
1492 gen_load_mem(ctx, &tcg_gen_qemu_ld16u, ra, rb, disp16, 0, 0);
1493 break;
1494 case 0x0D:
1495 /* STW */
1496 REQUIRE_AMASK(BWX);
1497 gen_store_mem(ctx, &tcg_gen_qemu_st16, ra, rb, disp16, 0, 0);
1498 break;
1499 case 0x0E:
1500 /* STB */
1501 REQUIRE_AMASK(BWX);
1502 gen_store_mem(ctx, &tcg_gen_qemu_st8, ra, rb, disp16, 0, 0);
1503 break;
1504 case 0x0F:
1505 /* STQ_U */
1506 gen_store_mem(ctx, &tcg_gen_qemu_st64, ra, rb, disp16, 0, 1);
1507 break;
1509 case 0x10:
1510 vc = dest_gpr(ctx, rc);
1511 vb = load_gpr_lit(ctx, rb, lit, islit);
1513 if (ra == 31) {
1514 if (fn7 == 0x00) {
1515 /* Special case ADDL as SEXTL. */
1516 tcg_gen_ext32s_i64(vc, vb);
1517 break;
1519 if (fn7 == 0x29) {
1520 /* Special case SUBQ as NEGQ. */
1521 tcg_gen_neg_i64(vc, vb);
1522 break;
1526 va = load_gpr(ctx, ra);
1527 switch (fn7) {
1528 case 0x00:
1529 /* ADDL */
1530 tcg_gen_add_i64(vc, va, vb);
1531 tcg_gen_ext32s_i64(vc, vc);
1532 break;
1533 case 0x02:
1534 /* S4ADDL */
1535 tmp = tcg_temp_new();
1536 tcg_gen_shli_i64(tmp, va, 2);
1537 tcg_gen_add_i64(tmp, tmp, vb);
1538 tcg_gen_ext32s_i64(vc, tmp);
1539 tcg_temp_free(tmp);
1540 break;
1541 case 0x09:
1542 /* SUBL */
1543 tcg_gen_sub_i64(vc, va, vb);
1544 tcg_gen_ext32s_i64(vc, vc);
1545 break;
1546 case 0x0B:
1547 /* S4SUBL */
1548 tmp = tcg_temp_new();
1549 tcg_gen_shli_i64(tmp, va, 2);
1550 tcg_gen_sub_i64(tmp, tmp, vb);
1551 tcg_gen_ext32s_i64(vc, tmp);
1552 tcg_temp_free(tmp);
1553 break;
1554 case 0x0F:
1555 /* CMPBGE */
1556 if (ra == 31) {
1557 /* Special case 0 >= X as X == 0. */
1558 gen_helper_cmpbe0(vc, vb);
1559 } else {
1560 gen_helper_cmpbge(vc, va, vb);
1562 break;
1563 case 0x12:
1564 /* S8ADDL */
1565 tmp = tcg_temp_new();
1566 tcg_gen_shli_i64(tmp, va, 3);
1567 tcg_gen_add_i64(tmp, tmp, vb);
1568 tcg_gen_ext32s_i64(vc, tmp);
1569 tcg_temp_free(tmp);
1570 break;
1571 case 0x1B:
1572 /* S8SUBL */
1573 tmp = tcg_temp_new();
1574 tcg_gen_shli_i64(tmp, va, 3);
1575 tcg_gen_sub_i64(tmp, tmp, vb);
1576 tcg_gen_ext32s_i64(vc, tmp);
1577 tcg_temp_free(tmp);
1578 break;
1579 case 0x1D:
1580 /* CMPULT */
1581 tcg_gen_setcond_i64(TCG_COND_LTU, vc, va, vb);
1582 break;
1583 case 0x20:
1584 /* ADDQ */
1585 tcg_gen_add_i64(vc, va, vb);
1586 break;
1587 case 0x22:
1588 /* S4ADDQ */
1589 tmp = tcg_temp_new();
1590 tcg_gen_shli_i64(tmp, va, 2);
1591 tcg_gen_add_i64(vc, tmp, vb);
1592 tcg_temp_free(tmp);
1593 break;
1594 case 0x29:
1595 /* SUBQ */
1596 tcg_gen_sub_i64(vc, va, vb);
1597 break;
1598 case 0x2B:
1599 /* S4SUBQ */
1600 tmp = tcg_temp_new();
1601 tcg_gen_shli_i64(tmp, va, 2);
1602 tcg_gen_sub_i64(vc, tmp, vb);
1603 tcg_temp_free(tmp);
1604 break;
1605 case 0x2D:
1606 /* CMPEQ */
1607 tcg_gen_setcond_i64(TCG_COND_EQ, vc, va, vb);
1608 break;
1609 case 0x32:
1610 /* S8ADDQ */
1611 tmp = tcg_temp_new();
1612 tcg_gen_shli_i64(tmp, va, 3);
1613 tcg_gen_add_i64(vc, tmp, vb);
1614 tcg_temp_free(tmp);
1615 break;
1616 case 0x3B:
1617 /* S8SUBQ */
1618 tmp = tcg_temp_new();
1619 tcg_gen_shli_i64(tmp, va, 3);
1620 tcg_gen_sub_i64(vc, tmp, vb);
1621 tcg_temp_free(tmp);
1622 break;
1623 case 0x3D:
1624 /* CMPULE */
1625 tcg_gen_setcond_i64(TCG_COND_LEU, vc, va, vb);
1626 break;
1627 case 0x40:
1628 /* ADDL/V */
1629 tmp = tcg_temp_new();
1630 tcg_gen_ext32s_i64(tmp, va);
1631 tcg_gen_ext32s_i64(vc, vb);
1632 tcg_gen_add_i64(tmp, tmp, vc);
1633 tcg_gen_ext32s_i64(vc, tmp);
1634 gen_helper_check_overflow(cpu_env, vc, tmp);
1635 tcg_temp_free(tmp);
1636 break;
1637 case 0x49:
1638 /* SUBL/V */
1639 tmp = tcg_temp_new();
1640 tcg_gen_ext32s_i64(tmp, va);
1641 tcg_gen_ext32s_i64(vc, vb);
1642 tcg_gen_sub_i64(tmp, tmp, vc);
1643 tcg_gen_ext32s_i64(vc, tmp);
1644 gen_helper_check_overflow(cpu_env, vc, tmp);
1645 tcg_temp_free(tmp);
1646 break;
1647 case 0x4D:
1648 /* CMPLT */
1649 tcg_gen_setcond_i64(TCG_COND_LT, vc, va, vb);
1650 break;
1651 case 0x60:
1652 /* ADDQ/V */
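/* Signed overflow iff the addends have equal signs and the sum's
   sign differs: ov = (va ~^ vb) & (va ^ sum), tested in bit 63. */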
1653 tmp = tcg_temp_new();
1654 tmp2 = tcg_temp_new();
1655 tcg_gen_eqv_i64(tmp, va, vb);
1656 tcg_gen_mov_i64(tmp2, va);
1657 tcg_gen_add_i64(vc, va, vb);
1658 tcg_gen_xor_i64(tmp2, tmp2, vc);
1659 tcg_gen_and_i64(tmp, tmp, tmp2);
1660 tcg_gen_shri_i64(tmp, tmp, 63);
1661 tcg_gen_movi_i64(tmp2, 0);
1662 gen_helper_check_overflow(cpu_env, tmp, tmp2);
1663 tcg_temp_free(tmp);
1664 tcg_temp_free(tmp2);
1665 break;
1666 case 0x69:
1667 /* SUBQ/V */
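/* As for ADDQ/V, but subtraction overflows only when the operand
   signs differ, hence xor instead of eqv. */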
1668 tmp = tcg_temp_new();
1669 tmp2 = tcg_temp_new();
1670 tcg_gen_xor_i64(tmp, va, vb);
1671 tcg_gen_mov_i64(tmp2, va);
1672 tcg_gen_sub_i64(vc, va, vb);
1673 tcg_gen_xor_i64(tmp2, tmp2, vc);
1674 tcg_gen_and_i64(tmp, tmp, tmp2);
1675 tcg_gen_shri_i64(tmp, tmp, 63);
1676 tcg_gen_movi_i64(tmp2, 0);
1677 gen_helper_check_overflow(cpu_env, tmp, tmp2);
1678 tcg_temp_free(tmp);
1679 tcg_temp_free(tmp2);
1680 break;
1681 case 0x6D:
1682 /* CMPLE */
1683 tcg_gen_setcond_i64(TCG_COND_LE, vc, va, vb);
1684 break;
1685 default:
1686 goto invalid_opc;
1688 break;
1690 case 0x11:
1691 if (fn7 == 0x20) {
1692 if (rc == 31) {
1693 /* Special case BIS as NOP. */
1694 break;
1696 if (ra == 31) {
1697 /* Special case BIS as MOV. */
1698 vc = dest_gpr(ctx, rc);
1699 if (islit) {
1700 tcg_gen_movi_i64(vc, lit);
1701 } else {
1702 tcg_gen_mov_i64(vc, load_gpr(ctx, rb));
1704 break;
1708 vc = dest_gpr(ctx, rc);
1709 vb = load_gpr_lit(ctx, rb, lit, islit);
1711 if (fn7 == 0x28 && ra == 31) {
1712 /* Special case ORNOT as NOT. */
1713 tcg_gen_not_i64(vc, vb);
1714 break;
1717 va = load_gpr(ctx, ra);
1718 switch (fn7) {
1719 case 0x00:
1720 /* AND */
1721 tcg_gen_and_i64(vc, va, vb);
1722 break;
1723 case 0x08:
1724 /* BIC */
1725 tcg_gen_andc_i64(vc, va, vb);
1726 break;
1727 case 0x14:
1728 /* CMOVLBS */
1729 tmp = tcg_temp_new();
1730 tcg_gen_andi_i64(tmp, va, 1);
1731 tcg_gen_movcond_i64(TCG_COND_NE, vc, tmp, load_zero(ctx),
1732 vb, load_gpr(ctx, rc));
1733 tcg_temp_free(tmp);
1734 break;
1735 case 0x16:
1736 /* CMOVLBC */
1737 tmp = tcg_temp_new();
1738 tcg_gen_andi_i64(tmp, va, 1);
1739 tcg_gen_movcond_i64(TCG_COND_EQ, vc, tmp, load_zero(ctx),
1740 vb, load_gpr(ctx, rc));
1741 tcg_temp_free(tmp);
1742 break;
1743 case 0x20:
1744 /* BIS */
1745 tcg_gen_or_i64(vc, va, vb);
1746 break;
1747 case 0x24:
1748 /* CMOVEQ */
1749 tcg_gen_movcond_i64(TCG_COND_EQ, vc, va, load_zero(ctx),
1750 vb, load_gpr(ctx, rc));
1751 break;
1752 case 0x26:
1753 /* CMOVNE */
1754 tcg_gen_movcond_i64(TCG_COND_NE, vc, va, load_zero(ctx),
1755 vb, load_gpr(ctx, rc));
1756 break;
1757 case 0x28:
1758 /* ORNOT */
1759 tcg_gen_orc_i64(vc, va, vb);
1760 break;
1761 case 0x40:
1762 /* XOR */
1763 tcg_gen_xor_i64(vc, va, vb);
1764 break;
1765 case 0x44:
1766 /* CMOVLT */
1767 tcg_gen_movcond_i64(TCG_COND_LT, vc, va, load_zero(ctx),
1768 vb, load_gpr(ctx, rc));
1769 break;
1770 case 0x46:
1771 /* CMOVGE */
1772 tcg_gen_movcond_i64(TCG_COND_GE, vc, va, load_zero(ctx),
1773 vb, load_gpr(ctx, rc));
1774 break;
1775 case 0x48:
1776 /* EQV */
1777 tcg_gen_eqv_i64(vc, va, vb);
1778 break;
1779 case 0x61:
1780 /* AMASK */
1781 REQUIRE_REG_31(ra);
1782 tcg_gen_andi_i64(vc, vb, ~ctx->amask);
1783 break;
1784 case 0x64:
1785 /* CMOVLE */
1786 tcg_gen_movcond_i64(TCG_COND_LE, vc, va, load_zero(ctx),
1787 vb, load_gpr(ctx, rc));
1788 break;
1789 case 0x66:
1790 /* CMOVGT */
1791 tcg_gen_movcond_i64(TCG_COND_GT, vc, va, load_zero(ctx),
1792 vb, load_gpr(ctx, rc));
1793 break;
1794 case 0x6C:
1795 /* IMPLVER */
1796 REQUIRE_REG_31(ra);
1797 tcg_gen_movi_i64(vc, ctx->implver);
1798 break;
1799 default:
1800 goto invalid_opc;
1802 break;
1804 case 0x12:
1805 vc = dest_gpr(ctx, rc);
1806 va = load_gpr(ctx, ra);
1807 switch (fn7) {
1808 case 0x02:
1809 /* MSKBL */
1810 gen_msk_l(ctx, vc, va, rb, islit, lit, 0x01);
1811 break;
1812 case 0x06:
1813 /* EXTBL */
1814 gen_ext_l(ctx, vc, va, rb, islit, lit, 0x01);
1815 break;
1816 case 0x0B:
1817 /* INSBL */
1818 gen_ins_l(ctx, vc, va, rb, islit, lit, 0x01);
1819 break;
1820 case 0x12:
1821 /* MSKWL */
1822 gen_msk_l(ctx, vc, va, rb, islit, lit, 0x03);
1823 break;
1824 case 0x16:
1825 /* EXTWL */
1826 gen_ext_l(ctx, vc, va, rb, islit, lit, 0x03);
1827 break;
1828 case 0x1B:
1829 /* INSWL */
1830 gen_ins_l(ctx, vc, va, rb, islit, lit, 0x03);
1831 break;
1832 case 0x22:
1833 /* MSKLL */
1834 gen_msk_l(ctx, vc, va, rb, islit, lit, 0x0f);
1835 break;
1836 case 0x26:
1837 /* EXTLL */
1838 gen_ext_l(ctx, vc, va, rb, islit, lit, 0x0f);
1839 break;
1840 case 0x2B:
1841 /* INSLL */
1842 gen_ins_l(ctx, vc, va, rb, islit, lit, 0x0f);
1843 break;
1844 case 0x30:
1845 /* ZAP */
1846 if (islit) {
1847 gen_zapnoti(vc, va, ~lit);
1848 } else {
1849 gen_helper_zap(vc, va, load_gpr(ctx, rb));
1851 break;
1852 case 0x31:
1853 /* ZAPNOT */
1854 if (islit) {
1855 gen_zapnoti(vc, va, lit);
1856 } else {
1857 gen_helper_zapnot(vc, va, load_gpr(ctx, rb));
1859 break;
1860 case 0x32:
1861 /* MSKQL */
1862 gen_msk_l(ctx, vc, va, rb, islit, lit, 0xff);
1863 break;
1864 case 0x34:
1865 /* SRL */
1866 if (islit) {
1867 tcg_gen_shri_i64(vc, va, lit & 0x3f);
1868 } else {
1869 tmp = tcg_temp_new();
1870 vb = load_gpr(ctx, rb);
1871 tcg_gen_andi_i64(tmp, vb, 0x3f);
1872 tcg_gen_shr_i64(vc, va, tmp);
1873 tcg_temp_free(tmp);
1875 break;
1876 case 0x36:
1877 /* EXTQL */
1878 gen_ext_l(ctx, vc, va, rb, islit, lit, 0xff);
1879 break;
1880 case 0x39:
1881 /* SLL */
1882 if (islit) {
1883 tcg_gen_shli_i64(vc, va, lit & 0x3f);
1884 } else {
1885 tmp = tcg_temp_new();
1886 vb = load_gpr(ctx, rb);
1887 tcg_gen_andi_i64(tmp, vb, 0x3f);
1888 tcg_gen_shl_i64(vc, va, tmp);
1889 tcg_temp_free(tmp);
1891 break;
1892 case 0x3B:
1893 /* INSQL */
1894 gen_ins_l(ctx, vc, va, rb, islit, lit, 0xff);
1895 break;
1896 case 0x3C:
1897 /* SRA */
1898 if (islit) {
1899 tcg_gen_sari_i64(vc, va, lit & 0x3f);
1900 } else {
1901 tmp = tcg_temp_new();
1902 vb = load_gpr(ctx, rb);
1903 tcg_gen_andi_i64(tmp, vb, 0x3f);
1904 tcg_gen_sar_i64(vc, va, tmp);
1905 tcg_temp_free(tmp);
1907 break;
1908 case 0x52:
1909 /* MSKWH */
1910 gen_msk_h(ctx, vc, va, rb, islit, lit, 0x03);
1911 break;
1912 case 0x57:
1913 /* INSWH */
1914 gen_ins_h(ctx, vc, va, rb, islit, lit, 0x03);
1915 break;
1916 case 0x5A:
1917 /* EXTWH */
1918 gen_ext_h(ctx, vc, va, rb, islit, lit, 0x03);
1919 break;
1920 case 0x62:
1921 /* MSKLH */
1922 gen_msk_h(ctx, vc, va, rb, islit, lit, 0x0f);
1923 break;
1924 case 0x67:
1925 /* INSLH */
1926 gen_ins_h(ctx, vc, va, rb, islit, lit, 0x0f);
1927 break;
1928 case 0x6A:
1929 /* EXTLH */
1930 gen_ext_h(ctx, vc, va, rb, islit, lit, 0x0f);
1931 break;
1932 case 0x72:
1933 /* MSKQH */
1934 gen_msk_h(ctx, vc, va, rb, islit, lit, 0xff);
1935 break;
1936 case 0x77:
1937 /* INSQH */
1938 gen_ins_h(ctx, vc, va, rb, islit, lit, 0xff);
1939 break;
1940 case 0x7A:
1941 /* EXTQH */
1942 gen_ext_h(ctx, vc, va, rb, islit, lit, 0xff);
1943 break;
1944 default:
1945 goto invalid_opc;
1947 break;
1949 case 0x13:
1950 vc = dest_gpr(ctx, rc);
1951 vb = load_gpr_lit(ctx, rb, lit, islit);
1952 va = load_gpr(ctx, ra);
1953 switch (fn7) {
1954 case 0x00:
1955 /* MULL */
1956 tcg_gen_mul_i64(vc, va, vb);
1957 tcg_gen_ext32s_i64(vc, vc);
1958 break;
1959 case 0x20:
1960 /* MULQ */
1961 tcg_gen_mul_i64(vc, va, vb);
1962 break;
1963 case 0x30:
1964 /* UMULH */
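/* mulu2 produces the full 128-bit product; the low half goes to
   the scratch temp and is discarded, keeping only the high half. */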
1965 tmp = tcg_temp_new();
1966 tcg_gen_mulu2_i64(tmp, vc, va, vb);
1967 tcg_temp_free(tmp);
1968 break;
1969 case 0x40:
1970 /* MULL/V */
1971 tmp = tcg_temp_new();
1972 tcg_gen_ext32s_i64(tmp, va);
1973 tcg_gen_ext32s_i64(vc, vb);
1974 tcg_gen_mul_i64(tmp, tmp, vc);
1975 tcg_gen_ext32s_i64(vc, tmp);
1976 gen_helper_check_overflow(cpu_env, vc, tmp);
1977 tcg_temp_free(tmp);
1978 break;
1979 case 0x60:
1980 /* MULQ/V */
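/* muls2 gives the 128-bit signed product; it overflowed unless the
   high half equals the sign extension (sari 63) of the low half. */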
1981 tmp = tcg_temp_new();
1982 tmp2 = tcg_temp_new();
1983 tcg_gen_muls2_i64(vc, tmp, va, vb);
1984 tcg_gen_sari_i64(tmp2, vc, 63);
1985 gen_helper_check_overflow(cpu_env, tmp, tmp2);
1986 tcg_temp_free(tmp);
1987 tcg_temp_free(tmp2);
1988 break;
1989 default:
1990 goto invalid_opc;
1992 break;
1994 case 0x14:
1995 REQUIRE_AMASK(FIX);
1996 vc = dest_fpr(ctx, rc);
1997 switch (fpfn) { /* fn11 & 0x3F */
1998 case 0x04:
1999 /* ITOFS */
2000 REQUIRE_REG_31(rb);
2001 REQUIRE_FEN;
2002 t32 = tcg_temp_new_i32();
2003 va = load_gpr(ctx, ra);
2004 tcg_gen_extrl_i64_i32(t32, va);
2005 gen_helper_memory_to_s(vc, t32);
2006 tcg_temp_free_i32(t32);
2007 break;
2008 case 0x0A:
2009 /* SQRTF */
2010 REQUIRE_REG_31(ra);
2011 REQUIRE_FEN;
2012 vb = load_fpr(ctx, rb);
2013 gen_helper_sqrtf(vc, cpu_env, vb);
2014 break;
2015 case 0x0B:
2016 /* SQRTS */
2017 REQUIRE_REG_31(ra);
2018 REQUIRE_FEN;
2019 gen_sqrts(ctx, rb, rc, fn11);
2020 break;
2021 case 0x14:
2022 /* ITOFF */
2023 REQUIRE_REG_31(rb);
2024 REQUIRE_FEN;
2025 t32 = tcg_temp_new_i32();
2026 va = load_gpr(ctx, ra);
2027 tcg_gen_extrl_i64_i32(t32, va);
2028 gen_helper_memory_to_f(vc, t32);
2029 tcg_temp_free_i32(t32);
2030 break;
2031 case 0x24:
2032 /* ITOFT */
2033 REQUIRE_REG_31(rb);
2034 REQUIRE_FEN;
2035 va = load_gpr(ctx, ra);
2036 tcg_gen_mov_i64(vc, va);
2037 break;
2038 case 0x2A:
2039 /* SQRTG */
2040 REQUIRE_REG_31(ra);
2041 REQUIRE_FEN;
2042 vb = load_fpr(ctx, rb);
2043 gen_helper_sqrtg(vc, cpu_env, vb);
2044 break;
2045 case 0x2B:
2046 /* SQRTT */
2047 REQUIRE_REG_31(ra);
2048 REQUIRE_FEN;
2049 gen_sqrtt(ctx, rb, rc, fn11);
2050 break;
2051 default:
2052 goto invalid_opc;
2054 break;
2056 case 0x15:
2057 /* VAX floating point */
2058 /* XXX: rounding mode and trap are ignored (!) */
2059 vc = dest_fpr(ctx, rc);
2060 vb = load_fpr(ctx, rb);
2061 va = load_fpr(ctx, ra);
2062 switch (fpfn) { /* fn11 & 0x3F */
2063 case 0x00:
2064 /* ADDF */
2065 REQUIRE_FEN;
2066 gen_helper_addf(vc, cpu_env, va, vb);
2067 break;
2068 case 0x01:
2069 /* SUBF */
2070 REQUIRE_FEN;
2071 gen_helper_subf(vc, cpu_env, va, vb);
2072 break;
2073 case 0x02:
2074 /* MULF */
2075 REQUIRE_FEN;
2076 gen_helper_mulf(vc, cpu_env, va, vb);
2077 break;
2078 case 0x03:
2079 /* DIVF */
2080 REQUIRE_FEN;
2081 gen_helper_divf(vc, cpu_env, va, vb);
2082 break;
2083 case 0x1E:
2084 /* CVTDG -- TODO */
2085 REQUIRE_REG_31(ra);
2086 goto invalid_opc;
2087 case 0x20:
2088 /* ADDG */
2089 REQUIRE_FEN;
2090 gen_helper_addg(vc, cpu_env, va, vb);
2091 break;
2092 case 0x21:
2093 /* SUBG */
2094 REQUIRE_FEN;
2095 gen_helper_subg(vc, cpu_env, va, vb);
2096 break;
2097 case 0x22:
2098 /* MULG */
2099 REQUIRE_FEN;
2100 gen_helper_mulg(vc, cpu_env, va, vb);
2101 break;
2102 case 0x23:
2103 /* DIVG */
2104 REQUIRE_FEN;
2105 gen_helper_divg(vc, cpu_env, va, vb);
2106 break;
2107 case 0x25:
2108 /* CMPGEQ */
2109 REQUIRE_FEN;
2110 gen_helper_cmpgeq(vc, cpu_env, va, vb);
2111 break;
2112 case 0x26:
2113 /* CMPGLT */
2114 REQUIRE_FEN;
2115 gen_helper_cmpglt(vc, cpu_env, va, vb);
2116 break;
2117 case 0x27:
2118 /* CMPGLE */
2119 REQUIRE_FEN;
2120 gen_helper_cmpgle(vc, cpu_env, va, vb);
2121 break;
2122 case 0x2C:
2123 /* CVTGF */
2124 REQUIRE_REG_31(ra);
2125 REQUIRE_FEN;
2126 gen_helper_cvtgf(vc, cpu_env, vb);
2127 break;
2128 case 0x2D:
2129 /* CVTGD -- TODO */
2130 REQUIRE_REG_31(ra);
2131 goto invalid_opc;
2132 case 0x2F:
2133 /* CVTGQ */
2134 REQUIRE_REG_31(ra);
2135 REQUIRE_FEN;
2136 gen_helper_cvtgq(vc, cpu_env, vb);
2137 break;
2138 case 0x3C:
2139 /* CVTQF */
2140 REQUIRE_REG_31(ra);
2141 REQUIRE_FEN;
2142 gen_helper_cvtqf(vc, cpu_env, vb);
2143 break;
2144 case 0x3E:
2145 /* CVTQG */
2146 REQUIRE_REG_31(ra);
2147 REQUIRE_FEN;
2148 gen_helper_cvtqg(vc, cpu_env, vb);
2149 break;
2150 default:
2151 goto invalid_opc;
2153 break;
2155 case 0x16:
2156 /* IEEE floating-point */
2157 switch (fpfn) { /* fn11 & 0x3F */
2158 case 0x00:
2159 /* ADDS */
2160 REQUIRE_FEN;
2161 gen_adds(ctx, ra, rb, rc, fn11);
2162 break;
2163 case 0x01:
2164 /* SUBS */
2165 REQUIRE_FEN;
2166 gen_subs(ctx, ra, rb, rc, fn11);
2167 break;
2168 case 0x02:
2169 /* MULS */
2170 REQUIRE_FEN;
2171 gen_muls(ctx, ra, rb, rc, fn11);
2172 break;
2173 case 0x03:
2174 /* DIVS */
2175 REQUIRE_FEN;
2176 gen_divs(ctx, ra, rb, rc, fn11);
2177 break;
2178 case 0x20:
2179 /* ADDT */
2180 REQUIRE_FEN;
2181 gen_addt(ctx, ra, rb, rc, fn11);
2182 break;
2183 case 0x21:
2184 /* SUBT */
2185 REQUIRE_FEN;
2186 gen_subt(ctx, ra, rb, rc, fn11);
2187 break;
2188 case 0x22:
2189 /* MULT */
2190 REQUIRE_FEN;
2191 gen_mult(ctx, ra, rb, rc, fn11);
2192 break;
2193 case 0x23:
2194 /* DIVT */
2195 REQUIRE_FEN;
2196 gen_divt(ctx, ra, rb, rc, fn11);
2197 break;
2198 case 0x24:
2199 /* CMPTUN */
2200 REQUIRE_FEN;
2201 gen_cmptun(ctx, ra, rb, rc, fn11);
2202 break;
2203 case 0x25:
2204 /* CMPTEQ */
2205 REQUIRE_FEN;
2206 gen_cmpteq(ctx, ra, rb, rc, fn11);
2207 break;
2208 case 0x26:
2209 /* CMPTLT */
2210 REQUIRE_FEN;
2211 gen_cmptlt(ctx, ra, rb, rc, fn11);
2212 break;
2213 case 0x27:
2214 /* CMPTLE */
2215 REQUIRE_FEN;
2216 gen_cmptle(ctx, ra, rb, rc, fn11);
2217 break;
2218 case 0x2C:
2219 REQUIRE_REG_31(ra);
2220 REQUIRE_FEN;
2221 if (fn11 == 0x2AC || fn11 == 0x6AC) {
2222 /* CVTST */
2223 gen_cvtst(ctx, rb, rc, fn11);
2224 } else {
2225 /* CVTTS */
2226 gen_cvtts(ctx, rb, rc, fn11);
2228 break;
2229 case 0x2F:
2230 /* CVTTQ */
2231 REQUIRE_REG_31(ra);
2232 REQUIRE_FEN;
2233 gen_cvttq(ctx, rb, rc, fn11);
2234 break;
2235 case 0x3C:
2236 /* CVTQS */
2237 REQUIRE_REG_31(ra);
2238 REQUIRE_FEN;
2239 gen_cvtqs(ctx, rb, rc, fn11);
2240 break;
2241 case 0x3E:
2242 /* CVTQT */
2243 REQUIRE_REG_31(ra);
2244 REQUIRE_FEN;
2245 gen_cvtqt(ctx, rb, rc, fn11);
2246 break;
2247 default:
2248 goto invalid_opc;
2250 break;
2252 case 0x17:
2253 switch (fn11) {
2254 case 0x010:
2255 /* CVTLQ */
2256 REQUIRE_REG_31(ra);
2257 REQUIRE_FEN;
2258 vc = dest_fpr(ctx, rc);
2259 vb = load_fpr(ctx, rb);
2260 gen_cvtlq(vc, vb);
2261 break;
2262 case 0x020:
2263 /* CPYS */
2264 REQUIRE_FEN;
2265 if (rc == 31) {
2266 /* Special case CPYS as FNOP. */
2267 } else {
2268 vc = dest_fpr(ctx, rc);
2269 va = load_fpr(ctx, ra);
2270 if (ra == rb) {
2271 /* Special case CPYS as FMOV. */
2272 tcg_gen_mov_i64(vc, va);
2273 } else {
2274 vb = load_fpr(ctx, rb);
2275 gen_cpy_mask(vc, va, vb, 0, 0x8000000000000000ULL);
2278 break;
2279 case 0x021:
2280 /* CPYSN */
2281 REQUIRE_FEN;
2282 vc = dest_fpr(ctx, rc);
2283 vb = load_fpr(ctx, rb);
2284 va = load_fpr(ctx, ra);
2285 gen_cpy_mask(vc, va, vb, 1, 0x8000000000000000ULL);
2286 break;
2287 case 0x022:
2288 /* CPYSE */
2289 REQUIRE_FEN;
2290 vc = dest_fpr(ctx, rc);
2291 vb = load_fpr(ctx, rb);
2292 va = load_fpr(ctx, ra);
2293 gen_cpy_mask(vc, va, vb, 0, 0xFFF0000000000000ULL);
2294 break;
2295 case 0x024:
2296 /* MT_FPCR */
2297 REQUIRE_FEN;
2298 va = load_fpr(ctx, ra);
2299 gen_helper_store_fpcr(cpu_env, va);
2300 if (ctx->tb_rm == QUAL_RM_D) {
2301 /* Re-do the copy of the rounding mode to fp_status
2302 the next time we use dynamic rounding. */
2303 ctx->tb_rm = -1;
2305 break;
2306 case 0x025:
2307 /* MF_FPCR */
2308 REQUIRE_FEN;
2309 va = dest_fpr(ctx, ra);
2310 gen_helper_load_fpcr(va, cpu_env);
2311 break;
2312 case 0x02A:
2313 /* FCMOVEQ */
2314 REQUIRE_FEN;
2315 gen_fcmov(ctx, TCG_COND_EQ, ra, rb, rc);
2316 break;
2317 case 0x02B:
2318 /* FCMOVNE */
2319 REQUIRE_FEN;
2320 gen_fcmov(ctx, TCG_COND_NE, ra, rb, rc);
2321 break;
2322 case 0x02C:
2323 /* FCMOVLT */
2324 REQUIRE_FEN;
2325 gen_fcmov(ctx, TCG_COND_LT, ra, rb, rc);
2326 break;
2327 case 0x02D:
2328 /* FCMOVGE */
2329 REQUIRE_FEN;
2330 gen_fcmov(ctx, TCG_COND_GE, ra, rb, rc);
2331 break;
2332 case 0x02E:
2333 /* FCMOVLE */
2334 REQUIRE_FEN;
2335 gen_fcmov(ctx, TCG_COND_LE, ra, rb, rc);
2336 break;
2337 case 0x02F:
2338 /* FCMOVGT */
2339 REQUIRE_FEN;
2340 gen_fcmov(ctx, TCG_COND_GT, ra, rb, rc);
2341 break;
2342 case 0x030: /* CVTQL */
2343 case 0x130: /* CVTQL/V */
2344 case 0x530: /* CVTQL/SV */
2345 REQUIRE_REG_31(ra);
2346 REQUIRE_FEN;
2347 vc = dest_fpr(ctx, rc);
2348 vb = load_fpr(ctx, rb);
2349 gen_helper_cvtql(vc, cpu_env, vb);
2350 gen_fp_exc_raise(rc, fn11);
2351 break;
2352 default:
2353 goto invalid_opc;
2354 }
2355 break;
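/* Opcode 0x18 is the "miscellaneous" group: the function is carried
   in the 16-bit displacement field, so dispatch on disp16. Most
   members are barriers or cache/prefetch hints that can be treated
   as no-ops under emulation. */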
2357 case 0x18:
2358 switch ((uint16_t)disp16) {
2359 case 0x0000:
2360 /* TRAPB */
2361 /* No-op. */
2362 break;
2363 case 0x0400:
2364 /* EXCB */
2365 /* No-op. */
2366 break;
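/* MB and WMB map directly onto TCG memory barriers: a full barrier
   for MB, a store-store barrier for WMB. */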
2367 case 0x4000:
2368 /* MB */
2369 tcg_gen_mb(TCG_MO_ALL | TCG_BAR_SC);
2370 break;
2371 case 0x4400:
2372 /* WMB */
2373 tcg_gen_mb(TCG_MO_ST_ST | TCG_BAR_SC);
2374 break;
2375 case 0x8000:
2376 /* FETCH */
2377 /* No-op */
2378 break;
2379 case 0xA000:
2380 /* FETCH_M */
2381 /* No-op */
2382 break;
2383 case 0xC000:
2384 /* RPCC */
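/* Reading the cycle counter counts as an I/O operation when icount
   is active: bracket it with gen_io_start() and end the TB
   (DISAS_PC_STALE) so the access stays deterministic. */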
2385 va = dest_gpr(ctx, ra);
2386 if (tb_cflags(ctx->base.tb) & CF_USE_ICOUNT) {
2387 gen_io_start();
2388 gen_helper_load_pcc(va, cpu_env);
2389 ret = DISAS_PC_STALE;
2390 } else {
2391 gen_helper_load_pcc(va, cpu_env);
2392 }
2393 break;
2394 case 0xE000:
2395 /* RC */
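/* RC and RS copy the per-cpu interrupt flag into Ra and then clear
   (RC) or set (RS) it; gen_rx's final argument is the new value. */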
2396 gen_rx(ctx, ra, 0);
2397 break;
2398 case 0xE800:
2399 /* ECB */
2400 break;
2401 case 0xF000:
2402 /* RS */
2403 gen_rx(ctx, ra, 1);
2404 break;
2405 case 0xF800:
2406 /* WH64 */
2407 /* No-op */
2408 break;
2409 case 0xFC00:
2410 /* WH64EN */
2411 /* No-op */
2412 break;
2413 default:
2414 goto invalid_opc;
2415 }
2416 break;
2418 case 0x19:
2419 /* HW_MFPR (PALcode) */
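/* Move from an internal processor register; the IPR index is the
   low 16 bits of the instruction. Only legal in PALmode. */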
2420 #ifndef CONFIG_USER_ONLY
2421 REQUIRE_TB_FLAG(ENV_FLAG_PAL_MODE);
2422 va = dest_gpr(ctx, ra);
2423 ret = gen_mfpr(ctx, va, insn & 0xffff);
2424 break;
2425 #else
2426 goto invalid_opc;
2427 #endif
2429 case 0x1A:
2430 /* JMP, JSR, RET, JSR_COROUTINE. These only differ by the branch
2431 prediction stack action, which of course we don't implement. */
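/* The low two bits of the target address are prediction hints, not
   part of the PC, hence the mask with ~3. */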
2432 vb = load_gpr(ctx, rb);
2433 tcg_gen_andi_i64(cpu_pc, vb, ~3);
2434 if (ra != 31) {
2435 tcg_gen_movi_i64(ctx->ir[ra], ctx->base.pc_next);
2436 }
2437 ret = DISAS_PC_UPDATED;
2438 break;
2440 case 0x1B:
2441 /* HW_LD (PALcode) */
2442 #ifndef CONFIG_USER_ONLY
2443 REQUIRE_TB_FLAG(ENV_FLAG_PAL_MODE);
2444 {
2445 TCGv addr = tcg_temp_new();
2446 vb = load_gpr(ctx, rb);
2447 va = dest_gpr(ctx, ra);
2449 tcg_gen_addi_i64(addr, vb, disp12);
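/* Bits <15:12> of the instruction select the HW_LD variant:
   physical vs. virtual, locked, and alternate-access-mode forms. */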
2450 switch ((insn >> 12) & 0xF) {
2451 case 0x0:
2452 /* Longword physical access (hw_ldl/p) */
2453 tcg_gen_qemu_ld_i64(va, addr, MMU_PHYS_IDX, MO_LESL);
2454 break;
2455 case 0x1:
2456 /* Quadword physical access (hw_ldq/p) */
2457 tcg_gen_qemu_ld_i64(va, addr, MMU_PHYS_IDX, MO_LEQ);
2458 break;
2459 case 0x2:
2460 /* Longword physical access with lock (hw_ldl_l/p) */
2461 gen_qemu_ldl_l(va, addr, MMU_PHYS_IDX);
2462 break;
2463 case 0x3:
2464 /* Quadword physical access with lock (hw_ldq_l/p) */
2465 gen_qemu_ldq_l(va, addr, MMU_PHYS_IDX);
2466 break;
2467 case 0x4:
2468 /* Longword virtual PTE fetch (hw_ldl/v) */
2469 goto invalid_opc;
2470 case 0x5:
2471 /* Quadword virtual PTE fetch (hw_ldq/v) */
2472 goto invalid_opc;
2474 case 0x6:
2475 /* Invalid */
2476 goto invalid_opc;
2477 case 0x7:
2478 /* Invalid */
2479 goto invalid_opc;
2480 case 0x8:
2481 /* Longword virtual access (hw_ldl) */
2482 goto invalid_opc;
2483 case 0x9:
2484 /* Quadword virtual access (hw_ldq) */
2485 goto invalid_opc;
2486 case 0xA:
2487 /* Longword virtual access with protection check (hw_ldl/w) */
2488 tcg_gen_qemu_ld_i64(va, addr, MMU_KERNEL_IDX, MO_LESL);
2489 break;
2490 case 0xB:
2491 /* Quadword virtual access with protection check (hw_ldq/w) */
2492 tcg_gen_qemu_ld_i64(va, addr, MMU_KERNEL_IDX, MO_LEQ);
2493 break;
2494 case 0xC:
2495 /* Longword virtual access with alt access mode (hw_ldl/a) */
2496 goto invalid_opc;
2497 case 0xD:
2498 /* Quadword virtual access with alt access mode (hw_ldq/a) */
2499 goto invalid_opc;
2500 case 0xE:
2501 /* Longword virtual access with alternate access mode and
2502 protection checks (hw_ldl/wa) */
2503 tcg_gen_qemu_ld_i64(va, addr, MMU_USER_IDX, MO_LESL);
2504 break;
2505 case 0xF:
2506 /* Quadword virtual access with alternate access mode and
2507 protection checks (hw_ldq/wa) */
2508 tcg_gen_qemu_ld_i64(va, addr, MMU_USER_IDX, MO_LEQ);
2509 break;
2510 }
2511 tcg_temp_free(addr);
2512 break;
2513 }
2514 #else
2515 goto invalid_opc;
2516 #endif
2518 case 0x1C:
2519 vc = dest_gpr(ctx, rc);
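/* FTOIT and FTOIS take their source from the fp register file,
   unlike the rest of opcode 0x1C, so handle them before loading
   rb as a gpr-or-literal operand. */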
2520 if (fn7 == 0x70) {
2521 /* FTOIT */
2522 REQUIRE_AMASK(FIX);
2523 REQUIRE_REG_31(rb);
2524 va = load_fpr(ctx, ra);
2525 tcg_gen_mov_i64(vc, va);
2526 break;
2527 } else if (fn7 == 0x78) {
2528 /* FTOIS */
2529 REQUIRE_AMASK(FIX);
2530 REQUIRE_REG_31(rb);
2531 t32 = tcg_temp_new_i32();
2532 va = load_fpr(ctx, ra);
2533 gen_helper_s_to_memory(t32, va);
2534 tcg_gen_ext_i32_i64(vc, t32);
2535 tcg_temp_free_i32(t32);
2536 break;
2537 }
2538
2539 vb = load_gpr_lit(ctx, rb, lit, islit);
2540 switch (fn7) {
2541 case 0x00:
2542 /* SEXTB */
2543 REQUIRE_AMASK(BWX);
2544 REQUIRE_REG_31(ra);
2545 tcg_gen_ext8s_i64(vc, vb);
2546 break;
2547 case 0x01:
2548 /* SEXTW */
2549 REQUIRE_AMASK(BWX);
2550 REQUIRE_REG_31(ra);
2551 tcg_gen_ext16s_i64(vc, vb);
2552 break;
2553 case 0x30:
2554 /* CTPOP */
2555 REQUIRE_AMASK(CIX);
2556 REQUIRE_REG_31(ra);
2557 REQUIRE_NO_LIT;
2558 tcg_gen_ctpop_i64(vc, vb);
2559 break;
2560 case 0x31:
2561 /* PERR */
2562 REQUIRE_AMASK(MVI);
2563 REQUIRE_NO_LIT;
2564 va = load_gpr(ctx, ra);
2565 gen_helper_perr(vc, va, vb);
2566 break;
2567 case 0x32:
2568 /* CTLZ */
2569 REQUIRE_AMASK(CIX);
2570 REQUIRE_REG_31(ra);
2571 REQUIRE_NO_LIT;
2572 tcg_gen_clzi_i64(vc, vb, 64);
2573 break;
2574 case 0x33:
2575 /* CTTZ */
2576 REQUIRE_AMASK(CIX);
2577 REQUIRE_REG_31(ra);
2578 REQUIRE_NO_LIT;
2579 tcg_gen_ctzi_i64(vc, vb, 64);
2580 break;
2581 case 0x34:
2582 /* UNPKBW */
2583 REQUIRE_AMASK(MVI);
2584 REQUIRE_REG_31(ra);
2585 REQUIRE_NO_LIT;
2586 gen_helper_unpkbw(vc, vb);
2587 break;
2588 case 0x35:
2589 /* UNPKBL */
2590 REQUIRE_AMASK(MVI);
2591 REQUIRE_REG_31(ra);
2592 REQUIRE_NO_LIT;
2593 gen_helper_unpkbl(vc, vb);
2594 break;
2595 case 0x36:
2596 /* PKWB */
2597 REQUIRE_AMASK(MVI);
2598 REQUIRE_REG_31(ra);
2599 REQUIRE_NO_LIT;
2600 gen_helper_pkwb(vc, vb);
2601 break;
2602 case 0x37:
2603 /* PKLB */
2604 REQUIRE_AMASK(MVI);
2605 REQUIRE_REG_31(ra);
2606 REQUIRE_NO_LIT;
2607 gen_helper_pklb(vc, vb);
2608 break;
2609 case 0x38:
2610 /* MINSB8 */
2611 REQUIRE_AMASK(MVI);
2612 va = load_gpr(ctx, ra);
2613 gen_helper_minsb8(vc, va, vb);
2614 break;
2615 case 0x39:
2616 /* MINSW4 */
2617 REQUIRE_AMASK(MVI);
2618 va = load_gpr(ctx, ra);
2619 gen_helper_minsw4(vc, va, vb);
2620 break;
2621 case 0x3A:
2622 /* MINUB8 */
2623 REQUIRE_AMASK(MVI);
2624 va = load_gpr(ctx, ra);
2625 gen_helper_minub8(vc, va, vb);
2626 break;
2627 case 0x3B:
2628 /* MINUW4 */
2629 REQUIRE_AMASK(MVI);
2630 va = load_gpr(ctx, ra);
2631 gen_helper_minuw4(vc, va, vb);
2632 break;
2633 case 0x3C:
2634 /* MAXUB8 */
2635 REQUIRE_AMASK(MVI);
2636 va = load_gpr(ctx, ra);
2637 gen_helper_maxub8(vc, va, vb);
2638 break;
2639 case 0x3D:
2640 /* MAXUW4 */
2641 REQUIRE_AMASK(MVI);
2642 va = load_gpr(ctx, ra);
2643 gen_helper_maxuw4(vc, va, vb);
2644 break;
2645 case 0x3E:
2646 /* MAXSB8 */
2647 REQUIRE_AMASK(MVI);
2648 va = load_gpr(ctx, ra);
2649 gen_helper_maxsb8(vc, va, vb);
2650 break;
2651 case 0x3F:
2652 /* MAXSW4 */
2653 REQUIRE_AMASK(MVI);
2654 va = load_gpr(ctx, ra);
2655 gen_helper_maxsw4(vc, va, vb);
2656 break;
2657 default:
2658 goto invalid_opc;
2659 }
2660 break;
2662 case 0x1D:
2663 /* HW_MTPR (PALcode) */
2664 #ifndef CONFIG_USER_ONLY
2665 REQUIRE_TB_FLAG(ENV_FLAG_PAL_MODE);
2666 vb = load_gpr(ctx, rb);
2667 ret = gen_mtpr(ctx, vb, insn & 0xffff);
2668 break;
2669 #else
2670 goto invalid_opc;
2671 #endif
2673 case 0x1E:
2674 /* HW_RET (PALcode) */
2675 #ifndef CONFIG_USER_ONLY
2676 REQUIRE_TB_FLAG(ENV_FLAG_PAL_MODE);
2677 if (rb == 31) {
2678 /* Pre-EV6 CPUs interpreted this as HW_REI, loading the return
2679 address from EXC_ADDR. This turns out to be useful for our
2680 emulation PALcode, so continue to accept it. */
2681 vb = dest_sink(ctx);
2682 tcg_gen_ld_i64(vb, cpu_env, offsetof(CPUAlphaState, exc_addr));
2683 } else {
2684 vb = load_gpr(ctx, rb);
2685 }
2686 tcg_gen_movi_i64(cpu_lock_addr, -1);
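/* HW_RET also clears the RX flag; bit 0 of the return target
   selects the new PALmode state, and the low two bits are stripped
   from the PC. */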
2687 st_flag_byte(load_zero(ctx), ENV_FLAG_RX_SHIFT);
2688 tmp = tcg_temp_new();
2689 tcg_gen_andi_i64(tmp, vb, 1);
2690 st_flag_byte(tmp, ENV_FLAG_PAL_SHIFT);
2691 tcg_temp_free(tmp);
2692 tcg_gen_andi_i64(cpu_pc, vb, ~3);
2693 /* Allow interrupts to be recognized right away. */
2694 ret = DISAS_PC_UPDATED_NOCHAIN;
2695 break;
2696 #else
2697 goto invalid_opc;
2698 #endif
2700 case 0x1F:
2701 /* HW_ST (PALcode) */
2702 #ifndef CONFIG_USER_ONLY
2703 REQUIRE_TB_FLAG(ENV_FLAG_PAL_MODE);
2704 {
2705 switch ((insn >> 12) & 0xF) {
2706 case 0x0:
2707 /* Longword physical access */
2708 va = load_gpr(ctx, ra);
2709 vb = load_gpr(ctx, rb);
2710 tmp = tcg_temp_new();
2711 tcg_gen_addi_i64(tmp, vb, disp12);
2712 tcg_gen_qemu_st_i64(va, tmp, MMU_PHYS_IDX, MO_LESL);
2713 tcg_temp_free(tmp);
2714 break;
2715 case 0x1:
2716 /* Quadword physical access */
2717 va = load_gpr(ctx, ra);
2718 vb = load_gpr(ctx, rb);
2719 tmp = tcg_temp_new();
2720 tcg_gen_addi_i64(tmp, vb, disp12);
2721 tcg_gen_qemu_st_i64(va, tmp, MMU_PHYS_IDX, MO_LEQ);
2722 tcg_temp_free(tmp);
2723 break;
2724 case 0x2:
2725 /* Longword physical access with lock */
2726 ret = gen_store_conditional(ctx, ra, rb, disp12,
2727 MMU_PHYS_IDX, MO_LESL);
2728 break;
2729 case 0x3:
2730 /* Quadword physical access with lock */
2731 ret = gen_store_conditional(ctx, ra, rb, disp12,
2732 MMU_PHYS_IDX, MO_LEQ);
2733 break;
2734 case 0x4:
2735 /* Longword virtual access */
2736 goto invalid_opc;
2737 case 0x5:
2738 /* Quadword virtual access */
2739 goto invalid_opc;
2740 case 0x6:
2741 /* Invalid */
2742 goto invalid_opc;
2743 case 0x7:
2744 /* Invalid */
2745 goto invalid_opc;
2746 case 0x8:
2747 /* Invalid */
2748 goto invalid_opc;
2749 case 0x9:
2750 /* Invalid */
2751 goto invalid_opc;
2752 case 0xA:
2753 /* Invalid */
2754 goto invalid_opc;
2755 case 0xB:
2756 /* Invalid */
2757 goto invalid_opc;
2758 case 0xC:
2759 /* Longword virtual access with alternate access mode */
2760 goto invalid_opc;
2761 case 0xD:
2762 /* Quadword virtual access with alternate access mode */
2763 goto invalid_opc;
2764 case 0xE:
2765 /* Invalid */
2766 goto invalid_opc;
2767 case 0xF:
2768 /* Invalid */
2769 goto invalid_opc;
2770 }
2771 break;
2772 }
2773 #else
2774 goto invalid_opc;
2775 #endif
2776 case 0x20:
2777 /* LDF */
2778 REQUIRE_FEN;
2779 gen_load_mem(ctx, &gen_qemu_ldf, ra, rb, disp16, 1, 0);
2780 break;
2781 case 0x21:
2782 /* LDG */
2783 REQUIRE_FEN;
2784 gen_load_mem(ctx, &gen_qemu_ldg, ra, rb, disp16, 1, 0);
2785 break;
2786 case 0x22:
2787 /* LDS */
2788 REQUIRE_FEN;
2789 gen_load_mem(ctx, &gen_qemu_lds, ra, rb, disp16, 1, 0);
2790 break;
2791 case 0x23:
2792 /* LDT */
2793 REQUIRE_FEN;
2794 gen_load_mem(ctx, &tcg_gen_qemu_ld64, ra, rb, disp16, 1, 0);
2795 break;
2796 case 0x24:
2797 /* STF */
2798 REQUIRE_FEN;
2799 gen_store_mem(ctx, &gen_qemu_stf, ra, rb, disp16, 1, 0);
2800 break;
2801 case 0x25:
2802 /* STG */
2803 REQUIRE_FEN;
2804 gen_store_mem(ctx, &gen_qemu_stg, ra, rb, disp16, 1, 0);
2805 break;
2806 case 0x26:
2807 /* STS */
2808 REQUIRE_FEN;
2809 gen_store_mem(ctx, &gen_qemu_sts, ra, rb, disp16, 1, 0);
2810 break;
2811 case 0x27:
2812 /* STT */
2813 REQUIRE_FEN;
2814 gen_store_mem(ctx, &tcg_gen_qemu_st64, ra, rb, disp16, 1, 0);
2815 break;
2816 case 0x28:
2817 /* LDL */
2818 gen_load_mem(ctx, &tcg_gen_qemu_ld32s, ra, rb, disp16, 0, 0);
2819 break;
2820 case 0x29:
2821 /* LDQ */
2822 gen_load_mem(ctx, &tcg_gen_qemu_ld64, ra, rb, disp16, 0, 0);
2823 break;
2824 case 0x2A:
2825 /* LDL_L */
2826 gen_load_mem(ctx, &gen_qemu_ldl_l, ra, rb, disp16, 0, 0);
2827 break;
2828 case 0x2B:
2829 /* LDQ_L */
2830 gen_load_mem(ctx, &gen_qemu_ldq_l, ra, rb, disp16, 0, 0);
2831 break;
2832 case 0x2C:
2833 /* STL */
2834 gen_store_mem(ctx, &tcg_gen_qemu_st32, ra, rb, disp16, 0, 0);
2835 break;
2836 case 0x2D:
2837 /* STQ */
2838 gen_store_mem(ctx, &tcg_gen_qemu_st64, ra, rb, disp16, 0, 0);
2839 break;
2840 case 0x2E:
2841 /* STL_C */
2842 ret = gen_store_conditional(ctx, ra, rb, disp16,
2843 ctx->mem_idx, MO_LESL);
2844 break;
2845 case 0x2F:
2846 /* STQ_C */
2847 ret = gen_store_conditional(ctx, ra, rb, disp16,
2848 ctx->mem_idx, MO_LEQ);
2849 break;
2850 case 0x30:
2851 /* BR */
2852 ret = gen_bdirect(ctx, ra, disp21);
2853 break;
2854 case 0x31: /* FBEQ */
2855 REQUIRE_FEN;
2856 ret = gen_fbcond(ctx, TCG_COND_EQ, ra, disp21);
2857 break;
2858 case 0x32: /* FBLT */
2859 REQUIRE_FEN;
2860 ret = gen_fbcond(ctx, TCG_COND_LT, ra, disp21);
2861 break;
2862 case 0x33: /* FBLE */
2863 REQUIRE_FEN;
2864 ret = gen_fbcond(ctx, TCG_COND_LE, ra, disp21);
2865 break;
2866 case 0x34:
2867 /* BSR */
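/* Identical to BR under emulation; only the (unmodelled) return
   prediction hint differs. */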
2868 ret = gen_bdirect(ctx, ra, disp21);
2869 break;
2870 case 0x35: /* FBNE */
2871 REQUIRE_FEN;
2872 ret = gen_fbcond(ctx, TCG_COND_NE, ra, disp21);
2873 break;
2874 case 0x36: /* FBGE */
2875 REQUIRE_FEN;
2876 ret = gen_fbcond(ctx, TCG_COND_GE, ra, disp21);
2877 break;
2878 case 0x37: /* FBGT */
2879 REQUIRE_FEN;
2880 ret = gen_fbcond(ctx, TCG_COND_GT, ra, disp21);
2881 break;
2882 case 0x38:
2883 /* BLBC */
2884 ret = gen_bcond(ctx, TCG_COND_EQ, ra, disp21, 1);
2885 break;
2886 case 0x39:
2887 /* BEQ */
2888 ret = gen_bcond(ctx, TCG_COND_EQ, ra, disp21, 0);
2889 break;
2890 case 0x3A:
2891 /* BLT */
2892 ret = gen_bcond(ctx, TCG_COND_LT, ra, disp21, 0);
2893 break;
2894 case 0x3B:
2895 /* BLE */
2896 ret = gen_bcond(ctx, TCG_COND_LE, ra, disp21, 0);
2897 break;
2898 case 0x3C:
2899 /* BLBS */
2900 ret = gen_bcond(ctx, TCG_COND_NE, ra, disp21, 1);
2901 break;
2902 case 0x3D:
2903 /* BNE */
2904 ret = gen_bcond(ctx, TCG_COND_NE, ra, disp21, 0);
2905 break;
2906 case 0x3E:
2907 /* BGE */
2908 ret = gen_bcond(ctx, TCG_COND_GE, ra, disp21, 0);
2909 break;
2910 case 0x3F:
2911 /* BGT */
2912 ret = gen_bcond(ctx, TCG_COND_GT, ra, disp21, 0);
2913 break;
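/* Shared exception tails for the cases above: undefined encodings
   raise an OPCDEC fault via gen_invalid, and REQUIRE_FEN branches
   here when floating-point is disabled. */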
2914 invalid_opc:
2915 ret = gen_invalid(ctx);
2916 break;
2917 raise_fen:
2918 ret = gen_excp(ctx, EXCP_FEN, 0);
2919 break;
2920 }
2921
2922 return ret;
2923 }
2924
2925 static void alpha_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cpu)
2926 {
2927 DisasContext *ctx = container_of(dcbase, DisasContext, base);
2928 CPUAlphaState *env = cpu->env_ptr;
2929 int64_t bound;
2931 ctx->tbflags = ctx->base.tb->flags;
2932 ctx->mem_idx = cpu_mmu_index(env, false);
2933 ctx->implver = env->implver;
2934 ctx->amask = env->amask;
2936 #ifdef CONFIG_USER_ONLY
2937 ctx->ir = cpu_std_ir;
2938 #else
2939 ctx->palbr = env->palbr;
2940 ctx->ir = (ctx->tbflags & ENV_FLAG_PAL_MODE ? cpu_pal_ir : cpu_std_ir);
2941 #endif
2943 /* ??? Every TB begins with unset rounding mode, to be initialized on
2944 the first fp insn of the TB. Alternatively we could define a proper
2945 default for every TB (e.g. QUAL_RM_N or QUAL_RM_D) and make sure
2946 to reset the FP_STATUS to that default at the end of any TB that
2947 changes the default. We could even (gasp) dynamically figure out
2948 what default would be most efficient given the running program. */
2949 ctx->tb_rm = -1;
2950 /* Similarly for flush-to-zero. */
2951 ctx->tb_ftz = -1;
2953 ctx->zero = NULL;
2954 ctx->sink = NULL;
2956 /* Bound the number of insns to execute to those left on the page. */
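/* -(pc | TARGET_PAGE_MASK) is the byte count from pc to the end of
   its page; dividing by 4 converts that to whole insns. */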
2957 bound = -(ctx->base.pc_first | TARGET_PAGE_MASK) / 4;
2958 ctx->base.max_insns = MIN(ctx->base.max_insns, bound);
2959 }
2960
2961 static void alpha_tr_tb_start(DisasContextBase *db, CPUState *cpu)
2962 {
2963 }
2964
2965 static void alpha_tr_insn_start(DisasContextBase *dcbase, CPUState *cpu)
2966 {
2967 tcg_gen_insn_start(dcbase->pc_next);
2968 }
2969
2970 static void alpha_tr_translate_insn(DisasContextBase *dcbase, CPUState *cpu)
2971 {
2972 DisasContext *ctx = container_of(dcbase, DisasContext, base);
2973 CPUAlphaState *env = cpu->env_ptr;
2974 uint32_t insn = translator_ldl(env, ctx->base.pc_next);
2976 ctx->base.pc_next += 4;
2977 ctx->base.is_jmp = translate_one(ctx, insn);
2979 free_context_temps(ctx);
2980 translator_loop_temp_check(&ctx->base);
2981 }
2982
2983 static void alpha_tr_tb_stop(DisasContextBase *dcbase, CPUState *cpu)
2984 {
2985 DisasContext *ctx = container_of(dcbase, DisasContext, base);
2987 switch (ctx->base.is_jmp) {
2988 case DISAS_NORETURN:
2989 break;
2990 case DISAS_TOO_MANY:
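/* The TB ended only because it hit the insn limit; chain straight
   to the successor TB when the target stays on the same page. */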
2991 if (use_goto_tb(ctx, ctx->base.pc_next)) {
2992 tcg_gen_goto_tb(0);
2993 tcg_gen_movi_i64(cpu_pc, ctx->base.pc_next);
2994 tcg_gen_exit_tb(ctx->base.tb, 0);
2995 }
2996 /* FALLTHRU */
2997 case DISAS_PC_STALE:
2998 tcg_gen_movi_i64(cpu_pc, ctx->base.pc_next);
2999 /* FALLTHRU */
3000 case DISAS_PC_UPDATED:
3001 if (!ctx->base.singlestep_enabled) {
3002 tcg_gen_lookup_and_goto_ptr();
3003 break;
3004 }
3005 /* FALLTHRU */
3006 case DISAS_PC_UPDATED_NOCHAIN:
3007 if (ctx->base.singlestep_enabled) {
3008 gen_excp_1(EXCP_DEBUG, 0);
3009 } else {
3010 tcg_gen_exit_tb(NULL, 0);
3011 }
3012 break;
3013 default:
3014 g_assert_not_reached();
3015 }
3016 }
3017
3018 static void alpha_tr_disas_log(const DisasContextBase *dcbase, CPUState *cpu)
3019 {
3020 qemu_log("IN: %s\n", lookup_symbol(dcbase->pc_first));
3021 log_target_disas(cpu, dcbase->pc_first, dcbase->tb->size);
3022 }
3023
3024 static const TranslatorOps alpha_tr_ops = {
3025 .init_disas_context = alpha_tr_init_disas_context,
3026 .tb_start = alpha_tr_tb_start,
3027 .insn_start = alpha_tr_insn_start,
3028 .translate_insn = alpha_tr_translate_insn,
3029 .tb_stop = alpha_tr_tb_stop,
3030 .disas_log = alpha_tr_disas_log,
3031 };
3032
3033 void gen_intermediate_code(CPUState *cpu, TranslationBlock *tb, int max_insns)
3034 {
3035 DisasContext dc;
3036 translator_loop(&alpha_tr_ops, &dc.base, cpu, tb, max_insns);
3037 }
3038
3039 void restore_state_to_opc(CPUAlphaState *env, TranslationBlock *tb,
3040 target_ulong *data)
3041 {
3042 env->pc = data[0];
3043 }