target/alpha: Remove in_superpage
target/alpha/translate.c

/*
 * Alpha emulation cpu translation for qemu.
 *
 * Copyright (c) 2007 Jocelyn Mayer
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "cpu.h"
#include "sysemu/cpus.h"
#include "sysemu/cpu-timers.h"
#include "disas/disas.h"
#include "qemu/host-utils.h"
#include "exec/exec-all.h"
#include "tcg/tcg-op.h"
#include "exec/cpu_ldst.h"
#include "exec/helper-proto.h"
#include "exec/helper-gen.h"
#include "exec/translator.h"
#include "exec/log.h"

#undef ALPHA_DEBUG_DISAS
#define CONFIG_SOFTFLOAT_INLINE

#ifdef ALPHA_DEBUG_DISAS
# define LOG_DISAS(...) qemu_log_mask(CPU_LOG_TB_IN_ASM, ## __VA_ARGS__)
#else
# define LOG_DISAS(...) do { } while (0)
#endif

typedef struct DisasContext DisasContext;
struct DisasContext {
    DisasContextBase base;

#ifndef CONFIG_USER_ONLY
    uint64_t palbr;
#endif
    uint32_t tbflags;
    int mem_idx;

    /* implver and amask values for this CPU. */
    int implver;
    int amask;

    /* Current rounding mode for this TB. */
    int tb_rm;
    /* Current flush-to-zero setting for this TB. */
    int tb_ftz;

    /* The set of registers active in the current context. */
    TCGv *ir;

    /* Temporaries for $31 and $f31 as source and destination. */
    TCGv zero;
    TCGv sink;
    /* Temporary for immediate constants. */
    TCGv lit;
};

/* Target-specific return values from translate_one, indicating the
   state of the TB.  Note that DISAS_NEXT indicates that we are not
   exiting the TB. */
#define DISAS_PC_UPDATED_NOCHAIN  DISAS_TARGET_0
#define DISAS_PC_UPDATED          DISAS_TARGET_1
#define DISAS_PC_STALE            DISAS_TARGET_2

/* global register indexes */
static TCGv cpu_std_ir[31];
static TCGv cpu_fir[31];
static TCGv cpu_pc;
static TCGv cpu_lock_addr;
static TCGv cpu_lock_value;

#ifndef CONFIG_USER_ONLY
static TCGv cpu_pal_ir[31];
#endif

#include "exec/gen-icount.h"

void alpha_translate_init(void)
{
#define DEF_VAR(V)  { &cpu_##V, #V, offsetof(CPUAlphaState, V) }

    typedef struct { TCGv *var; const char *name; int ofs; } GlobalVar;
    static const GlobalVar vars[] = {
        DEF_VAR(pc),
        DEF_VAR(lock_addr),
        DEF_VAR(lock_value),
    };

#undef DEF_VAR

    /* Use the symbolic register names that match the disassembler. */
    static const char greg_names[31][4] = {
        "v0", "t0", "t1", "t2", "t3", "t4", "t5", "t6",
        "t7", "s0", "s1", "s2", "s3", "s4", "s5", "fp",
        "a0", "a1", "a2", "a3", "a4", "a5", "t8", "t9",
        "t10", "t11", "ra", "t12", "at", "gp", "sp"
    };
    static const char freg_names[31][4] = {
        "f0", "f1", "f2", "f3", "f4", "f5", "f6", "f7",
        "f8", "f9", "f10", "f11", "f12", "f13", "f14", "f15",
        "f16", "f17", "f18", "f19", "f20", "f21", "f22", "f23",
        "f24", "f25", "f26", "f27", "f28", "f29", "f30"
    };
#ifndef CONFIG_USER_ONLY
    static const char shadow_names[8][8] = {
        "pal_t7", "pal_s0", "pal_s1", "pal_s2",
        "pal_s3", "pal_s4", "pal_s5", "pal_t11"
    };
#endif

    int i;

    for (i = 0; i < 31; i++) {
        cpu_std_ir[i] = tcg_global_mem_new_i64(cpu_env,
                                               offsetof(CPUAlphaState, ir[i]),
                                               greg_names[i]);
    }

    for (i = 0; i < 31; i++) {
        cpu_fir[i] = tcg_global_mem_new_i64(cpu_env,
                                            offsetof(CPUAlphaState, fir[i]),
                                            freg_names[i]);
    }

#ifndef CONFIG_USER_ONLY
    memcpy(cpu_pal_ir, cpu_std_ir, sizeof(cpu_pal_ir));
    for (i = 0; i < 8; i++) {
        int r = (i == 7 ? 25 : i + 8);
        cpu_pal_ir[r] = tcg_global_mem_new_i64(cpu_env,
                                               offsetof(CPUAlphaState,
                                                        shadow[i]),
                                               shadow_names[i]);
    }
#endif

    for (i = 0; i < ARRAY_SIZE(vars); ++i) {
        const GlobalVar *v = &vars[i];
        *v->var = tcg_global_mem_new_i64(cpu_env, v->ofs, v->name);
    }
}

static TCGv load_zero(DisasContext *ctx)
{
    if (!ctx->zero) {
        ctx->zero = tcg_const_i64(0);
    }
    return ctx->zero;
}

static TCGv dest_sink(DisasContext *ctx)
{
    if (!ctx->sink) {
        ctx->sink = tcg_temp_new();
    }
    return ctx->sink;
}

static void free_context_temps(DisasContext *ctx)
{
    if (ctx->sink) {
        tcg_gen_discard_i64(ctx->sink);
        tcg_temp_free(ctx->sink);
        ctx->sink = NULL;
    }
    if (ctx->zero) {
        tcg_temp_free(ctx->zero);
        ctx->zero = NULL;
    }
    if (ctx->lit) {
        tcg_temp_free(ctx->lit);
        ctx->lit = NULL;
    }
}

static TCGv load_gpr(DisasContext *ctx, unsigned reg)
{
    if (likely(reg < 31)) {
        return ctx->ir[reg];
    } else {
        return load_zero(ctx);
    }
}

static TCGv load_gpr_lit(DisasContext *ctx, unsigned reg,
                         uint8_t lit, bool islit)
{
    if (islit) {
        ctx->lit = tcg_const_i64(lit);
        return ctx->lit;
    } else if (likely(reg < 31)) {
        return ctx->ir[reg];
    } else {
        return load_zero(ctx);
    }
}

static TCGv dest_gpr(DisasContext *ctx, unsigned reg)
{
    if (likely(reg < 31)) {
        return ctx->ir[reg];
    } else {
        return dest_sink(ctx);
    }
}

static TCGv load_fpr(DisasContext *ctx, unsigned reg)
{
    if (likely(reg < 31)) {
        return cpu_fir[reg];
    } else {
        return load_zero(ctx);
    }
}

static TCGv dest_fpr(DisasContext *ctx, unsigned reg)
{
    if (likely(reg < 31)) {
        return cpu_fir[reg];
    } else {
        return dest_sink(ctx);
    }
}

static int get_flag_ofs(unsigned shift)
{
    int ofs = offsetof(CPUAlphaState, flags);
#ifdef HOST_WORDS_BIGENDIAN
    ofs += 3 - (shift / 8);
#else
    ofs += shift / 8;
#endif
    return ofs;
}
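
/* Illustrative note (derived from the ENV_FLAG_*_SHIFT constants in cpu.h):
   env->flags packs several byte-wide fields -- PAL mode, PS, the RX flag,
   and FEN -- into one 32-bit word, each selected by a multiple-of-8 shift.
   get_flag_ofs() turns that bit shift into the byte offset of the field,
   adjusting for big-endian hosts so a single ld8u/st8 hits the right byte. */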

static void ld_flag_byte(TCGv val, unsigned shift)
{
    tcg_gen_ld8u_i64(val, cpu_env, get_flag_ofs(shift));
}

static void st_flag_byte(TCGv val, unsigned shift)
{
    tcg_gen_st8_i64(val, cpu_env, get_flag_ofs(shift));
}

static void gen_excp_1(int exception, int error_code)
{
    TCGv_i32 tmp1, tmp2;

    tmp1 = tcg_const_i32(exception);
    tmp2 = tcg_const_i32(error_code);
    gen_helper_excp(cpu_env, tmp1, tmp2);
    tcg_temp_free_i32(tmp2);
    tcg_temp_free_i32(tmp1);
}

static DisasJumpType gen_excp(DisasContext *ctx, int exception, int error_code)
{
    tcg_gen_movi_i64(cpu_pc, ctx->base.pc_next);
    gen_excp_1(exception, error_code);
    return DISAS_NORETURN;
}

static inline DisasJumpType gen_invalid(DisasContext *ctx)
{
    return gen_excp(ctx, EXCP_OPCDEC, 0);
}

static inline void gen_qemu_ldf(TCGv t0, TCGv t1, int flags)
{
    TCGv_i32 tmp32 = tcg_temp_new_i32();
    tcg_gen_qemu_ld_i32(tmp32, t1, flags, MO_LEUL);
    gen_helper_memory_to_f(t0, tmp32);
    tcg_temp_free_i32(tmp32);
}

static inline void gen_qemu_ldg(TCGv t0, TCGv t1, int flags)
{
    TCGv tmp = tcg_temp_new();
    tcg_gen_qemu_ld_i64(tmp, t1, flags, MO_LEQ);
    gen_helper_memory_to_g(t0, tmp);
    tcg_temp_free(tmp);
}

static inline void gen_qemu_lds(TCGv t0, TCGv t1, int flags)
{
    TCGv_i32 tmp32 = tcg_temp_new_i32();
    tcg_gen_qemu_ld_i32(tmp32, t1, flags, MO_LEUL);
    gen_helper_memory_to_s(t0, tmp32);
    tcg_temp_free_i32(tmp32);
}

static inline void gen_qemu_ldl_l(TCGv t0, TCGv t1, int flags)
{
    tcg_gen_qemu_ld_i64(t0, t1, flags, MO_LESL);
    tcg_gen_mov_i64(cpu_lock_addr, t1);
    tcg_gen_mov_i64(cpu_lock_value, t0);
}

static inline void gen_qemu_ldq_l(TCGv t0, TCGv t1, int flags)
{
    tcg_gen_qemu_ld_i64(t0, t1, flags, MO_LEQ);
    tcg_gen_mov_i64(cpu_lock_addr, t1);
    tcg_gen_mov_i64(cpu_lock_value, t0);
}

static inline void gen_load_mem(DisasContext *ctx,
                                void (*tcg_gen_qemu_load)(TCGv t0, TCGv t1,
                                                          int flags),
                                int ra, int rb, int32_t disp16, bool fp,
                                bool clear)
{
    TCGv tmp, addr, va;

    /* LDQ_U with ra $31 is UNOP.  Other various loads are forms of
       prefetches, which we can treat as nops.  No worries about
       missed exceptions here. */
    if (unlikely(ra == 31)) {
        return;
    }

    tmp = tcg_temp_new();
    addr = load_gpr(ctx, rb);

    if (disp16) {
        tcg_gen_addi_i64(tmp, addr, disp16);
        addr = tmp;
    }
    if (clear) {
        tcg_gen_andi_i64(tmp, addr, ~0x7);
        addr = tmp;
    }

    va = (fp ? cpu_fir[ra] : ctx->ir[ra]);
    tcg_gen_qemu_load(va, addr, ctx->mem_idx);

    tcg_temp_free(tmp);
}

static inline void gen_qemu_stf(TCGv t0, TCGv t1, int flags)
{
    TCGv_i32 tmp32 = tcg_temp_new_i32();
    gen_helper_f_to_memory(tmp32, t0);
    tcg_gen_qemu_st_i32(tmp32, t1, flags, MO_LEUL);
    tcg_temp_free_i32(tmp32);
}

static inline void gen_qemu_stg(TCGv t0, TCGv t1, int flags)
{
    TCGv tmp = tcg_temp_new();
    gen_helper_g_to_memory(tmp, t0);
    tcg_gen_qemu_st_i64(tmp, t1, flags, MO_LEQ);
    tcg_temp_free(tmp);
}

static inline void gen_qemu_sts(TCGv t0, TCGv t1, int flags)
{
    TCGv_i32 tmp32 = tcg_temp_new_i32();
    gen_helper_s_to_memory(tmp32, t0);
    tcg_gen_qemu_st_i32(tmp32, t1, flags, MO_LEUL);
    tcg_temp_free_i32(tmp32);
}

static inline void gen_store_mem(DisasContext *ctx,
                                 void (*tcg_gen_qemu_store)(TCGv t0, TCGv t1,
                                                            int flags),
                                 int ra, int rb, int32_t disp16, bool fp,
                                 bool clear)
{
    TCGv tmp, addr, va;

    tmp = tcg_temp_new();
    addr = load_gpr(ctx, rb);

    if (disp16) {
        tcg_gen_addi_i64(tmp, addr, disp16);
        addr = tmp;
    }
    if (clear) {
        tcg_gen_andi_i64(tmp, addr, ~0x7);
        addr = tmp;
    }

    va = (fp ? load_fpr(ctx, ra) : load_gpr(ctx, ra));
    tcg_gen_qemu_store(va, addr, ctx->mem_idx);

    tcg_temp_free(tmp);
}

static DisasJumpType gen_store_conditional(DisasContext *ctx, int ra, int rb,
                                           int32_t disp16, int mem_idx,
                                           MemOp op)
{
    TCGLabel *lab_fail, *lab_done;
    TCGv addr, val;

    addr = tcg_temp_new_i64();
    tcg_gen_addi_i64(addr, load_gpr(ctx, rb), disp16);
    free_context_temps(ctx);

    lab_fail = gen_new_label();
    lab_done = gen_new_label();
    tcg_gen_brcond_i64(TCG_COND_NE, addr, cpu_lock_addr, lab_fail);
    tcg_temp_free_i64(addr);

    val = tcg_temp_new_i64();
    tcg_gen_atomic_cmpxchg_i64(val, cpu_lock_addr, cpu_lock_value,
                               load_gpr(ctx, ra), mem_idx, op);
    free_context_temps(ctx);

    if (ra != 31) {
        tcg_gen_setcond_i64(TCG_COND_EQ, ctx->ir[ra], val, cpu_lock_value);
    }
    tcg_temp_free_i64(val);
    tcg_gen_br(lab_done);

    gen_set_label(lab_fail);
    if (ra != 31) {
        tcg_gen_movi_i64(ctx->ir[ra], 0);
    }

    gen_set_label(lab_done);
    tcg_gen_movi_i64(cpu_lock_addr, -1);
    return DISAS_NEXT;
}
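
/* Note: the LDx_L/STx_C pair is modeled above with a compare-and-swap.
   The store-conditional succeeds only if the memory at cpu_lock_addr
   still holds cpu_lock_value as saved by the matching load-locked; an
   ABA change between the two is not detected, the usual accepted
   approximation when emulating LL/SC with cmpxchg.  Resetting
   cpu_lock_addr to -1 makes a stray STx_C without a preceding LDx_L
   always fail. */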

static bool use_goto_tb(DisasContext *ctx, uint64_t dest)
{
#ifndef CONFIG_USER_ONLY
    /* Check for the dest on the same page as the start of the TB. */
    return ((ctx->base.tb->pc ^ dest) & TARGET_PAGE_MASK) == 0;
#else
    return true;
#endif
}
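
/* In system mode, direct TB chaining is restricted to targets on the same
   guest page as the TB's start, so that remapping or reprotecting that page
   invalidates the chained TBs along with it; user-mode guest memory is not
   remapped under our feet, hence the unconditional true. */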

static DisasJumpType gen_bdirect(DisasContext *ctx, int ra, int32_t disp)
{
    uint64_t dest = ctx->base.pc_next + (disp << 2);

    if (ra != 31) {
        tcg_gen_movi_i64(ctx->ir[ra], ctx->base.pc_next);
    }

    /* Notice branch-to-next; used to initialize RA with the PC. */
    if (disp == 0) {
        return 0;
    } else if (use_goto_tb(ctx, dest)) {
        tcg_gen_goto_tb(0);
        tcg_gen_movi_i64(cpu_pc, dest);
        tcg_gen_exit_tb(ctx->base.tb, 0);
        return DISAS_NORETURN;
    } else {
        tcg_gen_movi_i64(cpu_pc, dest);
        return DISAS_PC_UPDATED;
    }
}

static DisasJumpType gen_bcond_internal(DisasContext *ctx, TCGCond cond,
                                        TCGv cmp, int32_t disp)
{
    uint64_t dest = ctx->base.pc_next + (disp << 2);
    TCGLabel *lab_true = gen_new_label();

    if (use_goto_tb(ctx, dest)) {
        tcg_gen_brcondi_i64(cond, cmp, 0, lab_true);

        tcg_gen_goto_tb(0);
        tcg_gen_movi_i64(cpu_pc, ctx->base.pc_next);
        tcg_gen_exit_tb(ctx->base.tb, 0);

        gen_set_label(lab_true);
        tcg_gen_goto_tb(1);
        tcg_gen_movi_i64(cpu_pc, dest);
        tcg_gen_exit_tb(ctx->base.tb, 1);

        return DISAS_NORETURN;
    } else {
        TCGv_i64 z = tcg_const_i64(0);
        TCGv_i64 d = tcg_const_i64(dest);
        TCGv_i64 p = tcg_const_i64(ctx->base.pc_next);

        tcg_gen_movcond_i64(cond, cpu_pc, cmp, z, d, p);

        tcg_temp_free_i64(z);
        tcg_temp_free_i64(d);
        tcg_temp_free_i64(p);
        return DISAS_PC_UPDATED;
    }
}

static DisasJumpType gen_bcond(DisasContext *ctx, TCGCond cond, int ra,
                               int32_t disp, int mask)
{
    if (mask) {
        TCGv tmp = tcg_temp_new();
        DisasJumpType ret;

        tcg_gen_andi_i64(tmp, load_gpr(ctx, ra), 1);
        ret = gen_bcond_internal(ctx, cond, tmp, disp);
        tcg_temp_free(tmp);
        return ret;
    }
    return gen_bcond_internal(ctx, cond, load_gpr(ctx, ra), disp);
}

/* Fold -0.0 for comparison with COND. */

static void gen_fold_mzero(TCGCond cond, TCGv dest, TCGv src)
{
    uint64_t mzero = 1ull << 63;

    switch (cond) {
    case TCG_COND_LE:
    case TCG_COND_GT:
        /* For <= or >, the -0.0 value directly compares the way we want. */
        tcg_gen_mov_i64(dest, src);
        break;

    case TCG_COND_EQ:
    case TCG_COND_NE:
        /* For == or !=, we can simply mask off the sign bit and compare. */
        tcg_gen_andi_i64(dest, src, mzero - 1);
        break;

    case TCG_COND_GE:
    case TCG_COND_LT:
        /* For >= or <, map -0.0 to +0.0 via comparison and mask. */
        tcg_gen_setcondi_i64(TCG_COND_NE, dest, src, mzero);
        tcg_gen_neg_i64(dest, dest);
        tcg_gen_and_i64(dest, dest, src);
        break;

    default:
        abort();
    }
}
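
/* For the GE/LT case above: setcond produces 1 for any value other than
   -0.0 (0x8000000000000000) and 0 for -0.0 itself; negating that yields an
   all-ones or all-zero mask, so the final AND passes every other value
   through unchanged while -0.0 becomes +0.0 before the signed compare. */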

static DisasJumpType gen_fbcond(DisasContext *ctx, TCGCond cond, int ra,
                                int32_t disp)
{
    TCGv cmp_tmp = tcg_temp_new();
    DisasJumpType ret;

    gen_fold_mzero(cond, cmp_tmp, load_fpr(ctx, ra));
    ret = gen_bcond_internal(ctx, cond, cmp_tmp, disp);
    tcg_temp_free(cmp_tmp);
    return ret;
}

static void gen_fcmov(DisasContext *ctx, TCGCond cond, int ra, int rb, int rc)
{
    TCGv_i64 va, vb, z;

    z = load_zero(ctx);
    vb = load_fpr(ctx, rb);
    va = tcg_temp_new();
    gen_fold_mzero(cond, va, load_fpr(ctx, ra));

    tcg_gen_movcond_i64(cond, dest_fpr(ctx, rc), va, z, vb, load_fpr(ctx, rc));

    tcg_temp_free(va);
}

#define QUAL_RM_N       0x080   /* Round mode nearest even */
#define QUAL_RM_C       0x000   /* Round mode chopped */
#define QUAL_RM_M       0x040   /* Round mode minus infinity */
#define QUAL_RM_D       0x0c0   /* Round mode dynamic */
#define QUAL_RM_MASK    0x0c0

#define QUAL_U          0x100   /* Underflow enable (fp output) */
#define QUAL_V          0x100   /* Overflow enable (int output) */
#define QUAL_S          0x400   /* Software completion enable */
#define QUAL_I          0x200   /* Inexact detection enable */
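
/* These qualifier bits live in fn11 above the 6-bit function code proper.
   As a worked example (per the Alpha architecture encoding): plain ADDT
   rounds to nearest, fn11 = QUAL_RM_N | 0x20 = 0x0A0, while ADDT/SUI adds
   software completion, underflow and inexact traps:
   0x400 | 0x100 | 0x200 | 0x0A0 = 0x7A0. */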

static void gen_qual_roundmode(DisasContext *ctx, int fn11)
{
    TCGv_i32 tmp;

    fn11 &= QUAL_RM_MASK;
    if (fn11 == ctx->tb_rm) {
        return;
    }
    ctx->tb_rm = fn11;

    tmp = tcg_temp_new_i32();
    switch (fn11) {
    case QUAL_RM_N:
        tcg_gen_movi_i32(tmp, float_round_nearest_even);
        break;
    case QUAL_RM_C:
        tcg_gen_movi_i32(tmp, float_round_to_zero);
        break;
    case QUAL_RM_M:
        tcg_gen_movi_i32(tmp, float_round_down);
        break;
    case QUAL_RM_D:
        tcg_gen_ld8u_i32(tmp, cpu_env,
                         offsetof(CPUAlphaState, fpcr_dyn_round));
        break;
    }

#if defined(CONFIG_SOFTFLOAT_INLINE)
    /* ??? The "fpu/softfloat.h" interface is to call set_float_rounding_mode.
       With CONFIG_SOFTFLOAT that expands to an out-of-line call that just
       sets the one field. */
    tcg_gen_st8_i32(tmp, cpu_env,
                    offsetof(CPUAlphaState, fp_status.float_rounding_mode));
#else
    gen_helper_setroundmode(tmp);
#endif
    tcg_temp_free_i32(tmp);
}

static void gen_qual_flushzero(DisasContext *ctx, int fn11)
{
    TCGv_i32 tmp;

    fn11 &= QUAL_U;
    if (fn11 == ctx->tb_ftz) {
        return;
    }
    ctx->tb_ftz = fn11;

    tmp = tcg_temp_new_i32();
    if (fn11) {
        /* Underflow is enabled, use the FPCR setting. */
        tcg_gen_ld8u_i32(tmp, cpu_env,
                         offsetof(CPUAlphaState, fpcr_flush_to_zero));
    } else {
        /* Underflow is disabled, force flush-to-zero. */
        tcg_gen_movi_i32(tmp, 1);
    }

#if defined(CONFIG_SOFTFLOAT_INLINE)
    tcg_gen_st8_i32(tmp, cpu_env,
                    offsetof(CPUAlphaState, fp_status.flush_to_zero));
#else
    gen_helper_setflushzero(tmp);
#endif
    tcg_temp_free_i32(tmp);
}

static TCGv gen_ieee_input(DisasContext *ctx, int reg, int fn11, int is_cmp)
{
    TCGv val;

    if (unlikely(reg == 31)) {
        val = load_zero(ctx);
    } else {
        val = cpu_fir[reg];
        if ((fn11 & QUAL_S) == 0) {
            if (is_cmp) {
                gen_helper_ieee_input_cmp(cpu_env, val);
            } else {
                gen_helper_ieee_input(cpu_env, val);
            }
        } else {
#ifndef CONFIG_USER_ONLY
            /* In system mode, raise exceptions for denormals like real
               hardware.  In user mode, proceed as if the OS completion
               handler is handling the denormal as per spec. */
            gen_helper_ieee_input_s(cpu_env, val);
#endif
        }
    }
    return val;
}

static void gen_fp_exc_raise(int rc, int fn11)
{
    /* ??? We ought to be able to do something with imprecise exceptions.
       E.g. notice we're still in the trap shadow of something within the
       TB and do not generate the code to signal the exception; end the TB
       when an exception is forced to arrive, either by consumption of a
       register value or TRAPB or EXCB. */
    TCGv_i32 reg, ign;
    uint32_t ignore = 0;

    if (!(fn11 & QUAL_U)) {
        /* Note that QUAL_U == QUAL_V, so ignore either. */
        ignore |= FPCR_UNF | FPCR_IOV;
    }
    if (!(fn11 & QUAL_I)) {
        ignore |= FPCR_INE;
    }
    ign = tcg_const_i32(ignore);

    /* ??? Pass in the regno of the destination so that the helper can
       set EXC_MASK, which contains a bitmask of destination registers
       that have caused arithmetic traps.  A simple userspace emulation
       does not require this.  We do need it for a guest kernel's entArith,
       or if we were to do something clever with imprecise exceptions. */
    reg = tcg_const_i32(rc + 32);
    if (fn11 & QUAL_S) {
        gen_helper_fp_exc_raise_s(cpu_env, ign, reg);
    } else {
        gen_helper_fp_exc_raise(cpu_env, ign, reg);
    }

    tcg_temp_free_i32(reg);
    tcg_temp_free_i32(ign);
}

static void gen_cvtlq(TCGv vc, TCGv vb)
{
    TCGv tmp = tcg_temp_new();

    /* The arithmetic right shift here, plus the sign-extended mask below
       yields a sign-extended result without an explicit ext32s_i64. */
    tcg_gen_shri_i64(tmp, vb, 29);
    tcg_gen_sari_i64(vc, vb, 32);
    tcg_gen_deposit_i64(vc, vc, tmp, 0, 30);

    tcg_temp_free(tmp);
}
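
/* The register layout being unpacked here keeps longword bits <31:30> in
   FP-register bits <63:62> and longword bits <29:0> in register bits
   <58:29>.  The shri recovers the low 30 bits, the sari recovers the high
   two bits along with the sign extension, and the deposit stitches them
   back into a canonical sign-extended 64-bit integer. */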

static void gen_ieee_arith2(DisasContext *ctx,
                            void (*helper)(TCGv, TCGv_ptr, TCGv),
                            int rb, int rc, int fn11)
{
    TCGv vb;

    gen_qual_roundmode(ctx, fn11);
    gen_qual_flushzero(ctx, fn11);

    vb = gen_ieee_input(ctx, rb, fn11, 0);
    helper(dest_fpr(ctx, rc), cpu_env, vb);

    gen_fp_exc_raise(rc, fn11);
}

#define IEEE_ARITH2(name)                                       \
static inline void glue(gen_, name)(DisasContext *ctx,          \
                                    int rb, int rc, int fn11)   \
{                                                               \
    gen_ieee_arith2(ctx, gen_helper_##name, rb, rc, fn11);      \
}
IEEE_ARITH2(sqrts)
IEEE_ARITH2(sqrtt)
IEEE_ARITH2(cvtst)
IEEE_ARITH2(cvtts)

static void gen_cvttq(DisasContext *ctx, int rb, int rc, int fn11)
{
    TCGv vb, vc;

    /* No need to set flushzero, since we have an integer output. */
    vb = gen_ieee_input(ctx, rb, fn11, 0);
    vc = dest_fpr(ctx, rc);

    /* Almost all integer conversions use cropped rounding;
       special case that. */
    if ((fn11 & QUAL_RM_MASK) == QUAL_RM_C) {
        gen_helper_cvttq_c(vc, cpu_env, vb);
    } else {
        gen_qual_roundmode(ctx, fn11);
        gen_helper_cvttq(vc, cpu_env, vb);
    }
    gen_fp_exc_raise(rc, fn11);
}

static void gen_ieee_intcvt(DisasContext *ctx,
                            void (*helper)(TCGv, TCGv_ptr, TCGv),
                            int rb, int rc, int fn11)
{
    TCGv vb, vc;

    gen_qual_roundmode(ctx, fn11);
    vb = load_fpr(ctx, rb);
    vc = dest_fpr(ctx, rc);

    /* The only exception that can be raised by integer conversion
       is inexact.  Thus we only need to worry about exceptions when
       inexact handling is requested. */
    if (fn11 & QUAL_I) {
        helper(vc, cpu_env, vb);
        gen_fp_exc_raise(rc, fn11);
    } else {
        helper(vc, cpu_env, vb);
    }
}

#define IEEE_INTCVT(name)                                       \
static inline void glue(gen_, name)(DisasContext *ctx,          \
                                    int rb, int rc, int fn11)   \
{                                                               \
    gen_ieee_intcvt(ctx, gen_helper_##name, rb, rc, fn11);      \
}
IEEE_INTCVT(cvtqs)
IEEE_INTCVT(cvtqt)

static void gen_cpy_mask(TCGv vc, TCGv va, TCGv vb, bool inv_a, uint64_t mask)
{
    TCGv vmask = tcg_const_i64(mask);
    TCGv tmp = tcg_temp_new_i64();

    if (inv_a) {
        tcg_gen_andc_i64(tmp, vmask, va);
    } else {
        tcg_gen_and_i64(tmp, va, vmask);
    }

    tcg_gen_andc_i64(vc, vb, vmask);
    tcg_gen_or_i64(vc, vc, tmp);

    tcg_temp_free(vmask);
    tcg_temp_free(tmp);
}

static void gen_ieee_arith3(DisasContext *ctx,
                            void (*helper)(TCGv, TCGv_ptr, TCGv, TCGv),
                            int ra, int rb, int rc, int fn11)
{
    TCGv va, vb, vc;

    gen_qual_roundmode(ctx, fn11);
    gen_qual_flushzero(ctx, fn11);

    va = gen_ieee_input(ctx, ra, fn11, 0);
    vb = gen_ieee_input(ctx, rb, fn11, 0);
    vc = dest_fpr(ctx, rc);
    helper(vc, cpu_env, va, vb);

    gen_fp_exc_raise(rc, fn11);
}

#define IEEE_ARITH3(name)                                               \
static inline void glue(gen_, name)(DisasContext *ctx,                  \
                                    int ra, int rb, int rc, int fn11)   \
{                                                                       \
    gen_ieee_arith3(ctx, gen_helper_##name, ra, rb, rc, fn11);          \
}
IEEE_ARITH3(adds)
IEEE_ARITH3(subs)
IEEE_ARITH3(muls)
IEEE_ARITH3(divs)
IEEE_ARITH3(addt)
IEEE_ARITH3(subt)
IEEE_ARITH3(mult)
IEEE_ARITH3(divt)

static void gen_ieee_compare(DisasContext *ctx,
                             void (*helper)(TCGv, TCGv_ptr, TCGv, TCGv),
                             int ra, int rb, int rc, int fn11)
{
    TCGv va, vb, vc;

    va = gen_ieee_input(ctx, ra, fn11, 1);
    vb = gen_ieee_input(ctx, rb, fn11, 1);
    vc = dest_fpr(ctx, rc);
    helper(vc, cpu_env, va, vb);

    gen_fp_exc_raise(rc, fn11);
}

#define IEEE_CMP3(name)                                                 \
static inline void glue(gen_, name)(DisasContext *ctx,                  \
                                    int ra, int rb, int rc, int fn11)   \
{                                                                       \
    gen_ieee_compare(ctx, gen_helper_##name, ra, rb, rc, fn11);         \
}
IEEE_CMP3(cmptun)
IEEE_CMP3(cmpteq)
IEEE_CMP3(cmptlt)
IEEE_CMP3(cmptle)

static inline uint64_t zapnot_mask(uint8_t lit)
{
    uint64_t mask = 0;
    int i;

    for (i = 0; i < 8; ++i) {
        if ((lit >> i) & 1) {
            mask |= 0xffull << (i * 8);
        }
    }
    return mask;
}
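
/* Example: zapnot_mask(0x0f) == 0x00000000ffffffffull and
   zapnot_mask(0x81) == 0xff000000000000ffull; each set bit in LIT
   preserves the corresponding byte lane of the operand. */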

/* Implement zapnot with an immediate operand, which expands to some
   form of immediate AND.  This is a basic building block in the
   definition of many of the other byte manipulation instructions. */
static void gen_zapnoti(TCGv dest, TCGv src, uint8_t lit)
{
    switch (lit) {
    case 0x00:
        tcg_gen_movi_i64(dest, 0);
        break;
    case 0x01:
        tcg_gen_ext8u_i64(dest, src);
        break;
    case 0x03:
        tcg_gen_ext16u_i64(dest, src);
        break;
    case 0x0f:
        tcg_gen_ext32u_i64(dest, src);
        break;
    case 0xff:
        tcg_gen_mov_i64(dest, src);
        break;
    default:
        tcg_gen_andi_i64(dest, src, zapnot_mask(lit));
        break;
    }
}

/* EXTWH, EXTLH, EXTQH */
static void gen_ext_h(DisasContext *ctx, TCGv vc, TCGv va, int rb, bool islit,
                      uint8_t lit, uint8_t byte_mask)
{
    if (islit) {
        int pos = (64 - lit * 8) & 0x3f;
        int len = cto32(byte_mask) * 8;
        if (pos < len) {
            tcg_gen_deposit_z_i64(vc, va, pos, len - pos);
        } else {
            tcg_gen_movi_i64(vc, 0);
        }
    } else {
        TCGv tmp = tcg_temp_new();
        tcg_gen_shli_i64(tmp, load_gpr(ctx, rb), 3);
        tcg_gen_neg_i64(tmp, tmp);
        tcg_gen_andi_i64(tmp, tmp, 0x3f);
        tcg_gen_shl_i64(vc, va, tmp);
        tcg_temp_free(tmp);
    }
    gen_zapnoti(vc, vc, byte_mask);
}

/* EXTBL, EXTWL, EXTLL, EXTQL */
static void gen_ext_l(DisasContext *ctx, TCGv vc, TCGv va, int rb, bool islit,
                      uint8_t lit, uint8_t byte_mask)
{
    if (islit) {
        int pos = (lit & 7) * 8;
        int len = cto32(byte_mask) * 8;
        if (pos + len >= 64) {
            len = 64 - pos;
        }
        tcg_gen_extract_i64(vc, va, pos, len);
    } else {
        TCGv tmp = tcg_temp_new();
        tcg_gen_andi_i64(tmp, load_gpr(ctx, rb), 7);
        tcg_gen_shli_i64(tmp, tmp, 3);
        tcg_gen_shr_i64(vc, va, tmp);
        tcg_temp_free(tmp);
        gen_zapnoti(vc, vc, byte_mask);
    }
}

/* INSWH, INSLH, INSQH */
static void gen_ins_h(DisasContext *ctx, TCGv vc, TCGv va, int rb, bool islit,
                      uint8_t lit, uint8_t byte_mask)
{
    if (islit) {
        int pos = 64 - (lit & 7) * 8;
        int len = cto32(byte_mask) * 8;
        if (pos < len) {
            tcg_gen_extract_i64(vc, va, pos, len - pos);
        } else {
            tcg_gen_movi_i64(vc, 0);
        }
    } else {
        TCGv tmp = tcg_temp_new();
        TCGv shift = tcg_temp_new();

        /* The instruction description has us left-shift the byte mask
           and extract bits <15:8> and apply that zap at the end.  This
           is equivalent to simply performing the zap first and shifting
           afterward. */
        gen_zapnoti(tmp, va, byte_mask);

        /* If (B & 7) == 0, we need to shift by 64 and leave a zero.  Do this
           portably by splitting the shift into two parts: shift_count-1 and 1.
           Arrange for the -1 by using ones-complement instead of
           twos-complement in the negation: ~(B * 8) & 63. */

        tcg_gen_shli_i64(shift, load_gpr(ctx, rb), 3);
        tcg_gen_not_i64(shift, shift);
        tcg_gen_andi_i64(shift, shift, 0x3f);

        tcg_gen_shr_i64(vc, tmp, shift);
        tcg_gen_shri_i64(vc, vc, 1);
        tcg_temp_free(shift);
        tcg_temp_free(tmp);
    }
}

/* INSBL, INSWL, INSLL, INSQL */
static void gen_ins_l(DisasContext *ctx, TCGv vc, TCGv va, int rb, bool islit,
                      uint8_t lit, uint8_t byte_mask)
{
    if (islit) {
        int pos = (lit & 7) * 8;
        int len = cto32(byte_mask) * 8;
        if (pos + len > 64) {
            len = 64 - pos;
        }
        tcg_gen_deposit_z_i64(vc, va, pos, len);
    } else {
        TCGv tmp = tcg_temp_new();
        TCGv shift = tcg_temp_new();

        /* The instruction description has us left-shift the byte mask
           and extract bits <15:8> and apply that zap at the end.  This
           is equivalent to simply performing the zap first and shifting
           afterward. */
        gen_zapnoti(tmp, va, byte_mask);

        tcg_gen_andi_i64(shift, load_gpr(ctx, rb), 7);
        tcg_gen_shli_i64(shift, shift, 3);
        tcg_gen_shl_i64(vc, tmp, shift);
        tcg_temp_free(shift);
        tcg_temp_free(tmp);
    }
}

/* MSKWH, MSKLH, MSKQH */
static void gen_msk_h(DisasContext *ctx, TCGv vc, TCGv va, int rb, bool islit,
                      uint8_t lit, uint8_t byte_mask)
{
    if (islit) {
        gen_zapnoti(vc, va, ~((byte_mask << (lit & 7)) >> 8));
    } else {
        TCGv shift = tcg_temp_new();
        TCGv mask = tcg_temp_new();

        /* The instruction description is as above, where the byte_mask
           is shifted left, and then we extract bits <15:8>.  This can be
           emulated with a right-shift on the expanded byte mask.  This
           requires extra care because for an input <2:0> == 0 we need a
           shift of 64 bits in order to generate a zero.  This is done by
           splitting the shift into two parts, the variable shift - 1
           followed by a constant 1 shift.  The code we expand below is
           equivalent to ~(B * 8) & 63. */

        tcg_gen_shli_i64(shift, load_gpr(ctx, rb), 3);
        tcg_gen_not_i64(shift, shift);
        tcg_gen_andi_i64(shift, shift, 0x3f);
        tcg_gen_movi_i64(mask, zapnot_mask(byte_mask));
        tcg_gen_shr_i64(mask, mask, shift);
        tcg_gen_shri_i64(mask, mask, 1);

        tcg_gen_andc_i64(vc, va, mask);

        tcg_temp_free(mask);
        tcg_temp_free(shift);
    }
}

/* MSKBL, MSKWL, MSKLL, MSKQL */
static void gen_msk_l(DisasContext *ctx, TCGv vc, TCGv va, int rb, bool islit,
                      uint8_t lit, uint8_t byte_mask)
{
    if (islit) {
        gen_zapnoti(vc, va, ~(byte_mask << (lit & 7)));
    } else {
        TCGv shift = tcg_temp_new();
        TCGv mask = tcg_temp_new();

        tcg_gen_andi_i64(shift, load_gpr(ctx, rb), 7);
        tcg_gen_shli_i64(shift, shift, 3);
        tcg_gen_movi_i64(mask, zapnot_mask(byte_mask));
        tcg_gen_shl_i64(mask, mask, shift);

        tcg_gen_andc_i64(vc, va, mask);

        tcg_temp_free(mask);
        tcg_temp_free(shift);
    }
}

static void gen_rx(DisasContext *ctx, int ra, int set)
{
    TCGv tmp;

    if (ra != 31) {
        ld_flag_byte(ctx->ir[ra], ENV_FLAG_RX_SHIFT);
    }

    /* Store the new flag value, not the old register contents. */
    tmp = tcg_const_i64(set);
    st_flag_byte(tmp, ENV_FLAG_RX_SHIFT);
    tcg_temp_free(tmp);
}

static DisasJumpType gen_call_pal(DisasContext *ctx, int palcode)
{
    /* We're emulating OSF/1 PALcode.  Many of these are trivial access
       to internal cpu registers. */

    /* Unprivileged PAL call */
    if (palcode >= 0x80 && palcode < 0xC0) {
        switch (palcode) {
        case 0x86:
            /* IMB */
            /* No-op inside QEMU. */
            break;
        case 0x9E:
            /* RDUNIQUE */
            tcg_gen_ld_i64(ctx->ir[IR_V0], cpu_env,
                           offsetof(CPUAlphaState, unique));
            break;
        case 0x9F:
            /* WRUNIQUE */
            tcg_gen_st_i64(ctx->ir[IR_A0], cpu_env,
                           offsetof(CPUAlphaState, unique));
            break;
        default:
            palcode &= 0xbf;
            goto do_call_pal;
        }
        return DISAS_NEXT;
    }

#ifndef CONFIG_USER_ONLY
    /* Privileged PAL code */
    if (palcode < 0x40 && (ctx->tbflags & ENV_FLAG_PS_USER) == 0) {
        switch (palcode) {
        case 0x01:
            /* CFLUSH */
            /* No-op inside QEMU. */
            break;
        case 0x02:
            /* DRAINA */
            /* No-op inside QEMU. */
            break;
        case 0x2D:
            /* WRVPTPTR */
            tcg_gen_st_i64(ctx->ir[IR_A0], cpu_env,
                           offsetof(CPUAlphaState, vptptr));
            break;
        case 0x31:
            /* WRVAL */
            tcg_gen_st_i64(ctx->ir[IR_A0], cpu_env,
                           offsetof(CPUAlphaState, sysval));
            break;
        case 0x32:
            /* RDVAL */
            tcg_gen_ld_i64(ctx->ir[IR_V0], cpu_env,
                           offsetof(CPUAlphaState, sysval));
            break;

        case 0x35:
            /* SWPIPL */
            /* Note that we already know we're in kernel mode, so we know
               that PS only contains the 3 IPL bits. */
            ld_flag_byte(ctx->ir[IR_V0], ENV_FLAG_PS_SHIFT);

            /* But make sure and store only the 3 IPL bits from the user. */
            {
                TCGv tmp = tcg_temp_new();
                tcg_gen_andi_i64(tmp, ctx->ir[IR_A0], PS_INT_MASK);
                st_flag_byte(tmp, ENV_FLAG_PS_SHIFT);
                tcg_temp_free(tmp);
            }

            /* Allow interrupts to be recognized right away. */
            tcg_gen_movi_i64(cpu_pc, ctx->base.pc_next);
            return DISAS_PC_UPDATED_NOCHAIN;

        case 0x36:
            /* RDPS */
            ld_flag_byte(ctx->ir[IR_V0], ENV_FLAG_PS_SHIFT);
            break;

        case 0x38:
            /* WRUSP */
            tcg_gen_st_i64(ctx->ir[IR_A0], cpu_env,
                           offsetof(CPUAlphaState, usp));
            break;
        case 0x3A:
            /* RDUSP */
            tcg_gen_ld_i64(ctx->ir[IR_V0], cpu_env,
                           offsetof(CPUAlphaState, usp));
            break;
        case 0x3C:
            /* WHAMI */
            tcg_gen_ld32s_i64(ctx->ir[IR_V0], cpu_env,
                -offsetof(AlphaCPU, env) + offsetof(CPUState, cpu_index));
            break;

        case 0x3E:
            /* WTINT */
            {
                TCGv_i32 tmp = tcg_const_i32(1);
                tcg_gen_st_i32(tmp, cpu_env, -offsetof(AlphaCPU, env) +
                                             offsetof(CPUState, halted));
                tcg_temp_free_i32(tmp);
            }
            tcg_gen_movi_i64(ctx->ir[IR_V0], 0);
            return gen_excp(ctx, EXCP_HALTED, 0);

        default:
            palcode &= 0x3f;
            goto do_call_pal;
        }
        return DISAS_NEXT;
    }
#endif
    return gen_invalid(ctx);

 do_call_pal:
#ifdef CONFIG_USER_ONLY
    return gen_excp(ctx, EXCP_CALL_PAL, palcode);
#else
    {
        TCGv tmp = tcg_temp_new();
        uint64_t exc_addr = ctx->base.pc_next;
        uint64_t entry = ctx->palbr;

        if (ctx->tbflags & ENV_FLAG_PAL_MODE) {
            exc_addr |= 1;
        } else {
            tcg_gen_movi_i64(tmp, 1);
            st_flag_byte(tmp, ENV_FLAG_PAL_SHIFT);
        }

        tcg_gen_movi_i64(tmp, exc_addr);
        tcg_gen_st_i64(tmp, cpu_env, offsetof(CPUAlphaState, exc_addr));
        tcg_temp_free(tmp);

        entry += (palcode & 0x80
                  ? 0x2000 + (palcode - 0x80) * 64
                  : 0x1000 + palcode * 64);

        /* Since the destination is running in PALmode, we don't really
           need the page permissions check.  We'll see the existence of
           the page when we create the TB, and we'll flush all TBs if
           we change the PAL base register. */
        if (!ctx->base.singlestep_enabled) {
            tcg_gen_goto_tb(0);
            tcg_gen_movi_i64(cpu_pc, entry);
            tcg_gen_exit_tb(ctx->base.tb, 0);
            return DISAS_NORETURN;
        } else {
            tcg_gen_movi_i64(cpu_pc, entry);
            return DISAS_PC_UPDATED;
        }
    }
#endif
}

#ifndef CONFIG_USER_ONLY

#define PR_LONG         0x200000

static int cpu_pr_data(int pr)
{
    switch (pr) {
    case 2: return offsetof(CPUAlphaState, pcc_ofs) | PR_LONG;
    case 3: return offsetof(CPUAlphaState, trap_arg0);
    case 4: return offsetof(CPUAlphaState, trap_arg1);
    case 5: return offsetof(CPUAlphaState, trap_arg2);
    case 6: return offsetof(CPUAlphaState, exc_addr);
    case 7: return offsetof(CPUAlphaState, palbr);
    case 8: return offsetof(CPUAlphaState, ptbr);
    case 9: return offsetof(CPUAlphaState, vptptr);
    case 10: return offsetof(CPUAlphaState, unique);
    case 11: return offsetof(CPUAlphaState, sysval);
    case 12: return offsetof(CPUAlphaState, usp);

    case 40 ... 63:
        return offsetof(CPUAlphaState, scratch[pr - 40]);

    case 251:
        return offsetof(CPUAlphaState, alarm_expire);
    }
    return 0;
}

static DisasJumpType gen_mfpr(DisasContext *ctx, TCGv va, int regno)
{
    void (*helper)(TCGv);
    int data;

    switch (regno) {
    case 32 ... 39:
        /* Accessing the "non-shadow" general registers. */
        regno = regno == 39 ? 25 : regno - 32 + 8;
        tcg_gen_mov_i64(va, cpu_std_ir[regno]);
        break;

    case 250: /* WALLTIME */
        helper = gen_helper_get_walltime;
        goto do_helper;
    case 249: /* VMTIME */
        helper = gen_helper_get_vmtime;
    do_helper:
        if (tb_cflags(ctx->base.tb) & CF_USE_ICOUNT) {
            gen_io_start();
            helper(va);
            return DISAS_PC_STALE;
        } else {
            helper(va);
        }
        break;

    case 0: /* PS */
        ld_flag_byte(va, ENV_FLAG_PS_SHIFT);
        break;
    case 1: /* FEN */
        ld_flag_byte(va, ENV_FLAG_FEN_SHIFT);
        break;

    default:
        /* The basic registers are data only, and unknown registers
           are read-zero, write-ignore. */
        data = cpu_pr_data(regno);
        if (data == 0) {
            tcg_gen_movi_i64(va, 0);
        } else if (data & PR_LONG) {
            tcg_gen_ld32s_i64(va, cpu_env, data & ~PR_LONG);
        } else {
            tcg_gen_ld_i64(va, cpu_env, data);
        }
        break;
    }

    return DISAS_NEXT;
}

static DisasJumpType gen_mtpr(DisasContext *ctx, TCGv vb, int regno)
{
    int data;
    DisasJumpType ret = DISAS_NEXT;

    switch (regno) {
    case 255:
        /* TBIA */
        gen_helper_tbia(cpu_env);
        break;

    case 254:
        /* TBIS */
        gen_helper_tbis(cpu_env, vb);
        break;

    case 253:
        /* WAIT */
        {
            TCGv_i32 tmp = tcg_const_i32(1);
            tcg_gen_st_i32(tmp, cpu_env, -offsetof(AlphaCPU, env) +
                                         offsetof(CPUState, halted));
            tcg_temp_free_i32(tmp);
        }
        return gen_excp(ctx, EXCP_HALTED, 0);

    case 252:
        /* HALT */
        gen_helper_halt(vb);
        return DISAS_PC_STALE;

    case 251:
        /* ALARM */
        if (tb_cflags(ctx->base.tb) & CF_USE_ICOUNT) {
            gen_io_start();
            ret = DISAS_PC_STALE;
        }
        gen_helper_set_alarm(cpu_env, vb);
        break;

    case 7:
        /* PALBR */
        tcg_gen_st_i64(vb, cpu_env, offsetof(CPUAlphaState, palbr));
        /* Changing the PAL base register implies un-chaining all of the TBs
           that ended with a CALL_PAL.  Since the base register usually only
           changes during boot, flushing everything works well. */
        gen_helper_tb_flush(cpu_env);
        return DISAS_PC_STALE;

    case 32 ... 39:
        /* Accessing the "non-shadow" general registers. */
        regno = regno == 39 ? 25 : regno - 32 + 8;
        tcg_gen_mov_i64(cpu_std_ir[regno], vb);
        break;

    case 0: /* PS */
        st_flag_byte(vb, ENV_FLAG_PS_SHIFT);
        break;
    case 1: /* FEN */
        st_flag_byte(vb, ENV_FLAG_FEN_SHIFT);
        break;

    default:
        /* The basic registers are data only, and unknown registers
           are read-zero, write-ignore. */
        data = cpu_pr_data(regno);
        if (data != 0) {
            if (data & PR_LONG) {
                tcg_gen_st32_i64(vb, cpu_env, data & ~PR_LONG);
            } else {
                tcg_gen_st_i64(vb, cpu_env, data);
            }
        }
        break;
    }

    return ret;
}
#endif /* !USER_ONLY */

#define REQUIRE_NO_LIT                          \
    do {                                        \
        if (real_islit) {                       \
            goto invalid_opc;                   \
        }                                       \
    } while (0)

#define REQUIRE_AMASK(FLAG)                     \
    do {                                        \
        if ((ctx->amask & AMASK_##FLAG) == 0) { \
            goto invalid_opc;                   \
        }                                       \
    } while (0)

#define REQUIRE_TB_FLAG(FLAG)                   \
    do {                                        \
        if ((ctx->tbflags & (FLAG)) == 0) {     \
            goto invalid_opc;                   \
        }                                       \
    } while (0)

#define REQUIRE_REG_31(WHICH)                   \
    do {                                        \
        if (WHICH != 31) {                      \
            goto invalid_opc;                   \
        }                                       \
    } while (0)

#define REQUIRE_FEN                             \
    do {                                        \
        if (!(ctx->tbflags & ENV_FLAG_FEN)) {   \
            goto raise_fen;                     \
        }                                       \
    } while (0)

static DisasJumpType translate_one(DisasContext *ctx, uint32_t insn)
{
    int32_t disp21, disp16, disp12 __attribute__((unused));
    uint16_t fn11;
    uint8_t opc, ra, rb, rc, fpfn, fn7, lit;
    bool islit, real_islit;
    TCGv va, vb, vc, tmp, tmp2;
    TCGv_i32 t32;
    DisasJumpType ret;

    /* Decode all instruction fields */
    opc = extract32(insn, 26, 6);
    ra = extract32(insn, 21, 5);
    rb = extract32(insn, 16, 5);
    rc = extract32(insn, 0, 5);
    real_islit = islit = extract32(insn, 12, 1);
    lit = extract32(insn, 13, 8);

    disp21 = sextract32(insn, 0, 21);
    disp16 = sextract32(insn, 0, 16);
    disp12 = sextract32(insn, 0, 12);

    fn11 = extract32(insn, 5, 11);
    fpfn = extract32(insn, 5, 6);
    fn7 = extract32(insn, 5, 7);
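
    /* $31 always reads as zero, so an instruction whose rb operand is $31
       without a literal is equivalent to one with a literal of 0; normalize
       that here so the literal special cases below apply to it as well. */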
    if (rb == 31 && !islit) {
        islit = true;
        lit = 0;
    }

    ret = DISAS_NEXT;
    switch (opc) {
    case 0x00:
        /* CALL_PAL */
        ret = gen_call_pal(ctx, insn & 0x03ffffff);
        break;
    case 0x01:
        /* OPC01 */
        goto invalid_opc;
    case 0x02:
        /* OPC02 */
        goto invalid_opc;
    case 0x03:
        /* OPC03 */
        goto invalid_opc;
    case 0x04:
        /* OPC04 */
        goto invalid_opc;
    case 0x05:
        /* OPC05 */
        goto invalid_opc;
    case 0x06:
        /* OPC06 */
        goto invalid_opc;
    case 0x07:
        /* OPC07 */
        goto invalid_opc;

    case 0x09:
        /* LDAH */
        disp16 = (uint32_t)disp16 << 16;
        /* fall through */
    case 0x08:
        /* LDA */
        va = dest_gpr(ctx, ra);
        /* It's worth special-casing immediate loads. */
        if (rb == 31) {
            tcg_gen_movi_i64(va, disp16);
        } else {
            tcg_gen_addi_i64(va, load_gpr(ctx, rb), disp16);
        }
        break;

    case 0x0A:
        /* LDBU */
        REQUIRE_AMASK(BWX);
        gen_load_mem(ctx, &tcg_gen_qemu_ld8u, ra, rb, disp16, 0, 0);
        break;
    case 0x0B:
        /* LDQ_U */
        gen_load_mem(ctx, &tcg_gen_qemu_ld64, ra, rb, disp16, 0, 1);
        break;
    case 0x0C:
        /* LDWU */
        REQUIRE_AMASK(BWX);
        gen_load_mem(ctx, &tcg_gen_qemu_ld16u, ra, rb, disp16, 0, 0);
        break;
    case 0x0D:
        /* STW */
        REQUIRE_AMASK(BWX);
        gen_store_mem(ctx, &tcg_gen_qemu_st16, ra, rb, disp16, 0, 0);
        break;
    case 0x0E:
        /* STB */
        REQUIRE_AMASK(BWX);
        gen_store_mem(ctx, &tcg_gen_qemu_st8, ra, rb, disp16, 0, 0);
        break;
    case 0x0F:
        /* STQ_U */
        gen_store_mem(ctx, &tcg_gen_qemu_st64, ra, rb, disp16, 0, 1);
        break;

    case 0x10:
        vc = dest_gpr(ctx, rc);
        vb = load_gpr_lit(ctx, rb, lit, islit);

        if (ra == 31) {
            if (fn7 == 0x00) {
                /* Special case ADDL as SEXTL. */
                tcg_gen_ext32s_i64(vc, vb);
                break;
            }
            if (fn7 == 0x29) {
                /* Special case SUBQ as NEGQ. */
                tcg_gen_neg_i64(vc, vb);
                break;
            }
        }

        va = load_gpr(ctx, ra);
        switch (fn7) {
        case 0x00:
            /* ADDL */
            tcg_gen_add_i64(vc, va, vb);
            tcg_gen_ext32s_i64(vc, vc);
            break;
        case 0x02:
            /* S4ADDL */
            tmp = tcg_temp_new();
            tcg_gen_shli_i64(tmp, va, 2);
            tcg_gen_add_i64(tmp, tmp, vb);
            tcg_gen_ext32s_i64(vc, tmp);
            tcg_temp_free(tmp);
            break;
        case 0x09:
            /* SUBL */
            tcg_gen_sub_i64(vc, va, vb);
            tcg_gen_ext32s_i64(vc, vc);
            break;
        case 0x0B:
            /* S4SUBL */
            tmp = tcg_temp_new();
            tcg_gen_shli_i64(tmp, va, 2);
            tcg_gen_sub_i64(tmp, tmp, vb);
            tcg_gen_ext32s_i64(vc, tmp);
            tcg_temp_free(tmp);
            break;
        case 0x0F:
            /* CMPBGE */
            if (ra == 31) {
                /* Special case 0 >= X as X == 0. */
                gen_helper_cmpbe0(vc, vb);
            } else {
                gen_helper_cmpbge(vc, va, vb);
            }
            break;
        case 0x12:
            /* S8ADDL */
            tmp = tcg_temp_new();
            tcg_gen_shli_i64(tmp, va, 3);
            tcg_gen_add_i64(tmp, tmp, vb);
            tcg_gen_ext32s_i64(vc, tmp);
            tcg_temp_free(tmp);
            break;
        case 0x1B:
            /* S8SUBL */
            tmp = tcg_temp_new();
            tcg_gen_shli_i64(tmp, va, 3);
            tcg_gen_sub_i64(tmp, tmp, vb);
            tcg_gen_ext32s_i64(vc, tmp);
            tcg_temp_free(tmp);
            break;
        case 0x1D:
            /* CMPULT */
            tcg_gen_setcond_i64(TCG_COND_LTU, vc, va, vb);
            break;
        case 0x20:
            /* ADDQ */
            tcg_gen_add_i64(vc, va, vb);
            break;
        case 0x22:
            /* S4ADDQ */
            tmp = tcg_temp_new();
            tcg_gen_shli_i64(tmp, va, 2);
            tcg_gen_add_i64(vc, tmp, vb);
            tcg_temp_free(tmp);
            break;
        case 0x29:
            /* SUBQ */
            tcg_gen_sub_i64(vc, va, vb);
            break;
        case 0x2B:
            /* S4SUBQ */
            tmp = tcg_temp_new();
            tcg_gen_shli_i64(tmp, va, 2);
            tcg_gen_sub_i64(vc, tmp, vb);
            tcg_temp_free(tmp);
            break;
        case 0x2D:
            /* CMPEQ */
            tcg_gen_setcond_i64(TCG_COND_EQ, vc, va, vb);
            break;
        case 0x32:
            /* S8ADDQ */
            tmp = tcg_temp_new();
            tcg_gen_shli_i64(tmp, va, 3);
            tcg_gen_add_i64(vc, tmp, vb);
            tcg_temp_free(tmp);
            break;
        case 0x3B:
            /* S8SUBQ */
            tmp = tcg_temp_new();
            tcg_gen_shli_i64(tmp, va, 3);
            tcg_gen_sub_i64(vc, tmp, vb);
            tcg_temp_free(tmp);
            break;
        case 0x3D:
            /* CMPULE */
            tcg_gen_setcond_i64(TCG_COND_LEU, vc, va, vb);
            break;
        case 0x40:
            /* ADDL/V */
            tmp = tcg_temp_new();
            tcg_gen_ext32s_i64(tmp, va);
            tcg_gen_ext32s_i64(vc, vb);
            tcg_gen_add_i64(tmp, tmp, vc);
            tcg_gen_ext32s_i64(vc, tmp);
            gen_helper_check_overflow(cpu_env, vc, tmp);
            tcg_temp_free(tmp);
            break;
        case 0x49:
            /* SUBL/V */
            tmp = tcg_temp_new();
            tcg_gen_ext32s_i64(tmp, va);
            tcg_gen_ext32s_i64(vc, vb);
            tcg_gen_sub_i64(tmp, tmp, vc);
            tcg_gen_ext32s_i64(vc, tmp);
            gen_helper_check_overflow(cpu_env, vc, tmp);
            tcg_temp_free(tmp);
            break;
        case 0x4D:
            /* CMPLT */
            tcg_gen_setcond_i64(TCG_COND_LT, vc, va, vb);
            break;
        case 0x60:
            /* ADDQ/V */
            tmp = tcg_temp_new();
            tmp2 = tcg_temp_new();
            tcg_gen_eqv_i64(tmp, va, vb);
            tcg_gen_mov_i64(tmp2, va);
            tcg_gen_add_i64(vc, va, vb);
            tcg_gen_xor_i64(tmp2, tmp2, vc);
            tcg_gen_and_i64(tmp, tmp, tmp2);
            tcg_gen_shri_i64(tmp, tmp, 63);
            tcg_gen_movi_i64(tmp2, 0);
            gen_helper_check_overflow(cpu_env, tmp, tmp2);
            tcg_temp_free(tmp);
            tcg_temp_free(tmp2);
            break;
        case 0x69:
            /* SUBQ/V */
            tmp = tcg_temp_new();
            tmp2 = tcg_temp_new();
            tcg_gen_xor_i64(tmp, va, vb);
            tcg_gen_mov_i64(tmp2, va);
            tcg_gen_sub_i64(vc, va, vb);
            tcg_gen_xor_i64(tmp2, tmp2, vc);
            tcg_gen_and_i64(tmp, tmp, tmp2);
            tcg_gen_shri_i64(tmp, tmp, 63);
            tcg_gen_movi_i64(tmp2, 0);
            gen_helper_check_overflow(cpu_env, tmp, tmp2);
            tcg_temp_free(tmp);
            tcg_temp_free(tmp2);
            break;
        case 0x6D:
            /* CMPLE */
            tcg_gen_setcond_i64(TCG_COND_LE, vc, va, vb);
            break;
        default:
            goto invalid_opc;
        }
        break;

    case 0x11:
        if (fn7 == 0x20) {
            if (rc == 31) {
                /* Special case BIS as NOP. */
                break;
            }
            if (ra == 31) {
                /* Special case BIS as MOV. */
                vc = dest_gpr(ctx, rc);
                if (islit) {
                    tcg_gen_movi_i64(vc, lit);
                } else {
                    tcg_gen_mov_i64(vc, load_gpr(ctx, rb));
                }
                break;
            }
        }

        vc = dest_gpr(ctx, rc);
        vb = load_gpr_lit(ctx, rb, lit, islit);

        if (fn7 == 0x28 && ra == 31) {
            /* Special case ORNOT as NOT. */
            tcg_gen_not_i64(vc, vb);
            break;
        }

        va = load_gpr(ctx, ra);
        switch (fn7) {
        case 0x00:
            /* AND */
            tcg_gen_and_i64(vc, va, vb);
            break;
        case 0x08:
            /* BIC */
            tcg_gen_andc_i64(vc, va, vb);
            break;
        case 0x14:
            /* CMOVLBS */
            tmp = tcg_temp_new();
            tcg_gen_andi_i64(tmp, va, 1);
            tcg_gen_movcond_i64(TCG_COND_NE, vc, tmp, load_zero(ctx),
                                vb, load_gpr(ctx, rc));
            tcg_temp_free(tmp);
            break;
        case 0x16:
            /* CMOVLBC */
            tmp = tcg_temp_new();
            tcg_gen_andi_i64(tmp, va, 1);
            tcg_gen_movcond_i64(TCG_COND_EQ, vc, tmp, load_zero(ctx),
                                vb, load_gpr(ctx, rc));
            tcg_temp_free(tmp);
            break;
        case 0x20:
            /* BIS */
            tcg_gen_or_i64(vc, va, vb);
            break;
        case 0x24:
            /* CMOVEQ */
            tcg_gen_movcond_i64(TCG_COND_EQ, vc, va, load_zero(ctx),
                                vb, load_gpr(ctx, rc));
            break;
        case 0x26:
            /* CMOVNE */
            tcg_gen_movcond_i64(TCG_COND_NE, vc, va, load_zero(ctx),
                                vb, load_gpr(ctx, rc));
            break;
        case 0x28:
            /* ORNOT */
            tcg_gen_orc_i64(vc, va, vb);
            break;
        case 0x40:
            /* XOR */
            tcg_gen_xor_i64(vc, va, vb);
            break;
        case 0x44:
            /* CMOVLT */
            tcg_gen_movcond_i64(TCG_COND_LT, vc, va, load_zero(ctx),
                                vb, load_gpr(ctx, rc));
            break;
        case 0x46:
            /* CMOVGE */
            tcg_gen_movcond_i64(TCG_COND_GE, vc, va, load_zero(ctx),
                                vb, load_gpr(ctx, rc));
            break;
        case 0x48:
            /* EQV */
            tcg_gen_eqv_i64(vc, va, vb);
            break;
        case 0x61:
            /* AMASK */
            REQUIRE_REG_31(ra);
            tcg_gen_andi_i64(vc, vb, ~ctx->amask);
            break;
        case 0x64:
            /* CMOVLE */
            tcg_gen_movcond_i64(TCG_COND_LE, vc, va, load_zero(ctx),
                                vb, load_gpr(ctx, rc));
            break;
        case 0x66:
            /* CMOVGT */
            tcg_gen_movcond_i64(TCG_COND_GT, vc, va, load_zero(ctx),
                                vb, load_gpr(ctx, rc));
            break;
        case 0x6C:
            /* IMPLVER */
            REQUIRE_REG_31(ra);
            tcg_gen_movi_i64(vc, ctx->implver);
            break;
        default:
            goto invalid_opc;
        }
        break;

    case 0x12:
        vc = dest_gpr(ctx, rc);
        va = load_gpr(ctx, ra);
        switch (fn7) {
        case 0x02:
            /* MSKBL */
            gen_msk_l(ctx, vc, va, rb, islit, lit, 0x01);
            break;
        case 0x06:
            /* EXTBL */
            gen_ext_l(ctx, vc, va, rb, islit, lit, 0x01);
            break;
        case 0x0B:
            /* INSBL */
            gen_ins_l(ctx, vc, va, rb, islit, lit, 0x01);
            break;
        case 0x12:
            /* MSKWL */
            gen_msk_l(ctx, vc, va, rb, islit, lit, 0x03);
            break;
        case 0x16:
            /* EXTWL */
            gen_ext_l(ctx, vc, va, rb, islit, lit, 0x03);
            break;
        case 0x1B:
            /* INSWL */
            gen_ins_l(ctx, vc, va, rb, islit, lit, 0x03);
            break;
        case 0x22:
            /* MSKLL */
            gen_msk_l(ctx, vc, va, rb, islit, lit, 0x0f);
            break;
        case 0x26:
            /* EXTLL */
            gen_ext_l(ctx, vc, va, rb, islit, lit, 0x0f);
            break;
        case 0x2B:
            /* INSLL */
            gen_ins_l(ctx, vc, va, rb, islit, lit, 0x0f);
            break;
        case 0x30:
            /* ZAP */
            if (islit) {
                gen_zapnoti(vc, va, ~lit);
            } else {
                gen_helper_zap(vc, va, load_gpr(ctx, rb));
            }
            break;
        case 0x31:
            /* ZAPNOT */
            if (islit) {
                gen_zapnoti(vc, va, lit);
            } else {
                gen_helper_zapnot(vc, va, load_gpr(ctx, rb));
            }
            break;
        case 0x32:
            /* MSKQL */
            gen_msk_l(ctx, vc, va, rb, islit, lit, 0xff);
            break;
        case 0x34:
            /* SRL */
            if (islit) {
                tcg_gen_shri_i64(vc, va, lit & 0x3f);
            } else {
                tmp = tcg_temp_new();
                vb = load_gpr(ctx, rb);
                tcg_gen_andi_i64(tmp, vb, 0x3f);
                tcg_gen_shr_i64(vc, va, tmp);
                tcg_temp_free(tmp);
            }
            break;
        case 0x36:
            /* EXTQL */
            gen_ext_l(ctx, vc, va, rb, islit, lit, 0xff);
            break;
        case 0x39:
            /* SLL */
            if (islit) {
                tcg_gen_shli_i64(vc, va, lit & 0x3f);
            } else {
                tmp = tcg_temp_new();
                vb = load_gpr(ctx, rb);
                tcg_gen_andi_i64(tmp, vb, 0x3f);
                tcg_gen_shl_i64(vc, va, tmp);
                tcg_temp_free(tmp);
            }
            break;
        case 0x3B:
            /* INSQL */
            gen_ins_l(ctx, vc, va, rb, islit, lit, 0xff);
            break;
        case 0x3C:
            /* SRA */
            if (islit) {
                tcg_gen_sari_i64(vc, va, lit & 0x3f);
            } else {
                tmp = tcg_temp_new();
                vb = load_gpr(ctx, rb);
                tcg_gen_andi_i64(tmp, vb, 0x3f);
                tcg_gen_sar_i64(vc, va, tmp);
                tcg_temp_free(tmp);
            }
            break;
        case 0x52:
            /* MSKWH */
            gen_msk_h(ctx, vc, va, rb, islit, lit, 0x03);
            break;
        case 0x57:
            /* INSWH */
            gen_ins_h(ctx, vc, va, rb, islit, lit, 0x03);
            break;
        case 0x5A:
            /* EXTWH */
            gen_ext_h(ctx, vc, va, rb, islit, lit, 0x03);
            break;
        case 0x62:
            /* MSKLH */
            gen_msk_h(ctx, vc, va, rb, islit, lit, 0x0f);
            break;
        case 0x67:
            /* INSLH */
            gen_ins_h(ctx, vc, va, rb, islit, lit, 0x0f);
            break;
        case 0x6A:
            /* EXTLH */
            gen_ext_h(ctx, vc, va, rb, islit, lit, 0x0f);
            break;
        case 0x72:
            /* MSKQH */
            gen_msk_h(ctx, vc, va, rb, islit, lit, 0xff);
            break;
        case 0x77:
            /* INSQH */
            gen_ins_h(ctx, vc, va, rb, islit, lit, 0xff);
            break;
        case 0x7A:
            /* EXTQH */
            gen_ext_h(ctx, vc, va, rb, islit, lit, 0xff);
            break;
        default:
            goto invalid_opc;
        }
        break;

    case 0x13:
        vc = dest_gpr(ctx, rc);
        vb = load_gpr_lit(ctx, rb, lit, islit);
        va = load_gpr(ctx, ra);
        switch (fn7) {
        case 0x00:
            /* MULL */
            tcg_gen_mul_i64(vc, va, vb);
            tcg_gen_ext32s_i64(vc, vc);
            break;
        case 0x20:
            /* MULQ */
            tcg_gen_mul_i64(vc, va, vb);
            break;
        case 0x30:
            /* UMULH */
            tmp = tcg_temp_new();
            tcg_gen_mulu2_i64(tmp, vc, va, vb);
            tcg_temp_free(tmp);
            break;
        case 0x40:
            /* MULL/V */
            tmp = tcg_temp_new();
            tcg_gen_ext32s_i64(tmp, va);
            tcg_gen_ext32s_i64(vc, vb);
            tcg_gen_mul_i64(tmp, tmp, vc);
            tcg_gen_ext32s_i64(vc, tmp);
            gen_helper_check_overflow(cpu_env, vc, tmp);
            tcg_temp_free(tmp);
            break;
        case 0x60:
            /* MULQ/V */
            tmp = tcg_temp_new();
            tmp2 = tcg_temp_new();
            tcg_gen_muls2_i64(vc, tmp, va, vb);
            tcg_gen_sari_i64(tmp2, vc, 63);
            gen_helper_check_overflow(cpu_env, tmp, tmp2);
            tcg_temp_free(tmp);
            tcg_temp_free(tmp2);
            break;
        default:
            goto invalid_opc;
        }
        break;

    case 0x14:
        REQUIRE_AMASK(FIX);
        vc = dest_fpr(ctx, rc);
        switch (fpfn) { /* fn11 & 0x3F */
        case 0x04:
            /* ITOFS */
            REQUIRE_REG_31(rb);
            REQUIRE_FEN;
            t32 = tcg_temp_new_i32();
            va = load_gpr(ctx, ra);
            tcg_gen_extrl_i64_i32(t32, va);
            gen_helper_memory_to_s(vc, t32);
            tcg_temp_free_i32(t32);
            break;
        case 0x0A:
            /* SQRTF */
            REQUIRE_REG_31(ra);
            REQUIRE_FEN;
            vb = load_fpr(ctx, rb);
            gen_helper_sqrtf(vc, cpu_env, vb);
            break;
        case 0x0B:
            /* SQRTS */
            REQUIRE_REG_31(ra);
            REQUIRE_FEN;
            gen_sqrts(ctx, rb, rc, fn11);
            break;
        case 0x14:
            /* ITOFF */
            REQUIRE_REG_31(rb);
            REQUIRE_FEN;
            t32 = tcg_temp_new_i32();
            va = load_gpr(ctx, ra);
            tcg_gen_extrl_i64_i32(t32, va);
            gen_helper_memory_to_f(vc, t32);
            tcg_temp_free_i32(t32);
            break;
        case 0x24:
            /* ITOFT */
            REQUIRE_REG_31(rb);
            REQUIRE_FEN;
            va = load_gpr(ctx, ra);
            tcg_gen_mov_i64(vc, va);
            break;
        case 0x2A:
            /* SQRTG */
            REQUIRE_REG_31(ra);
            REQUIRE_FEN;
            vb = load_fpr(ctx, rb);
            gen_helper_sqrtg(vc, cpu_env, vb);
            break;
        case 0x02B:
            /* SQRTT */
            REQUIRE_REG_31(ra);
            REQUIRE_FEN;
            gen_sqrtt(ctx, rb, rc, fn11);
            break;
        default:
            goto invalid_opc;
        }
        break;

    case 0x15:
        /* VAX floating point */
        /* XXX: rounding mode and trap are ignored (!) */
        vc = dest_fpr(ctx, rc);
        vb = load_fpr(ctx, rb);
        va = load_fpr(ctx, ra);
        switch (fpfn) { /* fn11 & 0x3F */
        case 0x00:
            /* ADDF */
            REQUIRE_FEN;
            gen_helper_addf(vc, cpu_env, va, vb);
            break;
        case 0x01:
            /* SUBF */
            REQUIRE_FEN;
            gen_helper_subf(vc, cpu_env, va, vb);
            break;
        case 0x02:
            /* MULF */
            REQUIRE_FEN;
            gen_helper_mulf(vc, cpu_env, va, vb);
            break;
        case 0x03:
            /* DIVF */
            REQUIRE_FEN;
            gen_helper_divf(vc, cpu_env, va, vb);
            break;
        case 0x1E:
            /* CVTDG -- TODO */
            REQUIRE_REG_31(ra);
            goto invalid_opc;
        case 0x20:
            /* ADDG */
            REQUIRE_FEN;
            gen_helper_addg(vc, cpu_env, va, vb);
            break;
        case 0x21:
            /* SUBG */
            REQUIRE_FEN;
            gen_helper_subg(vc, cpu_env, va, vb);
            break;
        case 0x22:
            /* MULG */
            REQUIRE_FEN;
            gen_helper_mulg(vc, cpu_env, va, vb);
            break;
        case 0x23:
            /* DIVG */
            REQUIRE_FEN;
            gen_helper_divg(vc, cpu_env, va, vb);
            break;
        case 0x25:
            /* CMPGEQ */
            REQUIRE_FEN;
            gen_helper_cmpgeq(vc, cpu_env, va, vb);
            break;
        case 0x26:
            /* CMPGLT */
            REQUIRE_FEN;
            gen_helper_cmpglt(vc, cpu_env, va, vb);
            break;
        case 0x27:
            /* CMPGLE */
            REQUIRE_FEN;
            gen_helper_cmpgle(vc, cpu_env, va, vb);
            break;
        case 0x2C:
            /* CVTGF */
            REQUIRE_REG_31(ra);
            REQUIRE_FEN;
            gen_helper_cvtgf(vc, cpu_env, vb);
            break;
        case 0x2D:
            /* CVTGD -- TODO */
            REQUIRE_REG_31(ra);
            goto invalid_opc;
        case 0x2F:
            /* CVTGQ */
            REQUIRE_REG_31(ra);
            REQUIRE_FEN;
            gen_helper_cvtgq(vc, cpu_env, vb);
            break;
        case 0x3C:
            /* CVTQF */
            REQUIRE_REG_31(ra);
            REQUIRE_FEN;
            gen_helper_cvtqf(vc, cpu_env, vb);
            break;
        case 0x3E:
            /* CVTQG */
            REQUIRE_REG_31(ra);
            REQUIRE_FEN;
            gen_helper_cvtqg(vc, cpu_env, vb);
            break;
        default:
            goto invalid_opc;
        }
        break;

    case 0x16:
        /* IEEE floating-point */
        switch (fpfn) { /* fn11 & 0x3F */
        case 0x00:
            /* ADDS */
            REQUIRE_FEN;
            gen_adds(ctx, ra, rb, rc, fn11);
            break;
        case 0x01:
            /* SUBS */
            REQUIRE_FEN;
            gen_subs(ctx, ra, rb, rc, fn11);
            break;
        case 0x02:
            /* MULS */
            REQUIRE_FEN;
            gen_muls(ctx, ra, rb, rc, fn11);
            break;
        case 0x03:
            /* DIVS */
            REQUIRE_FEN;
            gen_divs(ctx, ra, rb, rc, fn11);
            break;
        case 0x20:
            /* ADDT */
            REQUIRE_FEN;
            gen_addt(ctx, ra, rb, rc, fn11);
            break;
        case 0x21:
            /* SUBT */
            REQUIRE_FEN;
            gen_subt(ctx, ra, rb, rc, fn11);
            break;
        case 0x22:
            /* MULT */
            REQUIRE_FEN;
            gen_mult(ctx, ra, rb, rc, fn11);
            break;
        case 0x23:
            /* DIVT */
            REQUIRE_FEN;
            gen_divt(ctx, ra, rb, rc, fn11);
            break;
        case 0x24:
            /* CMPTUN */
            REQUIRE_FEN;
            gen_cmptun(ctx, ra, rb, rc, fn11);
            break;
        case 0x25:
            /* CMPTEQ */
            REQUIRE_FEN;
            gen_cmpteq(ctx, ra, rb, rc, fn11);
            break;
        case 0x26:
            /* CMPTLT */
            REQUIRE_FEN;
            gen_cmptlt(ctx, ra, rb, rc, fn11);
            break;
        case 0x27:
            /* CMPTLE */
            REQUIRE_FEN;
            gen_cmptle(ctx, ra, rb, rc, fn11);
            break;
        case 0x2C:
            REQUIRE_REG_31(ra);
            REQUIRE_FEN;
            if (fn11 == 0x2AC || fn11 == 0x6AC) {
                /* CVTST */
                gen_cvtst(ctx, rb, rc, fn11);
            } else {
                /* CVTTS */
                gen_cvtts(ctx, rb, rc, fn11);
            }
            break;
        case 0x2F:
            /* CVTTQ */
            REQUIRE_REG_31(ra);
            REQUIRE_FEN;
            gen_cvttq(ctx, rb, rc, fn11);
            break;
        case 0x3C:
            /* CVTQS */
            REQUIRE_REG_31(ra);
            REQUIRE_FEN;
            gen_cvtqs(ctx, rb, rc, fn11);
            break;
        case 0x3E:
            /* CVTQT */
            REQUIRE_REG_31(ra);
            REQUIRE_FEN;
            gen_cvtqt(ctx, rb, rc, fn11);
            break;
        default:
            goto invalid_opc;
        }
        break;
2300 case 0x17:
2301 switch (fn11) {
2302 case 0x010:
2303 /* CVTLQ */
2304 REQUIRE_REG_31(ra);
2305 REQUIRE_FEN;
2306 vc = dest_fpr(ctx, rc);
2307 vb = load_fpr(ctx, rb);
2308 gen_cvtlq(vc, vb);
2309 break;
2310 case 0x020:
2311 /* CPYS */
2312 REQUIRE_FEN;
2313 if (rc == 31) {
2314 /* Special case CPYS as FNOP. */
2315 } else {
2316 vc = dest_fpr(ctx, rc);
2317 va = load_fpr(ctx, ra);
2318 if (ra == rb) {
2319 /* Special case CPYS as FMOV. */
2320 tcg_gen_mov_i64(vc, va);
2321 } else {
2322 vb = load_fpr(ctx, rb);
2323 gen_cpy_mask(vc, va, vb, 0, 0x8000000000000000ULL);
2326 break;
2327 case 0x021:
2328 /* CPYSN */
2329 REQUIRE_FEN;
2330 vc = dest_fpr(ctx, rc);
2331 vb = load_fpr(ctx, rb);
2332 va = load_fpr(ctx, ra);
2333 gen_cpy_mask(vc, va, vb, 1, 0x8000000000000000ULL);
2334 break;
2335 case 0x022:
2336 /* CPYSE */
2337 REQUIRE_FEN;
2338 vc = dest_fpr(ctx, rc);
2339 vb = load_fpr(ctx, rb);
2340 va = load_fpr(ctx, ra);
2341 gen_cpy_mask(vc, va, vb, 0, 0xFFF0000000000000ULL);
2342 break;
2343 case 0x024:
2344 /* MT_FPCR */
2345 REQUIRE_FEN;
2346 va = load_fpr(ctx, ra);
2347 gen_helper_store_fpcr(cpu_env, va);
2348 if (ctx->tb_rm == QUAL_RM_D) {
2349 /* Re-do the copy of the rounding mode to fp_status
2350 the next time we use dynamic rounding. */
2351 ctx->tb_rm = -1;
2352 }
2353 break;
2354 case 0x025:
2355 /* MF_FPCR */
2356 REQUIRE_FEN;
2357 va = dest_fpr(ctx, ra);
2358 gen_helper_load_fpcr(va, cpu_env);
2359 break;
2360 case 0x02A:
2361 /* FCMOVEQ */
2362 REQUIRE_FEN;
2363 gen_fcmov(ctx, TCG_COND_EQ, ra, rb, rc);
2364 break;
2365 case 0x02B:
2366 /* FCMOVNE */
2367 REQUIRE_FEN;
2368 gen_fcmov(ctx, TCG_COND_NE, ra, rb, rc);
2369 break;
2370 case 0x02C:
2371 /* FCMOVLT */
2372 REQUIRE_FEN;
2373 gen_fcmov(ctx, TCG_COND_LT, ra, rb, rc);
2374 break;
2375 case 0x02D:
2376 /* FCMOVGE */
2377 REQUIRE_FEN;
2378 gen_fcmov(ctx, TCG_COND_GE, ra, rb, rc);
2379 break;
2380 case 0x02E:
2381 /* FCMOVLE */
2382 REQUIRE_FEN;
2383 gen_fcmov(ctx, TCG_COND_LE, ra, rb, rc);
2384 break;
2385 case 0x02F:
2386 /* FCMOVGT */
2387 REQUIRE_FEN;
2388 gen_fcmov(ctx, TCG_COND_GT, ra, rb, rc);
2389 break;
2390 case 0x030: /* CVTQL */
2391 case 0x130: /* CVTQL/V */
2392 case 0x530: /* CVTQL/SV */
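/* All three CVTQL forms share the same helper; the /V and /SV
   qualifiers only change which exceptions gen_fp_exc_raise delivers,
   which it decides from fn11. */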
2393 REQUIRE_REG_31(ra);
2394 REQUIRE_FEN;
2395 vc = dest_fpr(ctx, rc);
2396 vb = load_fpr(ctx, rb);
2397 gen_helper_cvtql(vc, cpu_env, vb);
2398 gen_fp_exc_raise(rc, fn11);
2399 break;
2400 default:
2401 goto invalid_opc;
2402 }
2403 break;
2405 case 0x18:
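/* The "miscellaneous" opcode 0x18 encodes its function in the
   displacement field rather than in a separate function code. */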
2406 switch ((uint16_t)disp16) {
2407 case 0x0000:
2408 /* TRAPB */
2409 /* No-op. */
2410 break;
2411 case 0x0400:
2412 /* EXCB */
2413 /* No-op. */
2414 break;
2415 case 0x4000:
2416 /* MB */
2417 tcg_gen_mb(TCG_MO_ALL | TCG_BAR_SC);
2418 break;
2419 case 0x4400:
2420 /* WMB */
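/* WMB orders only stores against stores, hence the narrower
   TCG_MO_ST_ST barrier compared to MB's TCG_MO_ALL above. */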
2421 tcg_gen_mb(TCG_MO_ST_ST | TCG_BAR_SC);
2422 break;
2423 case 0x8000:
2424 /* FETCH */
2425 /* No-op */
2426 break;
2427 case 0xA000:
2428 /* FETCH_M */
2429 /* No-op */
2430 break;
2431 case 0xC000:
2432 /* RPCC */
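/* With icount active, reading the cycle counter is an I/O-like
   operation: bracket it with gen_io_start and return DISAS_PC_STALE
   so the TB ends here and instruction counting stays exact. */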
2433 va = dest_gpr(ctx, ra);
2434 if (tb_cflags(ctx->base.tb) & CF_USE_ICOUNT) {
2435 gen_io_start();
2436 gen_helper_load_pcc(va, cpu_env);
2437 ret = DISAS_PC_STALE;
2438 } else {
2439 gen_helper_load_pcc(va, cpu_env);
2440 }
2441 break;
2442 case 0xE000:
2443 /* RC */
2444 gen_rx(ctx, ra, 0);
2445 break;
2446 case 0xE800:
2447 /* ECB */
2448 break;
2449 case 0xF000:
2450 /* RS */
2451 gen_rx(ctx, ra, 1);
2452 break;
2453 case 0xF800:
2454 /* WH64 */
2455 /* No-op */
2456 break;
2457 case 0xFC00:
2458 /* WH64EN */
2459 /* No-op */
2460 break;
2461 default:
2462 goto invalid_opc;
2463 }
2464 break;
2466 case 0x19:
2467 /* HW_MFPR (PALcode) */
2468 #ifndef CONFIG_USER_ONLY
2469 REQUIRE_TB_FLAG(ENV_FLAG_PAL_MODE);
2470 va = dest_gpr(ctx, ra);
2471 ret = gen_mfpr(ctx, va, insn & 0xffff);
2472 break;
2473 #else
2474 goto invalid_opc;
2475 #endif
2477 case 0x1A:
2478 /* JMP, JSR, RET, JSR_COROUTINE. These only differ by the branch
2479 prediction stack action, which of course we don't implement. */
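/* The hint bits live in the displacement field, and hardware ignores
   the two low bits of the target in rb -- hence the ~3 mask applied
   to cpu_pc below. */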
2480 vb = load_gpr(ctx, rb);
2481 tcg_gen_andi_i64(cpu_pc, vb, ~3);
2482 if (ra != 31) {
2483 tcg_gen_movi_i64(ctx->ir[ra], ctx->base.pc_next);
2484 }
2485 ret = DISAS_PC_UPDATED;
2486 break;
2488 case 0x1B:
2489 /* HW_LD (PALcode) */
2490 #ifndef CONFIG_USER_ONLY
2491 REQUIRE_TB_FLAG(ENV_FLAG_PAL_MODE);
2492 {
2493 TCGv addr = tcg_temp_new();
2494 vb = load_gpr(ctx, rb);
2495 va = dest_gpr(ctx, ra);
2497 tcg_gen_addi_i64(addr, vb, disp12);
2498 switch ((insn >> 12) & 0xF) {
2499 case 0x0:
2500 /* Longword physical access (hw_ldl/p) */
2501 tcg_gen_qemu_ld_i64(va, addr, MMU_PHYS_IDX, MO_LESL);
2502 break;
2503 case 0x1:
2504 /* Quadword physical access (hw_ldq/p) */
2505 tcg_gen_qemu_ld_i64(va, addr, MMU_PHYS_IDX, MO_LEQ);
2506 break;
2507 case 0x2:
2508 /* Longword physical access with lock (hw_ldl_l/p) */
2509 gen_qemu_ldl_l(va, addr, MMU_PHYS_IDX);
2510 break;
2511 case 0x3:
2512 /* Quadword physical access with lock (hw_ldq_l/p) */
2513 gen_qemu_ldq_l(va, addr, MMU_PHYS_IDX);
2514 break;
2515 case 0x4:
2516 /* Longword virtual PTE fetch (hw_ldl/v) */
2517 goto invalid_opc;
2518 case 0x5:
2519 /* Quadword virtual PTE fetch (hw_ldq/v) */
2520 goto invalid_opc;
2522 case 0x6:
2523 /* Invalid */
2524 goto invalid_opc;
2525 case 0x7:
2526 /* Invalid */
2527 goto invalid_opc;
2528 case 0x8:
2529 /* Longword virtual access (hw_ldl) */
2530 goto invalid_opc;
2531 case 0x9:
2532 /* Quadword virtual access (hw_ldq) */
2533 goto invalid_opc;
2534 case 0xA:
2535 /* Longword virtual access with protection check (hw_ldl/w) */
2536 tcg_gen_qemu_ld_i64(va, addr, MMU_KERNEL_IDX, MO_LESL);
2537 break;
2538 case 0xB:
2539 /* Quadword virtual access with protection check (hw_ldq/w) */
2540 tcg_gen_qemu_ld_i64(va, addr, MMU_KERNEL_IDX, MO_LEQ);
2541 break;
2542 case 0xC:
2543 /* Longword virtual access with alt access mode (hw_ldl/a) */
2544 goto invalid_opc;
2545 case 0xD:
2546 /* Quadword virtual access with alt access mode (hw_ldq/a) */
2547 goto invalid_opc;
2548 case 0xE:
2549 /* Longword virtual access with alternate access mode and
2550 protection checks (hw_ldl/wa) */
2551 tcg_gen_qemu_ld_i64(va, addr, MMU_USER_IDX, MO_LESL);
2552 break;
2553 case 0xF:
2554 /* Quadword virtual access with alternate access mode and
2555 protection checks (hw_ldq/wa) */
2556 tcg_gen_qemu_ld_i64(va, addr, MMU_USER_IDX, MO_LEQ);
2557 break;
2558 }
2559 tcg_temp_free(addr);
2560 break;
2561 }
2562 #else
2563 goto invalid_opc;
2564 #endif
2566 case 0x1C:
2567 vc = dest_gpr(ctx, rc);
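/* FTOIT and FTOIS take their source from the fp register file,
   unlike the rest of opcode 0x1C, so handle them before the integer
   operand load below. */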
2568 if (fn7 == 0x70) {
2569 /* FTOIT */
2570 REQUIRE_AMASK(FIX);
2571 REQUIRE_REG_31(rb);
2572 va = load_fpr(ctx, ra);
2573 tcg_gen_mov_i64(vc, va);
2574 break;
2575 } else if (fn7 == 0x78) {
2576 /* FTOIS */
2577 REQUIRE_AMASK(FIX);
2578 REQUIRE_REG_31(rb);
2579 t32 = tcg_temp_new_i32();
2580 va = load_fpr(ctx, ra);
2581 gen_helper_s_to_memory(t32, va);
2582 tcg_gen_ext_i32_i64(vc, t32);
2583 tcg_temp_free_i32(t32);
2584 break;
2585 }
2587 vb = load_gpr_lit(ctx, rb, lit, islit);
2588 switch (fn7) {
2589 case 0x00:
2590 /* SEXTB */
2591 REQUIRE_AMASK(BWX);
2592 REQUIRE_REG_31(ra);
2593 tcg_gen_ext8s_i64(vc, vb);
2594 break;
2595 case 0x01:
2596 /* SEXTW */
2597 REQUIRE_AMASK(BWX);
2598 REQUIRE_REG_31(ra);
2599 tcg_gen_ext16s_i64(vc, vb);
2600 break;
2601 case 0x30:
2602 /* CTPOP */
2603 REQUIRE_AMASK(CIX);
2604 REQUIRE_REG_31(ra);
2605 REQUIRE_NO_LIT;
2606 tcg_gen_ctpop_i64(vc, vb);
2607 break;
2608 case 0x31:
2609 /* PERR */
2610 REQUIRE_AMASK(MVI);
2611 REQUIRE_NO_LIT;
2612 va = load_gpr(ctx, ra);
2613 gen_helper_perr(vc, va, vb);
2614 break;
2615 case 0x32:
2616 /* CTLZ */
2617 REQUIRE_AMASK(CIX);
2618 REQUIRE_REG_31(ra);
2619 REQUIRE_NO_LIT;
2620 tcg_gen_clzi_i64(vc, vb, 64);
2621 break;
2622 case 0x33:
2623 /* CTTZ */
2624 REQUIRE_AMASK(CIX);
2625 REQUIRE_REG_31(ra);
2626 REQUIRE_NO_LIT;
2627 tcg_gen_ctzi_i64(vc, vb, 64);
2628 break;
2629 case 0x34:
2630 /* UNPKBW */
2631 REQUIRE_AMASK(MVI);
2632 REQUIRE_REG_31(ra);
2633 REQUIRE_NO_LIT;
2634 gen_helper_unpkbw(vc, vb);
2635 break;
2636 case 0x35:
2637 /* UNPKBL */
2638 REQUIRE_AMASK(MVI);
2639 REQUIRE_REG_31(ra);
2640 REQUIRE_NO_LIT;
2641 gen_helper_unpkbl(vc, vb);
2642 break;
2643 case 0x36:
2644 /* PKWB */
2645 REQUIRE_AMASK(MVI);
2646 REQUIRE_REG_31(ra);
2647 REQUIRE_NO_LIT;
2648 gen_helper_pkwb(vc, vb);
2649 break;
2650 case 0x37:
2651 /* PKLB */
2652 REQUIRE_AMASK(MVI);
2653 REQUIRE_REG_31(ra);
2654 REQUIRE_NO_LIT;
2655 gen_helper_pklb(vc, vb);
2656 break;
2657 case 0x38:
2658 /* MINSB8 */
2659 REQUIRE_AMASK(MVI);
2660 va = load_gpr(ctx, ra);
2661 gen_helper_minsb8(vc, va, vb);
2662 break;
2663 case 0x39:
2664 /* MINSW4 */
2665 REQUIRE_AMASK(MVI);
2666 va = load_gpr(ctx, ra);
2667 gen_helper_minsw4(vc, va, vb);
2668 break;
2669 case 0x3A:
2670 /* MINUB8 */
2671 REQUIRE_AMASK(MVI);
2672 va = load_gpr(ctx, ra);
2673 gen_helper_minub8(vc, va, vb);
2674 break;
2675 case 0x3B:
2676 /* MINUW4 */
2677 REQUIRE_AMASK(MVI);
2678 va = load_gpr(ctx, ra);
2679 gen_helper_minuw4(vc, va, vb);
2680 break;
2681 case 0x3C:
2682 /* MAXUB8 */
2683 REQUIRE_AMASK(MVI);
2684 va = load_gpr(ctx, ra);
2685 gen_helper_maxub8(vc, va, vb);
2686 break;
2687 case 0x3D:
2688 /* MAXUW4 */
2689 REQUIRE_AMASK(MVI);
2690 va = load_gpr(ctx, ra);
2691 gen_helper_maxuw4(vc, va, vb);
2692 break;
2693 case 0x3E:
2694 /* MAXSB8 */
2695 REQUIRE_AMASK(MVI);
2696 va = load_gpr(ctx, ra);
2697 gen_helper_maxsb8(vc, va, vb);
2698 break;
2699 case 0x3F:
2700 /* MAXSW4 */
2701 REQUIRE_AMASK(MVI);
2702 va = load_gpr(ctx, ra);
2703 gen_helper_maxsw4(vc, va, vb);
2704 break;
2705 default:
2706 goto invalid_opc;
2707 }
2708 break;
2710 case 0x1D:
2711 /* HW_MTPR (PALcode) */
2712 #ifndef CONFIG_USER_ONLY
2713 REQUIRE_TB_FLAG(ENV_FLAG_PAL_MODE);
2714 vb = load_gpr(ctx, rb);
2715 ret = gen_mtpr(ctx, vb, insn & 0xffff);
2716 break;
2717 #else
2718 goto invalid_opc;
2719 #endif
2721 case 0x1E:
2722 /* HW_RET (PALcode) */
2723 #ifndef CONFIG_USER_ONLY
2724 REQUIRE_TB_FLAG(ENV_FLAG_PAL_MODE);
2725 if (rb == 31) {
2726 /* Pre-EV6 CPUs interpreted this as HW_REI, loading the return
2727 address from EXC_ADDR. This turns out to be useful for our
2728 emulation PALcode, so continue to accept it. */
2729 ctx->lit = vb = tcg_temp_new();
2730 tcg_gen_ld_i64(vb, cpu_env, offsetof(CPUAlphaState, exc_addr));
2731 } else {
2732 vb = load_gpr(ctx, rb);
2733 }
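/* Bit 0 of the target selects the mode being returned to: it is
   copied into the PAL-mode flag below, while the RX flag is
   unconditionally cleared. */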
2734 tcg_gen_movi_i64(cpu_lock_addr, -1);
2735 tmp = tcg_temp_new();
2736 tcg_gen_movi_i64(tmp, 0);
2737 st_flag_byte(tmp, ENV_FLAG_RX_SHIFT);
2738 tcg_gen_andi_i64(tmp, vb, 1);
2739 st_flag_byte(tmp, ENV_FLAG_PAL_SHIFT);
2740 tcg_temp_free(tmp);
2741 tcg_gen_andi_i64(cpu_pc, vb, ~3);
2742 /* Allow interrupts to be recognized right away. */
2743 ret = DISAS_PC_UPDATED_NOCHAIN;
2744 break;
2745 #else
2746 goto invalid_opc;
2747 #endif
2749 case 0x1F:
2750 /* HW_ST (PALcode) */
2751 #ifndef CONFIG_USER_ONLY
2752 REQUIRE_TB_FLAG(ENV_FLAG_PAL_MODE);
2753 {
2754 switch ((insn >> 12) & 0xF) {
2755 case 0x0:
2756 /* Longword physical access */
2757 va = load_gpr(ctx, ra);
2758 vb = load_gpr(ctx, rb);
2759 tmp = tcg_temp_new();
2760 tcg_gen_addi_i64(tmp, vb, disp12);
2761 tcg_gen_qemu_st_i64(va, tmp, MMU_PHYS_IDX, MO_LESL);
2762 tcg_temp_free(tmp);
2763 break;
2764 case 0x1:
2765 /* Quadword physical access */
2766 va = load_gpr(ctx, ra);
2767 vb = load_gpr(ctx, rb);
2768 tmp = tcg_temp_new();
2769 tcg_gen_addi_i64(tmp, vb, disp12);
2770 tcg_gen_qemu_st_i64(va, tmp, MMU_PHYS_IDX, MO_LEQ);
2771 tcg_temp_free(tmp);
2772 break;
2773 case 0x2:
2774 /* Longword physical access with lock */
2775 ret = gen_store_conditional(ctx, ra, rb, disp12,
2776 MMU_PHYS_IDX, MO_LESL);
2777 break;
2778 case 0x3:
2779 /* Quadword physical access with lock */
2780 ret = gen_store_conditional(ctx, ra, rb, disp12,
2781 MMU_PHYS_IDX, MO_LEQ);
2782 break;
2783 case 0x4:
2784 /* Longword virtual access */
2785 goto invalid_opc;
2786 case 0x5:
2787 /* Quadword virtual access */
2788 goto invalid_opc;
2789 case 0x6:
2790 /* Invalid */
2791 goto invalid_opc;
2792 case 0x7:
2793 /* Invalid */
2794 goto invalid_opc;
2795 case 0x8:
2796 /* Invalid */
2797 goto invalid_opc;
2798 case 0x9:
2799 /* Invalid */
2800 goto invalid_opc;
2801 case 0xA:
2802 /* Invalid */
2803 goto invalid_opc;
2804 case 0xB:
2805 /* Invalid */
2806 goto invalid_opc;
2807 case 0xC:
2808 /* Longword virtual access with alternate access mode */
2809 goto invalid_opc;
2810 case 0xD:
2811 /* Quadword virtual access with alternate access mode */
2812 goto invalid_opc;
2813 case 0xE:
2814 /* Invalid */
2815 goto invalid_opc;
2816 case 0xF:
2817 /* Invalid */
2818 goto invalid_opc;
2819 }
2820 break;
2821 }
2822 #else
2823 goto invalid_opc;
2824 #endif
2825 case 0x20:
2826 /* LDF */
2827 REQUIRE_FEN;
2828 gen_load_mem(ctx, &gen_qemu_ldf, ra, rb, disp16, 1, 0);
2829 break;
2830 case 0x21:
2831 /* LDG */
2832 REQUIRE_FEN;
2833 gen_load_mem(ctx, &gen_qemu_ldg, ra, rb, disp16, 1, 0);
2834 break;
2835 case 0x22:
2836 /* LDS */
2837 REQUIRE_FEN;
2838 gen_load_mem(ctx, &gen_qemu_lds, ra, rb, disp16, 1, 0);
2839 break;
2840 case 0x23:
2841 /* LDT */
2842 REQUIRE_FEN;
2843 gen_load_mem(ctx, &tcg_gen_qemu_ld64, ra, rb, disp16, 1, 0);
2844 break;
2845 case 0x24:
2846 /* STF */
2847 REQUIRE_FEN;
2848 gen_store_mem(ctx, &gen_qemu_stf, ra, rb, disp16, 1, 0);
2849 break;
2850 case 0x25:
2851 /* STG */
2852 REQUIRE_FEN;
2853 gen_store_mem(ctx, &gen_qemu_stg, ra, rb, disp16, 1, 0);
2854 break;
2855 case 0x26:
2856 /* STS */
2857 REQUIRE_FEN;
2858 gen_store_mem(ctx, &gen_qemu_sts, ra, rb, disp16, 1, 0);
2859 break;
2860 case 0x27:
2861 /* STT */
2862 REQUIRE_FEN;
2863 gen_store_mem(ctx, &tcg_gen_qemu_st64, ra, rb, disp16, 1, 0);
2864 break;
2865 case 0x28:
2866 /* LDL */
2867 gen_load_mem(ctx, &tcg_gen_qemu_ld32s, ra, rb, disp16, 0, 0);
2868 break;
2869 case 0x29:
2870 /* LDQ */
2871 gen_load_mem(ctx, &tcg_gen_qemu_ld64, ra, rb, disp16, 0, 0);
2872 break;
2873 case 0x2A:
2874 /* LDL_L */
2875 gen_load_mem(ctx, &gen_qemu_ldl_l, ra, rb, disp16, 0, 0);
2876 break;
2877 case 0x2B:
2878 /* LDQ_L */
2879 gen_load_mem(ctx, &gen_qemu_ldq_l, ra, rb, disp16, 0, 0);
2880 break;
2881 case 0x2C:
2882 /* STL */
2883 gen_store_mem(ctx, &tcg_gen_qemu_st32, ra, rb, disp16, 0, 0);
2884 break;
2885 case 0x2D:
2886 /* STQ */
2887 gen_store_mem(ctx, &tcg_gen_qemu_st64, ra, rb, disp16, 0, 0);
2888 break;
2889 case 0x2E:
2890 /* STL_C */
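/* Both store-conditionals pair with the lock state recorded by LDx_L
   in cpu_lock_addr/cpu_lock_value; gen_store_conditional emits the
   compare-and-swap sequence and resets the lock. */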
2891 ret = gen_store_conditional(ctx, ra, rb, disp16,
2892 ctx->mem_idx, MO_LESL);
2893 break;
2894 case 0x2F:
2895 /* STQ_C */
2896 ret = gen_store_conditional(ctx, ra, rb, disp16,
2897 ctx->mem_idx, MO_LEQ);
2898 break;
2899 case 0x30:
2900 /* BR */
2901 ret = gen_bdirect(ctx, ra, disp21);
2902 break;
2903 case 0x31: /* FBEQ */
2904 REQUIRE_FEN;
2905 ret = gen_fbcond(ctx, TCG_COND_EQ, ra, disp21);
2906 break;
2907 case 0x32: /* FBLT */
2908 REQUIRE_FEN;
2909 ret = gen_fbcond(ctx, TCG_COND_LT, ra, disp21);
2910 break;
2911 case 0x33: /* FBLE */
2912 REQUIRE_FEN;
2913 ret = gen_fbcond(ctx, TCG_COND_LE, ra, disp21);
2914 break;
2915 case 0x34:
2916 /* BSR */
2917 ret = gen_bdirect(ctx, ra, disp21);
2918 break;
2919 case 0x35: /* FBNE */
2920 REQUIRE_FEN;
2921 ret = gen_fbcond(ctx, TCG_COND_NE, ra, disp21);
2922 break;
2923 case 0x36: /* FBGE */
2924 REQUIRE_FEN;
2925 ret = gen_fbcond(ctx, TCG_COND_GE, ra, disp21);
2926 break;
2927 case 0x37: /* FBGT */
2928 REQUIRE_FEN;
2929 ret = gen_fbcond(ctx, TCG_COND_GT, ra, disp21);
2930 break;
2931 case 0x38:
2932 /* BLBC */
2933 ret = gen_bcond(ctx, TCG_COND_EQ, ra, disp21, 1);
2934 break;
2935 case 0x39:
2936 /* BEQ */
2937 ret = gen_bcond(ctx, TCG_COND_EQ, ra, disp21, 0);
2938 break;
2939 case 0x3A:
2940 /* BLT */
2941 ret = gen_bcond(ctx, TCG_COND_LT, ra, disp21, 0);
2942 break;
2943 case 0x3B:
2944 /* BLE */
2945 ret = gen_bcond(ctx, TCG_COND_LE, ra, disp21, 0);
2946 break;
2947 case 0x3C:
2948 /* BLBS */
2949 ret = gen_bcond(ctx, TCG_COND_NE, ra, disp21, 1);
2950 break;
2951 case 0x3D:
2952 /* BNE */
2953 ret = gen_bcond(ctx, TCG_COND_NE, ra, disp21, 0);
2954 break;
2955 case 0x3E:
2956 /* BGE */
2957 ret = gen_bcond(ctx, TCG_COND_GE, ra, disp21, 0);
2958 break;
2959 case 0x3F:
2960 /* BGT */
2961 ret = gen_bcond(ctx, TCG_COND_GT, ra, disp21, 0);
2962 break;
2963 invalid_opc:
2964 ret = gen_invalid(ctx);
2965 break;
2966 raise_fen:
2967 ret = gen_excp(ctx, EXCP_FEN, 0);
2968 break;
2969 }
2971 return ret;
2972 }
2974 static void alpha_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cpu)
2975 {
2976 DisasContext *ctx = container_of(dcbase, DisasContext, base);
2977 CPUAlphaState *env = cpu->env_ptr;
2978 int64_t bound;
2980 ctx->tbflags = ctx->base.tb->flags;
2981 ctx->mem_idx = cpu_mmu_index(env, false);
2982 ctx->implver = env->implver;
2983 ctx->amask = env->amask;
2985 #ifdef CONFIG_USER_ONLY
2986 ctx->ir = cpu_std_ir;
2987 #else
2988 ctx->palbr = env->palbr;
2989 ctx->ir = (ctx->tbflags & ENV_FLAG_PAL_MODE ? cpu_pal_ir : cpu_std_ir);
2990 #endif
2992 /* ??? Every TB begins with unset rounding mode, to be initialized on
2993 the first fp insn of the TB. Alternatively we could define a proper
2994 default for every TB (e.g. QUAL_RM_N or QUAL_RM_D) and make sure
2995 to reset the FP_STATUS to that default at the end of any TB that
2996 changes the default. We could even (gasp) dynamically figure out
2997 what default would be most efficient given the running program. */
2998 ctx->tb_rm = -1;
2999 /* Similarly for flush-to-zero. */
3000 ctx->tb_ftz = -1;
3002 ctx->zero = NULL;
3003 ctx->sink = NULL;
3004 ctx->lit = NULL;
3006 /* Bound the number of insns to execute to those left on the page. */
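/* pc_first | TARGET_PAGE_MASK sign-extends to page_offset - page_size,
   so negating it yields the bytes remaining on the page; dividing by
   the 4-byte insn size converts that to an insn count. */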
3007 bound = -(ctx->base.pc_first | TARGET_PAGE_MASK) / 4;
3008 ctx->base.max_insns = MIN(ctx->base.max_insns, bound);
3009 }
3011 static void alpha_tr_tb_start(DisasContextBase *db, CPUState *cpu)
3012 {
3013 }
3015 static void alpha_tr_insn_start(DisasContextBase *dcbase, CPUState *cpu)
3016 {
3017 tcg_gen_insn_start(dcbase->pc_next);
3018 }
3020 static bool alpha_tr_breakpoint_check(DisasContextBase *dcbase, CPUState *cpu,
3021 const CPUBreakpoint *bp)
3022 {
3023 DisasContext *ctx = container_of(dcbase, DisasContext, base);
3025 ctx->base.is_jmp = gen_excp(ctx, EXCP_DEBUG, 0);
3027 /* The address covered by the breakpoint must be included in
3028 [tb->pc, tb->pc + tb->size) in order for it to be
3029 properly cleared -- thus we increment the PC here so that
3030 the logic setting tb->size below does the right thing. */
3031 ctx->base.pc_next += 4;
3032 return true;
3033 }
3035 static void alpha_tr_translate_insn(DisasContextBase *dcbase, CPUState *cpu)
3036 {
3037 DisasContext *ctx = container_of(dcbase, DisasContext, base);
3038 CPUAlphaState *env = cpu->env_ptr;
3039 uint32_t insn = translator_ldl(env, ctx->base.pc_next);
3041 ctx->base.pc_next += 4;
3042 ctx->base.is_jmp = translate_one(ctx, insn);
3044 free_context_temps(ctx);
3045 translator_loop_temp_check(&ctx->base);
3046 }
3048 static void alpha_tr_tb_stop(DisasContextBase *dcbase, CPUState *cpu)
3049 {
3050 DisasContext *ctx = container_of(dcbase, DisasContext, base);
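/* Each exit status falls through to the next weaker strategy: a
   chained goto_tb, then an explicit PC update, then an indirect
   jump, then a plain TB exit (or a debug exception when
   single-stepping). */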
3052 switch (ctx->base.is_jmp) {
3053 case DISAS_NORETURN:
3054 break;
3055 case DISAS_TOO_MANY:
3056 if (use_goto_tb(ctx, ctx->base.pc_next)) {
3057 tcg_gen_goto_tb(0);
3058 tcg_gen_movi_i64(cpu_pc, ctx->base.pc_next);
3059 tcg_gen_exit_tb(ctx->base.tb, 0);
3060 }
3061 /* FALLTHRU */
3062 case DISAS_PC_STALE:
3063 tcg_gen_movi_i64(cpu_pc, ctx->base.pc_next);
3064 /* FALLTHRU */
3065 case DISAS_PC_UPDATED:
3066 if (!ctx->base.singlestep_enabled) {
3067 tcg_gen_lookup_and_goto_ptr();
3068 break;
3069 }
3070 /* FALLTHRU */
3071 case DISAS_PC_UPDATED_NOCHAIN:
3072 if (ctx->base.singlestep_enabled) {
3073 gen_excp_1(EXCP_DEBUG, 0);
3074 } else {
3075 tcg_gen_exit_tb(NULL, 0);
3076 }
3077 break;
3078 default:
3079 g_assert_not_reached();
3080 }
3081 }
3083 static void alpha_tr_disas_log(const DisasContextBase *dcbase, CPUState *cpu)
3084 {
3085 qemu_log("IN: %s\n", lookup_symbol(dcbase->pc_first));
3086 log_target_disas(cpu, dcbase->pc_first, dcbase->tb->size);
3087 }
3089 static const TranslatorOps alpha_tr_ops = {
3090 .init_disas_context = alpha_tr_init_disas_context,
3091 .tb_start = alpha_tr_tb_start,
3092 .insn_start = alpha_tr_insn_start,
3093 .breakpoint_check = alpha_tr_breakpoint_check,
3094 .translate_insn = alpha_tr_translate_insn,
3095 .tb_stop = alpha_tr_tb_stop,
3096 .disas_log = alpha_tr_disas_log,
3097 };
3099 void gen_intermediate_code(CPUState *cpu, TranslationBlock *tb, int max_insns)
3100 {
3101 DisasContext dc;
3102 translator_loop(&alpha_tr_ops, &dc.base, cpu, tb, max_insns);
3103 }
3105 void restore_state_to_opc(CPUAlphaState *env, TranslationBlock *tb,
3106 target_ulong *data)
3107 {
3108 env->pc = data[0];