target/alpha/translate.c
1 /*
2 * Alpha emulation cpu translation for qemu.
4 * Copyright (c) 2007 Jocelyn Mayer
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2.1 of the License, or (at your option) any later version.
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
20 #include "qemu/osdep.h"
21 #include "cpu.h"
22 #include "sysemu/cpus.h"
23 #include "sysemu/cpu-timers.h"
24 #include "disas/disas.h"
25 #include "qemu/host-utils.h"
26 #include "exec/exec-all.h"
27 #include "tcg/tcg-op.h"
28 #include "exec/cpu_ldst.h"
29 #include "exec/helper-proto.h"
30 #include "exec/helper-gen.h"
31 #include "trace-tcg.h"
32 #include "exec/translator.h"
33 #include "exec/log.h"
36 #undef ALPHA_DEBUG_DISAS
37 #define CONFIG_SOFTFLOAT_INLINE
39 #ifdef ALPHA_DEBUG_DISAS
40 # define LOG_DISAS(...) qemu_log_mask(CPU_LOG_TB_IN_ASM, ## __VA_ARGS__)
41 #else
42 # define LOG_DISAS(...) do { } while (0)
43 #endif
45 typedef struct DisasContext DisasContext;
46 struct DisasContext {
47 DisasContextBase base;
49 #ifndef CONFIG_USER_ONLY
50 uint64_t palbr;
51 #endif
52 uint32_t tbflags;
53 int mem_idx;
55 /* implver and amask values for this CPU. */
56 int implver;
57 int amask;
59 /* Current rounding mode for this TB. */
60 int tb_rm;
61 /* Current flush-to-zero setting for this TB. */
62 int tb_ftz;
64 /* The set of registers active in the current context. */
65 TCGv *ir;
67 /* Temporaries for $31 and $f31 as source and destination. */
68 TCGv zero;
69 TCGv sink;
70 /* Temporary for immediate constants. */
71 TCGv lit;
74 /* Target-specific return values from translate_one, indicating the
75 state of the TB. Note that DISAS_NEXT indicates that we are not
76 exiting the TB. */
77 #define DISAS_PC_UPDATED_NOCHAIN DISAS_TARGET_0
78 #define DISAS_PC_UPDATED DISAS_TARGET_1
79 #define DISAS_PC_STALE DISAS_TARGET_2
81 /* global register indexes */
82 static TCGv cpu_std_ir[31];
83 static TCGv cpu_fir[31];
84 static TCGv cpu_pc;
85 static TCGv cpu_lock_addr;
86 static TCGv cpu_lock_value;
88 #ifndef CONFIG_USER_ONLY
89 static TCGv cpu_pal_ir[31];
90 #endif
92 #include "exec/gen-icount.h"
94 void alpha_translate_init(void)
96 #define DEF_VAR(V) { &cpu_##V, #V, offsetof(CPUAlphaState, V) }
98 typedef struct { TCGv *var; const char *name; int ofs; } GlobalVar;
99 static const GlobalVar vars[] = {
100 DEF_VAR(pc),
101 DEF_VAR(lock_addr),
102 DEF_VAR(lock_value),
105 #undef DEF_VAR
107 /* Use the symbolic register names that match the disassembler. */
108 static const char greg_names[31][4] = {
109 "v0", "t0", "t1", "t2", "t3", "t4", "t5", "t6",
110 "t7", "s0", "s1", "s2", "s3", "s4", "s5", "fp",
111 "a0", "a1", "a2", "a3", "a4", "a5", "t8", "t9",
112 "t10", "t11", "ra", "t12", "at", "gp", "sp"
114 static const char freg_names[31][4] = {
115 "f0", "f1", "f2", "f3", "f4", "f5", "f6", "f7",
116 "f8", "f9", "f10", "f11", "f12", "f13", "f14", "f15",
117 "f16", "f17", "f18", "f19", "f20", "f21", "f22", "f23",
118 "f24", "f25", "f26", "f27", "f28", "f29", "f30"
120 #ifndef CONFIG_USER_ONLY
121 static const char shadow_names[8][8] = {
122 "pal_t7", "pal_s0", "pal_s1", "pal_s2",
123 "pal_s3", "pal_s4", "pal_s5", "pal_t11"
125 #endif
127 int i;
129 for (i = 0; i < 31; i++) {
130 cpu_std_ir[i] = tcg_global_mem_new_i64(cpu_env,
131 offsetof(CPUAlphaState, ir[i]),
132 greg_names[i]);
135 for (i = 0; i < 31; i++) {
136 cpu_fir[i] = tcg_global_mem_new_i64(cpu_env,
137 offsetof(CPUAlphaState, fir[i]),
138 freg_names[i]);
141 #ifndef CONFIG_USER_ONLY
142 memcpy(cpu_pal_ir, cpu_std_ir, sizeof(cpu_pal_ir));
143 for (i = 0; i < 8; i++) {
144 int r = (i == 7 ? 25 : i + 8);
145 cpu_pal_ir[r] = tcg_global_mem_new_i64(cpu_env,
146 offsetof(CPUAlphaState,
147 shadow[i]),
148 shadow_names[i]);
150 #endif
152 for (i = 0; i < ARRAY_SIZE(vars); ++i) {
153 const GlobalVar *v = &vars[i];
154 *v->var = tcg_global_mem_new_i64(cpu_env, v->ofs, v->name);
158 static TCGv load_zero(DisasContext *ctx)
160 if (!ctx->zero) {
161 ctx->zero = tcg_const_i64(0);
163 return ctx->zero;
166 static TCGv dest_sink(DisasContext *ctx)
168 if (!ctx->sink) {
169 ctx->sink = tcg_temp_new();
171 return ctx->sink;
174 static void free_context_temps(DisasContext *ctx)
176 if (ctx->sink) {
177 tcg_gen_discard_i64(ctx->sink);
178 tcg_temp_free(ctx->sink);
179 ctx->sink = NULL;
181 if (ctx->zero) {
182 tcg_temp_free(ctx->zero);
183 ctx->zero = NULL;
185 if (ctx->lit) {
186 tcg_temp_free(ctx->lit);
187 ctx->lit = NULL;
191 static TCGv load_gpr(DisasContext *ctx, unsigned reg)
193 if (likely(reg < 31)) {
194 return ctx->ir[reg];
195 } else {
196 return load_zero(ctx);
200 static TCGv load_gpr_lit(DisasContext *ctx, unsigned reg,
201 uint8_t lit, bool islit)
203 if (islit) {
204 ctx->lit = tcg_const_i64(lit);
205 return ctx->lit;
206 } else if (likely(reg < 31)) {
207 return ctx->ir[reg];
208 } else {
209 return load_zero(ctx);
213 static TCGv dest_gpr(DisasContext *ctx, unsigned reg)
215 if (likely(reg < 31)) {
216 return ctx->ir[reg];
217 } else {
218 return dest_sink(ctx);
222 static TCGv load_fpr(DisasContext *ctx, unsigned reg)
224 if (likely(reg < 31)) {
225 return cpu_fir[reg];
226 } else {
227 return load_zero(ctx);
231 static TCGv dest_fpr(DisasContext *ctx, unsigned reg)
233 if (likely(reg < 31)) {
234 return cpu_fir[reg];
235 } else {
236 return dest_sink(ctx);
240 static int get_flag_ofs(unsigned shift)
242 int ofs = offsetof(CPUAlphaState, flags);
243 #ifdef HOST_WORDS_BIGENDIAN
244 ofs += 3 - (shift / 8);
245 #else
246 ofs += shift / 8;
247 #endif
248 return ofs;
251 static void ld_flag_byte(TCGv val, unsigned shift)
253 tcg_gen_ld8u_i64(val, cpu_env, get_flag_ofs(shift));
256 static void st_flag_byte(TCGv val, unsigned shift)
258 tcg_gen_st8_i64(val, cpu_env, get_flag_ofs(shift));
261 static void gen_excp_1(int exception, int error_code)
263 TCGv_i32 tmp1, tmp2;
265 tmp1 = tcg_const_i32(exception);
266 tmp2 = tcg_const_i32(error_code);
267 gen_helper_excp(cpu_env, tmp1, tmp2);
268 tcg_temp_free_i32(tmp2);
269 tcg_temp_free_i32(tmp1);
272 static DisasJumpType gen_excp(DisasContext *ctx, int exception, int error_code)
274 tcg_gen_movi_i64(cpu_pc, ctx->base.pc_next);
275 gen_excp_1(exception, error_code);
276 return DISAS_NORETURN;
279 static inline DisasJumpType gen_invalid(DisasContext *ctx)
281 return gen_excp(ctx, EXCP_OPCDEC, 0);
284 static inline void gen_qemu_ldf(TCGv t0, TCGv t1, int flags)
286 TCGv_i32 tmp32 = tcg_temp_new_i32();
287 tcg_gen_qemu_ld_i32(tmp32, t1, flags, MO_LEUL);
288 gen_helper_memory_to_f(t0, tmp32);
289 tcg_temp_free_i32(tmp32);
292 static inline void gen_qemu_ldg(TCGv t0, TCGv t1, int flags)
294 TCGv tmp = tcg_temp_new();
295 tcg_gen_qemu_ld_i64(tmp, t1, flags, MO_LEQ);
296 gen_helper_memory_to_g(t0, tmp);
297 tcg_temp_free(tmp);
300 static inline void gen_qemu_lds(TCGv t0, TCGv t1, int flags)
302 TCGv_i32 tmp32 = tcg_temp_new_i32();
303 tcg_gen_qemu_ld_i32(tmp32, t1, flags, MO_LEUL);
304 gen_helper_memory_to_s(t0, tmp32);
305 tcg_temp_free_i32(tmp32);
308 static inline void gen_qemu_ldl_l(TCGv t0, TCGv t1, int flags)
310 tcg_gen_qemu_ld_i64(t0, t1, flags, MO_LESL);
311 tcg_gen_mov_i64(cpu_lock_addr, t1);
312 tcg_gen_mov_i64(cpu_lock_value, t0);
315 static inline void gen_qemu_ldq_l(TCGv t0, TCGv t1, int flags)
317 tcg_gen_qemu_ld_i64(t0, t1, flags, MO_LEQ);
318 tcg_gen_mov_i64(cpu_lock_addr, t1);
319 tcg_gen_mov_i64(cpu_lock_value, t0);
322 static inline void gen_load_mem(DisasContext *ctx,
323 void (*tcg_gen_qemu_load)(TCGv t0, TCGv t1,
324 int flags),
325 int ra, int rb, int32_t disp16, bool fp,
326 bool clear)
328 TCGv tmp, addr, va;
330 /* LDQ_U with ra $31 is UNOP. Other various loads are forms of
331 prefetches, which we can treat as nops. No worries about
332 missed exceptions here. */
333 if (unlikely(ra == 31)) {
334 return;
337 tmp = tcg_temp_new();
338 addr = load_gpr(ctx, rb);
340 if (disp16) {
341 tcg_gen_addi_i64(tmp, addr, disp16);
342 addr = tmp;
344 if (clear) {
345 tcg_gen_andi_i64(tmp, addr, ~0x7);
346 addr = tmp;
349 va = (fp ? cpu_fir[ra] : ctx->ir[ra]);
350 tcg_gen_qemu_load(va, addr, ctx->mem_idx);
352 tcg_temp_free(tmp);
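/* Illustrative note (not from the original source): the canonical Alpha
   UNOP encoding is "ldq_u $31, 0($sp)", i.e. insn 0x2ffe0000, which
   decodes to opc 0x0B with ra == 31 and therefore returns through the
   early exit above without emitting any code. */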
355 static inline void gen_qemu_stf(TCGv t0, TCGv t1, int flags)
357 TCGv_i32 tmp32 = tcg_temp_new_i32();
358 gen_helper_f_to_memory(tmp32, t0);
359 tcg_gen_qemu_st_i32(tmp32, t1, flags, MO_LEUL);
360 tcg_temp_free_i32(tmp32);
363 static inline void gen_qemu_stg(TCGv t0, TCGv t1, int flags)
365 TCGv tmp = tcg_temp_new();
366 gen_helper_g_to_memory(tmp, t0);
367 tcg_gen_qemu_st_i64(tmp, t1, flags, MO_LEQ);
368 tcg_temp_free(tmp);
371 static inline void gen_qemu_sts(TCGv t0, TCGv t1, int flags)
373 TCGv_i32 tmp32 = tcg_temp_new_i32();
374 gen_helper_s_to_memory(tmp32, t0);
375 tcg_gen_qemu_st_i32(tmp32, t1, flags, MO_LEUL);
376 tcg_temp_free_i32(tmp32);
379 static inline void gen_store_mem(DisasContext *ctx,
380 void (*tcg_gen_qemu_store)(TCGv t0, TCGv t1,
381 int flags),
382 int ra, int rb, int32_t disp16, bool fp,
383 bool clear)
385 TCGv tmp, addr, va;
387 tmp = tcg_temp_new();
388 addr = load_gpr(ctx, rb);
390 if (disp16) {
391 tcg_gen_addi_i64(tmp, addr, disp16);
392 addr = tmp;
394 if (clear) {
395 tcg_gen_andi_i64(tmp, addr, ~0x7);
396 addr = tmp;
399 va = (fp ? load_fpr(ctx, ra) : load_gpr(ctx, ra));
400 tcg_gen_qemu_store(va, addr, ctx->mem_idx);
402 tcg_temp_free(tmp);
405 static DisasJumpType gen_store_conditional(DisasContext *ctx, int ra, int rb,
406 int32_t disp16, int mem_idx,
407 MemOp op)
409 TCGLabel *lab_fail, *lab_done;
410 TCGv addr, val;
412 addr = tcg_temp_new_i64();
413 tcg_gen_addi_i64(addr, load_gpr(ctx, rb), disp16);
414 free_context_temps(ctx);
416 lab_fail = gen_new_label();
417 lab_done = gen_new_label();
418 tcg_gen_brcond_i64(TCG_COND_NE, addr, cpu_lock_addr, lab_fail);
419 tcg_temp_free_i64(addr);
421 val = tcg_temp_new_i64();
422 tcg_gen_atomic_cmpxchg_i64(val, cpu_lock_addr, cpu_lock_value,
423 load_gpr(ctx, ra), mem_idx, op);
424 free_context_temps(ctx);
426 if (ra != 31) {
427 tcg_gen_setcond_i64(TCG_COND_EQ, ctx->ir[ra], val, cpu_lock_value);
429 tcg_temp_free_i64(val);
430 tcg_gen_br(lab_done);
432 gen_set_label(lab_fail);
433 if (ra != 31) {
434 tcg_gen_movi_i64(ctx->ir[ra], 0);
437 gen_set_label(lab_done);
438 tcg_gen_movi_i64(cpu_lock_addr, -1);
439 return DISAS_NEXT;
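/* Sketch of the emulated LL/SC protocol (illustrative comment, assuming
   the usual QEMU cmpxchg semantics):

     ldq_l -> cpu_lock_addr = addr, cpu_lock_value = loaded value
     stq_c -> branch to lab_fail if the SC address differs from
              cpu_lock_addr; otherwise cmpxchg(addr,
              expected = cpu_lock_value, new = ra) and report success
              iff memory still held the old value.

   Because success is decided by value comparison rather than a real
   reservation, an ABA-style modification between the LL and the SC is
   not detected; that is an accepted approximation in this emulation. */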
442 static bool in_superpage(DisasContext *ctx, int64_t addr)
444 #ifndef CONFIG_USER_ONLY
445 return ((ctx->tbflags & ENV_FLAG_PS_USER) == 0
446 && addr >> TARGET_VIRT_ADDR_SPACE_BITS == -1
447 && ((addr >> 41) & 3) == 2);
448 #else
449 return false;
450 #endif
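/* Example (illustrative): with the usual 43-bit Alpha virtual address
   space, the test above accepts the kernel KSEG window starting at
   0xfffffc0000000000 -- the high bits are all ones and bits <42:41>
   equal 2 -- where virtual addresses map linearly to physical memory
   and page permissions can never change. */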
453 static bool use_exit_tb(DisasContext *ctx)
455 return ((tb_cflags(ctx->base.tb) & CF_LAST_IO)
456 || ctx->base.singlestep_enabled
457 || singlestep);
460 static bool use_goto_tb(DisasContext *ctx, uint64_t dest)
462 /* Suppress goto_tb in the case of single-stepping and IO. */
463 if (unlikely(use_exit_tb(ctx))) {
464 return false;
466 #ifndef CONFIG_USER_ONLY
467 /* If the destination is in the superpage, the page perms can't change. */
468 if (in_superpage(ctx, dest)) {
469 return true;
471 /* Check for the dest on the same page as the start of the TB. */
472 return ((ctx->base.tb->pc ^ dest) & TARGET_PAGE_MASK) == 0;
473 #else
474 return true;
475 #endif
478 static DisasJumpType gen_bdirect(DisasContext *ctx, int ra, int32_t disp)
480 uint64_t dest = ctx->base.pc_next + (disp << 2);
482 if (ra != 31) {
483 tcg_gen_movi_i64(ctx->ir[ra], ctx->base.pc_next);
486 /* Notice branch-to-next; used to initialize RA with the PC. */
487 if (disp == 0) {
488 return DISAS_NEXT;
489 } else if (use_goto_tb(ctx, dest)) {
490 tcg_gen_goto_tb(0);
491 tcg_gen_movi_i64(cpu_pc, dest);
492 tcg_gen_exit_tb(ctx->base.tb, 0);
493 return DISAS_NORETURN;
494 } else {
495 tcg_gen_movi_i64(cpu_pc, dest);
496 return DISAS_PC_UPDATED;
500 static DisasJumpType gen_bcond_internal(DisasContext *ctx, TCGCond cond,
501 TCGv cmp, int32_t disp)
503 uint64_t dest = ctx->base.pc_next + (disp << 2);
504 TCGLabel *lab_true = gen_new_label();
506 if (use_goto_tb(ctx, dest)) {
507 tcg_gen_brcondi_i64(cond, cmp, 0, lab_true);
509 tcg_gen_goto_tb(0);
510 tcg_gen_movi_i64(cpu_pc, ctx->base.pc_next);
511 tcg_gen_exit_tb(ctx->base.tb, 0);
513 gen_set_label(lab_true);
514 tcg_gen_goto_tb(1);
515 tcg_gen_movi_i64(cpu_pc, dest);
516 tcg_gen_exit_tb(ctx->base.tb, 1);
518 return DISAS_NORETURN;
519 } else {
520 TCGv_i64 z = tcg_const_i64(0);
521 TCGv_i64 d = tcg_const_i64(dest);
522 TCGv_i64 p = tcg_const_i64(ctx->base.pc_next);
524 tcg_gen_movcond_i64(cond, cpu_pc, cmp, z, d, p);
526 tcg_temp_free_i64(z);
527 tcg_temp_free_i64(d);
528 tcg_temp_free_i64(p);
529 return DISAS_PC_UPDATED;
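/* Illustrative summary (not from the original source): when goto_tb is
   usable, the branch becomes two chainable exits (exit 0 = fall
   through, exit 1 = taken); otherwise a single movcond selects between
   the branch target and the fall-through PC:

       cpu_pc = (cmp <cond> 0) ? dest : pc_next;

   and the TB ends with DISAS_PC_UPDATED instead of DISAS_NORETURN. */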
533 static DisasJumpType gen_bcond(DisasContext *ctx, TCGCond cond, int ra,
534 int32_t disp, int mask)
536 if (mask) {
537 TCGv tmp = tcg_temp_new();
538 DisasJumpType ret;
540 tcg_gen_andi_i64(tmp, load_gpr(ctx, ra), 1);
541 ret = gen_bcond_internal(ctx, cond, tmp, disp);
542 tcg_temp_free(tmp);
543 return ret;
545 return gen_bcond_internal(ctx, cond, load_gpr(ctx, ra), disp);
548 /* Fold -0.0 for comparison with COND. */
550 static void gen_fold_mzero(TCGCond cond, TCGv dest, TCGv src)
552 uint64_t mzero = 1ull << 63;
554 switch (cond) {
555 case TCG_COND_LE:
556 case TCG_COND_GT:
557 /* For <= or >, the -0.0 value directly compares the way we want. */
558 tcg_gen_mov_i64(dest, src);
559 break;
561 case TCG_COND_EQ:
562 case TCG_COND_NE:
563 /* For == or !=, we can simply mask off the sign bit and compare. */
564 tcg_gen_andi_i64(dest, src, mzero - 1);
565 break;
567 case TCG_COND_GE:
568 case TCG_COND_LT:
569 /* For >= or <, map -0.0 to +0.0 via comparison and mask. */
570 tcg_gen_setcondi_i64(TCG_COND_NE, dest, src, mzero);
571 tcg_gen_neg_i64(dest, dest);
572 tcg_gen_and_i64(dest, dest, src);
573 break;
575 default:
576 abort();
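/* Worked example (illustrative): for >= / <, an input of -0.0
   (0x8000000000000000) must behave as +0.0. The setcond above yields 0
   exactly for that bit pattern and 1 otherwise; the negation turns the
   1 into all-ones; the final AND therefore passes every other value
   through unchanged while mapping -0.0 to 0. */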
580 static DisasJumpType gen_fbcond(DisasContext *ctx, TCGCond cond, int ra,
581 int32_t disp)
583 TCGv cmp_tmp = tcg_temp_new();
584 DisasJumpType ret;
586 gen_fold_mzero(cond, cmp_tmp, load_fpr(ctx, ra));
587 ret = gen_bcond_internal(ctx, cond, cmp_tmp, disp);
588 tcg_temp_free(cmp_tmp);
589 return ret;
592 static void gen_fcmov(DisasContext *ctx, TCGCond cond, int ra, int rb, int rc)
594 TCGv_i64 va, vb, z;
596 z = load_zero(ctx);
597 vb = load_fpr(ctx, rb);
598 va = tcg_temp_new();
599 gen_fold_mzero(cond, va, load_fpr(ctx, ra));
601 tcg_gen_movcond_i64(cond, dest_fpr(ctx, rc), va, z, vb, load_fpr(ctx, rc));
603 tcg_temp_free(va);
606 #define QUAL_RM_N 0x080 /* Round mode nearest even */
607 #define QUAL_RM_C 0x000 /* Round mode chopped */
608 #define QUAL_RM_M 0x040 /* Round mode minus infinity */
609 #define QUAL_RM_D 0x0c0 /* Round mode dynamic */
610 #define QUAL_RM_MASK 0x0c0
612 #define QUAL_U 0x100 /* Underflow enable (fp output) */
613 #define QUAL_V 0x100 /* Overflow enable (int output) */
614 #define QUAL_S 0x400 /* Software completion enable */
615 #define QUAL_I 0x200 /* Inexact detection enable */
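/* Decode example (illustrative; standard Alpha encodings assumed): an
   ADDS/SUI instruction carries fn11 = 0x780, i.e. QUAL_S | QUAL_I |
   QUAL_U plus the default QUAL_RM_N rounding field, while the low six
   bits (fpfn) still select the ADDS operation itself. */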
617 static void gen_qual_roundmode(DisasContext *ctx, int fn11)
619 TCGv_i32 tmp;
621 fn11 &= QUAL_RM_MASK;
622 if (fn11 == ctx->tb_rm) {
623 return;
625 ctx->tb_rm = fn11;
627 tmp = tcg_temp_new_i32();
628 switch (fn11) {
629 case QUAL_RM_N:
630 tcg_gen_movi_i32(tmp, float_round_nearest_even);
631 break;
632 case QUAL_RM_C:
633 tcg_gen_movi_i32(tmp, float_round_to_zero);
634 break;
635 case QUAL_RM_M:
636 tcg_gen_movi_i32(tmp, float_round_down);
637 break;
638 case QUAL_RM_D:
639 tcg_gen_ld8u_i32(tmp, cpu_env,
640 offsetof(CPUAlphaState, fpcr_dyn_round));
641 break;
644 #if defined(CONFIG_SOFTFLOAT_INLINE)
645 /* ??? The "fpu/softfloat.h" interface is to call set_float_rounding_mode.
646 With CONFIG_SOFTFLOAT that expands to an out-of-line call that just
647 sets the one field. */
648 tcg_gen_st8_i32(tmp, cpu_env,
649 offsetof(CPUAlphaState, fp_status.float_rounding_mode));
650 #else
651 gen_helper_setroundmode(tmp);
652 #endif
654 tcg_temp_free_i32(tmp);
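/* Note (illustrative): ctx->tb_rm caches the rounding mode most
   recently stored into fp_status within this TB, so e.g. two
   consecutive instructions with the same /M qualifier emit the store
   of float_round_down only once. */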
657 static void gen_qual_flushzero(DisasContext *ctx, int fn11)
659 TCGv_i32 tmp;
661 fn11 &= QUAL_U;
662 if (fn11 == ctx->tb_ftz) {
663 return;
665 ctx->tb_ftz = fn11;
667 tmp = tcg_temp_new_i32();
668 if (fn11) {
669 /* Underflow is enabled, use the FPCR setting. */
670 tcg_gen_ld8u_i32(tmp, cpu_env,
671 offsetof(CPUAlphaState, fpcr_flush_to_zero));
672 } else {
673 /* Underflow is disabled, force flush-to-zero. */
674 tcg_gen_movi_i32(tmp, 1);
677 #if defined(CONFIG_SOFTFLOAT_INLINE)
678 tcg_gen_st8_i32(tmp, cpu_env,
679 offsetof(CPUAlphaState, fp_status.flush_to_zero));
680 #else
681 gen_helper_setflushzero(tmp);
682 #endif
684 tcg_temp_free_i32(tmp);
687 static TCGv gen_ieee_input(DisasContext *ctx, int reg, int fn11, int is_cmp)
689 TCGv val;
691 if (unlikely(reg == 31)) {
692 val = load_zero(ctx);
693 } else {
694 val = cpu_fir[reg];
695 if ((fn11 & QUAL_S) == 0) {
696 if (is_cmp) {
697 gen_helper_ieee_input_cmp(cpu_env, val);
698 } else {
699 gen_helper_ieee_input(cpu_env, val);
701 } else {
702 #ifndef CONFIG_USER_ONLY
703 /* In system mode, raise exceptions for denormals like real
704 hardware. In user mode, proceed as if the OS completion
705 handler is handling the denormal as per spec. */
706 gen_helper_ieee_input_s(cpu_env, val);
707 #endif
710 return val;
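/* Behavior sketch (illustrative): without /S, the ieee_input helpers
   may trap immediately on exceptional operands, since there is no
   software completion to fall back on; with /S, user mode accepts the
   raw value and defers everything to gen_fp_exc_raise, while system
   mode still calls gen_helper_ieee_input_s so denormals trap as on
   real hardware, per the comment above. */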
713 static void gen_fp_exc_raise(int rc, int fn11)
715 /* ??? We ought to be able to do something with imprecise exceptions.
716 E.g. notice we're still in the trap shadow of something within the
717 TB and do not generate the code to signal the exception; end the TB
718 when an exception is forced to arrive, either by consumption of a
719 register value or TRAPB or EXCB. */
720 TCGv_i32 reg, ign;
721 uint32_t ignore = 0;
723 if (!(fn11 & QUAL_U)) {
724 /* Note that QUAL_U == QUAL_V, so ignore either. */
725 ignore |= FPCR_UNF | FPCR_IOV;
727 if (!(fn11 & QUAL_I)) {
728 ignore |= FPCR_INE;
730 ign = tcg_const_i32(ignore);
732 /* ??? Pass in the regno of the destination so that the helper can
733 set EXC_MASK, which contains a bitmask of destination registers
734 that have caused arithmetic traps. A simple userspace emulation
735 does not require this. We do need it for a guest kernel's entArith,
736 or if we were to do something clever with imprecise exceptions. */
737 reg = tcg_const_i32(rc + 32);
738 if (fn11 & QUAL_S) {
739 gen_helper_fp_exc_raise_s(cpu_env, ign, reg);
740 } else {
741 gen_helper_fp_exc_raise(cpu_env, ign, reg);
744 tcg_temp_free_i32(reg);
745 tcg_temp_free_i32(ign);
748 static void gen_cvtlq(TCGv vc, TCGv vb)
750 TCGv tmp = tcg_temp_new();
752 /* The arithmetic right shift here, combined with the sign-extended
753 mask below, yields a sign-extended result without an explicit ext32s_i64. */
754 tcg_gen_shri_i64(tmp, vb, 29);
755 tcg_gen_sari_i64(vc, vb, 32);
756 tcg_gen_deposit_i64(vc, vc, tmp, 0, 30);
758 tcg_temp_free(tmp);
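/* Worked example (illustrative): a longword is kept in an FP register
   with integer bits <31:30> at <63:62> and bits <29:0> at <58:29>.
   The logical shift right by 29 re-aligns the low 30 bits, the
   arithmetic shift right by 32 re-aligns (and sign-extends) bits
   <31:30>, and the deposit merges the two pieces into a canonical
   sign-extended 64-bit integer. */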
761 static void gen_ieee_arith2(DisasContext *ctx,
762 void (*helper)(TCGv, TCGv_ptr, TCGv),
763 int rb, int rc, int fn11)
765 TCGv vb;
767 gen_qual_roundmode(ctx, fn11);
768 gen_qual_flushzero(ctx, fn11);
770 vb = gen_ieee_input(ctx, rb, fn11, 0);
771 helper(dest_fpr(ctx, rc), cpu_env, vb);
773 gen_fp_exc_raise(rc, fn11);
776 #define IEEE_ARITH2(name) \
777 static inline void glue(gen_, name)(DisasContext *ctx, \
778 int rb, int rc, int fn11) \
780 gen_ieee_arith2(ctx, gen_helper_##name, rb, rc, fn11); \
782 IEEE_ARITH2(sqrts)
783 IEEE_ARITH2(sqrtt)
784 IEEE_ARITH2(cvtst)
785 IEEE_ARITH2(cvtts)
787 static void gen_cvttq(DisasContext *ctx, int rb, int rc, int fn11)
789 TCGv vb, vc;
791 /* No need to set flushzero, since we have an integer output. */
792 vb = gen_ieee_input(ctx, rb, fn11, 0);
793 vc = dest_fpr(ctx, rc);
795 /* Almost all integer conversions use cropped rounding;
796 special case that. */
797 if ((fn11 & QUAL_RM_MASK) == QUAL_RM_C) {
798 gen_helper_cvttq_c(vc, cpu_env, vb);
799 } else {
800 gen_qual_roundmode(ctx, fn11);
801 gen_helper_cvttq(vc, cpu_env, vb);
803 gen_fp_exc_raise(rc, fn11);
806 static void gen_ieee_intcvt(DisasContext *ctx,
807 void (*helper)(TCGv, TCGv_ptr, TCGv),
808 int rb, int rc, int fn11)
810 TCGv vb, vc;
812 gen_qual_roundmode(ctx, fn11);
813 vb = load_fpr(ctx, rb);
814 vc = dest_fpr(ctx, rc);
816 /* The only exception that can be raised by integer conversion
817 is inexact. Thus we only need to worry about exceptions when
818 inexact handling is requested. */
819 if (fn11 & QUAL_I) {
820 helper(vc, cpu_env, vb);
821 gen_fp_exc_raise(rc, fn11);
822 } else {
823 helper(vc, cpu_env, vb);
827 #define IEEE_INTCVT(name) \
828 static inline void glue(gen_, name)(DisasContext *ctx, \
829 int rb, int rc, int fn11) \
831 gen_ieee_intcvt(ctx, gen_helper_##name, rb, rc, fn11); \
833 IEEE_INTCVT(cvtqs)
834 IEEE_INTCVT(cvtqt)
836 static void gen_cpy_mask(TCGv vc, TCGv va, TCGv vb, bool inv_a, uint64_t mask)
838 TCGv vmask = tcg_const_i64(mask);
839 TCGv tmp = tcg_temp_new_i64();
841 if (inv_a) {
842 tcg_gen_andc_i64(tmp, vmask, va);
843 } else {
844 tcg_gen_and_i64(tmp, va, vmask);
847 tcg_gen_andc_i64(vc, vb, vmask);
848 tcg_gen_or_i64(vc, vc, tmp);
850 tcg_temp_free(vmask);
851 tcg_temp_free(tmp);
854 static void gen_ieee_arith3(DisasContext *ctx,
855 void (*helper)(TCGv, TCGv_ptr, TCGv, TCGv),
856 int ra, int rb, int rc, int fn11)
858 TCGv va, vb, vc;
860 gen_qual_roundmode(ctx, fn11);
861 gen_qual_flushzero(ctx, fn11);
863 va = gen_ieee_input(ctx, ra, fn11, 0);
864 vb = gen_ieee_input(ctx, rb, fn11, 0);
865 vc = dest_fpr(ctx, rc);
866 helper(vc, cpu_env, va, vb);
868 gen_fp_exc_raise(rc, fn11);
871 #define IEEE_ARITH3(name) \
872 static inline void glue(gen_, name)(DisasContext *ctx, \
873 int ra, int rb, int rc, int fn11) \
875 gen_ieee_arith3(ctx, gen_helper_##name, ra, rb, rc, fn11); \
877 IEEE_ARITH3(adds)
878 IEEE_ARITH3(subs)
879 IEEE_ARITH3(muls)
880 IEEE_ARITH3(divs)
881 IEEE_ARITH3(addt)
882 IEEE_ARITH3(subt)
883 IEEE_ARITH3(mult)
884 IEEE_ARITH3(divt)
886 static void gen_ieee_compare(DisasContext *ctx,
887 void (*helper)(TCGv, TCGv_ptr, TCGv, TCGv),
888 int ra, int rb, int rc, int fn11)
890 TCGv va, vb, vc;
892 va = gen_ieee_input(ctx, ra, fn11, 1);
893 vb = gen_ieee_input(ctx, rb, fn11, 1);
894 vc = dest_fpr(ctx, rc);
895 helper(vc, cpu_env, va, vb);
897 gen_fp_exc_raise(rc, fn11);
900 #define IEEE_CMP3(name) \
901 static inline void glue(gen_, name)(DisasContext *ctx, \
902 int ra, int rb, int rc, int fn11) \
904 gen_ieee_compare(ctx, gen_helper_##name, ra, rb, rc, fn11); \
906 IEEE_CMP3(cmptun)
907 IEEE_CMP3(cmpteq)
908 IEEE_CMP3(cmptlt)
909 IEEE_CMP3(cmptle)
911 static inline uint64_t zapnot_mask(uint8_t lit)
913 uint64_t mask = 0;
914 int i;
916 for (i = 0; i < 8; ++i) {
917 if ((lit >> i) & 1) {
918 mask |= 0xffull << (i * 8);
921 return mask;
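/* Example (illustrative): lit = 0x0f selects the low four bytes, so
   zapnot_mask(0x0f) == 0x00000000ffffffffull and the corresponding
   ZAPNOT is exactly a 32-bit zero-extension -- which is why 0x0f gets
   its own fast path in gen_zapnoti below. */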
924 /* Implement zapnot with an immediate operand, which expands to some
925 form of immediate AND. This is a basic building block in the
926 definition of many of the other byte manipulation instructions. */
927 static void gen_zapnoti(TCGv dest, TCGv src, uint8_t lit)
929 switch (lit) {
930 case 0x00:
931 tcg_gen_movi_i64(dest, 0);
932 break;
933 case 0x01:
934 tcg_gen_ext8u_i64(dest, src);
935 break;
936 case 0x03:
937 tcg_gen_ext16u_i64(dest, src);
938 break;
939 case 0x0f:
940 tcg_gen_ext32u_i64(dest, src);
941 break;
942 case 0xff:
943 tcg_gen_mov_i64(dest, src);
944 break;
945 default:
946 tcg_gen_andi_i64(dest, src, zapnot_mask(lit));
947 break;
951 /* EXTWH, EXTLH, EXTQH */
952 static void gen_ext_h(DisasContext *ctx, TCGv vc, TCGv va, int rb, bool islit,
953 uint8_t lit, uint8_t byte_mask)
955 if (islit) {
956 int pos = (64 - lit * 8) & 0x3f;
957 int len = cto32(byte_mask) * 8;
958 if (pos < len) {
959 tcg_gen_deposit_z_i64(vc, va, pos, len - pos);
960 } else {
961 tcg_gen_movi_i64(vc, 0);
963 } else {
964 TCGv tmp = tcg_temp_new();
965 tcg_gen_shli_i64(tmp, load_gpr(ctx, rb), 3);
966 tcg_gen_neg_i64(tmp, tmp);
967 tcg_gen_andi_i64(tmp, tmp, 0x3f);
968 tcg_gen_shl_i64(vc, va, tmp);
969 tcg_temp_free(tmp);
971 gen_zapnoti(vc, vc, byte_mask);
974 /* EXTBL, EXTWL, EXTLL, EXTQL */
975 static void gen_ext_l(DisasContext *ctx, TCGv vc, TCGv va, int rb, bool islit,
976 uint8_t lit, uint8_t byte_mask)
978 if (islit) {
979 int pos = (lit & 7) * 8;
980 int len = cto32(byte_mask) * 8;
981 if (pos + len >= 64) {
982 len = 64 - pos;
984 tcg_gen_extract_i64(vc, va, pos, len);
985 } else {
986 TCGv tmp = tcg_temp_new();
987 tcg_gen_andi_i64(tmp, load_gpr(ctx, rb), 7);
988 tcg_gen_shli_i64(tmp, tmp, 3);
989 tcg_gen_shr_i64(vc, va, tmp);
990 tcg_temp_free(tmp);
991 gen_zapnoti(vc, vc, byte_mask);
995 /* INSWH, INSLH, INSQH */
996 static void gen_ins_h(DisasContext *ctx, TCGv vc, TCGv va, int rb, bool islit,
997 uint8_t lit, uint8_t byte_mask)
999 if (islit) {
1000 int pos = 64 - (lit & 7) * 8;
1001 int len = cto32(byte_mask) * 8;
1002 if (pos < len) {
1003 tcg_gen_extract_i64(vc, va, pos, len - pos);
1004 } else {
1005 tcg_gen_movi_i64(vc, 0);
1007 } else {
1008 TCGv tmp = tcg_temp_new();
1009 TCGv shift = tcg_temp_new();
1011 /* The instruction description has us left-shift the byte mask
1012 and extract bits <15:8> and apply that zap at the end. This
1013 is equivalent to simply performing the zap first and shifting
1014 afterward. */
1015 gen_zapnoti(tmp, va, byte_mask);
1017 /* If (B & 7) == 0, we need to shift by 64 and leave a zero. Do this
1018 portably by splitting the shift into two parts: shift_count-1 and 1.
1019 Arrange for the -1 by using ones-complement instead of
1020 twos-complement in the negation: ~(B * 8) & 63. */
1022 tcg_gen_shli_i64(shift, load_gpr(ctx, rb), 3);
1023 tcg_gen_not_i64(shift, shift);
1024 tcg_gen_andi_i64(shift, shift, 0x3f);
1026 tcg_gen_shr_i64(vc, tmp, shift);
1027 tcg_gen_shri_i64(vc, vc, 1);
1028 tcg_temp_free(shift);
1029 tcg_temp_free(tmp);
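/* Worked example (illustrative): for rb & 7 == 0 the required shift is
   64, which a single TCG shift cannot express. ~(0 * 8) & 63 == 63, so
   the variable shift moves 63 bits and the constant shri of 1 supplies
   the 64th, leaving the guaranteed zero result. */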
1033 /* INSBL, INSWL, INSLL, INSQL */
1034 static void gen_ins_l(DisasContext *ctx, TCGv vc, TCGv va, int rb, bool islit,
1035 uint8_t lit, uint8_t byte_mask)
1037 if (islit) {
1038 int pos = (lit & 7) * 8;
1039 int len = cto32(byte_mask) * 8;
1040 if (pos + len > 64) {
1041 len = 64 - pos;
1043 tcg_gen_deposit_z_i64(vc, va, pos, len);
1044 } else {
1045 TCGv tmp = tcg_temp_new();
1046 TCGv shift = tcg_temp_new();
1048 /* The instruction description has us left-shift the byte mask
1049 and extract bits <15:8> and apply that zap at the end. This
1050 is equivalent to simply performing the zap first and shifting
1051 afterward. */
1052 gen_zapnoti(tmp, va, byte_mask);
1054 tcg_gen_andi_i64(shift, load_gpr(ctx, rb), 7);
1055 tcg_gen_shli_i64(shift, shift, 3);
1056 tcg_gen_shl_i64(vc, tmp, shift);
1057 tcg_temp_free(shift);
1058 tcg_temp_free(tmp);
1062 /* MSKWH, MSKLH, MSKQH */
1063 static void gen_msk_h(DisasContext *ctx, TCGv vc, TCGv va, int rb, bool islit,
1064 uint8_t lit, uint8_t byte_mask)
1066 if (islit) {
1067 gen_zapnoti(vc, va, ~((byte_mask << (lit & 7)) >> 8));
1068 } else {
1069 TCGv shift = tcg_temp_new();
1070 TCGv mask = tcg_temp_new();
1072 /* The instruction description is as above, where the byte_mask
1073 is shifted left, and then we extract bits <15:8>. This can be
1074 emulated with a right-shift on the expanded byte mask. This
1075 requires extra care because for an input <2:0> == 0 we need a
1076 shift of 64 bits in order to generate a zero. This is done by
1077 splitting the shift into two parts, the variable shift - 1
1078 followed by a constant 1 shift. The code we expand below is
1079 equivalent to ~(B * 8) & 63. */
1081 tcg_gen_shli_i64(shift, load_gpr(ctx, rb), 3);
1082 tcg_gen_not_i64(shift, shift);
1083 tcg_gen_andi_i64(shift, shift, 0x3f);
1084 tcg_gen_movi_i64(mask, zapnot_mask(byte_mask));
1085 tcg_gen_shr_i64(mask, mask, shift);
1086 tcg_gen_shri_i64(mask, mask, 1);
1088 tcg_gen_andc_i64(vc, va, mask);
1090 tcg_temp_free(mask);
1091 tcg_temp_free(shift);
1095 /* MSKBL, MSKWL, MSKLL, MSKQL */
1096 static void gen_msk_l(DisasContext *ctx, TCGv vc, TCGv va, int rb, bool islit,
1097 uint8_t lit, uint8_t byte_mask)
1099 if (islit) {
1100 gen_zapnoti(vc, va, ~(byte_mask << (lit & 7)));
1101 } else {
1102 TCGv shift = tcg_temp_new();
1103 TCGv mask = tcg_temp_new();
1105 tcg_gen_andi_i64(shift, load_gpr(ctx, rb), 7);
1106 tcg_gen_shli_i64(shift, shift, 3);
1107 tcg_gen_movi_i64(mask, zapnot_mask(byte_mask));
1108 tcg_gen_shl_i64(mask, mask, shift);
1110 tcg_gen_andc_i64(vc, va, mask);
1112 tcg_temp_free(mask);
1113 tcg_temp_free(shift);
1117 static void gen_rx(DisasContext *ctx, int ra, int set)
1119 TCGv tmp;
1121 if (ra != 31) {
1122 ld_flag_byte(ctx->ir[ra], ENV_FLAG_RX_SHIFT);
1125 tmp = tcg_const_i64(set);
1126 st_flag_byte(ctx->ir[ra], ENV_FLAG_RX_SHIFT);
1127 tcg_temp_free(tmp);
1130 static DisasJumpType gen_call_pal(DisasContext *ctx, int palcode)
1132 /* We're emulating OSF/1 PALcode. Many of these are trivial access
1133 to internal cpu registers. */
1135 /* Unprivileged PAL call */
1136 if (palcode >= 0x80 && palcode < 0xC0) {
1137 switch (palcode) {
1138 case 0x86:
1139 /* IMB */
1140 /* No-op inside QEMU. */
1141 break;
1142 case 0x9E:
1143 /* RDUNIQUE */
1144 tcg_gen_ld_i64(ctx->ir[IR_V0], cpu_env,
1145 offsetof(CPUAlphaState, unique));
1146 break;
1147 case 0x9F:
1148 /* WRUNIQUE */
1149 tcg_gen_st_i64(ctx->ir[IR_A0], cpu_env,
1150 offsetof(CPUAlphaState, unique));
1151 break;
1152 default:
1153 palcode &= 0xbf;
1154 goto do_call_pal;
1156 return DISAS_NEXT;
1159 #ifndef CONFIG_USER_ONLY
1160 /* Privileged PAL code */
1161 if (palcode < 0x40 && (ctx->tbflags & ENV_FLAG_PS_USER) == 0) {
1162 switch (palcode) {
1163 case 0x01:
1164 /* CFLUSH */
1165 /* No-op inside QEMU. */
1166 break;
1167 case 0x02:
1168 /* DRAINA */
1169 /* No-op inside QEMU. */
1170 break;
1171 case 0x2D:
1172 /* WRVPTPTR */
1173 tcg_gen_st_i64(ctx->ir[IR_A0], cpu_env,
1174 offsetof(CPUAlphaState, vptptr));
1175 break;
1176 case 0x31:
1177 /* WRVAL */
1178 tcg_gen_st_i64(ctx->ir[IR_A0], cpu_env,
1179 offsetof(CPUAlphaState, sysval));
1180 break;
1181 case 0x32:
1182 /* RDVAL */
1183 tcg_gen_ld_i64(ctx->ir[IR_V0], cpu_env,
1184 offsetof(CPUAlphaState, sysval));
1185 break;
1187 case 0x35:
1188 /* SWPIPL */
1189 /* Note that we already know we're in kernel mode, so we know
1190 that PS only contains the 3 IPL bits. */
1191 ld_flag_byte(ctx->ir[IR_V0], ENV_FLAG_PS_SHIFT);
1193 /* But be sure to store only the 3 IPL bits from the user. */
1195 TCGv tmp = tcg_temp_new();
1196 tcg_gen_andi_i64(tmp, ctx->ir[IR_A0], PS_INT_MASK);
1197 st_flag_byte(tmp, ENV_FLAG_PS_SHIFT);
1198 tcg_temp_free(tmp);
1201 /* Allow interrupts to be recognized right away. */
1202 tcg_gen_movi_i64(cpu_pc, ctx->base.pc_next);
1203 return DISAS_PC_UPDATED_NOCHAIN;
1205 case 0x36:
1206 /* RDPS */
1207 ld_flag_byte(ctx->ir[IR_V0], ENV_FLAG_PS_SHIFT);
1208 break;
1210 case 0x38:
1211 /* WRUSP */
1212 tcg_gen_st_i64(ctx->ir[IR_A0], cpu_env,
1213 offsetof(CPUAlphaState, usp));
1214 break;
1215 case 0x3A:
1216 /* RDUSP */
1217 tcg_gen_ld_i64(ctx->ir[IR_V0], cpu_env,
1218 offsetof(CPUAlphaState, usp));
1219 break;
1220 case 0x3C:
1221 /* WHAMI */
1222 tcg_gen_ld32s_i64(ctx->ir[IR_V0], cpu_env,
1223 -offsetof(AlphaCPU, env) + offsetof(CPUState, cpu_index));
1224 break;
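/* Note (illustrative): cpu_env points at the CPUAlphaState embedded in
   AlphaCPU, so the negative offset above walks back to the containing
   AlphaCPU and then forward into its CPUState to fetch cpu_index -- in
   effect a container_of computed at translate time. */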
1226 case 0x3E:
1227 /* WTINT */
1229 TCGv_i32 tmp = tcg_const_i32(1);
1230 tcg_gen_st_i32(tmp, cpu_env, -offsetof(AlphaCPU, env) +
1231 offsetof(CPUState, halted));
1232 tcg_temp_free_i32(tmp);
1234 tcg_gen_movi_i64(ctx->ir[IR_V0], 0);
1235 return gen_excp(ctx, EXCP_HALTED, 0);
1237 default:
1238 palcode &= 0x3f;
1239 goto do_call_pal;
1241 return DISAS_NEXT;
1243 #endif
1244 return gen_invalid(ctx);
1246 do_call_pal:
1247 #ifdef CONFIG_USER_ONLY
1248 return gen_excp(ctx, EXCP_CALL_PAL, palcode);
1249 #else
1251 TCGv tmp = tcg_temp_new();
1252 uint64_t exc_addr = ctx->base.pc_next;
1253 uint64_t entry = ctx->palbr;
1255 if (ctx->tbflags & ENV_FLAG_PAL_MODE) {
1256 exc_addr |= 1;
1257 } else {
1258 tcg_gen_movi_i64(tmp, 1);
1259 st_flag_byte(tmp, ENV_FLAG_PAL_SHIFT);
1262 tcg_gen_movi_i64(tmp, exc_addr);
1263 tcg_gen_st_i64(tmp, cpu_env, offsetof(CPUAlphaState, exc_addr));
1264 tcg_temp_free(tmp);
1266 entry += (palcode & 0x80
1267 ? 0x2000 + (palcode - 0x80) * 64
1268 : 0x1000 + palcode * 64);
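/* Worked example (illustrative; OSF/1 PALcode entry numbering assumed):
   an unprivileged CALL_PAL 0x83 (callsys) that reaches this point
   enters at palbr + 0x2000 + 3 * 64, while a privileged CALL_PAL 0x30
   enters at palbr + 0x1000 + 0x30 * 64. */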
1270 /* Since the destination is running in PALmode, we don't really
1271 need the page permissions check. We'll see the existence of
1272 the page when we create the TB, and we'll flush all TBs if
1273 we change the PAL base register. */
1274 if (!use_exit_tb(ctx)) {
1275 tcg_gen_goto_tb(0);
1276 tcg_gen_movi_i64(cpu_pc, entry);
1277 tcg_gen_exit_tb(ctx->base.tb, 0);
1278 return DISAS_NORETURN;
1279 } else {
1280 tcg_gen_movi_i64(cpu_pc, entry);
1281 return DISAS_PC_UPDATED;
1284 #endif
1287 #ifndef CONFIG_USER_ONLY
1289 #define PR_LONG 0x200000
1291 static int cpu_pr_data(int pr)
1293 switch (pr) {
1294 case 2: return offsetof(CPUAlphaState, pcc_ofs) | PR_LONG;
1295 case 3: return offsetof(CPUAlphaState, trap_arg0);
1296 case 4: return offsetof(CPUAlphaState, trap_arg1);
1297 case 5: return offsetof(CPUAlphaState, trap_arg2);
1298 case 6: return offsetof(CPUAlphaState, exc_addr);
1299 case 7: return offsetof(CPUAlphaState, palbr);
1300 case 8: return offsetof(CPUAlphaState, ptbr);
1301 case 9: return offsetof(CPUAlphaState, vptptr);
1302 case 10: return offsetof(CPUAlphaState, unique);
1303 case 11: return offsetof(CPUAlphaState, sysval);
1304 case 12: return offsetof(CPUAlphaState, usp);
1306 case 40 ... 63:
1307 return offsetof(CPUAlphaState, scratch[pr - 40]);
1309 case 251:
1310 return offsetof(CPUAlphaState, alarm_expire);
1312 return 0;
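/* Note (illustrative): PR_LONG tags an offset whose underlying field is
   32 bits wide, so gen_mfpr/gen_mtpr below strip the tag with
   "data & ~PR_LONG" and switch to ld32s/st32 accesses; a return value
   of 0 marks the register as read-zero, write-ignore. */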
1315 static DisasJumpType gen_mfpr(DisasContext *ctx, TCGv va, int regno)
1317 void (*helper)(TCGv);
1318 int data;
1320 switch (regno) {
1321 case 32 ... 39:
1322 /* Accessing the "non-shadow" general registers. */
1323 regno = regno == 39 ? 25 : regno - 32 + 8;
1324 tcg_gen_mov_i64(va, cpu_std_ir[regno]);
1325 break;
1327 case 250: /* WALLTIME */
1328 helper = gen_helper_get_walltime;
1329 goto do_helper;
1330 case 249: /* VMTIME */
1331 helper = gen_helper_get_vmtime;
1332 do_helper:
1333 if (tb_cflags(ctx->base.tb) & CF_USE_ICOUNT) {
1334 gen_io_start();
1335 helper(va);
1336 return DISAS_PC_STALE;
1337 } else {
1338 helper(va);
1340 break;
1342 case 0: /* PS */
1343 ld_flag_byte(va, ENV_FLAG_PS_SHIFT);
1344 break;
1345 case 1: /* FEN */
1346 ld_flag_byte(va, ENV_FLAG_FEN_SHIFT);
1347 break;
1349 default:
1350 /* The basic registers are data only, and unknown registers
1351 are read-zero, write-ignore. */
1352 data = cpu_pr_data(regno);
1353 if (data == 0) {
1354 tcg_gen_movi_i64(va, 0);
1355 } else if (data & PR_LONG) {
1356 tcg_gen_ld32s_i64(va, cpu_env, data & ~PR_LONG);
1357 } else {
1358 tcg_gen_ld_i64(va, cpu_env, data);
1360 break;
1363 return DISAS_NEXT;
1366 static DisasJumpType gen_mtpr(DisasContext *ctx, TCGv vb, int regno)
1368 int data;
1369 DisasJumpType ret = DISAS_NEXT;
1371 switch (regno) {
1372 case 255:
1373 /* TBIA */
1374 gen_helper_tbia(cpu_env);
1375 break;
1377 case 254:
1378 /* TBIS */
1379 gen_helper_tbis(cpu_env, vb);
1380 break;
1382 case 253:
1383 /* WAIT */
1385 TCGv_i32 tmp = tcg_const_i32(1);
1386 tcg_gen_st_i32(tmp, cpu_env, -offsetof(AlphaCPU, env) +
1387 offsetof(CPUState, halted));
1388 tcg_temp_free_i32(tmp);
1390 return gen_excp(ctx, EXCP_HALTED, 0);
1392 case 252:
1393 /* HALT */
1394 gen_helper_halt(vb);
1395 return DISAS_PC_STALE;
1397 case 251:
1398 /* ALARM */
1399 if (tb_cflags(ctx->base.tb) & CF_USE_ICOUNT) {
1400 gen_io_start();
1401 ret = DISAS_PC_STALE;
1403 gen_helper_set_alarm(cpu_env, vb);
1404 break;
1406 case 7:
1407 /* PALBR */
1408 tcg_gen_st_i64(vb, cpu_env, offsetof(CPUAlphaState, palbr));
1409 /* Changing the PAL base register implies un-chaining all of the TBs
1410 that ended with a CALL_PAL. Since the base register usually only
1411 changes during boot, flushing everything works well. */
1412 gen_helper_tb_flush(cpu_env);
1413 return DISAS_PC_STALE;
1415 case 32 ... 39:
1416 /* Accessing the "non-shadow" general registers. */
1417 regno = regno == 39 ? 25 : regno - 32 + 8;
1418 tcg_gen_mov_i64(cpu_std_ir[regno], vb);
1419 break;
1421 case 0: /* PS */
1422 st_flag_byte(vb, ENV_FLAG_PS_SHIFT);
1423 break;
1424 case 1: /* FEN */
1425 st_flag_byte(vb, ENV_FLAG_FEN_SHIFT);
1426 break;
1428 default:
1429 /* The basic registers are data only, and unknown registers
1430 are read-zero, write-ignore. */
1431 data = cpu_pr_data(regno);
1432 if (data != 0) {
1433 if (data & PR_LONG) {
1434 tcg_gen_st32_i64(vb, cpu_env, data & ~PR_LONG);
1435 } else {
1436 tcg_gen_st_i64(vb, cpu_env, data);
1439 break;
1442 return ret;
1444 #endif /* !CONFIG_USER_ONLY */
1446 #define REQUIRE_NO_LIT \
1447 do { \
1448 if (real_islit) { \
1449 goto invalid_opc; \
1451 } while (0)
1453 #define REQUIRE_AMASK(FLAG) \
1454 do { \
1455 if ((ctx->amask & AMASK_##FLAG) == 0) { \
1456 goto invalid_opc; \
1458 } while (0)
1460 #define REQUIRE_TB_FLAG(FLAG) \
1461 do { \
1462 if ((ctx->tbflags & (FLAG)) == 0) { \
1463 goto invalid_opc; \
1465 } while (0)
1467 #define REQUIRE_REG_31(WHICH) \
1468 do { \
1469 if (WHICH != 31) { \
1470 goto invalid_opc; \
1472 } while (0)
1474 #define REQUIRE_FEN \
1475 do { \
1476 if (!(ctx->tbflags & ENV_FLAG_FEN)) { \
1477 goto raise_fen; \
1479 } while (0)
1481 static DisasJumpType translate_one(DisasContext *ctx, uint32_t insn)
1483 int32_t disp21, disp16, disp12 __attribute__((unused));
1484 uint16_t fn11;
1485 uint8_t opc, ra, rb, rc, fpfn, fn7, lit;
1486 bool islit, real_islit;
1487 TCGv va, vb, vc, tmp, tmp2;
1488 TCGv_i32 t32;
1489 DisasJumpType ret;
1491 /* Decode all instruction fields */
1492 opc = extract32(insn, 26, 6);
1493 ra = extract32(insn, 21, 5);
1494 rb = extract32(insn, 16, 5);
1495 rc = extract32(insn, 0, 5);
1496 real_islit = islit = extract32(insn, 12, 1);
1497 lit = extract32(insn, 13, 8);
1499 disp21 = sextract32(insn, 0, 21);
1500 disp16 = sextract32(insn, 0, 16);
1501 disp12 = sextract32(insn, 0, 12);
1503 fn11 = extract32(insn, 5, 11);
1504 fpfn = extract32(insn, 5, 6);
1505 fn7 = extract32(insn, 5, 7);
1507 if (rb == 31 && !islit) {
1508 islit = true;
1509 lit = 0;
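/* Decode example (illustrative): the operate instruction
   "addq $1, $2, $3" encodes as 0x40220403 (opc 0x10, ra 1, rb 2,
   fn7 0x20, rc 3, islit 0), while the literal form "addq $1, 5, $3"
   encodes as 0x4020b403 with islit set and lit == 5. The fixup above
   means an rb of $31 is likewise treated as the literal 0. */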
1512 ret = DISAS_NEXT;
1513 switch (opc) {
1514 case 0x00:
1515 /* CALL_PAL */
1516 ret = gen_call_pal(ctx, insn & 0x03ffffff);
1517 break;
1518 case 0x01:
1519 /* OPC01 */
1520 goto invalid_opc;
1521 case 0x02:
1522 /* OPC02 */
1523 goto invalid_opc;
1524 case 0x03:
1525 /* OPC03 */
1526 goto invalid_opc;
1527 case 0x04:
1528 /* OPC04 */
1529 goto invalid_opc;
1530 case 0x05:
1531 /* OPC05 */
1532 goto invalid_opc;
1533 case 0x06:
1534 /* OPC06 */
1535 goto invalid_opc;
1536 case 0x07:
1537 /* OPC07 */
1538 goto invalid_opc;
1540 case 0x09:
1541 /* LDAH */
1542 disp16 = (uint32_t)disp16 << 16;
1543 /* fall through */
1544 case 0x08:
1545 /* LDA */
1546 va = dest_gpr(ctx, ra);
1547 /* It's worth special-casing immediate loads. */
1548 if (rb == 31) {
1549 tcg_gen_movi_i64(va, disp16);
1550 } else {
1551 tcg_gen_addi_i64(va, load_gpr(ctx, rb), disp16);
1553 break;
1555 case 0x0A:
1556 /* LDBU */
1557 REQUIRE_AMASK(BWX);
1558 gen_load_mem(ctx, &tcg_gen_qemu_ld8u, ra, rb, disp16, 0, 0);
1559 break;
1560 case 0x0B:
1561 /* LDQ_U */
1562 gen_load_mem(ctx, &tcg_gen_qemu_ld64, ra, rb, disp16, 0, 1);
1563 break;
1564 case 0x0C:
1565 /* LDWU */
1566 REQUIRE_AMASK(BWX);
1567 gen_load_mem(ctx, &tcg_gen_qemu_ld16u, ra, rb, disp16, 0, 0);
1568 break;
1569 case 0x0D:
1570 /* STW */
1571 REQUIRE_AMASK(BWX);
1572 gen_store_mem(ctx, &tcg_gen_qemu_st16, ra, rb, disp16, 0, 0);
1573 break;
1574 case 0x0E:
1575 /* STB */
1576 REQUIRE_AMASK(BWX);
1577 gen_store_mem(ctx, &tcg_gen_qemu_st8, ra, rb, disp16, 0, 0);
1578 break;
1579 case 0x0F:
1580 /* STQ_U */
1581 gen_store_mem(ctx, &tcg_gen_qemu_st64, ra, rb, disp16, 0, 1);
1582 break;
1584 case 0x10:
1585 vc = dest_gpr(ctx, rc);
1586 vb = load_gpr_lit(ctx, rb, lit, islit);
1588 if (ra == 31) {
1589 if (fn7 == 0x00) {
1590 /* Special case ADDL as SEXTL. */
1591 tcg_gen_ext32s_i64(vc, vb);
1592 break;
1594 if (fn7 == 0x29) {
1595 /* Special case SUBQ as NEGQ. */
1596 tcg_gen_neg_i64(vc, vb);
1597 break;
1601 va = load_gpr(ctx, ra);
1602 switch (fn7) {
1603 case 0x00:
1604 /* ADDL */
1605 tcg_gen_add_i64(vc, va, vb);
1606 tcg_gen_ext32s_i64(vc, vc);
1607 break;
1608 case 0x02:
1609 /* S4ADDL */
1610 tmp = tcg_temp_new();
1611 tcg_gen_shli_i64(tmp, va, 2);
1612 tcg_gen_add_i64(tmp, tmp, vb);
1613 tcg_gen_ext32s_i64(vc, tmp);
1614 tcg_temp_free(tmp);
1615 break;
1616 case 0x09:
1617 /* SUBL */
1618 tcg_gen_sub_i64(vc, va, vb);
1619 tcg_gen_ext32s_i64(vc, vc);
1620 break;
1621 case 0x0B:
1622 /* S4SUBL */
1623 tmp = tcg_temp_new();
1624 tcg_gen_shli_i64(tmp, va, 2);
1625 tcg_gen_sub_i64(tmp, tmp, vb);
1626 tcg_gen_ext32s_i64(vc, tmp);
1627 tcg_temp_free(tmp);
1628 break;
1629 case 0x0F:
1630 /* CMPBGE */
1631 if (ra == 31) {
1632 /* Special case 0 >= X as X == 0. */
1633 gen_helper_cmpbe0(vc, vb);
1634 } else {
1635 gen_helper_cmpbge(vc, va, vb);
1637 break;
1638 case 0x12:
1639 /* S8ADDL */
1640 tmp = tcg_temp_new();
1641 tcg_gen_shli_i64(tmp, va, 3);
1642 tcg_gen_add_i64(tmp, tmp, vb);
1643 tcg_gen_ext32s_i64(vc, tmp);
1644 tcg_temp_free(tmp);
1645 break;
1646 case 0x1B:
1647 /* S8SUBL */
1648 tmp = tcg_temp_new();
1649 tcg_gen_shli_i64(tmp, va, 3);
1650 tcg_gen_sub_i64(tmp, tmp, vb);
1651 tcg_gen_ext32s_i64(vc, tmp);
1652 tcg_temp_free(tmp);
1653 break;
1654 case 0x1D:
1655 /* CMPULT */
1656 tcg_gen_setcond_i64(TCG_COND_LTU, vc, va, vb);
1657 break;
1658 case 0x20:
1659 /* ADDQ */
1660 tcg_gen_add_i64(vc, va, vb);
1661 break;
1662 case 0x22:
1663 /* S4ADDQ */
1664 tmp = tcg_temp_new();
1665 tcg_gen_shli_i64(tmp, va, 2);
1666 tcg_gen_add_i64(vc, tmp, vb);
1667 tcg_temp_free(tmp);
1668 break;
1669 case 0x29:
1670 /* SUBQ */
1671 tcg_gen_sub_i64(vc, va, vb);
1672 break;
1673 case 0x2B:
1674 /* S4SUBQ */
1675 tmp = tcg_temp_new();
1676 tcg_gen_shli_i64(tmp, va, 2);
1677 tcg_gen_sub_i64(vc, tmp, vb);
1678 tcg_temp_free(tmp);
1679 break;
1680 case 0x2D:
1681 /* CMPEQ */
1682 tcg_gen_setcond_i64(TCG_COND_EQ, vc, va, vb);
1683 break;
1684 case 0x32:
1685 /* S8ADDQ */
1686 tmp = tcg_temp_new();
1687 tcg_gen_shli_i64(tmp, va, 3);
1688 tcg_gen_add_i64(vc, tmp, vb);
1689 tcg_temp_free(tmp);
1690 break;
1691 case 0x3B:
1692 /* S8SUBQ */
1693 tmp = tcg_temp_new();
1694 tcg_gen_shli_i64(tmp, va, 3);
1695 tcg_gen_sub_i64(vc, tmp, vb);
1696 tcg_temp_free(tmp);
1697 break;
1698 case 0x3D:
1699 /* CMPULE */
1700 tcg_gen_setcond_i64(TCG_COND_LEU, vc, va, vb);
1701 break;
1702 case 0x40:
1703 /* ADDL/V */
1704 tmp = tcg_temp_new();
1705 tcg_gen_ext32s_i64(tmp, va);
1706 tcg_gen_ext32s_i64(vc, vb);
1707 tcg_gen_add_i64(tmp, tmp, vc);
1708 tcg_gen_ext32s_i64(vc, tmp);
1709 gen_helper_check_overflow(cpu_env, vc, tmp);
1710 tcg_temp_free(tmp);
1711 break;
1712 case 0x49:
1713 /* SUBL/V */
1714 tmp = tcg_temp_new();
1715 tcg_gen_ext32s_i64(tmp, va);
1716 tcg_gen_ext32s_i64(vc, vb);
1717 tcg_gen_sub_i64(tmp, tmp, vc);
1718 tcg_gen_ext32s_i64(vc, tmp);
1719 gen_helper_check_overflow(cpu_env, vc, tmp);
1720 tcg_temp_free(tmp);
1721 break;
1722 case 0x4D:
1723 /* CMPLT */
1724 tcg_gen_setcond_i64(TCG_COND_LT, vc, va, vb);
1725 break;
1726 case 0x60:
1727 /* ADDQ/V */
1728 tmp = tcg_temp_new();
1729 tmp2 = tcg_temp_new();
1730 tcg_gen_eqv_i64(tmp, va, vb);
1731 tcg_gen_mov_i64(tmp2, va);
1732 tcg_gen_add_i64(vc, va, vb);
1733 tcg_gen_xor_i64(tmp2, tmp2, vc);
1734 tcg_gen_and_i64(tmp, tmp, tmp2);
1735 tcg_gen_shri_i64(tmp, tmp, 63);
1736 tcg_gen_movi_i64(tmp2, 0);
1737 gen_helper_check_overflow(cpu_env, tmp, tmp2);
1738 tcg_temp_free(tmp);
1739 tcg_temp_free(tmp2);
1740 break;
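/* Overflow trick used above (illustrative note): signed addition
   overflows iff the operands have equal signs and the result's sign
   differs, i.e. bit 63 of (va EQV vb) & (va XOR result); the helper
   raises the trap when that bit, shifted down, is nonzero. The SUBQ/V
   case below uses the same scheme with XOR of the operands, since
   subtraction can only overflow when the operand signs differ. */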
1741 case 0x69:
1742 /* SUBQ/V */
1743 tmp = tcg_temp_new();
1744 tmp2 = tcg_temp_new();
1745 tcg_gen_xor_i64(tmp, va, vb);
1746 tcg_gen_mov_i64(tmp2, va);
1747 tcg_gen_sub_i64(vc, va, vb);
1748 tcg_gen_xor_i64(tmp2, tmp2, vc);
1749 tcg_gen_and_i64(tmp, tmp, tmp2);
1750 tcg_gen_shri_i64(tmp, tmp, 63);
1751 tcg_gen_movi_i64(tmp2, 0);
1752 gen_helper_check_overflow(cpu_env, tmp, tmp2);
1753 tcg_temp_free(tmp);
1754 tcg_temp_free(tmp2);
1755 break;
1756 case 0x6D:
1757 /* CMPLE */
1758 tcg_gen_setcond_i64(TCG_COND_LE, vc, va, vb);
1759 break;
1760 default:
1761 goto invalid_opc;
1763 break;
1765 case 0x11:
1766 if (fn7 == 0x20) {
1767 if (rc == 31) {
1768 /* Special case BIS as NOP. */
1769 break;
1771 if (ra == 31) {
1772 /* Special case BIS as MOV. */
1773 vc = dest_gpr(ctx, rc);
1774 if (islit) {
1775 tcg_gen_movi_i64(vc, lit);
1776 } else {
1777 tcg_gen_mov_i64(vc, load_gpr(ctx, rb));
1779 break;
1783 vc = dest_gpr(ctx, rc);
1784 vb = load_gpr_lit(ctx, rb, lit, islit);
1786 if (fn7 == 0x28 && ra == 31) {
1787 /* Special case ORNOT as NOT. */
1788 tcg_gen_not_i64(vc, vb);
1789 break;
1792 va = load_gpr(ctx, ra);
1793 switch (fn7) {
1794 case 0x00:
1795 /* AND */
1796 tcg_gen_and_i64(vc, va, vb);
1797 break;
1798 case 0x08:
1799 /* BIC */
1800 tcg_gen_andc_i64(vc, va, vb);
1801 break;
1802 case 0x14:
1803 /* CMOVLBS */
1804 tmp = tcg_temp_new();
1805 tcg_gen_andi_i64(tmp, va, 1);
1806 tcg_gen_movcond_i64(TCG_COND_NE, vc, tmp, load_zero(ctx),
1807 vb, load_gpr(ctx, rc));
1808 tcg_temp_free(tmp);
1809 break;
1810 case 0x16:
1811 /* CMOVLBC */
1812 tmp = tcg_temp_new();
1813 tcg_gen_andi_i64(tmp, va, 1);
1814 tcg_gen_movcond_i64(TCG_COND_EQ, vc, tmp, load_zero(ctx),
1815 vb, load_gpr(ctx, rc));
1816 tcg_temp_free(tmp);
1817 break;
1818 case 0x20:
1819 /* BIS */
1820 tcg_gen_or_i64(vc, va, vb);
1821 break;
1822 case 0x24:
1823 /* CMOVEQ */
1824 tcg_gen_movcond_i64(TCG_COND_EQ, vc, va, load_zero(ctx),
1825 vb, load_gpr(ctx, rc));
1826 break;
1827 case 0x26:
1828 /* CMOVNE */
1829 tcg_gen_movcond_i64(TCG_COND_NE, vc, va, load_zero(ctx),
1830 vb, load_gpr(ctx, rc));
1831 break;
1832 case 0x28:
1833 /* ORNOT */
1834 tcg_gen_orc_i64(vc, va, vb);
1835 break;
1836 case 0x40:
1837 /* XOR */
1838 tcg_gen_xor_i64(vc, va, vb);
1839 break;
1840 case 0x44:
1841 /* CMOVLT */
1842 tcg_gen_movcond_i64(TCG_COND_LT, vc, va, load_zero(ctx),
1843 vb, load_gpr(ctx, rc));
1844 break;
1845 case 0x46:
1846 /* CMOVGE */
1847 tcg_gen_movcond_i64(TCG_COND_GE, vc, va, load_zero(ctx),
1848 vb, load_gpr(ctx, rc));
1849 break;
1850 case 0x48:
1851 /* EQV */
1852 tcg_gen_eqv_i64(vc, va, vb);
1853 break;
1854 case 0x61:
1855 /* AMASK */
1856 REQUIRE_REG_31(ra);
1857 tcg_gen_andi_i64(vc, vb, ~ctx->amask);
1858 break;
1859 case 0x64:
1860 /* CMOVLE */
1861 tcg_gen_movcond_i64(TCG_COND_LE, vc, va, load_zero(ctx),
1862 vb, load_gpr(ctx, rc));
1863 break;
1864 case 0x66:
1865 /* CMOVGT */
1866 tcg_gen_movcond_i64(TCG_COND_GT, vc, va, load_zero(ctx),
1867 vb, load_gpr(ctx, rc));
1868 break;
1869 case 0x6C:
1870 /* IMPLVER */
1871 REQUIRE_REG_31(ra);
1872 tcg_gen_movi_i64(vc, ctx->implver);
1873 break;
1874 default:
1875 goto invalid_opc;
1877 break;
1879 case 0x12:
1880 vc = dest_gpr(ctx, rc);
1881 va = load_gpr(ctx, ra);
1882 switch (fn7) {
1883 case 0x02:
1884 /* MSKBL */
1885 gen_msk_l(ctx, vc, va, rb, islit, lit, 0x01);
1886 break;
1887 case 0x06:
1888 /* EXTBL */
1889 gen_ext_l(ctx, vc, va, rb, islit, lit, 0x01);
1890 break;
1891 case 0x0B:
1892 /* INSBL */
1893 gen_ins_l(ctx, vc, va, rb, islit, lit, 0x01);
1894 break;
1895 case 0x12:
1896 /* MSKWL */
1897 gen_msk_l(ctx, vc, va, rb, islit, lit, 0x03);
1898 break;
1899 case 0x16:
1900 /* EXTWL */
1901 gen_ext_l(ctx, vc, va, rb, islit, lit, 0x03);
1902 break;
1903 case 0x1B:
1904 /* INSWL */
1905 gen_ins_l(ctx, vc, va, rb, islit, lit, 0x03);
1906 break;
1907 case 0x22:
1908 /* MSKLL */
1909 gen_msk_l(ctx, vc, va, rb, islit, lit, 0x0f);
1910 break;
1911 case 0x26:
1912 /* EXTLL */
1913 gen_ext_l(ctx, vc, va, rb, islit, lit, 0x0f);
1914 break;
1915 case 0x2B:
1916 /* INSLL */
1917 gen_ins_l(ctx, vc, va, rb, islit, lit, 0x0f);
1918 break;
1919 case 0x30:
1920 /* ZAP */
1921 if (islit) {
1922 gen_zapnoti(vc, va, ~lit);
1923 } else {
1924 gen_helper_zap(vc, va, load_gpr(ctx, rb));
1926 break;
1927 case 0x31:
1928 /* ZAPNOT */
1929 if (islit) {
1930 gen_zapnoti(vc, va, lit);
1931 } else {
1932 gen_helper_zapnot(vc, va, load_gpr(ctx, rb));
1934 break;
1935 case 0x32:
1936 /* MSKQL */
1937 gen_msk_l(ctx, vc, va, rb, islit, lit, 0xff);
1938 break;
1939 case 0x34:
1940 /* SRL */
1941 if (islit) {
1942 tcg_gen_shri_i64(vc, va, lit & 0x3f);
1943 } else {
1944 tmp = tcg_temp_new();
1945 vb = load_gpr(ctx, rb);
1946 tcg_gen_andi_i64(tmp, vb, 0x3f);
1947 tcg_gen_shr_i64(vc, va, tmp);
1948 tcg_temp_free(tmp);
1950 break;
1951 case 0x36:
1952 /* EXTQL */
1953 gen_ext_l(ctx, vc, va, rb, islit, lit, 0xff);
1954 break;
1955 case 0x39:
1956 /* SLL */
1957 if (islit) {
1958 tcg_gen_shli_i64(vc, va, lit & 0x3f);
1959 } else {
1960 tmp = tcg_temp_new();
1961 vb = load_gpr(ctx, rb);
1962 tcg_gen_andi_i64(tmp, vb, 0x3f);
1963 tcg_gen_shl_i64(vc, va, tmp);
1964 tcg_temp_free(tmp);
1966 break;
1967 case 0x3B:
1968 /* INSQL */
1969 gen_ins_l(ctx, vc, va, rb, islit, lit, 0xff);
1970 break;
1971 case 0x3C:
1972 /* SRA */
1973 if (islit) {
1974 tcg_gen_sari_i64(vc, va, lit & 0x3f);
1975 } else {
1976 tmp = tcg_temp_new();
1977 vb = load_gpr(ctx, rb);
1978 tcg_gen_andi_i64(tmp, vb, 0x3f);
1979 tcg_gen_sar_i64(vc, va, tmp);
1980 tcg_temp_free(tmp);
1982 break;
1983 case 0x52:
1984 /* MSKWH */
1985 gen_msk_h(ctx, vc, va, rb, islit, lit, 0x03);
1986 break;
1987 case 0x57:
1988 /* INSWH */
1989 gen_ins_h(ctx, vc, va, rb, islit, lit, 0x03);
1990 break;
1991 case 0x5A:
1992 /* EXTWH */
1993 gen_ext_h(ctx, vc, va, rb, islit, lit, 0x03);
1994 break;
1995 case 0x62:
1996 /* MSKLH */
1997 gen_msk_h(ctx, vc, va, rb, islit, lit, 0x0f);
1998 break;
1999 case 0x67:
2000 /* INSLH */
2001 gen_ins_h(ctx, vc, va, rb, islit, lit, 0x0f);
2002 break;
2003 case 0x6A:
2004 /* EXTLH */
2005 gen_ext_h(ctx, vc, va, rb, islit, lit, 0x0f);
2006 break;
2007 case 0x72:
2008 /* MSKQH */
2009 gen_msk_h(ctx, vc, va, rb, islit, lit, 0xff);
2010 break;
2011 case 0x77:
2012 /* INSQH */
2013 gen_ins_h(ctx, vc, va, rb, islit, lit, 0xff);
2014 break;
2015 case 0x7A:
2016 /* EXTQH */
2017 gen_ext_h(ctx, vc, va, rb, islit, lit, 0xff);
2018 break;
2019 default:
2020 goto invalid_opc;
2022 break;
2024 case 0x13:
2025 vc = dest_gpr(ctx, rc);
2026 vb = load_gpr_lit(ctx, rb, lit, islit);
2027 va = load_gpr(ctx, ra);
2028 switch (fn7) {
2029 case 0x00:
2030 /* MULL */
2031 tcg_gen_mul_i64(vc, va, vb);
2032 tcg_gen_ext32s_i64(vc, vc);
2033 break;
2034 case 0x20:
2035 /* MULQ */
2036 tcg_gen_mul_i64(vc, va, vb);
2037 break;
2038 case 0x30:
2039 /* UMULH */
2040 tmp = tcg_temp_new();
2041 tcg_gen_mulu2_i64(tmp, vc, va, vb);
2042 tcg_temp_free(tmp);
2043 break;
2044 case 0x40:
2045 /* MULL/V */
2046 tmp = tcg_temp_new();
2047 tcg_gen_ext32s_i64(tmp, va);
2048 tcg_gen_ext32s_i64(vc, vb);
2049 tcg_gen_mul_i64(tmp, tmp, vc);
2050 tcg_gen_ext32s_i64(vc, tmp);
2051 gen_helper_check_overflow(cpu_env, vc, tmp);
2052 tcg_temp_free(tmp);
2053 break;
2054 case 0x60:
2055 /* MULQ/V */
2056 tmp = tcg_temp_new();
2057 tmp2 = tcg_temp_new();
2058 tcg_gen_muls2_i64(vc, tmp, va, vb);
2059 tcg_gen_sari_i64(tmp2, vc, 63);
2060 gen_helper_check_overflow(cpu_env, tmp, tmp2);
2061 tcg_temp_free(tmp);
2062 tcg_temp_free(tmp2);
2063 break;
2064 default:
2065 goto invalid_opc;
2067 break;
2069 case 0x14:
2070 REQUIRE_AMASK(FIX);
2071 vc = dest_fpr(ctx, rc);
2072 switch (fpfn) { /* fn11 & 0x3F */
2073 case 0x04:
2074 /* ITOFS */
2075 REQUIRE_REG_31(rb);
2076 REQUIRE_FEN;
2077 t32 = tcg_temp_new_i32();
2078 va = load_gpr(ctx, ra);
2079 tcg_gen_extrl_i64_i32(t32, va);
2080 gen_helper_memory_to_s(vc, t32);
2081 tcg_temp_free_i32(t32);
2082 break;
2083 case 0x0A:
2084 /* SQRTF */
2085 REQUIRE_REG_31(ra);
2086 REQUIRE_FEN;
2087 vb = load_fpr(ctx, rb);
2088 gen_helper_sqrtf(vc, cpu_env, vb);
2089 break;
2090 case 0x0B:
2091 /* SQRTS */
2092 REQUIRE_REG_31(ra);
2093 REQUIRE_FEN;
2094 gen_sqrts(ctx, rb, rc, fn11);
2095 break;
2096 case 0x14:
2097 /* ITOFF */
2098 REQUIRE_REG_31(rb);
2099 REQUIRE_FEN;
2100 t32 = tcg_temp_new_i32();
2101 va = load_gpr(ctx, ra);
2102 tcg_gen_extrl_i64_i32(t32, va);
2103 gen_helper_memory_to_f(vc, t32);
2104 tcg_temp_free_i32(t32);
2105 break;
2106 case 0x24:
2107 /* ITOFT */
2108 REQUIRE_REG_31(rb);
2109 REQUIRE_FEN;
2110 va = load_gpr(ctx, ra);
2111 tcg_gen_mov_i64(vc, va);
2112 break;
2113 case 0x2A:
2114 /* SQRTG */
2115 REQUIRE_REG_31(ra);
2116 REQUIRE_FEN;
2117 vb = load_fpr(ctx, rb);
2118 gen_helper_sqrtg(vc, cpu_env, vb);
2119 break;
2120 case 0x02B:
2121 /* SQRTT */
2122 REQUIRE_REG_31(ra);
2123 REQUIRE_FEN;
2124 gen_sqrtt(ctx, rb, rc, fn11);
2125 break;
2126 default:
2127 goto invalid_opc;
2129 break;
2131 case 0x15:
2132 /* VAX floating point */
2133 /* XXX: rounding mode and trap are ignored (!) */
2134 vc = dest_fpr(ctx, rc);
2135 vb = load_fpr(ctx, rb);
2136 va = load_fpr(ctx, ra);
2137 switch (fpfn) { /* fn11 & 0x3F */
2138 case 0x00:
2139 /* ADDF */
2140 REQUIRE_FEN;
2141 gen_helper_addf(vc, cpu_env, va, vb);
2142 break;
2143 case 0x01:
2144 /* SUBF */
2145 REQUIRE_FEN;
2146 gen_helper_subf(vc, cpu_env, va, vb);
2147 break;
2148 case 0x02:
2149 /* MULF */
2150 REQUIRE_FEN;
2151 gen_helper_mulf(vc, cpu_env, va, vb);
2152 break;
2153 case 0x03:
2154 /* DIVF */
2155 REQUIRE_FEN;
2156 gen_helper_divf(vc, cpu_env, va, vb);
2157 break;
2158 case 0x1E:
2159 /* CVTDG -- TODO */
2160 REQUIRE_REG_31(ra);
2161 goto invalid_opc;
2162 case 0x20:
2163 /* ADDG */
2164 REQUIRE_FEN;
2165 gen_helper_addg(vc, cpu_env, va, vb);
2166 break;
2167 case 0x21:
2168 /* SUBG */
2169 REQUIRE_FEN;
2170 gen_helper_subg(vc, cpu_env, va, vb);
2171 break;
2172 case 0x22:
2173 /* MULG */
2174 REQUIRE_FEN;
2175 gen_helper_mulg(vc, cpu_env, va, vb);
2176 break;
2177 case 0x23:
2178 /* DIVG */
2179 REQUIRE_FEN;
2180 gen_helper_divg(vc, cpu_env, va, vb);
2181 break;
2182 case 0x25:
2183 /* CMPGEQ */
2184 REQUIRE_FEN;
2185 gen_helper_cmpgeq(vc, cpu_env, va, vb);
2186 break;
2187 case 0x26:
2188 /* CMPGLT */
2189 REQUIRE_FEN;
2190 gen_helper_cmpglt(vc, cpu_env, va, vb);
2191 break;
2192 case 0x27:
2193 /* CMPGLE */
2194 REQUIRE_FEN;
2195 gen_helper_cmpgle(vc, cpu_env, va, vb);
2196 break;
2197 case 0x2C:
2198 /* CVTGF */
2199 REQUIRE_REG_31(ra);
2200 REQUIRE_FEN;
2201 gen_helper_cvtgf(vc, cpu_env, vb);
2202 break;
2203 case 0x2D:
2204 /* CVTGD -- TODO */
2205 REQUIRE_REG_31(ra);
2206 goto invalid_opc;
2207 case 0x2F:
2208 /* CVTGQ */
2209 REQUIRE_REG_31(ra);
2210 REQUIRE_FEN;
2211 gen_helper_cvtgq(vc, cpu_env, vb);
2212 break;
2213 case 0x3C:
2214 /* CVTQF */
2215 REQUIRE_REG_31(ra);
2216 REQUIRE_FEN;
2217 gen_helper_cvtqf(vc, cpu_env, vb);
2218 break;
2219 case 0x3E:
2220 /* CVTQG */
2221 REQUIRE_REG_31(ra);
2222 REQUIRE_FEN;
2223 gen_helper_cvtqg(vc, cpu_env, vb);
2224 break;
2225 default:
2226 goto invalid_opc;
2228 break;
2230 case 0x16:
2231 /* IEEE floating-point */
2232 switch (fpfn) { /* fn11 & 0x3F */
2233 case 0x00:
2234 /* ADDS */
2235 REQUIRE_FEN;
2236 gen_adds(ctx, ra, rb, rc, fn11);
2237 break;
2238 case 0x01:
2239 /* SUBS */
2240 REQUIRE_FEN;
2241 gen_subs(ctx, ra, rb, rc, fn11);
2242 break;
2243 case 0x02:
2244 /* MULS */
2245 REQUIRE_FEN;
2246 gen_muls(ctx, ra, rb, rc, fn11);
2247 break;
2248 case 0x03:
2249 /* DIVS */
2250 REQUIRE_FEN;
2251 gen_divs(ctx, ra, rb, rc, fn11);
2252 break;
2253 case 0x20:
2254 /* ADDT */
2255 REQUIRE_FEN;
2256 gen_addt(ctx, ra, rb, rc, fn11);
2257 break;
2258 case 0x21:
2259 /* SUBT */
2260 REQUIRE_FEN;
2261 gen_subt(ctx, ra, rb, rc, fn11);
2262 break;
2263 case 0x22:
2264 /* MULT */
2265 REQUIRE_FEN;
2266 gen_mult(ctx, ra, rb, rc, fn11);
2267 break;
2268 case 0x23:
2269 /* DIVT */
2270 REQUIRE_FEN;
2271 gen_divt(ctx, ra, rb, rc, fn11);
2272 break;
2273 case 0x24:
2274 /* CMPTUN */
2275 REQUIRE_FEN;
2276 gen_cmptun(ctx, ra, rb, rc, fn11);
2277 break;
2278 case 0x25:
2279 /* CMPTEQ */
2280 REQUIRE_FEN;
2281 gen_cmpteq(ctx, ra, rb, rc, fn11);
2282 break;
2283 case 0x26:
2284 /* CMPTLT */
2285 REQUIRE_FEN;
2286 gen_cmptlt(ctx, ra, rb, rc, fn11);
2287 break;
2288 case 0x27:
2289 /* CMPTLE */
2290 REQUIRE_FEN;
2291 gen_cmptle(ctx, ra, rb, rc, fn11);
2292 break;
2293 case 0x2C:
2294 REQUIRE_REG_31(ra);
2295 REQUIRE_FEN;
2296 if (fn11 == 0x2AC || fn11 == 0x6AC) {
2297 /* CVTST */
2298 gen_cvtst(ctx, rb, rc, fn11);
2299 } else {
2300 /* CVTTS */
2301 gen_cvtts(ctx, rb, rc, fn11);
2303 break;
2304 case 0x2F:
2305 /* CVTTQ */
2306 REQUIRE_REG_31(ra);
2307 REQUIRE_FEN;
2308 gen_cvttq(ctx, rb, rc, fn11);
2309 break;
2310 case 0x3C:
2311 /* CVTQS */
2312 REQUIRE_REG_31(ra);
2313 REQUIRE_FEN;
2314 gen_cvtqs(ctx, rb, rc, fn11);
2315 break;
2316 case 0x3E:
2317 /* CVTQT */
2318 REQUIRE_REG_31(ra);
2319 REQUIRE_FEN;
2320 gen_cvtqt(ctx, rb, rc, fn11);
2321 break;
2322 default:
2323 goto invalid_opc;
2325 break;
2327 case 0x17:
2328 switch (fn11) {
2329 case 0x010:
2330 /* CVTLQ */
2331 REQUIRE_REG_31(ra);
2332 REQUIRE_FEN;
2333 vc = dest_fpr(ctx, rc);
2334 vb = load_fpr(ctx, rb);
2335 gen_cvtlq(vc, vb);
2336 break;
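/* The CPYS family composes a result from pieces of two source
   registers.  As a sketch of what gen_cpy_mask presumably computes:
   vc = ((inv ? ~va : va) & mask) | (vb & ~mask).  A mask of the high
   bit alone thus copies just the sign (CPYS/CPYSN, the latter
   inverted), while 0xFFF0000000000000 copies sign and exponent
   (CPYSE). */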
2337 case 0x020:
2338 /* CPYS */
2339 REQUIRE_FEN;
2340 if (rc == 31) {
2341 /* Special case CPYS as FNOP. */
2342 } else {
2343 vc = dest_fpr(ctx, rc);
2344 va = load_fpr(ctx, ra);
2345 if (ra == rb) {
2346 /* Special case CPYS as FMOV. */
2347 tcg_gen_mov_i64(vc, va);
2348 } else {
2349 vb = load_fpr(ctx, rb);
2350 gen_cpy_mask(vc, va, vb, 0, 0x8000000000000000ULL);
2351 }
2352 }
2353 break;
2354 case 0x021:
2355 /* CPYSN */
2356 REQUIRE_FEN;
2357 vc = dest_fpr(ctx, rc);
2358 vb = load_fpr(ctx, rb);
2359 va = load_fpr(ctx, ra);
2360 gen_cpy_mask(vc, va, vb, 1, 0x8000000000000000ULL);
2361 break;
2362 case 0x022:
2363 /* CPYSE */
2364 REQUIRE_FEN;
2365 vc = dest_fpr(ctx, rc);
2366 vb = load_fpr(ctx, rb);
2367 va = load_fpr(ctx, ra);
2368 gen_cpy_mask(vc, va, vb, 0, 0xFFF0000000000000ULL);
2369 break;
2370 case 0x024:
2371 /* MT_FPCR */
2372 REQUIRE_FEN;
2373 va = load_fpr(ctx, ra);
2374 gen_helper_store_fpcr(cpu_env, va);
2375 if (ctx->tb_rm == QUAL_RM_D) {
2376 /* Re-do the copy of the rounding mode to fp_status
2377 the next time we use dynamic rounding. */
2378 ctx->tb_rm = -1;
2379 }
2380 break;
2381 case 0x025:
2382 /* MF_FPCR */
2383 REQUIRE_FEN;
2384 va = dest_fpr(ctx, ra);
2385 gen_helper_load_fpcr(va, cpu_env);
2386 break;
2387 case 0x02A:
2388 /* FCMOVEQ */
2389 REQUIRE_FEN;
2390 gen_fcmov(ctx, TCG_COND_EQ, ra, rb, rc);
2391 break;
2392 case 0x02B:
2393 /* FCMOVNE */
2394 REQUIRE_FEN;
2395 gen_fcmov(ctx, TCG_COND_NE, ra, rb, rc);
2396 break;
2397 case 0x02C:
2398 /* FCMOVLT */
2399 REQUIRE_FEN;
2400 gen_fcmov(ctx, TCG_COND_LT, ra, rb, rc);
2401 break;
2402 case 0x02D:
2403 /* FCMOVGE */
2404 REQUIRE_FEN;
2405 gen_fcmov(ctx, TCG_COND_GE, ra, rb, rc);
2406 break;
2407 case 0x02E:
2408 /* FCMOVLE */
2409 REQUIRE_FEN;
2410 gen_fcmov(ctx, TCG_COND_LE, ra, rb, rc);
2411 break;
2412 case 0x02F:
2413 /* FCMOVGT */
2414 REQUIRE_FEN;
2415 gen_fcmov(ctx, TCG_COND_GT, ra, rb, rc);
2416 break;
2417 case 0x030: /* CVTQL */
2418 case 0x130: /* CVTQL/V */
2419 case 0x530: /* CVTQL/SV */
2420 REQUIRE_REG_31(ra);
2421 REQUIRE_FEN;
2422 vc = dest_fpr(ctx, rc);
2423 vb = load_fpr(ctx, rb);
2424 gen_helper_cvtql(vc, cpu_env, vb);
2425 gen_fp_exc_raise(rc, fn11);
2426 break;
2427 default:
2428 goto invalid_opc;
2429 }
2430 break;
2432 case 0x18:
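/* Miscellaneous instructions use the memory format, with the
   function code carried in the 16-bit displacement field. */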
2433 switch ((uint16_t)disp16) {
2434 case 0x0000:
2435 /* TRAPB */
2436 /* No-op. */
2437 break;
2438 case 0x0400:
2439 /* EXCB */
2440 /* No-op. */
2441 break;
2442 case 0x4000:
2443 /* MB */
2444 tcg_gen_mb(TCG_MO_ALL | TCG_BAR_SC);
2445 break;
2446 case 0x4400:
2447 /* WMB */
2448 tcg_gen_mb(TCG_MO_ST_ST | TCG_BAR_SC);
2449 break;
2450 case 0x8000:
2451 /* FETCH */
2452 /* No-op */
2453 break;
2454 case 0xA000:
2455 /* FETCH_M */
2456 /* No-op */
2457 break;
2458 case 0xC000:
2459 /* RPCC */
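/* Reading the cycle counter is an I/O-like operation when icount
   is enabled, so the TB is ended afterwards (DISAS_PC_STALE) to
   keep the instruction count exact. */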
2460 va = dest_gpr(ctx, ra);
2461 if (tb_cflags(ctx->base.tb) & CF_USE_ICOUNT) {
2462 gen_io_start();
2463 gen_helper_load_pcc(va, cpu_env);
2464 ret = DISAS_PC_STALE;
2465 } else {
2466 gen_helper_load_pcc(va, cpu_env);
2467 }
2468 break;
2469 case 0xE000:
2470 /* RC */
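/* RC and RS read the sequencing flag into ra and then clear or
   set it; the last argument to gen_rx is the new flag value. */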
2471 gen_rx(ctx, ra, 0);
2472 break;
2473 case 0xE800:
2474 /* ECB */
2475 break;
2476 case 0xF000:
2477 /* RS */
2478 gen_rx(ctx, ra, 1);
2479 break;
2480 case 0xF800:
2481 /* WH64 */
2482 /* No-op */
2483 break;
2484 case 0xFC00:
2485 /* WH64EN */
2486 /* No-op */
2487 break;
2488 default:
2489 goto invalid_opc;
2490 }
2491 break;
2493 case 0x19:
2494 /* HW_MFPR (PALcode) */
2495 #ifndef CONFIG_USER_ONLY
2496 REQUIRE_TB_FLAG(ENV_FLAG_PAL_MODE);
2497 va = dest_gpr(ctx, ra);
2498 ret = gen_mfpr(ctx, va, insn & 0xffff);
2499 break;
2500 #else
2501 goto invalid_opc;
2502 #endif
2504 case 0x1A:
2505 /* JMP, JSR, RET, JSR_COROUTINE. These only differ by the branch
2506 prediction stack action, which of course we don't implement. */
2507 vb = load_gpr(ctx, rb);
2508 tcg_gen_andi_i64(cpu_pc, vb, ~3);
2509 if (ra != 31) {
2510 tcg_gen_movi_i64(ctx->ir[ra], ctx->base.pc_next);
2511 }
2512 ret = DISAS_PC_UPDATED;
2513 break;
2515 case 0x1B:
2516 /* HW_LD (PALcode) */
2517 #ifndef CONFIG_USER_ONLY
2518 REQUIRE_TB_FLAG(ENV_FLAG_PAL_MODE);
2519 {
2520 TCGv addr = tcg_temp_new();
2521 vb = load_gpr(ctx, rb);
2522 va = dest_gpr(ctx, ra);
2524 tcg_gen_addi_i64(addr, vb, disp12);
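/* The function field selects the access type: physical accesses
   use MMU_PHYS_IDX directly, while the /w and /wa forms perform
   the access with kernel or user (alternate) translation. */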
2525 switch ((insn >> 12) & 0xF) {
2526 case 0x0:
2527 /* Longword physical access (hw_ldl/p) */
2528 tcg_gen_qemu_ld_i64(va, addr, MMU_PHYS_IDX, MO_LESL);
2529 break;
2530 case 0x1:
2531 /* Quadword physical access (hw_ldq/p) */
2532 tcg_gen_qemu_ld_i64(va, addr, MMU_PHYS_IDX, MO_LEQ);
2533 break;
2534 case 0x2:
2535 /* Longword physical access with lock (hw_ldl_l/p) */
2536 gen_qemu_ldl_l(va, addr, MMU_PHYS_IDX);
2537 break;
2538 case 0x3:
2539 /* Quadword physical access with lock (hw_ldq_l/p) */
2540 gen_qemu_ldq_l(va, addr, MMU_PHYS_IDX);
2541 break;
2542 case 0x4:
2543 /* Longword virtual PTE fetch (hw_ldl/v) */
2544 goto invalid_opc;
2545 case 0x5:
2546 /* Quadword virtual PTE fetch (hw_ldq/v) */
2547 goto invalid_opc;
2548 break;
2549 case 0x6:
2550 /* Invalid */
2551 goto invalid_opc;
2552 case 0x7:
2553 /* Invalid */
2554 goto invalid_opc;
2555 case 0x8:
2556 /* Longword virtual access (hw_ldl) */
2557 goto invalid_opc;
2558 case 0x9:
2559 /* Quadword virtual access (hw_ldq) */
2560 goto invalid_opc;
2561 case 0xA:
2562 /* Longword virtual access with protection check (hw_ldl/w) */
2563 tcg_gen_qemu_ld_i64(va, addr, MMU_KERNEL_IDX, MO_LESL);
2564 break;
2565 case 0xB:
2566 /* Quadword virtual access with protection check (hw_ldq/w) */
2567 tcg_gen_qemu_ld_i64(va, addr, MMU_KERNEL_IDX, MO_LEQ);
2568 break;
2569 case 0xC:
2570 /* Longword virtual access with alt access mode (hw_ldl/a) */
2571 goto invalid_opc;
2572 case 0xD:
2573 /* Quadword virtual access with alt access mode (hw_ldq/a) */
2574 goto invalid_opc;
2575 case 0xE:
2576 /* Longword virtual access with alternate access mode and
2577 protection checks (hw_ldl/wa) */
2578 tcg_gen_qemu_ld_i64(va, addr, MMU_USER_IDX, MO_LESL);
2579 break;
2580 case 0xF:
2581 /* Quadword virtual access with alternate access mode and
2582 protection checks (hw_ldq/wa) */
2583 tcg_gen_qemu_ld_i64(va, addr, MMU_USER_IDX, MO_LEQ);
2584 break;
2585 }
2586 tcg_temp_free(addr);
2587 break;
2588 }
2589 #else
2590 goto invalid_opc;
2591 #endif
2593 case 0x1C:
2594 vc = dest_gpr(ctx, rc);
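/* FTOIT and FTOIS move FP register bits to an integer register
   without conversion; FTOIS additionally undoes the S-format
   register expansion and sign-extends the 32-bit result. */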
2595 if (fn7 == 0x70) {
2596 /* FTOIT */
2597 REQUIRE_AMASK(FIX);
2598 REQUIRE_REG_31(rb);
2599 va = load_fpr(ctx, ra);
2600 tcg_gen_mov_i64(vc, va);
2601 break;
2602 } else if (fn7 == 0x78) {
2603 /* FTOIS */
2604 REQUIRE_AMASK(FIX);
2605 REQUIRE_REG_31(rb);
2606 t32 = tcg_temp_new_i32();
2607 va = load_fpr(ctx, ra);
2608 gen_helper_s_to_memory(t32, va);
2609 tcg_gen_ext_i32_i64(vc, t32);
2610 tcg_temp_free_i32(t32);
2611 break;
2612 }
2614 vb = load_gpr_lit(ctx, rb, lit, islit);
2615 switch (fn7) {
2616 case 0x00:
2617 /* SEXTB */
2618 REQUIRE_AMASK(BWX);
2619 REQUIRE_REG_31(ra);
2620 tcg_gen_ext8s_i64(vc, vb);
2621 break;
2622 case 0x01:
2623 /* SEXTW */
2624 REQUIRE_AMASK(BWX);
2625 REQUIRE_REG_31(ra);
2626 tcg_gen_ext16s_i64(vc, vb);
2627 break;
2628 case 0x30:
2629 /* CTPOP */
2630 REQUIRE_AMASK(CIX);
2631 REQUIRE_REG_31(ra);
2632 REQUIRE_NO_LIT;
2633 tcg_gen_ctpop_i64(vc, vb);
2634 break;
2635 case 0x31:
2636 /* PERR */
2637 REQUIRE_AMASK(MVI);
2638 REQUIRE_NO_LIT;
2639 va = load_gpr(ctx, ra);
2640 gen_helper_perr(vc, va, vb);
2641 break;
2642 case 0x32:
2643 /* CTLZ */
2644 REQUIRE_AMASK(CIX);
2645 REQUIRE_REG_31(ra);
2646 REQUIRE_NO_LIT;
2647 tcg_gen_clzi_i64(vc, vb, 64);
2648 break;
2649 case 0x33:
2650 /* CTTZ */
2651 REQUIRE_AMASK(CIX);
2652 REQUIRE_REG_31(ra);
2653 REQUIRE_NO_LIT;
2654 tcg_gen_ctzi_i64(vc, vb, 64);
2655 break;
2656 case 0x34:
2657 /* UNPKBW */
2658 REQUIRE_AMASK(MVI);
2659 REQUIRE_REG_31(ra);
2660 REQUIRE_NO_LIT;
2661 gen_helper_unpkbw(vc, vb);
2662 break;
2663 case 0x35:
2664 /* UNPKBL */
2665 REQUIRE_AMASK(MVI);
2666 REQUIRE_REG_31(ra);
2667 REQUIRE_NO_LIT;
2668 gen_helper_unpkbl(vc, vb);
2669 break;
2670 case 0x36:
2671 /* PKWB */
2672 REQUIRE_AMASK(MVI);
2673 REQUIRE_REG_31(ra);
2674 REQUIRE_NO_LIT;
2675 gen_helper_pkwb(vc, vb);
2676 break;
2677 case 0x37:
2678 /* PKLB */
2679 REQUIRE_AMASK(MVI);
2680 REQUIRE_REG_31(ra);
2681 REQUIRE_NO_LIT;
2682 gen_helper_pklb(vc, vb);
2683 break;
2684 case 0x38:
2685 /* MINSB8 */
2686 REQUIRE_AMASK(MVI);
2687 va = load_gpr(ctx, ra);
2688 gen_helper_minsb8(vc, va, vb);
2689 break;
2690 case 0x39:
2691 /* MINSW4 */
2692 REQUIRE_AMASK(MVI);
2693 va = load_gpr(ctx, ra);
2694 gen_helper_minsw4(vc, va, vb);
2695 break;
2696 case 0x3A:
2697 /* MINUB8 */
2698 REQUIRE_AMASK(MVI);
2699 va = load_gpr(ctx, ra);
2700 gen_helper_minub8(vc, va, vb);
2701 break;
2702 case 0x3B:
2703 /* MINUW4 */
2704 REQUIRE_AMASK(MVI);
2705 va = load_gpr(ctx, ra);
2706 gen_helper_minuw4(vc, va, vb);
2707 break;
2708 case 0x3C:
2709 /* MAXUB8 */
2710 REQUIRE_AMASK(MVI);
2711 va = load_gpr(ctx, ra);
2712 gen_helper_maxub8(vc, va, vb);
2713 break;
2714 case 0x3D:
2715 /* MAXUW4 */
2716 REQUIRE_AMASK(MVI);
2717 va = load_gpr(ctx, ra);
2718 gen_helper_maxuw4(vc, va, vb);
2719 break;
2720 case 0x3E:
2721 /* MAXSB8 */
2722 REQUIRE_AMASK(MVI);
2723 va = load_gpr(ctx, ra);
2724 gen_helper_maxsb8(vc, va, vb);
2725 break;
2726 case 0x3F:
2727 /* MAXSW4 */
2728 REQUIRE_AMASK(MVI);
2729 va = load_gpr(ctx, ra);
2730 gen_helper_maxsw4(vc, va, vb);
2731 break;
2732 default:
2733 goto invalid_opc;
2734 }
2735 break;
2737 case 0x1D:
2738 /* HW_MTPR (PALcode) */
2739 #ifndef CONFIG_USER_ONLY
2740 REQUIRE_TB_FLAG(ENV_FLAG_PAL_MODE);
2741 vb = load_gpr(ctx, rb);
2742 ret = gen_mtpr(ctx, vb, insn & 0xffff);
2743 break;
2744 #else
2745 goto invalid_opc;
2746 #endif
2748 case 0x1E:
2749 /* HW_RET (PALcode) */
2750 #ifndef CONFIG_USER_ONLY
2751 REQUIRE_TB_FLAG(ENV_FLAG_PAL_MODE);
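/* Returning from PALcode: any outstanding load-locked reservation
   is cancelled, the RX flag is cleared, bit 0 of the target selects
   the new PAL mode, and bits 1:0 are stripped from the PC. */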
2752 if (rb == 31) {
2753 /* Pre-EV6 CPUs interpreted this as HW_REI, loading the return
2754 address from EXC_ADDR. This turns out to be useful for our
2755 emulation PALcode, so continue to accept it. */
2756 ctx->lit = vb = tcg_temp_new();
2757 tcg_gen_ld_i64(vb, cpu_env, offsetof(CPUAlphaState, exc_addr));
2758 } else {
2759 vb = load_gpr(ctx, rb);
2760 }
2761 tcg_gen_movi_i64(cpu_lock_addr, -1);
2762 tmp = tcg_temp_new();
2763 tcg_gen_movi_i64(tmp, 0);
2764 st_flag_byte(tmp, ENV_FLAG_RX_SHIFT);
2765 tcg_gen_andi_i64(tmp, vb, 1);
2766 st_flag_byte(tmp, ENV_FLAG_PAL_SHIFT);
2767 tcg_temp_free(tmp);
2768 tcg_gen_andi_i64(cpu_pc, vb, ~3);
2769 /* Allow interrupts to be recognized right away. */
2770 ret = DISAS_PC_UPDATED_NOCHAIN;
2771 break;
2772 #else
2773 goto invalid_opc;
2774 #endif
2776 case 0x1F:
2777 /* HW_ST (PALcode) */
2778 #ifndef CONFIG_USER_ONLY
2779 REQUIRE_TB_FLAG(ENV_FLAG_PAL_MODE);
2780 {
2781 switch ((insn >> 12) & 0xF) {
2782 case 0x0:
2783 /* Longword physical access */
2784 va = load_gpr(ctx, ra);
2785 vb = load_gpr(ctx, rb);
2786 tmp = tcg_temp_new();
2787 tcg_gen_addi_i64(tmp, vb, disp12);
2788 tcg_gen_qemu_st_i64(va, tmp, MMU_PHYS_IDX, MO_LESL);
2789 tcg_temp_free(tmp);
2790 break;
2791 case 0x1:
2792 /* Quadword physical access */
2793 va = load_gpr(ctx, ra);
2794 vb = load_gpr(ctx, rb);
2795 tmp = tcg_temp_new();
2796 tcg_gen_addi_i64(tmp, vb, disp12);
2797 tcg_gen_qemu_st_i64(va, tmp, MMU_PHYS_IDX, MO_LEQ);
2798 tcg_temp_free(tmp);
2799 break;
2800 case 0x2:
2801 /* Longword physical access with lock */
2802 ret = gen_store_conditional(ctx, ra, rb, disp12,
2803 MMU_PHYS_IDX, MO_LESL);
2804 break;
2805 case 0x3:
2806 /* Quadword physical access with lock */
2807 ret = gen_store_conditional(ctx, ra, rb, disp12,
2808 MMU_PHYS_IDX, MO_LEQ);
2809 break;
2810 case 0x4:
2811 /* Longword virtual access */
2812 goto invalid_opc;
2813 case 0x5:
2814 /* Quadword virtual access */
2815 goto invalid_opc;
2816 case 0x6:
2817 /* Invalid */
2818 goto invalid_opc;
2819 case 0x7:
2820 /* Invalid */
2821 goto invalid_opc;
2822 case 0x8:
2823 /* Invalid */
2824 goto invalid_opc;
2825 case 0x9:
2826 /* Invalid */
2827 goto invalid_opc;
2828 case 0xA:
2829 /* Invalid */
2830 goto invalid_opc;
2831 case 0xB:
2832 /* Invalid */
2833 goto invalid_opc;
2834 case 0xC:
2835 /* Longword virtual access with alternate access mode */
2836 goto invalid_opc;
2837 case 0xD:
2838 /* Quadword virtual access with alternate access mode */
2839 goto invalid_opc;
2840 case 0xE:
2841 /* Invalid */
2842 goto invalid_opc;
2843 case 0xF:
2844 /* Invalid */
2845 goto invalid_opc;
2846 }
2847 break;
2848 }
2849 #else
2850 goto invalid_opc;
2851 #endif
2852 case 0x20:
2853 /* LDF */
2854 REQUIRE_FEN;
2855 gen_load_mem(ctx, &gen_qemu_ldf, ra, rb, disp16, 1, 0);
2856 break;
2857 case 0x21:
2858 /* LDG */
2859 REQUIRE_FEN;
2860 gen_load_mem(ctx, &gen_qemu_ldg, ra, rb, disp16, 1, 0);
2861 break;
2862 case 0x22:
2863 /* LDS */
2864 REQUIRE_FEN;
2865 gen_load_mem(ctx, &gen_qemu_lds, ra, rb, disp16, 1, 0);
2866 break;
2867 case 0x23:
2868 /* LDT */
2869 REQUIRE_FEN;
2870 gen_load_mem(ctx, &tcg_gen_qemu_ld64, ra, rb, disp16, 1, 0);
2871 break;
2872 case 0x24:
2873 /* STF */
2874 REQUIRE_FEN;
2875 gen_store_mem(ctx, &gen_qemu_stf, ra, rb, disp16, 1, 0);
2876 break;
2877 case 0x25:
2878 /* STG */
2879 REQUIRE_FEN;
2880 gen_store_mem(ctx, &gen_qemu_stg, ra, rb, disp16, 1, 0);
2881 break;
2882 case 0x26:
2883 /* STS */
2884 REQUIRE_FEN;
2885 gen_store_mem(ctx, &gen_qemu_sts, ra, rb, disp16, 1, 0);
2886 break;
2887 case 0x27:
2888 /* STT */
2889 REQUIRE_FEN;
2890 gen_store_mem(ctx, &tcg_gen_qemu_st64, ra, rb, disp16, 1, 0);
2891 break;
2892 case 0x28:
2893 /* LDL */
2894 gen_load_mem(ctx, &tcg_gen_qemu_ld32s, ra, rb, disp16, 0, 0);
2895 break;
2896 case 0x29:
2897 /* LDQ */
2898 gen_load_mem(ctx, &tcg_gen_qemu_ld64, ra, rb, disp16, 0, 0);
2899 break;
2900 case 0x2A:
2901 /* LDL_L */
2902 gen_load_mem(ctx, &gen_qemu_ldl_l, ra, rb, disp16, 0, 0);
2903 break;
2904 case 0x2B:
2905 /* LDQ_L */
2906 gen_load_mem(ctx, &gen_qemu_ldq_l, ra, rb, disp16, 0, 0);
2907 break;
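/* LDx_L records the address and loaded value in cpu_lock_addr and
   cpu_lock_value; the STx_C cases below presumably succeed only if
   a compare-and-swap against that recorded value still matches. */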
2908 case 0x2C:
2909 /* STL */
2910 gen_store_mem(ctx, &tcg_gen_qemu_st32, ra, rb, disp16, 0, 0);
2911 break;
2912 case 0x2D:
2913 /* STQ */
2914 gen_store_mem(ctx, &tcg_gen_qemu_st64, ra, rb, disp16, 0, 0);
2915 break;
2916 case 0x2E:
2917 /* STL_C */
2918 ret = gen_store_conditional(ctx, ra, rb, disp16,
2919 ctx->mem_idx, MO_LESL);
2920 break;
2921 case 0x2F:
2922 /* STQ_C */
2923 ret = gen_store_conditional(ctx, ra, rb, disp16,
2924 ctx->mem_idx, MO_LEQ);
2925 break;
2926 case 0x30:
2927 /* BR */
2928 ret = gen_bdirect(ctx, ra, disp21);
2929 break;
2930 case 0x31: /* FBEQ */
2931 REQUIRE_FEN;
2932 ret = gen_fbcond(ctx, TCG_COND_EQ, ra, disp21);
2933 break;
2934 case 0x32: /* FBLT */
2935 REQUIRE_FEN;
2936 ret = gen_fbcond(ctx, TCG_COND_LT, ra, disp21);
2937 break;
2938 case 0x33: /* FBLE */
2939 REQUIRE_FEN;
2940 ret = gen_fbcond(ctx, TCG_COND_LE, ra, disp21);
2941 break;
2942 case 0x34:
2943 /* BSR */
2944 ret = gen_bdirect(ctx, ra, disp21);
2945 break;
2946 case 0x35: /* FBNE */
2947 REQUIRE_FEN;
2948 ret = gen_fbcond(ctx, TCG_COND_NE, ra, disp21);
2949 break;
2950 case 0x36: /* FBGE */
2951 REQUIRE_FEN;
2952 ret = gen_fbcond(ctx, TCG_COND_GE, ra, disp21);
2953 break;
2954 case 0x37: /* FBGT */
2955 REQUIRE_FEN;
2956 ret = gen_fbcond(ctx, TCG_COND_GT, ra, disp21);
2957 break;
2958 case 0x38:
2959 /* BLBC */
2960 ret = gen_bcond(ctx, TCG_COND_EQ, ra, disp21, 1);
2961 break;
2962 case 0x39:
2963 /* BEQ */
2964 ret = gen_bcond(ctx, TCG_COND_EQ, ra, disp21, 0);
2965 break;
2966 case 0x3A:
2967 /* BLT */
2968 ret = gen_bcond(ctx, TCG_COND_LT, ra, disp21, 0);
2969 break;
2970 case 0x3B:
2971 /* BLE */
2972 ret = gen_bcond(ctx, TCG_COND_LE, ra, disp21, 0);
2973 break;
2974 case 0x3C:
2975 /* BLBS */
2976 ret = gen_bcond(ctx, TCG_COND_NE, ra, disp21, 1);
2977 break;
2978 case 0x3D:
2979 /* BNE */
2980 ret = gen_bcond(ctx, TCG_COND_NE, ra, disp21, 0);
2981 break;
2982 case 0x3E:
2983 /* BGE */
2984 ret = gen_bcond(ctx, TCG_COND_GE, ra, disp21, 0);
2985 break;
2986 case 0x3F:
2987 /* BGT */
2988 ret = gen_bcond(ctx, TCG_COND_GT, ra, disp21, 0);
2989 break;
2990 invalid_opc:
2991 ret = gen_invalid(ctx);
2992 break;
2993 raise_fen:
2994 ret = gen_excp(ctx, EXCP_FEN, 0);
2995 break;
2996 }
2998 return ret;
2999 }
3001 static void alpha_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cpu)
3002 {
3003 DisasContext *ctx = container_of(dcbase, DisasContext, base);
3004 CPUAlphaState *env = cpu->env_ptr;
3005 int64_t bound, mask;
3007 ctx->tbflags = ctx->base.tb->flags;
3008 ctx->mem_idx = cpu_mmu_index(env, false);
3009 ctx->implver = env->implver;
3010 ctx->amask = env->amask;
3012 #ifdef CONFIG_USER_ONLY
3013 ctx->ir = cpu_std_ir;
3014 #else
3015 ctx->palbr = env->palbr;
3016 ctx->ir = (ctx->tbflags & ENV_FLAG_PAL_MODE ? cpu_pal_ir : cpu_std_ir);
3017 #endif
3019 /* ??? Every TB begins with unset rounding mode, to be initialized on
3020 the first fp insn of the TB. Alternatively we could define a proper
3021 default for every TB (e.g. QUAL_RM_N or QUAL_RM_D) and make sure
3022 to reset the FP_STATUS to that default at the end of any TB that
3023 changes the default. We could even (gasp) dynamically figure out
3024 what default would be most efficient given the running program. */
3025 ctx->tb_rm = -1;
3026 /* Similarly for flush-to-zero. */
3027 ctx->tb_ftz = -1;
3029 ctx->zero = NULL;
3030 ctx->sink = NULL;
3031 ctx->lit = NULL;
3033 /* Bound the number of insns to execute to those left on the page. */
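/* -(pc | mask) is the number of bytes from pc to the end of the
   page; dividing by 4 gives instructions.  E.g. with 8KB pages
   (mask == ~0x1fff) and a pc ending in 0x1ff0, bound == 0x10 / 4 == 4.
   The 41-bit superpage mask follows from its 2^41-byte size. */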
3034 if (in_superpage(ctx, ctx->base.pc_first)) {
3035 mask = -1ULL << 41;
3036 } else {
3037 mask = TARGET_PAGE_MASK;
3038 }
3039 bound = -(ctx->base.pc_first | mask) / 4;
3040 ctx->base.max_insns = MIN(ctx->base.max_insns, bound);
3041 }
3043 static void alpha_tr_tb_start(DisasContextBase *db, CPUState *cpu)
3044 {
3045 }
3047 static void alpha_tr_insn_start(DisasContextBase *dcbase, CPUState *cpu)
3048 {
3049 tcg_gen_insn_start(dcbase->pc_next);
3050 }
3052 static bool alpha_tr_breakpoint_check(DisasContextBase *dcbase, CPUState *cpu,
3053 const CPUBreakpoint *bp)
3054 {
3055 DisasContext *ctx = container_of(dcbase, DisasContext, base);
3057 ctx->base.is_jmp = gen_excp(ctx, EXCP_DEBUG, 0);
3059 /* The address covered by the breakpoint must be included in
3060 [tb->pc, tb->pc + tb->size) in order for it to be
3061 properly cleared -- thus we increment the PC here so that
3062 the logic setting tb->size below does the right thing. */
3063 ctx->base.pc_next += 4;
3064 return true;
3065 }
3067 static void alpha_tr_translate_insn(DisasContextBase *dcbase, CPUState *cpu)
3068 {
3069 DisasContext *ctx = container_of(dcbase, DisasContext, base);
3070 CPUAlphaState *env = cpu->env_ptr;
3071 uint32_t insn = translator_ldl(env, ctx->base.pc_next);
3073 ctx->base.pc_next += 4;
3074 ctx->base.is_jmp = translate_one(ctx, insn);
3076 free_context_temps(ctx);
3077 translator_loop_temp_check(&ctx->base);
3078 }
3080 static void alpha_tr_tb_stop(DisasContextBase *dcbase, CPUState *cpu)
3081 {
3082 DisasContext *ctx = container_of(dcbase, DisasContext, base);
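/* The cases below deliberately fall through: chaining via goto_tb
   is tried first, then the PC is made architecturally visible, and
   finally the TB exits either through the indirect-jump lookup or,
   when single-stepping, via a debug exception. */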
3084 switch (ctx->base.is_jmp) {
3085 case DISAS_NORETURN:
3086 break;
3087 case DISAS_TOO_MANY:
3088 if (use_goto_tb(ctx, ctx->base.pc_next)) {
3089 tcg_gen_goto_tb(0);
3090 tcg_gen_movi_i64(cpu_pc, ctx->base.pc_next);
3091 tcg_gen_exit_tb(ctx->base.tb, 0);
3092 }
3093 /* FALLTHRU */
3094 case DISAS_PC_STALE:
3095 tcg_gen_movi_i64(cpu_pc, ctx->base.pc_next);
3096 /* FALLTHRU */
3097 case DISAS_PC_UPDATED:
3098 if (!use_exit_tb(ctx)) {
3099 tcg_gen_lookup_and_goto_ptr();
3100 break;
3101 }
3102 /* FALLTHRU */
3103 case DISAS_PC_UPDATED_NOCHAIN:
3104 if (ctx->base.singlestep_enabled) {
3105 gen_excp_1(EXCP_DEBUG, 0);
3106 } else {
3107 tcg_gen_exit_tb(NULL, 0);
3108 }
3109 break;
3110 default:
3111 g_assert_not_reached();
3112 }
3113 }
3115 static void alpha_tr_disas_log(const DisasContextBase *dcbase, CPUState *cpu)
3116 {
3117 qemu_log("IN: %s\n", lookup_symbol(dcbase->pc_first));
3118 log_target_disas(cpu, dcbase->pc_first, dcbase->tb->size);
3119 }
3121 static const TranslatorOps alpha_tr_ops = {
3122 .init_disas_context = alpha_tr_init_disas_context,
3123 .tb_start = alpha_tr_tb_start,
3124 .insn_start = alpha_tr_insn_start,
3125 .breakpoint_check = alpha_tr_breakpoint_check,
3126 .translate_insn = alpha_tr_translate_insn,
3127 .tb_stop = alpha_tr_tb_stop,
3128 .disas_log = alpha_tr_disas_log,
3129 };
3131 void gen_intermediate_code(CPUState *cpu, TranslationBlock *tb, int max_insns)
3132 {
3133 DisasContext dc;
3134 translator_loop(&alpha_tr_ops, &dc.base, cpu, tb, max_insns);
3135 }
3137 void restore_state_to_opc(CPUAlphaState *env, TranslationBlock *tb,
3138 target_ulong *data)
3139 {
3140 env->pc = data[0];
3141 }