target/alpha: fix icount handling for timer instructions
qemu/ar7.git: target/alpha/translate.c
/*
 * Alpha emulation cpu translation for qemu.
 *
 * Copyright (c) 2007 Jocelyn Mayer
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "cpu.h"
#include "sysemu/cpus.h"
#include "sysemu/cpu-timers.h"
#include "disas/disas.h"
#include "qemu/host-utils.h"
#include "exec/exec-all.h"
#include "tcg/tcg-op.h"
#include "exec/cpu_ldst.h"
#include "exec/helper-proto.h"
#include "exec/helper-gen.h"
#include "trace-tcg.h"
#include "exec/translator.h"
#include "exec/log.h"

#undef ALPHA_DEBUG_DISAS
#define CONFIG_SOFTFLOAT_INLINE

#ifdef ALPHA_DEBUG_DISAS
#  define LOG_DISAS(...) qemu_log_mask(CPU_LOG_TB_IN_ASM, ## __VA_ARGS__)
#else
#  define LOG_DISAS(...) do { } while (0)
#endif

typedef struct DisasContext DisasContext;
struct DisasContext {
    DisasContextBase base;

#ifndef CONFIG_USER_ONLY
    uint64_t palbr;
#endif
    uint32_t tbflags;
    int mem_idx;

    /* implver and amask values for this CPU. */
    int implver;
    int amask;

    /* Current rounding mode for this TB. */
    int tb_rm;
    /* Current flush-to-zero setting for this TB. */
    int tb_ftz;

    /* The set of registers active in the current context. */
    TCGv *ir;

    /* Temporaries for $31 and $f31 as source and destination. */
    TCGv zero;
    TCGv sink;
    /* Temporary for immediate constants. */
    TCGv lit;
};

/* Target-specific return values from translate_one, indicating the
   state of the TB.  Note that DISAS_NEXT indicates that we are not
   exiting the TB. */
#define DISAS_PC_UPDATED_NOCHAIN  DISAS_TARGET_0
#define DISAS_PC_UPDATED          DISAS_TARGET_1
#define DISAS_PC_STALE            DISAS_TARGET_2

/* global register indexes */
static TCGv cpu_std_ir[31];
static TCGv cpu_fir[31];
static TCGv cpu_pc;
static TCGv cpu_lock_addr;
static TCGv cpu_lock_value;

#ifndef CONFIG_USER_ONLY
static TCGv cpu_pal_ir[31];
#endif

#include "exec/gen-icount.h"
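
/*
 * "exec/gen-icount.h" provides gen_io_start(), used below by the
 * timer-related instructions (RPCC, and the VMTIME/WALLTIME/ALARM processor
 * registers).  Under icount (CF_USE_ICOUNT) these I/O-like operations must
 * come last in their TB, so those paths also return DISAS_PC_STALE to end
 * translation.
 */
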
void alpha_translate_init(void)
{
#define DEF_VAR(V)  { &cpu_##V, #V, offsetof(CPUAlphaState, V) }

    typedef struct { TCGv *var; const char *name; int ofs; } GlobalVar;
    static const GlobalVar vars[] = {
        DEF_VAR(pc),
        DEF_VAR(lock_addr),
        DEF_VAR(lock_value),
    };

#undef DEF_VAR

    /* Use the symbolic register names that match the disassembler. */
    static const char greg_names[31][4] = {
        "v0", "t0", "t1", "t2", "t3", "t4", "t5", "t6",
        "t7", "s0", "s1", "s2", "s3", "s4", "s5", "fp",
        "a0", "a1", "a2", "a3", "a4", "a5", "t8", "t9",
        "t10", "t11", "ra", "t12", "at", "gp", "sp"
    };
    static const char freg_names[31][4] = {
        "f0", "f1", "f2", "f3", "f4", "f5", "f6", "f7",
        "f8", "f9", "f10", "f11", "f12", "f13", "f14", "f15",
        "f16", "f17", "f18", "f19", "f20", "f21", "f22", "f23",
        "f24", "f25", "f26", "f27", "f28", "f29", "f30"
    };
#ifndef CONFIG_USER_ONLY
    static const char shadow_names[8][8] = {
        "pal_t7", "pal_s0", "pal_s1", "pal_s2",
        "pal_s3", "pal_s4", "pal_s5", "pal_t11"
    };
#endif

    int i;

    for (i = 0; i < 31; i++) {
        cpu_std_ir[i] = tcg_global_mem_new_i64(cpu_env,
                                               offsetof(CPUAlphaState, ir[i]),
                                               greg_names[i]);
    }

    for (i = 0; i < 31; i++) {
        cpu_fir[i] = tcg_global_mem_new_i64(cpu_env,
                                            offsetof(CPUAlphaState, fir[i]),
                                            freg_names[i]);
    }

#ifndef CONFIG_USER_ONLY
    memcpy(cpu_pal_ir, cpu_std_ir, sizeof(cpu_pal_ir));
    for (i = 0; i < 8; i++) {
        int r = (i == 7 ? 25 : i + 8);
        cpu_pal_ir[r] = tcg_global_mem_new_i64(cpu_env,
                                               offsetof(CPUAlphaState,
                                                        shadow[i]),
                                               shadow_names[i]);
    }
#endif

    for (i = 0; i < ARRAY_SIZE(vars); ++i) {
        const GlobalVar *v = &vars[i];
        *v->var = tcg_global_mem_new_i64(cpu_env, v->ofs, v->name);
    }
}

static TCGv load_zero(DisasContext *ctx)
{
    if (!ctx->zero) {
        ctx->zero = tcg_const_i64(0);
    }
    return ctx->zero;
}

static TCGv dest_sink(DisasContext *ctx)
{
    if (!ctx->sink) {
        ctx->sink = tcg_temp_new();
    }
    return ctx->sink;
}

static void free_context_temps(DisasContext *ctx)
{
    if (ctx->sink) {
        tcg_gen_discard_i64(ctx->sink);
        tcg_temp_free(ctx->sink);
        ctx->sink = NULL;
    }
    if (ctx->zero) {
        tcg_temp_free(ctx->zero);
        ctx->zero = NULL;
    }
    if (ctx->lit) {
        tcg_temp_free(ctx->lit);
        ctx->lit = NULL;
    }
}
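
/*
 * Register accessors.  Reads of $31/$f31 return a cached zero temporary,
 * and writes to them go to a scratch "sink" temporary; both are released
 * by free_context_temps() once the instruction has been translated.
 */
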
static TCGv load_gpr(DisasContext *ctx, unsigned reg)
{
    if (likely(reg < 31)) {
        return ctx->ir[reg];
    } else {
        return load_zero(ctx);
    }
}

static TCGv load_gpr_lit(DisasContext *ctx, unsigned reg,
                         uint8_t lit, bool islit)
{
    if (islit) {
        ctx->lit = tcg_const_i64(lit);
        return ctx->lit;
    } else if (likely(reg < 31)) {
        return ctx->ir[reg];
    } else {
        return load_zero(ctx);
    }
}

static TCGv dest_gpr(DisasContext *ctx, unsigned reg)
{
    if (likely(reg < 31)) {
        return ctx->ir[reg];
    } else {
        return dest_sink(ctx);
    }
}

static TCGv load_fpr(DisasContext *ctx, unsigned reg)
{
    if (likely(reg < 31)) {
        return cpu_fir[reg];
    } else {
        return load_zero(ctx);
    }
}

static TCGv dest_fpr(DisasContext *ctx, unsigned reg)
{
    if (likely(reg < 31)) {
        return cpu_fir[reg];
    } else {
        return dest_sink(ctx);
    }
}

static int get_flag_ofs(unsigned shift)
{
    int ofs = offsetof(CPUAlphaState, flags);
#ifdef HOST_WORDS_BIGENDIAN
    ofs += 3 - (shift / 8);
#else
    ofs += shift / 8;
#endif
    return ofs;
}
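
/*
 * The flags word of CPUAlphaState packs several byte-wide flags;
 * ld_flag_byte/st_flag_byte below access a single one in place.  For
 * example, a flag with shift 8 lives at byte offset flags+1 on a
 * little-endian host but flags+2 on a big-endian one, which is what
 * get_flag_ofs accounts for.
 */
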
static void ld_flag_byte(TCGv val, unsigned shift)
{
    tcg_gen_ld8u_i64(val, cpu_env, get_flag_ofs(shift));
}

static void st_flag_byte(TCGv val, unsigned shift)
{
    tcg_gen_st8_i64(val, cpu_env, get_flag_ofs(shift));
}

static void gen_excp_1(int exception, int error_code)
{
    TCGv_i32 tmp1, tmp2;

    tmp1 = tcg_const_i32(exception);
    tmp2 = tcg_const_i32(error_code);
    gen_helper_excp(cpu_env, tmp1, tmp2);
    tcg_temp_free_i32(tmp2);
    tcg_temp_free_i32(tmp1);
}

static DisasJumpType gen_excp(DisasContext *ctx, int exception, int error_code)
{
    tcg_gen_movi_i64(cpu_pc, ctx->base.pc_next);
    gen_excp_1(exception, error_code);
    return DISAS_NORETURN;
}

static inline DisasJumpType gen_invalid(DisasContext *ctx)
{
    return gen_excp(ctx, EXCP_OPCDEC, 0);
}

static inline void gen_qemu_ldf(TCGv t0, TCGv t1, int flags)
{
    TCGv_i32 tmp32 = tcg_temp_new_i32();
    tcg_gen_qemu_ld_i32(tmp32, t1, flags, MO_LEUL);
    gen_helper_memory_to_f(t0, tmp32);
    tcg_temp_free_i32(tmp32);
}

static inline void gen_qemu_ldg(TCGv t0, TCGv t1, int flags)
{
    TCGv tmp = tcg_temp_new();
    tcg_gen_qemu_ld_i64(tmp, t1, flags, MO_LEQ);
    gen_helper_memory_to_g(t0, tmp);
    tcg_temp_free(tmp);
}

static inline void gen_qemu_lds(TCGv t0, TCGv t1, int flags)
{
    TCGv_i32 tmp32 = tcg_temp_new_i32();
    tcg_gen_qemu_ld_i32(tmp32, t1, flags, MO_LEUL);
    gen_helper_memory_to_s(t0, tmp32);
    tcg_temp_free_i32(tmp32);
}

static inline void gen_qemu_ldl_l(TCGv t0, TCGv t1, int flags)
{
    tcg_gen_qemu_ld_i64(t0, t1, flags, MO_LESL);
    tcg_gen_mov_i64(cpu_lock_addr, t1);
    tcg_gen_mov_i64(cpu_lock_value, t0);
}

static inline void gen_qemu_ldq_l(TCGv t0, TCGv t1, int flags)
{
    tcg_gen_qemu_ld_i64(t0, t1, flags, MO_LEQ);
    tcg_gen_mov_i64(cpu_lock_addr, t1);
    tcg_gen_mov_i64(cpu_lock_value, t0);
}

static inline void gen_load_mem(DisasContext *ctx,
                                void (*tcg_gen_qemu_load)(TCGv t0, TCGv t1,
                                                          int flags),
                                int ra, int rb, int32_t disp16, bool fp,
                                bool clear)
{
    TCGv tmp, addr, va;

    /* LDQ_U with ra $31 is UNOP.  Other various loads are forms of
       prefetches, which we can treat as nops.  No worries about
       missed exceptions here.  */
    if (unlikely(ra == 31)) {
        return;
    }

    tmp = tcg_temp_new();
    addr = load_gpr(ctx, rb);

    if (disp16) {
        tcg_gen_addi_i64(tmp, addr, disp16);
        addr = tmp;
    }
    if (clear) {
        tcg_gen_andi_i64(tmp, addr, ~0x7);
        addr = tmp;
    }

    va = (fp ? cpu_fir[ra] : ctx->ir[ra]);
    tcg_gen_qemu_load(va, addr, ctx->mem_idx);

    tcg_temp_free(tmp);
}

static inline void gen_qemu_stf(TCGv t0, TCGv t1, int flags)
{
    TCGv_i32 tmp32 = tcg_temp_new_i32();
    gen_helper_f_to_memory(tmp32, t0);
    tcg_gen_qemu_st_i32(tmp32, t1, flags, MO_LEUL);
    tcg_temp_free_i32(tmp32);
}

static inline void gen_qemu_stg(TCGv t0, TCGv t1, int flags)
{
    TCGv tmp = tcg_temp_new();
    gen_helper_g_to_memory(tmp, t0);
    tcg_gen_qemu_st_i64(tmp, t1, flags, MO_LEQ);
    tcg_temp_free(tmp);
}

static inline void gen_qemu_sts(TCGv t0, TCGv t1, int flags)
{
    TCGv_i32 tmp32 = tcg_temp_new_i32();
    gen_helper_s_to_memory(tmp32, t0);
    tcg_gen_qemu_st_i32(tmp32, t1, flags, MO_LEUL);
    tcg_temp_free_i32(tmp32);
}

static inline void gen_store_mem(DisasContext *ctx,
                                 void (*tcg_gen_qemu_store)(TCGv t0, TCGv t1,
                                                            int flags),
                                 int ra, int rb, int32_t disp16, bool fp,
                                 bool clear)
{
    TCGv tmp, addr, va;

    tmp = tcg_temp_new();
    addr = load_gpr(ctx, rb);

    if (disp16) {
        tcg_gen_addi_i64(tmp, addr, disp16);
        addr = tmp;
    }
    if (clear) {
        tcg_gen_andi_i64(tmp, addr, ~0x7);
        addr = tmp;
    }

    va = (fp ? load_fpr(ctx, ra) : load_gpr(ctx, ra));
    tcg_gen_qemu_store(va, addr, ctx->mem_idx);

    tcg_temp_free(tmp);
}

static DisasJumpType gen_store_conditional(DisasContext *ctx, int ra, int rb,
                                           int32_t disp16, int mem_idx,
                                           MemOp op)
{
    TCGLabel *lab_fail, *lab_done;
    TCGv addr, val;

    addr = tcg_temp_new_i64();
    tcg_gen_addi_i64(addr, load_gpr(ctx, rb), disp16);
    free_context_temps(ctx);

    lab_fail = gen_new_label();
    lab_done = gen_new_label();
    tcg_gen_brcond_i64(TCG_COND_NE, addr, cpu_lock_addr, lab_fail);
    tcg_temp_free_i64(addr);

    val = tcg_temp_new_i64();
    tcg_gen_atomic_cmpxchg_i64(val, cpu_lock_addr, cpu_lock_value,
                               load_gpr(ctx, ra), mem_idx, op);
    free_context_temps(ctx);

    if (ra != 31) {
        tcg_gen_setcond_i64(TCG_COND_EQ, ctx->ir[ra], val, cpu_lock_value);
    }
    tcg_temp_free_i64(val);
    tcg_gen_br(lab_done);

    gen_set_label(lab_fail);
    if (ra != 31) {
        tcg_gen_movi_i64(ctx->ir[ra], 0);
    }

    gen_set_label(lab_done);
    tcg_gen_movi_i64(cpu_lock_addr, -1);
    return DISAS_NEXT;
}
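
/*
 * The store-conditional above is implemented with an atomic compare-and-swap:
 * SC succeeds only if memory still contains the value observed by the paired
 * load-locked.  cpu_lock_addr is reset to -1 afterward so that an SC without
 * a preceding LL always fails.
 */
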
static bool in_superpage(DisasContext *ctx, int64_t addr)
{
#ifndef CONFIG_USER_ONLY
    return ((ctx->tbflags & ENV_FLAG_PS_USER) == 0
            && addr >> TARGET_VIRT_ADDR_SPACE_BITS == -1
            && ((addr >> 41) & 3) == 2);
#else
    return false;
#endif
}

static bool use_exit_tb(DisasContext *ctx)
{
    return ((tb_cflags(ctx->base.tb) & CF_LAST_IO)
            || ctx->base.singlestep_enabled
            || singlestep);
}

static bool use_goto_tb(DisasContext *ctx, uint64_t dest)
{
    /* Suppress goto_tb in the case of single-stepping and IO.  */
    if (unlikely(use_exit_tb(ctx))) {
        return false;
    }
#ifndef CONFIG_USER_ONLY
    /* If the destination is in the superpage, the page perms can't change.  */
    if (in_superpage(ctx, dest)) {
        return true;
    }
    /* Check for the dest on the same page as the start of the TB.  */
    return ((ctx->base.tb->pc ^ dest) & TARGET_PAGE_MASK) == 0;
#else
    return true;
#endif
}

static DisasJumpType gen_bdirect(DisasContext *ctx, int ra, int32_t disp)
{
    uint64_t dest = ctx->base.pc_next + (disp << 2);

    if (ra != 31) {
        tcg_gen_movi_i64(ctx->ir[ra], ctx->base.pc_next);
    }

    /* Notice branch-to-next; used to initialize RA with the PC.  */
    if (disp == 0) {
        return DISAS_NEXT;
    } else if (use_goto_tb(ctx, dest)) {
        tcg_gen_goto_tb(0);
        tcg_gen_movi_i64(cpu_pc, dest);
        tcg_gen_exit_tb(ctx->base.tb, 0);
        return DISAS_NORETURN;
    } else {
        tcg_gen_movi_i64(cpu_pc, dest);
        return DISAS_PC_UPDATED;
    }
}

static DisasJumpType gen_bcond_internal(DisasContext *ctx, TCGCond cond,
                                        TCGv cmp, int32_t disp)
{
    uint64_t dest = ctx->base.pc_next + (disp << 2);
    TCGLabel *lab_true = gen_new_label();

    if (use_goto_tb(ctx, dest)) {
        tcg_gen_brcondi_i64(cond, cmp, 0, lab_true);

        tcg_gen_goto_tb(0);
        tcg_gen_movi_i64(cpu_pc, ctx->base.pc_next);
        tcg_gen_exit_tb(ctx->base.tb, 0);

        gen_set_label(lab_true);
        tcg_gen_goto_tb(1);
        tcg_gen_movi_i64(cpu_pc, dest);
        tcg_gen_exit_tb(ctx->base.tb, 1);

        return DISAS_NORETURN;
    } else {
        TCGv_i64 z = tcg_const_i64(0);
        TCGv_i64 d = tcg_const_i64(dest);
        TCGv_i64 p = tcg_const_i64(ctx->base.pc_next);

        tcg_gen_movcond_i64(cond, cpu_pc, cmp, z, d, p);

        tcg_temp_free_i64(z);
        tcg_temp_free_i64(d);
        tcg_temp_free_i64(p);
        return DISAS_PC_UPDATED;
    }
}

static DisasJumpType gen_bcond(DisasContext *ctx, TCGCond cond, int ra,
                               int32_t disp, int mask)
{
    if (mask) {
        TCGv tmp = tcg_temp_new();
        DisasJumpType ret;

        tcg_gen_andi_i64(tmp, load_gpr(ctx, ra), 1);
        ret = gen_bcond_internal(ctx, cond, tmp, disp);
        tcg_temp_free(tmp);
        return ret;
    }
    return gen_bcond_internal(ctx, cond, load_gpr(ctx, ra), disp);
}

/* Fold -0.0 for comparison with COND.  */

static void gen_fold_mzero(TCGCond cond, TCGv dest, TCGv src)
{
    uint64_t mzero = 1ull << 63;

    switch (cond) {
    case TCG_COND_LE:
    case TCG_COND_GT:
        /* For <= or >, the -0.0 value directly compares the way we want.  */
        tcg_gen_mov_i64(dest, src);
        break;

    case TCG_COND_EQ:
    case TCG_COND_NE:
        /* For == or !=, we can simply mask off the sign bit and compare.  */
        tcg_gen_andi_i64(dest, src, mzero - 1);
        break;

    case TCG_COND_GE:
    case TCG_COND_LT:
        /* For >= or <, map -0.0 to +0.0 via comparison and mask.  */
        tcg_gen_setcondi_i64(TCG_COND_NE, dest, src, mzero);
        tcg_gen_neg_i64(dest, dest);
        tcg_gen_and_i64(dest, dest, src);
        break;

    default:
        abort();
    }
}

static DisasJumpType gen_fbcond(DisasContext *ctx, TCGCond cond, int ra,
                                int32_t disp)
{
    TCGv cmp_tmp = tcg_temp_new();
    DisasJumpType ret;

    gen_fold_mzero(cond, cmp_tmp, load_fpr(ctx, ra));
    ret = gen_bcond_internal(ctx, cond, cmp_tmp, disp);
    tcg_temp_free(cmp_tmp);
    return ret;
}

static void gen_fcmov(DisasContext *ctx, TCGCond cond, int ra, int rb, int rc)
{
    TCGv_i64 va, vb, z;

    z = load_zero(ctx);
    vb = load_fpr(ctx, rb);
    va = tcg_temp_new();
    gen_fold_mzero(cond, va, load_fpr(ctx, ra));

    tcg_gen_movcond_i64(cond, dest_fpr(ctx, rc), va, z, vb, load_fpr(ctx, rc));

    tcg_temp_free(va);
}

#define QUAL_RM_N       0x080   /* Round mode nearest even */
#define QUAL_RM_C       0x000   /* Round mode chopped */
#define QUAL_RM_M       0x040   /* Round mode minus infinity */
#define QUAL_RM_D       0x0c0   /* Round mode dynamic */
#define QUAL_RM_MASK    0x0c0

#define QUAL_U          0x100   /* Underflow enable (fp output) */
#define QUAL_V          0x100   /* Overflow enable (int output) */
#define QUAL_S          0x400   /* Software completion enable */
#define QUAL_I          0x200   /* Inexact detection enable */
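
/*
 * The rounding mode and flush-to-zero state are tracked per TB via
 * ctx->tb_rm and ctx->tb_ftz, so a run of FP instructions using the same
 * qualifiers updates fp_status only once.
 */
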
static void gen_qual_roundmode(DisasContext *ctx, int fn11)
{
    TCGv_i32 tmp;

    fn11 &= QUAL_RM_MASK;
    if (fn11 == ctx->tb_rm) {
        return;
    }
    ctx->tb_rm = fn11;

    tmp = tcg_temp_new_i32();
    switch (fn11) {
    case QUAL_RM_N:
        tcg_gen_movi_i32(tmp, float_round_nearest_even);
        break;
    case QUAL_RM_C:
        tcg_gen_movi_i32(tmp, float_round_to_zero);
        break;
    case QUAL_RM_M:
        tcg_gen_movi_i32(tmp, float_round_down);
        break;
    case QUAL_RM_D:
        tcg_gen_ld8u_i32(tmp, cpu_env,
                         offsetof(CPUAlphaState, fpcr_dyn_round));
        break;
    }

#if defined(CONFIG_SOFTFLOAT_INLINE)
    /* ??? The "fpu/softfloat.h" interface is to call set_float_rounding_mode.
       With CONFIG_SOFTFLOAT that expands to an out-of-line call that just
       sets the one field.  */
    tcg_gen_st8_i32(tmp, cpu_env,
                    offsetof(CPUAlphaState, fp_status.float_rounding_mode));
#else
    gen_helper_setroundmode(tmp);
#endif

    tcg_temp_free_i32(tmp);
}

static void gen_qual_flushzero(DisasContext *ctx, int fn11)
{
    TCGv_i32 tmp;

    fn11 &= QUAL_U;
    if (fn11 == ctx->tb_ftz) {
        return;
    }
    ctx->tb_ftz = fn11;

    tmp = tcg_temp_new_i32();
    if (fn11) {
        /* Underflow is enabled, use the FPCR setting.  */
        tcg_gen_ld8u_i32(tmp, cpu_env,
                         offsetof(CPUAlphaState, fpcr_flush_to_zero));
    } else {
        /* Underflow is disabled, force flush-to-zero.  */
        tcg_gen_movi_i32(tmp, 1);
    }

#if defined(CONFIG_SOFTFLOAT_INLINE)
    tcg_gen_st8_i32(tmp, cpu_env,
                    offsetof(CPUAlphaState, fp_status.flush_to_zero));
#else
    gen_helper_setflushzero(tmp);
#endif

    tcg_temp_free_i32(tmp);
}

static TCGv gen_ieee_input(DisasContext *ctx, int reg, int fn11, int is_cmp)
{
    TCGv val;

    if (unlikely(reg == 31)) {
        val = load_zero(ctx);
    } else {
        val = cpu_fir[reg];
        if ((fn11 & QUAL_S) == 0) {
            if (is_cmp) {
                gen_helper_ieee_input_cmp(cpu_env, val);
            } else {
                gen_helper_ieee_input(cpu_env, val);
            }
        } else {
#ifndef CONFIG_USER_ONLY
            /* In system mode, raise exceptions for denormals like real
               hardware.  In user mode, proceed as if the OS completion
               handler is handling the denormal as per spec.  */
            gen_helper_ieee_input_s(cpu_env, val);
#endif
        }
    }
    return val;
}

static void gen_fp_exc_raise(int rc, int fn11)
{
    /* ??? We ought to be able to do something with imprecise exceptions.
       E.g. notice we're still in the trap shadow of something within the
       TB and do not generate the code to signal the exception; end the TB
       when an exception is forced to arrive, either by consumption of a
       register value or TRAPB or EXCB.  */
    TCGv_i32 reg, ign;
    uint32_t ignore = 0;

    if (!(fn11 & QUAL_U)) {
        /* Note that QUAL_U == QUAL_V, so ignore either.  */
        ignore |= FPCR_UNF | FPCR_IOV;
    }
    if (!(fn11 & QUAL_I)) {
        ignore |= FPCR_INE;
    }
    ign = tcg_const_i32(ignore);

    /* ??? Pass in the regno of the destination so that the helper can
       set EXC_MASK, which contains a bitmask of destination registers
       that have caused arithmetic traps.  A simple userspace emulation
       does not require this.  We do need it for a guest kernel's entArith,
       or if we were to do something clever with imprecise exceptions.  */
    reg = tcg_const_i32(rc + 32);
    if (fn11 & QUAL_S) {
        gen_helper_fp_exc_raise_s(cpu_env, ign, reg);
    } else {
        gen_helper_fp_exc_raise(cpu_env, ign, reg);
    }

    tcg_temp_free_i32(reg);
    tcg_temp_free_i32(ign);
}

static void gen_cvtlq(TCGv vc, TCGv vb)
{
    TCGv tmp = tcg_temp_new();

    /* The arithmetic right shift here, plus the sign-extended mask below
       yields a sign-extended result without an explicit ext32s_i64.  */
    tcg_gen_shri_i64(tmp, vb, 29);
    tcg_gen_sari_i64(vc, vb, 32);
    tcg_gen_deposit_i64(vc, vc, tmp, 0, 30);

    tcg_temp_free(tmp);
}

static void gen_ieee_arith2(DisasContext *ctx,
                            void (*helper)(TCGv, TCGv_ptr, TCGv),
                            int rb, int rc, int fn11)
{
    TCGv vb;

    gen_qual_roundmode(ctx, fn11);
    gen_qual_flushzero(ctx, fn11);

    vb = gen_ieee_input(ctx, rb, fn11, 0);
    helper(dest_fpr(ctx, rc), cpu_env, vb);

    gen_fp_exc_raise(rc, fn11);
}

#define IEEE_ARITH2(name)                                       \
static inline void glue(gen_, name)(DisasContext *ctx,          \
                                    int rb, int rc, int fn11)   \
{                                                               \
    gen_ieee_arith2(ctx, gen_helper_##name, rb, rc, fn11);      \
}
IEEE_ARITH2(sqrts)
IEEE_ARITH2(sqrtt)
IEEE_ARITH2(cvtst)
IEEE_ARITH2(cvtts)

static void gen_cvttq(DisasContext *ctx, int rb, int rc, int fn11)
{
    TCGv vb, vc;

    /* No need to set flushzero, since we have an integer output.  */
    vb = gen_ieee_input(ctx, rb, fn11, 0);
    vc = dest_fpr(ctx, rc);

    /* Almost all integer conversions use cropped rounding;
       special case that.  */
    if ((fn11 & QUAL_RM_MASK) == QUAL_RM_C) {
        gen_helper_cvttq_c(vc, cpu_env, vb);
    } else {
        gen_qual_roundmode(ctx, fn11);
        gen_helper_cvttq(vc, cpu_env, vb);
    }
    gen_fp_exc_raise(rc, fn11);
}

static void gen_ieee_intcvt(DisasContext *ctx,
                            void (*helper)(TCGv, TCGv_ptr, TCGv),
                            int rb, int rc, int fn11)
{
    TCGv vb, vc;

    gen_qual_roundmode(ctx, fn11);
    vb = load_fpr(ctx, rb);
    vc = dest_fpr(ctx, rc);

    /* The only exception that can be raised by integer conversion
       is inexact.  Thus we only need to worry about exceptions when
       inexact handling is requested.  */
    if (fn11 & QUAL_I) {
        helper(vc, cpu_env, vb);
        gen_fp_exc_raise(rc, fn11);
    } else {
        helper(vc, cpu_env, vb);
    }
}

#define IEEE_INTCVT(name)                                       \
static inline void glue(gen_, name)(DisasContext *ctx,          \
                                    int rb, int rc, int fn11)   \
{                                                               \
    gen_ieee_intcvt(ctx, gen_helper_##name, rb, rc, fn11);      \
}
IEEE_INTCVT(cvtqs)
IEEE_INTCVT(cvtqt)

static void gen_cpy_mask(TCGv vc, TCGv va, TCGv vb, bool inv_a, uint64_t mask)
{
    TCGv vmask = tcg_const_i64(mask);
    TCGv tmp = tcg_temp_new_i64();

    if (inv_a) {
        tcg_gen_andc_i64(tmp, vmask, va);
    } else {
        tcg_gen_and_i64(tmp, va, vmask);
    }

    tcg_gen_andc_i64(vc, vb, vmask);
    tcg_gen_or_i64(vc, vc, tmp);

    tcg_temp_free(vmask);
    tcg_temp_free(tmp);
}

static void gen_ieee_arith3(DisasContext *ctx,
                            void (*helper)(TCGv, TCGv_ptr, TCGv, TCGv),
                            int ra, int rb, int rc, int fn11)
{
    TCGv va, vb, vc;

    gen_qual_roundmode(ctx, fn11);
    gen_qual_flushzero(ctx, fn11);

    va = gen_ieee_input(ctx, ra, fn11, 0);
    vb = gen_ieee_input(ctx, rb, fn11, 0);
    vc = dest_fpr(ctx, rc);
    helper(vc, cpu_env, va, vb);

    gen_fp_exc_raise(rc, fn11);
}

#define IEEE_ARITH3(name)                                               \
static inline void glue(gen_, name)(DisasContext *ctx,                  \
                                    int ra, int rb, int rc, int fn11)   \
{                                                                       \
    gen_ieee_arith3(ctx, gen_helper_##name, ra, rb, rc, fn11);          \
}
IEEE_ARITH3(adds)
IEEE_ARITH3(subs)
IEEE_ARITH3(muls)
IEEE_ARITH3(divs)
IEEE_ARITH3(addt)
IEEE_ARITH3(subt)
IEEE_ARITH3(mult)
IEEE_ARITH3(divt)

static void gen_ieee_compare(DisasContext *ctx,
                             void (*helper)(TCGv, TCGv_ptr, TCGv, TCGv),
                             int ra, int rb, int rc, int fn11)
{
    TCGv va, vb, vc;

    va = gen_ieee_input(ctx, ra, fn11, 1);
    vb = gen_ieee_input(ctx, rb, fn11, 1);
    vc = dest_fpr(ctx, rc);
    helper(vc, cpu_env, va, vb);

    gen_fp_exc_raise(rc, fn11);
}

#define IEEE_CMP3(name)                                                 \
static inline void glue(gen_, name)(DisasContext *ctx,                  \
                                    int ra, int rb, int rc, int fn11)   \
{                                                                       \
    gen_ieee_compare(ctx, gen_helper_##name, ra, rb, rc, fn11);         \
}
IEEE_CMP3(cmptun)
IEEE_CMP3(cmpteq)
IEEE_CMP3(cmptlt)
IEEE_CMP3(cmptle)

static inline uint64_t zapnot_mask(uint8_t lit)
{
    uint64_t mask = 0;
    int i;

    for (i = 0; i < 8; ++i) {
        if ((lit >> i) & 1) {
            mask |= 0xffull << (i * 8);
        }
    }
    return mask;
}
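
/*
 * For example, zapnot_mask(0x0f) == 0x00000000ffffffffull and
 * zapnot_mask(0x03) == 0xffff: each set bit of the 8-bit literal keeps the
 * corresponding byte of the 64-bit operand.
 */
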
/* Implement zapnot with an immediate operand, which expands to some
   form of immediate AND.  This is a basic building block in the
   definition of many of the other byte manipulation instructions.  */
static void gen_zapnoti(TCGv dest, TCGv src, uint8_t lit)
{
    switch (lit) {
    case 0x00:
        tcg_gen_movi_i64(dest, 0);
        break;
    case 0x01:
        tcg_gen_ext8u_i64(dest, src);
        break;
    case 0x03:
        tcg_gen_ext16u_i64(dest, src);
        break;
    case 0x0f:
        tcg_gen_ext32u_i64(dest, src);
        break;
    case 0xff:
        tcg_gen_mov_i64(dest, src);
        break;
    default:
        tcg_gen_andi_i64(dest, src, zapnot_mask(lit));
        break;
    }
}

/* EXTWH, EXTLH, EXTQH */
static void gen_ext_h(DisasContext *ctx, TCGv vc, TCGv va, int rb, bool islit,
                      uint8_t lit, uint8_t byte_mask)
{
    if (islit) {
        int pos = (64 - lit * 8) & 0x3f;
        int len = cto32(byte_mask) * 8;
        if (pos < len) {
            tcg_gen_deposit_z_i64(vc, va, pos, len - pos);
        } else {
            tcg_gen_movi_i64(vc, 0);
        }
    } else {
        TCGv tmp = tcg_temp_new();
        tcg_gen_shli_i64(tmp, load_gpr(ctx, rb), 3);
        tcg_gen_neg_i64(tmp, tmp);
        tcg_gen_andi_i64(tmp, tmp, 0x3f);
        tcg_gen_shl_i64(vc, va, tmp);
        tcg_temp_free(tmp);
    }
    gen_zapnoti(vc, vc, byte_mask);
}

/* EXTBL, EXTWL, EXTLL, EXTQL */
static void gen_ext_l(DisasContext *ctx, TCGv vc, TCGv va, int rb, bool islit,
                      uint8_t lit, uint8_t byte_mask)
{
    if (islit) {
        int pos = (lit & 7) * 8;
        int len = cto32(byte_mask) * 8;
        if (pos + len >= 64) {
            len = 64 - pos;
        }
        tcg_gen_extract_i64(vc, va, pos, len);
    } else {
        TCGv tmp = tcg_temp_new();
        tcg_gen_andi_i64(tmp, load_gpr(ctx, rb), 7);
        tcg_gen_shli_i64(tmp, tmp, 3);
        tcg_gen_shr_i64(vc, va, tmp);
        tcg_temp_free(tmp);
        gen_zapnoti(vc, vc, byte_mask);
    }
}

/* INSWH, INSLH, INSQH */
static void gen_ins_h(DisasContext *ctx, TCGv vc, TCGv va, int rb, bool islit,
                      uint8_t lit, uint8_t byte_mask)
{
    if (islit) {
        int pos = 64 - (lit & 7) * 8;
        int len = cto32(byte_mask) * 8;
        if (pos < len) {
            tcg_gen_extract_i64(vc, va, pos, len - pos);
        } else {
            tcg_gen_movi_i64(vc, 0);
        }
    } else {
        TCGv tmp = tcg_temp_new();
        TCGv shift = tcg_temp_new();

        /* The instruction description has us left-shift the byte mask
           and extract bits <15:8> and apply that zap at the end.  This
           is equivalent to simply performing the zap first and shifting
           afterward.  */
        gen_zapnoti(tmp, va, byte_mask);

        /* If (B & 7) == 0, we need to shift by 64 and leave a zero.  Do this
           portably by splitting the shift into two parts: shift_count-1 and 1.
           Arrange for the -1 by using ones-complement instead of
           twos-complement in the negation: ~(B * 8) & 63.  */

        tcg_gen_shli_i64(shift, load_gpr(ctx, rb), 3);
        tcg_gen_not_i64(shift, shift);
        tcg_gen_andi_i64(shift, shift, 0x3f);

        tcg_gen_shr_i64(vc, tmp, shift);
        tcg_gen_shri_i64(vc, vc, 1);
        tcg_temp_free(shift);
        tcg_temp_free(tmp);
    }
}

/* INSBL, INSWL, INSLL, INSQL */
static void gen_ins_l(DisasContext *ctx, TCGv vc, TCGv va, int rb, bool islit,
                      uint8_t lit, uint8_t byte_mask)
{
    if (islit) {
        int pos = (lit & 7) * 8;
        int len = cto32(byte_mask) * 8;
        if (pos + len > 64) {
            len = 64 - pos;
        }
        tcg_gen_deposit_z_i64(vc, va, pos, len);
    } else {
        TCGv tmp = tcg_temp_new();
        TCGv shift = tcg_temp_new();

        /* The instruction description has us left-shift the byte mask
           and extract bits <15:8> and apply that zap at the end.  This
           is equivalent to simply performing the zap first and shifting
           afterward.  */
        gen_zapnoti(tmp, va, byte_mask);

        tcg_gen_andi_i64(shift, load_gpr(ctx, rb), 7);
        tcg_gen_shli_i64(shift, shift, 3);
        tcg_gen_shl_i64(vc, tmp, shift);
        tcg_temp_free(shift);
        tcg_temp_free(tmp);
    }
}

/* MSKWH, MSKLH, MSKQH */
static void gen_msk_h(DisasContext *ctx, TCGv vc, TCGv va, int rb, bool islit,
                      uint8_t lit, uint8_t byte_mask)
{
    if (islit) {
        gen_zapnoti(vc, va, ~((byte_mask << (lit & 7)) >> 8));
    } else {
        TCGv shift = tcg_temp_new();
        TCGv mask = tcg_temp_new();

        /* The instruction description is as above, where the byte_mask
           is shifted left, and then we extract bits <15:8>.  This can be
           emulated with a right-shift on the expanded byte mask.  This
           requires extra care because for an input <2:0> == 0 we need a
           shift of 64 bits in order to generate a zero.  This is done by
           splitting the shift into two parts, the variable shift - 1
           followed by a constant 1 shift.  The code we expand below is
           equivalent to ~(B * 8) & 63.  */

        tcg_gen_shli_i64(shift, load_gpr(ctx, rb), 3);
        tcg_gen_not_i64(shift, shift);
        tcg_gen_andi_i64(shift, shift, 0x3f);
        tcg_gen_movi_i64(mask, zapnot_mask(byte_mask));
        tcg_gen_shr_i64(mask, mask, shift);
        tcg_gen_shri_i64(mask, mask, 1);

        tcg_gen_andc_i64(vc, va, mask);

        tcg_temp_free(mask);
        tcg_temp_free(shift);
    }
}

/* MSKBL, MSKWL, MSKLL, MSKQL */
static void gen_msk_l(DisasContext *ctx, TCGv vc, TCGv va, int rb, bool islit,
                      uint8_t lit, uint8_t byte_mask)
{
    if (islit) {
        gen_zapnoti(vc, va, ~(byte_mask << (lit & 7)));
    } else {
        TCGv shift = tcg_temp_new();
        TCGv mask = tcg_temp_new();

        tcg_gen_andi_i64(shift, load_gpr(ctx, rb), 7);
        tcg_gen_shli_i64(shift, shift, 3);
        tcg_gen_movi_i64(mask, zapnot_mask(byte_mask));
        tcg_gen_shl_i64(mask, mask, shift);

        tcg_gen_andc_i64(vc, va, mask);

        tcg_temp_free(mask);
        tcg_temp_free(shift);
    }
}
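
/* RS/RC: read the current RX flag into RA (unless RA is $31), then set (RS)
   or clear (RC) it.  */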
static void gen_rx(DisasContext *ctx, int ra, int set)
{
    TCGv tmp;

    if (ra != 31) {
        ld_flag_byte(ctx->ir[ra], ENV_FLAG_RX_SHIFT);
    }

    tmp = tcg_const_i64(set);
    st_flag_byte(tmp, ENV_FLAG_RX_SHIFT);
    tcg_temp_free(tmp);
}
static DisasJumpType gen_call_pal(DisasContext *ctx, int palcode)
{
    /* We're emulating OSF/1 PALcode.  Many of these are trivial access
       to internal cpu registers.  */

    /* Unprivileged PAL call */
    if (palcode >= 0x80 && palcode < 0xC0) {
        switch (palcode) {
        case 0x86:
            /* IMB */
            /* No-op inside QEMU.  */
            break;
        case 0x9E:
            /* RDUNIQUE */
            tcg_gen_ld_i64(ctx->ir[IR_V0], cpu_env,
                           offsetof(CPUAlphaState, unique));
            break;
        case 0x9F:
            /* WRUNIQUE */
            tcg_gen_st_i64(ctx->ir[IR_A0], cpu_env,
                           offsetof(CPUAlphaState, unique));
            break;
        default:
            palcode &= 0xbf;
            goto do_call_pal;
        }
        return DISAS_NEXT;
    }

#ifndef CONFIG_USER_ONLY
    /* Privileged PAL code */
    if (palcode < 0x40 && (ctx->tbflags & ENV_FLAG_PS_USER) == 0) {
        switch (palcode) {
        case 0x01:
            /* CFLUSH */
            /* No-op inside QEMU.  */
            break;
        case 0x02:
            /* DRAINA */
            /* No-op inside QEMU.  */
            break;
        case 0x2D:
            /* WRVPTPTR */
            tcg_gen_st_i64(ctx->ir[IR_A0], cpu_env,
                           offsetof(CPUAlphaState, vptptr));
            break;
        case 0x31:
            /* WRVAL */
            tcg_gen_st_i64(ctx->ir[IR_A0], cpu_env,
                           offsetof(CPUAlphaState, sysval));
            break;
        case 0x32:
            /* RDVAL */
            tcg_gen_ld_i64(ctx->ir[IR_V0], cpu_env,
                           offsetof(CPUAlphaState, sysval));
            break;

        case 0x35:
            /* SWPIPL */
            /* Note that we already know we're in kernel mode, so we know
               that PS only contains the 3 IPL bits.  */
            ld_flag_byte(ctx->ir[IR_V0], ENV_FLAG_PS_SHIFT);

            /* But make sure and store only the 3 IPL bits from the user.  */
            {
                TCGv tmp = tcg_temp_new();
                tcg_gen_andi_i64(tmp, ctx->ir[IR_A0], PS_INT_MASK);
                st_flag_byte(tmp, ENV_FLAG_PS_SHIFT);
                tcg_temp_free(tmp);
            }

            /* Allow interrupts to be recognized right away.  */
            tcg_gen_movi_i64(cpu_pc, ctx->base.pc_next);
            return DISAS_PC_UPDATED_NOCHAIN;

        case 0x36:
            /* RDPS */
            ld_flag_byte(ctx->ir[IR_V0], ENV_FLAG_PS_SHIFT);
            break;

        case 0x38:
            /* WRUSP */
            tcg_gen_st_i64(ctx->ir[IR_A0], cpu_env,
                           offsetof(CPUAlphaState, usp));
            break;
        case 0x3A:
            /* RDUSP */
            tcg_gen_ld_i64(ctx->ir[IR_V0], cpu_env,
                           offsetof(CPUAlphaState, usp));
            break;
        case 0x3C:
            /* WHAMI */
            tcg_gen_ld32s_i64(ctx->ir[IR_V0], cpu_env,
                -offsetof(AlphaCPU, env) + offsetof(CPUState, cpu_index));
            break;

        case 0x3E:
            /* WTINT */
            {
                TCGv_i32 tmp = tcg_const_i32(1);
                tcg_gen_st_i32(tmp, cpu_env, -offsetof(AlphaCPU, env) +
                                             offsetof(CPUState, halted));
                tcg_temp_free_i32(tmp);
            }
            tcg_gen_movi_i64(ctx->ir[IR_V0], 0);
            return gen_excp(ctx, EXCP_HALTED, 0);

        default:
            palcode &= 0x3f;
            goto do_call_pal;
        }
        return DISAS_NEXT;
    }
#endif
    return gen_invalid(ctx);

 do_call_pal:
#ifdef CONFIG_USER_ONLY
    return gen_excp(ctx, EXCP_CALL_PAL, palcode);
#else
    {
        TCGv tmp = tcg_temp_new();
        uint64_t exc_addr = ctx->base.pc_next;
        uint64_t entry = ctx->palbr;

        if (ctx->tbflags & ENV_FLAG_PAL_MODE) {
            exc_addr |= 1;
        } else {
            tcg_gen_movi_i64(tmp, 1);
            st_flag_byte(tmp, ENV_FLAG_PAL_SHIFT);
        }

        tcg_gen_movi_i64(tmp, exc_addr);
        tcg_gen_st_i64(tmp, cpu_env, offsetof(CPUAlphaState, exc_addr));
        tcg_temp_free(tmp);

        entry += (palcode & 0x80
                  ? 0x2000 + (palcode - 0x80) * 64
                  : 0x1000 + palcode * 64);

        /* Since the destination is running in PALmode, we don't really
           need the page permissions check.  We'll see the existence of
           the page when we create the TB, and we'll flush all TBs if
           we change the PAL base register.  */
        if (!use_exit_tb(ctx)) {
            tcg_gen_goto_tb(0);
            tcg_gen_movi_i64(cpu_pc, entry);
            tcg_gen_exit_tb(ctx->base.tb, 0);
            return DISAS_NORETURN;
        } else {
            tcg_gen_movi_i64(cpu_pc, entry);
            return DISAS_PC_UPDATED;
        }
    }
#endif
}

#ifndef CONFIG_USER_ONLY

#define PR_LONG         0x200000

static int cpu_pr_data(int pr)
{
    switch (pr) {
    case  2: return offsetof(CPUAlphaState, pcc_ofs) | PR_LONG;
    case  3: return offsetof(CPUAlphaState, trap_arg0);
    case  4: return offsetof(CPUAlphaState, trap_arg1);
    case  5: return offsetof(CPUAlphaState, trap_arg2);
    case  6: return offsetof(CPUAlphaState, exc_addr);
    case  7: return offsetof(CPUAlphaState, palbr);
    case  8: return offsetof(CPUAlphaState, ptbr);
    case  9: return offsetof(CPUAlphaState, vptptr);
    case 10: return offsetof(CPUAlphaState, unique);
    case 11: return offsetof(CPUAlphaState, sysval);
    case 12: return offsetof(CPUAlphaState, usp);

    case 40 ... 63:
        return offsetof(CPUAlphaState, scratch[pr - 40]);

    case 251:
        return offsetof(CPUAlphaState, alarm_expire);
    }
    return 0;
}

static DisasJumpType gen_mfpr(DisasContext *ctx, TCGv va, int regno)
{
    void (*helper)(TCGv);
    int data;

    switch (regno) {
    case 32 ... 39:
        /* Accessing the "non-shadow" general registers.  */
        regno = regno == 39 ? 25 : regno - 32 + 8;
        tcg_gen_mov_i64(va, cpu_std_ir[regno]);
        break;
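
    /*
     * Reading the wall-clock or VM timer is an I/O operation: under icount
     * it must be preceded by gen_io_start() and must end the TB, hence the
     * DISAS_PC_STALE return below.
     */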
    case 250: /* WALLTIME */
        helper = gen_helper_get_walltime;
        goto do_helper;
    case 249: /* VMTIME */
        helper = gen_helper_get_vmtime;
    do_helper:
        if (tb_cflags(ctx->base.tb) & CF_USE_ICOUNT) {
            gen_io_start();
            helper(va);
            return DISAS_PC_STALE;
        } else {
            helper(va);
        }
        break;

    case 0: /* PS */
        ld_flag_byte(va, ENV_FLAG_PS_SHIFT);
        break;
    case 1: /* FEN */
        ld_flag_byte(va, ENV_FLAG_FEN_SHIFT);
        break;

    default:
        /* The basic registers are data only, and unknown registers
           are read-zero, write-ignore.  */
        data = cpu_pr_data(regno);
        if (data == 0) {
            tcg_gen_movi_i64(va, 0);
        } else if (data & PR_LONG) {
            tcg_gen_ld32s_i64(va, cpu_env, data & ~PR_LONG);
        } else {
            tcg_gen_ld_i64(va, cpu_env, data);
        }
        break;
    }

    return DISAS_NEXT;
}

static DisasJumpType gen_mtpr(DisasContext *ctx, TCGv vb, int regno)
{
    int data;
    DisasJumpType ret = DISAS_NEXT;

    switch (regno) {
    case 255:
        /* TBIA */
        gen_helper_tbia(cpu_env);
        break;

    case 254:
        /* TBIS */
        gen_helper_tbis(cpu_env, vb);
        break;

    case 253:
        /* WAIT */
        {
            TCGv_i32 tmp = tcg_const_i32(1);
            tcg_gen_st_i32(tmp, cpu_env, -offsetof(AlphaCPU, env) +
                                         offsetof(CPUState, halted));
            tcg_temp_free_i32(tmp);
        }
        return gen_excp(ctx, EXCP_HALTED, 0);

    case 252:
        /* HALT */
        gen_helper_halt(vb);
        return DISAS_PC_STALE;
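
    /* Writing the ALARM timer is likewise an I/O operation under icount;
       see the WALLTIME/VMTIME comment in gen_mfpr above.  */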
    case 251:
        /* ALARM */
        if (tb_cflags(ctx->base.tb) & CF_USE_ICOUNT) {
            gen_io_start();
            ret = DISAS_PC_STALE;
        }
        gen_helper_set_alarm(cpu_env, vb);
        break;

    case 7:
        /* PALBR */
        tcg_gen_st_i64(vb, cpu_env, offsetof(CPUAlphaState, palbr));
        /* Changing the PAL base register implies un-chaining all of the TBs
           that ended with a CALL_PAL.  Since the base register usually only
           changes during boot, flushing everything works well.  */
        gen_helper_tb_flush(cpu_env);
        return DISAS_PC_STALE;

    case 32 ... 39:
        /* Accessing the "non-shadow" general registers.  */
        regno = regno == 39 ? 25 : regno - 32 + 8;
        tcg_gen_mov_i64(cpu_std_ir[regno], vb);
        break;

    case 0: /* PS */
        st_flag_byte(vb, ENV_FLAG_PS_SHIFT);
        break;
    case 1: /* FEN */
        st_flag_byte(vb, ENV_FLAG_FEN_SHIFT);
        break;

    default:
        /* The basic registers are data only, and unknown registers
           are read-zero, write-ignore.  */
        data = cpu_pr_data(regno);
        if (data != 0) {
            if (data & PR_LONG) {
                tcg_gen_st32_i64(vb, cpu_env, data & ~PR_LONG);
            } else {
                tcg_gen_st_i64(vb, cpu_env, data);
            }
        }
        break;
    }

    return ret;
}
#endif /* !USER_ONLY */
#define REQUIRE_NO_LIT                          \
    do {                                        \
        if (real_islit) {                       \
            goto invalid_opc;                   \
        }                                       \
    } while (0)

#define REQUIRE_AMASK(FLAG)                     \
    do {                                        \
        if ((ctx->amask & AMASK_##FLAG) == 0) { \
            goto invalid_opc;                   \
        }                                       \
    } while (0)

#define REQUIRE_TB_FLAG(FLAG)                   \
    do {                                        \
        if ((ctx->tbflags & (FLAG)) == 0) {     \
            goto invalid_opc;                   \
        }                                       \
    } while (0)

#define REQUIRE_REG_31(WHICH)                   \
    do {                                        \
        if (WHICH != 31) {                      \
            goto invalid_opc;                   \
        }                                       \
    } while (0)

static DisasJumpType translate_one(DisasContext *ctx, uint32_t insn)
{
    int32_t disp21, disp16, disp12 __attribute__((unused));
    uint16_t fn11;
    uint8_t opc, ra, rb, rc, fpfn, fn7, lit;
    bool islit, real_islit;
    TCGv va, vb, vc, tmp, tmp2;
    TCGv_i32 t32;
    DisasJumpType ret;

    /* Decode all instruction fields */
    opc = extract32(insn, 26, 6);
    ra = extract32(insn, 21, 5);
    rb = extract32(insn, 16, 5);
    rc = extract32(insn, 0, 5);
    real_islit = islit = extract32(insn, 12, 1);
    lit = extract32(insn, 13, 8);

    disp21 = sextract32(insn, 0, 21);
    disp16 = sextract32(insn, 0, 16);
    disp12 = sextract32(insn, 0, 12);

    fn11 = extract32(insn, 5, 11);
    fpfn = extract32(insn, 5, 6);
    fn7 = extract32(insn, 5, 7);
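
    /* In the operate format, bit 12 selects between a register operand (RB)
       and an 8-bit zero-extended literal for operand B.  Reading $31 as
       operand B is folded into the literal zero here.  */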
    if (rb == 31 && !islit) {
        islit = true;
        lit = 0;
    }

    ret = DISAS_NEXT;
    switch (opc) {
    case 0x00:
        /* CALL_PAL */
        ret = gen_call_pal(ctx, insn & 0x03ffffff);
        break;
    case 0x01:
        /* OPC01 */
        goto invalid_opc;
    case 0x02:
        /* OPC02 */
        goto invalid_opc;
    case 0x03:
        /* OPC03 */
        goto invalid_opc;
    case 0x04:
        /* OPC04 */
        goto invalid_opc;
    case 0x05:
        /* OPC05 */
        goto invalid_opc;
    case 0x06:
        /* OPC06 */
        goto invalid_opc;
    case 0x07:
        /* OPC07 */
        goto invalid_opc;

    case 0x09:
        /* LDAH */
        disp16 = (uint32_t)disp16 << 16;
        /* fall through */
    case 0x08:
        /* LDA */
        va = dest_gpr(ctx, ra);
        /* It's worth special-casing immediate loads.  */
        if (rb == 31) {
            tcg_gen_movi_i64(va, disp16);
        } else {
            tcg_gen_addi_i64(va, load_gpr(ctx, rb), disp16);
        }
        break;

    case 0x0A:
        /* LDBU */
        REQUIRE_AMASK(BWX);
        gen_load_mem(ctx, &tcg_gen_qemu_ld8u, ra, rb, disp16, 0, 0);
        break;
    case 0x0B:
        /* LDQ_U */
        gen_load_mem(ctx, &tcg_gen_qemu_ld64, ra, rb, disp16, 0, 1);
        break;
    case 0x0C:
        /* LDWU */
        REQUIRE_AMASK(BWX);
        gen_load_mem(ctx, &tcg_gen_qemu_ld16u, ra, rb, disp16, 0, 0);
        break;
    case 0x0D:
        /* STW */
        REQUIRE_AMASK(BWX);
        gen_store_mem(ctx, &tcg_gen_qemu_st16, ra, rb, disp16, 0, 0);
        break;
    case 0x0E:
        /* STB */
        REQUIRE_AMASK(BWX);
        gen_store_mem(ctx, &tcg_gen_qemu_st8, ra, rb, disp16, 0, 0);
        break;
    case 0x0F:
        /* STQ_U */
        gen_store_mem(ctx, &tcg_gen_qemu_st64, ra, rb, disp16, 0, 1);
        break;

    case 0x10:
        vc = dest_gpr(ctx, rc);
        vb = load_gpr_lit(ctx, rb, lit, islit);

        if (ra == 31) {
            if (fn7 == 0x00) {
                /* Special case ADDL as SEXTL.  */
                tcg_gen_ext32s_i64(vc, vb);
                break;
            }
            if (fn7 == 0x29) {
                /* Special case SUBQ as NEGQ.  */
                tcg_gen_neg_i64(vc, vb);
                break;
            }
        }

        va = load_gpr(ctx, ra);
        switch (fn7) {
        case 0x00:
            /* ADDL */
            tcg_gen_add_i64(vc, va, vb);
            tcg_gen_ext32s_i64(vc, vc);
            break;
        case 0x02:
            /* S4ADDL */
            tmp = tcg_temp_new();
            tcg_gen_shli_i64(tmp, va, 2);
            tcg_gen_add_i64(tmp, tmp, vb);
            tcg_gen_ext32s_i64(vc, tmp);
            tcg_temp_free(tmp);
            break;
        case 0x09:
            /* SUBL */
            tcg_gen_sub_i64(vc, va, vb);
            tcg_gen_ext32s_i64(vc, vc);
            break;
        case 0x0B:
            /* S4SUBL */
            tmp = tcg_temp_new();
            tcg_gen_shli_i64(tmp, va, 2);
            tcg_gen_sub_i64(tmp, tmp, vb);
            tcg_gen_ext32s_i64(vc, tmp);
            tcg_temp_free(tmp);
            break;
        case 0x0F:
            /* CMPBGE */
            if (ra == 31) {
                /* Special case 0 >= X as X == 0.  */
                gen_helper_cmpbe0(vc, vb);
            } else {
                gen_helper_cmpbge(vc, va, vb);
            }
            break;
        case 0x12:
            /* S8ADDL */
            tmp = tcg_temp_new();
            tcg_gen_shli_i64(tmp, va, 3);
            tcg_gen_add_i64(tmp, tmp, vb);
            tcg_gen_ext32s_i64(vc, tmp);
            tcg_temp_free(tmp);
            break;
        case 0x1B:
            /* S8SUBL */
            tmp = tcg_temp_new();
            tcg_gen_shli_i64(tmp, va, 3);
            tcg_gen_sub_i64(tmp, tmp, vb);
            tcg_gen_ext32s_i64(vc, tmp);
            tcg_temp_free(tmp);
            break;
        case 0x1D:
            /* CMPULT */
            tcg_gen_setcond_i64(TCG_COND_LTU, vc, va, vb);
            break;
        case 0x20:
            /* ADDQ */
            tcg_gen_add_i64(vc, va, vb);
            break;
        case 0x22:
            /* S4ADDQ */
            tmp = tcg_temp_new();
            tcg_gen_shli_i64(tmp, va, 2);
            tcg_gen_add_i64(vc, tmp, vb);
            tcg_temp_free(tmp);
            break;
        case 0x29:
            /* SUBQ */
            tcg_gen_sub_i64(vc, va, vb);
            break;
        case 0x2B:
            /* S4SUBQ */
            tmp = tcg_temp_new();
            tcg_gen_shli_i64(tmp, va, 2);
            tcg_gen_sub_i64(vc, tmp, vb);
            tcg_temp_free(tmp);
            break;
        case 0x2D:
            /* CMPEQ */
            tcg_gen_setcond_i64(TCG_COND_EQ, vc, va, vb);
            break;
        case 0x32:
            /* S8ADDQ */
            tmp = tcg_temp_new();
            tcg_gen_shli_i64(tmp, va, 3);
            tcg_gen_add_i64(vc, tmp, vb);
            tcg_temp_free(tmp);
            break;
        case 0x3B:
            /* S8SUBQ */
            tmp = tcg_temp_new();
            tcg_gen_shli_i64(tmp, va, 3);
            tcg_gen_sub_i64(vc, tmp, vb);
            tcg_temp_free(tmp);
            break;
        case 0x3D:
            /* CMPULE */
            tcg_gen_setcond_i64(TCG_COND_LEU, vc, va, vb);
            break;
        case 0x40:
            /* ADDL/V */
            tmp = tcg_temp_new();
            tcg_gen_ext32s_i64(tmp, va);
            tcg_gen_ext32s_i64(vc, vb);
            tcg_gen_add_i64(tmp, tmp, vc);
            tcg_gen_ext32s_i64(vc, tmp);
            gen_helper_check_overflow(cpu_env, vc, tmp);
            tcg_temp_free(tmp);
            break;
        case 0x49:
            /* SUBL/V */
            tmp = tcg_temp_new();
            tcg_gen_ext32s_i64(tmp, va);
            tcg_gen_ext32s_i64(vc, vb);
            tcg_gen_sub_i64(tmp, tmp, vc);
            tcg_gen_ext32s_i64(vc, tmp);
            gen_helper_check_overflow(cpu_env, vc, tmp);
            tcg_temp_free(tmp);
            break;
        case 0x4D:
            /* CMPLT */
            tcg_gen_setcond_i64(TCG_COND_LT, vc, va, vb);
            break;
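        /* For ADDQ/V and SUBQ/V below, signed overflow is detected with
           sign arithmetic: overflow occurred iff the operands had the same
           sign (opposite signs, for subtraction) while the result's sign
           differs from operand A.  That predicate is formed in bit 63 and
           shifted down for gen_helper_check_overflow.  */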
1719 case 0x60:
1720 /* ADDQ/V */
1721 tmp = tcg_temp_new();
1722 tmp2 = tcg_temp_new();
1723 tcg_gen_eqv_i64(tmp, va, vb);
1724 tcg_gen_mov_i64(tmp2, va);
1725 tcg_gen_add_i64(vc, va, vb);
1726 tcg_gen_xor_i64(tmp2, tmp2, vc);
1727 tcg_gen_and_i64(tmp, tmp, tmp2);
1728 tcg_gen_shri_i64(tmp, tmp, 63);
1729 tcg_gen_movi_i64(tmp2, 0);
1730 gen_helper_check_overflow(cpu_env, tmp, tmp2);
1731 tcg_temp_free(tmp);
1732 tcg_temp_free(tmp2);
1733 break;
1734 case 0x69:
1735 /* SUBQ/V */
1736 tmp = tcg_temp_new();
1737 tmp2 = tcg_temp_new();
1738 tcg_gen_xor_i64(tmp, va, vb);
1739 tcg_gen_mov_i64(tmp2, va);
1740 tcg_gen_sub_i64(vc, va, vb);
1741 tcg_gen_xor_i64(tmp2, tmp2, vc);
1742 tcg_gen_and_i64(tmp, tmp, tmp2);
1743 tcg_gen_shri_i64(tmp, tmp, 63);
1744 tcg_gen_movi_i64(tmp2, 0);
1745 gen_helper_check_overflow(cpu_env, tmp, tmp2);
1746 tcg_temp_free(tmp);
1747 tcg_temp_free(tmp2);
1748 break;
1749 case 0x6D:
1750 /* CMPLE */
1751 tcg_gen_setcond_i64(TCG_COND_LE, vc, va, vb);
1752 break;
1753 default:
1754 goto invalid_opc;
1756 break;
1758 case 0x11:
1759 if (fn7 == 0x20) {
1760 if (rc == 31) {
1761 /* Special case BIS as NOP. */
1762 break;
1764 if (ra == 31) {
1765 /* Special case BIS as MOV. */
1766 vc = dest_gpr(ctx, rc);
1767 if (islit) {
1768 tcg_gen_movi_i64(vc, lit);
1769 } else {
1770 tcg_gen_mov_i64(vc, load_gpr(ctx, rb));
1772 break;
1776 vc = dest_gpr(ctx, rc);
1777 vb = load_gpr_lit(ctx, rb, lit, islit);
1779 if (fn7 == 0x28 && ra == 31) {
1780 /* Special case ORNOT as NOT. */
1781 tcg_gen_not_i64(vc, vb);
1782 break;
1785 va = load_gpr(ctx, ra);
1786 switch (fn7) {
1787 case 0x00:
1788 /* AND */
1789 tcg_gen_and_i64(vc, va, vb);
1790 break;
1791 case 0x08:
1792 /* BIC */
1793 tcg_gen_andc_i64(vc, va, vb);
1794 break;
1795 case 0x14:
1796 /* CMOVLBS */
1797 tmp = tcg_temp_new();
1798 tcg_gen_andi_i64(tmp, va, 1);
1799 tcg_gen_movcond_i64(TCG_COND_NE, vc, tmp, load_zero(ctx),
1800 vb, load_gpr(ctx, rc));
1801 tcg_temp_free(tmp);
1802 break;
1803 case 0x16:
1804 /* CMOVLBC */
1805 tmp = tcg_temp_new();
1806 tcg_gen_andi_i64(tmp, va, 1);
1807 tcg_gen_movcond_i64(TCG_COND_EQ, vc, tmp, load_zero(ctx),
1808 vb, load_gpr(ctx, rc));
1809 tcg_temp_free(tmp);
1810 break;
1811 case 0x20:
1812 /* BIS */
1813 tcg_gen_or_i64(vc, va, vb);
1814 break;
1815 case 0x24:
1816 /* CMOVEQ */
1817 tcg_gen_movcond_i64(TCG_COND_EQ, vc, va, load_zero(ctx),
1818 vb, load_gpr(ctx, rc));
1819 break;
1820 case 0x26:
1821 /* CMOVNE */
1822 tcg_gen_movcond_i64(TCG_COND_NE, vc, va, load_zero(ctx),
1823 vb, load_gpr(ctx, rc));
1824 break;
1825 case 0x28:
1826 /* ORNOT */
1827 tcg_gen_orc_i64(vc, va, vb);
1828 break;
1829 case 0x40:
1830 /* XOR */
1831 tcg_gen_xor_i64(vc, va, vb);
1832 break;
1833 case 0x44:
1834 /* CMOVLT */
1835 tcg_gen_movcond_i64(TCG_COND_LT, vc, va, load_zero(ctx),
1836 vb, load_gpr(ctx, rc));
1837 break;
1838 case 0x46:
1839 /* CMOVGE */
1840 tcg_gen_movcond_i64(TCG_COND_GE, vc, va, load_zero(ctx),
1841 vb, load_gpr(ctx, rc));
1842 break;
1843 case 0x48:
1844 /* EQV */
1845 tcg_gen_eqv_i64(vc, va, vb);
1846 break;
1847 case 0x61:
1848 /* AMASK */
1849 REQUIRE_REG_31(ra);
1850 tcg_gen_andi_i64(vc, vb, ~ctx->amask);
1851 break;
1852 case 0x64:
1853 /* CMOVLE */
1854 tcg_gen_movcond_i64(TCG_COND_LE, vc, va, load_zero(ctx),
1855 vb, load_gpr(ctx, rc));
1856 break;
1857 case 0x66:
1858 /* CMOVGT */
1859 tcg_gen_movcond_i64(TCG_COND_GT, vc, va, load_zero(ctx),
1860 vb, load_gpr(ctx, rc));
1861 break;
1862 case 0x6C:
1863 /* IMPLVER */
1864 REQUIRE_REG_31(ra);
1865 tcg_gen_movi_i64(vc, ctx->implver);
1866 break;
1867 default:
1868 goto invalid_opc;
1870 break;
1872 case 0x12:
1873 vc = dest_gpr(ctx, rc);
1874 va = load_gpr(ctx, ra);
1875 switch (fn7) {
1876 case 0x02:
1877 /* MSKBL */
1878 gen_msk_l(ctx, vc, va, rb, islit, lit, 0x01);
1879 break;
1880 case 0x06:
1881 /* EXTBL */
1882 gen_ext_l(ctx, vc, va, rb, islit, lit, 0x01);
1883 break;
1884 case 0x0B:
1885 /* INSBL */
1886 gen_ins_l(ctx, vc, va, rb, islit, lit, 0x01);
1887 break;
1888 case 0x12:
1889 /* MSKWL */
1890 gen_msk_l(ctx, vc, va, rb, islit, lit, 0x03);
1891 break;
1892 case 0x16:
1893 /* EXTWL */
1894 gen_ext_l(ctx, vc, va, rb, islit, lit, 0x03);
1895 break;
1896 case 0x1B:
1897 /* INSWL */
1898 gen_ins_l(ctx, vc, va, rb, islit, lit, 0x03);
1899 break;
1900 case 0x22:
1901 /* MSKLL */
1902 gen_msk_l(ctx, vc, va, rb, islit, lit, 0x0f);
1903 break;
1904 case 0x26:
1905 /* EXTLL */
1906 gen_ext_l(ctx, vc, va, rb, islit, lit, 0x0f);
1907 break;
1908 case 0x2B:
1909 /* INSLL */
1910 gen_ins_l(ctx, vc, va, rb, islit, lit, 0x0f);
1911 break;
1912 case 0x30:
1913 /* ZAP */
1914 if (islit) {
1915 gen_zapnoti(vc, va, ~lit);
1916 } else {
1917 gen_helper_zap(vc, va, load_gpr(ctx, rb));
1919 break;
1920 case 0x31:
1921 /* ZAPNOT */
1922 if (islit) {
1923 gen_zapnoti(vc, va, lit);
1924 } else {
1925 gen_helper_zapnot(vc, va, load_gpr(ctx, rb));
1927 break;
1928 case 0x32:
1929 /* MSKQL */
1930 gen_msk_l(ctx, vc, va, rb, islit, lit, 0xff);
1931 break;
1932 case 0x34:
1933 /* SRL */
1934 if (islit) {
1935 tcg_gen_shri_i64(vc, va, lit & 0x3f);
1936 } else {
1937 tmp = tcg_temp_new();
1938 vb = load_gpr(ctx, rb);
1939 tcg_gen_andi_i64(tmp, vb, 0x3f);
1940 tcg_gen_shr_i64(vc, va, tmp);
1941 tcg_temp_free(tmp);
1943 break;
1944 case 0x36:
1945 /* EXTQL */
1946 gen_ext_l(ctx, vc, va, rb, islit, lit, 0xff);
1947 break;
1948 case 0x39:
1949 /* SLL */
1950 if (islit) {
1951 tcg_gen_shli_i64(vc, va, lit & 0x3f);
1952 } else {
1953 tmp = tcg_temp_new();
1954 vb = load_gpr(ctx, rb);
1955 tcg_gen_andi_i64(tmp, vb, 0x3f);
1956 tcg_gen_shl_i64(vc, va, tmp);
1957 tcg_temp_free(tmp);
1959 break;
1960 case 0x3B:
1961 /* INSQL */
1962 gen_ins_l(ctx, vc, va, rb, islit, lit, 0xff);
1963 break;
1964 case 0x3C:
1965 /* SRA */
1966 if (islit) {
1967 tcg_gen_sari_i64(vc, va, lit & 0x3f);
1968 } else {
1969 tmp = tcg_temp_new();
1970 vb = load_gpr(ctx, rb);
1971 tcg_gen_andi_i64(tmp, vb, 0x3f);
1972 tcg_gen_sar_i64(vc, va, tmp);
1973 tcg_temp_free(tmp);
1975 break;
1976 case 0x52:
1977 /* MSKWH */
1978 gen_msk_h(ctx, vc, va, rb, islit, lit, 0x03);
1979 break;
1980 case 0x57:
1981 /* INSWH */
1982 gen_ins_h(ctx, vc, va, rb, islit, lit, 0x03);
1983 break;
1984 case 0x5A:
1985 /* EXTWH */
1986 gen_ext_h(ctx, vc, va, rb, islit, lit, 0x03);
1987 break;
1988 case 0x62:
1989 /* MSKLH */
1990 gen_msk_h(ctx, vc, va, rb, islit, lit, 0x0f);
1991 break;
1992 case 0x67:
1993 /* INSLH */
1994 gen_ins_h(ctx, vc, va, rb, islit, lit, 0x0f);
1995 break;
1996 case 0x6A:
1997 /* EXTLH */
1998 gen_ext_h(ctx, vc, va, rb, islit, lit, 0x0f);
1999 break;
2000 case 0x72:
2001 /* MSKQH */
2002 gen_msk_h(ctx, vc, va, rb, islit, lit, 0xff);
2003 break;
2004 case 0x77:
2005 /* INSQH */
2006 gen_ins_h(ctx, vc, va, rb, islit, lit, 0xff);
2007 break;
2008 case 0x7A:
2009 /* EXTQH */
2010 gen_ext_h(ctx, vc, va, rb, islit, lit, 0xff);
2011 break;
2012 default:
2013 goto invalid_opc;
2015 break;
2017 case 0x13:
2018 vc = dest_gpr(ctx, rc);
2019 vb = load_gpr_lit(ctx, rb, lit, islit);
2020 va = load_gpr(ctx, ra);
2021 switch (fn7) {
2022 case 0x00:
2023 /* MULL */
2024 tcg_gen_mul_i64(vc, va, vb);
2025 tcg_gen_ext32s_i64(vc, vc);
2026 break;
2027 case 0x20:
2028 /* MULQ */
2029 tcg_gen_mul_i64(vc, va, vb);
2030 break;
2031 case 0x30:
2032 /* UMULH */
2033 tmp = tcg_temp_new();
2034 tcg_gen_mulu2_i64(tmp, vc, va, vb);
2035 tcg_temp_free(tmp);
2036 break;
2037 case 0x40:
2038 /* MULL/V */
2039 tmp = tcg_temp_new();
2040 tcg_gen_ext32s_i64(tmp, va);
2041 tcg_gen_ext32s_i64(vc, vb);
2042 tcg_gen_mul_i64(tmp, tmp, vc);
2043 tcg_gen_ext32s_i64(vc, tmp);
2044 gen_helper_check_overflow(cpu_env, vc, tmp);
2045 tcg_temp_free(tmp);
2046 break;
2047 case 0x60:
2048 /* MULQ/V */
2049 tmp = tcg_temp_new();
2050 tmp2 = tcg_temp_new();
2051 tcg_gen_muls2_i64(vc, tmp, va, vb);
2052 tcg_gen_sari_i64(tmp2, vc, 63);
2053 gen_helper_check_overflow(cpu_env, tmp, tmp2);
2054 tcg_temp_free(tmp);
2055 tcg_temp_free(tmp2);
2056 break;
2057 default:
2058 goto invalid_opc;
2060 break;
2062 case 0x14:
2063 REQUIRE_AMASK(FIX);
2064 vc = dest_fpr(ctx, rc);
2065 switch (fpfn) { /* fn11 & 0x3F */
2066 case 0x04:
2067 /* ITOFS */
2068 REQUIRE_REG_31(rb);
2069 t32 = tcg_temp_new_i32();
2070 va = load_gpr(ctx, ra);
2071 tcg_gen_extrl_i64_i32(t32, va);
2072 gen_helper_memory_to_s(vc, t32);
2073 tcg_temp_free_i32(t32);
2074 break;
2075 case 0x0A:
2076 /* SQRTF */
2077 REQUIRE_REG_31(ra);
2078 vb = load_fpr(ctx, rb);
2079 gen_helper_sqrtf(vc, cpu_env, vb);
2080 break;
2081 case 0x0B:
2082 /* SQRTS */
2083 REQUIRE_REG_31(ra);
2084 gen_sqrts(ctx, rb, rc, fn11);
2085 break;
2086 case 0x14:
2087 /* ITOFF */
2088 REQUIRE_REG_31(rb);
2089 t32 = tcg_temp_new_i32();
2090 va = load_gpr(ctx, ra);
2091 tcg_gen_extrl_i64_i32(t32, va);
2092 gen_helper_memory_to_f(vc, t32);
2093 tcg_temp_free_i32(t32);
2094 break;
2095 case 0x24:
2096 /* ITOFT */
2097 REQUIRE_REG_31(rb);
2098 va = load_gpr(ctx, ra);
2099 tcg_gen_mov_i64(vc, va);
2100 break;
2101 case 0x2A:
2102 /* SQRTG */
2103 REQUIRE_REG_31(ra);
2104 vb = load_fpr(ctx, rb);
2105 gen_helper_sqrtg(vc, cpu_env, vb);
2106 break;
2107 case 0x02B:
2108 /* SQRTT */
2109 REQUIRE_REG_31(ra);
2110 gen_sqrtt(ctx, rb, rc, fn11);
2111 break;
2112 default:
2113 goto invalid_opc;
2115 break;
2117 case 0x15:
2118 /* VAX floating point */
2119 /* XXX: rounding mode and trap are ignored (!) */
2120 vc = dest_fpr(ctx, rc);
2121 vb = load_fpr(ctx, rb);
2122 va = load_fpr(ctx, ra);
2123 switch (fpfn) { /* fn11 & 0x3F */
2124 case 0x00:
2125 /* ADDF */
2126 gen_helper_addf(vc, cpu_env, va, vb);
2127 break;
2128 case 0x01:
2129 /* SUBF */
2130 gen_helper_subf(vc, cpu_env, va, vb);
2131 break;
2132 case 0x02:
2133 /* MULF */
2134 gen_helper_mulf(vc, cpu_env, va, vb);
2135 break;
2136 case 0x03:
2137 /* DIVF */
2138 gen_helper_divf(vc, cpu_env, va, vb);
2139 break;
2140 case 0x1E:
2141 /* CVTDG -- TODO */
2142 REQUIRE_REG_31(ra);
2143 goto invalid_opc;
2144 case 0x20:
2145 /* ADDG */
2146 gen_helper_addg(vc, cpu_env, va, vb);
2147 break;
2148 case 0x21:
2149 /* SUBG */
2150 gen_helper_subg(vc, cpu_env, va, vb);
2151 break;
2152 case 0x22:
2153 /* MULG */
2154 gen_helper_mulg(vc, cpu_env, va, vb);
2155 break;
2156 case 0x23:
2157 /* DIVG */
2158 gen_helper_divg(vc, cpu_env, va, vb);
2159 break;
2160 case 0x25:
2161 /* CMPGEQ */
2162 gen_helper_cmpgeq(vc, cpu_env, va, vb);
2163 break;
2164 case 0x26:
2165 /* CMPGLT */
2166 gen_helper_cmpglt(vc, cpu_env, va, vb);
2167 break;
2168 case 0x27:
2169 /* CMPGLE */
2170 gen_helper_cmpgle(vc, cpu_env, va, vb);
2171 break;
2172 case 0x2C:
2173 /* CVTGF */
2174 REQUIRE_REG_31(ra);
2175 gen_helper_cvtgf(vc, cpu_env, vb);
2176 break;
2177 case 0x2D:
2178 /* CVTGD -- TODO */
2179 REQUIRE_REG_31(ra);
2180 goto invalid_opc;
2181 case 0x2F:
2182 /* CVTGQ */
2183 REQUIRE_REG_31(ra);
2184 gen_helper_cvtgq(vc, cpu_env, vb);
2185 break;
2186 case 0x3C:
2187 /* CVTQF */
2188 REQUIRE_REG_31(ra);
2189 gen_helper_cvtqf(vc, cpu_env, vb);
2190 break;
2191 case 0x3E:
2192 /* CVTQG */
2193 REQUIRE_REG_31(ra);
2194 gen_helper_cvtqg(vc, cpu_env, vb);
2195 break;
2196 default:
2197 goto invalid_opc;
2199 break;
2201 case 0x16:
2202 /* IEEE floating-point */
2203 switch (fpfn) { /* fn11 & 0x3F */
2204 case 0x00:
2205 /* ADDS */
2206 gen_adds(ctx, ra, rb, rc, fn11);
2207 break;
2208 case 0x01:
2209 /* SUBS */
2210 gen_subs(ctx, ra, rb, rc, fn11);
2211 break;
2212 case 0x02:
2213 /* MULS */
2214 gen_muls(ctx, ra, rb, rc, fn11);
2215 break;
2216 case 0x03:
2217 /* DIVS */
2218 gen_divs(ctx, ra, rb, rc, fn11);
2219 break;
2220 case 0x20:
2221 /* ADDT */
2222 gen_addt(ctx, ra, rb, rc, fn11);
2223 break;
2224 case 0x21:
2225 /* SUBT */
2226 gen_subt(ctx, ra, rb, rc, fn11);
2227 break;
2228 case 0x22:
2229 /* MULT */
2230 gen_mult(ctx, ra, rb, rc, fn11);
2231 break;
2232 case 0x23:
2233 /* DIVT */
2234 gen_divt(ctx, ra, rb, rc, fn11);
2235 break;
2236 case 0x24:
2237 /* CMPTUN */
2238 gen_cmptun(ctx, ra, rb, rc, fn11);
2239 break;
2240 case 0x25:
2241 /* CMPTEQ */
2242 gen_cmpteq(ctx, ra, rb, rc, fn11);
2243 break;
2244 case 0x26:
2245 /* CMPTLT */
2246 gen_cmptlt(ctx, ra, rb, rc, fn11);
2247 break;
2248 case 0x27:
2249 /* CMPTLE */
2250 gen_cmptle(ctx, ra, rb, rc, fn11);
2251 break;
2252 case 0x2C:
2253 REQUIRE_REG_31(ra);
2254 if (fn11 == 0x2AC || fn11 == 0x6AC) {
2255 /* CVTST */
2256 gen_cvtst(ctx, rb, rc, fn11);
2257 } else {
2258 /* CVTTS */
2259 gen_cvtts(ctx, rb, rc, fn11);
2261 break;
2262 case 0x2F:
2263 /* CVTTQ */
2264 REQUIRE_REG_31(ra);
2265 gen_cvttq(ctx, rb, rc, fn11);
2266 break;
2267 case 0x3C:
2268 /* CVTQS */
2269 REQUIRE_REG_31(ra);
2270 gen_cvtqs(ctx, rb, rc, fn11);
2271 break;
2272 case 0x3E:
2273 /* CVTQT */
2274 REQUIRE_REG_31(ra);
2275 gen_cvtqt(ctx, rb, rc, fn11);
2276 break;
2277 default:
2278 goto invalid_opc;
2280 break;
2282 case 0x17:
2283 switch (fn11) {
2284 case 0x010:
2285 /* CVTLQ */
2286 REQUIRE_REG_31(ra);
2287 vc = dest_fpr(ctx, rc);
2288 vb = load_fpr(ctx, rb);
2289 gen_cvtlq(vc, vb);
2290 break;
2291 case 0x020:
2292 /* CPYS */
2293 if (rc == 31) {
2294 /* Special case CPYS as FNOP. */
2295 } else {
2296 vc = dest_fpr(ctx, rc);
2297 va = load_fpr(ctx, ra);
2298 if (ra == rb) {
2299 /* Special case CPYS as FMOV. */
2300 tcg_gen_mov_i64(vc, va);
2301 } else {
2302 vb = load_fpr(ctx, rb);
2303 gen_cpy_mask(vc, va, vb, 0, 0x8000000000000000ULL);
2306 break;
2307 case 0x021:
2308 /* CPYSN */
2309 vc = dest_fpr(ctx, rc);
2310 vb = load_fpr(ctx, rb);
2311 va = load_fpr(ctx, ra);
2312 gen_cpy_mask(vc, va, vb, 1, 0x8000000000000000ULL);
2313 break;
2314 case 0x022:
2315 /* CPYSE */
2316 vc = dest_fpr(ctx, rc);
2317 vb = load_fpr(ctx, rb);
2318 va = load_fpr(ctx, ra);
2319 gen_cpy_mask(vc, va, vb, 0, 0xFFF0000000000000ULL);
2320 break;
2321 case 0x024:
2322 /* MT_FPCR */
2323 va = load_fpr(ctx, ra);
2324 gen_helper_store_fpcr(cpu_env, va);
2325 if (ctx->tb_rm == QUAL_RM_D) {
2326 /* Re-do the copy of the rounding mode to fp_status
2327 the next time we use dynamic rounding. */
2328                 ctx->tb_rm = -1;
2329             }
2330 break;
2331 case 0x025:
2332 /* MF_FPCR */
2333 va = dest_fpr(ctx, ra);
2334 gen_helper_load_fpcr(va, cpu_env);
2335 break;
2336 case 0x02A:
2337 /* FCMOVEQ */
2338 gen_fcmov(ctx, TCG_COND_EQ, ra, rb, rc);
2339 break;
2340 case 0x02B:
2341 /* FCMOVNE */
2342 gen_fcmov(ctx, TCG_COND_NE, ra, rb, rc);
2343 break;
2344 case 0x02C:
2345 /* FCMOVLT */
2346 gen_fcmov(ctx, TCG_COND_LT, ra, rb, rc);
2347 break;
2348 case 0x02D:
2349 /* FCMOVGE */
2350 gen_fcmov(ctx, TCG_COND_GE, ra, rb, rc);
2351 break;
2352 case 0x02E:
2353 /* FCMOVLE */
2354 gen_fcmov(ctx, TCG_COND_LE, ra, rb, rc);
2355 break;
2356 case 0x02F:
2357 /* FCMOVGT */
2358 gen_fcmov(ctx, TCG_COND_GT, ra, rb, rc);
2359 break;
2360 case 0x030: /* CVTQL */
2361 case 0x130: /* CVTQL/V */
2362 case 0x530: /* CVTQL/SV */
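            /* All three encodings perform the same conversion; they
               differ only in the exception handling (/V, /SV) that
               gen_fp_exc_raise derives from fn11. */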
2363 REQUIRE_REG_31(ra);
2364 vc = dest_fpr(ctx, rc);
2365 vb = load_fpr(ctx, rb);
2366 gen_helper_cvtql(vc, cpu_env, vb);
2367 gen_fp_exc_raise(rc, fn11);
2368 break;
2369 default:
2370             goto invalid_opc;
2371         }
2372 break;
2374 case 0x18:
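        /* Miscellaneous instructions; the function code is encoded
           in the 16-bit displacement field. */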
2375 switch ((uint16_t)disp16) {
2376 case 0x0000:
2377 /* TRAPB */
2378 /* No-op. */
2379 break;
2380 case 0x0400:
2381 /* EXCB */
2382 /* No-op. */
2383 break;
2384 case 0x4000:
2385 /* MB */
2386 tcg_gen_mb(TCG_MO_ALL | TCG_BAR_SC);
2387 break;
2388 case 0x4400:
2389 /* WMB */
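            /* WMB orders stores against stores only, hence the
               store-store TCG barrier. */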
2390 tcg_gen_mb(TCG_MO_ST_ST | TCG_BAR_SC);
2391 break;
2392 case 0x8000:
2393 /* FETCH */
2394 /* No-op */
2395 break;
2396 case 0xA000:
2397 /* FETCH_M */
2398 /* No-op */
2399 break;
2400 case 0xC000:
2401 /* RPCC */
2402 va = dest_gpr(ctx, ra);
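            /* Reading the cycle counter is an I/O operation when
               icount is enabled: precede the helper with
               gen_io_start() and end the TB (DISAS_PC_STALE) so the
               executed instruction count is up to date. */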
2403 if (tb_cflags(ctx->base.tb) & CF_USE_ICOUNT) {
2404 gen_io_start();
2405 gen_helper_load_pcc(va, cpu_env);
2406 ret = DISAS_PC_STALE;
2407 } else {
2408                 gen_helper_load_pcc(va, cpu_env);
2409             }
2410 break;
2411 case 0xE000:
2412 /* RC */
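            /* gen_rx reads the RX flag into ra and then writes it
               back as 0 (RC) or 1 (RS). */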
2413 gen_rx(ctx, ra, 0);
2414 break;
2415 case 0xE800:
2416 /* ECB */
2417 break;
2418 case 0xF000:
2419 /* RS */
2420 gen_rx(ctx, ra, 1);
2421 break;
2422 case 0xF800:
2423 /* WH64 */
2424 /* No-op */
2425 break;
2426 case 0xFC00:
2427 /* WH64EN */
2428 /* No-op */
2429 break;
2430 default:
2431             goto invalid_opc;
2432         }
2433 break;
2435 case 0x19:
2436 /* HW_MFPR (PALcode) */
2437 #ifndef CONFIG_USER_ONLY
2438 REQUIRE_TB_FLAG(ENV_FLAG_PAL_MODE);
2439 va = dest_gpr(ctx, ra);
2440 ret = gen_mfpr(ctx, va, insn & 0xffff);
2441 break;
2442 #else
2443 goto invalid_opc;
2444 #endif
2446 case 0x1A:
2447 /* JMP, JSR, RET, JSR_COROUTINE. These only differ by the branch
2448 prediction stack action, which of course we don't implement. */
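        /* The two low bits of the target are masked off, keeping the
           PC 4-byte aligned. */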
2449 vb = load_gpr(ctx, rb);
2450 tcg_gen_andi_i64(cpu_pc, vb, ~3);
2451 if (ra != 31) {
2452             tcg_gen_movi_i64(ctx->ir[ra], ctx->base.pc_next);
2453         }
2454 ret = DISAS_PC_UPDATED;
2455 break;
2457 case 0x1B:
2458 /* HW_LD (PALcode) */
2459 #ifndef CONFIG_USER_ONLY
2460         REQUIRE_TB_FLAG(ENV_FLAG_PAL_MODE);
2461         {
2462 TCGv addr = tcg_temp_new();
2463 vb = load_gpr(ctx, rb);
2464 va = dest_gpr(ctx, ra);
2466 tcg_gen_addi_i64(addr, vb, disp12);
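            /* Bits <15:12> of the instruction select the access type. */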
2467 switch ((insn >> 12) & 0xF) {
2468 case 0x0:
2469 /* Longword physical access (hw_ldl/p) */
2470 tcg_gen_qemu_ld_i64(va, addr, MMU_PHYS_IDX, MO_LESL);
2471 break;
2472 case 0x1:
2473 /* Quadword physical access (hw_ldq/p) */
2474 tcg_gen_qemu_ld_i64(va, addr, MMU_PHYS_IDX, MO_LEQ);
2475 break;
2476 case 0x2:
2477 /* Longword physical access with lock (hw_ldl_l/p) */
2478 gen_qemu_ldl_l(va, addr, MMU_PHYS_IDX);
2479 break;
2480 case 0x3:
2481 /* Quadword physical access with lock (hw_ldq_l/p) */
2482 gen_qemu_ldq_l(va, addr, MMU_PHYS_IDX);
2483 break;
2484 case 0x4:
2485 /* Longword virtual PTE fetch (hw_ldl/v) */
2486 goto invalid_opc;
2487 case 0x5:
2488 /* Quadword virtual PTE fetch (hw_ldq/v) */
2489             goto invalid_opc;
2491 case 0x6:
2492 /* Invalid */
2493 goto invalid_opc;
2494 case 0x7:
2495             /* Invalid */
2496 goto invalid_opc;
2497 case 0x8:
2498 /* Longword virtual access (hw_ldl) */
2499 goto invalid_opc;
2500 case 0x9:
2501 /* Quadword virtual access (hw_ldq) */
2502 goto invalid_opc;
2503 case 0xA:
2504 /* Longword virtual access with protection check (hw_ldl/w) */
2505 tcg_gen_qemu_ld_i64(va, addr, MMU_KERNEL_IDX, MO_LESL);
2506 break;
2507 case 0xB:
2508 /* Quadword virtual access with protection check (hw_ldq/w) */
2509 tcg_gen_qemu_ld_i64(va, addr, MMU_KERNEL_IDX, MO_LEQ);
2510 break;
2511 case 0xC:
2512             /* Longword virtual access with alt access mode (hw_ldl/a) */
2513 goto invalid_opc;
2514 case 0xD:
2515 /* Quadword virtual access with alt access mode (hw_ldq/a) */
2516 goto invalid_opc;
2517 case 0xE:
2518 /* Longword virtual access with alternate access mode and
2519 protection checks (hw_ldl/wa) */
2520 tcg_gen_qemu_ld_i64(va, addr, MMU_USER_IDX, MO_LESL);
2521 break;
2522 case 0xF:
2523 /* Quadword virtual access with alternate access mode and
2524 protection checks (hw_ldq/wa) */
2525 tcg_gen_qemu_ld_i64(va, addr, MMU_USER_IDX, MO_LEQ);
2526             break;
2527         }
2528 tcg_temp_free(addr);
2529         break;
2530         }
2531 #else
2532 goto invalid_opc;
2533 #endif
2535 case 0x1C:
2536         vc = dest_gpr(ctx, rc);
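        /* FTOIT and FTOIS source ra from the FP register file, so
           handle them before the common integer operand load below. */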
2537 if (fn7 == 0x70) {
2538 /* FTOIT */
2539 REQUIRE_AMASK(FIX);
2540 REQUIRE_REG_31(rb);
2541 va = load_fpr(ctx, ra);
2542 tcg_gen_mov_i64(vc, va);
2543 break;
2544 } else if (fn7 == 0x78) {
2545 /* FTOIS */
2546 REQUIRE_AMASK(FIX);
2547 REQUIRE_REG_31(rb);
2548 t32 = tcg_temp_new_i32();
2549 va = load_fpr(ctx, ra);
2550 gen_helper_s_to_memory(t32, va);
2551 tcg_gen_ext_i32_i64(vc, t32);
2552 tcg_temp_free_i32(t32);
2553             break;
2554         }
2556 vb = load_gpr_lit(ctx, rb, lit, islit);
2557 switch (fn7) {
2558 case 0x00:
2559 /* SEXTB */
2560 REQUIRE_AMASK(BWX);
2561 REQUIRE_REG_31(ra);
2562 tcg_gen_ext8s_i64(vc, vb);
2563 break;
2564 case 0x01:
2565 /* SEXTW */
2566 REQUIRE_AMASK(BWX);
2567 REQUIRE_REG_31(ra);
2568 tcg_gen_ext16s_i64(vc, vb);
2569 break;
2570 case 0x30:
2571 /* CTPOP */
2572 REQUIRE_AMASK(CIX);
2573 REQUIRE_REG_31(ra);
2574 REQUIRE_NO_LIT;
2575 tcg_gen_ctpop_i64(vc, vb);
2576 break;
2577 case 0x31:
2578 /* PERR */
2579 REQUIRE_AMASK(MVI);
2580 REQUIRE_NO_LIT;
2581 va = load_gpr(ctx, ra);
2582 gen_helper_perr(vc, va, vb);
2583 break;
2584 case 0x32:
2585 /* CTLZ */
2586 REQUIRE_AMASK(CIX);
2587 REQUIRE_REG_31(ra);
2588 REQUIRE_NO_LIT;
2589 tcg_gen_clzi_i64(vc, vb, 64);
2590 break;
2591 case 0x33:
2592 /* CTTZ */
2593 REQUIRE_AMASK(CIX);
2594 REQUIRE_REG_31(ra);
2595 REQUIRE_NO_LIT;
2596 tcg_gen_ctzi_i64(vc, vb, 64);
2597 break;
2598 case 0x34:
2599 /* UNPKBW */
2600 REQUIRE_AMASK(MVI);
2601 REQUIRE_REG_31(ra);
2602 REQUIRE_NO_LIT;
2603 gen_helper_unpkbw(vc, vb);
2604 break;
2605 case 0x35:
2606 /* UNPKBL */
2607 REQUIRE_AMASK(MVI);
2608 REQUIRE_REG_31(ra);
2609 REQUIRE_NO_LIT;
2610 gen_helper_unpkbl(vc, vb);
2611 break;
2612 case 0x36:
2613 /* PKWB */
2614 REQUIRE_AMASK(MVI);
2615 REQUIRE_REG_31(ra);
2616 REQUIRE_NO_LIT;
2617 gen_helper_pkwb(vc, vb);
2618 break;
2619 case 0x37:
2620 /* PKLB */
2621 REQUIRE_AMASK(MVI);
2622 REQUIRE_REG_31(ra);
2623 REQUIRE_NO_LIT;
2624 gen_helper_pklb(vc, vb);
2625 break;
2626 case 0x38:
2627 /* MINSB8 */
2628 REQUIRE_AMASK(MVI);
2629 va = load_gpr(ctx, ra);
2630 gen_helper_minsb8(vc, va, vb);
2631 break;
2632 case 0x39:
2633 /* MINSW4 */
2634 REQUIRE_AMASK(MVI);
2635 va = load_gpr(ctx, ra);
2636 gen_helper_minsw4(vc, va, vb);
2637 break;
2638 case 0x3A:
2639 /* MINUB8 */
2640 REQUIRE_AMASK(MVI);
2641 va = load_gpr(ctx, ra);
2642 gen_helper_minub8(vc, va, vb);
2643 break;
2644 case 0x3B:
2645 /* MINUW4 */
2646 REQUIRE_AMASK(MVI);
2647 va = load_gpr(ctx, ra);
2648 gen_helper_minuw4(vc, va, vb);
2649 break;
2650 case 0x3C:
2651 /* MAXUB8 */
2652 REQUIRE_AMASK(MVI);
2653 va = load_gpr(ctx, ra);
2654 gen_helper_maxub8(vc, va, vb);
2655 break;
2656 case 0x3D:
2657 /* MAXUW4 */
2658 REQUIRE_AMASK(MVI);
2659 va = load_gpr(ctx, ra);
2660 gen_helper_maxuw4(vc, va, vb);
2661 break;
2662 case 0x3E:
2663 /* MAXSB8 */
2664 REQUIRE_AMASK(MVI);
2665 va = load_gpr(ctx, ra);
2666 gen_helper_maxsb8(vc, va, vb);
2667 break;
2668 case 0x3F:
2669 /* MAXSW4 */
2670 REQUIRE_AMASK(MVI);
2671 va = load_gpr(ctx, ra);
2672 gen_helper_maxsw4(vc, va, vb);
2673 break;
2674 default:
2675             goto invalid_opc;
2676         }
2677 break;
2679 case 0x1D:
2680 /* HW_MTPR (PALcode) */
2681 #ifndef CONFIG_USER_ONLY
2682 REQUIRE_TB_FLAG(ENV_FLAG_PAL_MODE);
2683 vb = load_gpr(ctx, rb);
2684 ret = gen_mtpr(ctx, vb, insn & 0xffff);
2685 break;
2686 #else
2687 goto invalid_opc;
2688 #endif
2690 case 0x1E:
2691 /* HW_RET (PALcode) */
2692 #ifndef CONFIG_USER_ONLY
2693 REQUIRE_TB_FLAG(ENV_FLAG_PAL_MODE);
2694 if (rb == 31) {
2695 /* Pre-EV6 CPUs interpreted this as HW_REI, loading the return
2696 address from EXC_ADDR. This turns out to be useful for our
2697 emulation PALcode, so continue to accept it. */
2698 ctx->lit = vb = tcg_temp_new();
2699 tcg_gen_ld_i64(vb, cpu_env, offsetof(CPUAlphaState, exc_addr));
2700 } else {
2701             vb = load_gpr(ctx, rb);
2702         }
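        /* Leaving PALmode: drop any lock-flag reservation, clear the
           RX flag, and take the new PALmode state from bit 0 of the
           target address. */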
2703 tcg_gen_movi_i64(cpu_lock_addr, -1);
2704 tmp = tcg_temp_new();
2705 tcg_gen_movi_i64(tmp, 0);
2706 st_flag_byte(tmp, ENV_FLAG_RX_SHIFT);
2707 tcg_gen_andi_i64(tmp, vb, 1);
2708 st_flag_byte(tmp, ENV_FLAG_PAL_SHIFT);
2709 tcg_temp_free(tmp);
2710 tcg_gen_andi_i64(cpu_pc, vb, ~3);
2711 /* Allow interrupts to be recognized right away. */
2712 ret = DISAS_PC_UPDATED_NOCHAIN;
2713 break;
2714 #else
2715 goto invalid_opc;
2716 #endif
2718 case 0x1F:
2719 /* HW_ST (PALcode) */
2720 #ifndef CONFIG_USER_ONLY
2721         REQUIRE_TB_FLAG(ENV_FLAG_PAL_MODE);
2722         {
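            /* As for HW_LD, bits <15:12> of the instruction select
               the access type. */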
2723 switch ((insn >> 12) & 0xF) {
2724 case 0x0:
2725 /* Longword physical access */
2726 va = load_gpr(ctx, ra);
2727 vb = load_gpr(ctx, rb);
2728 tmp = tcg_temp_new();
2729 tcg_gen_addi_i64(tmp, vb, disp12);
2730 tcg_gen_qemu_st_i64(va, tmp, MMU_PHYS_IDX, MO_LESL);
2731 tcg_temp_free(tmp);
2732 break;
2733 case 0x1:
2734 /* Quadword physical access */
2735 va = load_gpr(ctx, ra);
2736 vb = load_gpr(ctx, rb);
2737 tmp = tcg_temp_new();
2738 tcg_gen_addi_i64(tmp, vb, disp12);
2739 tcg_gen_qemu_st_i64(va, tmp, MMU_PHYS_IDX, MO_LEQ);
2740 tcg_temp_free(tmp);
2741 break;
2742 case 0x2:
2743 /* Longword physical access with lock */
2744 ret = gen_store_conditional(ctx, ra, rb, disp12,
2745 MMU_PHYS_IDX, MO_LESL);
2746 break;
2747 case 0x3:
2748 /* Quadword physical access with lock */
2749 ret = gen_store_conditional(ctx, ra, rb, disp12,
2750 MMU_PHYS_IDX, MO_LEQ);
2751 break;
2752 case 0x4:
2753 /* Longword virtual access */
2754 goto invalid_opc;
2755 case 0x5:
2756 /* Quadword virtual access */
2757 goto invalid_opc;
2758 case 0x6:
2759 /* Invalid */
2760 goto invalid_opc;
2761 case 0x7:
2762 /* Invalid */
2763 goto invalid_opc;
2764 case 0x8:
2765 /* Invalid */
2766 goto invalid_opc;
2767 case 0x9:
2768 /* Invalid */
2769 goto invalid_opc;
2770 case 0xA:
2771 /* Invalid */
2772 goto invalid_opc;
2773 case 0xB:
2774 /* Invalid */
2775 goto invalid_opc;
2776 case 0xC:
2777 /* Longword virtual access with alternate access mode */
2778 goto invalid_opc;
2779 case 0xD:
2780 /* Quadword virtual access with alternate access mode */
2781 goto invalid_opc;
2782 case 0xE:
2783 /* Invalid */
2784 goto invalid_opc;
2785 case 0xF:
2786 /* Invalid */
2787             goto invalid_opc;
2788             }
2789             break;
2790         }
2791 #else
2792 goto invalid_opc;
2793 #endif
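    /* Memory-format loads and stores: opcodes 0x20-0x27 access the
       FP registers (F/G for VAX, S/T for IEEE); 0x28-0x2F access the
       integer registers. */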
2794 case 0x20:
2795 /* LDF */
2796 gen_load_mem(ctx, &gen_qemu_ldf, ra, rb, disp16, 1, 0);
2797 break;
2798 case 0x21:
2799 /* LDG */
2800 gen_load_mem(ctx, &gen_qemu_ldg, ra, rb, disp16, 1, 0);
2801 break;
2802 case 0x22:
2803 /* LDS */
2804 gen_load_mem(ctx, &gen_qemu_lds, ra, rb, disp16, 1, 0);
2805 break;
2806 case 0x23:
2807 /* LDT */
2808 gen_load_mem(ctx, &tcg_gen_qemu_ld64, ra, rb, disp16, 1, 0);
2809 break;
2810 case 0x24:
2811 /* STF */
2812 gen_store_mem(ctx, &gen_qemu_stf, ra, rb, disp16, 1, 0);
2813 break;
2814 case 0x25:
2815 /* STG */
2816 gen_store_mem(ctx, &gen_qemu_stg, ra, rb, disp16, 1, 0);
2817 break;
2818 case 0x26:
2819 /* STS */
2820 gen_store_mem(ctx, &gen_qemu_sts, ra, rb, disp16, 1, 0);
2821 break;
2822 case 0x27:
2823 /* STT */
2824 gen_store_mem(ctx, &tcg_gen_qemu_st64, ra, rb, disp16, 1, 0);
2825 break;
2826 case 0x28:
2827 /* LDL */
2828 gen_load_mem(ctx, &tcg_gen_qemu_ld32s, ra, rb, disp16, 0, 0);
2829 break;
2830 case 0x29:
2831 /* LDQ */
2832 gen_load_mem(ctx, &tcg_gen_qemu_ld64, ra, rb, disp16, 0, 0);
2833 break;
2834 case 0x2A:
2835 /* LDL_L */
2836 gen_load_mem(ctx, &gen_qemu_ldl_l, ra, rb, disp16, 0, 0);
2837 break;
2838 case 0x2B:
2839 /* LDQ_L */
2840 gen_load_mem(ctx, &gen_qemu_ldq_l, ra, rb, disp16, 0, 0);
2841 break;
2842 case 0x2C:
2843 /* STL */
2844 gen_store_mem(ctx, &tcg_gen_qemu_st32, ra, rb, disp16, 0, 0);
2845 break;
2846 case 0x2D:
2847 /* STQ */
2848 gen_store_mem(ctx, &tcg_gen_qemu_st64, ra, rb, disp16, 0, 0);
2849 break;
2850 case 0x2E:
2851 /* STL_C */
2852 ret = gen_store_conditional(ctx, ra, rb, disp16,
2853 ctx->mem_idx, MO_LESL);
2854 break;
2855 case 0x2F:
2856 /* STQ_C */
2857 ret = gen_store_conditional(ctx, ra, rb, disp16,
2858 ctx->mem_idx, MO_LEQ);
2859 break;
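    /* Branch-format instructions: BR and BSR branch unconditionally
       and write the return address; FBxx test an FP register, Bxx an
       integer register, and BLBC/BLBS only its low bit. */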
2860 case 0x30:
2861 /* BR */
2862 ret = gen_bdirect(ctx, ra, disp21);
2863 break;
2864 case 0x31: /* FBEQ */
2865 ret = gen_fbcond(ctx, TCG_COND_EQ, ra, disp21);
2866 break;
2867 case 0x32: /* FBLT */
2868 ret = gen_fbcond(ctx, TCG_COND_LT, ra, disp21);
2869 break;
2870 case 0x33: /* FBLE */
2871 ret = gen_fbcond(ctx, TCG_COND_LE, ra, disp21);
2872 break;
2873 case 0x34:
2874 /* BSR */
2875 ret = gen_bdirect(ctx, ra, disp21);
2876 break;
2877 case 0x35: /* FBNE */
2878 ret = gen_fbcond(ctx, TCG_COND_NE, ra, disp21);
2879 break;
2880 case 0x36: /* FBGE */
2881 ret = gen_fbcond(ctx, TCG_COND_GE, ra, disp21);
2882 break;
2883 case 0x37: /* FBGT */
2884 ret = gen_fbcond(ctx, TCG_COND_GT, ra, disp21);
2885 break;
2886 case 0x38:
2887 /* BLBC */
2888 ret = gen_bcond(ctx, TCG_COND_EQ, ra, disp21, 1);
2889 break;
2890 case 0x39:
2891 /* BEQ */
2892 ret = gen_bcond(ctx, TCG_COND_EQ, ra, disp21, 0);
2893 break;
2894 case 0x3A:
2895 /* BLT */
2896 ret = gen_bcond(ctx, TCG_COND_LT, ra, disp21, 0);
2897 break;
2898 case 0x3B:
2899 /* BLE */
2900 ret = gen_bcond(ctx, TCG_COND_LE, ra, disp21, 0);
2901 break;
2902 case 0x3C:
2903 /* BLBS */
2904 ret = gen_bcond(ctx, TCG_COND_NE, ra, disp21, 1);
2905 break;
2906 case 0x3D:
2907 /* BNE */
2908 ret = gen_bcond(ctx, TCG_COND_NE, ra, disp21, 0);
2909 break;
2910 case 0x3E:
2911 /* BGE */
2912 ret = gen_bcond(ctx, TCG_COND_GE, ra, disp21, 0);
2913 break;
2914 case 0x3F:
2915 /* BGT */
2916 ret = gen_bcond(ctx, TCG_COND_GT, ra, disp21, 0);
2917 break;
2918 invalid_opc:
2919 ret = gen_invalid(ctx);
2920         break;
2921     }
2923     return ret;
2924 }
2926 static void alpha_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cpu)
2927 {
2928 DisasContext *ctx = container_of(dcbase, DisasContext, base);
2929 CPUAlphaState *env = cpu->env_ptr;
2930 int64_t bound, mask;
2932 ctx->tbflags = ctx->base.tb->flags;
2933 ctx->mem_idx = cpu_mmu_index(env, false);
2934 ctx->implver = env->implver;
2935 ctx->amask = env->amask;
2937 #ifdef CONFIG_USER_ONLY
2938 ctx->ir = cpu_std_ir;
2939 #else
2940 ctx->palbr = env->palbr;
2941 ctx->ir = (ctx->tbflags & ENV_FLAG_PAL_MODE ? cpu_pal_ir : cpu_std_ir);
2942 #endif
2944 /* ??? Every TB begins with unset rounding mode, to be initialized on
2945        the first fp insn of the TB. Alternatively we could define a proper
2946        default for every TB (e.g. QUAL_RM_N or QUAL_RM_D) and make sure
2947        to reset the FP_STATUS to that default at the end of any TB that
2948        changes the default. We could even (gasp) dynamically figure out
2949 what default would be most efficient given the running program. */
2950 ctx->tb_rm = -1;
2951 /* Similarly for flush-to-zero. */
2952 ctx->tb_ftz = -1;
2954 ctx->zero = NULL;
2955 ctx->sink = NULL;
2956 ctx->lit = NULL;
2958 /* Bound the number of insns to execute to those left on the page. */
2959 if (in_superpage(ctx, ctx->base.pc_first)) {
2960 mask = -1ULL << 41;
2961 } else {
2962         mask = TARGET_PAGE_MASK;
2963     }
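    /* -(pc | mask) is the number of bytes remaining in the page or
       superpage; dividing by 4 converts that into instructions. */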
2964 bound = -(ctx->base.pc_first | mask) / 4;
2965     ctx->base.max_insns = MIN(ctx->base.max_insns, bound);
2966 }
2968 static void alpha_tr_tb_start(DisasContextBase *db, CPUState *cpu)
2969 {
2970 }
2972 static void alpha_tr_insn_start(DisasContextBase *dcbase, CPUState *cpu)
2973 {
2974     tcg_gen_insn_start(dcbase->pc_next);
2975 }
2977 static bool alpha_tr_breakpoint_check(DisasContextBase *dcbase, CPUState *cpu,
2978                                       const CPUBreakpoint *bp)
2979 {
2980 DisasContext *ctx = container_of(dcbase, DisasContext, base);
2982 ctx->base.is_jmp = gen_excp(ctx, EXCP_DEBUG, 0);
2984 /* The address covered by the breakpoint must be included in
2985        [tb->pc, tb->pc + tb->size) in order for it to be
2986 properly cleared -- thus we increment the PC here so that
2987 the logic setting tb->size below does the right thing. */
2988 ctx->base.pc_next += 4;
2989     return true;
2990 }
2992 static void alpha_tr_translate_insn(DisasContextBase *dcbase, CPUState *cpu)
2993 {
2994 DisasContext *ctx = container_of(dcbase, DisasContext, base);
2995 CPUAlphaState *env = cpu->env_ptr;
2996 uint32_t insn = translator_ldl(env, ctx->base.pc_next);
2998 ctx->base.pc_next += 4;
2999 ctx->base.is_jmp = translate_one(ctx, insn);
3001 free_context_temps(ctx);
3002     translator_loop_temp_check(&ctx->base);
3003 }
3005 static void alpha_tr_tb_stop(DisasContextBase *dcbase, CPUState *cpu)
3006 {
3007 DisasContext *ctx = container_of(dcbase, DisasContext, base);
3009 switch (ctx->base.is_jmp) {
3010 case DISAS_NORETURN:
3011 break;
3012 case DISAS_TOO_MANY:
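        /* The TB ended only because the insn limit was reached;
           chain directly to the next PC when use_goto_tb allows. */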
3013 if (use_goto_tb(ctx, ctx->base.pc_next)) {
3014 tcg_gen_goto_tb(0);
3015 tcg_gen_movi_i64(cpu_pc, ctx->base.pc_next);
3016             tcg_gen_exit_tb(ctx->base.tb, 0);
3017         }
3018 /* FALLTHRU */
3019 case DISAS_PC_STALE:
3020 tcg_gen_movi_i64(cpu_pc, ctx->base.pc_next);
3021 /* FALLTHRU */
3022 case DISAS_PC_UPDATED:
3023 if (!use_exit_tb(ctx)) {
3024 tcg_gen_lookup_and_goto_ptr();
3025             break;
3026         }
3027 /* FALLTHRU */
3028 case DISAS_PC_UPDATED_NOCHAIN:
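        /* Exit to the main loop without chaining so that pending
           interrupts are recognized immediately; under single-step,
           raise EXCP_DEBUG instead. */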
3029 if (ctx->base.singlestep_enabled) {
3030 gen_excp_1(EXCP_DEBUG, 0);
3031 } else {
3032             tcg_gen_exit_tb(NULL, 0);
3033         }
3034 break;
3035 default:
3036         g_assert_not_reached();
3037     }
3038 }
3040 static void alpha_tr_disas_log(const DisasContextBase *dcbase, CPUState *cpu)
3041 {
3042 qemu_log("IN: %s\n", lookup_symbol(dcbase->pc_first));
3043     log_target_disas(cpu, dcbase->pc_first, dcbase->tb->size);
3044 }
3046 static const TranslatorOps alpha_tr_ops = {
3047 .init_disas_context = alpha_tr_init_disas_context,
3048 .tb_start = alpha_tr_tb_start,
3049 .insn_start = alpha_tr_insn_start,
3050 .breakpoint_check = alpha_tr_breakpoint_check,
3051 .translate_insn = alpha_tr_translate_insn,
3052 .tb_stop = alpha_tr_tb_stop,
3053     .disas_log = alpha_tr_disas_log,
3054 };
3056 void gen_intermediate_code(CPUState *cpu, TranslationBlock *tb, int max_insns)
3057 {
3058 DisasContext dc;
3059     translator_loop(&alpha_tr_ops, &dc.base, cpu, tb, max_insns);
3060 }
3062 void restore_state_to_opc(CPUAlphaState *env, TranslationBlock *tb,
3063                           target_ulong *data)
3064 {
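    /* data[0] is the PC recorded by tcg_gen_insn_start. */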
3065     env->pc = data[0];
3066 }