/*
 * Alpha emulation cpu translation for qemu.
 *
 * Copyright (c) 2007 Jocelyn Mayer
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "cpu.h"
#include "disas/disas.h"
#include "qemu/host-utils.h"
#include "exec/exec-all.h"
#include "tcg-op.h"
#include "exec/cpu_ldst.h"

#include "exec/helper-proto.h"
#include "exec/helper-gen.h"

#include "trace-tcg.h"
#include "exec/log.h"

#undef ALPHA_DEBUG_DISAS
#define CONFIG_SOFTFLOAT_INLINE

#ifdef ALPHA_DEBUG_DISAS
# define LOG_DISAS(...) qemu_log_mask(CPU_LOG_TB_IN_ASM, ## __VA_ARGS__)
#else
# define LOG_DISAS(...) do { } while (0)
#endif
typedef struct DisasContext DisasContext;
struct DisasContext {
    struct TranslationBlock *tb;
    uint64_t pc;
#ifndef CONFIG_USER_ONLY
    uint64_t palbr;
#endif
    int mem_idx;

    /* Current rounding mode for this TB. */
    int tb_rm;
    /* Current flush-to-zero setting for this TB. */
    int tb_ftz;

    /* implver value for this CPU. */
    int implver;

    /* The set of registers active in the current context. */
    TCGv *ir;

    /* Temporaries for $31 and $f31 as source and destination. */
    TCGv zero;
    TCGv sink;
    /* Temporary for immediate constants. */
    TCGv lit;

    bool singlestep_enabled;
};
/* Return values from translate_one, indicating the state of the TB.
   Note that zero indicates that we are not exiting the TB. */
typedef enum {
    NO_EXIT,

    /* We have emitted one or more goto_tb. No fixup required. */
    EXIT_GOTO_TB,

    /* We are not using a goto_tb (for whatever reason), but have updated
       the PC (for whatever reason), so there's no need to do it again on
       exiting the TB. */
    EXIT_PC_UPDATED,

    /* We are exiting the TB, but have neither emitted a goto_tb, nor
       updated the PC for the next instruction to be executed. */
    EXIT_PC_STALE,

    /* We are ending the TB with a noreturn function call, e.g. longjmp.
       No following code will be executed. */
    EXIT_NORETURN,
} ExitStatus;
/* global register indexes */
static TCGv_env cpu_env;
static TCGv cpu_std_ir[31];
static TCGv cpu_fir[31];
static TCGv cpu_pc;
static TCGv cpu_lock_addr;
static TCGv cpu_lock_value;

#ifndef CONFIG_USER_ONLY
static TCGv cpu_pal_ir[31];
#endif

#include "exec/gen-icount.h"
void alpha_translate_init(void)
{
#define DEF_VAR(V)  { &cpu_##V, #V, offsetof(CPUAlphaState, V) }

    typedef struct { TCGv *var; const char *name; int ofs; } GlobalVar;
    static const GlobalVar vars[] = {
        DEF_VAR(pc),
        DEF_VAR(lock_addr),
        DEF_VAR(lock_value),
    };

#undef DEF_VAR

    /* Use the symbolic register names that match the disassembler. */
    static const char greg_names[31][4] = {
        "v0", "t0", "t1", "t2", "t3", "t4", "t5", "t6",
        "t7", "s0", "s1", "s2", "s3", "s4", "s5", "fp",
        "a0", "a1", "a2", "a3", "a4", "a5", "t8", "t9",
        "t10", "t11", "ra", "t12", "at", "gp", "sp"
    };
    static const char freg_names[31][4] = {
        "f0", "f1", "f2", "f3", "f4", "f5", "f6", "f7",
        "f8", "f9", "f10", "f11", "f12", "f13", "f14", "f15",
        "f16", "f17", "f18", "f19", "f20", "f21", "f22", "f23",
        "f24", "f25", "f26", "f27", "f28", "f29", "f30"
    };
#ifndef CONFIG_USER_ONLY
    static const char shadow_names[8][8] = {
        "pal_t7", "pal_s0", "pal_s1", "pal_s2",
        "pal_s3", "pal_s4", "pal_s5", "pal_t11"
    };
#endif

    static bool done_init = 0;
    int i;

    if (done_init) {
        return;
    }
    done_init = 1;

    cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");
    tcg_ctx.tcg_env = cpu_env;

    for (i = 0; i < 31; i++) {
        cpu_std_ir[i] = tcg_global_mem_new_i64(cpu_env,
                                               offsetof(CPUAlphaState, ir[i]),
                                               greg_names[i]);
    }

    for (i = 0; i < 31; i++) {
        cpu_fir[i] = tcg_global_mem_new_i64(cpu_env,
                                            offsetof(CPUAlphaState, fir[i]),
                                            freg_names[i]);
    }

#ifndef CONFIG_USER_ONLY
    memcpy(cpu_pal_ir, cpu_std_ir, sizeof(cpu_pal_ir));
    for (i = 0; i < 8; i++) {
        int r = (i == 7 ? 25 : i + 8);
        cpu_pal_ir[r] = tcg_global_mem_new_i64(cpu_env,
                                               offsetof(CPUAlphaState,
                                                        shadow[i]),
                                               shadow_names[i]);
    }
#endif

    for (i = 0; i < ARRAY_SIZE(vars); ++i) {
        const GlobalVar *v = &vars[i];
        *v->var = tcg_global_mem_new_i64(cpu_env, v->ofs, v->name);
    }
}
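/* $31 and $f31 read as zero and discard writes. The "zero" source and
   "sink" destination temporaries below are created lazily, at most once
   per instruction, and released again by free_context_temps. */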
static TCGv load_zero(DisasContext *ctx)
{
    if (TCGV_IS_UNUSED_I64(ctx->zero)) {
        ctx->zero = tcg_const_i64(0);
    }
    return ctx->zero;
}

static TCGv dest_sink(DisasContext *ctx)
{
    if (TCGV_IS_UNUSED_I64(ctx->sink)) {
        ctx->sink = tcg_temp_new();
    }
    return ctx->sink;
}

static void free_context_temps(DisasContext *ctx)
{
    if (!TCGV_IS_UNUSED_I64(ctx->sink)) {
        tcg_gen_discard_i64(ctx->sink);
        tcg_temp_free(ctx->sink);
        TCGV_UNUSED_I64(ctx->sink);
    }
    if (!TCGV_IS_UNUSED_I64(ctx->zero)) {
        tcg_temp_free(ctx->zero);
        TCGV_UNUSED_I64(ctx->zero);
    }
    if (!TCGV_IS_UNUSED_I64(ctx->lit)) {
        tcg_temp_free(ctx->lit);
        TCGV_UNUSED_I64(ctx->lit);
    }
}
static TCGv load_gpr(DisasContext *ctx, unsigned reg)
{
    if (likely(reg < 31)) {
        return ctx->ir[reg];
    } else {
        return load_zero(ctx);
    }
}

static TCGv load_gpr_lit(DisasContext *ctx, unsigned reg,
                         uint8_t lit, bool islit)
{
    if (islit) {
        ctx->lit = tcg_const_i64(lit);
        return ctx->lit;
    } else if (likely(reg < 31)) {
        return ctx->ir[reg];
    } else {
        return load_zero(ctx);
    }
}

static TCGv dest_gpr(DisasContext *ctx, unsigned reg)
{
    if (likely(reg < 31)) {
        return ctx->ir[reg];
    } else {
        return dest_sink(ctx);
    }
}

static TCGv load_fpr(DisasContext *ctx, unsigned reg)
{
    if (likely(reg < 31)) {
        return cpu_fir[reg];
    } else {
        return load_zero(ctx);
    }
}

static TCGv dest_fpr(DisasContext *ctx, unsigned reg)
{
    if (likely(reg < 31)) {
        return cpu_fir[reg];
    } else {
        return dest_sink(ctx);
    }
}
static void gen_excp_1(int exception, int error_code)
{
    TCGv_i32 tmp1, tmp2;

    tmp1 = tcg_const_i32(exception);
    tmp2 = tcg_const_i32(error_code);
    gen_helper_excp(cpu_env, tmp1, tmp2);
    tcg_temp_free_i32(tmp2);
    tcg_temp_free_i32(tmp1);
}

static ExitStatus gen_excp(DisasContext *ctx, int exception, int error_code)
{
    tcg_gen_movi_i64(cpu_pc, ctx->pc);
    gen_excp_1(exception, error_code);
    return EXIT_NORETURN;
}

static inline ExitStatus gen_invalid(DisasContext *ctx)
{
    return gen_excp(ctx, EXCP_OPCDEC, 0);
}

static inline void gen_qemu_ldf(TCGv t0, TCGv t1, int flags)
{
    TCGv_i32 tmp32 = tcg_temp_new_i32();
    tcg_gen_qemu_ld_i32(tmp32, t1, flags, MO_LEUL);
    gen_helper_memory_to_f(t0, tmp32);
    tcg_temp_free_i32(tmp32);
}

static inline void gen_qemu_ldg(TCGv t0, TCGv t1, int flags)
{
    TCGv tmp = tcg_temp_new();
    tcg_gen_qemu_ld_i64(tmp, t1, flags, MO_LEQ);
    gen_helper_memory_to_g(t0, tmp);
    tcg_temp_free(tmp);
}

static inline void gen_qemu_lds(TCGv t0, TCGv t1, int flags)
{
    TCGv_i32 tmp32 = tcg_temp_new_i32();
    tcg_gen_qemu_ld_i32(tmp32, t1, flags, MO_LEUL);
    gen_helper_memory_to_s(t0, tmp32);
    tcg_temp_free_i32(tmp32);
}

static inline void gen_qemu_ldl_l(TCGv t0, TCGv t1, int flags)
{
    tcg_gen_qemu_ld_i64(t0, t1, flags, MO_LESL);
    tcg_gen_mov_i64(cpu_lock_addr, t1);
    tcg_gen_mov_i64(cpu_lock_value, t0);
}

static inline void gen_qemu_ldq_l(TCGv t0, TCGv t1, int flags)
{
    tcg_gen_qemu_ld_i64(t0, t1, flags, MO_LEQ);
    tcg_gen_mov_i64(cpu_lock_addr, t1);
    tcg_gen_mov_i64(cpu_lock_value, t0);
}
static inline void gen_load_mem(DisasContext *ctx,
                                void (*tcg_gen_qemu_load)(TCGv t0, TCGv t1,
                                                          int flags),
                                int ra, int rb, int32_t disp16, bool fp,
                                bool clear)
{
    TCGv tmp, addr, va;

    /* LDQ_U with ra $31 is UNOP. Other various loads are forms of
       prefetches, which we can treat as nops. No worries about
       missed exceptions here. */
    if (unlikely(ra == 31)) {
        return;
    }

    tmp = tcg_temp_new();
    addr = load_gpr(ctx, rb);

    if (disp16) {
        tcg_gen_addi_i64(tmp, addr, disp16);
        addr = tmp;
    }
    if (clear) {
        tcg_gen_andi_i64(tmp, addr, ~0x7);
        addr = tmp;
    }

    va = (fp ? cpu_fir[ra] : ctx->ir[ra]);
    tcg_gen_qemu_load(va, addr, ctx->mem_idx);

    tcg_temp_free(tmp);
}
static inline void gen_qemu_stf(TCGv t0, TCGv t1, int flags)
{
    TCGv_i32 tmp32 = tcg_temp_new_i32();
    gen_helper_f_to_memory(tmp32, t0);
    tcg_gen_qemu_st_i32(tmp32, t1, flags, MO_LEUL);
    tcg_temp_free_i32(tmp32);
}

static inline void gen_qemu_stg(TCGv t0, TCGv t1, int flags)
{
    TCGv tmp = tcg_temp_new();
    gen_helper_g_to_memory(tmp, t0);
    tcg_gen_qemu_st_i64(tmp, t1, flags, MO_LEQ);
    tcg_temp_free(tmp);
}

static inline void gen_qemu_sts(TCGv t0, TCGv t1, int flags)
{
    TCGv_i32 tmp32 = tcg_temp_new_i32();
    gen_helper_s_to_memory(tmp32, t0);
    tcg_gen_qemu_st_i32(tmp32, t1, flags, MO_LEUL);
    tcg_temp_free_i32(tmp32);
}

static inline void gen_store_mem(DisasContext *ctx,
                                 void (*tcg_gen_qemu_store)(TCGv t0, TCGv t1,
                                                            int flags),
                                 int ra, int rb, int32_t disp16, bool fp,
                                 bool clear)
{
    TCGv tmp, addr, va;

    tmp = tcg_temp_new();
    addr = load_gpr(ctx, rb);

    if (disp16) {
        tcg_gen_addi_i64(tmp, addr, disp16);
        addr = tmp;
    }
    if (clear) {
        tcg_gen_andi_i64(tmp, addr, ~0x7);
        addr = tmp;
    }

    va = (fp ? load_fpr(ctx, ra) : load_gpr(ctx, ra));
    tcg_gen_qemu_store(va, addr, ctx->mem_idx);

    tcg_temp_free(tmp);
}
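/* Emulate STL_C/STQ_C: the store succeeds only if the address still matches
   the last load-locked and memory still holds the value that load saw,
   which is checked here with an atomic compare-and-swap against
   cpu_lock_value. */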
static ExitStatus gen_store_conditional(DisasContext *ctx, int ra, int rb,
                                        int32_t disp16, int mem_idx,
                                        TCGMemOp op)
{
    TCGLabel *lab_fail, *lab_done;
    TCGv addr, val;

    addr = tcg_temp_new_i64();
    tcg_gen_addi_i64(addr, load_gpr(ctx, rb), disp16);
    free_context_temps(ctx);

    lab_fail = gen_new_label();
    lab_done = gen_new_label();
    tcg_gen_brcond_i64(TCG_COND_NE, addr, cpu_lock_addr, lab_fail);
    tcg_temp_free_i64(addr);

    val = tcg_temp_new_i64();
    tcg_gen_atomic_cmpxchg_i64(val, cpu_lock_addr, cpu_lock_value,
                               load_gpr(ctx, ra), mem_idx, op);
    free_context_temps(ctx);

    if (ra != 31) {
        tcg_gen_setcond_i64(TCG_COND_EQ, ctx->ir[ra], val, cpu_lock_value);
    }
    tcg_temp_free_i64(val);
    tcg_gen_br(lab_done);

    gen_set_label(lab_fail);
    if (ra != 31) {
        tcg_gen_movi_i64(ctx->ir[ra], 0);
    }

    gen_set_label(lab_done);
    tcg_gen_movi_i64(cpu_lock_addr, -1);
    return NO_EXIT;
}
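/* Detect the Alpha KSEG superpage: a kernel-mode region whose translation
   is fixed by the architecture, so its mappings can never change. */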
static bool in_superpage(DisasContext *ctx, int64_t addr)
{
#ifndef CONFIG_USER_ONLY
    return ((ctx->tb->flags & TB_FLAGS_USER_MODE) == 0
            && addr >> TARGET_VIRT_ADDR_SPACE_BITS == -1
            && ((addr >> 41) & 3) == 2);
#else
    return false;
#endif
}

static bool use_goto_tb(DisasContext *ctx, uint64_t dest)
{
    /* Suppress goto_tb in the case of single-stepping and IO. */
    if ((ctx->tb->cflags & CF_LAST_IO)
        || ctx->singlestep_enabled || singlestep) {
        return false;
    }
#ifndef CONFIG_USER_ONLY
    /* If the destination is in the superpage, the page perms can't change. */
    if (in_superpage(ctx, dest)) {
        return true;
    }
    /* Check for the dest on the same page as the start of the TB. */
    return ((ctx->tb->pc ^ dest) & TARGET_PAGE_MASK) == 0;
#else
    return true;
#endif
}
static ExitStatus gen_bdirect(DisasContext *ctx, int ra, int32_t disp)
{
    uint64_t dest = ctx->pc + (disp << 2);

    if (ra != 31) {
        tcg_gen_movi_i64(ctx->ir[ra], ctx->pc);
    }

    /* Notice branch-to-next; used to initialize RA with the PC. */
    if (disp == 0) {
        return 0;
    } else if (use_goto_tb(ctx, dest)) {
        tcg_gen_goto_tb(0);
        tcg_gen_movi_i64(cpu_pc, dest);
        tcg_gen_exit_tb((uintptr_t)ctx->tb);
        return EXIT_GOTO_TB;
    } else {
        tcg_gen_movi_i64(cpu_pc, dest);
        return EXIT_PC_UPDATED;
    }
}

static ExitStatus gen_bcond_internal(DisasContext *ctx, TCGCond cond,
                                     TCGv cmp, int32_t disp)
{
    uint64_t dest = ctx->pc + (disp << 2);
    TCGLabel *lab_true = gen_new_label();

    if (use_goto_tb(ctx, dest)) {
        tcg_gen_brcondi_i64(cond, cmp, 0, lab_true);

        tcg_gen_goto_tb(0);
        tcg_gen_movi_i64(cpu_pc, ctx->pc);
        tcg_gen_exit_tb((uintptr_t)ctx->tb);

        gen_set_label(lab_true);
        tcg_gen_goto_tb(1);
        tcg_gen_movi_i64(cpu_pc, dest);
        tcg_gen_exit_tb((uintptr_t)ctx->tb + 1);

        return EXIT_GOTO_TB;
    } else {
        TCGv_i64 z = tcg_const_i64(0);
        TCGv_i64 d = tcg_const_i64(dest);
        TCGv_i64 p = tcg_const_i64(ctx->pc);

        tcg_gen_movcond_i64(cond, cpu_pc, cmp, z, d, p);

        tcg_temp_free_i64(z);
        tcg_temp_free_i64(d);
        tcg_temp_free_i64(p);
        return EXIT_PC_UPDATED;
    }
}

static ExitStatus gen_bcond(DisasContext *ctx, TCGCond cond, int ra,
                            int32_t disp, int mask)
{
    TCGv cmp_tmp;

    if (mask) {
        cmp_tmp = tcg_temp_new();
        tcg_gen_andi_i64(cmp_tmp, load_gpr(ctx, ra), 1);
    } else {
        cmp_tmp = load_gpr(ctx, ra);
    }

    return gen_bcond_internal(ctx, cond, cmp_tmp, disp);
}
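/* IEEE -0.0 has the bit pattern 0x8000000000000000; for FP branches and
   conditional moves it must behave exactly like +0.0, hence the folding
   below. */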
/* Fold -0.0 for comparison with COND. */
static void gen_fold_mzero(TCGCond cond, TCGv dest, TCGv src)
{
    uint64_t mzero = 1ull << 63;

    switch (cond) {
    case TCG_COND_LE:
    case TCG_COND_GT:
        /* For <= or >, the -0.0 value directly compares the way we want. */
        tcg_gen_mov_i64(dest, src);
        break;

    case TCG_COND_EQ:
    case TCG_COND_NE:
        /* For == or !=, we can simply mask off the sign bit and compare. */
        tcg_gen_andi_i64(dest, src, mzero - 1);
        break;

    case TCG_COND_GE:
    case TCG_COND_LT:
        /* For >= or <, map -0.0 to +0.0 via comparison and mask. */
        tcg_gen_setcondi_i64(TCG_COND_NE, dest, src, mzero);
        tcg_gen_neg_i64(dest, dest);
        tcg_gen_and_i64(dest, dest, src);
        break;

    default:
        abort();
    }
}

static ExitStatus gen_fbcond(DisasContext *ctx, TCGCond cond, int ra,
                             int32_t disp)
{
    TCGv cmp_tmp = tcg_temp_new();
    gen_fold_mzero(cond, cmp_tmp, load_fpr(ctx, ra));
    return gen_bcond_internal(ctx, cond, cmp_tmp, disp);
}

static void gen_fcmov(DisasContext *ctx, TCGCond cond, int ra, int rb, int rc)
{
    TCGv_i64 va, vb, z;

    z = load_zero(ctx);
    vb = load_fpr(ctx, rb);
    va = tcg_temp_new();
    gen_fold_mzero(cond, va, load_fpr(ctx, ra));

    tcg_gen_movcond_i64(cond, dest_fpr(ctx, rc), va, z, vb, load_fpr(ctx, rc));

    tcg_temp_free(va);
}
#define QUAL_RM_N       0x080   /* Round mode nearest even */
#define QUAL_RM_C       0x000   /* Round mode chopped */
#define QUAL_RM_M       0x040   /* Round mode minus infinity */
#define QUAL_RM_D       0x0c0   /* Round mode dynamic */
#define QUAL_RM_MASK    0x0c0

#define QUAL_U          0x100   /* Underflow enable (fp output) */
#define QUAL_V          0x100   /* Overflow enable (int output) */
#define QUAL_S          0x400   /* Software completion enable */
#define QUAL_I          0x200   /* Inexact detection enable */

static void gen_qual_roundmode(DisasContext *ctx, int fn11)
{
    TCGv_i32 tmp;

    fn11 &= QUAL_RM_MASK;
    if (fn11 == ctx->tb_rm) {
        return;
    }
    ctx->tb_rm = fn11;

    tmp = tcg_temp_new_i32();
    switch (fn11) {
    case QUAL_RM_N:
        tcg_gen_movi_i32(tmp, float_round_nearest_even);
        break;
    case QUAL_RM_C:
        tcg_gen_movi_i32(tmp, float_round_to_zero);
        break;
    case QUAL_RM_M:
        tcg_gen_movi_i32(tmp, float_round_down);
        break;
    case QUAL_RM_D:
        tcg_gen_ld8u_i32(tmp, cpu_env,
                         offsetof(CPUAlphaState, fpcr_dyn_round));
        break;
    }

#if defined(CONFIG_SOFTFLOAT_INLINE)
    /* ??? The "fpu/softfloat.h" interface is to call set_float_rounding_mode.
       With CONFIG_SOFTFLOAT that expands to an out-of-line call that just
       sets the one field. */
    tcg_gen_st8_i32(tmp, cpu_env,
                    offsetof(CPUAlphaState, fp_status.float_rounding_mode));
#else
    gen_helper_setroundmode(tmp);
#endif

    tcg_temp_free_i32(tmp);
}

static void gen_qual_flushzero(DisasContext *ctx, int fn11)
{
    TCGv_i32 tmp;

    fn11 &= QUAL_U;
    if (fn11 == ctx->tb_ftz) {
        return;
    }
    ctx->tb_ftz = fn11;

    tmp = tcg_temp_new_i32();
    if (fn11) {
        /* Underflow is enabled, use the FPCR setting. */
        tcg_gen_ld8u_i32(tmp, cpu_env,
                         offsetof(CPUAlphaState, fpcr_flush_to_zero));
    } else {
        /* Underflow is disabled, force flush-to-zero. */
        tcg_gen_movi_i32(tmp, 1);
    }

#if defined(CONFIG_SOFTFLOAT_INLINE)
    tcg_gen_st8_i32(tmp, cpu_env,
                    offsetof(CPUAlphaState, fp_status.flush_to_zero));
#else
    gen_helper_setflushzero(tmp);
#endif

    tcg_temp_free_i32(tmp);
}
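/* Fetch an FP source register, raising the IEEE input exceptions called
   for by the instruction's /S (software completion) qualifier. */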
static TCGv gen_ieee_input(DisasContext *ctx, int reg, int fn11, int is_cmp)
{
    TCGv val;

    if (unlikely(reg == 31)) {
        val = load_zero(ctx);
    } else {
        val = cpu_fir[reg];
        if ((fn11 & QUAL_S) == 0) {
            if (is_cmp) {
                gen_helper_ieee_input_cmp(cpu_env, val);
            } else {
                gen_helper_ieee_input(cpu_env, val);
            }
        } else {
#ifndef CONFIG_USER_ONLY
            /* In system mode, raise exceptions for denormals like real
               hardware. In user mode, proceed as if the OS completion
               handler is handling the denormal as per spec. */
            gen_helper_ieee_input_s(cpu_env, val);
#endif
        }
    }
    return val;
}
static void gen_fp_exc_raise(int rc, int fn11)
{
    /* ??? We ought to be able to do something with imprecise exceptions.
       E.g. notice we're still in the trap shadow of something within the
       TB and do not generate the code to signal the exception; end the TB
       when an exception is forced to arrive, either by consumption of a
       register value or TRAPB or EXCB. */
    TCGv_i32 reg, ign;
    uint32_t ignore = 0;

    if (!(fn11 & QUAL_U)) {
        /* Note that QUAL_U == QUAL_V, so ignore either. */
        ignore |= FPCR_UNF | FPCR_IOV;
    }
    if (!(fn11 & QUAL_I)) {
        ignore |= FPCR_INE;
    }
    ign = tcg_const_i32(ignore);

    /* ??? Pass in the regno of the destination so that the helper can
       set EXC_MASK, which contains a bitmask of destination registers
       that have caused arithmetic traps. A simple userspace emulation
       does not require this. We do need it for a guest kernel's entArith,
       or if we were to do something clever with imprecise exceptions. */
    reg = tcg_const_i32(rc + 32);
    if (fn11 & QUAL_S) {
        gen_helper_fp_exc_raise_s(cpu_env, ign, reg);
    } else {
        gen_helper_fp_exc_raise(cpu_env, ign, reg);
    }

    tcg_temp_free_i32(reg);
    tcg_temp_free_i32(ign);
}
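/* CVTLQ: a longword stored in an FP register keeps bits <31:30> at
   <63:62> and bits <29:0> at <58:29>; reassemble it into a canonical
   sign-extended 64-bit integer. */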
static void gen_cvtlq(TCGv vc, TCGv vb)
{
    TCGv tmp = tcg_temp_new();

    /* The arithmetic right shift here, plus the sign-extended mask below
       yields a sign-extended result without an explicit ext32s_i64. */
    tcg_gen_sari_i64(tmp, vb, 32);
    tcg_gen_shri_i64(vc, vb, 29);
    tcg_gen_andi_i64(tmp, tmp, (int32_t)0xc0000000);
    tcg_gen_andi_i64(vc, vc, 0x3fffffff);
    tcg_gen_or_i64(vc, vc, tmp);

    tcg_temp_free(tmp);
}
static void gen_ieee_arith2(DisasContext *ctx,
                            void (*helper)(TCGv, TCGv_ptr, TCGv),
                            int rb, int rc, int fn11)
{
    TCGv vb;

    gen_qual_roundmode(ctx, fn11);
    gen_qual_flushzero(ctx, fn11);

    vb = gen_ieee_input(ctx, rb, fn11, 0);
    helper(dest_fpr(ctx, rc), cpu_env, vb);

    gen_fp_exc_raise(rc, fn11);
}

#define IEEE_ARITH2(name)                                       \
static inline void glue(gen_, name)(DisasContext *ctx,          \
                                    int rb, int rc, int fn11)   \
{                                                               \
    gen_ieee_arith2(ctx, gen_helper_##name, rb, rc, fn11);      \
}
IEEE_ARITH2(sqrts)
IEEE_ARITH2(sqrtt)
IEEE_ARITH2(cvtst)
IEEE_ARITH2(cvtts)

static void gen_cvttq(DisasContext *ctx, int rb, int rc, int fn11)
{
    TCGv vb, vc;

    /* No need to set flushzero, since we have an integer output. */
    vb = gen_ieee_input(ctx, rb, fn11, 0);
    vc = dest_fpr(ctx, rc);

    /* Almost all integer conversions use cropped rounding;
       special case that. */
    if ((fn11 & QUAL_RM_MASK) == QUAL_RM_C) {
        gen_helper_cvttq_c(vc, cpu_env, vb);
    } else {
        gen_qual_roundmode(ctx, fn11);
        gen_helper_cvttq(vc, cpu_env, vb);
    }
    gen_fp_exc_raise(rc, fn11);
}

static void gen_ieee_intcvt(DisasContext *ctx,
                            void (*helper)(TCGv, TCGv_ptr, TCGv),
                            int rb, int rc, int fn11)
{
    TCGv vb, vc;

    gen_qual_roundmode(ctx, fn11);
    vb = load_fpr(ctx, rb);
    vc = dest_fpr(ctx, rc);

    /* The only exception that can be raised by integer conversion
       is inexact. Thus we only need to worry about exceptions when
       inexact handling is requested. */
    if (fn11 & QUAL_I) {
        helper(vc, cpu_env, vb);
        gen_fp_exc_raise(rc, fn11);
    } else {
        helper(vc, cpu_env, vb);
    }
}

#define IEEE_INTCVT(name)                                       \
static inline void glue(gen_, name)(DisasContext *ctx,          \
                                    int rb, int rc, int fn11)   \
{                                                               \
    gen_ieee_intcvt(ctx, gen_helper_##name, rb, rc, fn11);      \
}
IEEE_INTCVT(cvtqs)
IEEE_INTCVT(cvtqt)
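/* Compute VC = (VA & MASK) | (VB & ~MASK), optionally complementing VA;
   this is the building block for the CPYS/CPYSN/CPYSE sign- and
   exponent-copy instructions. */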
static void gen_cpy_mask(TCGv vc, TCGv va, TCGv vb, bool inv_a, uint64_t mask)
{
    TCGv vmask = tcg_const_i64(mask);
    TCGv tmp = tcg_temp_new_i64();

    if (inv_a) {
        tcg_gen_andc_i64(tmp, vmask, va);
    } else {
        tcg_gen_and_i64(tmp, va, vmask);
    }

    tcg_gen_andc_i64(vc, vb, vmask);
    tcg_gen_or_i64(vc, vc, tmp);

    tcg_temp_free(vmask);
    tcg_temp_free(tmp);
}

static void gen_ieee_arith3(DisasContext *ctx,
                            void (*helper)(TCGv, TCGv_ptr, TCGv, TCGv),
                            int ra, int rb, int rc, int fn11)
{
    TCGv va, vb, vc;

    gen_qual_roundmode(ctx, fn11);
    gen_qual_flushzero(ctx, fn11);

    va = gen_ieee_input(ctx, ra, fn11, 0);
    vb = gen_ieee_input(ctx, rb, fn11, 0);
    vc = dest_fpr(ctx, rc);
    helper(vc, cpu_env, va, vb);

    gen_fp_exc_raise(rc, fn11);
}

#define IEEE_ARITH3(name)                                               \
static inline void glue(gen_, name)(DisasContext *ctx,                  \
                                    int ra, int rb, int rc, int fn11)   \
{                                                                       \
    gen_ieee_arith3(ctx, gen_helper_##name, ra, rb, rc, fn11);          \
}
IEEE_ARITH3(adds)
IEEE_ARITH3(subs)
IEEE_ARITH3(muls)
IEEE_ARITH3(divs)
IEEE_ARITH3(addt)
IEEE_ARITH3(subt)
IEEE_ARITH3(mult)
IEEE_ARITH3(divt)

static void gen_ieee_compare(DisasContext *ctx,
                             void (*helper)(TCGv, TCGv_ptr, TCGv, TCGv),
                             int ra, int rb, int rc, int fn11)
{
    TCGv va, vb, vc;

    va = gen_ieee_input(ctx, ra, fn11, 1);
    vb = gen_ieee_input(ctx, rb, fn11, 1);
    vc = dest_fpr(ctx, rc);
    helper(vc, cpu_env, va, vb);

    gen_fp_exc_raise(rc, fn11);
}

#define IEEE_CMP3(name)                                                 \
static inline void glue(gen_, name)(DisasContext *ctx,                  \
                                    int ra, int rb, int rc, int fn11)   \
{                                                                       \
    gen_ieee_compare(ctx, gen_helper_##name, ra, rb, rc, fn11);         \
}
IEEE_CMP3(cmptun)
IEEE_CMP3(cmpteq)
IEEE_CMP3(cmptlt)
IEEE_CMP3(cmptle)
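/* Expand the 8-bit ZAPNOT literal into a 64-bit byte mask: bit i of LIT
   selects byte i, e.g. zapnot_mask(0x0f) == 0x00000000ffffffff. */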
static inline uint64_t zapnot_mask(uint8_t lit)
{
    uint64_t mask = 0;
    int i;

    for (i = 0; i < 8; ++i) {
        if ((lit >> i) & 1) {
            mask |= 0xffull << (i * 8);
        }
    }
    return mask;
}

/* Implement zapnot with an immediate operand, which expands to some
   form of immediate AND. This is a basic building block in the
   definition of many of the other byte manipulation instructions. */
static void gen_zapnoti(TCGv dest, TCGv src, uint8_t lit)
{
    switch (lit) {
    case 0x00:
        tcg_gen_movi_i64(dest, 0);
        break;
    case 0x01:
        tcg_gen_ext8u_i64(dest, src);
        break;
    case 0x03:
        tcg_gen_ext16u_i64(dest, src);
        break;
    case 0x0f:
        tcg_gen_ext32u_i64(dest, src);
        break;
    case 0xff:
        tcg_gen_mov_i64(dest, src);
        break;
    default:
        tcg_gen_andi_i64(dest, src, zapnot_mask(lit));
        break;
    }
}

/* EXTWH, EXTLH, EXTQH */
static void gen_ext_h(DisasContext *ctx, TCGv vc, TCGv va, int rb, bool islit,
                      uint8_t lit, uint8_t byte_mask)
{
    if (islit) {
        int pos = (64 - lit * 8) & 0x3f;
        int len = cto32(byte_mask) * 8;
        if (pos < len) {
            tcg_gen_deposit_z_i64(vc, va, pos, len - pos);
        } else {
            tcg_gen_movi_i64(vc, 0);
        }
    } else {
        TCGv tmp = tcg_temp_new();
        tcg_gen_shli_i64(tmp, load_gpr(ctx, rb), 3);
        tcg_gen_neg_i64(tmp, tmp);
        tcg_gen_andi_i64(tmp, tmp, 0x3f);
        tcg_gen_shl_i64(vc, va, tmp);
        tcg_temp_free(tmp);
    }
    gen_zapnoti(vc, vc, byte_mask);
}

/* EXTBL, EXTWL, EXTLL, EXTQL */
static void gen_ext_l(DisasContext *ctx, TCGv vc, TCGv va, int rb, bool islit,
                      uint8_t lit, uint8_t byte_mask)
{
    if (islit) {
        int pos = (lit & 7) * 8;
        int len = cto32(byte_mask) * 8;
        if (pos + len >= 64) {
            len = 64 - pos;
        }
        tcg_gen_extract_i64(vc, va, pos, len);
    } else {
        TCGv tmp = tcg_temp_new();
        tcg_gen_andi_i64(tmp, load_gpr(ctx, rb), 7);
        tcg_gen_shli_i64(tmp, tmp, 3);
        tcg_gen_shr_i64(vc, va, tmp);
        tcg_temp_free(tmp);
        gen_zapnoti(vc, vc, byte_mask);
    }
}

/* INSWH, INSLH, INSQH */
static void gen_ins_h(DisasContext *ctx, TCGv vc, TCGv va, int rb, bool islit,
                      uint8_t lit, uint8_t byte_mask)
{
    if (islit) {
        int pos = 64 - (lit & 7) * 8;
        int len = cto32(byte_mask) * 8;
        if (pos < len) {
            tcg_gen_extract_i64(vc, va, pos, len - pos);
        } else {
            tcg_gen_movi_i64(vc, 0);
        }
    } else {
        TCGv tmp = tcg_temp_new();
        TCGv shift = tcg_temp_new();

        /* The instruction description has us left-shift the byte mask
           and extract bits <15:8> and apply that zap at the end. This
           is equivalent to simply performing the zap first and shifting
           afterward. */
        gen_zapnoti(tmp, va, byte_mask);

        /* If (B & 7) == 0, we need to shift by 64 and leave a zero. Do this
           portably by splitting the shift into two parts: shift_count-1 and 1.
           Arrange for the -1 by using ones-complement instead of
           twos-complement in the negation: ~(B * 8) & 63. */

        tcg_gen_shli_i64(shift, load_gpr(ctx, rb), 3);
        tcg_gen_not_i64(shift, shift);
        tcg_gen_andi_i64(shift, shift, 0x3f);

        tcg_gen_shr_i64(vc, tmp, shift);
        tcg_gen_shri_i64(vc, vc, 1);
        tcg_temp_free(shift);
        tcg_temp_free(tmp);
    }
}

/* INSBL, INSWL, INSLL, INSQL */
static void gen_ins_l(DisasContext *ctx, TCGv vc, TCGv va, int rb, bool islit,
                      uint8_t lit, uint8_t byte_mask)
{
    if (islit) {
        int pos = (lit & 7) * 8;
        int len = cto32(byte_mask) * 8;
        if (pos + len > 64) {
            len = 64 - pos;
        }
        tcg_gen_deposit_z_i64(vc, va, pos, len);
    } else {
        TCGv tmp = tcg_temp_new();
        TCGv shift = tcg_temp_new();

        /* The instruction description has us left-shift the byte mask
           and extract bits <15:8> and apply that zap at the end. This
           is equivalent to simply performing the zap first and shifting
           afterward. */
        gen_zapnoti(tmp, va, byte_mask);

        tcg_gen_andi_i64(shift, load_gpr(ctx, rb), 7);
        tcg_gen_shli_i64(shift, shift, 3);
        tcg_gen_shl_i64(vc, tmp, shift);
        tcg_temp_free(shift);
        tcg_temp_free(tmp);
    }
}

/* MSKWH, MSKLH, MSKQH */
static void gen_msk_h(DisasContext *ctx, TCGv vc, TCGv va, int rb, bool islit,
                      uint8_t lit, uint8_t byte_mask)
{
    if (islit) {
        gen_zapnoti(vc, va, ~((byte_mask << (lit & 7)) >> 8));
    } else {
        TCGv shift = tcg_temp_new();
        TCGv mask = tcg_temp_new();

        /* The instruction description is as above, where the byte_mask
           is shifted left, and then we extract bits <15:8>. This can be
           emulated with a right-shift on the expanded byte mask. This
           requires extra care because for an input <2:0> == 0 we need a
           shift of 64 bits in order to generate a zero. This is done by
           splitting the shift into two parts, the variable shift - 1
           followed by a constant 1 shift. The code we expand below is
           equivalent to ~(B * 8) & 63. */

        tcg_gen_shli_i64(shift, load_gpr(ctx, rb), 3);
        tcg_gen_not_i64(shift, shift);
        tcg_gen_andi_i64(shift, shift, 0x3f);
        tcg_gen_movi_i64(mask, zapnot_mask(byte_mask));
        tcg_gen_shr_i64(mask, mask, shift);
        tcg_gen_shri_i64(mask, mask, 1);

        tcg_gen_andc_i64(vc, va, mask);

        tcg_temp_free(mask);
        tcg_temp_free(shift);
    }
}

/* MSKBL, MSKWL, MSKLL, MSKQL */
static void gen_msk_l(DisasContext *ctx, TCGv vc, TCGv va, int rb, bool islit,
                      uint8_t lit, uint8_t byte_mask)
{
    if (islit) {
        gen_zapnoti(vc, va, ~(byte_mask << (lit & 7)));
    } else {
        TCGv shift = tcg_temp_new();
        TCGv mask = tcg_temp_new();

        tcg_gen_andi_i64(shift, load_gpr(ctx, rb), 7);
        tcg_gen_shli_i64(shift, shift, 3);
        tcg_gen_movi_i64(mask, zapnot_mask(byte_mask));
        tcg_gen_shl_i64(mask, mask, shift);

        tcg_gen_andc_i64(vc, va, mask);

        tcg_temp_free(mask);
        tcg_temp_free(shift);
    }
}
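/* RC and RS: copy the cpu's interrupt flag into RA, then force the flag
   to SET (0 for RC, 1 for RS). */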
static void gen_rx(DisasContext *ctx, int ra, int set)
{
    TCGv_i32 tmp;

    if (ra != 31) {
        tcg_gen_ld8u_i64(ctx->ir[ra], cpu_env,
                         offsetof(CPUAlphaState, intr_flag));
    }

    tmp = tcg_const_i32(set);
    tcg_gen_st8_i32(tmp, cpu_env, offsetof(CPUAlphaState, intr_flag));
    tcg_temp_free_i32(tmp);
}
static ExitStatus gen_call_pal(DisasContext *ctx, int palcode)
{
    /* We're emulating OSF/1 PALcode. Many of these are trivial access
       to internal cpu registers. */

    /* Unprivileged PAL call */
    if (palcode >= 0x80 && palcode < 0xC0) {
        switch (palcode) {
        case 0x86:
            /* IMB */
            /* No-op inside QEMU. */
            break;
        case 0x9E:
            /* RDUNIQUE */
            tcg_gen_ld_i64(ctx->ir[IR_V0], cpu_env,
                           offsetof(CPUAlphaState, unique));
            break;
        case 0x9F:
            /* WRUNIQUE */
            tcg_gen_st_i64(ctx->ir[IR_A0], cpu_env,
                           offsetof(CPUAlphaState, unique));
            break;
        default:
            palcode &= 0xbf;
            goto do_call_pal;
        }
        return NO_EXIT;
    }

#ifndef CONFIG_USER_ONLY
    /* Privileged PAL code */
    if (palcode < 0x40 && (ctx->tb->flags & TB_FLAGS_USER_MODE) == 0) {
        switch (palcode) {
        case 0x01:
            /* CFLUSH */
            /* No-op inside QEMU. */
            break;
        case 0x02:
            /* DRAINA */
            /* No-op inside QEMU. */
            break;
        case 0x2D:
            /* WRVPTPTR */
            tcg_gen_st_i64(ctx->ir[IR_A0], cpu_env,
                           offsetof(CPUAlphaState, vptptr));
            break;
        case 0x31:
            /* WRVAL */
            tcg_gen_st_i64(ctx->ir[IR_A0], cpu_env,
                           offsetof(CPUAlphaState, sysval));
            break;
        case 0x32:
            /* RDVAL */
            tcg_gen_ld_i64(ctx->ir[IR_V0], cpu_env,
                           offsetof(CPUAlphaState, sysval));
            break;

        case 0x35: {
            /* SWPIPL */
            TCGv tmp;

            /* Note that we already know we're in kernel mode, so we know
               that PS only contains the 3 IPL bits. */
            tcg_gen_ld8u_i64(ctx->ir[IR_V0], cpu_env,
                             offsetof(CPUAlphaState, ps));

            /* But make sure and store only the 3 IPL bits from the user. */
            tmp = tcg_temp_new();
            tcg_gen_andi_i64(tmp, ctx->ir[IR_A0], PS_INT_MASK);
            tcg_gen_st8_i64(tmp, cpu_env, offsetof(CPUAlphaState, ps));
            tcg_temp_free(tmp);
            break;
        }

        case 0x36:
            /* RDPS */
            tcg_gen_ld8u_i64(ctx->ir[IR_V0], cpu_env,
                             offsetof(CPUAlphaState, ps));
            break;
        case 0x38:
            /* WRUSP */
            tcg_gen_st_i64(ctx->ir[IR_A0], cpu_env,
                           offsetof(CPUAlphaState, usp));
            break;
        case 0x3A:
            /* RDUSP */
            tcg_gen_ld_i64(ctx->ir[IR_V0], cpu_env,
                           offsetof(CPUAlphaState, usp));
            break;
        case 0x3C:
            /* WHAMI */
            tcg_gen_ld32s_i64(ctx->ir[IR_V0], cpu_env,
                -offsetof(AlphaCPU, env) + offsetof(CPUState, cpu_index));
            break;

        default:
            palcode &= 0x3f;
            goto do_call_pal;
        }
        return NO_EXIT;
    }
#endif
    return gen_invalid(ctx);

 do_call_pal:
#ifdef CONFIG_USER_ONLY
    return gen_excp(ctx, EXCP_CALL_PAL, palcode);
#else
    {
        TCGv tmp = tcg_temp_new();
        uint64_t exc_addr = ctx->pc;
        uint64_t entry = ctx->palbr;

        if (ctx->tb->flags & TB_FLAGS_PAL_MODE) {
            exc_addr |= 1;
        } else {
            tcg_gen_movi_i64(tmp, 1);
            tcg_gen_st8_i64(tmp, cpu_env, offsetof(CPUAlphaState, pal_mode));
        }

        tcg_gen_movi_i64(tmp, exc_addr);
        tcg_gen_st_i64(tmp, cpu_env, offsetof(CPUAlphaState, exc_addr));
        tcg_temp_free(tmp);

        entry += (palcode & 0x80
                  ? 0x2000 + (palcode - 0x80) * 64
                  : 0x1000 + palcode * 64);

        /* Since the destination is running in PALmode, we don't really
           need the page permissions check. We'll see the existence of
           the page when we create the TB, and we'll flush all TBs if
           we change the PAL base register. */
        if (!ctx->singlestep_enabled && !(ctx->tb->cflags & CF_LAST_IO)) {
            tcg_gen_goto_tb(0);
            tcg_gen_movi_i64(cpu_pc, entry);
            tcg_gen_exit_tb((uintptr_t)ctx->tb);
            return EXIT_GOTO_TB;
        } else {
            tcg_gen_movi_i64(cpu_pc, entry);
            return EXIT_PC_UPDATED;
        }
    }
#endif
}
#ifndef CONFIG_USER_ONLY

#define PR_BYTE         0x100000
#define PR_LONG         0x200000
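/* Map an internal processor register number to its offset in CPUAlphaState;
   the PR_BYTE/PR_LONG flags mark fields narrower than 64 bits. Unknown
   registers yield 0 and are treated as read-zero, write-ignore. */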
static int cpu_pr_data(int pr)
{
    switch (pr) {
    case 0: return offsetof(CPUAlphaState, ps) | PR_BYTE;
    case 1: return offsetof(CPUAlphaState, fen) | PR_BYTE;
    case 2: return offsetof(CPUAlphaState, pcc_ofs) | PR_LONG;
    case 3: return offsetof(CPUAlphaState, trap_arg0);
    case 4: return offsetof(CPUAlphaState, trap_arg1);
    case 5: return offsetof(CPUAlphaState, trap_arg2);
    case 6: return offsetof(CPUAlphaState, exc_addr);
    case 7: return offsetof(CPUAlphaState, palbr);
    case 8: return offsetof(CPUAlphaState, ptbr);
    case 9: return offsetof(CPUAlphaState, vptptr);
    case 10: return offsetof(CPUAlphaState, unique);
    case 11: return offsetof(CPUAlphaState, sysval);
    case 12: return offsetof(CPUAlphaState, usp);

    case 40 ... 63:
        return offsetof(CPUAlphaState, scratch[pr - 40]);

    case 251:
        return offsetof(CPUAlphaState, alarm_expire);
    }
    return 0;
}
static ExitStatus gen_mfpr(DisasContext *ctx, TCGv va, int regno)
{
    void (*helper)(TCGv);
    int data;

    switch (regno) {
    case 32 ... 39:
        /* Accessing the "non-shadow" general registers. */
        regno = regno == 39 ? 25 : regno - 32 + 8;
        tcg_gen_mov_i64(va, cpu_std_ir[regno]);
        break;

    case 250: /* WALLTIME */
        helper = gen_helper_get_walltime;
        goto do_helper;
    case 249: /* VMTIME */
        helper = gen_helper_get_vmtime;
    do_helper:
        if (use_icount) {
            gen_io_start();
            helper(va);
            gen_io_end();
            return EXIT_PC_STALE;
        } else {
            helper(va);
        }
        break;

    default:
        /* The basic registers are data only, and unknown registers
           are read-zero, write-ignore. */
        data = cpu_pr_data(regno);
        if (data == 0) {
            tcg_gen_movi_i64(va, 0);
        } else if (data & PR_BYTE) {
            tcg_gen_ld8u_i64(va, cpu_env, data & ~PR_BYTE);
        } else if (data & PR_LONG) {
            tcg_gen_ld32s_i64(va, cpu_env, data & ~PR_LONG);
        } else {
            tcg_gen_ld_i64(va, cpu_env, data);
        }
        break;
    }

    return NO_EXIT;
}
static ExitStatus gen_mtpr(DisasContext *ctx, TCGv vb, int regno)
{
    TCGv tmp;
    int data;

    switch (regno) {
    case 255:
        /* TBIA */
        gen_helper_tbia(cpu_env);
        break;

    case 254:
        /* TBIS */
        gen_helper_tbis(cpu_env, vb);
        break;

    case 253:
        /* WAIT */
        tmp = tcg_const_i64(1);
        tcg_gen_st32_i64(tmp, cpu_env, -offsetof(AlphaCPU, env) +
                         offsetof(CPUState, halted));
        return gen_excp(ctx, EXCP_HLT, 0);

    case 252:
        /* HALT */
        gen_helper_halt(vb);
        return EXIT_PC_STALE;

    case 251:
        /* ALARM */
        gen_helper_set_alarm(cpu_env, vb);
        break;

    case 7:
        /* PALBR */
        tcg_gen_st_i64(vb, cpu_env, offsetof(CPUAlphaState, palbr));
        /* Changing the PAL base register implies un-chaining all of the TBs
           that ended with a CALL_PAL. Since the base register usually only
           changes during boot, flushing everything works well. */
        gen_helper_tb_flush(cpu_env);
        return EXIT_PC_STALE;

    case 32 ... 39:
        /* Accessing the "non-shadow" general registers. */
        regno = regno == 39 ? 25 : regno - 32 + 8;
        tcg_gen_mov_i64(cpu_std_ir[regno], vb);
        break;

    default:
        /* The basic registers are data only, and unknown registers
           are read-zero, write-ignore. */
        data = cpu_pr_data(regno);
        if (data != 0) {
            if (data & PR_BYTE) {
                tcg_gen_st8_i64(vb, cpu_env, data & ~PR_BYTE);
            } else if (data & PR_LONG) {
                tcg_gen_st32_i64(vb, cpu_env, data & ~PR_LONG);
            } else {
                tcg_gen_st_i64(vb, cpu_env, data);
            }
        }
        break;
    }

    return NO_EXIT;
}
#endif /* !USER_ONLY */
#define REQUIRE_NO_LIT                          \
    do {                                        \
        if (real_islit) {                       \
            goto invalid_opc;                   \
        }                                       \
    } while (0)

#define REQUIRE_TB_FLAG(FLAG)                   \
    do {                                        \
        if ((ctx->tb->flags & (FLAG)) == 0) {   \
            goto invalid_opc;                   \
        }                                       \
    } while (0)

#define REQUIRE_REG_31(WHICH)                   \
    do {                                        \
        if (WHICH != 31) {                      \
            goto invalid_opc;                   \
        }                                       \
    } while (0)
1439 static ExitStatus translate_one(DisasContext *ctx, uint32_t insn)
1441 int32_t disp21, disp16, disp12 __attribute__((unused));
1442 uint16_t fn11;
1443 uint8_t opc, ra, rb, rc, fpfn, fn7, lit;
1444 bool islit, real_islit;
1445 TCGv va, vb, vc, tmp, tmp2;
1446 TCGv_i32 t32;
1447 ExitStatus ret;
1449 /* Decode all instruction fields */
1450 opc = extract32(insn, 26, 6);
1451 ra = extract32(insn, 21, 5);
1452 rb = extract32(insn, 16, 5);
1453 rc = extract32(insn, 0, 5);
1454 real_islit = islit = extract32(insn, 12, 1);
1455 lit = extract32(insn, 13, 8);
1457 disp21 = sextract32(insn, 0, 21);
1458 disp16 = sextract32(insn, 0, 16);
1459 disp12 = sextract32(insn, 0, 12);
1461 fn11 = extract32(insn, 5, 11);
1462 fpfn = extract32(insn, 5, 6);
1463 fn7 = extract32(insn, 5, 7);
1465 if (rb == 31 && !islit) {
1466 islit = true;
1467 lit = 0;
1470 ret = NO_EXIT;
1471 switch (opc) {
1472 case 0x00:
1473 /* CALL_PAL */
1474 ret = gen_call_pal(ctx, insn & 0x03ffffff);
1475 break;
1476 case 0x01:
1477 /* OPC01 */
1478 goto invalid_opc;
1479 case 0x02:
1480 /* OPC02 */
1481 goto invalid_opc;
1482 case 0x03:
1483 /* OPC03 */
1484 goto invalid_opc;
1485 case 0x04:
1486 /* OPC04 */
1487 goto invalid_opc;
1488 case 0x05:
1489 /* OPC05 */
1490 goto invalid_opc;
1491 case 0x06:
1492 /* OPC06 */
1493 goto invalid_opc;
1494 case 0x07:
1495 /* OPC07 */
1496 goto invalid_opc;
1498 case 0x09:
1499 /* LDAH */
1500 disp16 = (uint32_t)disp16 << 16;
1501 /* fall through */
1502 case 0x08:
1503 /* LDA */
1504 va = dest_gpr(ctx, ra);
1505 /* It's worth special-casing immediate loads. */
1506 if (rb == 31) {
1507 tcg_gen_movi_i64(va, disp16);
1508 } else {
1509 tcg_gen_addi_i64(va, load_gpr(ctx, rb), disp16);
1511 break;
1513 case 0x0A:
1514 /* LDBU */
1515 REQUIRE_TB_FLAG(TB_FLAGS_AMASK_BWX);
1516 gen_load_mem(ctx, &tcg_gen_qemu_ld8u, ra, rb, disp16, 0, 0);
1517 break;
1518 case 0x0B:
1519 /* LDQ_U */
1520 gen_load_mem(ctx, &tcg_gen_qemu_ld64, ra, rb, disp16, 0, 1);
1521 break;
1522 case 0x0C:
1523 /* LDWU */
1524 REQUIRE_TB_FLAG(TB_FLAGS_AMASK_BWX);
1525 gen_load_mem(ctx, &tcg_gen_qemu_ld16u, ra, rb, disp16, 0, 0);
1526 break;
1527 case 0x0D:
1528 /* STW */
1529 REQUIRE_TB_FLAG(TB_FLAGS_AMASK_BWX);
1530 gen_store_mem(ctx, &tcg_gen_qemu_st16, ra, rb, disp16, 0, 0);
1531 break;
1532 case 0x0E:
1533 /* STB */
1534 REQUIRE_TB_FLAG(TB_FLAGS_AMASK_BWX);
1535 gen_store_mem(ctx, &tcg_gen_qemu_st8, ra, rb, disp16, 0, 0);
1536 break;
1537 case 0x0F:
1538 /* STQ_U */
1539 gen_store_mem(ctx, &tcg_gen_qemu_st64, ra, rb, disp16, 0, 1);
1540 break;
1542 case 0x10:
1543 vc = dest_gpr(ctx, rc);
1544 vb = load_gpr_lit(ctx, rb, lit, islit);
1546 if (ra == 31) {
1547 if (fn7 == 0x00) {
1548 /* Special case ADDL as SEXTL. */
1549 tcg_gen_ext32s_i64(vc, vb);
1550 break;
1552 if (fn7 == 0x29) {
1553 /* Special case SUBQ as NEGQ. */
1554 tcg_gen_neg_i64(vc, vb);
1555 break;
1559 va = load_gpr(ctx, ra);
1560 switch (fn7) {
1561 case 0x00:
1562 /* ADDL */
1563 tcg_gen_add_i64(vc, va, vb);
1564 tcg_gen_ext32s_i64(vc, vc);
1565 break;
1566 case 0x02:
1567 /* S4ADDL */
1568 tmp = tcg_temp_new();
1569 tcg_gen_shli_i64(tmp, va, 2);
1570 tcg_gen_add_i64(tmp, tmp, vb);
1571 tcg_gen_ext32s_i64(vc, tmp);
1572 tcg_temp_free(tmp);
1573 break;
1574 case 0x09:
1575 /* SUBL */
1576 tcg_gen_sub_i64(vc, va, vb);
1577 tcg_gen_ext32s_i64(vc, vc);
1578 break;
1579 case 0x0B:
1580 /* S4SUBL */
1581 tmp = tcg_temp_new();
1582 tcg_gen_shli_i64(tmp, va, 2);
1583 tcg_gen_sub_i64(tmp, tmp, vb);
1584 tcg_gen_ext32s_i64(vc, tmp);
1585 tcg_temp_free(tmp);
1586 break;
1587 case 0x0F:
1588 /* CMPBGE */
1589 if (ra == 31) {
1590 /* Special case 0 >= X as X == 0. */
1591 gen_helper_cmpbe0(vc, vb);
1592 } else {
1593 gen_helper_cmpbge(vc, va, vb);
1595 break;
1596 case 0x12:
1597 /* S8ADDL */
1598 tmp = tcg_temp_new();
1599 tcg_gen_shli_i64(tmp, va, 3);
1600 tcg_gen_add_i64(tmp, tmp, vb);
1601 tcg_gen_ext32s_i64(vc, tmp);
1602 tcg_temp_free(tmp);
1603 break;
1604 case 0x1B:
1605 /* S8SUBL */
1606 tmp = tcg_temp_new();
1607 tcg_gen_shli_i64(tmp, va, 3);
1608 tcg_gen_sub_i64(tmp, tmp, vb);
1609 tcg_gen_ext32s_i64(vc, tmp);
1610 tcg_temp_free(tmp);
1611 break;
1612 case 0x1D:
1613 /* CMPULT */
1614 tcg_gen_setcond_i64(TCG_COND_LTU, vc, va, vb);
1615 break;
1616 case 0x20:
1617 /* ADDQ */
1618 tcg_gen_add_i64(vc, va, vb);
1619 break;
1620 case 0x22:
1621 /* S4ADDQ */
1622 tmp = tcg_temp_new();
1623 tcg_gen_shli_i64(tmp, va, 2);
1624 tcg_gen_add_i64(vc, tmp, vb);
1625 tcg_temp_free(tmp);
1626 break;
1627 case 0x29:
1628 /* SUBQ */
1629 tcg_gen_sub_i64(vc, va, vb);
1630 break;
1631 case 0x2B:
1632 /* S4SUBQ */
1633 tmp = tcg_temp_new();
1634 tcg_gen_shli_i64(tmp, va, 2);
1635 tcg_gen_sub_i64(vc, tmp, vb);
1636 tcg_temp_free(tmp);
1637 break;
1638 case 0x2D:
1639 /* CMPEQ */
1640 tcg_gen_setcond_i64(TCG_COND_EQ, vc, va, vb);
1641 break;
1642 case 0x32:
1643 /* S8ADDQ */
1644 tmp = tcg_temp_new();
1645 tcg_gen_shli_i64(tmp, va, 3);
1646 tcg_gen_add_i64(vc, tmp, vb);
1647 tcg_temp_free(tmp);
1648 break;
1649 case 0x3B:
1650 /* S8SUBQ */
1651 tmp = tcg_temp_new();
1652 tcg_gen_shli_i64(tmp, va, 3);
1653 tcg_gen_sub_i64(vc, tmp, vb);
1654 tcg_temp_free(tmp);
1655 break;
1656 case 0x3D:
1657 /* CMPULE */
1658 tcg_gen_setcond_i64(TCG_COND_LEU, vc, va, vb);
1659 break;
1660 case 0x40:
1661 /* ADDL/V */
1662 tmp = tcg_temp_new();
1663 tcg_gen_ext32s_i64(tmp, va);
1664 tcg_gen_ext32s_i64(vc, vb);
1665 tcg_gen_add_i64(tmp, tmp, vc);
1666 tcg_gen_ext32s_i64(vc, tmp);
1667 gen_helper_check_overflow(cpu_env, vc, tmp);
1668 tcg_temp_free(tmp);
1669 break;
1670 case 0x49:
1671 /* SUBL/V */
1672 tmp = tcg_temp_new();
1673 tcg_gen_ext32s_i64(tmp, va);
1674 tcg_gen_ext32s_i64(vc, vb);
1675 tcg_gen_sub_i64(tmp, tmp, vc);
1676 tcg_gen_ext32s_i64(vc, tmp);
1677 gen_helper_check_overflow(cpu_env, vc, tmp);
1678 tcg_temp_free(tmp);
1679 break;
1680 case 0x4D:
1681 /* CMPLT */
1682 tcg_gen_setcond_i64(TCG_COND_LT, vc, va, vb);
1683 break;
1684 case 0x60:
1685 /* ADDQ/V */
1686 tmp = tcg_temp_new();
1687 tmp2 = tcg_temp_new();
1688 tcg_gen_eqv_i64(tmp, va, vb);
1689 tcg_gen_mov_i64(tmp2, va);
1690 tcg_gen_add_i64(vc, va, vb);
1691 tcg_gen_xor_i64(tmp2, tmp2, vc);
1692 tcg_gen_and_i64(tmp, tmp, tmp2);
1693 tcg_gen_shri_i64(tmp, tmp, 63);
1694 tcg_gen_movi_i64(tmp2, 0);
1695 gen_helper_check_overflow(cpu_env, tmp, tmp2);
1696 tcg_temp_free(tmp);
1697 tcg_temp_free(tmp2);
1698 break;
1699 case 0x69:
1700 /* SUBQ/V */
1701 tmp = tcg_temp_new();
1702 tmp2 = tcg_temp_new();
1703 tcg_gen_xor_i64(tmp, va, vb);
1704 tcg_gen_mov_i64(tmp2, va);
1705 tcg_gen_sub_i64(vc, va, vb);
1706 tcg_gen_xor_i64(tmp2, tmp2, vc);
1707 tcg_gen_and_i64(tmp, tmp, tmp2);
1708 tcg_gen_shri_i64(tmp, tmp, 63);
1709 tcg_gen_movi_i64(tmp2, 0);
1710 gen_helper_check_overflow(cpu_env, tmp, tmp2);
1711 tcg_temp_free(tmp);
1712 tcg_temp_free(tmp2);
1713 break;
1714 case 0x6D:
1715 /* CMPLE */
1716 tcg_gen_setcond_i64(TCG_COND_LE, vc, va, vb);
1717 break;
1718 default:
1719 goto invalid_opc;
1721 break;
1723 case 0x11:
1724 if (fn7 == 0x20) {
1725 if (rc == 31) {
1726 /* Special case BIS as NOP. */
1727 break;
1729 if (ra == 31) {
1730 /* Special case BIS as MOV. */
1731 vc = dest_gpr(ctx, rc);
1732 if (islit) {
1733 tcg_gen_movi_i64(vc, lit);
1734 } else {
1735 tcg_gen_mov_i64(vc, load_gpr(ctx, rb));
1737 break;
1741 vc = dest_gpr(ctx, rc);
1742 vb = load_gpr_lit(ctx, rb, lit, islit);
1744 if (fn7 == 0x28 && ra == 31) {
1745 /* Special case ORNOT as NOT. */
1746 tcg_gen_not_i64(vc, vb);
1747 break;
1750 va = load_gpr(ctx, ra);
1751 switch (fn7) {
1752 case 0x00:
1753 /* AND */
1754 tcg_gen_and_i64(vc, va, vb);
1755 break;
1756 case 0x08:
1757 /* BIC */
1758 tcg_gen_andc_i64(vc, va, vb);
1759 break;
1760 case 0x14:
1761 /* CMOVLBS */
1762 tmp = tcg_temp_new();
1763 tcg_gen_andi_i64(tmp, va, 1);
1764 tcg_gen_movcond_i64(TCG_COND_NE, vc, tmp, load_zero(ctx),
1765 vb, load_gpr(ctx, rc));
1766 tcg_temp_free(tmp);
1767 break;
1768 case 0x16:
1769 /* CMOVLBC */
1770 tmp = tcg_temp_new();
1771 tcg_gen_andi_i64(tmp, va, 1);
1772 tcg_gen_movcond_i64(TCG_COND_EQ, vc, tmp, load_zero(ctx),
1773 vb, load_gpr(ctx, rc));
1774 tcg_temp_free(tmp);
1775 break;
1776 case 0x20:
1777 /* BIS */
1778 tcg_gen_or_i64(vc, va, vb);
1779 break;
1780 case 0x24:
1781 /* CMOVEQ */
1782 tcg_gen_movcond_i64(TCG_COND_EQ, vc, va, load_zero(ctx),
1783 vb, load_gpr(ctx, rc));
1784 break;
1785 case 0x26:
1786 /* CMOVNE */
1787 tcg_gen_movcond_i64(TCG_COND_NE, vc, va, load_zero(ctx),
1788 vb, load_gpr(ctx, rc));
1789 break;
1790 case 0x28:
1791 /* ORNOT */
1792 tcg_gen_orc_i64(vc, va, vb);
1793 break;
1794 case 0x40:
1795 /* XOR */
1796 tcg_gen_xor_i64(vc, va, vb);
1797 break;
1798 case 0x44:
1799 /* CMOVLT */
1800 tcg_gen_movcond_i64(TCG_COND_LT, vc, va, load_zero(ctx),
1801 vb, load_gpr(ctx, rc));
1802 break;
1803 case 0x46:
1804 /* CMOVGE */
1805 tcg_gen_movcond_i64(TCG_COND_GE, vc, va, load_zero(ctx),
1806 vb, load_gpr(ctx, rc));
1807 break;
1808 case 0x48:
1809 /* EQV */
1810 tcg_gen_eqv_i64(vc, va, vb);
1811 break;
1812 case 0x61:
1813 /* AMASK */
1814 REQUIRE_REG_31(ra);
1816 uint64_t amask = ctx->tb->flags >> TB_FLAGS_AMASK_SHIFT;
1817 tcg_gen_andi_i64(vc, vb, ~amask);
1819 break;
1820 case 0x64:
1821 /* CMOVLE */
1822 tcg_gen_movcond_i64(TCG_COND_LE, vc, va, load_zero(ctx),
1823 vb, load_gpr(ctx, rc));
1824 break;
1825 case 0x66:
1826 /* CMOVGT */
1827 tcg_gen_movcond_i64(TCG_COND_GT, vc, va, load_zero(ctx),
1828 vb, load_gpr(ctx, rc));
1829 break;
1830 case 0x6C:
1831 /* IMPLVER */
1832 REQUIRE_REG_31(ra);
1833 tcg_gen_movi_i64(vc, ctx->implver);
1834 break;
1835 default:
1836 goto invalid_opc;
1838 break;
1840 case 0x12:
1841 vc = dest_gpr(ctx, rc);
1842 va = load_gpr(ctx, ra);
1843 switch (fn7) {
1844 case 0x02:
1845 /* MSKBL */
1846 gen_msk_l(ctx, vc, va, rb, islit, lit, 0x01);
1847 break;
1848 case 0x06:
1849 /* EXTBL */
1850 gen_ext_l(ctx, vc, va, rb, islit, lit, 0x01);
1851 break;
1852 case 0x0B:
1853 /* INSBL */
1854 gen_ins_l(ctx, vc, va, rb, islit, lit, 0x01);
1855 break;
1856 case 0x12:
1857 /* MSKWL */
1858 gen_msk_l(ctx, vc, va, rb, islit, lit, 0x03);
1859 break;
1860 case 0x16:
1861 /* EXTWL */
1862 gen_ext_l(ctx, vc, va, rb, islit, lit, 0x03);
1863 break;
1864 case 0x1B:
1865 /* INSWL */
1866 gen_ins_l(ctx, vc, va, rb, islit, lit, 0x03);
1867 break;
1868 case 0x22:
1869 /* MSKLL */
1870 gen_msk_l(ctx, vc, va, rb, islit, lit, 0x0f);
1871 break;
1872 case 0x26:
1873 /* EXTLL */
1874 gen_ext_l(ctx, vc, va, rb, islit, lit, 0x0f);
1875 break;
1876 case 0x2B:
1877 /* INSLL */
1878 gen_ins_l(ctx, vc, va, rb, islit, lit, 0x0f);
1879 break;
1880 case 0x30:
1881 /* ZAP */
1882 if (islit) {
1883 gen_zapnoti(vc, va, ~lit);
1884 } else {
1885 gen_helper_zap(vc, va, load_gpr(ctx, rb));
1887 break;
1888 case 0x31:
1889 /* ZAPNOT */
1890 if (islit) {
1891 gen_zapnoti(vc, va, lit);
1892 } else {
1893 gen_helper_zapnot(vc, va, load_gpr(ctx, rb));
1895 break;
1896 case 0x32:
1897 /* MSKQL */
1898 gen_msk_l(ctx, vc, va, rb, islit, lit, 0xff);
1899 break;
1900 case 0x34:
1901 /* SRL */
1902 if (islit) {
1903 tcg_gen_shri_i64(vc, va, lit & 0x3f);
1904 } else {
1905 tmp = tcg_temp_new();
1906 vb = load_gpr(ctx, rb);
1907 tcg_gen_andi_i64(tmp, vb, 0x3f);
1908 tcg_gen_shr_i64(vc, va, tmp);
1909 tcg_temp_free(tmp);
1911 break;
1912 case 0x36:
1913 /* EXTQL */
1914 gen_ext_l(ctx, vc, va, rb, islit, lit, 0xff);
1915 break;
1916 case 0x39:
1917 /* SLL */
1918 if (islit) {
1919 tcg_gen_shli_i64(vc, va, lit & 0x3f);
1920 } else {
1921 tmp = tcg_temp_new();
1922 vb = load_gpr(ctx, rb);
1923 tcg_gen_andi_i64(tmp, vb, 0x3f);
1924 tcg_gen_shl_i64(vc, va, tmp);
1925 tcg_temp_free(tmp);
1927 break;
1928 case 0x3B:
1929 /* INSQL */
1930 gen_ins_l(ctx, vc, va, rb, islit, lit, 0xff);
1931 break;
1932 case 0x3C:
1933 /* SRA */
1934 if (islit) {
1935 tcg_gen_sari_i64(vc, va, lit & 0x3f);
1936 } else {
1937 tmp = tcg_temp_new();
1938 vb = load_gpr(ctx, rb);
1939 tcg_gen_andi_i64(tmp, vb, 0x3f);
1940 tcg_gen_sar_i64(vc, va, tmp);
1941 tcg_temp_free(tmp);
1943 break;
1944 case 0x52:
1945 /* MSKWH */
1946 gen_msk_h(ctx, vc, va, rb, islit, lit, 0x03);
1947 break;
1948 case 0x57:
1949 /* INSWH */
1950 gen_ins_h(ctx, vc, va, rb, islit, lit, 0x03);
1951 break;
1952 case 0x5A:
1953 /* EXTWH */
1954 gen_ext_h(ctx, vc, va, rb, islit, lit, 0x03);
1955 break;
1956 case 0x62:
1957 /* MSKLH */
1958 gen_msk_h(ctx, vc, va, rb, islit, lit, 0x0f);
1959 break;
1960 case 0x67:
1961 /* INSLH */
1962 gen_ins_h(ctx, vc, va, rb, islit, lit, 0x0f);
1963 break;
1964 case 0x6A:
1965 /* EXTLH */
1966 gen_ext_h(ctx, vc, va, rb, islit, lit, 0x0f);
1967 break;
1968 case 0x72:
1969 /* MSKQH */
1970 gen_msk_h(ctx, vc, va, rb, islit, lit, 0xff);
1971 break;
1972 case 0x77:
1973 /* INSQH */
1974 gen_ins_h(ctx, vc, va, rb, islit, lit, 0xff);
1975 break;
1976 case 0x7A:
1977 /* EXTQH */
1978 gen_ext_h(ctx, vc, va, rb, islit, lit, 0xff);
1979 break;
1980 default:
1981 goto invalid_opc;
1983 break;
1985 case 0x13:
1986 vc = dest_gpr(ctx, rc);
1987 vb = load_gpr_lit(ctx, rb, lit, islit);
1988 va = load_gpr(ctx, ra);
1989 switch (fn7) {
1990 case 0x00:
1991 /* MULL */
1992 tcg_gen_mul_i64(vc, va, vb);
1993 tcg_gen_ext32s_i64(vc, vc);
1994 break;
1995 case 0x20:
1996 /* MULQ */
1997 tcg_gen_mul_i64(vc, va, vb);
1998 break;
1999 case 0x30:
2000 /* UMULH */
2001 tmp = tcg_temp_new();
2002 tcg_gen_mulu2_i64(tmp, vc, va, vb);
2003 tcg_temp_free(tmp);
2004 break;
2005 case 0x40:
2006 /* MULL/V */
2007 tmp = tcg_temp_new();
2008 tcg_gen_ext32s_i64(tmp, va);
2009 tcg_gen_ext32s_i64(vc, vb);
2010 tcg_gen_mul_i64(tmp, tmp, vc);
2011 tcg_gen_ext32s_i64(vc, tmp);
2012 gen_helper_check_overflow(cpu_env, vc, tmp);
2013 tcg_temp_free(tmp);
2014 break;
2015 case 0x60:
2016 /* MULQ/V */
2017 tmp = tcg_temp_new();
2018 tmp2 = tcg_temp_new();
2019 tcg_gen_muls2_i64(vc, tmp, va, vb);
2020 tcg_gen_sari_i64(tmp2, vc, 63);
2021 gen_helper_check_overflow(cpu_env, tmp, tmp2);
2022 tcg_temp_free(tmp);
2023 tcg_temp_free(tmp2);
2024 break;
2025 default:
2026 goto invalid_opc;
2028 break;
2030 case 0x14:
2031 REQUIRE_TB_FLAG(TB_FLAGS_AMASK_FIX);
2032 vc = dest_fpr(ctx, rc);
2033 switch (fpfn) { /* fn11 & 0x3F */
2034 case 0x04:
2035 /* ITOFS */
2036 REQUIRE_REG_31(rb);
2037 t32 = tcg_temp_new_i32();
2038 va = load_gpr(ctx, ra);
2039 tcg_gen_extrl_i64_i32(t32, va);
2040 gen_helper_memory_to_s(vc, t32);
2041 tcg_temp_free_i32(t32);
2042 break;
2043 case 0x0A:
2044 /* SQRTF */
2045 REQUIRE_REG_31(ra);
2046 vb = load_fpr(ctx, rb);
2047 gen_helper_sqrtf(vc, cpu_env, vb);
2048 break;
2049 case 0x0B:
2050 /* SQRTS */
2051 REQUIRE_REG_31(ra);
2052 gen_sqrts(ctx, rb, rc, fn11);
2053 break;
2054 case 0x14:
2055 /* ITOFF */
2056 REQUIRE_REG_31(rb);
2057 t32 = tcg_temp_new_i32();
2058 va = load_gpr(ctx, ra);
2059 tcg_gen_extrl_i64_i32(t32, va);
2060 gen_helper_memory_to_f(vc, t32);
2061 tcg_temp_free_i32(t32);
2062 break;
2063 case 0x24:
2064 /* ITOFT */
2065 REQUIRE_REG_31(rb);
2066 va = load_gpr(ctx, ra);
2067 tcg_gen_mov_i64(vc, va);
2068 break;
2069 case 0x2A:
2070 /* SQRTG */
2071 REQUIRE_REG_31(ra);
2072 vb = load_fpr(ctx, rb);
2073 gen_helper_sqrtg(vc, cpu_env, vb);
2074 break;
2075 case 0x02B:
2076 /* SQRTT */
2077 REQUIRE_REG_31(ra);
2078 gen_sqrtt(ctx, rb, rc, fn11);
2079 break;
2080 default:
2081 goto invalid_opc;
2083 break;
2085 case 0x15:
2086 /* VAX floating point */
2087 /* XXX: rounding mode and trap are ignored (!) */
2088 vc = dest_fpr(ctx, rc);
2089 vb = load_fpr(ctx, rb);
2090 va = load_fpr(ctx, ra);
2091 switch (fpfn) { /* fn11 & 0x3F */
2092 case 0x00:
2093 /* ADDF */
2094 gen_helper_addf(vc, cpu_env, va, vb);
2095 break;
2096 case 0x01:
2097 /* SUBF */
2098 gen_helper_subf(vc, cpu_env, va, vb);
2099 break;
2100 case 0x02:
2101 /* MULF */
2102 gen_helper_mulf(vc, cpu_env, va, vb);
2103 break;
2104 case 0x03:
2105 /* DIVF */
2106 gen_helper_divf(vc, cpu_env, va, vb);
2107 break;
2108 case 0x1E:
2109 /* CVTDG -- TODO */
2110 REQUIRE_REG_31(ra);
2111 goto invalid_opc;
2112 case 0x20:
2113 /* ADDG */
2114 gen_helper_addg(vc, cpu_env, va, vb);
2115 break;
2116 case 0x21:
2117 /* SUBG */
2118 gen_helper_subg(vc, cpu_env, va, vb);
2119 break;
2120 case 0x22:
2121 /* MULG */
2122 gen_helper_mulg(vc, cpu_env, va, vb);
2123 break;
2124 case 0x23:
2125 /* DIVG */
2126 gen_helper_divg(vc, cpu_env, va, vb);
2127 break;
2128 case 0x25:
2129 /* CMPGEQ */
2130 gen_helper_cmpgeq(vc, cpu_env, va, vb);
2131 break;
2132 case 0x26:
2133 /* CMPGLT */
2134 gen_helper_cmpglt(vc, cpu_env, va, vb);
2135 break;
2136 case 0x27:
2137 /* CMPGLE */
2138 gen_helper_cmpgle(vc, cpu_env, va, vb);
2139 break;
2140 case 0x2C:
2141 /* CVTGF */
2142 REQUIRE_REG_31(ra);
2143 gen_helper_cvtgf(vc, cpu_env, vb);
2144 break;
2145 case 0x2D:
2146 /* CVTGD -- TODO */
2147 REQUIRE_REG_31(ra);
2148 goto invalid_opc;
2149 case 0x2F:
2150 /* CVTGQ */
2151 REQUIRE_REG_31(ra);
2152 gen_helper_cvtgq(vc, cpu_env, vb);
2153 break;
2154 case 0x3C:
2155 /* CVTQF */
2156 REQUIRE_REG_31(ra);
2157 gen_helper_cvtqf(vc, cpu_env, vb);
2158 break;
2159 case 0x3E:
2160 /* CVTQG */
2161 REQUIRE_REG_31(ra);
2162 gen_helper_cvtqg(vc, cpu_env, vb);
2163 break;
2164 default:
2165 goto invalid_opc;
2167 break;
2169 case 0x16:
2170 /* IEEE floating-point */
2171 switch (fpfn) { /* fn11 & 0x3F */
2172 case 0x00:
2173 /* ADDS */
2174 gen_adds(ctx, ra, rb, rc, fn11);
2175 break;
2176 case 0x01:
2177 /* SUBS */
2178 gen_subs(ctx, ra, rb, rc, fn11);
2179 break;
2180 case 0x02:
2181 /* MULS */
2182 gen_muls(ctx, ra, rb, rc, fn11);
2183 break;
2184 case 0x03:
2185 /* DIVS */
2186 gen_divs(ctx, ra, rb, rc, fn11);
2187 break;
2188 case 0x20:
2189 /* ADDT */
2190 gen_addt(ctx, ra, rb, rc, fn11);
2191 break;
2192 case 0x21:
2193 /* SUBT */
2194 gen_subt(ctx, ra, rb, rc, fn11);
2195 break;
2196 case 0x22:
2197 /* MULT */
2198 gen_mult(ctx, ra, rb, rc, fn11);
2199 break;
2200 case 0x23:
2201 /* DIVT */
2202 gen_divt(ctx, ra, rb, rc, fn11);
2203 break;
2204 case 0x24:
2205 /* CMPTUN */
2206 gen_cmptun(ctx, ra, rb, rc, fn11);
2207 break;
2208 case 0x25:
2209 /* CMPTEQ */
2210 gen_cmpteq(ctx, ra, rb, rc, fn11);
2211 break;
2212 case 0x26:
2213 /* CMPTLT */
2214 gen_cmptlt(ctx, ra, rb, rc, fn11);
2215 break;
2216 case 0x27:
2217 /* CMPTLE */
2218 gen_cmptle(ctx, ra, rb, rc, fn11);
2219 break;
2220 case 0x2C:
2221 REQUIRE_REG_31(ra);
2222 if (fn11 == 0x2AC || fn11 == 0x6AC) {
2223 /* CVTST */
2224 gen_cvtst(ctx, rb, rc, fn11);
2225 } else {
2226 /* CVTTS */
2227 gen_cvtts(ctx, rb, rc, fn11);
2229 break;
2230 case 0x2F:
2231 /* CVTTQ */
2232 REQUIRE_REG_31(ra);
2233 gen_cvttq(ctx, rb, rc, fn11);
2234 break;
2235 case 0x3C:
2236 /* CVTQS */
2237 REQUIRE_REG_31(ra);
2238 gen_cvtqs(ctx, rb, rc, fn11);
2239 break;
2240 case 0x3E:
2241 /* CVTQT */
2242 REQUIRE_REG_31(ra);
2243 gen_cvtqt(ctx, rb, rc, fn11);
2244 break;
2245 default:
2246 goto invalid_opc;
2248 break;
2250 case 0x17:
2251 switch (fn11) {
2252 case 0x010:
2253 /* CVTLQ */
2254 REQUIRE_REG_31(ra);
2255 vc = dest_fpr(ctx, rc);
2256 vb = load_fpr(ctx, rb);
2257 gen_cvtlq(vc, vb);
2258 break;
2259 case 0x020:
2260 /* CPYS */
2261 if (rc == 31) {
2262 /* Special case CPYS as FNOP. */
2263 } else {
2264 vc = dest_fpr(ctx, rc);
2265 va = load_fpr(ctx, ra);
2266 if (ra == rb) {
2267 /* Special case CPYS as FMOV. */
2268 tcg_gen_mov_i64(vc, va);
2269 } else {
2270 vb = load_fpr(ctx, rb);
2271 gen_cpy_mask(vc, va, vb, 0, 0x8000000000000000ULL);
2274 break;
2275 case 0x021:
2276 /* CPYSN */
2277 vc = dest_fpr(ctx, rc);
2278 vb = load_fpr(ctx, rb);
2279 va = load_fpr(ctx, ra);
2280 gen_cpy_mask(vc, va, vb, 1, 0x8000000000000000ULL);
2281 break;
2282 case 0x022:
2283 /* CPYSE */
2284 vc = dest_fpr(ctx, rc);
2285 vb = load_fpr(ctx, rb);
2286 va = load_fpr(ctx, ra);
2287 gen_cpy_mask(vc, va, vb, 0, 0xFFF0000000000000ULL);
2288 break;
2289 case 0x024:
2290 /* MT_FPCR */
2291 va = load_fpr(ctx, ra);
2292 gen_helper_store_fpcr(cpu_env, va);
2293 if (ctx->tb_rm == QUAL_RM_D) {
2294 /* Re-do the copy of the rounding mode to fp_status
2295 the next time we use dynamic rounding. */
2296 ctx->tb_rm = -1;
2297 }
2298 break;
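/* ctx->tb_rm caches the rounding mode most recently installed into
   fp_status for this TB.  Writing the FPCR may change the dynamic
   rounding field, so when this TB runs with dynamic rounding the
   cache is invalidated above and the next FP insn reloads it.  */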
2299 case 0x025:
2300 /* MF_FPCR */
2301 va = dest_fpr(ctx, ra);
2302 gen_helper_load_fpcr(va, cpu_env);
2303 break;
2304 case 0x02A:
2305 /* FCMOVEQ */
2306 gen_fcmov(ctx, TCG_COND_EQ, ra, rb, rc);
2307 break;
2308 case 0x02B:
2309 /* FCMOVNE */
2310 gen_fcmov(ctx, TCG_COND_NE, ra, rb, rc);
2311 break;
2312 case 0x02C:
2313 /* FCMOVLT */
2314 gen_fcmov(ctx, TCG_COND_LT, ra, rb, rc);
2315 break;
2316 case 0x02D:
2317 /* FCMOVGE */
2318 gen_fcmov(ctx, TCG_COND_GE, ra, rb, rc);
2319 break;
2320 case 0x02E:
2321 /* FCMOVLE */
2322 gen_fcmov(ctx, TCG_COND_LE, ra, rb, rc);
2323 break;
2324 case 0x02F:
2325 /* FCMOVGT */
2326 gen_fcmov(ctx, TCG_COND_GT, ra, rb, rc);
2327 break;
2328 case 0x030: /* CVTQL */
2329 case 0x130: /* CVTQL/V */
2330 case 0x530: /* CVTQL/SV */
2331 REQUIRE_REG_31(ra);
2332 vc = dest_fpr(ctx, rc);
2333 vb = load_fpr(ctx, rb);
2334 gen_helper_cvtql(vc, cpu_env, vb);
2335 gen_fp_exc_raise(rc, fn11);
2336 break;
2337 default:
2338 goto invalid_opc;
2339 }
2340 break;
2342 case 0x18:
2343 switch ((uint16_t)disp16) {
2344 case 0x0000:
2345 /* TRAPB */
2346 /* No-op. */
2347 break;
2348 case 0x0400:
2349 /* EXCB */
2350 /* No-op. */
2351 break;
2352 case 0x4000:
2353 /* MB */
2354 tcg_gen_mb(TCG_MO_ALL | TCG_BAR_SC);
2355 break;
2356 case 0x4400:
2357 /* WMB */
2358 tcg_gen_mb(TCG_MO_ST_ST | TCG_BAR_SC);
2359 break;
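/* Alpha's MB orders all earlier memory operations before all later
   ones, hence the full TCG_MO_ALL barrier; WMB only orders stores
   against stores, which is exactly the weaker TCG_MO_ST_ST.  */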
2360 case 0x8000:
2361 /* FETCH */
2362 /* No-op */
2363 break;
2364 case 0xA000:
2365 /* FETCH_M */
2366 /* No-op */
2367 break;
2368 case 0xC000:
2369 /* RPCC */
2370 va = dest_gpr(ctx, ra);
2371 if (ctx->tb->cflags & CF_USE_ICOUNT) {
2372 gen_io_start();
2373 gen_helper_load_pcc(va, cpu_env);
2374 gen_io_end();
2375 ret = EXIT_PC_STALE;
2376 } else {
2377 gen_helper_load_pcc(va, cpu_env);
2378 }
2379 break;
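/* When icount is in use, reading the cycle counter counts as an I/O
   operation: the helper is bracketed by gen_io_start()/gen_io_end()
   and the TB ends afterwards (EXIT_PC_STALE) so that anything the
   read made pending is handled before more guest code runs.  */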
2380 case 0xE000:
2381 /* RC */
2382 gen_rx(ctx, ra, 0);
2383 break;
2384 case 0xE800:
2385 /* ECB */
2386 break;
2387 case 0xF000:
2388 /* RS */
2389 gen_rx(ctx, ra, 1);
2390 break;
2391 case 0xF800:
2392 /* WH64 */
2393 /* No-op */
2394 break;
2395 case 0xFC00:
2396 /* WH64EN */
2397 /* No-op */
2398 break;
2399 default:
2400 goto invalid_opc;
2401 }
2402 break;
2404 case 0x19:
2405 /* HW_MFPR (PALcode) */
2406 #ifndef CONFIG_USER_ONLY
2407 REQUIRE_TB_FLAG(TB_FLAGS_PAL_MODE);
2408 va = dest_gpr(ctx, ra);
2409 ret = gen_mfpr(ctx, va, insn & 0xffff);
2410 break;
2411 #else
2412 goto invalid_opc;
2413 #endif
2415 case 0x1A:
2416 /* JMP, JSR, RET, JSR_COROUTINE. These only differ by the branch
2417 prediction stack action, which of course we don't implement. */
2418 vb = load_gpr(ctx, rb);
2419 tcg_gen_andi_i64(cpu_pc, vb, ~3);
2420 if (ra != 31) {
2421 tcg_gen_movi_i64(ctx->ir[ra], ctx->pc);
2422 }
2423 ret = EXIT_PC_UPDATED;
2424 break;
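/* All four variants compute the same thing: the target comes from Rb
   with the low two bits cleared, since instructions are always 4-byte
   aligned, and Ra (unless it is $31) receives the return address,
   which ctx->pc already holds by the time we get here.  */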
2426 case 0x1B:
2427 /* HW_LD (PALcode) */
2428 #ifndef CONFIG_USER_ONLY
2429 REQUIRE_TB_FLAG(TB_FLAGS_PAL_MODE);
2430 {
2431 TCGv addr = tcg_temp_new();
2432 vb = load_gpr(ctx, rb);
2433 va = dest_gpr(ctx, ra);
2435 tcg_gen_addi_i64(addr, vb, disp12);
2436 switch ((insn >> 12) & 0xF) {
2437 case 0x0:
2438 /* Longword physical access (hw_ldl/p) */
2439 tcg_gen_qemu_ld_i64(va, addr, MMU_PHYS_IDX, MO_LESL);
2440 break;
2441 case 0x1:
2442 /* Quadword physical access (hw_ldq/p) */
2443 tcg_gen_qemu_ld_i64(va, addr, MMU_PHYS_IDX, MO_LEQ);
2444 break;
2445 case 0x2:
2446 /* Longword physical access with lock (hw_ldl_l/p) */
2447 gen_qemu_ldl_l(va, addr, MMU_PHYS_IDX);
2448 break;
2449 case 0x3:
2450 /* Quadword physical access with lock (hw_ldq_l/p) */
2451 gen_qemu_ldq_l(va, addr, MMU_PHYS_IDX);
2452 break;
2453 case 0x4:
2454 /* Longword virtual PTE fetch (hw_ldl/v) */
2455 goto invalid_opc;
2456 case 0x5:
2457 /* Quadword virtual PTE fetch (hw_ldq/v) */
2458 goto invalid_opc;
2459 break;
2460 case 0x6:
2461 /* Invalid */
2462 goto invalid_opc;
2463 case 0x7:
2464 /* Invalid */
2465 goto invalid_opc;
2466 case 0x8:
2467 /* Longword virtual access (hw_ldl) */
2468 goto invalid_opc;
2469 case 0x9:
2470 /* Quadword virtual access (hw_ldq) */
2471 goto invalid_opc;
2472 case 0xA:
2473 /* Longword virtual access with protection check (hw_ldl/w) */
2474 tcg_gen_qemu_ld_i64(va, addr, MMU_KERNEL_IDX, MO_LESL);
2475 break;
2476 case 0xB:
2477 /* Quadword virtual access with protection check (hw_ldq/w) */
2478 tcg_gen_qemu_ld_i64(va, addr, MMU_KERNEL_IDX, MO_LEQ);
2479 break;
2480 case 0xC:
2481 /* Longword virtual access with alt access mode (hw_ldl/a) */
2482 goto invalid_opc;
2483 case 0xD:
2484 /* Quadword virtual access with alt access mode (hw_ldq/a) */
2485 goto invalid_opc;
2486 case 0xE:
2487 /* Longword virtual access with alternate access mode and
2488 protection checks (hw_ldl/wa) */
2489 tcg_gen_qemu_ld_i64(va, addr, MMU_USER_IDX, MO_LESL);
2490 break;
2491 case 0xF:
2492 /* Quadword virtual access with alternate access mode and
2493 protection checks (hw_ldq/wa) */
2494 tcg_gen_qemu_ld_i64(va, addr, MMU_USER_IDX, MO_LEQ);
2495 break;
2496 }
2497 tcg_temp_free(addr);
2498 break;
2499 }
2500 #else
2501 goto invalid_opc;
2502 #endif
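/* The MMU index encodes the translation regime of the access above:
   MMU_PHYS_IDX bypasses address translation altogether, while the /w
   and /wa forms use MMU_KERNEL_IDX and MMU_USER_IDX so the load takes
   the current-mode or alternate-mode protection checks.  */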
2504 case 0x1C:
2505 vc = dest_gpr(ctx, rc);
2506 if (fn7 == 0x70) {
2507 /* FTOIT */
2508 REQUIRE_TB_FLAG(TB_FLAGS_AMASK_FIX);
2509 REQUIRE_REG_31(rb);
2510 va = load_fpr(ctx, ra);
2511 tcg_gen_mov_i64(vc, va);
2512 break;
2513 } else if (fn7 == 0x78) {
2514 /* FTOIS */
2515 REQUIRE_TB_FLAG(TB_FLAGS_AMASK_FIX);
2516 REQUIRE_REG_31(rb);
2517 t32 = tcg_temp_new_i32();
2518 va = load_fpr(ctx, ra);
2519 gen_helper_s_to_memory(t32, va);
2520 tcg_gen_ext_i32_i64(vc, t32);
2521 tcg_temp_free_i32(t32);
2522 break;
2523 }
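/* FTOIT and FTOIS move raw FP register bits into the integer file.
   FTOIT is a plain 64-bit move; FTOIS additionally repacks the
   register S-format into its 32-bit memory image, which is what
   gen_helper_s_to_memory does before the sign extension above.  */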
2525 vb = load_gpr_lit(ctx, rb, lit, islit);
2526 switch (fn7) {
2527 case 0x00:
2528 /* SEXTB */
2529 REQUIRE_TB_FLAG(TB_FLAGS_AMASK_BWX);
2530 REQUIRE_REG_31(ra);
2531 tcg_gen_ext8s_i64(vc, vb);
2532 break;
2533 case 0x01:
2534 /* SEXTW */
2535 REQUIRE_TB_FLAG(TB_FLAGS_AMASK_BWX);
2536 REQUIRE_REG_31(ra);
2537 tcg_gen_ext16s_i64(vc, vb);
2538 break;
2539 case 0x30:
2540 /* CTPOP */
2541 REQUIRE_TB_FLAG(TB_FLAGS_AMASK_CIX);
2542 REQUIRE_REG_31(ra);
2543 REQUIRE_NO_LIT;
2544 tcg_gen_ctpop_i64(vc, vb);
2545 break;
2546 case 0x31:
2547 /* PERR */
2548 REQUIRE_TB_FLAG(TB_FLAGS_AMASK_MVI);
2549 REQUIRE_NO_LIT;
2550 va = load_gpr(ctx, ra);
2551 gen_helper_perr(vc, va, vb);
2552 break;
2553 case 0x32:
2554 /* CTLZ */
2555 REQUIRE_TB_FLAG(TB_FLAGS_AMASK_CIX);
2556 REQUIRE_REG_31(ra);
2557 REQUIRE_NO_LIT;
2558 tcg_gen_clzi_i64(vc, vb, 64);
2559 break;
2560 case 0x33:
2561 /* CTTZ */
2562 REQUIRE_TB_FLAG(TB_FLAGS_AMASK_CIX);
2563 REQUIRE_REG_31(ra);
2564 REQUIRE_NO_LIT;
2565 tcg_gen_ctzi_i64(vc, vb, 64);
2566 break;
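/* The trailing 64 passed to tcg_gen_clzi_i64/tcg_gen_ctzi_i64 is the
   result to produce for a zero operand, matching the architected
   CTLZ/CTTZ behaviour: counting leading or trailing zeros of 0
   yields the full register width.  */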
2567 case 0x34:
2568 /* UNPKBW */
2569 REQUIRE_TB_FLAG(TB_FLAGS_AMASK_MVI);
2570 REQUIRE_REG_31(ra);
2571 REQUIRE_NO_LIT;
2572 gen_helper_unpkbw(vc, vb);
2573 break;
2574 case 0x35:
2575 /* UNPKBL */
2576 REQUIRE_TB_FLAG(TB_FLAGS_AMASK_MVI);
2577 REQUIRE_REG_31(ra);
2578 REQUIRE_NO_LIT;
2579 gen_helper_unpkbl(vc, vb);
2580 break;
2581 case 0x36:
2582 /* PKWB */
2583 REQUIRE_TB_FLAG(TB_FLAGS_AMASK_MVI);
2584 REQUIRE_REG_31(ra);
2585 REQUIRE_NO_LIT;
2586 gen_helper_pkwb(vc, vb);
2587 break;
2588 case 0x37:
2589 /* PKLB */
2590 REQUIRE_TB_FLAG(TB_FLAGS_AMASK_MVI);
2591 REQUIRE_REG_31(ra);
2592 REQUIRE_NO_LIT;
2593 gen_helper_pklb(vc, vb);
2594 break;
2595 case 0x38:
2596 /* MINSB8 */
2597 REQUIRE_TB_FLAG(TB_FLAGS_AMASK_MVI);
2598 va = load_gpr(ctx, ra);
2599 gen_helper_minsb8(vc, va, vb);
2600 break;
2601 case 0x39:
2602 /* MINSW4 */
2603 REQUIRE_TB_FLAG(TB_FLAGS_AMASK_MVI);
2604 va = load_gpr(ctx, ra);
2605 gen_helper_minsw4(vc, va, vb);
2606 break;
2607 case 0x3A:
2608 /* MINUB8 */
2609 REQUIRE_TB_FLAG(TB_FLAGS_AMASK_MVI);
2610 va = load_gpr(ctx, ra);
2611 gen_helper_minub8(vc, va, vb);
2612 break;
2613 case 0x3B:
2614 /* MINUW4 */
2615 REQUIRE_TB_FLAG(TB_FLAGS_AMASK_MVI);
2616 va = load_gpr(ctx, ra);
2617 gen_helper_minuw4(vc, va, vb);
2618 break;
2619 case 0x3C:
2620 /* MAXUB8 */
2621 REQUIRE_TB_FLAG(TB_FLAGS_AMASK_MVI);
2622 va = load_gpr(ctx, ra);
2623 gen_helper_maxub8(vc, va, vb);
2624 break;
2625 case 0x3D:
2626 /* MAXUW4 */
2627 REQUIRE_TB_FLAG(TB_FLAGS_AMASK_MVI);
2628 va = load_gpr(ctx, ra);
2629 gen_helper_maxuw4(vc, va, vb);
2630 break;
2631 case 0x3E:
2632 /* MAXSB8 */
2633 REQUIRE_TB_FLAG(TB_FLAGS_AMASK_MVI);
2634 va = load_gpr(ctx, ra);
2635 gen_helper_maxsb8(vc, va, vb);
2636 break;
2637 case 0x3F:
2638 /* MAXSW4 */
2639 REQUIRE_TB_FLAG(TB_FLAGS_AMASK_MVI);
2640 va = load_gpr(ctx, ra);
2641 gen_helper_maxsw4(vc, va, vb);
2642 break;
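/* The MVI min/max helpers follow a regular naming scheme: min or max,
   s(igned) or u(nsigned), and b8 or w4 for eight packed bytes or four
   packed words, each computed lane-wise over the 64-bit operands.  */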
2643 default:
2644 goto invalid_opc;
2645 }
2646 break;
2648 case 0x1D:
2649 /* HW_MTPR (PALcode) */
2650 #ifndef CONFIG_USER_ONLY
2651 REQUIRE_TB_FLAG(TB_FLAGS_PAL_MODE);
2652 vb = load_gpr(ctx, rb);
2653 ret = gen_mtpr(ctx, vb, insn & 0xffff);
2654 break;
2655 #else
2656 goto invalid_opc;
2657 #endif
2659 case 0x1E:
2660 /* HW_RET (PALcode) */
2661 #ifndef CONFIG_USER_ONLY
2662 REQUIRE_TB_FLAG(TB_FLAGS_PAL_MODE);
2663 if (rb == 31) {
2664 /* Pre-EV6 CPUs interpreted this as HW_REI, loading the return
2665 address from EXC_ADDR. This turns out to be useful for our
2666 emulation PALcode, so continue to accept it. */
2667 ctx->lit = vb = tcg_temp_new();
2668 tcg_gen_ld_i64(vb, cpu_env, offsetof(CPUAlphaState, exc_addr));
2669 } else {
2670 vb = load_gpr(ctx, rb);
2671 }
2672 tmp = tcg_temp_new();
2673 tcg_gen_movi_i64(tmp, 0);
2674 tcg_gen_st8_i64(tmp, cpu_env, offsetof(CPUAlphaState, intr_flag));
2675 tcg_gen_movi_i64(cpu_lock_addr, -1);
2676 tcg_gen_andi_i64(tmp, vb, 1);
2677 tcg_gen_st8_i64(tmp, cpu_env, offsetof(CPUAlphaState, pal_mode));
2678 tcg_gen_andi_i64(cpu_pc, vb, ~3);
2679 ret = EXIT_PC_UPDATED;
2680 break;
2681 #else
2682 goto invalid_opc;
2683 #endif
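/* HW_RET is the exit path from PALcode: the sequence above clears
   intr_flag, drops any outstanding lock reservation by setting
   lock_addr to -1, copies bit 0 of the target into pal_mode, and
   loads the PC from the target with its low bits masked off.  */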
2685 case 0x1F:
2686 /* HW_ST (PALcode) */
2687 #ifndef CONFIG_USER_ONLY
2688 REQUIRE_TB_FLAG(TB_FLAGS_PAL_MODE);
2689 {
2690 switch ((insn >> 12) & 0xF) {
2691 case 0x0:
2692 /* Longword physical access */
2693 va = load_gpr(ctx, ra);
2694 vb = load_gpr(ctx, rb);
2695 tmp = tcg_temp_new();
2696 tcg_gen_addi_i64(tmp, vb, disp12);
2697 tcg_gen_qemu_st_i64(va, tmp, MMU_PHYS_IDX, MO_LESL);
2698 tcg_temp_free(tmp);
2699 break;
2700 case 0x1:
2701 /* Quadword physical access */
2702 va = load_gpr(ctx, ra);
2703 vb = load_gpr(ctx, rb);
2704 tmp = tcg_temp_new();
2705 tcg_gen_addi_i64(tmp, vb, disp12);
2706 tcg_gen_qemu_st_i64(va, tmp, MMU_PHYS_IDX, MO_LEQ);
2707 tcg_temp_free(tmp);
2708 break;
2709 case 0x2:
2710 /* Longword physical access with lock */
2711 ret = gen_store_conditional(ctx, ra, rb, disp12,
2712 MMU_PHYS_IDX, MO_LESL);
2713 break;
2714 case 0x3:
2715 /* Quadword physical access with lock */
2716 ret = gen_store_conditional(ctx, ra, rb, disp12,
2717 MMU_PHYS_IDX, MO_LEQ);
2718 break;
2719 case 0x4:
2720 /* Longword virtual access */
2721 goto invalid_opc;
2722 case 0x5:
2723 /* Quadword virtual access */
2724 goto invalid_opc;
2725 case 0x6:
2726 /* Invalid */
2727 goto invalid_opc;
2728 case 0x7:
2729 /* Invalid */
2730 goto invalid_opc;
2731 case 0x8:
2732 /* Invalid */
2733 goto invalid_opc;
2734 case 0x9:
2735 /* Invalid */
2736 goto invalid_opc;
2737 case 0xA:
2738 /* Invalid */
2739 goto invalid_opc;
2740 case 0xB:
2741 /* Invalid */
2742 goto invalid_opc;
2743 case 0xC:
2744 /* Longword virtual access with alternate access mode */
2745 goto invalid_opc;
2746 case 0xD:
2747 /* Quadword virtual access with alternate access mode */
2748 goto invalid_opc;
2749 case 0xE:
2750 /* Invalid */
2751 goto invalid_opc;
2752 case 0xF:
2753 /* Invalid */
2754 goto invalid_opc;
2755 }
2756 break;
2757 }
2758 #else
2759 goto invalid_opc;
2760 #endif
2761 case 0x20:
2762 /* LDF */
2763 gen_load_mem(ctx, &gen_qemu_ldf, ra, rb, disp16, 1, 0);
2764 break;
2765 case 0x21:
2766 /* LDG */
2767 gen_load_mem(ctx, &gen_qemu_ldg, ra, rb, disp16, 1, 0);
2768 break;
2769 case 0x22:
2770 /* LDS */
2771 gen_load_mem(ctx, &gen_qemu_lds, ra, rb, disp16, 1, 0);
2772 break;
2773 case 0x23:
2774 /* LDT */
2775 gen_load_mem(ctx, &tcg_gen_qemu_ld64, ra, rb, disp16, 1, 0);
2776 break;
2777 case 0x24:
2778 /* STF */
2779 gen_store_mem(ctx, &gen_qemu_stf, ra, rb, disp16, 1, 0);
2780 break;
2781 case 0x25:
2782 /* STG */
2783 gen_store_mem(ctx, &gen_qemu_stg, ra, rb, disp16, 1, 0);
2784 break;
2785 case 0x26:
2786 /* STS */
2787 gen_store_mem(ctx, &gen_qemu_sts, ra, rb, disp16, 1, 0);
2788 break;
2789 case 0x27:
2790 /* STT */
2791 gen_store_mem(ctx, &tcg_gen_qemu_st64, ra, rb, disp16, 1, 0);
2792 break;
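/* In the gen_load_mem/gen_store_mem calls the two trailing flags
   select, respectively, whether Ra names a floating-point register
   and whether the low three address bits should be cleared as for
   the unaligned LDQ_U/STQ_U forms; the FP group above sets only the
   first of the two.  */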
2793 case 0x28:
2794 /* LDL */
2795 gen_load_mem(ctx, &tcg_gen_qemu_ld32s, ra, rb, disp16, 0, 0);
2796 break;
2797 case 0x29:
2798 /* LDQ */
2799 gen_load_mem(ctx, &tcg_gen_qemu_ld64, ra, rb, disp16, 0, 0);
2800 break;
2801 case 0x2A:
2802 /* LDL_L */
2803 gen_load_mem(ctx, &gen_qemu_ldl_l, ra, rb, disp16, 0, 0);
2804 break;
2805 case 0x2B:
2806 /* LDQ_L */
2807 gen_load_mem(ctx, &gen_qemu_ldq_l, ra, rb, disp16, 0, 0);
2808 break;
2809 case 0x2C:
2810 /* STL */
2811 gen_store_mem(ctx, &tcg_gen_qemu_st32, ra, rb, disp16, 0, 0);
2812 break;
2813 case 0x2D:
2814 /* STQ */
2815 gen_store_mem(ctx, &tcg_gen_qemu_st64, ra, rb, disp16, 0, 0);
2816 break;
2817 case 0x2E:
2818 /* STL_C */
2819 ret = gen_store_conditional(ctx, ra, rb, disp16,
2820 ctx->mem_idx, MO_LESL);
2821 break;
2822 case 0x2F:
2823 /* STQ_C */
2824 ret = gen_store_conditional(ctx, ra, rb, disp16,
2825 ctx->mem_idx, MO_LEQ);
2826 break;
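/* LDx_L records its reservation in cpu_lock_addr/cpu_lock_value, and
   gen_store_conditional() resolves the STx_C against it, conceptually

       fail if addr != cpu_lock_addr
       old = atomic_cmpxchg(addr, cpu_lock_value, new)
       success = (old == cpu_lock_value)

   clearing the reservation either way.  Ra receives the success
   flag, as the architecture requires.  */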
2827 case 0x30:
2828 /* BR */
2829 ret = gen_bdirect(ctx, ra, disp21);
2830 break;
2831 case 0x31: /* FBEQ */
2832 ret = gen_fbcond(ctx, TCG_COND_EQ, ra, disp21);
2833 break;
2834 case 0x32: /* FBLT */
2835 ret = gen_fbcond(ctx, TCG_COND_LT, ra, disp21);
2836 break;
2837 case 0x33: /* FBLE */
2838 ret = gen_fbcond(ctx, TCG_COND_LE, ra, disp21);
2839 break;
2840 case 0x34:
2841 /* BSR */
2842 ret = gen_bdirect(ctx, ra, disp21);
2843 break;
2844 case 0x35: /* FBNE */
2845 ret = gen_fbcond(ctx, TCG_COND_NE, ra, disp21);
2846 break;
2847 case 0x36: /* FBGE */
2848 ret = gen_fbcond(ctx, TCG_COND_GE, ra, disp21);
2849 break;
2850 case 0x37: /* FBGT */
2851 ret = gen_fbcond(ctx, TCG_COND_GT, ra, disp21);
2852 break;
2853 case 0x38:
2854 /* BLBC */
2855 ret = gen_bcond(ctx, TCG_COND_EQ, ra, disp21, 1);
2856 break;
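/* The final argument of gen_bcond() selects the test: 1 masks the
   register down to its low bit, as BLBC here and BLBS below require,
   while 0 compares the whole register against zero for the ordinary
   BEQ/BNE/BLT/... conditions.  */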
2857 case 0x39:
2858 /* BEQ */
2859 ret = gen_bcond(ctx, TCG_COND_EQ, ra, disp21, 0);
2860 break;
2861 case 0x3A:
2862 /* BLT */
2863 ret = gen_bcond(ctx, TCG_COND_LT, ra, disp21, 0);
2864 break;
2865 case 0x3B:
2866 /* BLE */
2867 ret = gen_bcond(ctx, TCG_COND_LE, ra, disp21, 0);
2868 break;
2869 case 0x3C:
2870 /* BLBS */
2871 ret = gen_bcond(ctx, TCG_COND_NE, ra, disp21, 1);
2872 break;
2873 case 0x3D:
2874 /* BNE */
2875 ret = gen_bcond(ctx, TCG_COND_NE, ra, disp21, 0);
2876 break;
2877 case 0x3E:
2878 /* BGE */
2879 ret = gen_bcond(ctx, TCG_COND_GE, ra, disp21, 0);
2880 break;
2881 case 0x3F:
2882 /* BGT */
2883 ret = gen_bcond(ctx, TCG_COND_GT, ra, disp21, 0);
2884 break;
2885 invalid_opc:
2886 ret = gen_invalid(ctx);
2887 break;
2888 }
2890 return ret;
2891 }
2893 void gen_intermediate_code(CPUAlphaState *env, struct TranslationBlock *tb)
2894 {
2895 AlphaCPU *cpu = alpha_env_get_cpu(env);
2896 CPUState *cs = CPU(cpu);
2897 DisasContext ctx, *ctxp = &ctx;
2898 target_ulong pc_start;
2899 target_ulong pc_mask;
2900 uint32_t insn;
2901 ExitStatus ret;
2902 int num_insns;
2903 int max_insns;
2905 pc_start = tb->pc;
2907 ctx.tb = tb;
2908 ctx.pc = pc_start;
2909 ctx.mem_idx = cpu_mmu_index(env, false);
2910 ctx.implver = env->implver;
2911 ctx.singlestep_enabled = cs->singlestep_enabled;
2913 #ifdef CONFIG_USER_ONLY
2914 ctx.ir = cpu_std_ir;
2915 #else
2916 ctx.palbr = env->palbr;
2917 ctx.ir = (tb->flags & TB_FLAGS_PAL_MODE ? cpu_pal_ir : cpu_std_ir);
2918 #endif
2920 /* ??? Every TB begins with unset rounding mode, to be initialized on
2921 the first fp insn of the TB. Alternatively we could define a proper
2922 default for every TB (e.g. QUAL_RM_N or QUAL_RM_D) and make sure
2923 to reset the FP_STATUS to that default at the end of any TB that
2924 changes the default. We could even (gasp) dynamically figure out
2925 what default would be most efficient given the running program. */
2926 ctx.tb_rm = -1;
2927 /* Similarly for flush-to-zero. */
2928 ctx.tb_ftz = -1;
2930 TCGV_UNUSED_I64(ctx.zero);
2931 TCGV_UNUSED_I64(ctx.sink);
2932 TCGV_UNUSED_I64(ctx.lit);
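/* ctx.zero, ctx.sink and ctx.lit start out unset and are only
   allocated on demand while translating an instruction that needs
   them; free_context_temps() in the loop below releases whatever the
   last instruction allocated.  */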
2934 num_insns = 0;
2935 max_insns = tb->cflags & CF_COUNT_MASK;
2936 if (max_insns == 0) {
2937 max_insns = CF_COUNT_MASK;
2938 }
2939 if (max_insns > TCG_MAX_INSNS) {
2940 max_insns = TCG_MAX_INSNS;
2941 }
2943 if (in_superpage(&ctx, pc_start)) {
2944 pc_mask = (1ULL << 41) - 1;
2945 } else {
2946 pc_mask = ~TARGET_PAGE_MASK;
2947 }
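/* Inside a superpage the mapping is linear, so it is safe to let a TB
   run past an ordinary page boundary; pc_mask widens the
   stop-at-page-end test in the loop below from TARGET_PAGE_SIZE to
   the 41-bit superpage span.  */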
2949 gen_tb_start(tb);
2950 do {
2951 tcg_gen_insn_start(ctx.pc);
2952 num_insns++;
2954 if (unlikely(cpu_breakpoint_test(cs, ctx.pc, BP_ANY))) {
2955 ret = gen_excp(&ctx, EXCP_DEBUG, 0);
2956 /* The address covered by the breakpoint must be included in
2957 [tb->pc, tb->pc + tb->size) in order for it to be
2958 properly cleared -- thus we increment the PC here so that
2959 the logic setting tb->size below does the right thing. */
2960 ctx.pc += 4;
2961 break;
2962 }
2963 if (num_insns == max_insns && (tb->cflags & CF_LAST_IO)) {
2964 gen_io_start();
2965 }
2966 insn = cpu_ldl_code(env, ctx.pc);
2968 ctx.pc += 4;
2969 ret = translate_one(ctxp, insn);
2970 free_context_temps(ctxp);
2972 /* If we reach a page boundary, are single stepping,
2973 or exhaust instruction count, stop generation. */
2974 if (ret == NO_EXIT
2975 && ((ctx.pc & pc_mask) == 0
2976 || tcg_op_buf_full()
2977 || num_insns >= max_insns
2978 || singlestep
2979 || ctx.singlestep_enabled)) {
2980 ret = EXIT_PC_STALE;
2981 }
2982 } while (ret == NO_EXIT);
2984 if (tb->cflags & CF_LAST_IO) {
2985 gen_io_end();
2986 }
2988 switch (ret) {
2989 case EXIT_GOTO_TB:
2990 case EXIT_NORETURN:
2991 break;
2992 case EXIT_PC_STALE:
2993 tcg_gen_movi_i64(cpu_pc, ctx.pc);
2994 /* FALLTHRU */
2995 case EXIT_PC_UPDATED:
2996 if (ctx.singlestep_enabled) {
2997 gen_excp_1(EXCP_DEBUG, 0);
2998 } else {
2999 tcg_gen_exit_tb(0);
3000 }
3001 break;
3002 default:
3003 abort();
3004 }
3006 gen_tb_end(tb, num_insns);
3008 tb->size = ctx.pc - pc_start;
3009 tb->icount = num_insns;
3011 #ifdef DEBUG_DISAS
3012 if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)
3013 && qemu_log_in_addr_range(pc_start)) {
3014 qemu_log_lock();
3015 qemu_log("IN: %s\n", lookup_symbol(pc_start));
3016 log_target_disas(cs, pc_start, ctx.pc - pc_start, 1);
3017 qemu_log("\n");
3018 qemu_log_unlock();
3019 }
3020 #endif
3021 }
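/* Called when an exception unwinds a partially executed TB: data[]
   holds the values recorded by tcg_gen_insn_start() in the loop
   above, which for Alpha is just the PC of the faulting insn.  */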
3023 void restore_state_to_opc(CPUAlphaState *env, TranslationBlock *tb,
3024 target_ulong *data)
3025 {
3026 env->pc = data[0];
3027 }