/*
 * Alpha emulation cpu translation for qemu.
 *
 * Copyright (c) 2007 Jocelyn Mayer
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "cpu.h"
#include "sysemu/cpus.h"
#include "disas/disas.h"
#include "qemu/host-utils.h"
#include "exec/exec-all.h"
#include "tcg-op.h"
#include "exec/cpu_ldst.h"
#include "exec/helper-proto.h"
#include "exec/helper-gen.h"
#include "trace-tcg.h"
#include "exec/log.h"

#undef ALPHA_DEBUG_DISAS
#define CONFIG_SOFTFLOAT_INLINE

#ifdef ALPHA_DEBUG_DISAS
#  define LOG_DISAS(...) qemu_log_mask(CPU_LOG_TB_IN_ASM, ## __VA_ARGS__)
#else
#  define LOG_DISAS(...) do { } while (0)
#endif
typedef struct DisasContext DisasContext;
struct DisasContext {
    struct TranslationBlock *tb;
    uint64_t pc;
#ifndef CONFIG_USER_ONLY
    uint64_t palbr;
#endif
    int mem_idx;

    /* Current rounding mode for this TB.  */
    int tb_rm;
    /* Current flush-to-zero setting for this TB.  */
    int tb_ftz;

    /* implver value for this CPU.  */
    int implver;

    /* The set of registers active in the current context.  */
    TCGv *ir;

    /* Temporaries for $31 and $f31 as source and destination.  */
    TCGv zero;
    TCGv sink;
    /* Temporary for immediate constants.  */
    TCGv lit;

    bool singlestep_enabled;
};
/* Return values from translate_one, indicating the state of the TB.
   Note that zero indicates that we are not exiting the TB.  */

typedef enum {
    NO_EXIT,

    /* We have emitted one or more goto_tb.  No fixup required.  */
    EXIT_GOTO_TB,

    /* We are not using a goto_tb (for whatever reason), but have updated
       the PC (for whatever reason), so there's no need to do it again on
       exiting the TB.  */
    EXIT_PC_UPDATED,

    /* We are exiting the TB, but have neither emitted a goto_tb, nor
       updated the PC for the next instruction to be executed.  */
    EXIT_PC_STALE,

    /* We are ending the TB with a noreturn function call, e.g. longjmp.
       No following code will be executed.  */
    EXIT_NORETURN,
} ExitStatus;
/* global register indexes */
static TCGv_env cpu_env;
static TCGv cpu_std_ir[31];
static TCGv cpu_fir[31];
static TCGv cpu_pc;
static TCGv cpu_lock_addr;
static TCGv cpu_lock_value;

#ifndef CONFIG_USER_ONLY
static TCGv cpu_pal_ir[31];
#endif

#include "exec/gen-icount.h"
void alpha_translate_init(void)
{
#define DEF_VAR(V)  { &cpu_##V, #V, offsetof(CPUAlphaState, V) }

    typedef struct { TCGv *var; const char *name; int ofs; } GlobalVar;
    static const GlobalVar vars[] = {
        DEF_VAR(pc),
        DEF_VAR(lock_addr),
        DEF_VAR(lock_value),
    };

#undef DEF_VAR

    /* Use the symbolic register names that match the disassembler.  */
    static const char greg_names[31][4] = {
        "v0", "t0", "t1", "t2", "t3", "t4", "t5", "t6",
        "t7", "s0", "s1", "s2", "s3", "s4", "s5", "fp",
        "a0", "a1", "a2", "a3", "a4", "a5", "t8", "t9",
        "t10", "t11", "ra", "t12", "at", "gp", "sp"
    };
    static const char freg_names[31][4] = {
        "f0", "f1", "f2", "f3", "f4", "f5", "f6", "f7",
        "f8", "f9", "f10", "f11", "f12", "f13", "f14", "f15",
        "f16", "f17", "f18", "f19", "f20", "f21", "f22", "f23",
        "f24", "f25", "f26", "f27", "f28", "f29", "f30"
    };
#ifndef CONFIG_USER_ONLY
    static const char shadow_names[8][8] = {
        "pal_t7", "pal_s0", "pal_s1", "pal_s2",
        "pal_s3", "pal_s4", "pal_s5", "pal_t11"
    };
#endif

    static bool done_init = 0;
    int i;

    if (done_init) {
        return;
    }
    done_init = 1;

    cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");
    tcg_ctx.tcg_env = cpu_env;

    for (i = 0; i < 31; i++) {
        cpu_std_ir[i] = tcg_global_mem_new_i64(cpu_env,
                                               offsetof(CPUAlphaState, ir[i]),
                                               greg_names[i]);
    }

    for (i = 0; i < 31; i++) {
        cpu_fir[i] = tcg_global_mem_new_i64(cpu_env,
                                            offsetof(CPUAlphaState, fir[i]),
                                            freg_names[i]);
    }

#ifndef CONFIG_USER_ONLY
    memcpy(cpu_pal_ir, cpu_std_ir, sizeof(cpu_pal_ir));
    for (i = 0; i < 8; i++) {
        int r = (i == 7 ? 25 : i + 8);
        cpu_pal_ir[r] = tcg_global_mem_new_i64(cpu_env,
                                               offsetof(CPUAlphaState,
                                                        shadow[i]),
                                               shadow_names[i]);
    }
#endif

    for (i = 0; i < ARRAY_SIZE(vars); ++i) {
        const GlobalVar *v = &vars[i];
        *v->var = tcg_global_mem_new_i64(cpu_env, v->ofs, v->name);
    }
}
static TCGv load_zero(DisasContext *ctx)
{
    if (TCGV_IS_UNUSED_I64(ctx->zero)) {
        ctx->zero = tcg_const_i64(0);
    }
    return ctx->zero;
}

static TCGv dest_sink(DisasContext *ctx)
{
    if (TCGV_IS_UNUSED_I64(ctx->sink)) {
        ctx->sink = tcg_temp_new();
    }
    return ctx->sink;
}

static void free_context_temps(DisasContext *ctx)
{
    if (!TCGV_IS_UNUSED_I64(ctx->sink)) {
        tcg_gen_discard_i64(ctx->sink);
        tcg_temp_free(ctx->sink);
        TCGV_UNUSED_I64(ctx->sink);
    }
    if (!TCGV_IS_UNUSED_I64(ctx->zero)) {
        tcg_temp_free(ctx->zero);
        TCGV_UNUSED_I64(ctx->zero);
    }
    if (!TCGV_IS_UNUSED_I64(ctx->lit)) {
        tcg_temp_free(ctx->lit);
        TCGV_UNUSED_I64(ctx->lit);
    }
}
219 if (likely(reg < 31)) {
220 return ctx->ir[reg];
221 } else {
222 return load_zero(ctx);
226 static TCGv load_gpr_lit(DisasContext *ctx, unsigned reg,
227 uint8_t lit, bool islit)
229 if (islit) {
230 ctx->lit = tcg_const_i64(lit);
231 return ctx->lit;
232 } else if (likely(reg < 31)) {
233 return ctx->ir[reg];
234 } else {
235 return load_zero(ctx);
239 static TCGv dest_gpr(DisasContext *ctx, unsigned reg)
241 if (likely(reg < 31)) {
242 return ctx->ir[reg];
243 } else {
244 return dest_sink(ctx);
248 static TCGv load_fpr(DisasContext *ctx, unsigned reg)
250 if (likely(reg < 31)) {
251 return cpu_fir[reg];
252 } else {
253 return load_zero(ctx);
257 static TCGv dest_fpr(DisasContext *ctx, unsigned reg)
259 if (likely(reg < 31)) {
260 return cpu_fir[reg];
261 } else {
262 return dest_sink(ctx);
static void gen_excp_1(int exception, int error_code)
{
    TCGv_i32 tmp1, tmp2;

    tmp1 = tcg_const_i32(exception);
    tmp2 = tcg_const_i32(error_code);
    gen_helper_excp(cpu_env, tmp1, tmp2);
    tcg_temp_free_i32(tmp2);
    tcg_temp_free_i32(tmp1);
}

static ExitStatus gen_excp(DisasContext *ctx, int exception, int error_code)
{
    tcg_gen_movi_i64(cpu_pc, ctx->pc);
    gen_excp_1(exception, error_code);
    return EXIT_NORETURN;
}

static inline ExitStatus gen_invalid(DisasContext *ctx)
{
    return gen_excp(ctx, EXCP_OPCDEC, 0);
}
static inline void gen_qemu_ldf(TCGv t0, TCGv t1, int flags)
{
    TCGv_i32 tmp32 = tcg_temp_new_i32();
    tcg_gen_qemu_ld_i32(tmp32, t1, flags, MO_LEUL);
    gen_helper_memory_to_f(t0, tmp32);
    tcg_temp_free_i32(tmp32);
}

static inline void gen_qemu_ldg(TCGv t0, TCGv t1, int flags)
{
    TCGv tmp = tcg_temp_new();
    tcg_gen_qemu_ld_i64(tmp, t1, flags, MO_LEQ);
    gen_helper_memory_to_g(t0, tmp);
    tcg_temp_free(tmp);
}

static inline void gen_qemu_lds(TCGv t0, TCGv t1, int flags)
{
    TCGv_i32 tmp32 = tcg_temp_new_i32();
    tcg_gen_qemu_ld_i32(tmp32, t1, flags, MO_LEUL);
    gen_helper_memory_to_s(t0, tmp32);
    tcg_temp_free_i32(tmp32);
}

static inline void gen_qemu_ldl_l(TCGv t0, TCGv t1, int flags)
{
    tcg_gen_qemu_ld_i64(t0, t1, flags, MO_LESL);
    tcg_gen_mov_i64(cpu_lock_addr, t1);
    tcg_gen_mov_i64(cpu_lock_value, t0);
}

static inline void gen_qemu_ldq_l(TCGv t0, TCGv t1, int flags)
{
    tcg_gen_qemu_ld_i64(t0, t1, flags, MO_LEQ);
    tcg_gen_mov_i64(cpu_lock_addr, t1);
    tcg_gen_mov_i64(cpu_lock_value, t0);
}
static inline void gen_load_mem(DisasContext *ctx,
                                void (*tcg_gen_qemu_load)(TCGv t0, TCGv t1,
                                                          int flags),
                                int ra, int rb, int32_t disp16, bool fp,
                                bool clear)
{
    TCGv tmp, addr, va;

    /* LDQ_U with ra $31 is UNOP.  Other various loads are forms of
       prefetches, which we can treat as nops.  No worries about
       missed exceptions here.  */
    if (unlikely(ra == 31)) {
        return;
    }

    tmp = tcg_temp_new();
    addr = load_gpr(ctx, rb);

    if (disp16) {
        tcg_gen_addi_i64(tmp, addr, disp16);
        addr = tmp;
    }
    if (clear) {
        tcg_gen_andi_i64(tmp, addr, ~0x7);
        addr = tmp;
    }

    va = (fp ? cpu_fir[ra] : ctx->ir[ra]);
    tcg_gen_qemu_load(va, addr, ctx->mem_idx);

    tcg_temp_free(tmp);
}
static inline void gen_qemu_stf(TCGv t0, TCGv t1, int flags)
{
    TCGv_i32 tmp32 = tcg_temp_new_i32();
    gen_helper_f_to_memory(tmp32, t0);
    tcg_gen_qemu_st_i32(tmp32, t1, flags, MO_LEUL);
    tcg_temp_free_i32(tmp32);
}

static inline void gen_qemu_stg(TCGv t0, TCGv t1, int flags)
{
    TCGv tmp = tcg_temp_new();
    gen_helper_g_to_memory(tmp, t0);
    tcg_gen_qemu_st_i64(tmp, t1, flags, MO_LEQ);
    tcg_temp_free(tmp);
}

static inline void gen_qemu_sts(TCGv t0, TCGv t1, int flags)
{
    TCGv_i32 tmp32 = tcg_temp_new_i32();
    gen_helper_s_to_memory(tmp32, t0);
    tcg_gen_qemu_st_i32(tmp32, t1, flags, MO_LEUL);
    tcg_temp_free_i32(tmp32);
}
static inline void gen_store_mem(DisasContext *ctx,
                                 void (*tcg_gen_qemu_store)(TCGv t0, TCGv t1,
                                                            int flags),
                                 int ra, int rb, int32_t disp16, bool fp,
                                 bool clear)
{
    TCGv tmp, addr, va;

    tmp = tcg_temp_new();
    addr = load_gpr(ctx, rb);

    if (disp16) {
        tcg_gen_addi_i64(tmp, addr, disp16);
        addr = tmp;
    }
    if (clear) {
        tcg_gen_andi_i64(tmp, addr, ~0x7);
        addr = tmp;
    }

    va = (fp ? load_fpr(ctx, ra) : load_gpr(ctx, ra));
    tcg_gen_qemu_store(va, addr, ctx->mem_idx);

    tcg_temp_free(tmp);
}
static ExitStatus gen_store_conditional(DisasContext *ctx, int ra, int rb,
                                        int32_t disp16, int mem_idx,
                                        TCGMemOp op)
{
    TCGLabel *lab_fail, *lab_done;
    TCGv addr, val;

    addr = tcg_temp_new_i64();
    tcg_gen_addi_i64(addr, load_gpr(ctx, rb), disp16);
    free_context_temps(ctx);

    lab_fail = gen_new_label();
    lab_done = gen_new_label();
    tcg_gen_brcond_i64(TCG_COND_NE, addr, cpu_lock_addr, lab_fail);
    tcg_temp_free_i64(addr);

    val = tcg_temp_new_i64();
    tcg_gen_atomic_cmpxchg_i64(val, cpu_lock_addr, cpu_lock_value,
                               load_gpr(ctx, ra), mem_idx, op);
    free_context_temps(ctx);

    if (ra != 31) {
        tcg_gen_setcond_i64(TCG_COND_EQ, ctx->ir[ra], val, cpu_lock_value);
    }
    tcg_temp_free_i64(val);
    tcg_gen_br(lab_done);

    gen_set_label(lab_fail);
    if (ra != 31) {
        tcg_gen_movi_i64(ctx->ir[ra], 0);
    }

    gen_set_label(lab_done);
    tcg_gen_movi_i64(cpu_lock_addr, -1);
    return NO_EXIT;
}
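
/* Note that the LDx_L/STx_C pair is modeled here with a compare-and-swap
   rather than by tracking exclusivity: the store-conditional succeeds iff
   the locked address matches and memory still holds the value observed by
   the load-locked.  An ABA rewrite by another CPU is thus indistinguishable
   from no store at all, which suffices for typical guest lock sequences.  */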
static bool in_superpage(DisasContext *ctx, int64_t addr)
{
#ifndef CONFIG_USER_ONLY
    return ((ctx->tb->flags & TB_FLAGS_USER_MODE) == 0
            && addr >> TARGET_VIRT_ADDR_SPACE_BITS == -1
            && ((addr >> 41) & 3) == 2);
#else
    return false;
#endif
}
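
/* The test above selects what the OSF/1 PALcode calls the kernel
   "superpage" (KSEG): the address must sign-extend from the implemented
   virtual address bits and have bits <42:41> equal to 2, i.e. the region
   around 0xfffffc0000000000 that is mapped linearly to physical memory
   with fixed permissions.  */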
static bool use_goto_tb(DisasContext *ctx, uint64_t dest)
{
    /* Suppress goto_tb in the case of single-stepping and IO.  */
    if ((ctx->tb->cflags & CF_LAST_IO)
        || ctx->singlestep_enabled || singlestep) {
        return false;
    }
#ifndef CONFIG_USER_ONLY
    /* If the destination is in the superpage, the page perms can't change.  */
    if (in_superpage(ctx, dest)) {
        return true;
    }
    /* Check for the dest on the same page as the start of the TB.  */
    return ((ctx->tb->pc ^ dest) & TARGET_PAGE_MASK) == 0;
#else
    return true;
#endif
}
static ExitStatus gen_bdirect(DisasContext *ctx, int ra, int32_t disp)
{
    uint64_t dest = ctx->pc + (disp << 2);

    if (ra != 31) {
        tcg_gen_movi_i64(ctx->ir[ra], ctx->pc);
    }

    /* Notice branch-to-next; used to initialize RA with the PC.  */
    if (disp == 0) {
        return 0;
    } else if (use_goto_tb(ctx, dest)) {
        tcg_gen_goto_tb(0);
        tcg_gen_movi_i64(cpu_pc, dest);
        tcg_gen_exit_tb((uintptr_t)ctx->tb);
        return EXIT_GOTO_TB;
    } else {
        tcg_gen_movi_i64(cpu_pc, dest);
        return EXIT_PC_UPDATED;
    }
}
static ExitStatus gen_bcond_internal(DisasContext *ctx, TCGCond cond,
                                     TCGv cmp, int32_t disp)
{
    uint64_t dest = ctx->pc + (disp << 2);
    TCGLabel *lab_true = gen_new_label();

    if (use_goto_tb(ctx, dest)) {
        tcg_gen_brcondi_i64(cond, cmp, 0, lab_true);

        tcg_gen_goto_tb(0);
        tcg_gen_movi_i64(cpu_pc, ctx->pc);
        tcg_gen_exit_tb((uintptr_t)ctx->tb);

        gen_set_label(lab_true);
        tcg_gen_goto_tb(1);
        tcg_gen_movi_i64(cpu_pc, dest);
        tcg_gen_exit_tb((uintptr_t)ctx->tb + 1);

        return EXIT_GOTO_TB;
    } else {
        TCGv_i64 z = tcg_const_i64(0);
        TCGv_i64 d = tcg_const_i64(dest);
        TCGv_i64 p = tcg_const_i64(ctx->pc);

        tcg_gen_movcond_i64(cond, cpu_pc, cmp, z, d, p);

        tcg_temp_free_i64(z);
        tcg_temp_free_i64(d);
        tcg_temp_free_i64(p);
        return EXIT_PC_UPDATED;
    }
}
static ExitStatus gen_bcond(DisasContext *ctx, TCGCond cond, int ra,
                            int32_t disp, int mask)
{
    TCGv cmp_tmp;

    if (mask) {
        cmp_tmp = tcg_temp_new();
        tcg_gen_andi_i64(cmp_tmp, load_gpr(ctx, ra), 1);
    } else {
        cmp_tmp = load_gpr(ctx, ra);
    }

    return gen_bcond_internal(ctx, cond, cmp_tmp, disp);
}
/* Fold -0.0 for comparison with COND.  */

static void gen_fold_mzero(TCGCond cond, TCGv dest, TCGv src)
{
    uint64_t mzero = 1ull << 63;

    switch (cond) {
    case TCG_COND_LE:
    case TCG_COND_GT:
        /* For <= or >, the -0.0 value directly compares the way we want.  */
        tcg_gen_mov_i64(dest, src);
        break;

    case TCG_COND_EQ:
    case TCG_COND_NE:
        /* For == or !=, we can simply mask off the sign bit and compare.  */
        tcg_gen_andi_i64(dest, src, mzero - 1);
        break;

    case TCG_COND_GE:
    case TCG_COND_LT:
        /* For >= or <, map -0.0 to +0.0 via comparison and mask.  */
        tcg_gen_setcondi_i64(TCG_COND_NE, dest, src, mzero);
        tcg_gen_neg_i64(dest, dest);
        tcg_gen_and_i64(dest, dest, src);
        break;

    default:
        abort();
    }
}
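
/* For example, with mzero == 0x8000000000000000, mzero - 1 is
   0x7fffffffffffffff, so the EQ/NE case maps both +0.0 and -0.0 to the
   all-zero pattern.  The GE/LT case computes (src != mzero ? -1 : 0) & src,
   which turns exactly the -0.0 bit pattern into +0.0 and leaves every
   other value (including +0.0) unchanged.  */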
static ExitStatus gen_fbcond(DisasContext *ctx, TCGCond cond, int ra,
                             int32_t disp)
{
    TCGv cmp_tmp = tcg_temp_new();
    gen_fold_mzero(cond, cmp_tmp, load_fpr(ctx, ra));
    return gen_bcond_internal(ctx, cond, cmp_tmp, disp);
}

static void gen_fcmov(DisasContext *ctx, TCGCond cond, int ra, int rb, int rc)
{
    TCGv_i64 va, vb, z;

    z = load_zero(ctx);
    vb = load_fpr(ctx, rb);
    va = tcg_temp_new();
    gen_fold_mzero(cond, va, load_fpr(ctx, ra));

    tcg_gen_movcond_i64(cond, dest_fpr(ctx, rc), va, z, vb, load_fpr(ctx, rc));

    tcg_temp_free(va);
}
#define QUAL_RM_N       0x080   /* Round mode nearest even */
#define QUAL_RM_C       0x000   /* Round mode chopped */
#define QUAL_RM_M       0x040   /* Round mode minus infinity */
#define QUAL_RM_D       0x0c0   /* Round mode dynamic */
#define QUAL_RM_MASK    0x0c0

#define QUAL_U          0x100   /* Underflow enable (fp output) */
#define QUAL_V          0x100   /* Overflow enable (int output) */
#define QUAL_S          0x400   /* Software completion enable */
#define QUAL_I          0x200   /* Inexact detection enable */
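
/* These bits live in fn11, the 11-bit function field of an FP operate
   instruction: bits <5:0> select the operation (fpfn below) and bits
   <10:6> encode the qualifiers.  E.g. an ADDT with QUAL_S, QUAL_U and
   QUAL_I set is the ADDT/SUI form.  */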
static void gen_qual_roundmode(DisasContext *ctx, int fn11)
{
    TCGv_i32 tmp;

    fn11 &= QUAL_RM_MASK;
    if (fn11 == ctx->tb_rm) {
        return;
    }
    ctx->tb_rm = fn11;

    tmp = tcg_temp_new_i32();
    switch (fn11) {
    case QUAL_RM_N:
        tcg_gen_movi_i32(tmp, float_round_nearest_even);
        break;
    case QUAL_RM_C:
        tcg_gen_movi_i32(tmp, float_round_to_zero);
        break;
    case QUAL_RM_M:
        tcg_gen_movi_i32(tmp, float_round_down);
        break;
    case QUAL_RM_D:
        tcg_gen_ld8u_i32(tmp, cpu_env,
                         offsetof(CPUAlphaState, fpcr_dyn_round));
        break;
    }

#if defined(CONFIG_SOFTFLOAT_INLINE)
    /* ??? The "fpu/softfloat.h" interface is to call set_float_rounding_mode.
       With CONFIG_SOFTFLOAT that expands to an out-of-line call that just
       sets the one field.  */
    tcg_gen_st8_i32(tmp, cpu_env,
                    offsetof(CPUAlphaState, fp_status.float_rounding_mode));
#else
    gen_helper_setroundmode(tmp);
#endif

    tcg_temp_free_i32(tmp);
}
static void gen_qual_flushzero(DisasContext *ctx, int fn11)
{
    TCGv_i32 tmp;

    fn11 &= QUAL_U;
    if (fn11 == ctx->tb_ftz) {
        return;
    }
    ctx->tb_ftz = fn11;

    tmp = tcg_temp_new_i32();
    if (fn11) {
        /* Underflow is enabled, use the FPCR setting.  */
        tcg_gen_ld8u_i32(tmp, cpu_env,
                         offsetof(CPUAlphaState, fpcr_flush_to_zero));
    } else {
        /* Underflow is disabled, force flush-to-zero.  */
        tcg_gen_movi_i32(tmp, 1);
    }

#if defined(CONFIG_SOFTFLOAT_INLINE)
    tcg_gen_st8_i32(tmp, cpu_env,
                    offsetof(CPUAlphaState, fp_status.flush_to_zero));
#else
    gen_helper_setflushzero(tmp);
#endif

    tcg_temp_free_i32(tmp);
}
static TCGv gen_ieee_input(DisasContext *ctx, int reg, int fn11, int is_cmp)
{
    TCGv val;

    if (unlikely(reg == 31)) {
        val = load_zero(ctx);
    } else {
        val = cpu_fir[reg];
        if ((fn11 & QUAL_S) == 0) {
            if (is_cmp) {
                gen_helper_ieee_input_cmp(cpu_env, val);
            } else {
                gen_helper_ieee_input(cpu_env, val);
            }
        } else {
#ifndef CONFIG_USER_ONLY
            /* In system mode, raise exceptions for denormals like real
               hardware.  In user mode, proceed as if the OS completion
               handler is handling the denormal as per spec.  */
            gen_helper_ieee_input_s(cpu_env, val);
#endif
        }
    }
    return val;
}
static void gen_fp_exc_raise(int rc, int fn11)
{
    /* ??? We ought to be able to do something with imprecise exceptions.
       E.g. notice we're still in the trap shadow of something within the
       TB and do not generate the code to signal the exception; end the TB
       when an exception is forced to arrive, either by consumption of a
       register value or TRAPB or EXCB.  */
    TCGv_i32 reg, ign;
    uint32_t ignore = 0;

    if (!(fn11 & QUAL_U)) {
        /* Note that QUAL_U == QUAL_V, so ignore either.  */
        ignore |= FPCR_UNF | FPCR_IOV;
    }
    if (!(fn11 & QUAL_I)) {
        ignore |= FPCR_INE;
    }
    ign = tcg_const_i32(ignore);

    /* ??? Pass in the regno of the destination so that the helper can
       set EXC_MASK, which contains a bitmask of destination registers
       that have caused arithmetic traps.  A simple userspace emulation
       does not require this.  We do need it for a guest kernel's entArith,
       or if we were to do something clever with imprecise exceptions.  */
    reg = tcg_const_i32(rc + 32);
    if (fn11 & QUAL_S) {
        gen_helper_fp_exc_raise_s(cpu_env, ign, reg);
    } else {
        gen_helper_fp_exc_raise(cpu_env, ign, reg);
    }

    tcg_temp_free_i32(reg);
    tcg_temp_free_i32(ign);
}
static void gen_cvtlq(TCGv vc, TCGv vb)
{
    TCGv tmp = tcg_temp_new();

    /* The arithmetic right shift here, plus the sign-extended mask below
       yields a sign-extended result without an explicit ext32s_i64.  */
    tcg_gen_sari_i64(tmp, vb, 32);
    tcg_gen_shri_i64(vc, vb, 29);
    tcg_gen_andi_i64(tmp, tmp, (int32_t)0xc0000000);
    tcg_gen_andi_i64(vc, vc, 0x3fffffff);
    tcg_gen_or_i64(vc, vc, tmp);

    tcg_temp_free(tmp);
}
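
/* This implements the register format of a longword held in an FP
   register: bits <63:62> of the source carry bits <31:30> of the integer
   and bits <58:29> carry bits <29:0>, so CVTLQ reassembles the canonical
   32-bit value and sign-extends it to 64 bits.  */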
static void gen_ieee_arith2(DisasContext *ctx,
                            void (*helper)(TCGv, TCGv_ptr, TCGv),
                            int rb, int rc, int fn11)
{
    TCGv vb;

    gen_qual_roundmode(ctx, fn11);
    gen_qual_flushzero(ctx, fn11);

    vb = gen_ieee_input(ctx, rb, fn11, 0);
    helper(dest_fpr(ctx, rc), cpu_env, vb);

    gen_fp_exc_raise(rc, fn11);
}

#define IEEE_ARITH2(name)                                       \
static inline void glue(gen_, name)(DisasContext *ctx,          \
                                    int rb, int rc, int fn11)   \
{                                                               \
    gen_ieee_arith2(ctx, gen_helper_##name, rb, rc, fn11);      \
}
IEEE_ARITH2(sqrts)
IEEE_ARITH2(sqrtt)
IEEE_ARITH2(cvtst)
IEEE_ARITH2(cvtts)
static void gen_cvttq(DisasContext *ctx, int rb, int rc, int fn11)
{
    TCGv vb, vc;

    /* No need to set flushzero, since we have an integer output.  */
    vb = gen_ieee_input(ctx, rb, fn11, 0);
    vc = dest_fpr(ctx, rc);

    /* Almost all integer conversions use cropped rounding;
       special case that.  */
    if ((fn11 & QUAL_RM_MASK) == QUAL_RM_C) {
        gen_helper_cvttq_c(vc, cpu_env, vb);
    } else {
        gen_qual_roundmode(ctx, fn11);
        gen_helper_cvttq(vc, cpu_env, vb);
    }
    gen_fp_exc_raise(rc, fn11);
}
static void gen_ieee_intcvt(DisasContext *ctx,
                            void (*helper)(TCGv, TCGv_ptr, TCGv),
                            int rb, int rc, int fn11)
{
    TCGv vb, vc;

    gen_qual_roundmode(ctx, fn11);
    vb = load_fpr(ctx, rb);
    vc = dest_fpr(ctx, rc);

    /* The only exception that can be raised by integer conversion
       is inexact.  Thus we only need to worry about exceptions when
       inexact handling is requested.  */
    if (fn11 & QUAL_I) {
        helper(vc, cpu_env, vb);
        gen_fp_exc_raise(rc, fn11);
    } else {
        helper(vc, cpu_env, vb);
    }
}

#define IEEE_INTCVT(name)                                       \
static inline void glue(gen_, name)(DisasContext *ctx,          \
                                    int rb, int rc, int fn11)   \
{                                                               \
    gen_ieee_intcvt(ctx, gen_helper_##name, rb, rc, fn11);      \
}
IEEE_INTCVT(cvtqs)
IEEE_INTCVT(cvtqt)
static void gen_cpy_mask(TCGv vc, TCGv va, TCGv vb, bool inv_a, uint64_t mask)
{
    TCGv vmask = tcg_const_i64(mask);
    TCGv tmp = tcg_temp_new_i64();

    if (inv_a) {
        tcg_gen_andc_i64(tmp, vmask, va);
    } else {
        tcg_gen_and_i64(tmp, va, vmask);
    }

    tcg_gen_andc_i64(vc, vb, vmask);
    tcg_gen_or_i64(vc, vc, tmp);

    tcg_temp_free(vmask);
    tcg_temp_free(tmp);
}
static void gen_ieee_arith3(DisasContext *ctx,
                            void (*helper)(TCGv, TCGv_ptr, TCGv, TCGv),
                            int ra, int rb, int rc, int fn11)
{
    TCGv va, vb, vc;

    gen_qual_roundmode(ctx, fn11);
    gen_qual_flushzero(ctx, fn11);

    va = gen_ieee_input(ctx, ra, fn11, 0);
    vb = gen_ieee_input(ctx, rb, fn11, 0);
    vc = dest_fpr(ctx, rc);
    helper(vc, cpu_env, va, vb);

    gen_fp_exc_raise(rc, fn11);
}

#define IEEE_ARITH3(name)                                               \
static inline void glue(gen_, name)(DisasContext *ctx,                  \
                                    int ra, int rb, int rc, int fn11)   \
{                                                                       \
    gen_ieee_arith3(ctx, gen_helper_##name, ra, rb, rc, fn11);          \
}
IEEE_ARITH3(adds)
IEEE_ARITH3(subs)
IEEE_ARITH3(muls)
IEEE_ARITH3(divs)
IEEE_ARITH3(addt)
IEEE_ARITH3(subt)
IEEE_ARITH3(mult)
IEEE_ARITH3(divt)
static void gen_ieee_compare(DisasContext *ctx,
                             void (*helper)(TCGv, TCGv_ptr, TCGv, TCGv),
                             int ra, int rb, int rc, int fn11)
{
    TCGv va, vb, vc;

    va = gen_ieee_input(ctx, ra, fn11, 1);
    vb = gen_ieee_input(ctx, rb, fn11, 1);
    vc = dest_fpr(ctx, rc);
    helper(vc, cpu_env, va, vb);

    gen_fp_exc_raise(rc, fn11);
}

#define IEEE_CMP3(name)                                                 \
static inline void glue(gen_, name)(DisasContext *ctx,                  \
                                    int ra, int rb, int rc, int fn11)   \
{                                                                       \
    gen_ieee_compare(ctx, gen_helper_##name, ra, rb, rc, fn11);         \
}
IEEE_CMP3(cmptun)
IEEE_CMP3(cmpteq)
IEEE_CMP3(cmptlt)
IEEE_CMP3(cmptle)
static inline uint64_t zapnot_mask(uint8_t lit)
{
    uint64_t mask = 0;
    int i;

    for (i = 0; i < 8; ++i) {
        if ((lit >> i) & 1) {
            mask |= 0xffull << (i * 8);
        }
    }
    return mask;
}
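
/* For example, zapnot_mask(0x0f) == 0x00000000ffffffff: each set bit in
   the 8-bit literal keeps the corresponding byte of the operand, so
   ZAPNOT with 0x0f is exactly a 32-bit zero-extension (see the special
   cases in gen_zapnoti below).  */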
/* Implement zapnot with an immediate operand, which expands to some
   form of immediate AND.  This is a basic building block in the
   definition of many of the other byte manipulation instructions.  */
static void gen_zapnoti(TCGv dest, TCGv src, uint8_t lit)
{
    switch (lit) {
    case 0x00:
        tcg_gen_movi_i64(dest, 0);
        break;
    case 0x01:
        tcg_gen_ext8u_i64(dest, src);
        break;
    case 0x03:
        tcg_gen_ext16u_i64(dest, src);
        break;
    case 0x0f:
        tcg_gen_ext32u_i64(dest, src);
        break;
    case 0xff:
        tcg_gen_mov_i64(dest, src);
        break;
    default:
        tcg_gen_andi_i64(dest, src, zapnot_mask(lit));
        break;
    }
}
/* EXTWH, EXTLH, EXTQH */
static void gen_ext_h(DisasContext *ctx, TCGv vc, TCGv va, int rb, bool islit,
                      uint8_t lit, uint8_t byte_mask)
{
    if (islit) {
        int pos = (64 - lit * 8) & 0x3f;
        int len = cto32(byte_mask) * 8;
        if (pos < len) {
            tcg_gen_deposit_z_i64(vc, va, pos, len - pos);
        } else {
            tcg_gen_movi_i64(vc, 0);
        }
    } else {
        TCGv tmp = tcg_temp_new();
        tcg_gen_shli_i64(tmp, load_gpr(ctx, rb), 3);
        tcg_gen_neg_i64(tmp, tmp);
        tcg_gen_andi_i64(tmp, tmp, 0x3f);
        tcg_gen_shl_i64(vc, va, tmp);
        tcg_temp_free(tmp);
    }
    gen_zapnoti(vc, vc, byte_mask);
}
/* EXTBL, EXTWL, EXTLL, EXTQL */
static void gen_ext_l(DisasContext *ctx, TCGv vc, TCGv va, int rb, bool islit,
                      uint8_t lit, uint8_t byte_mask)
{
    if (islit) {
        int pos = (lit & 7) * 8;
        int len = cto32(byte_mask) * 8;
        if (pos + len >= 64) {
            len = 64 - pos;
        }
        tcg_gen_extract_i64(vc, va, pos, len);
    } else {
        TCGv tmp = tcg_temp_new();
        tcg_gen_andi_i64(tmp, load_gpr(ctx, rb), 7);
        tcg_gen_shli_i64(tmp, tmp, 3);
        tcg_gen_shr_i64(vc, va, tmp);
        tcg_temp_free(tmp);
        gen_zapnoti(vc, vc, byte_mask);
    }
}
/* INSWH, INSLH, INSQH */
static void gen_ins_h(DisasContext *ctx, TCGv vc, TCGv va, int rb, bool islit,
                      uint8_t lit, uint8_t byte_mask)
{
    if (islit) {
        int pos = 64 - (lit & 7) * 8;
        int len = cto32(byte_mask) * 8;
        if (pos < len) {
            tcg_gen_extract_i64(vc, va, pos, len - pos);
        } else {
            tcg_gen_movi_i64(vc, 0);
        }
    } else {
        TCGv tmp = tcg_temp_new();
        TCGv shift = tcg_temp_new();

        /* The instruction description has us left-shift the byte mask
           and extract bits <15:8> and apply that zap at the end.  This
           is equivalent to simply performing the zap first and shifting
           afterward.  */
        gen_zapnoti(tmp, va, byte_mask);

        /* If (B & 7) == 0, we need to shift by 64 and leave a zero.  Do this
           portably by splitting the shift into two parts: shift_count-1 and 1.
           Arrange for the -1 by using ones-complement instead of
           twos-complement in the negation: ~(B * 8) & 63.  */
        tcg_gen_shli_i64(shift, load_gpr(ctx, rb), 3);
        tcg_gen_not_i64(shift, shift);
        tcg_gen_andi_i64(shift, shift, 0x3f);

        tcg_gen_shr_i64(vc, tmp, shift);
        tcg_gen_shri_i64(vc, vc, 1);
        tcg_temp_free(shift);
        tcg_temp_free(tmp);
    }
}
/* INSBL, INSWL, INSLL, INSQL */
static void gen_ins_l(DisasContext *ctx, TCGv vc, TCGv va, int rb, bool islit,
                      uint8_t lit, uint8_t byte_mask)
{
    if (islit) {
        int pos = (lit & 7) * 8;
        int len = cto32(byte_mask) * 8;
        if (pos + len > 64) {
            len = 64 - pos;
        }
        tcg_gen_deposit_z_i64(vc, va, pos, len);
    } else {
        TCGv tmp = tcg_temp_new();
        TCGv shift = tcg_temp_new();

        /* The instruction description has us left-shift the byte mask
           and extract bits <15:8> and apply that zap at the end.  This
           is equivalent to simply performing the zap first and shifting
           afterward.  */
        gen_zapnoti(tmp, va, byte_mask);

        tcg_gen_andi_i64(shift, load_gpr(ctx, rb), 7);
        tcg_gen_shli_i64(shift, shift, 3);
        tcg_gen_shl_i64(vc, tmp, shift);
        tcg_temp_free(shift);
        tcg_temp_free(tmp);
    }
}
/* MSKWH, MSKLH, MSKQH */
static void gen_msk_h(DisasContext *ctx, TCGv vc, TCGv va, int rb, bool islit,
                      uint8_t lit, uint8_t byte_mask)
{
    if (islit) {
        gen_zapnoti(vc, va, ~((byte_mask << (lit & 7)) >> 8));
    } else {
        TCGv shift = tcg_temp_new();
        TCGv mask = tcg_temp_new();

        /* The instruction description is as above, where the byte_mask
           is shifted left, and then we extract bits <15:8>.  This can be
           emulated with a right-shift on the expanded byte mask.  This
           requires extra care because for an input <2:0> == 0 we need a
           shift of 64 bits in order to generate a zero.  This is done by
           splitting the shift into two parts, the variable shift - 1
           followed by a constant 1 shift.  The code we expand below is
           equivalent to ~(B * 8) & 63.  */
        tcg_gen_shli_i64(shift, load_gpr(ctx, rb), 3);
        tcg_gen_not_i64(shift, shift);
        tcg_gen_andi_i64(shift, shift, 0x3f);
        tcg_gen_movi_i64(mask, zapnot_mask(byte_mask));
        tcg_gen_shr_i64(mask, mask, shift);
        tcg_gen_shri_i64(mask, mask, 1);

        tcg_gen_andc_i64(vc, va, mask);

        tcg_temp_free(mask);
        tcg_temp_free(shift);
    }
}
/* MSKBL, MSKWL, MSKLL, MSKQL */
static void gen_msk_l(DisasContext *ctx, TCGv vc, TCGv va, int rb, bool islit,
                      uint8_t lit, uint8_t byte_mask)
{
    if (islit) {
        gen_zapnoti(vc, va, ~(byte_mask << (lit & 7)));
    } else {
        TCGv shift = tcg_temp_new();
        TCGv mask = tcg_temp_new();

        tcg_gen_andi_i64(shift, load_gpr(ctx, rb), 7);
        tcg_gen_shli_i64(shift, shift, 3);
        tcg_gen_movi_i64(mask, zapnot_mask(byte_mask));
        tcg_gen_shl_i64(mask, mask, shift);

        tcg_gen_andc_i64(vc, va, mask);

        tcg_temp_free(mask);
        tcg_temp_free(shift);
    }
}
static void gen_rx(DisasContext *ctx, int ra, int set)
{
    TCGv_i32 tmp;

    if (ra != 31) {
        tcg_gen_ld8u_i64(ctx->ir[ra], cpu_env,
                         offsetof(CPUAlphaState, intr_flag));
    }

    tmp = tcg_const_i32(set);
    tcg_gen_st8_i32(tmp, cpu_env, offsetof(CPUAlphaState, intr_flag));
    tcg_temp_free_i32(tmp);
}
static ExitStatus gen_call_pal(DisasContext *ctx, int palcode)
{
    /* We're emulating OSF/1 PALcode.  Many of these are trivial access
       to internal cpu registers.  */

    /* Unprivileged PAL call */
    if (palcode >= 0x80 && palcode < 0xC0) {
        switch (palcode) {
        case 0x86:
            /* IMB */
            /* No-op inside QEMU.  */
            break;
        case 0x9E:
            /* RDUNIQUE */
            tcg_gen_ld_i64(ctx->ir[IR_V0], cpu_env,
                           offsetof(CPUAlphaState, unique));
            break;
        case 0x9F:
            /* WRUNIQUE */
            tcg_gen_st_i64(ctx->ir[IR_A0], cpu_env,
                           offsetof(CPUAlphaState, unique));
            break;
        default:
            palcode &= 0xbf;
            goto do_call_pal;
        }
        return NO_EXIT;
    }
#ifndef CONFIG_USER_ONLY
    /* Privileged PAL code */
    if (palcode < 0x40 && (ctx->tb->flags & TB_FLAGS_USER_MODE) == 0) {
        switch (palcode) {
        case 0x01:
            /* CFLUSH */
            /* No-op inside QEMU.  */
            break;
        case 0x02:
            /* DRAINA */
            /* No-op inside QEMU.  */
            break;
        case 0x2D:
            /* WRVPTPTR */
            tcg_gen_st_i64(ctx->ir[IR_A0], cpu_env,
                           offsetof(CPUAlphaState, vptptr));
            break;
        case 0x31:
            /* WRVAL */
            tcg_gen_st_i64(ctx->ir[IR_A0], cpu_env,
                           offsetof(CPUAlphaState, sysval));
            break;
        case 0x32:
            /* RDVAL */
            tcg_gen_ld_i64(ctx->ir[IR_V0], cpu_env,
                           offsetof(CPUAlphaState, sysval));
            break;

        case 0x35: {
            /* SWPIPL */
            TCGv tmp;

            /* Note that we already know we're in kernel mode, so we know
               that PS only contains the 3 IPL bits.  */
            tcg_gen_ld8u_i64(ctx->ir[IR_V0], cpu_env,
                             offsetof(CPUAlphaState, ps));

            /* But make sure and store only the 3 IPL bits from the user.  */
            tmp = tcg_temp_new();
            tcg_gen_andi_i64(tmp, ctx->ir[IR_A0], PS_INT_MASK);
            tcg_gen_st8_i64(tmp, cpu_env, offsetof(CPUAlphaState, ps));
            tcg_temp_free(tmp);
            break;
        }

        case 0x36:
            /* RDPS */
            tcg_gen_ld8u_i64(ctx->ir[IR_V0], cpu_env,
                             offsetof(CPUAlphaState, ps));
            break;
        case 0x38:
            /* WRUSP */
            tcg_gen_st_i64(ctx->ir[IR_A0], cpu_env,
                           offsetof(CPUAlphaState, usp));
            break;
        case 0x3A:
            /* RDUSP */
            tcg_gen_ld_i64(ctx->ir[IR_V0], cpu_env,
                           offsetof(CPUAlphaState, usp));
            break;
        case 0x3C:
            /* WHAMI */
            tcg_gen_ld32s_i64(ctx->ir[IR_V0], cpu_env,
                -offsetof(AlphaCPU, env) + offsetof(CPUState, cpu_index));
            break;

        default:
            palcode &= 0x3f;
            goto do_call_pal;
        }
        return NO_EXIT;
    }
#endif
    return gen_invalid(ctx);
 do_call_pal:
#ifdef CONFIG_USER_ONLY
    return gen_excp(ctx, EXCP_CALL_PAL, palcode);
#else
    {
        TCGv tmp = tcg_temp_new();
        uint64_t exc_addr = ctx->pc;
        uint64_t entry = ctx->palbr;

        if (ctx->tb->flags & TB_FLAGS_PAL_MODE) {
            exc_addr |= 1;
        } else {
            tcg_gen_movi_i64(tmp, 1);
            tcg_gen_st8_i64(tmp, cpu_env, offsetof(CPUAlphaState, pal_mode));
        }

        tcg_gen_movi_i64(tmp, exc_addr);
        tcg_gen_st_i64(tmp, cpu_env, offsetof(CPUAlphaState, exc_addr));
        tcg_temp_free(tmp);

        entry += (palcode & 0x80
                  ? 0x2000 + (palcode - 0x80) * 64
                  : 0x1000 + palcode * 64);
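
        /* For example, the unprivileged callsys (palcode 0x83) enters at
           palbr + 0x2000 + 3 * 64, while the privileged swpipl (0x35)
           enters at palbr + 0x1000 + 0x35 * 64.  */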
        /* Since the destination is running in PALmode, we don't really
           need the page permissions check.  We'll see the existence of
           the page when we create the TB, and we'll flush all TBs if
           we change the PAL base register.  */
        if (!ctx->singlestep_enabled && !(ctx->tb->cflags & CF_LAST_IO)) {
            tcg_gen_goto_tb(0);
            tcg_gen_movi_i64(cpu_pc, entry);
            tcg_gen_exit_tb((uintptr_t)ctx->tb);
            return EXIT_GOTO_TB;
        } else {
            tcg_gen_movi_i64(cpu_pc, entry);
            return EXIT_PC_UPDATED;
        }
    }
#endif
}
#ifndef CONFIG_USER_ONLY

#define PR_BYTE         0x100000
#define PR_LONG         0x200000
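
/* cpu_pr_data returns a field offset into CPUAlphaState, tagged in the
   high bits with the access width.  The tag bits sit well above any real
   structure offset, so they can be masked back off before use.  */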
static int cpu_pr_data(int pr)
{
    switch (pr) {
    case  0: return offsetof(CPUAlphaState, ps) | PR_BYTE;
    case  1: return offsetof(CPUAlphaState, fen) | PR_BYTE;
    case  2: return offsetof(CPUAlphaState, pcc_ofs) | PR_LONG;
    case  3: return offsetof(CPUAlphaState, trap_arg0);
    case  4: return offsetof(CPUAlphaState, trap_arg1);
    case  5: return offsetof(CPUAlphaState, trap_arg2);
    case  6: return offsetof(CPUAlphaState, exc_addr);
    case  7: return offsetof(CPUAlphaState, palbr);
    case  8: return offsetof(CPUAlphaState, ptbr);
    case  9: return offsetof(CPUAlphaState, vptptr);
    case 10: return offsetof(CPUAlphaState, unique);
    case 11: return offsetof(CPUAlphaState, sysval);
    case 12: return offsetof(CPUAlphaState, usp);

    case 40 ... 63:
        return offsetof(CPUAlphaState, scratch[pr - 40]);

    case 251:
        return offsetof(CPUAlphaState, alarm_expire);
    }
    return 0;
}
static ExitStatus gen_mfpr(DisasContext *ctx, TCGv va, int regno)
{
    void (*helper)(TCGv);
    int data;

    switch (regno) {
    case 32 ... 39:
        /* Accessing the "non-shadow" general registers.  */
        regno = regno == 39 ? 25 : regno - 32 + 8;
        tcg_gen_mov_i64(va, cpu_std_ir[regno]);
        break;

    case 250: /* WALLTIME */
        helper = gen_helper_get_walltime;
        goto do_helper;
    case 249: /* VMTIME */
        helper = gen_helper_get_vmtime;
    do_helper:
        if (use_icount) {
            gen_io_start();
            helper(va);
            gen_io_end();
            return EXIT_PC_STALE;
        } else {
            helper(va);
        }
        break;

    default:
        /* The basic registers are data only, and unknown registers
           are read-zero, write-ignore.  */
        data = cpu_pr_data(regno);
        if (data == 0) {
            tcg_gen_movi_i64(va, 0);
        } else if (data & PR_BYTE) {
            tcg_gen_ld8u_i64(va, cpu_env, data & ~PR_BYTE);
        } else if (data & PR_LONG) {
            tcg_gen_ld32s_i64(va, cpu_env, data & ~PR_LONG);
        } else {
            tcg_gen_ld_i64(va, cpu_env, data);
        }
        break;
    }

    return NO_EXIT;
}
static ExitStatus gen_mtpr(DisasContext *ctx, TCGv vb, int regno)
{
    TCGv tmp;
    int data;

    switch (regno) {
    case 255:
        /* TBIA */
        gen_helper_tbia(cpu_env);
        break;

    case 254:
        /* TBIS */
        gen_helper_tbis(cpu_env, vb);
        break;

    case 253:
        /* WAIT */
        tmp = tcg_const_i64(1);
        tcg_gen_st32_i64(tmp, cpu_env, -offsetof(AlphaCPU, env) +
                         offsetof(CPUState, halted));
        return gen_excp(ctx, EXCP_HLT, 0);

    case 252:
        /* HALT */
        gen_helper_halt(vb);
        return EXIT_PC_STALE;

    case 251:
        /* ALARM */
        gen_helper_set_alarm(cpu_env, vb);
        break;

    case 7:
        /* PALBR */
        tcg_gen_st_i64(vb, cpu_env, offsetof(CPUAlphaState, palbr));
        /* Changing the PAL base register implies un-chaining all of the TBs
           that ended with a CALL_PAL.  Since the base register usually only
           changes during boot, flushing everything works well.  */
        gen_helper_tb_flush(cpu_env);
        return EXIT_PC_STALE;

    case 32 ... 39:
        /* Accessing the "non-shadow" general registers.  */
        regno = regno == 39 ? 25 : regno - 32 + 8;
        tcg_gen_mov_i64(cpu_std_ir[regno], vb);
        break;

    default:
        /* The basic registers are data only, and unknown registers
           are read-zero, write-ignore.  */
        data = cpu_pr_data(regno);
        if (data != 0) {
            if (data & PR_BYTE) {
                tcg_gen_st8_i64(vb, cpu_env, data & ~PR_BYTE);
            } else if (data & PR_LONG) {
                tcg_gen_st32_i64(vb, cpu_env, data & ~PR_LONG);
            } else {
                tcg_gen_st_i64(vb, cpu_env, data);
            }
        }
        break;
    }

    return NO_EXIT;
}
#endif /* !USER_ONLY */
#define REQUIRE_NO_LIT                          \
    do {                                        \
        if (real_islit) {                       \
            goto invalid_opc;                   \
        }                                       \
    } while (0)

#define REQUIRE_TB_FLAG(FLAG)                   \
    do {                                        \
        if ((ctx->tb->flags & (FLAG)) == 0) {   \
            goto invalid_opc;                   \
        }                                       \
    } while (0)

#define REQUIRE_REG_31(WHICH)                   \
    do {                                        \
        if (WHICH != 31) {                      \
            goto invalid_opc;                   \
        }                                       \
    } while (0)
static ExitStatus translate_one(DisasContext *ctx, uint32_t insn)
{
    int32_t disp21, disp16, disp12 __attribute__((unused));
    uint16_t fn11;
    uint8_t opc, ra, rb, rc, fpfn, fn7, lit;
    bool islit, real_islit;
    TCGv va, vb, vc, tmp, tmp2;
    TCGv_i32 t32;
    ExitStatus ret;

    /* Decode all instruction fields */
    opc = extract32(insn, 26, 6);
    ra = extract32(insn, 21, 5);
    rb = extract32(insn, 16, 5);
    rc = extract32(insn, 0, 5);
    real_islit = islit = extract32(insn, 12, 1);
    lit = extract32(insn, 13, 8);

    disp21 = sextract32(insn, 0, 21);
    disp16 = sextract32(insn, 0, 16);
    disp12 = sextract32(insn, 0, 12);

    fn11 = extract32(insn, 5, 11);
    fpfn = extract32(insn, 5, 6);
    fn7 = extract32(insn, 5, 7);

    if (rb == 31 && !islit) {
        islit = true;
        lit = 0;
    }
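
    /* $31 always reads as zero, so an operate instruction with rb == $31
       was just folded into the literal 0 above; the special cases below
       therefore trigger on either encoding.  */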
    ret = NO_EXIT;
    switch (opc) {
    case 0x00:
        /* CALL_PAL */
        ret = gen_call_pal(ctx, insn & 0x03ffffff);
        break;
    case 0x01:
        /* OPC01 */
        goto invalid_opc;
    case 0x02:
        /* OPC02 */
        goto invalid_opc;
    case 0x03:
        /* OPC03 */
        goto invalid_opc;
    case 0x04:
        /* OPC04 */
        goto invalid_opc;
    case 0x05:
        /* OPC05 */
        goto invalid_opc;
    case 0x06:
        /* OPC06 */
        goto invalid_opc;
    case 0x07:
        /* OPC07 */
        goto invalid_opc;

    case 0x09:
        /* LDAH */
        disp16 = (uint32_t)disp16 << 16;
        /* fall through */
    case 0x08:
        /* LDA */
        va = dest_gpr(ctx, ra);
        /* It's worth special-casing immediate loads.  */
        if (rb == 31) {
            tcg_gen_movi_i64(va, disp16);
        } else {
            tcg_gen_addi_i64(va, load_gpr(ctx, rb), disp16);
        }
        break;

    case 0x0A:
        /* LDBU */
        REQUIRE_TB_FLAG(TB_FLAGS_AMASK_BWX);
        gen_load_mem(ctx, &tcg_gen_qemu_ld8u, ra, rb, disp16, 0, 0);
        break;
    case 0x0B:
        /* LDQ_U */
        gen_load_mem(ctx, &tcg_gen_qemu_ld64, ra, rb, disp16, 0, 1);
        break;
    case 0x0C:
        /* LDWU */
        REQUIRE_TB_FLAG(TB_FLAGS_AMASK_BWX);
        gen_load_mem(ctx, &tcg_gen_qemu_ld16u, ra, rb, disp16, 0, 0);
        break;
    case 0x0D:
        /* STW */
        REQUIRE_TB_FLAG(TB_FLAGS_AMASK_BWX);
        gen_store_mem(ctx, &tcg_gen_qemu_st16, ra, rb, disp16, 0, 0);
        break;
    case 0x0E:
        /* STB */
        REQUIRE_TB_FLAG(TB_FLAGS_AMASK_BWX);
        gen_store_mem(ctx, &tcg_gen_qemu_st8, ra, rb, disp16, 0, 0);
        break;
    case 0x0F:
        /* STQ_U */
        gen_store_mem(ctx, &tcg_gen_qemu_st64, ra, rb, disp16, 0, 1);
        break;
    case 0x10:
        vc = dest_gpr(ctx, rc);
        vb = load_gpr_lit(ctx, rb, lit, islit);

        if (ra == 31) {
            if (fn7 == 0x00) {
                /* Special case ADDL as SEXTL.  */
                tcg_gen_ext32s_i64(vc, vb);
                break;
            }
            if (fn7 == 0x29) {
                /* Special case SUBQ as NEGQ.  */
                tcg_gen_neg_i64(vc, vb);
                break;
            }
        }

        va = load_gpr(ctx, ra);
        switch (fn7) {
        case 0x00:
            /* ADDL */
            tcg_gen_add_i64(vc, va, vb);
            tcg_gen_ext32s_i64(vc, vc);
            break;
        case 0x02:
            /* S4ADDL */
            tmp = tcg_temp_new();
            tcg_gen_shli_i64(tmp, va, 2);
            tcg_gen_add_i64(tmp, tmp, vb);
            tcg_gen_ext32s_i64(vc, tmp);
            tcg_temp_free(tmp);
            break;
        case 0x09:
            /* SUBL */
            tcg_gen_sub_i64(vc, va, vb);
            tcg_gen_ext32s_i64(vc, vc);
            break;
        case 0x0B:
            /* S4SUBL */
            tmp = tcg_temp_new();
            tcg_gen_shli_i64(tmp, va, 2);
            tcg_gen_sub_i64(tmp, tmp, vb);
            tcg_gen_ext32s_i64(vc, tmp);
            tcg_temp_free(tmp);
            break;
        case 0x0F:
            /* CMPBGE */
            if (ra == 31) {
                /* Special case 0 >= X as X == 0.  */
                gen_helper_cmpbe0(vc, vb);
            } else {
                gen_helper_cmpbge(vc, va, vb);
            }
            break;
        case 0x12:
            /* S8ADDL */
            tmp = tcg_temp_new();
            tcg_gen_shli_i64(tmp, va, 3);
            tcg_gen_add_i64(tmp, tmp, vb);
            tcg_gen_ext32s_i64(vc, tmp);
            tcg_temp_free(tmp);
            break;
        case 0x1B:
            /* S8SUBL */
            tmp = tcg_temp_new();
            tcg_gen_shli_i64(tmp, va, 3);
            tcg_gen_sub_i64(tmp, tmp, vb);
            tcg_gen_ext32s_i64(vc, tmp);
            tcg_temp_free(tmp);
            break;
        case 0x1D:
            /* CMPULT */
            tcg_gen_setcond_i64(TCG_COND_LTU, vc, va, vb);
            break;
        case 0x20:
            /* ADDQ */
            tcg_gen_add_i64(vc, va, vb);
            break;
        case 0x22:
            /* S4ADDQ */
            tmp = tcg_temp_new();
            tcg_gen_shli_i64(tmp, va, 2);
            tcg_gen_add_i64(vc, tmp, vb);
            tcg_temp_free(tmp);
            break;
        case 0x29:
            /* SUBQ */
            tcg_gen_sub_i64(vc, va, vb);
            break;
        case 0x2B:
            /* S4SUBQ */
            tmp = tcg_temp_new();
            tcg_gen_shli_i64(tmp, va, 2);
            tcg_gen_sub_i64(vc, tmp, vb);
            tcg_temp_free(tmp);
            break;
        case 0x2D:
            /* CMPEQ */
            tcg_gen_setcond_i64(TCG_COND_EQ, vc, va, vb);
            break;
        case 0x32:
            /* S8ADDQ */
            tmp = tcg_temp_new();
            tcg_gen_shli_i64(tmp, va, 3);
            tcg_gen_add_i64(vc, tmp, vb);
            tcg_temp_free(tmp);
            break;
        case 0x3B:
            /* S8SUBQ */
            tmp = tcg_temp_new();
            tcg_gen_shli_i64(tmp, va, 3);
            tcg_gen_sub_i64(vc, tmp, vb);
            tcg_temp_free(tmp);
            break;
        case 0x3D:
            /* CMPULE */
            tcg_gen_setcond_i64(TCG_COND_LEU, vc, va, vb);
            break;
        case 0x40:
            /* ADDL/V */
            tmp = tcg_temp_new();
            tcg_gen_ext32s_i64(tmp, va);
            tcg_gen_ext32s_i64(vc, vb);
            tcg_gen_add_i64(tmp, tmp, vc);
            tcg_gen_ext32s_i64(vc, tmp);
            gen_helper_check_overflow(cpu_env, vc, tmp);
            tcg_temp_free(tmp);
            break;
        case 0x49:
            /* SUBL/V */
            tmp = tcg_temp_new();
            tcg_gen_ext32s_i64(tmp, va);
            tcg_gen_ext32s_i64(vc, vb);
            tcg_gen_sub_i64(tmp, tmp, vc);
            tcg_gen_ext32s_i64(vc, tmp);
            gen_helper_check_overflow(cpu_env, vc, tmp);
            tcg_temp_free(tmp);
            break;
        case 0x4D:
            /* CMPLT */
            tcg_gen_setcond_i64(TCG_COND_LT, vc, va, vb);
            break;
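
        /* In the quadword ADDQ/V and SUBQ/V cases below, signed overflow
           is computed inline: addition can overflow only when the operands
           have the same sign (the eqv mask) and the result's sign differs
           from the first operand (the xor); for subtraction the operand
           test is inverted.  Bit 63 of the conjunction is then handed to
           check_overflow as a 0/1 flag compared against zero.  */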
        case 0x60:
            /* ADDQ/V */
            tmp = tcg_temp_new();
            tmp2 = tcg_temp_new();
            tcg_gen_eqv_i64(tmp, va, vb);
            tcg_gen_mov_i64(tmp2, va);
            tcg_gen_add_i64(vc, va, vb);
            tcg_gen_xor_i64(tmp2, tmp2, vc);
            tcg_gen_and_i64(tmp, tmp, tmp2);
            tcg_gen_shri_i64(tmp, tmp, 63);
            tcg_gen_movi_i64(tmp2, 0);
            gen_helper_check_overflow(cpu_env, tmp, tmp2);
            tcg_temp_free(tmp);
            tcg_temp_free(tmp2);
            break;
        case 0x69:
            /* SUBQ/V */
            tmp = tcg_temp_new();
            tmp2 = tcg_temp_new();
            tcg_gen_xor_i64(tmp, va, vb);
            tcg_gen_mov_i64(tmp2, va);
            tcg_gen_sub_i64(vc, va, vb);
            tcg_gen_xor_i64(tmp2, tmp2, vc);
            tcg_gen_and_i64(tmp, tmp, tmp2);
            tcg_gen_shri_i64(tmp, tmp, 63);
            tcg_gen_movi_i64(tmp2, 0);
            gen_helper_check_overflow(cpu_env, tmp, tmp2);
            tcg_temp_free(tmp);
            tcg_temp_free(tmp2);
            break;
        case 0x6D:
            /* CMPLE */
            tcg_gen_setcond_i64(TCG_COND_LE, vc, va, vb);
            break;
        default:
            goto invalid_opc;
        }
        break;
    case 0x11:
        if (fn7 == 0x20) {
            if (rc == 31) {
                /* Special case BIS as NOP.  */
                break;
            }
            if (ra == 31) {
                /* Special case BIS as MOV.  */
                vc = dest_gpr(ctx, rc);
                if (islit) {
                    tcg_gen_movi_i64(vc, lit);
                } else {
                    tcg_gen_mov_i64(vc, load_gpr(ctx, rb));
                }
                break;
            }
        }

        vc = dest_gpr(ctx, rc);
        vb = load_gpr_lit(ctx, rb, lit, islit);

        if (fn7 == 0x28 && ra == 31) {
            /* Special case ORNOT as NOT.  */
            tcg_gen_not_i64(vc, vb);
            break;
        }

        va = load_gpr(ctx, ra);
        switch (fn7) {
        case 0x00:
            /* AND */
            tcg_gen_and_i64(vc, va, vb);
            break;
        case 0x08:
            /* BIC */
            tcg_gen_andc_i64(vc, va, vb);
            break;
        case 0x14:
            /* CMOVLBS */
            tmp = tcg_temp_new();
            tcg_gen_andi_i64(tmp, va, 1);
            tcg_gen_movcond_i64(TCG_COND_NE, vc, tmp, load_zero(ctx),
                                vb, load_gpr(ctx, rc));
            tcg_temp_free(tmp);
            break;
        case 0x16:
            /* CMOVLBC */
            tmp = tcg_temp_new();
            tcg_gen_andi_i64(tmp, va, 1);
            tcg_gen_movcond_i64(TCG_COND_EQ, vc, tmp, load_zero(ctx),
                                vb, load_gpr(ctx, rc));
            tcg_temp_free(tmp);
            break;
        case 0x20:
            /* BIS */
            tcg_gen_or_i64(vc, va, vb);
            break;
        case 0x24:
            /* CMOVEQ */
            tcg_gen_movcond_i64(TCG_COND_EQ, vc, va, load_zero(ctx),
                                vb, load_gpr(ctx, rc));
            break;
        case 0x26:
            /* CMOVNE */
            tcg_gen_movcond_i64(TCG_COND_NE, vc, va, load_zero(ctx),
                                vb, load_gpr(ctx, rc));
            break;
        case 0x28:
            /* ORNOT */
            tcg_gen_orc_i64(vc, va, vb);
            break;
        case 0x40:
            /* XOR */
            tcg_gen_xor_i64(vc, va, vb);
            break;
        case 0x44:
            /* CMOVLT */
            tcg_gen_movcond_i64(TCG_COND_LT, vc, va, load_zero(ctx),
                                vb, load_gpr(ctx, rc));
            break;
        case 0x46:
            /* CMOVGE */
            tcg_gen_movcond_i64(TCG_COND_GE, vc, va, load_zero(ctx),
                                vb, load_gpr(ctx, rc));
            break;
        case 0x48:
            /* EQV */
            tcg_gen_eqv_i64(vc, va, vb);
            break;
        case 0x61:
            /* AMASK */
            REQUIRE_REG_31(ra);
            {
                uint64_t amask = ctx->tb->flags >> TB_FLAGS_AMASK_SHIFT;
                tcg_gen_andi_i64(vc, vb, ~amask);
            }
            break;
        case 0x64:
            /* CMOVLE */
            tcg_gen_movcond_i64(TCG_COND_LE, vc, va, load_zero(ctx),
                                vb, load_gpr(ctx, rc));
            break;
        case 0x66:
            /* CMOVGT */
            tcg_gen_movcond_i64(TCG_COND_GT, vc, va, load_zero(ctx),
                                vb, load_gpr(ctx, rc));
            break;
        case 0x6C:
            /* IMPLVER */
            REQUIRE_REG_31(ra);
            tcg_gen_movi_i64(vc, ctx->implver);
            break;
        default:
            goto invalid_opc;
        }
        break;
    case 0x12:
        vc = dest_gpr(ctx, rc);
        va = load_gpr(ctx, ra);
        switch (fn7) {
        case 0x02:
            /* MSKBL */
            gen_msk_l(ctx, vc, va, rb, islit, lit, 0x01);
            break;
        case 0x06:
            /* EXTBL */
            gen_ext_l(ctx, vc, va, rb, islit, lit, 0x01);
            break;
        case 0x0B:
            /* INSBL */
            gen_ins_l(ctx, vc, va, rb, islit, lit, 0x01);
            break;
        case 0x12:
            /* MSKWL */
            gen_msk_l(ctx, vc, va, rb, islit, lit, 0x03);
            break;
        case 0x16:
            /* EXTWL */
            gen_ext_l(ctx, vc, va, rb, islit, lit, 0x03);
            break;
        case 0x1B:
            /* INSWL */
            gen_ins_l(ctx, vc, va, rb, islit, lit, 0x03);
            break;
        case 0x22:
            /* MSKLL */
            gen_msk_l(ctx, vc, va, rb, islit, lit, 0x0f);
            break;
        case 0x26:
            /* EXTLL */
            gen_ext_l(ctx, vc, va, rb, islit, lit, 0x0f);
            break;
        case 0x2B:
            /* INSLL */
            gen_ins_l(ctx, vc, va, rb, islit, lit, 0x0f);
            break;
        case 0x30:
            /* ZAP */
            if (islit) {
                gen_zapnoti(vc, va, ~lit);
            } else {
                gen_helper_zap(vc, va, load_gpr(ctx, rb));
            }
            break;
        case 0x31:
            /* ZAPNOT */
            if (islit) {
                gen_zapnoti(vc, va, lit);
            } else {
                gen_helper_zapnot(vc, va, load_gpr(ctx, rb));
            }
            break;
        case 0x32:
            /* MSKQL */
            gen_msk_l(ctx, vc, va, rb, islit, lit, 0xff);
            break;
        case 0x34:
            /* SRL */
            if (islit) {
                tcg_gen_shri_i64(vc, va, lit & 0x3f);
            } else {
                tmp = tcg_temp_new();
                vb = load_gpr(ctx, rb);
                tcg_gen_andi_i64(tmp, vb, 0x3f);
                tcg_gen_shr_i64(vc, va, tmp);
                tcg_temp_free(tmp);
            }
            break;
        case 0x36:
            /* EXTQL */
            gen_ext_l(ctx, vc, va, rb, islit, lit, 0xff);
            break;
        case 0x39:
            /* SLL */
            if (islit) {
                tcg_gen_shli_i64(vc, va, lit & 0x3f);
            } else {
                tmp = tcg_temp_new();
                vb = load_gpr(ctx, rb);
                tcg_gen_andi_i64(tmp, vb, 0x3f);
                tcg_gen_shl_i64(vc, va, tmp);
                tcg_temp_free(tmp);
            }
            break;
        case 0x3B:
            /* INSQL */
            gen_ins_l(ctx, vc, va, rb, islit, lit, 0xff);
            break;
        case 0x3C:
            /* SRA */
            if (islit) {
                tcg_gen_sari_i64(vc, va, lit & 0x3f);
            } else {
                tmp = tcg_temp_new();
                vb = load_gpr(ctx, rb);
                tcg_gen_andi_i64(tmp, vb, 0x3f);
                tcg_gen_sar_i64(vc, va, tmp);
                tcg_temp_free(tmp);
            }
            break;
        case 0x52:
            /* MSKWH */
            gen_msk_h(ctx, vc, va, rb, islit, lit, 0x03);
            break;
        case 0x57:
            /* INSWH */
            gen_ins_h(ctx, vc, va, rb, islit, lit, 0x03);
            break;
        case 0x5A:
            /* EXTWH */
            gen_ext_h(ctx, vc, va, rb, islit, lit, 0x03);
            break;
        case 0x62:
            /* MSKLH */
            gen_msk_h(ctx, vc, va, rb, islit, lit, 0x0f);
            break;
        case 0x67:
            /* INSLH */
            gen_ins_h(ctx, vc, va, rb, islit, lit, 0x0f);
            break;
        case 0x6A:
            /* EXTLH */
            gen_ext_h(ctx, vc, va, rb, islit, lit, 0x0f);
            break;
        case 0x72:
            /* MSKQH */
            gen_msk_h(ctx, vc, va, rb, islit, lit, 0xff);
            break;
        case 0x77:
            /* INSQH */
            gen_ins_h(ctx, vc, va, rb, islit, lit, 0xff);
            break;
        case 0x7A:
            /* EXTQH */
            gen_ext_h(ctx, vc, va, rb, islit, lit, 0xff);
            break;
        default:
            goto invalid_opc;
        }
        break;
    case 0x13:
        vc = dest_gpr(ctx, rc);
        vb = load_gpr_lit(ctx, rb, lit, islit);
        va = load_gpr(ctx, ra);
        switch (fn7) {
        case 0x00:
            /* MULL */
            tcg_gen_mul_i64(vc, va, vb);
            tcg_gen_ext32s_i64(vc, vc);
            break;
        case 0x20:
            /* MULQ */
            tcg_gen_mul_i64(vc, va, vb);
            break;
        case 0x30:
            /* UMULH */
            tmp = tcg_temp_new();
            tcg_gen_mulu2_i64(tmp, vc, va, vb);
            tcg_temp_free(tmp);
            break;
        case 0x40:
            /* MULL/V */
            tmp = tcg_temp_new();
            tcg_gen_ext32s_i64(tmp, va);
            tcg_gen_ext32s_i64(vc, vb);
            tcg_gen_mul_i64(tmp, tmp, vc);
            tcg_gen_ext32s_i64(vc, tmp);
            gen_helper_check_overflow(cpu_env, vc, tmp);
            tcg_temp_free(tmp);
            break;
        case 0x60:
            /* MULQ/V */
            tmp = tcg_temp_new();
            tmp2 = tcg_temp_new();
            tcg_gen_muls2_i64(vc, tmp, va, vb);
            tcg_gen_sari_i64(tmp2, vc, 63);
            gen_helper_check_overflow(cpu_env, tmp, tmp2);
            tcg_temp_free(tmp);
            tcg_temp_free(tmp2);
            break;
        default:
            goto invalid_opc;
        }
        break;
    case 0x14:
        REQUIRE_TB_FLAG(TB_FLAGS_AMASK_FIX);
        vc = dest_fpr(ctx, rc);
        switch (fpfn) { /* fn11 & 0x3F */
        case 0x04:
            /* ITOFS */
            REQUIRE_REG_31(rb);
            t32 = tcg_temp_new_i32();
            va = load_gpr(ctx, ra);
            tcg_gen_extrl_i64_i32(t32, va);
            gen_helper_memory_to_s(vc, t32);
            tcg_temp_free_i32(t32);
            break;
        case 0x0A:
            /* SQRTF */
            REQUIRE_REG_31(ra);
            vb = load_fpr(ctx, rb);
            gen_helper_sqrtf(vc, cpu_env, vb);
            break;
        case 0x0B:
            /* SQRTS */
            REQUIRE_REG_31(ra);
            gen_sqrts(ctx, rb, rc, fn11);
            break;
        case 0x14:
            /* ITOFF */
            REQUIRE_REG_31(rb);
            t32 = tcg_temp_new_i32();
            va = load_gpr(ctx, ra);
            tcg_gen_extrl_i64_i32(t32, va);
            gen_helper_memory_to_f(vc, t32);
            tcg_temp_free_i32(t32);
            break;
        case 0x24:
            /* ITOFT */
            REQUIRE_REG_31(rb);
            va = load_gpr(ctx, ra);
            tcg_gen_mov_i64(vc, va);
            break;
        case 0x2A:
            /* SQRTG */
            REQUIRE_REG_31(ra);
            vb = load_fpr(ctx, rb);
            gen_helper_sqrtg(vc, cpu_env, vb);
            break;
        case 0x2B:
            /* SQRTT */
            REQUIRE_REG_31(ra);
            gen_sqrtt(ctx, rb, rc, fn11);
            break;
        default:
            goto invalid_opc;
        }
        break;
    case 0x15:
        /* VAX floating point */
        /* XXX: rounding mode and trap are ignored (!) */
        vc = dest_fpr(ctx, rc);
        vb = load_fpr(ctx, rb);
        va = load_fpr(ctx, ra);
        switch (fpfn) { /* fn11 & 0x3F */
        case 0x00:
            /* ADDF */
            gen_helper_addf(vc, cpu_env, va, vb);
            break;
        case 0x01:
            /* SUBF */
            gen_helper_subf(vc, cpu_env, va, vb);
            break;
        case 0x02:
            /* MULF */
            gen_helper_mulf(vc, cpu_env, va, vb);
            break;
        case 0x03:
            /* DIVF */
            gen_helper_divf(vc, cpu_env, va, vb);
            break;
        case 0x1E:
            /* CVTDG -- TODO */
            REQUIRE_REG_31(ra);
            goto invalid_opc;
        case 0x20:
            /* ADDG */
            gen_helper_addg(vc, cpu_env, va, vb);
            break;
        case 0x21:
            /* SUBG */
            gen_helper_subg(vc, cpu_env, va, vb);
            break;
        case 0x22:
            /* MULG */
            gen_helper_mulg(vc, cpu_env, va, vb);
            break;
        case 0x23:
            /* DIVG */
            gen_helper_divg(vc, cpu_env, va, vb);
            break;
        case 0x25:
            /* CMPGEQ */
            gen_helper_cmpgeq(vc, cpu_env, va, vb);
            break;
        case 0x26:
            /* CMPGLT */
            gen_helper_cmpglt(vc, cpu_env, va, vb);
            break;
        case 0x27:
            /* CMPGLE */
            gen_helper_cmpgle(vc, cpu_env, va, vb);
            break;
        case 0x2C:
            /* CVTGF */
            REQUIRE_REG_31(ra);
            gen_helper_cvtgf(vc, cpu_env, vb);
            break;
        case 0x2D:
            /* CVTGD -- TODO */
            REQUIRE_REG_31(ra);
            goto invalid_opc;
        case 0x2F:
            /* CVTGQ */
            REQUIRE_REG_31(ra);
            gen_helper_cvtgq(vc, cpu_env, vb);
            break;
        case 0x3C:
            /* CVTQF */
            REQUIRE_REG_31(ra);
            gen_helper_cvtqf(vc, cpu_env, vb);
            break;
        case 0x3E:
            /* CVTQG */
            REQUIRE_REG_31(ra);
            gen_helper_cvtqg(vc, cpu_env, vb);
            break;
        default:
            goto invalid_opc;
        }
        break;
    case 0x16:
        /* IEEE floating-point */
        switch (fpfn) { /* fn11 & 0x3F */
        case 0x00:
            /* ADDS */
            gen_adds(ctx, ra, rb, rc, fn11);
            break;
        case 0x01:
            /* SUBS */
            gen_subs(ctx, ra, rb, rc, fn11);
            break;
        case 0x02:
            /* MULS */
            gen_muls(ctx, ra, rb, rc, fn11);
            break;
        case 0x03:
            /* DIVS */
            gen_divs(ctx, ra, rb, rc, fn11);
            break;
        case 0x20:
            /* ADDT */
            gen_addt(ctx, ra, rb, rc, fn11);
            break;
        case 0x21:
            /* SUBT */
            gen_subt(ctx, ra, rb, rc, fn11);
            break;
        case 0x22:
            /* MULT */
            gen_mult(ctx, ra, rb, rc, fn11);
            break;
        case 0x23:
            /* DIVT */
            gen_divt(ctx, ra, rb, rc, fn11);
            break;
        case 0x24:
            /* CMPTUN */
            gen_cmptun(ctx, ra, rb, rc, fn11);
            break;
        case 0x25:
            /* CMPTEQ */
            gen_cmpteq(ctx, ra, rb, rc, fn11);
            break;
        case 0x26:
            /* CMPTLT */
            gen_cmptlt(ctx, ra, rb, rc, fn11);
            break;
        case 0x27:
            /* CMPTLE */
            gen_cmptle(ctx, ra, rb, rc, fn11);
            break;
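
        /* CVTST and CVTTS share the low function bits (fpfn == 0x2C) and
           are distinguished only by the full fn11 value, so the next case
           dispatches on the exact encodings of CVTST and CVTST/S.  */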
        case 0x2C:
            REQUIRE_REG_31(ra);
            if (fn11 == 0x2AC || fn11 == 0x6AC) {
                /* CVTST */
                gen_cvtst(ctx, rb, rc, fn11);
            } else {
                /* CVTTS */
                gen_cvtts(ctx, rb, rc, fn11);
            }
            break;
        case 0x2F:
            /* CVTTQ */
            REQUIRE_REG_31(ra);
            gen_cvttq(ctx, rb, rc, fn11);
            break;
        case 0x3C:
            /* CVTQS */
            REQUIRE_REG_31(ra);
            gen_cvtqs(ctx, rb, rc, fn11);
            break;
        case 0x3E:
            /* CVTQT */
            REQUIRE_REG_31(ra);
            gen_cvtqt(ctx, rb, rc, fn11);
            break;
        default:
            goto invalid_opc;
        }
        break;
2251 case 0x17:
2252 switch (fn11) {
2253 case 0x010:
2254 /* CVTLQ */
2255 REQUIRE_REG_31(ra);
2256 vc = dest_fpr(ctx, rc);
2257 vb = load_fpr(ctx, rb);
2258 gen_cvtlq(vc, vb);
2259 break;
2260 case 0x020:
2261 /* CPYS */
2262 if (rc == 31) {
2263 /* Special case CPYS as FNOP. */
2264 } else {
2265 vc = dest_fpr(ctx, rc);
2266 va = load_fpr(ctx, ra);
2267 if (ra == rb) {
2268 /* Special case CPYS as FMOV. */
2269 tcg_gen_mov_i64(vc, va);
2270 } else {
2271 vb = load_fpr(ctx, rb);
2272 gen_cpy_mask(vc, va, vb, 0, 0x8000000000000000ULL);
2275 break;
2276 case 0x021:
2277 /* CPYSN */
2278 vc = dest_fpr(ctx, rc);
2279 vb = load_fpr(ctx, rb);
2280 va = load_fpr(ctx, ra);
2281 gen_cpy_mask(vc, va, vb, 1, 0x8000000000000000ULL);
2282 break;
2283 case 0x022:
2284 /* CPYSE */
2285 vc = dest_fpr(ctx, rc);
2286 vb = load_fpr(ctx, rb);
2287 va = load_fpr(ctx, ra);
2288 gen_cpy_mask(vc, va, vb, 0, 0xFFF0000000000000ULL);
2289 break;
2290 case 0x024:
2291 /* MT_FPCR */
2292 va = load_fpr(ctx, ra);
2293 gen_helper_store_fpcr(cpu_env, va);
2294 if (ctx->tb_rm == QUAL_RM_D) {
2295 /* Re-do the copy of the rounding mode to fp_status
2296 the next time we use dynamic rounding. */
2297 ctx->tb_rm = -1;
2298 }
2299 break;
2300 case 0x025:
2301 /* MF_FPCR */
2302 va = dest_fpr(ctx, ra);
2303 gen_helper_load_fpcr(va, cpu_env);
2304 break;
2305 case 0x02A:
2306 /* FCMOVEQ */
2307 gen_fcmov(ctx, TCG_COND_EQ, ra, rb, rc);
2308 break;
2309 case 0x02B:
2310 /* FCMOVNE */
2311 gen_fcmov(ctx, TCG_COND_NE, ra, rb, rc);
2312 break;
2313 case 0x02C:
2314 /* FCMOVLT */
2315 gen_fcmov(ctx, TCG_COND_LT, ra, rb, rc);
2316 break;
2317 case 0x02D:
2318 /* FCMOVGE */
2319 gen_fcmov(ctx, TCG_COND_GE, ra, rb, rc);
2320 break;
2321 case 0x02E:
2322 /* FCMOVLE */
2323 gen_fcmov(ctx, TCG_COND_LE, ra, rb, rc);
2324 break;
2325 case 0x02F:
2326 /* FCMOVGT */
2327 gen_fcmov(ctx, TCG_COND_GT, ra, rb, rc);
2328 break;
2329 case 0x030: /* CVTQL */
2330 case 0x130: /* CVTQL/V */
2331 case 0x530: /* CVTQL/SV */
2332 REQUIRE_REG_31(ra);
2333 vc = dest_fpr(ctx, rc);
2334 vb = load_fpr(ctx, rb);
2335 gen_helper_cvtql(vc, cpu_env, vb);
2336 gen_fp_exc_raise(rc, fn11);
2337 break;
2338 default:
2339 goto invalid_opc;
2340 }
2341 break;
2343 case 0x18:
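/* For these miscellaneous memory-format instructions, the function
   code is held in the 16-bit displacement field.  */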
2344 switch ((uint16_t)disp16) {
2345 case 0x0000:
2346 /* TRAPB */
2347 /* No-op. */
2348 break;
2349 case 0x0400:
2350 /* EXCB */
2351 /* No-op. */
2352 break;
2353 case 0x4000:
2354 /* MB */
2355 tcg_gen_mb(TCG_MO_ALL | TCG_BAR_SC);
2356 break;
2357 case 0x4400:
2358 /* WMB */
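/* Alpha's WMB orders stores only against other stores, hence
   TCG_MO_ST_ST; the full MB above orders all memory accesses.  */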
2359 tcg_gen_mb(TCG_MO_ST_ST | TCG_BAR_SC);
2360 break;
2361 case 0x8000:
2362 /* FETCH */
2363 /* No-op */
2364 break;
2365 case 0xA000:
2366 /* FETCH_M */
2367 /* No-op */
2368 break;
2369 case 0xC000:
2370 /* RPCC */
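/* When icount is in use, reading the cycle counter counts as an
   I/O operation: bracket the helper with gen_io_start/gen_io_end
   and end the TB so the instruction count stays exact.  */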
2371 va = dest_gpr(ctx, ra);
2372 if (ctx->tb->cflags & CF_USE_ICOUNT) {
2373 gen_io_start();
2374 gen_helper_load_pcc(va, cpu_env);
2375 gen_io_end();
2376 ret = EXIT_PC_STALE;
2377 } else {
2378 gen_helper_load_pcc(va, cpu_env);
2379 }
2380 break;
2381 case 0xE000:
2382 /* RC */
2383 gen_rx(ctx, ra, 0);
2384 break;
2385 case 0xE800:
2386 /* ECB */
2387 break;
2388 case 0xF000:
2389 /* RS */
2390 gen_rx(ctx, ra, 1);
2391 break;
2392 case 0xF800:
2393 /* WH64 */
2394 /* No-op */
2395 break;
2396 case 0xFC00:
2397 /* WH64EN */
2398 /* No-op */
2399 break;
2400 default:
2401 goto invalid_opc;
2402 }
2403 break;
2405 case 0x19:
2406 /* HW_MFPR (PALcode) */
2407 #ifndef CONFIG_USER_ONLY
2408 REQUIRE_TB_FLAG(TB_FLAGS_PAL_MODE);
2409 va = dest_gpr(ctx, ra);
2410 ret = gen_mfpr(ctx, va, insn & 0xffff);
2411 break;
2412 #else
2413 goto invalid_opc;
2414 #endif
2416 case 0x1A:
2417 /* JMP, JSR, RET, JSR_COROUTINE. These only differ by the branch
2418 prediction stack action, which of course we don't implement. */
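/* The target address is forced to 4-byte alignment, and the return
   address (the updated PC) is written to Ra unless Ra is $31.  */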
2419 vb = load_gpr(ctx, rb);
2420 tcg_gen_andi_i64(cpu_pc, vb, ~3);
2421 if (ra != 31) {
2422 tcg_gen_movi_i64(ctx->ir[ra], ctx->pc);
2423 }
2424 ret = EXIT_PC_UPDATED;
2425 break;
2427 case 0x1B:
2428 /* HW_LD (PALcode) */
2429 #ifndef CONFIG_USER_ONLY
2430 REQUIRE_TB_FLAG(TB_FLAGS_PAL_MODE);
2431 {
2432 TCGv addr = tcg_temp_new();
2433 vb = load_gpr(ctx, rb);
2434 va = dest_gpr(ctx, ra);
2436 tcg_gen_addi_i64(addr, vb, disp12);
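/* Bits <15:12> of the instruction select the HW_LD variant:
   physical vs virtual, locked, and alternate-access-mode forms.  */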
2437 switch ((insn >> 12) & 0xF) {
2438 case 0x0:
2439 /* Longword physical access (hw_ldl/p) */
2440 tcg_gen_qemu_ld_i64(va, addr, MMU_PHYS_IDX, MO_LESL);
2441 break;
2442 case 0x1:
2443 /* Quadword physical access (hw_ldq/p) */
2444 tcg_gen_qemu_ld_i64(va, addr, MMU_PHYS_IDX, MO_LEQ);
2445 break;
2446 case 0x2:
2447 /* Longword physical access with lock (hw_ldl_l/p) */
2448 gen_qemu_ldl_l(va, addr, MMU_PHYS_IDX);
2449 break;
2450 case 0x3:
2451 /* Quadword physical access with lock (hw_ldq_l/p) */
2452 gen_qemu_ldq_l(va, addr, MMU_PHYS_IDX);
2453 break;
2454 case 0x4:
2455 /* Longword virtual PTE fetch (hw_ldl/v) */
2456 goto invalid_opc;
2457 case 0x5:
2458 /* Quadword virtual PTE fetch (hw_ldq/v) */
2459 goto invalid_opc;
2461 case 0x6:
2462 /* Invalid */
2463 goto invalid_opc;
2464 case 0x7:
2465 /* Invalid */
2466 goto invalid_opc;
2467 case 0x8:
2468 /* Longword virtual access (hw_ldl) */
2469 goto invalid_opc;
2470 case 0x9:
2471 /* Quadword virtual access (hw_ldq) */
2472 goto invalid_opc;
2473 case 0xA:
2474 /* Longword virtual access with protection check (hw_ldl/w) */
2475 tcg_gen_qemu_ld_i64(va, addr, MMU_KERNEL_IDX, MO_LESL);
2476 break;
2477 case 0xB:
2478 /* Quadword virtual access with protection check (hw_ldq/w) */
2479 tcg_gen_qemu_ld_i64(va, addr, MMU_KERNEL_IDX, MO_LEQ);
2480 break;
2481 case 0xC:
2482 /* Longword virtual access with alt access mode (hw_ldl/a)*/
2483 goto invalid_opc;
2484 case 0xD:
2485 /* Quadword virtual access with alt access mode (hw_ldq/a) */
2486 goto invalid_opc;
2487 case 0xE:
2488 /* Longword virtual access with alternate access mode and
2489 protection checks (hw_ldl/wa) */
2490 tcg_gen_qemu_ld_i64(va, addr, MMU_USER_IDX, MO_LESL);
2491 break;
2492 case 0xF:
2493 /* Quadword virtual access with alternate access mode and
2494 protection checks (hw_ldq/wa) */
2495 tcg_gen_qemu_ld_i64(va, addr, MMU_USER_IDX, MO_LEQ);
2496 break;
2497 }
2498 tcg_temp_free(addr);
2499 break;
2500 }
2501 #else
2502 goto invalid_opc;
2503 #endif
2505 case 0x1C:
2506 vc = dest_gpr(ctx, rc);
2507 if (fn7 == 0x70) {
2508 /* FTOIT */
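/* FTOIT moves the raw 64-bit FP register image to an integer
   register; no conversion of the bits is needed.  */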
2509 REQUIRE_TB_FLAG(TB_FLAGS_AMASK_FIX);
2510 REQUIRE_REG_31(rb);
2511 va = load_fpr(ctx, ra);
2512 tcg_gen_mov_i64(vc, va);
2513 break;
2514 } else if (fn7 == 0x78) {
2515 /* FTOIS */
2516 REQUIRE_TB_FLAG(TB_FLAGS_AMASK_FIX);
2517 REQUIRE_REG_31(rb);
2518 t32 = tcg_temp_new_i32();
2519 va = load_fpr(ctx, ra);
2520 gen_helper_s_to_memory(t32, va);
2521 tcg_gen_ext_i32_i64(vc, t32);
2522 tcg_temp_free_i32(t32);
2523 break;
2524 }
2526 vb = load_gpr_lit(ctx, rb, lit, islit);
2527 switch (fn7) {
2528 case 0x00:
2529 /* SEXTB */
2530 REQUIRE_TB_FLAG(TB_FLAGS_AMASK_BWX);
2531 REQUIRE_REG_31(ra);
2532 tcg_gen_ext8s_i64(vc, vb);
2533 break;
2534 case 0x01:
2535 /* SEXTW */
2536 REQUIRE_TB_FLAG(TB_FLAGS_AMASK_BWX);
2537 REQUIRE_REG_31(ra);
2538 tcg_gen_ext16s_i64(vc, vb);
2539 break;
2540 case 0x30:
2541 /* CTPOP */
2542 REQUIRE_TB_FLAG(TB_FLAGS_AMASK_CIX);
2543 REQUIRE_REG_31(ra);
2544 REQUIRE_NO_LIT;
2545 tcg_gen_ctpop_i64(vc, vb);
2546 break;
2547 case 0x31:
2548 /* PERR */
2549 REQUIRE_TB_FLAG(TB_FLAGS_AMASK_MVI);
2550 REQUIRE_NO_LIT;
2551 va = load_gpr(ctx, ra);
2552 gen_helper_perr(vc, va, vb);
2553 break;
2554 case 0x32:
2555 /* CTLZ */
2556 REQUIRE_TB_FLAG(TB_FLAGS_AMASK_CIX);
2557 REQUIRE_REG_31(ra);
2558 REQUIRE_NO_LIT;
2559 tcg_gen_clzi_i64(vc, vb, 64);
2560 break;
2561 case 0x33:
2562 /* CTTZ */
2563 REQUIRE_TB_FLAG(TB_FLAGS_AMASK_CIX);
2564 REQUIRE_REG_31(ra);
2565 REQUIRE_NO_LIT;
2566 tcg_gen_ctzi_i64(vc, vb, 64);
2567 break;
2568 case 0x34:
2569 /* UNPKBW */
2570 REQUIRE_TB_FLAG(TB_FLAGS_AMASK_MVI);
2571 REQUIRE_REG_31(ra);
2572 REQUIRE_NO_LIT;
2573 gen_helper_unpkbw(vc, vb);
2574 break;
2575 case 0x35:
2576 /* UNPKBL */
2577 REQUIRE_TB_FLAG(TB_FLAGS_AMASK_MVI);
2578 REQUIRE_REG_31(ra);
2579 REQUIRE_NO_LIT;
2580 gen_helper_unpkbl(vc, vb);
2581 break;
2582 case 0x36:
2583 /* PKWB */
2584 REQUIRE_TB_FLAG(TB_FLAGS_AMASK_MVI);
2585 REQUIRE_REG_31(ra);
2586 REQUIRE_NO_LIT;
2587 gen_helper_pkwb(vc, vb);
2588 break;
2589 case 0x37:
2590 /* PKLB */
2591 REQUIRE_TB_FLAG(TB_FLAGS_AMASK_MVI);
2592 REQUIRE_REG_31(ra);
2593 REQUIRE_NO_LIT;
2594 gen_helper_pklb(vc, vb);
2595 break;
2596 case 0x38:
2597 /* MINSB8 */
2598 REQUIRE_TB_FLAG(TB_FLAGS_AMASK_MVI);
2599 va = load_gpr(ctx, ra);
2600 gen_helper_minsb8(vc, va, vb);
2601 break;
2602 case 0x39:
2603 /* MINSW4 */
2604 REQUIRE_TB_FLAG(TB_FLAGS_AMASK_MVI);
2605 va = load_gpr(ctx, ra);
2606 gen_helper_minsw4(vc, va, vb);
2607 break;
2608 case 0x3A:
2609 /* MINUB8 */
2610 REQUIRE_TB_FLAG(TB_FLAGS_AMASK_MVI);
2611 va = load_gpr(ctx, ra);
2612 gen_helper_minub8(vc, va, vb);
2613 break;
2614 case 0x3B:
2615 /* MINUW4 */
2616 REQUIRE_TB_FLAG(TB_FLAGS_AMASK_MVI);
2617 va = load_gpr(ctx, ra);
2618 gen_helper_minuw4(vc, va, vb);
2619 break;
2620 case 0x3C:
2621 /* MAXUB8 */
2622 REQUIRE_TB_FLAG(TB_FLAGS_AMASK_MVI);
2623 va = load_gpr(ctx, ra);
2624 gen_helper_maxub8(vc, va, vb);
2625 break;
2626 case 0x3D:
2627 /* MAXUW4 */
2628 REQUIRE_TB_FLAG(TB_FLAGS_AMASK_MVI);
2629 va = load_gpr(ctx, ra);
2630 gen_helper_maxuw4(vc, va, vb);
2631 break;
2632 case 0x3E:
2633 /* MAXSB8 */
2634 REQUIRE_TB_FLAG(TB_FLAGS_AMASK_MVI);
2635 va = load_gpr(ctx, ra);
2636 gen_helper_maxsb8(vc, va, vb);
2637 break;
2638 case 0x3F:
2639 /* MAXSW4 */
2640 REQUIRE_TB_FLAG(TB_FLAGS_AMASK_MVI);
2641 va = load_gpr(ctx, ra);
2642 gen_helper_maxsw4(vc, va, vb);
2643 break;
2644 default:
2645 goto invalid_opc;
2646 }
2647 break;
2649 case 0x1D:
2650 /* HW_MTPR (PALcode) */
2651 #ifndef CONFIG_USER_ONLY
2652 REQUIRE_TB_FLAG(TB_FLAGS_PAL_MODE);
2653 vb = load_gpr(ctx, rb);
2654 ret = gen_mtpr(ctx, vb, insn & 0xffff);
2655 break;
2656 #else
2657 goto invalid_opc;
2658 #endif
2660 case 0x1E:
2661 /* HW_RET (PALcode) */
2662 #ifndef CONFIG_USER_ONLY
2663 REQUIRE_TB_FLAG(TB_FLAGS_PAL_MODE);
2664 if (rb == 31) {
2665 /* Pre-EV6 CPUs interpreted this as HW_REI, loading the return
2666 address from EXC_ADDR. This turns out to be useful for our
2667 emulation PALcode, so continue to accept it. */
2668 ctx->lit = vb = tcg_temp_new();
2669 tcg_gen_ld_i64(vb, cpu_env, offsetof(CPUAlphaState, exc_addr));
2670 } else {
2671 vb = load_gpr(ctx, rb);
2672 }
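/* Leaving PALcode: clear the interrupt flag, drop any load-locked
   reservation, and take the new PALmode state from bit 0 of the
   target address.  */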
2673 tmp = tcg_temp_new();
2674 tcg_gen_movi_i64(tmp, 0);
2675 tcg_gen_st8_i64(tmp, cpu_env, offsetof(CPUAlphaState, intr_flag));
2676 tcg_gen_movi_i64(cpu_lock_addr, -1);
2677 tcg_gen_andi_i64(tmp, vb, 1);
2678 tcg_gen_st8_i64(tmp, cpu_env, offsetof(CPUAlphaState, pal_mode));
2679 tcg_gen_andi_i64(cpu_pc, vb, ~3);
2680 ret = EXIT_PC_UPDATED;
2681 break;
2682 #else
2683 goto invalid_opc;
2684 #endif
2686 case 0x1F:
2687 /* HW_ST (PALcode) */
2688 #ifndef CONFIG_USER_ONLY
2689 REQUIRE_TB_FLAG(TB_FLAGS_PAL_MODE);
2690 {
2691 switch ((insn >> 12) & 0xF) {
2692 case 0x0:
2693 /* Longword physical access */
2694 va = load_gpr(ctx, ra);
2695 vb = load_gpr(ctx, rb);
2696 tmp = tcg_temp_new();
2697 tcg_gen_addi_i64(tmp, vb, disp12);
2698 tcg_gen_qemu_st_i64(va, tmp, MMU_PHYS_IDX, MO_LESL);
2699 tcg_temp_free(tmp);
2700 break;
2701 case 0x1:
2702 /* Quadword physical access */
2703 va = load_gpr(ctx, ra);
2704 vb = load_gpr(ctx, rb);
2705 tmp = tcg_temp_new();
2706 tcg_gen_addi_i64(tmp, vb, disp12);
2707 tcg_gen_qemu_st_i64(va, tmp, MMU_PHYS_IDX, MO_LEQ);
2708 tcg_temp_free(tmp);
2709 break;
2710 case 0x2:
2711 /* Longword physical access with lock */
2712 ret = gen_store_conditional(ctx, ra, rb, disp12,
2713 MMU_PHYS_IDX, MO_LESL);
2714 break;
2715 case 0x3:
2716 /* Quadword physical access with lock */
2717 ret = gen_store_conditional(ctx, ra, rb, disp12,
2718 MMU_PHYS_IDX, MO_LEQ);
2719 break;
2720 case 0x4:
2721 /* Longword virtual access */
2722 goto invalid_opc;
2723 case 0x5:
2724 /* Quadword virtual access */
2725 goto invalid_opc;
2726 case 0x6:
2727 /* Invalid */
2728 goto invalid_opc;
2729 case 0x7:
2730 /* Invalid */
2731 goto invalid_opc;
2732 case 0x8:
2733 /* Invalid */
2734 goto invalid_opc;
2735 case 0x9:
2736 /* Invalid */
2737 goto invalid_opc;
2738 case 0xA:
2739 /* Invalid */
2740 goto invalid_opc;
2741 case 0xB:
2742 /* Invalid */
2743 goto invalid_opc;
2744 case 0xC:
2745 /* Longword virtual access with alternate access mode */
2746 goto invalid_opc;
2747 case 0xD:
2748 /* Quadword virtual access with alternate access mode */
2749 goto invalid_opc;
2750 case 0xE:
2751 /* Invalid */
2752 goto invalid_opc;
2753 case 0xF:
2754 /* Invalid */
2755 goto invalid_opc;
2756 }
2757 break;
2758 }
2759 #else
2760 goto invalid_opc;
2761 #endif
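/* The FP loads and stores below convert between the 32-bit memory
   formats (F and S) or the VAX G memory layout and the 64-bit
   register format; only LDT and STT move the bits unchanged.  */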
2762 case 0x20:
2763 /* LDF */
2764 gen_load_mem(ctx, &gen_qemu_ldf, ra, rb, disp16, 1, 0);
2765 break;
2766 case 0x21:
2767 /* LDG */
2768 gen_load_mem(ctx, &gen_qemu_ldg, ra, rb, disp16, 1, 0);
2769 break;
2770 case 0x22:
2771 /* LDS */
2772 gen_load_mem(ctx, &gen_qemu_lds, ra, rb, disp16, 1, 0);
2773 break;
2774 case 0x23:
2775 /* LDT */
2776 gen_load_mem(ctx, &tcg_gen_qemu_ld64, ra, rb, disp16, 1, 0);
2777 break;
2778 case 0x24:
2779 /* STF */
2780 gen_store_mem(ctx, &gen_qemu_stf, ra, rb, disp16, 1, 0);
2781 break;
2782 case 0x25:
2783 /* STG */
2784 gen_store_mem(ctx, &gen_qemu_stg, ra, rb, disp16, 1, 0);
2785 break;
2786 case 0x26:
2787 /* STS */
2788 gen_store_mem(ctx, &gen_qemu_sts, ra, rb, disp16, 1, 0);
2789 break;
2790 case 0x27:
2791 /* STT */
2792 gen_store_mem(ctx, &tcg_gen_qemu_st64, ra, rb, disp16, 1, 0);
2793 break;
2794 case 0x28:
2795 /* LDL */
2796 gen_load_mem(ctx, &tcg_gen_qemu_ld32s, ra, rb, disp16, 0, 0);
2797 break;
2798 case 0x29:
2799 /* LDQ */
2800 gen_load_mem(ctx, &tcg_gen_qemu_ld64, ra, rb, disp16, 0, 0);
2801 break;
2802 case 0x2A:
2803 /* LDL_L */
2804 gen_load_mem(ctx, &gen_qemu_ldl_l, ra, rb, disp16, 0, 0);
2805 break;
2806 case 0x2B:
2807 /* LDQ_L */
2808 gen_load_mem(ctx, &gen_qemu_ldq_l, ra, rb, disp16, 0, 0);
2809 break;
2810 case 0x2C:
2811 /* STL */
2812 gen_store_mem(ctx, &tcg_gen_qemu_st32, ra, rb, disp16, 0, 0);
2813 break;
2814 case 0x2D:
2815 /* STQ */
2816 gen_store_mem(ctx, &tcg_gen_qemu_st64, ra, rb, disp16, 0, 0);
2817 break;
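/* For STL_C/STQ_C, gen_store_conditional performs the store only if
   the reservation from the matching LDx_L is still intact, and it
   leaves the 0/1 success flag in Ra.  */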
2818 case 0x2E:
2819 /* STL_C */
2820 ret = gen_store_conditional(ctx, ra, rb, disp16,
2821 ctx->mem_idx, MO_LESL);
2822 break;
2823 case 0x2F:
2824 /* STQ_C */
2825 ret = gen_store_conditional(ctx, ra, rb, disp16,
2826 ctx->mem_idx, MO_LEQ);
2827 break;
2828 case 0x30:
2829 /* BR */
2830 ret = gen_bdirect(ctx, ra, disp21);
2831 break;
2832 case 0x31: /* FBEQ */
2833 ret = gen_fbcond(ctx, TCG_COND_EQ, ra, disp21);
2834 break;
2835 case 0x32: /* FBLT */
2836 ret = gen_fbcond(ctx, TCG_COND_LT, ra, disp21);
2837 break;
2838 case 0x33: /* FBLE */
2839 ret = gen_fbcond(ctx, TCG_COND_LE, ra, disp21);
2840 break;
2841 case 0x34:
2842 /* BSR */
2843 ret = gen_bdirect(ctx, ra, disp21);
2844 break;
2845 case 0x35: /* FBNE */
2846 ret = gen_fbcond(ctx, TCG_COND_NE, ra, disp21);
2847 break;
2848 case 0x36: /* FBGE */
2849 ret = gen_fbcond(ctx, TCG_COND_GE, ra, disp21);
2850 break;
2851 case 0x37: /* FBGT */
2852 ret = gen_fbcond(ctx, TCG_COND_GT, ra, disp21);
2853 break;
2854 case 0x38:
2855 /* BLBC */
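/* BLBC/BLBS test only the low bit of Ra; the final argument to
   gen_bcond is the mask applied before comparing against zero.  */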
2856 ret = gen_bcond(ctx, TCG_COND_EQ, ra, disp21, 1);
2857 break;
2858 case 0x39:
2859 /* BEQ */
2860 ret = gen_bcond(ctx, TCG_COND_EQ, ra, disp21, 0);
2861 break;
2862 case 0x3A:
2863 /* BLT */
2864 ret = gen_bcond(ctx, TCG_COND_LT, ra, disp21, 0);
2865 break;
2866 case 0x3B:
2867 /* BLE */
2868 ret = gen_bcond(ctx, TCG_COND_LE, ra, disp21, 0);
2869 break;
2870 case 0x3C:
2871 /* BLBS */
2872 ret = gen_bcond(ctx, TCG_COND_NE, ra, disp21, 1);
2873 break;
2874 case 0x3D:
2875 /* BNE */
2876 ret = gen_bcond(ctx, TCG_COND_NE, ra, disp21, 0);
2877 break;
2878 case 0x3E:
2879 /* BGE */
2880 ret = gen_bcond(ctx, TCG_COND_GE, ra, disp21, 0);
2881 break;
2882 case 0x3F:
2883 /* BGT */
2884 ret = gen_bcond(ctx, TCG_COND_GT, ra, disp21, 0);
2885 break;
2886 invalid_opc:
2887 ret = gen_invalid(ctx);
2888 break;
2889 }
2891 return ret;
2892 }
2894 void gen_intermediate_code(CPUAlphaState *env, struct TranslationBlock *tb)
2895 {
2896 AlphaCPU *cpu = alpha_env_get_cpu(env);
2897 CPUState *cs = CPU(cpu);
2898 DisasContext ctx, *ctxp = &ctx;
2899 target_ulong pc_start;
2900 target_ulong pc_mask;
2901 uint32_t insn;
2902 ExitStatus ret;
2903 int num_insns;
2904 int max_insns;
2906 pc_start = tb->pc;
2908 ctx.tb = tb;
2909 ctx.pc = pc_start;
2910 ctx.mem_idx = cpu_mmu_index(env, false);
2911 ctx.implver = env->implver;
2912 ctx.singlestep_enabled = cs->singlestep_enabled;
2914 #ifdef CONFIG_USER_ONLY
2915 ctx.ir = cpu_std_ir;
2916 #else
2917 ctx.palbr = env->palbr;
2918 ctx.ir = (tb->flags & TB_FLAGS_PAL_MODE ? cpu_pal_ir : cpu_std_ir);
2919 #endif
2921 /* ??? Every TB begins with unset rounding mode, to be initialized on
2922 the first fp insn of the TB. Alternatively we could define a proper
2923 default for every TB (e.g. QUAL_RM_N or QUAL_RM_D) and make sure
2924 to reset the FP_STATUS to that default at the end of any TB that
2925 changes the default. We could even (gasp) dynamically figure out
2926 what default would be most efficient given the running program. */
2927 ctx.tb_rm = -1;
2928 /* Similarly for flush-to-zero. */
2929 ctx.tb_ftz = -1;
2931 TCGV_UNUSED_I64(ctx.zero);
2932 TCGV_UNUSED_I64(ctx.sink);
2933 TCGV_UNUSED_I64(ctx.lit);
2935 num_insns = 0;
2936 max_insns = tb->cflags & CF_COUNT_MASK;
2937 if (max_insns == 0) {
2938 max_insns = CF_COUNT_MASK;
2939 }
2940 if (max_insns > TCG_MAX_INSNS) {
2941 max_insns = TCG_MAX_INSNS;
2942 }
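/* Within a superpage, a TB may cross what would otherwise be a page
   boundary, so only the 41-bit superpage offset bounds translation;
   otherwise the loop below stops at the page edge.  */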
2944 if (in_superpage(&ctx, pc_start)) {
2945 pc_mask = (1ULL << 41) - 1;
2946 } else {
2947 pc_mask = ~TARGET_PAGE_MASK;
2948 }
2950 gen_tb_start(tb);
2951 do {
2952 tcg_gen_insn_start(ctx.pc);
2953 num_insns++;
2955 if (unlikely(cpu_breakpoint_test(cs, ctx.pc, BP_ANY))) {
2956 ret = gen_excp(&ctx, EXCP_DEBUG, 0);
2957 /* The address covered by the breakpoint must be included in
2958 [tb->pc, tb->pc + tb->size) in order for it to be
2959 properly cleared -- thus we increment the PC here so that
2960 the logic setting tb->size below does the right thing. */
2961 ctx.pc += 4;
2962 break;
2963 }
2964 if (num_insns == max_insns && (tb->cflags & CF_LAST_IO)) {
2965 gen_io_start();
2966 }
2967 insn = cpu_ldl_code(env, ctx.pc);
2969 ctx.pc += 4;
2970 ret = translate_one(ctxp, insn);
2971 free_context_temps(ctxp);
2973 /* If we reach a page boundary, are single stepping,
2974 or exhaust instruction count, stop generation. */
2975 if (ret == NO_EXIT
2976 && ((ctx.pc & pc_mask) == 0
2977 || tcg_op_buf_full()
2978 || num_insns >= max_insns
2979 || singlestep
2980 || ctx.singlestep_enabled)) {
2981 ret = EXIT_PC_STALE;
2982 }
2983 } while (ret == NO_EXIT);
2985 if (tb->cflags & CF_LAST_IO) {
2986 gen_io_end();
2987 }
2989 switch (ret) {
2990 case EXIT_GOTO_TB:
2991 case EXIT_NORETURN:
2992 break;
2993 case EXIT_PC_STALE:
2994 tcg_gen_movi_i64(cpu_pc, ctx.pc);
2995 /* FALLTHRU */
2996 case EXIT_PC_UPDATED:
2997 if (ctx.singlestep_enabled) {
2998 gen_excp_1(EXCP_DEBUG, 0);
2999 } else {
3000 tcg_gen_exit_tb(0);
3001 }
3002 break;
3003 default:
3004 abort();
3005 }
3007 gen_tb_end(tb, num_insns);
3009 tb->size = ctx.pc - pc_start;
3010 tb->icount = num_insns;
3012 #ifdef DEBUG_DISAS
3013 if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)
3014 && qemu_log_in_addr_range(pc_start)) {
3015 qemu_log_lock();
3016 qemu_log("IN: %s\n", lookup_symbol(pc_start));
3017 log_target_disas(cs, pc_start, ctx.pc - pc_start, 1);
3018 qemu_log("\n");
3019 qemu_log_unlock();
3020 }
3021 #endif
3022 }
3024 void restore_state_to_opc(CPUAlphaState *env, TranslationBlock *tb,
3025 target_ulong *data)
3026 {
3027 env->pc = data[0];
3028 }