1 /*
2 * Alpha emulation cpu translation for qemu.
4 * Copyright (c) 2007 Jocelyn Mayer
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
20 #include "qemu/osdep.h"
21 #include "cpu.h"
22 #include "disas/disas.h"
23 #include "qemu/host-utils.h"
24 #include "tcg-op.h"
25 #include "exec/cpu_ldst.h"
27 #include "exec/helper-proto.h"
28 #include "exec/helper-gen.h"
30 #include "trace-tcg.h"
31 #include "exec/log.h"
34 #undef ALPHA_DEBUG_DISAS
35 #define CONFIG_SOFTFLOAT_INLINE
37 #ifdef ALPHA_DEBUG_DISAS
38 # define LOG_DISAS(...) qemu_log_mask(CPU_LOG_TB_IN_ASM, ## __VA_ARGS__)
39 #else
40 # define LOG_DISAS(...) do { } while (0)
41 #endif
43 typedef struct DisasContext DisasContext;
44 struct DisasContext {
45 struct TranslationBlock *tb;
46 uint64_t pc;
47 #ifndef CONFIG_USER_ONLY
48 uint64_t palbr;
49 #endif
50 int mem_idx;
52 /* Current rounding mode for this TB. */
53 int tb_rm;
54 /* Current flush-to-zero setting for this TB. */
55 int tb_ftz;
57 /* implver value for this CPU. */
58 int implver;
60 /* The set of registers active in the current context. */
61 TCGv *ir;
63 /* Temporaries for $31 and $f31 as source and destination. */
64 TCGv zero;
65 TCGv sink;
66 /* Temporary for immediate constants. */
67 TCGv lit;
69 bool singlestep_enabled;
72 /* Return values from translate_one, indicating the state of the TB.
73 Note that zero indicates that we are not exiting the TB. */
75 typedef enum {
76 NO_EXIT,
78 /* We have emitted one or more goto_tb. No fixup required. */
79 EXIT_GOTO_TB,
81 /* We are not using a goto_tb (for whatever reason), but have updated
82 the PC (for whatever reason), so there's no need to do it again on
83 exiting the TB. */
84 EXIT_PC_UPDATED,
86 /* We are exiting the TB, but have neither emitted a goto_tb, nor
87 updated the PC for the next instruction to be executed. */
88 EXIT_PC_STALE,
90 /* We are ending the TB with a noreturn function call, e.g. longjmp.
91 No following code will be executed. */
92 EXIT_NORETURN,
93 } ExitStatus;
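/* Illustrative sketch (an assumption, not code from this file): the
 * translation loop that consumes these values would dispatch roughly as
 *
 *     ret = translate_one(ctx, insn);
 *     if (ret == NO_EXIT) {
 *         continue;                          // decode the next instruction
 *     }
 *     if (ret == EXIT_PC_STALE) {
 *         tcg_gen_movi_i64(cpu_pc, ctx->pc); // PC must still be refreshed
 *     }
 *     // EXIT_GOTO_TB, EXIT_PC_UPDATED and EXIT_NORETURN need no fixup
 */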
95 /* global register indexes */
96 static TCGv_env cpu_env;
97 static TCGv cpu_std_ir[31];
98 static TCGv cpu_fir[31];
99 static TCGv cpu_pc;
100 static TCGv cpu_lock_addr;
101 static TCGv cpu_lock_st_addr;
102 static TCGv cpu_lock_value;
104 #ifndef CONFIG_USER_ONLY
105 static TCGv cpu_pal_ir[31];
106 #endif
108 #include "exec/gen-icount.h"
110 void alpha_translate_init(void)
112 #define DEF_VAR(V) { &cpu_##V, #V, offsetof(CPUAlphaState, V) }
114 typedef struct { TCGv *var; const char *name; int ofs; } GlobalVar;
115 static const GlobalVar vars[] = {
116 DEF_VAR(pc),
117 DEF_VAR(lock_addr),
118 DEF_VAR(lock_st_addr),
119 DEF_VAR(lock_value),
122 #undef DEF_VAR
124 /* Use the symbolic register names that match the disassembler. */
125 static const char greg_names[31][4] = {
126 "v0", "t0", "t1", "t2", "t3", "t4", "t5", "t6",
127 "t7", "s0", "s1", "s2", "s3", "s4", "s5", "fp",
128 "a0", "a1", "a2", "a3", "a4", "a5", "t8", "t9",
129 "t10", "t11", "ra", "t12", "at", "gp", "sp"
131 static const char freg_names[31][4] = {
132 "f0", "f1", "f2", "f3", "f4", "f5", "f6", "f7",
133 "f8", "f9", "f10", "f11", "f12", "f13", "f14", "f15",
134 "f16", "f17", "f18", "f19", "f20", "f21", "f22", "f23",
135 "f24", "f25", "f26", "f27", "f28", "f29", "f30"
137 #ifndef CONFIG_USER_ONLY
138 static const char shadow_names[8][8] = {
139 "pal_t7", "pal_s0", "pal_s1", "pal_s2",
140 "pal_s3", "pal_s4", "pal_s5", "pal_t11"
142 #endif
144 static bool done_init = false;
145 int i;
147 if (done_init) {
148 return;
150 done_init = true;
152 cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");
154 for (i = 0; i < 31; i++) {
155 cpu_std_ir[i] = tcg_global_mem_new_i64(cpu_env,
156 offsetof(CPUAlphaState, ir[i]),
157 greg_names[i]);
160 for (i = 0; i < 31; i++) {
161 cpu_fir[i] = tcg_global_mem_new_i64(cpu_env,
162 offsetof(CPUAlphaState, fir[i]),
163 freg_names[i]);
166 #ifndef CONFIG_USER_ONLY
167 memcpy(cpu_pal_ir, cpu_std_ir, sizeof(cpu_pal_ir));
168 for (i = 0; i < 8; i++) {
169 int r = (i == 7 ? 25 : i + 8);
170 cpu_pal_ir[r] = tcg_global_mem_new_i64(cpu_env,
171 offsetof(CPUAlphaState,
172 shadow[i]),
173 shadow_names[i]);
175 #endif
177 for (i = 0; i < ARRAY_SIZE(vars); ++i) {
178 const GlobalVar *v = &vars[i];
179 *v->var = tcg_global_mem_new_i64(cpu_env, v->ofs, v->name);
183 static TCGv load_zero(DisasContext *ctx)
185 if (TCGV_IS_UNUSED_I64(ctx->zero)) {
186 ctx->zero = tcg_const_i64(0);
188 return ctx->zero;
191 static TCGv dest_sink(DisasContext *ctx)
193 if (TCGV_IS_UNUSED_I64(ctx->sink)) {
194 ctx->sink = tcg_temp_new();
196 return ctx->sink;
199 static TCGv load_gpr(DisasContext *ctx, unsigned reg)
201 if (likely(reg < 31)) {
202 return ctx->ir[reg];
203 } else {
204 return load_zero(ctx);
208 static TCGv load_gpr_lit(DisasContext *ctx, unsigned reg,
209 uint8_t lit, bool islit)
211 if (islit) {
212 ctx->lit = tcg_const_i64(lit);
213 return ctx->lit;
214 } else if (likely(reg < 31)) {
215 return ctx->ir[reg];
216 } else {
217 return load_zero(ctx);
221 static TCGv dest_gpr(DisasContext *ctx, unsigned reg)
223 if (likely(reg < 31)) {
224 return ctx->ir[reg];
225 } else {
226 return dest_sink(ctx);
230 static TCGv load_fpr(DisasContext *ctx, unsigned reg)
232 if (likely(reg < 31)) {
233 return cpu_fir[reg];
234 } else {
235 return load_zero(ctx);
239 static TCGv dest_fpr(DisasContext *ctx, unsigned reg)
241 if (likely(reg < 31)) {
242 return cpu_fir[reg];
243 } else {
244 return dest_sink(ctx);
248 static void gen_excp_1(int exception, int error_code)
250 TCGv_i32 tmp1, tmp2;
252 tmp1 = tcg_const_i32(exception);
253 tmp2 = tcg_const_i32(error_code);
254 gen_helper_excp(cpu_env, tmp1, tmp2);
255 tcg_temp_free_i32(tmp2);
256 tcg_temp_free_i32(tmp1);
259 static ExitStatus gen_excp(DisasContext *ctx, int exception, int error_code)
261 tcg_gen_movi_i64(cpu_pc, ctx->pc);
262 gen_excp_1(exception, error_code);
263 return EXIT_NORETURN;
266 static inline ExitStatus gen_invalid(DisasContext *ctx)
268 return gen_excp(ctx, EXCP_OPCDEC, 0);
271 static inline void gen_qemu_ldf(TCGv t0, TCGv t1, int flags)
273 TCGv_i32 tmp32 = tcg_temp_new_i32();
274 tcg_gen_qemu_ld_i32(tmp32, t1, flags, MO_LEUL);
275 gen_helper_memory_to_f(t0, tmp32);
276 tcg_temp_free_i32(tmp32);
279 static inline void gen_qemu_ldg(TCGv t0, TCGv t1, int flags)
281 TCGv tmp = tcg_temp_new();
282 tcg_gen_qemu_ld_i64(tmp, t1, flags, MO_LEQ);
283 gen_helper_memory_to_g(t0, tmp);
284 tcg_temp_free(tmp);
287 static inline void gen_qemu_lds(TCGv t0, TCGv t1, int flags)
289 TCGv_i32 tmp32 = tcg_temp_new_i32();
290 tcg_gen_qemu_ld_i32(tmp32, t1, flags, MO_LEUL);
291 gen_helper_memory_to_s(t0, tmp32);
292 tcg_temp_free_i32(tmp32);
295 static inline void gen_qemu_ldl_l(TCGv t0, TCGv t1, int flags)
297 tcg_gen_qemu_ld_i64(t0, t1, flags, MO_LESL);
298 tcg_gen_mov_i64(cpu_lock_addr, t1);
299 tcg_gen_mov_i64(cpu_lock_value, t0);
302 static inline void gen_qemu_ldq_l(TCGv t0, TCGv t1, int flags)
304 tcg_gen_qemu_ld_i64(t0, t1, flags, MO_LEQ);
305 tcg_gen_mov_i64(cpu_lock_addr, t1);
306 tcg_gen_mov_i64(cpu_lock_value, t0);
309 static inline void gen_load_mem(DisasContext *ctx,
310 void (*tcg_gen_qemu_load)(TCGv t0, TCGv t1,
311 int flags),
312 int ra, int rb, int32_t disp16, bool fp,
313 bool clear)
315 TCGv tmp, addr, va;
317 /* LDQ_U with ra $31 is UNOP. Other various loads are forms of
318 prefetches, which we can treat as nops. No worries about
319 missed exceptions here. */
320 if (unlikely(ra == 31)) {
321 return;
324 tmp = tcg_temp_new();
325 addr = load_gpr(ctx, rb);
327 if (disp16) {
328 tcg_gen_addi_i64(tmp, addr, disp16);
329 addr = tmp;
331 if (clear) {
332 tcg_gen_andi_i64(tmp, addr, ~0x7);
333 addr = tmp;
336 va = (fp ? cpu_fir[ra] : ctx->ir[ra]);
337 tcg_gen_qemu_load(va, addr, ctx->mem_idx);
339 tcg_temp_free(tmp);
342 static inline void gen_qemu_stf(TCGv t0, TCGv t1, int flags)
344 TCGv_i32 tmp32 = tcg_temp_new_i32();
345 gen_helper_f_to_memory(tmp32, t0);
346 tcg_gen_qemu_st_i32(tmp32, t1, flags, MO_LEUL);
347 tcg_temp_free_i32(tmp32);
350 static inline void gen_qemu_stg(TCGv t0, TCGv t1, int flags)
352 TCGv tmp = tcg_temp_new();
353 gen_helper_g_to_memory(tmp, t0);
354 tcg_gen_qemu_st_i64(tmp, t1, flags, MO_LEQ);
355 tcg_temp_free(tmp);
358 static inline void gen_qemu_sts(TCGv t0, TCGv t1, int flags)
360 TCGv_i32 tmp32 = tcg_temp_new_i32();
361 gen_helper_s_to_memory(tmp32, t0);
362 tcg_gen_qemu_st_i32(tmp32, t1, flags, MO_LEUL);
363 tcg_temp_free_i32(tmp32);
366 static inline void gen_store_mem(DisasContext *ctx,
367 void (*tcg_gen_qemu_store)(TCGv t0, TCGv t1,
368 int flags),
369 int ra, int rb, int32_t disp16, bool fp,
370 bool clear)
372 TCGv tmp, addr, va;
374 tmp = tcg_temp_new();
375 addr = load_gpr(ctx, rb);
377 if (disp16) {
378 tcg_gen_addi_i64(tmp, addr, disp16);
379 addr = tmp;
381 if (clear) {
382 tcg_gen_andi_i64(tmp, addr, ~0x7);
383 addr = tmp;
386 va = (fp ? load_fpr(ctx, ra) : load_gpr(ctx, ra));
387 tcg_gen_qemu_store(va, addr, ctx->mem_idx);
389 tcg_temp_free(tmp);
392 static ExitStatus gen_store_conditional(DisasContext *ctx, int ra, int rb,
393 int32_t disp16, int quad)
395 TCGv addr;
397 if (ra == 31) {
398 /* ??? Don't bother storing anything. The user can't tell
399 the difference, since the zero register always reads zero. */
400 return NO_EXIT;
403 #if defined(CONFIG_USER_ONLY)
404 addr = cpu_lock_st_addr;
405 #else
406 addr = tcg_temp_local_new();
407 #endif
409 tcg_gen_addi_i64(addr, load_gpr(ctx, rb), disp16);
411 #if defined(CONFIG_USER_ONLY)
412 /* ??? This is handled via a complicated version of compare-and-swap
413 in the cpu_loop. Hopefully one day we'll have a real CAS opcode
414 in TCG so that this isn't necessary. */
415 return gen_excp(ctx, quad ? EXCP_STQ_C : EXCP_STL_C, ra);
416 #else
417 /* ??? In system mode we are never multi-threaded, so CAS can be
418 implemented via a non-atomic load-compare-store sequence. */
420 TCGLabel *lab_fail, *lab_done;
421 TCGv val;
423 lab_fail = gen_new_label();
424 lab_done = gen_new_label();
425 tcg_gen_brcond_i64(TCG_COND_NE, addr, cpu_lock_addr, lab_fail);
427 val = tcg_temp_new();
428 tcg_gen_qemu_ld_i64(val, addr, ctx->mem_idx, quad ? MO_LEQ : MO_LESL);
429 tcg_gen_brcond_i64(TCG_COND_NE, val, cpu_lock_value, lab_fail);
431 tcg_gen_qemu_st_i64(ctx->ir[ra], addr, ctx->mem_idx,
432 quad ? MO_LEQ : MO_LEUL);
433 tcg_gen_movi_i64(ctx->ir[ra], 1);
434 tcg_gen_br(lab_done);
436 gen_set_label(lab_fail);
437 tcg_gen_movi_i64(ctx->ir[ra], 0);
439 gen_set_label(lab_done);
440 tcg_gen_movi_i64(cpu_lock_addr, -1);
442 tcg_temp_free(addr);
443 return NO_EXIT;
445 #endif
448 static bool in_superpage(DisasContext *ctx, int64_t addr)
450 return ((ctx->tb->flags & TB_FLAGS_USER_MODE) == 0
451 && addr < 0
452 && ((addr >> 41) & 3) == 2
453 && addr >> TARGET_VIRT_ADDR_SPACE_BITS == addr >> 63);
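/* Illustrative note (not part of the original source): the three tests above
 * accept the kernel superpage. For example, 0xfffffc0000000000 is negative,
 * has bits <42:41> equal to binary 10, and sign-extends cleanly from the
 * implemented virtual-address width, so it passes; any user-space address
 * fails the first test immediately. */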
456 static bool use_goto_tb(DisasContext *ctx, uint64_t dest)
458 /* Suppress goto_tb in the case of single-stepping and IO. */
459 if ((ctx->tb->cflags & CF_LAST_IO)
460 || ctx->singlestep_enabled || singlestep) {
461 return false;
463 #ifndef CONFIG_USER_ONLY
464 /* If the destination is in the superpage, the page perms can't change. */
465 if (in_superpage(ctx, dest)) {
466 return true;
468 /* Check for the dest on the same page as the start of the TB. */
469 return ((ctx->tb->pc ^ dest) & TARGET_PAGE_MASK) == 0;
470 #else
471 return true;
472 #endif
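/* Illustrative note: chaining directly to DEST with goto_tb is only valid
 * while the guest page permissions known at translation time still apply,
 * hence the TARGET_PAGE_MASK comparison against the TB's starting PC.
 * Superpage destinations are exempt because their mapping never changes. */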
475 static ExitStatus gen_bdirect(DisasContext *ctx, int ra, int32_t disp)
477 uint64_t dest = ctx->pc + (disp << 2);
479 if (ra != 31) {
480 tcg_gen_movi_i64(ctx->ir[ra], ctx->pc);
483 /* Notice branch-to-next; used to initialize RA with the PC. */
484 if (disp == 0) {
485 return NO_EXIT;
486 } else if (use_goto_tb(ctx, dest)) {
487 tcg_gen_goto_tb(0);
488 tcg_gen_movi_i64(cpu_pc, dest);
489 tcg_gen_exit_tb((uintptr_t)ctx->tb);
490 return EXIT_GOTO_TB;
491 } else {
492 tcg_gen_movi_i64(cpu_pc, dest);
493 return EXIT_PC_UPDATED;
497 static ExitStatus gen_bcond_internal(DisasContext *ctx, TCGCond cond,
498 TCGv cmp, int32_t disp)
500 uint64_t dest = ctx->pc + (disp << 2);
501 TCGLabel *lab_true = gen_new_label();
503 if (use_goto_tb(ctx, dest)) {
504 tcg_gen_brcondi_i64(cond, cmp, 0, lab_true);
506 tcg_gen_goto_tb(0);
507 tcg_gen_movi_i64(cpu_pc, ctx->pc);
508 tcg_gen_exit_tb((uintptr_t)ctx->tb);
510 gen_set_label(lab_true);
511 tcg_gen_goto_tb(1);
512 tcg_gen_movi_i64(cpu_pc, dest);
513 tcg_gen_exit_tb((uintptr_t)ctx->tb + 1);
515 return EXIT_GOTO_TB;
516 } else {
517 TCGv_i64 z = tcg_const_i64(0);
518 TCGv_i64 d = tcg_const_i64(dest);
519 TCGv_i64 p = tcg_const_i64(ctx->pc);
521 tcg_gen_movcond_i64(cond, cpu_pc, cmp, z, d, p);
523 tcg_temp_free_i64(z);
524 tcg_temp_free_i64(d);
525 tcg_temp_free_i64(p);
526 return EXIT_PC_UPDATED;
530 static ExitStatus gen_bcond(DisasContext *ctx, TCGCond cond, int ra,
531 int32_t disp, int mask)
533 TCGv cmp_tmp;
535 if (mask) {
536 cmp_tmp = tcg_temp_new();
537 tcg_gen_andi_i64(cmp_tmp, load_gpr(ctx, ra), 1);
538 } else {
539 cmp_tmp = load_gpr(ctx, ra);
542 return gen_bcond_internal(ctx, cond, cmp_tmp, disp);
545 /* Fold -0.0 for comparison with COND. */
547 static void gen_fold_mzero(TCGCond cond, TCGv dest, TCGv src)
549 uint64_t mzero = 1ull << 63;
551 switch (cond) {
552 case TCG_COND_LE:
553 case TCG_COND_GT:
554 /* For <= or >, the -0.0 value directly compares the way we want. */
555 tcg_gen_mov_i64(dest, src);
556 break;
558 case TCG_COND_EQ:
559 case TCG_COND_NE:
560 /* For == or !=, we can simply mask off the sign bit and compare. */
561 tcg_gen_andi_i64(dest, src, mzero - 1);
562 break;
564 case TCG_COND_GE:
565 case TCG_COND_LT:
566 /* For >= or <, map -0.0 to +0.0 via comparison and mask. */
567 tcg_gen_setcondi_i64(TCG_COND_NE, dest, src, mzero);
568 tcg_gen_neg_i64(dest, dest);
569 tcg_gen_and_i64(dest, dest, src);
570 break;
572 default:
573 abort();
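/* Worked example (illustrative): in the >= / < case above, src == -0.0
 * (0x8000000000000000) gives setcond -> 0, neg -> 0, and the final AND
 * yields +0.0; any other src gives setcond -> 1, neg -> all-ones, and the
 * AND passes src through unchanged. Both zeros therefore compare against
 * zero identically, as IEEE requires. */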
577 static ExitStatus gen_fbcond(DisasContext *ctx, TCGCond cond, int ra,
578 int32_t disp)
580 TCGv cmp_tmp = tcg_temp_new();
581 gen_fold_mzero(cond, cmp_tmp, load_fpr(ctx, ra));
582 return gen_bcond_internal(ctx, cond, cmp_tmp, disp);
585 static void gen_fcmov(DisasContext *ctx, TCGCond cond, int ra, int rb, int rc)
587 TCGv_i64 va, vb, z;
589 z = load_zero(ctx);
590 vb = load_fpr(ctx, rb);
591 va = tcg_temp_new();
592 gen_fold_mzero(cond, va, load_fpr(ctx, ra));
594 tcg_gen_movcond_i64(cond, dest_fpr(ctx, rc), va, z, vb, load_fpr(ctx, rc));
596 tcg_temp_free(va);
599 #define QUAL_RM_N 0x080 /* Round mode nearest even */
600 #define QUAL_RM_C 0x000 /* Round mode chopped */
601 #define QUAL_RM_M 0x040 /* Round mode minus infinity */
602 #define QUAL_RM_D 0x0c0 /* Round mode dynamic */
603 #define QUAL_RM_MASK 0x0c0
605 #define QUAL_U 0x100 /* Underflow enable (fp output) */
606 #define QUAL_V 0x100 /* Overflow enable (int output) */
607 #define QUAL_S 0x400 /* Software completion enable */
608 #define QUAL_I 0x200 /* Inexact detection enable */
610 static void gen_qual_roundmode(DisasContext *ctx, int fn11)
612 TCGv_i32 tmp;
614 fn11 &= QUAL_RM_MASK;
615 if (fn11 == ctx->tb_rm) {
616 return;
618 ctx->tb_rm = fn11;
620 tmp = tcg_temp_new_i32();
621 switch (fn11) {
622 case QUAL_RM_N:
623 tcg_gen_movi_i32(tmp, float_round_nearest_even);
624 break;
625 case QUAL_RM_C:
626 tcg_gen_movi_i32(tmp, float_round_to_zero);
627 break;
628 case QUAL_RM_M:
629 tcg_gen_movi_i32(tmp, float_round_down);
630 break;
631 case QUAL_RM_D:
632 tcg_gen_ld8u_i32(tmp, cpu_env,
633 offsetof(CPUAlphaState, fpcr_dyn_round));
634 break;
637 #if defined(CONFIG_SOFTFLOAT_INLINE)
638 /* ??? The "fpu/softfloat.h" interface is to call set_float_rounding_mode.
639 With CONFIG_SOFTFLOAT that expands to an out-of-line call that just
640 sets the one field. */
641 tcg_gen_st8_i32(tmp, cpu_env,
642 offsetof(CPUAlphaState, fp_status.float_rounding_mode));
643 #else
644 gen_helper_setroundmode(tmp);
645 #endif
647 tcg_temp_free_i32(tmp);
650 static void gen_qual_flushzero(DisasContext *ctx, int fn11)
652 TCGv_i32 tmp;
654 fn11 &= QUAL_U;
655 if (fn11 == ctx->tb_ftz) {
656 return;
658 ctx->tb_ftz = fn11;
660 tmp = tcg_temp_new_i32();
661 if (fn11) {
662 /* Underflow is enabled, use the FPCR setting. */
663 tcg_gen_ld8u_i32(tmp, cpu_env,
664 offsetof(CPUAlphaState, fpcr_flush_to_zero));
665 } else {
666 /* Underflow is disabled, force flush-to-zero. */
667 tcg_gen_movi_i32(tmp, 1);
670 #if defined(CONFIG_SOFTFLOAT_INLINE)
671 tcg_gen_st8_i32(tmp, cpu_env,
672 offsetof(CPUAlphaState, fp_status.flush_to_zero));
673 #else
674 gen_helper_setflushzero(tmp);
675 #endif
677 tcg_temp_free_i32(tmp);
680 static TCGv gen_ieee_input(DisasContext *ctx, int reg, int fn11, int is_cmp)
682 TCGv val;
684 if (unlikely(reg == 31)) {
685 val = load_zero(ctx);
686 } else {
687 val = cpu_fir[reg];
688 if ((fn11 & QUAL_S) == 0) {
689 if (is_cmp) {
690 gen_helper_ieee_input_cmp(cpu_env, val);
691 } else {
692 gen_helper_ieee_input(cpu_env, val);
694 } else {
695 #ifndef CONFIG_USER_ONLY
696 /* In system mode, raise exceptions for denormals like real
697 hardware. In user mode, proceed as if the OS completion
698 handler is handling the denormal as per spec. */
699 gen_helper_ieee_input_s(cpu_env, val);
700 #endif
703 return val;
706 static void gen_fp_exc_raise(int rc, int fn11)
708 /* ??? We ought to be able to do something with imprecise exceptions.
709 E.g. notice we're still in the trap shadow of something within the
710 TB and do not generate the code to signal the exception; end the TB
711 when an exception is forced to arrive, either by consumption of a
712 register value or TRAPB or EXCB. */
713 TCGv_i32 reg, ign;
714 uint32_t ignore = 0;
716 if (!(fn11 & QUAL_U)) {
717 /* Note that QUAL_U == QUAL_V, so ignore either. */
718 ignore |= FPCR_UNF | FPCR_IOV;
720 if (!(fn11 & QUAL_I)) {
721 ignore |= FPCR_INE;
723 ign = tcg_const_i32(ignore);
725 /* ??? Pass in the regno of the destination so that the helper can
726 set EXC_MASK, which contains a bitmask of destination registers
727 that have caused arithmetic traps. A simple userspace emulation
728 does not require this. We do need it for a guest kernel's entArith,
729 or if we were to do something clever with imprecise exceptions. */
730 reg = tcg_const_i32(rc + 32);
731 if (fn11 & QUAL_S) {
732 gen_helper_fp_exc_raise_s(cpu_env, ign, reg);
733 } else {
734 gen_helper_fp_exc_raise(cpu_env, ign, reg);
737 tcg_temp_free_i32(reg);
738 tcg_temp_free_i32(ign);
741 static void gen_cvtlq(TCGv vc, TCGv vb)
743 TCGv tmp = tcg_temp_new();
745 /* The arithmetic right shift here, plus the sign-extended mask below
746 yields a sign-extended result without an explicit ext32s_i64. */
747 tcg_gen_sari_i64(tmp, vb, 32);
748 tcg_gen_shri_i64(vc, vb, 29);
749 tcg_gen_andi_i64(tmp, tmp, (int32_t)0xc0000000);
750 tcg_gen_andi_i64(vc, vc, 0x3fffffff);
751 tcg_gen_or_i64(vc, vc, tmp);
753 tcg_temp_free(tmp);
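/* Layout sketch (illustrative): a longword kept in FP register format holds
 * bits <31:30> of the value at <63:62> and bits <29:0> at <58:29>. The code
 * above therefore reassembles the canonical, sign-extended result as roughly
 *
 *     vc = (((int64_t)vb >> 32) & 0xffffffffc0000000ull)  // <31:30> + sign
 *        | ((vb >> 29) & 0x3fffffff);                     // <29:0>
 */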
756 static void gen_ieee_arith2(DisasContext *ctx,
757 void (*helper)(TCGv, TCGv_ptr, TCGv),
758 int rb, int rc, int fn11)
760 TCGv vb;
762 gen_qual_roundmode(ctx, fn11);
763 gen_qual_flushzero(ctx, fn11);
765 vb = gen_ieee_input(ctx, rb, fn11, 0);
766 helper(dest_fpr(ctx, rc), cpu_env, vb);
768 gen_fp_exc_raise(rc, fn11);
771 #define IEEE_ARITH2(name) \
772 static inline void glue(gen_, name)(DisasContext *ctx, \
773 int rb, int rc, int fn11) \
775 gen_ieee_arith2(ctx, gen_helper_##name, rb, rc, fn11); \
777 IEEE_ARITH2(sqrts)
778 IEEE_ARITH2(sqrtt)
779 IEEE_ARITH2(cvtst)
780 IEEE_ARITH2(cvtts)
782 static void gen_cvttq(DisasContext *ctx, int rb, int rc, int fn11)
784 TCGv vb, vc;
786 /* No need to set flushzero, since we have an integer output. */
787 vb = gen_ieee_input(ctx, rb, fn11, 0);
788 vc = dest_fpr(ctx, rc);
790 /* Almost all integer conversions use cropped rounding;
791 special case that. */
792 if ((fn11 & QUAL_RM_MASK) == QUAL_RM_C) {
793 gen_helper_cvttq_c(vc, cpu_env, vb);
794 } else {
795 gen_qual_roundmode(ctx, fn11);
796 gen_helper_cvttq(vc, cpu_env, vb);
798 gen_fp_exc_raise(rc, fn11);
801 static void gen_ieee_intcvt(DisasContext *ctx,
802 void (*helper)(TCGv, TCGv_ptr, TCGv),
803 int rb, int rc, int fn11)
805 TCGv vb, vc;
807 gen_qual_roundmode(ctx, fn11);
808 vb = load_fpr(ctx, rb);
809 vc = dest_fpr(ctx, rc);
811 /* The only exception that can be raised by integer conversion
812 is inexact. Thus we only need to worry about exceptions when
813 inexact handling is requested. */
814 if (fn11 & QUAL_I) {
815 helper(vc, cpu_env, vb);
816 gen_fp_exc_raise(rc, fn11);
817 } else {
818 helper(vc, cpu_env, vb);
822 #define IEEE_INTCVT(name) \
823 static inline void glue(gen_, name)(DisasContext *ctx, \
824 int rb, int rc, int fn11) \
826 gen_ieee_intcvt(ctx, gen_helper_##name, rb, rc, fn11); \
828 IEEE_INTCVT(cvtqs)
829 IEEE_INTCVT(cvtqt)
831 static void gen_cpy_mask(TCGv vc, TCGv va, TCGv vb, bool inv_a, uint64_t mask)
833 TCGv vmask = tcg_const_i64(mask);
834 TCGv tmp = tcg_temp_new_i64();
836 if (inv_a) {
837 tcg_gen_andc_i64(tmp, vmask, va);
838 } else {
839 tcg_gen_and_i64(tmp, va, vmask);
842 tcg_gen_andc_i64(vc, vb, vmask);
843 tcg_gen_or_i64(vc, vc, tmp);
845 tcg_temp_free(vmask);
846 tcg_temp_free(tmp);
849 static void gen_ieee_arith3(DisasContext *ctx,
850 void (*helper)(TCGv, TCGv_ptr, TCGv, TCGv),
851 int ra, int rb, int rc, int fn11)
853 TCGv va, vb, vc;
855 gen_qual_roundmode(ctx, fn11);
856 gen_qual_flushzero(ctx, fn11);
858 va = gen_ieee_input(ctx, ra, fn11, 0);
859 vb = gen_ieee_input(ctx, rb, fn11, 0);
860 vc = dest_fpr(ctx, rc);
861 helper(vc, cpu_env, va, vb);
863 gen_fp_exc_raise(rc, fn11);
866 #define IEEE_ARITH3(name) \
867 static inline void glue(gen_, name)(DisasContext *ctx, \
868 int ra, int rb, int rc, int fn11) \
870 gen_ieee_arith3(ctx, gen_helper_##name, ra, rb, rc, fn11); \
872 IEEE_ARITH3(adds)
873 IEEE_ARITH3(subs)
874 IEEE_ARITH3(muls)
875 IEEE_ARITH3(divs)
876 IEEE_ARITH3(addt)
877 IEEE_ARITH3(subt)
878 IEEE_ARITH3(mult)
879 IEEE_ARITH3(divt)
881 static void gen_ieee_compare(DisasContext *ctx,
882 void (*helper)(TCGv, TCGv_ptr, TCGv, TCGv),
883 int ra, int rb, int rc, int fn11)
885 TCGv va, vb, vc;
887 va = gen_ieee_input(ctx, ra, fn11, 1);
888 vb = gen_ieee_input(ctx, rb, fn11, 1);
889 vc = dest_fpr(ctx, rc);
890 helper(vc, cpu_env, va, vb);
892 gen_fp_exc_raise(rc, fn11);
895 #define IEEE_CMP3(name) \
896 static inline void glue(gen_, name)(DisasContext *ctx, \
897 int ra, int rb, int rc, int fn11) \
899 gen_ieee_compare(ctx, gen_helper_##name, ra, rb, rc, fn11); \
901 IEEE_CMP3(cmptun)
902 IEEE_CMP3(cmpteq)
903 IEEE_CMP3(cmptlt)
904 IEEE_CMP3(cmptle)
906 static inline uint64_t zapnot_mask(uint8_t lit)
908 uint64_t mask = 0;
909 int i;
911 for (i = 0; i < 8; ++i) {
912 if ((lit >> i) & 1) {
913 mask |= 0xffull << (i * 8);
916 return mask;
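/* Worked examples (illustrative): zapnot_mask(0x01) == 0xff,
 * zapnot_mask(0x0f) == 0xffffffff, and zapnot_mask(0xff) == ~0ull.
 * Bit i of LIT preserves byte i of the operand, which is why the common
 * cases in gen_zapnoti below collapse into plain zero-extensions or a
 * move. */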
919 /* Implement zapnot with an immediate operand, which expands to some
920 form of immediate AND. This is a basic building block in the
921 definition of many of the other byte manipulation instructions. */
922 static void gen_zapnoti(TCGv dest, TCGv src, uint8_t lit)
924 switch (lit) {
925 case 0x00:
926 tcg_gen_movi_i64(dest, 0);
927 break;
928 case 0x01:
929 tcg_gen_ext8u_i64(dest, src);
930 break;
931 case 0x03:
932 tcg_gen_ext16u_i64(dest, src);
933 break;
934 case 0x0f:
935 tcg_gen_ext32u_i64(dest, src);
936 break;
937 case 0xff:
938 tcg_gen_mov_i64(dest, src);
939 break;
940 default:
941 tcg_gen_andi_i64(dest, src, zapnot_mask(lit));
942 break;
946 /* EXTWH, EXTLH, EXTQH */
947 static void gen_ext_h(DisasContext *ctx, TCGv vc, TCGv va, int rb, bool islit,
948 uint8_t lit, uint8_t byte_mask)
950 if (islit) {
951 tcg_gen_shli_i64(vc, va, (64 - lit * 8) & 0x3f);
952 } else {
953 TCGv tmp = tcg_temp_new();
954 tcg_gen_shli_i64(tmp, load_gpr(ctx, rb), 3);
955 tcg_gen_neg_i64(tmp, tmp);
956 tcg_gen_andi_i64(tmp, tmp, 0x3f);
957 tcg_gen_shl_i64(vc, va, tmp);
958 tcg_temp_free(tmp);
960 gen_zapnoti(vc, vc, byte_mask);
963 /* EXTBL, EXTWL, EXTLL, EXTQL */
964 static void gen_ext_l(DisasContext *ctx, TCGv vc, TCGv va, int rb, bool islit,
965 uint8_t lit, uint8_t byte_mask)
967 if (islit) {
968 tcg_gen_shri_i64(vc, va, (lit & 7) * 8);
969 } else {
970 TCGv tmp = tcg_temp_new();
971 tcg_gen_andi_i64(tmp, load_gpr(ctx, rb), 7);
972 tcg_gen_shli_i64(tmp, tmp, 3);
973 tcg_gen_shr_i64(vc, va, tmp);
974 tcg_temp_free(tmp);
976 gen_zapnoti(vc, vc, byte_mask);
979 /* INSWH, INSLH, INSQH */
980 static void gen_ins_h(DisasContext *ctx, TCGv vc, TCGv va, int rb, bool islit,
981 uint8_t lit, uint8_t byte_mask)
983 TCGv tmp = tcg_temp_new();
985 /* The instruction description has us left-shift the byte mask and extract
986 bits <15:8> and apply that zap at the end. This is equivalent to simply
987 performing the zap first and shifting afterward. */
988 gen_zapnoti(tmp, va, byte_mask);
990 if (islit) {
991 lit &= 7;
992 if (unlikely(lit == 0)) {
993 tcg_gen_movi_i64(vc, 0);
994 } else {
995 tcg_gen_shri_i64(vc, tmp, 64 - lit * 8);
997 } else {
998 TCGv shift = tcg_temp_new();
1000 /* If (B & 7) == 0, we need to shift by 64 and leave a zero. Do this
1001 portably by splitting the shift into two parts: shift_count-1 and 1.
1002 Arrange for the -1 by using ones-complement instead of
1003 twos-complement in the negation: ~(B * 8) & 63. */
1005 tcg_gen_shli_i64(shift, load_gpr(ctx, rb), 3);
1006 tcg_gen_not_i64(shift, shift);
1007 tcg_gen_andi_i64(shift, shift, 0x3f);
1009 tcg_gen_shr_i64(vc, tmp, shift);
1010 tcg_gen_shri_i64(vc, vc, 1);
1011 tcg_temp_free(shift);
1013 tcg_temp_free(tmp);
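/* Worked example (illustrative): for (B & 7) == 1 the required right shift
 * is 64 - 8 == 56; the code computes ~(1 * 8) & 63 == 55 and follows with a
 * constant shift of 1, for 56 in total. For (B & 7) == 0 it computes
 * 63 + 1 == 64, producing the zero that a single TCG shift of 64 could not
 * portably express. */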
1016 /* INSBL, INSWL, INSLL, INSQL */
1017 static void gen_ins_l(DisasContext *ctx, TCGv vc, TCGv va, int rb, bool islit,
1018 uint8_t lit, uint8_t byte_mask)
1020 TCGv tmp = tcg_temp_new();
1022 /* The instruction description has us left-shift the byte mask
1023 the same number of byte slots as the data and apply the zap
1024 at the end. This is equivalent to simply performing the zap
1025 first and shifting afterward. */
1026 gen_zapnoti(tmp, va, byte_mask);
1028 if (islit) {
1029 tcg_gen_shli_i64(vc, tmp, (lit & 7) * 8);
1030 } else {
1031 TCGv shift = tcg_temp_new();
1032 tcg_gen_andi_i64(shift, load_gpr(ctx, rb), 7);
1033 tcg_gen_shli_i64(shift, shift, 3);
1034 tcg_gen_shl_i64(vc, tmp, shift);
1035 tcg_temp_free(shift);
1037 tcg_temp_free(tmp);
1040 /* MSKWH, MSKLH, MSKQH */
1041 static void gen_msk_h(DisasContext *ctx, TCGv vc, TCGv va, int rb, bool islit,
1042 uint8_t lit, uint8_t byte_mask)
1044 if (islit) {
1045 gen_zapnoti(vc, va, ~((byte_mask << (lit & 7)) >> 8));
1046 } else {
1047 TCGv shift = tcg_temp_new();
1048 TCGv mask = tcg_temp_new();
1050 /* The instruction description is as above, where the byte_mask
1051 is shifted left, and then we extract bits <15:8>. This can be
1052 emulated with a right-shift on the expanded byte mask. This
1053 requires extra care because for an input <2:0> == 0 we need a
1054 shift of 64 bits in order to generate a zero. This is done by
1055 splitting the shift into two parts, the variable shift - 1
1056 followed by a constant 1 shift. The code we expand below is
1057 equivalent to ~(B * 8) & 63. */
1059 tcg_gen_shli_i64(shift, load_gpr(ctx, rb), 3);
1060 tcg_gen_not_i64(shift, shift);
1061 tcg_gen_andi_i64(shift, shift, 0x3f);
1062 tcg_gen_movi_i64(mask, zapnot_mask(byte_mask));
1063 tcg_gen_shr_i64(mask, mask, shift);
1064 tcg_gen_shri_i64(mask, mask, 1);
1066 tcg_gen_andc_i64(vc, va, mask);
1068 tcg_temp_free(mask);
1069 tcg_temp_free(shift);
1073 /* MSKBL, MSKWL, MSKLL, MSKQL */
1074 static void gen_msk_l(DisasContext *ctx, TCGv vc, TCGv va, int rb, bool islit,
1075 uint8_t lit, uint8_t byte_mask)
1077 if (islit) {
1078 gen_zapnoti(vc, va, ~(byte_mask << (lit & 7)));
1079 } else {
1080 TCGv shift = tcg_temp_new();
1081 TCGv mask = tcg_temp_new();
1083 tcg_gen_andi_i64(shift, load_gpr(ctx, rb), 7);
1084 tcg_gen_shli_i64(shift, shift, 3);
1085 tcg_gen_movi_i64(mask, zapnot_mask(byte_mask));
1086 tcg_gen_shl_i64(mask, mask, shift);
1088 tcg_gen_andc_i64(vc, va, mask);
1090 tcg_temp_free(mask);
1091 tcg_temp_free(shift);
1095 static void gen_rx(DisasContext *ctx, int ra, int set)
1097 TCGv_i32 tmp;
1099 if (ra != 31) {
1100 tcg_gen_ld8u_i64(ctx->ir[ra], cpu_env,
1101 offsetof(CPUAlphaState, intr_flag));
1104 tmp = tcg_const_i32(set);
1105 tcg_gen_st8_i32(tmp, cpu_env, offsetof(CPUAlphaState, intr_flag));
1106 tcg_temp_free_i32(tmp);
1109 static ExitStatus gen_call_pal(DisasContext *ctx, int palcode)
1111 /* We're emulating OSF/1 PALcode. Many of these are trivial accesses
1112 to internal cpu registers. */
1114 /* Unprivileged PAL call */
1115 if (palcode >= 0x80 && palcode < 0xC0) {
1116 switch (palcode) {
1117 case 0x86:
1118 /* IMB */
1119 /* No-op inside QEMU. */
1120 break;
1121 case 0x9E:
1122 /* RDUNIQUE */
1123 tcg_gen_ld_i64(ctx->ir[IR_V0], cpu_env,
1124 offsetof(CPUAlphaState, unique));
1125 break;
1126 case 0x9F:
1127 /* WRUNIQUE */
1128 tcg_gen_st_i64(ctx->ir[IR_A0], cpu_env,
1129 offsetof(CPUAlphaState, unique));
1130 break;
1131 default:
1132 palcode &= 0xbf;
1133 goto do_call_pal;
1135 return NO_EXIT;
1138 #ifndef CONFIG_USER_ONLY
1139 /* Privileged PAL code */
1140 if (palcode < 0x40 && (ctx->tb->flags & TB_FLAGS_USER_MODE) == 0) {
1141 switch (palcode) {
1142 case 0x01:
1143 /* CFLUSH */
1144 /* No-op inside QEMU. */
1145 break;
1146 case 0x02:
1147 /* DRAINA */
1148 /* No-op inside QEMU. */
1149 break;
1150 case 0x2D:
1151 /* WRVPTPTR */
1152 tcg_gen_st_i64(ctx->ir[IR_A0], cpu_env,
1153 offsetof(CPUAlphaState, vptptr));
1154 break;
1155 case 0x31:
1156 /* WRVAL */
1157 tcg_gen_st_i64(ctx->ir[IR_A0], cpu_env,
1158 offsetof(CPUAlphaState, sysval));
1159 break;
1160 case 0x32:
1161 /* RDVAL */
1162 tcg_gen_ld_i64(ctx->ir[IR_V0], cpu_env,
1163 offsetof(CPUAlphaState, sysval));
1164 break;
1166 case 0x35: {
1167 /* SWPIPL */
1168 TCGv tmp;
1170 /* Note that we already know we're in kernel mode, so we know
1171 that PS only contains the 3 IPL bits. */
1172 tcg_gen_ld8u_i64(ctx->ir[IR_V0], cpu_env,
1173 offsetof(CPUAlphaState, ps));
1175 /* But make sure to store only the 3 IPL bits from the user. */
1176 tmp = tcg_temp_new();
1177 tcg_gen_andi_i64(tmp, ctx->ir[IR_A0], PS_INT_MASK);
1178 tcg_gen_st8_i64(tmp, cpu_env, offsetof(CPUAlphaState, ps));
1179 tcg_temp_free(tmp);
1180 break;
1183 case 0x36:
1184 /* RDPS */
1185 tcg_gen_ld8u_i64(ctx->ir[IR_V0], cpu_env,
1186 offsetof(CPUAlphaState, ps));
1187 break;
1188 case 0x38:
1189 /* WRUSP */
1190 tcg_gen_st_i64(ctx->ir[IR_A0], cpu_env,
1191 offsetof(CPUAlphaState, usp));
1192 break;
1193 case 0x3A:
1194 /* RDUSP */
1195 tcg_gen_ld_i64(ctx->ir[IR_V0], cpu_env,
1196 offsetof(CPUAlphaState, usp));
1197 break;
1198 case 0x3C:
1199 /* WHAMI */
1200 tcg_gen_ld32s_i64(ctx->ir[IR_V0], cpu_env,
1201 -offsetof(AlphaCPU, env) + offsetof(CPUState, cpu_index));
1202 break;
1204 default:
1205 palcode &= 0x3f;
1206 goto do_call_pal;
1208 return NO_EXIT;
1210 #endif
1211 return gen_invalid(ctx);
1213 do_call_pal:
1214 #ifdef CONFIG_USER_ONLY
1215 return gen_excp(ctx, EXCP_CALL_PAL, palcode);
1216 #else
1218 TCGv tmp = tcg_temp_new();
1219 uint64_t exc_addr = ctx->pc;
1220 uint64_t entry = ctx->palbr;
1222 if (ctx->tb->flags & TB_FLAGS_PAL_MODE) {
1223 exc_addr |= 1;
1224 } else {
1225 tcg_gen_movi_i64(tmp, 1);
1226 tcg_gen_st8_i64(tmp, cpu_env, offsetof(CPUAlphaState, pal_mode));
1229 tcg_gen_movi_i64(tmp, exc_addr);
1230 tcg_gen_st_i64(tmp, cpu_env, offsetof(CPUAlphaState, exc_addr));
1231 tcg_temp_free(tmp);
1233 entry += (palcode & 0x80
1234 ? 0x2000 + (palcode - 0x80) * 64
1235 : 0x1000 + palcode * 64);
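/* Worked example (illustrative): with this OSF/1 layout, an unprivileged
 * CALL_PAL 0x83 (callsys) enters at palbr + 0x2000 + 3 * 64, while a
 * privileged palcode P < 0x40 enters at palbr + 0x1000 + P * 64. */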
1237 /* Since the destination is running in PALmode, we don't really
1238 need the page permissions check. We'll see the existence of
1239 the page when we create the TB, and we'll flush all TBs if
1240 we change the PAL base register. */
1241 if (!ctx->singlestep_enabled && !(ctx->tb->cflags & CF_LAST_IO)) {
1242 tcg_gen_goto_tb(0);
1243 tcg_gen_movi_i64(cpu_pc, entry);
1244 tcg_gen_exit_tb((uintptr_t)ctx->tb);
1245 return EXIT_GOTO_TB;
1246 } else {
1247 tcg_gen_movi_i64(cpu_pc, entry);
1248 return EXIT_PC_UPDATED;
1251 #endif
1254 #ifndef CONFIG_USER_ONLY
1256 #define PR_BYTE 0x100000
1257 #define PR_LONG 0x200000
1259 static int cpu_pr_data(int pr)
1261 switch (pr) {
1262 case 0: return offsetof(CPUAlphaState, ps) | PR_BYTE;
1263 case 1: return offsetof(CPUAlphaState, fen) | PR_BYTE;
1264 case 2: return offsetof(CPUAlphaState, pcc_ofs) | PR_LONG;
1265 case 3: return offsetof(CPUAlphaState, trap_arg0);
1266 case 4: return offsetof(CPUAlphaState, trap_arg1);
1267 case 5: return offsetof(CPUAlphaState, trap_arg2);
1268 case 6: return offsetof(CPUAlphaState, exc_addr);
1269 case 7: return offsetof(CPUAlphaState, palbr);
1270 case 8: return offsetof(CPUAlphaState, ptbr);
1271 case 9: return offsetof(CPUAlphaState, vptptr);
1272 case 10: return offsetof(CPUAlphaState, unique);
1273 case 11: return offsetof(CPUAlphaState, sysval);
1274 case 12: return offsetof(CPUAlphaState, usp);
1276 case 40 ... 63:
1277 return offsetof(CPUAlphaState, scratch[pr - 40]);
1279 case 251:
1280 return offsetof(CPUAlphaState, alarm_expire);
1282 return 0;
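/* Illustrative note: the return value packs an env offset together with an
 * access width, e.g. cpu_pr_data(0) == offsetof(CPUAlphaState, ps) | PR_BYTE.
 * The callers below mask off PR_BYTE/PR_LONG to recover the offset and pick
 * the matching load/store width; a return of 0 marks the register as
 * read-zero, write-ignore. */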
1285 static ExitStatus gen_mfpr(DisasContext *ctx, TCGv va, int regno)
1287 void (*helper)(TCGv);
1288 int data;
1290 switch (regno) {
1291 case 32 ... 39:
1292 /* Accessing the "non-shadow" general registers. */
1293 regno = regno == 39 ? 25 : regno - 32 + 8;
1294 tcg_gen_mov_i64(va, cpu_std_ir[regno]);
1295 break;
1297 case 250: /* WALLTIME */
1298 helper = gen_helper_get_walltime;
1299 goto do_helper;
1300 case 249: /* VMTIME */
1301 helper = gen_helper_get_vmtime;
1302 do_helper:
1303 if (use_icount) {
1304 gen_io_start();
1305 helper(va);
1306 gen_io_end();
1307 return EXIT_PC_STALE;
1308 } else {
1309 helper(va);
1311 break;
1313 default:
1314 /* The basic registers are data only, and unknown registers
1315 are read-zero, write-ignore. */
1316 data = cpu_pr_data(regno);
1317 if (data == 0) {
1318 tcg_gen_movi_i64(va, 0);
1319 } else if (data & PR_BYTE) {
1320 tcg_gen_ld8u_i64(va, cpu_env, data & ~PR_BYTE);
1321 } else if (data & PR_LONG) {
1322 tcg_gen_ld32s_i64(va, cpu_env, data & ~PR_LONG);
1323 } else {
1324 tcg_gen_ld_i64(va, cpu_env, data);
1326 break;
1329 return NO_EXIT;
1332 static ExitStatus gen_mtpr(DisasContext *ctx, TCGv vb, int regno)
1334 TCGv tmp;
1335 int data;
1337 switch (regno) {
1338 case 255:
1339 /* TBIA */
1340 gen_helper_tbia(cpu_env);
1341 break;
1343 case 254:
1344 /* TBIS */
1345 gen_helper_tbis(cpu_env, vb);
1346 break;
1348 case 253:
1349 /* WAIT */
1350 tmp = tcg_const_i64(1);
1351 tcg_gen_st32_i64(tmp, cpu_env, -offsetof(AlphaCPU, env) +
1352 offsetof(CPUState, halted));
1353 return gen_excp(ctx, EXCP_HLT, 0);
1355 case 252:
1356 /* HALT */
1357 gen_helper_halt(vb);
1358 return EXIT_PC_STALE;
1360 case 251:
1361 /* ALARM */
1362 gen_helper_set_alarm(cpu_env, vb);
1363 break;
1365 case 7:
1366 /* PALBR */
1367 tcg_gen_st_i64(vb, cpu_env, offsetof(CPUAlphaState, palbr));
1368 /* Changing the PAL base register implies un-chaining all of the TBs
1369 that ended with a CALL_PAL. Since the base register usually only
1370 changes during boot, flushing everything works well. */
1371 gen_helper_tb_flush(cpu_env);
1372 return EXIT_PC_STALE;
1374 case 32 ... 39:
1375 /* Accessing the "non-shadow" general registers. */
1376 regno = regno == 39 ? 25 : regno - 32 + 8;
1377 tcg_gen_mov_i64(cpu_std_ir[regno], vb);
1378 break;
1380 default:
1381 /* The basic registers are data only, and unknown registers
1382 are read-zero, write-ignore. */
1383 data = cpu_pr_data(regno);
1384 if (data != 0) {
1385 if (data & PR_BYTE) {
1386 tcg_gen_st8_i64(vb, cpu_env, data & ~PR_BYTE);
1387 } else if (data & PR_LONG) {
1388 tcg_gen_st32_i64(vb, cpu_env, data & ~PR_LONG);
1389 } else {
1390 tcg_gen_st_i64(vb, cpu_env, data);
1393 break;
1396 return NO_EXIT;
1398 #endif /* !CONFIG_USER_ONLY */
1400 #define REQUIRE_NO_LIT \
1401 do { \
1402 if (real_islit) { \
1403 goto invalid_opc; \
1405 } while (0)
1407 #define REQUIRE_TB_FLAG(FLAG) \
1408 do { \
1409 if ((ctx->tb->flags & (FLAG)) == 0) { \
1410 goto invalid_opc; \
1412 } while (0)
1414 #define REQUIRE_REG_31(WHICH) \
1415 do { \
1416 if (WHICH != 31) { \
1417 goto invalid_opc; \
1419 } while (0)
1421 static ExitStatus translate_one(DisasContext *ctx, uint32_t insn)
1423 int32_t disp21, disp16, disp12 __attribute__((unused));
1424 uint16_t fn11;
1425 uint8_t opc, ra, rb, rc, fpfn, fn7, lit;
1426 bool islit, real_islit;
1427 TCGv va, vb, vc, tmp, tmp2;
1428 TCGv_i32 t32;
1429 ExitStatus ret;
1431 /* Decode all instruction fields */
1432 opc = extract32(insn, 26, 6);
1433 ra = extract32(insn, 21, 5);
1434 rb = extract32(insn, 16, 5);
1435 rc = extract32(insn, 0, 5);
1436 real_islit = islit = extract32(insn, 12, 1);
1437 lit = extract32(insn, 13, 8);
1439 disp21 = sextract32(insn, 0, 21);
1440 disp16 = sextract32(insn, 0, 16);
1441 disp12 = sextract32(insn, 0, 12);
1443 fn11 = extract32(insn, 5, 11);
1444 fpfn = extract32(insn, 5, 6);
1445 fn7 = extract32(insn, 5, 7);
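/* Field layout sketch (illustrative) for the integer operate format as
 * decoded above:
 *
 *    31    26 25  21 20       13 12 11     5 4   0
 *   +--------+------+-----------+--+--------+-----+
 *   |  opc   |  ra  |  rb / lit |I |  fn7   | rc  |
 *   +--------+------+-----------+--+--------+-----+
 *
 * Bit 12 (islit) selects the 8-bit literal in <20:13> in place of rb in
 * <20:16>; FP operates use fn11 in <15:5>, and the memory and branch
 * formats reuse <15:0> and <20:0> as disp16 and disp21. */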
1447 if (rb == 31 && !islit) {
1448 islit = true;
1449 lit = 0;
1452 ret = NO_EXIT;
1453 switch (opc) {
1454 case 0x00:
1455 /* CALL_PAL */
1456 ret = gen_call_pal(ctx, insn & 0x03ffffff);
1457 break;
1458 case 0x01:
1459 /* OPC01 */
1460 goto invalid_opc;
1461 case 0x02:
1462 /* OPC02 */
1463 goto invalid_opc;
1464 case 0x03:
1465 /* OPC03 */
1466 goto invalid_opc;
1467 case 0x04:
1468 /* OPC04 */
1469 goto invalid_opc;
1470 case 0x05:
1471 /* OPC05 */
1472 goto invalid_opc;
1473 case 0x06:
1474 /* OPC06 */
1475 goto invalid_opc;
1476 case 0x07:
1477 /* OPC07 */
1478 goto invalid_opc;
1480 case 0x09:
1481 /* LDAH */
1482 disp16 = (uint32_t)disp16 << 16;
1483 /* fall through */
1484 case 0x08:
1485 /* LDA */
1486 va = dest_gpr(ctx, ra);
1487 /* It's worth special-casing immediate loads. */
1488 if (rb == 31) {
1489 tcg_gen_movi_i64(va, disp16);
1490 } else {
1491 tcg_gen_addi_i64(va, load_gpr(ctx, rb), disp16);
1493 break;
1495 case 0x0A:
1496 /* LDBU */
1497 REQUIRE_TB_FLAG(TB_FLAGS_AMASK_BWX);
1498 gen_load_mem(ctx, &tcg_gen_qemu_ld8u, ra, rb, disp16, 0, 0);
1499 break;
1500 case 0x0B:
1501 /* LDQ_U */
1502 gen_load_mem(ctx, &tcg_gen_qemu_ld64, ra, rb, disp16, 0, 1);
1503 break;
1504 case 0x0C:
1505 /* LDWU */
1506 REQUIRE_TB_FLAG(TB_FLAGS_AMASK_BWX);
1507 gen_load_mem(ctx, &tcg_gen_qemu_ld16u, ra, rb, disp16, 0, 0);
1508 break;
1509 case 0x0D:
1510 /* STW */
1511 REQUIRE_TB_FLAG(TB_FLAGS_AMASK_BWX);
1512 gen_store_mem(ctx, &tcg_gen_qemu_st16, ra, rb, disp16, 0, 0);
1513 break;
1514 case 0x0E:
1515 /* STB */
1516 REQUIRE_TB_FLAG(TB_FLAGS_AMASK_BWX);
1517 gen_store_mem(ctx, &tcg_gen_qemu_st8, ra, rb, disp16, 0, 0);
1518 break;
1519 case 0x0F:
1520 /* STQ_U */
1521 gen_store_mem(ctx, &tcg_gen_qemu_st64, ra, rb, disp16, 0, 1);
1522 break;
1524 case 0x10:
1525 vc = dest_gpr(ctx, rc);
1526 vb = load_gpr_lit(ctx, rb, lit, islit);
1528 if (ra == 31) {
1529 if (fn7 == 0x00) {
1530 /* Special case ADDL as SEXTL. */
1531 tcg_gen_ext32s_i64(vc, vb);
1532 break;
1534 if (fn7 == 0x29) {
1535 /* Special case SUBQ as NEGQ. */
1536 tcg_gen_neg_i64(vc, vb);
1537 break;
1541 va = load_gpr(ctx, ra);
1542 switch (fn7) {
1543 case 0x00:
1544 /* ADDL */
1545 tcg_gen_add_i64(vc, va, vb);
1546 tcg_gen_ext32s_i64(vc, vc);
1547 break;
1548 case 0x02:
1549 /* S4ADDL */
1550 tmp = tcg_temp_new();
1551 tcg_gen_shli_i64(tmp, va, 2);
1552 tcg_gen_add_i64(tmp, tmp, vb);
1553 tcg_gen_ext32s_i64(vc, tmp);
1554 tcg_temp_free(tmp);
1555 break;
1556 case 0x09:
1557 /* SUBL */
1558 tcg_gen_sub_i64(vc, va, vb);
1559 tcg_gen_ext32s_i64(vc, vc);
1560 break;
1561 case 0x0B:
1562 /* S4SUBL */
1563 tmp = tcg_temp_new();
1564 tcg_gen_shli_i64(tmp, va, 2);
1565 tcg_gen_sub_i64(tmp, tmp, vb);
1566 tcg_gen_ext32s_i64(vc, tmp);
1567 tcg_temp_free(tmp);
1568 break;
1569 case 0x0F:
1570 /* CMPBGE */
1571 if (ra == 31) {
1572 /* Special case 0 >= X as X == 0. */
1573 gen_helper_cmpbe0(vc, vb);
1574 } else {
1575 gen_helper_cmpbge(vc, va, vb);
1577 break;
1578 case 0x12:
1579 /* S8ADDL */
1580 tmp = tcg_temp_new();
1581 tcg_gen_shli_i64(tmp, va, 3);
1582 tcg_gen_add_i64(tmp, tmp, vb);
1583 tcg_gen_ext32s_i64(vc, tmp);
1584 tcg_temp_free(tmp);
1585 break;
1586 case 0x1B:
1587 /* S8SUBL */
1588 tmp = tcg_temp_new();
1589 tcg_gen_shli_i64(tmp, va, 3);
1590 tcg_gen_sub_i64(tmp, tmp, vb);
1591 tcg_gen_ext32s_i64(vc, tmp);
1592 tcg_temp_free(tmp);
1593 break;
1594 case 0x1D:
1595 /* CMPULT */
1596 tcg_gen_setcond_i64(TCG_COND_LTU, vc, va, vb);
1597 break;
1598 case 0x20:
1599 /* ADDQ */
1600 tcg_gen_add_i64(vc, va, vb);
1601 break;
1602 case 0x22:
1603 /* S4ADDQ */
1604 tmp = tcg_temp_new();
1605 tcg_gen_shli_i64(tmp, va, 2);
1606 tcg_gen_add_i64(vc, tmp, vb);
1607 tcg_temp_free(tmp);
1608 break;
1609 case 0x29:
1610 /* SUBQ */
1611 tcg_gen_sub_i64(vc, va, vb);
1612 break;
1613 case 0x2B:
1614 /* S4SUBQ */
1615 tmp = tcg_temp_new();
1616 tcg_gen_shli_i64(tmp, va, 2);
1617 tcg_gen_sub_i64(vc, tmp, vb);
1618 tcg_temp_free(tmp);
1619 break;
1620 case 0x2D:
1621 /* CMPEQ */
1622 tcg_gen_setcond_i64(TCG_COND_EQ, vc, va, vb);
1623 break;
1624 case 0x32:
1625 /* S8ADDQ */
1626 tmp = tcg_temp_new();
1627 tcg_gen_shli_i64(tmp, va, 3);
1628 tcg_gen_add_i64(vc, tmp, vb);
1629 tcg_temp_free(tmp);
1630 break;
1631 case 0x3B:
1632 /* S8SUBQ */
1633 tmp = tcg_temp_new();
1634 tcg_gen_shli_i64(tmp, va, 3);
1635 tcg_gen_sub_i64(vc, tmp, vb);
1636 tcg_temp_free(tmp);
1637 break;
1638 case 0x3D:
1639 /* CMPULE */
1640 tcg_gen_setcond_i64(TCG_COND_LEU, vc, va, vb);
1641 break;
1642 case 0x40:
1643 /* ADDL/V */
1644 tmp = tcg_temp_new();
1645 tcg_gen_ext32s_i64(tmp, va);
1646 tcg_gen_ext32s_i64(vc, vb);
1647 tcg_gen_add_i64(tmp, tmp, vc);
1648 tcg_gen_ext32s_i64(vc, tmp);
1649 gen_helper_check_overflow(cpu_env, vc, tmp);
1650 tcg_temp_free(tmp);
1651 break;
1652 case 0x49:
1653 /* SUBL/V */
1654 tmp = tcg_temp_new();
1655 tcg_gen_ext32s_i64(tmp, va);
1656 tcg_gen_ext32s_i64(vc, vb);
1657 tcg_gen_sub_i64(tmp, tmp, vc);
1658 tcg_gen_ext32s_i64(vc, tmp);
1659 gen_helper_check_overflow(cpu_env, vc, tmp);
1660 tcg_temp_free(tmp);
1661 break;
1662 case 0x4D:
1663 /* CMPLT */
1664 tcg_gen_setcond_i64(TCG_COND_LT, vc, va, vb);
1665 break;
1666 case 0x60:
1667 /* ADDQ/V */
1668 tmp = tcg_temp_new();
1669 tmp2 = tcg_temp_new();
1670 tcg_gen_eqv_i64(tmp, va, vb);
1671 tcg_gen_mov_i64(tmp2, va);
1672 tcg_gen_add_i64(vc, va, vb);
1673 tcg_gen_xor_i64(tmp2, tmp2, vc);
1674 tcg_gen_and_i64(tmp, tmp, tmp2);
1675 tcg_gen_shri_i64(tmp, tmp, 63);
1676 tcg_gen_movi_i64(tmp2, 0);
1677 gen_helper_check_overflow(cpu_env, tmp, tmp2);
1678 tcg_temp_free(tmp);
1679 tcg_temp_free(tmp2);
1680 break;
1681 case 0x69:
1682 /* SUBQ/V */
1683 tmp = tcg_temp_new();
1684 tmp2 = tcg_temp_new();
1685 tcg_gen_xor_i64(tmp, va, vb);
1686 tcg_gen_mov_i64(tmp2, va);
1687 tcg_gen_sub_i64(vc, va, vb);
1688 tcg_gen_xor_i64(tmp2, tmp2, vc);
1689 tcg_gen_and_i64(tmp, tmp, tmp2);
1690 tcg_gen_shri_i64(tmp, tmp, 63);
1691 tcg_gen_movi_i64(tmp2, 0);
1692 gen_helper_check_overflow(cpu_env, tmp, tmp2);
1693 tcg_temp_free(tmp);
1694 tcg_temp_free(tmp2);
1695 break;
1696 case 0x6D:
1697 /* CMPLE */
1698 tcg_gen_setcond_i64(TCG_COND_LE, vc, va, vb);
1699 break;
1700 default:
1701 goto invalid_opc;
1703 break;
1705 case 0x11:
1706 if (fn7 == 0x20) {
1707 if (rc == 31) {
1708 /* Special case BIS as NOP. */
1709 break;
1711 if (ra == 31) {
1712 /* Special case BIS as MOV. */
1713 vc = dest_gpr(ctx, rc);
1714 if (islit) {
1715 tcg_gen_movi_i64(vc, lit);
1716 } else {
1717 tcg_gen_mov_i64(vc, load_gpr(ctx, rb));
1719 break;
1723 vc = dest_gpr(ctx, rc);
1724 vb = load_gpr_lit(ctx, rb, lit, islit);
1726 if (fn7 == 0x28 && ra == 31) {
1727 /* Special case ORNOT as NOT. */
1728 tcg_gen_not_i64(vc, vb);
1729 break;
1732 va = load_gpr(ctx, ra);
1733 switch (fn7) {
1734 case 0x00:
1735 /* AND */
1736 tcg_gen_and_i64(vc, va, vb);
1737 break;
1738 case 0x08:
1739 /* BIC */
1740 tcg_gen_andc_i64(vc, va, vb);
1741 break;
1742 case 0x14:
1743 /* CMOVLBS */
1744 tmp = tcg_temp_new();
1745 tcg_gen_andi_i64(tmp, va, 1);
1746 tcg_gen_movcond_i64(TCG_COND_NE, vc, tmp, load_zero(ctx),
1747 vb, load_gpr(ctx, rc));
1748 tcg_temp_free(tmp);
1749 break;
1750 case 0x16:
1751 /* CMOVLBC */
1752 tmp = tcg_temp_new();
1753 tcg_gen_andi_i64(tmp, va, 1);
1754 tcg_gen_movcond_i64(TCG_COND_EQ, vc, tmp, load_zero(ctx),
1755 vb, load_gpr(ctx, rc));
1756 tcg_temp_free(tmp);
1757 break;
1758 case 0x20:
1759 /* BIS */
1760 tcg_gen_or_i64(vc, va, vb);
1761 break;
1762 case 0x24:
1763 /* CMOVEQ */
1764 tcg_gen_movcond_i64(TCG_COND_EQ, vc, va, load_zero(ctx),
1765 vb, load_gpr(ctx, rc));
1766 break;
1767 case 0x26:
1768 /* CMOVNE */
1769 tcg_gen_movcond_i64(TCG_COND_NE, vc, va, load_zero(ctx),
1770 vb, load_gpr(ctx, rc));
1771 break;
1772 case 0x28:
1773 /* ORNOT */
1774 tcg_gen_orc_i64(vc, va, vb);
1775 break;
1776 case 0x40:
1777 /* XOR */
1778 tcg_gen_xor_i64(vc, va, vb);
1779 break;
1780 case 0x44:
1781 /* CMOVLT */
1782 tcg_gen_movcond_i64(TCG_COND_LT, vc, va, load_zero(ctx),
1783 vb, load_gpr(ctx, rc));
1784 break;
1785 case 0x46:
1786 /* CMOVGE */
1787 tcg_gen_movcond_i64(TCG_COND_GE, vc, va, load_zero(ctx),
1788 vb, load_gpr(ctx, rc));
1789 break;
1790 case 0x48:
1791 /* EQV */
1792 tcg_gen_eqv_i64(vc, va, vb);
1793 break;
1794 case 0x61:
1795 /* AMASK */
1796 REQUIRE_REG_31(ra);
1798 uint64_t amask = ctx->tb->flags >> TB_FLAGS_AMASK_SHIFT;
1799 tcg_gen_andi_i64(vc, vb, ~amask);
1801 break;
1802 case 0x64:
1803 /* CMOVLE */
1804 tcg_gen_movcond_i64(TCG_COND_LE, vc, va, load_zero(ctx),
1805 vb, load_gpr(ctx, rc));
1806 break;
1807 case 0x66:
1808 /* CMOVGT */
1809 tcg_gen_movcond_i64(TCG_COND_GT, vc, va, load_zero(ctx),
1810 vb, load_gpr(ctx, rc));
1811 break;
1812 case 0x6C:
1813 /* IMPLVER */
1814 REQUIRE_REG_31(ra);
1815 tcg_gen_movi_i64(vc, ctx->implver);
1816 break;
1817 default:
1818 goto invalid_opc;
1820 break;
1822 case 0x12:
1823 vc = dest_gpr(ctx, rc);
1824 va = load_gpr(ctx, ra);
1825 switch (fn7) {
1826 case 0x02:
1827 /* MSKBL */
1828 gen_msk_l(ctx, vc, va, rb, islit, lit, 0x01);
1829 break;
1830 case 0x06:
1831 /* EXTBL */
1832 gen_ext_l(ctx, vc, va, rb, islit, lit, 0x01);
1833 break;
1834 case 0x0B:
1835 /* INSBL */
1836 gen_ins_l(ctx, vc, va, rb, islit, lit, 0x01);
1837 break;
1838 case 0x12:
1839 /* MSKWL */
1840 gen_msk_l(ctx, vc, va, rb, islit, lit, 0x03);
1841 break;
1842 case 0x16:
1843 /* EXTWL */
1844 gen_ext_l(ctx, vc, va, rb, islit, lit, 0x03);
1845 break;
1846 case 0x1B:
1847 /* INSWL */
1848 gen_ins_l(ctx, vc, va, rb, islit, lit, 0x03);
1849 break;
1850 case 0x22:
1851 /* MSKLL */
1852 gen_msk_l(ctx, vc, va, rb, islit, lit, 0x0f);
1853 break;
1854 case 0x26:
1855 /* EXTLL */
1856 gen_ext_l(ctx, vc, va, rb, islit, lit, 0x0f);
1857 break;
1858 case 0x2B:
1859 /* INSLL */
1860 gen_ins_l(ctx, vc, va, rb, islit, lit, 0x0f);
1861 break;
1862 case 0x30:
1863 /* ZAP */
1864 if (islit) {
1865 gen_zapnoti(vc, va, ~lit);
1866 } else {
1867 gen_helper_zap(vc, va, load_gpr(ctx, rb));
1869 break;
1870 case 0x31:
1871 /* ZAPNOT */
1872 if (islit) {
1873 gen_zapnoti(vc, va, lit);
1874 } else {
1875 gen_helper_zapnot(vc, va, load_gpr(ctx, rb));
1877 break;
1878 case 0x32:
1879 /* MSKQL */
1880 gen_msk_l(ctx, vc, va, rb, islit, lit, 0xff);
1881 break;
1882 case 0x34:
1883 /* SRL */
1884 if (islit) {
1885 tcg_gen_shri_i64(vc, va, lit & 0x3f);
1886 } else {
1887 tmp = tcg_temp_new();
1888 vb = load_gpr(ctx, rb);
1889 tcg_gen_andi_i64(tmp, vb, 0x3f);
1890 tcg_gen_shr_i64(vc, va, tmp);
1891 tcg_temp_free(tmp);
1893 break;
1894 case 0x36:
1895 /* EXTQL */
1896 gen_ext_l(ctx, vc, va, rb, islit, lit, 0xff);
1897 break;
1898 case 0x39:
1899 /* SLL */
1900 if (islit) {
1901 tcg_gen_shli_i64(vc, va, lit & 0x3f);
1902 } else {
1903 tmp = tcg_temp_new();
1904 vb = load_gpr(ctx, rb);
1905 tcg_gen_andi_i64(tmp, vb, 0x3f);
1906 tcg_gen_shl_i64(vc, va, tmp);
1907 tcg_temp_free(tmp);
1909 break;
1910 case 0x3B:
1911 /* INSQL */
1912 gen_ins_l(ctx, vc, va, rb, islit, lit, 0xff);
1913 break;
1914 case 0x3C:
1915 /* SRA */
1916 if (islit) {
1917 tcg_gen_sari_i64(vc, va, lit & 0x3f);
1918 } else {
1919 tmp = tcg_temp_new();
1920 vb = load_gpr(ctx, rb);
1921 tcg_gen_andi_i64(tmp, vb, 0x3f);
1922 tcg_gen_sar_i64(vc, va, tmp);
1923 tcg_temp_free(tmp);
1925 break;
1926 case 0x52:
1927 /* MSKWH */
1928 gen_msk_h(ctx, vc, va, rb, islit, lit, 0x03);
1929 break;
1930 case 0x57:
1931 /* INSWH */
1932 gen_ins_h(ctx, vc, va, rb, islit, lit, 0x03);
1933 break;
1934 case 0x5A:
1935 /* EXTWH */
1936 gen_ext_h(ctx, vc, va, rb, islit, lit, 0x03);
1937 break;
1938 case 0x62:
1939 /* MSKLH */
1940 gen_msk_h(ctx, vc, va, rb, islit, lit, 0x0f);
1941 break;
1942 case 0x67:
1943 /* INSLH */
1944 gen_ins_h(ctx, vc, va, rb, islit, lit, 0x0f);
1945 break;
1946 case 0x6A:
1947 /* EXTLH */
1948 gen_ext_h(ctx, vc, va, rb, islit, lit, 0x0f);
1949 break;
1950 case 0x72:
1951 /* MSKQH */
1952 gen_msk_h(ctx, vc, va, rb, islit, lit, 0xff);
1953 break;
1954 case 0x77:
1955 /* INSQH */
1956 gen_ins_h(ctx, vc, va, rb, islit, lit, 0xff);
1957 break;
1958 case 0x7A:
1959 /* EXTQH */
1960 gen_ext_h(ctx, vc, va, rb, islit, lit, 0xff);
1961 break;
1962 default:
1963 goto invalid_opc;
1965 break;
1967 case 0x13:
1968 vc = dest_gpr(ctx, rc);
1969 vb = load_gpr_lit(ctx, rb, lit, islit);
1970 va = load_gpr(ctx, ra);
1971 switch (fn7) {
1972 case 0x00:
1973 /* MULL */
1974 tcg_gen_mul_i64(vc, va, vb);
1975 tcg_gen_ext32s_i64(vc, vc);
1976 break;
1977 case 0x20:
1978 /* MULQ */
1979 tcg_gen_mul_i64(vc, va, vb);
1980 break;
1981 case 0x30:
1982 /* UMULH */
1983 tmp = tcg_temp_new();
1984 tcg_gen_mulu2_i64(tmp, vc, va, vb);
1985 tcg_temp_free(tmp);
1986 break;
1987 case 0x40:
1988 /* MULL/V */
1989 tmp = tcg_temp_new();
1990 tcg_gen_ext32s_i64(tmp, va);
1991 tcg_gen_ext32s_i64(vc, vb);
1992 tcg_gen_mul_i64(tmp, tmp, vc);
1993 tcg_gen_ext32s_i64(vc, tmp);
1994 gen_helper_check_overflow(cpu_env, vc, tmp);
1995 tcg_temp_free(tmp);
1996 break;
1997 case 0x60:
1998 /* MULQ/V */
1999 tmp = tcg_temp_new();
2000 tmp2 = tcg_temp_new();
2001 tcg_gen_muls2_i64(vc, tmp, va, vb);
2002 tcg_gen_sari_i64(tmp2, vc, 63);
2003 gen_helper_check_overflow(cpu_env, tmp, tmp2);
2004 tcg_temp_free(tmp);
2005 tcg_temp_free(tmp2);
2006 break;
2007 default:
2008 goto invalid_opc;
2010 break;
2012 case 0x14:
2013 REQUIRE_TB_FLAG(TB_FLAGS_AMASK_FIX);
2014 vc = dest_fpr(ctx, rc);
2015 switch (fpfn) { /* fn11 & 0x3F */
2016 case 0x04:
2017 /* ITOFS */
2018 REQUIRE_REG_31(rb);
2019 t32 = tcg_temp_new_i32();
2020 va = load_gpr(ctx, ra);
2021 tcg_gen_extrl_i64_i32(t32, va);
2022 gen_helper_memory_to_s(vc, t32);
2023 tcg_temp_free_i32(t32);
2024 break;
2025 case 0x0A:
2026 /* SQRTF */
2027 REQUIRE_REG_31(ra);
2028 vb = load_fpr(ctx, rb);
2029 gen_helper_sqrtf(vc, cpu_env, vb);
2030 break;
2031 case 0x0B:
2032 /* SQRTS */
2033 REQUIRE_REG_31(ra);
2034 gen_sqrts(ctx, rb, rc, fn11);
2035 break;
2036 case 0x14:
2037 /* ITOFF */
2038 REQUIRE_REG_31(rb);
2039 t32 = tcg_temp_new_i32();
2040 va = load_gpr(ctx, ra);
2041 tcg_gen_extrl_i64_i32(t32, va);
2042 gen_helper_memory_to_f(vc, t32);
2043 tcg_temp_free_i32(t32);
2044 break;
2045 case 0x24:
2046 /* ITOFT */
2047 REQUIRE_REG_31(rb);
2048 va = load_gpr(ctx, ra);
2049 tcg_gen_mov_i64(vc, va);
2050 break;
2051 case 0x2A:
2052 /* SQRTG */
2053 REQUIRE_REG_31(ra);
2054 vb = load_fpr(ctx, rb);
2055 gen_helper_sqrtg(vc, cpu_env, vb);
2056 break;
2057 case 0x02B:
2058 /* SQRTT */
2059 REQUIRE_REG_31(ra);
2060 gen_sqrtt(ctx, rb, rc, fn11);
2061 break;
2062 default:
2063 goto invalid_opc;
2065 break;
2067 case 0x15:
2068 /* VAX floating point */
2069 /* XXX: rounding mode and trap are ignored (!) */
2070 vc = dest_fpr(ctx, rc);
2071 vb = load_fpr(ctx, rb);
2072 va = load_fpr(ctx, ra);
2073 switch (fpfn) { /* fn11 & 0x3F */
2074 case 0x00:
2075 /* ADDF */
2076 gen_helper_addf(vc, cpu_env, va, vb);
2077 break;
2078 case 0x01:
2079 /* SUBF */
2080 gen_helper_subf(vc, cpu_env, va, vb);
2081 break;
2082 case 0x02:
2083 /* MULF */
2084 gen_helper_mulf(vc, cpu_env, va, vb);
2085 break;
2086 case 0x03:
2087 /* DIVF */
2088 gen_helper_divf(vc, cpu_env, va, vb);
2089 break;
2090 case 0x1E:
2091 /* CVTDG -- TODO */
2092 REQUIRE_REG_31(ra);
2093 goto invalid_opc;
2094 case 0x20:
2095 /* ADDG */
2096 gen_helper_addg(vc, cpu_env, va, vb);
2097 break;
2098 case 0x21:
2099 /* SUBG */
2100 gen_helper_subg(vc, cpu_env, va, vb);
2101 break;
2102 case 0x22:
2103 /* MULG */
2104 gen_helper_mulg(vc, cpu_env, va, vb);
2105 break;
2106 case 0x23:
2107 /* DIVG */
2108 gen_helper_divg(vc, cpu_env, va, vb);
2109 break;
2110 case 0x25:
2111 /* CMPGEQ */
2112 gen_helper_cmpgeq(vc, cpu_env, va, vb);
2113 break;
2114 case 0x26:
2115 /* CMPGLT */
2116 gen_helper_cmpglt(vc, cpu_env, va, vb);
2117 break;
2118 case 0x27:
2119 /* CMPGLE */
2120 gen_helper_cmpgle(vc, cpu_env, va, vb);
2121 break;
2122 case 0x2C:
2123 /* CVTGF */
2124 REQUIRE_REG_31(ra);
2125 gen_helper_cvtgf(vc, cpu_env, vb);
2126 break;
2127 case 0x2D:
2128 /* CVTGD -- TODO */
2129 REQUIRE_REG_31(ra);
2130 goto invalid_opc;
2131 case 0x2F:
2132 /* CVTGQ */
2133 REQUIRE_REG_31(ra);
2134 gen_helper_cvtgq(vc, cpu_env, vb);
2135 break;
2136 case 0x3C:
2137 /* CVTQF */
2138 REQUIRE_REG_31(ra);
2139 gen_helper_cvtqf(vc, cpu_env, vb);
2140 break;
2141 case 0x3E:
2142 /* CVTQG */
2143 REQUIRE_REG_31(ra);
2144 gen_helper_cvtqg(vc, cpu_env, vb);
2145 break;
2146 default:
2147 goto invalid_opc;
2149 break;
2151 case 0x16:
2152 /* IEEE floating-point */
2153 switch (fpfn) { /* fn11 & 0x3F */
2154 case 0x00:
2155 /* ADDS */
2156 gen_adds(ctx, ra, rb, rc, fn11);
2157 break;
2158 case 0x01:
2159 /* SUBS */
2160 gen_subs(ctx, ra, rb, rc, fn11);
2161 break;
2162 case 0x02:
2163 /* MULS */
2164 gen_muls(ctx, ra, rb, rc, fn11);
2165 break;
2166 case 0x03:
2167 /* DIVS */
2168 gen_divs(ctx, ra, rb, rc, fn11);
2169 break;
2170 case 0x20:
2171 /* ADDT */
2172 gen_addt(ctx, ra, rb, rc, fn11);
2173 break;
2174 case 0x21:
2175 /* SUBT */
2176 gen_subt(ctx, ra, rb, rc, fn11);
2177 break;
2178 case 0x22:
2179 /* MULT */
2180 gen_mult(ctx, ra, rb, rc, fn11);
2181 break;
2182 case 0x23:
2183 /* DIVT */
2184 gen_divt(ctx, ra, rb, rc, fn11);
2185 break;
2186 case 0x24:
2187 /* CMPTUN */
2188 gen_cmptun(ctx, ra, rb, rc, fn11);
2189 break;
2190 case 0x25:
2191 /* CMPTEQ */
2192 gen_cmpteq(ctx, ra, rb, rc, fn11);
2193 break;
2194 case 0x26:
2195 /* CMPTLT */
2196 gen_cmptlt(ctx, ra, rb, rc, fn11);
2197 break;
2198 case 0x27:
2199 /* CMPTLE */
2200 gen_cmptle(ctx, ra, rb, rc, fn11);
2201 break;
2202 case 0x2C:
2203 REQUIRE_REG_31(ra);
2204 if (fn11 == 0x2AC || fn11 == 0x6AC) {
2205 /* CVTST */
2206 gen_cvtst(ctx, rb, rc, fn11);
2207 } else {
2208 /* CVTTS */
2209 gen_cvtts(ctx, rb, rc, fn11);
2211 break;
2212 case 0x2F:
2213 /* CVTTQ */
2214 REQUIRE_REG_31(ra);
2215 gen_cvttq(ctx, rb, rc, fn11);
2216 break;
2217 case 0x3C:
2218 /* CVTQS */
2219 REQUIRE_REG_31(ra);
2220 gen_cvtqs(ctx, rb, rc, fn11);
2221 break;
2222 case 0x3E:
2223 /* CVTQT */
2224 REQUIRE_REG_31(ra);
2225 gen_cvtqt(ctx, rb, rc, fn11);
2226 break;
2227 default:
2228 goto invalid_opc;
2230 break;
2232 case 0x17:
2233 switch (fn11) {
2234 case 0x010:
2235 /* CVTLQ */
2236 REQUIRE_REG_31(ra);
2237 vc = dest_fpr(ctx, rc);
2238 vb = load_fpr(ctx, rb);
2239 gen_cvtlq(vc, vb);
2240 break;
2241 case 0x020:
2242 /* CPYS */
2243 if (rc == 31) {
2244 /* Special case CPYS as FNOP. */
2245 } else {
2246 vc = dest_fpr(ctx, rc);
2247 va = load_fpr(ctx, ra);
2248 if (ra == rb) {
2249 /* Special case CPYS as FMOV. */
2250 tcg_gen_mov_i64(vc, va);
2251 } else {
2252 vb = load_fpr(ctx, rb);
2253 gen_cpy_mask(vc, va, vb, 0, 0x8000000000000000ULL);
2256 break;
2257 case 0x021:
2258 /* CPYSN */
2259 vc = dest_fpr(ctx, rc);
2260 vb = load_fpr(ctx, rb);
2261 va = load_fpr(ctx, ra);
2262 gen_cpy_mask(vc, va, vb, 1, 0x8000000000000000ULL);
2263 break;
2264 case 0x022:
2265 /* CPYSE */
2266 vc = dest_fpr(ctx, rc);
2267 vb = load_fpr(ctx, rb);
2268 va = load_fpr(ctx, ra);
2269 gen_cpy_mask(vc, va, vb, 0, 0xFFF0000000000000ULL);
2270 break;
2271 case 0x024:
2272 /* MT_FPCR */
2273 va = load_fpr(ctx, ra);
2274 gen_helper_store_fpcr(cpu_env, va);
2275 if (ctx->tb_rm == QUAL_RM_D) {
2276 /* Re-do the copy of the rounding mode to fp_status
2277 the next time we use dynamic rounding. */
2278 ctx->tb_rm = -1;
2280 break;
2281 case 0x025:
2282 /* MF_FPCR */
2283 va = dest_fpr(ctx, ra);
2284 gen_helper_load_fpcr(va, cpu_env);
2285 break;
2286 case 0x02A:
2287 /* FCMOVEQ */
2288 gen_fcmov(ctx, TCG_COND_EQ, ra, rb, rc);
2289 break;
2290 case 0x02B:
2291 /* FCMOVNE */
2292 gen_fcmov(ctx, TCG_COND_NE, ra, rb, rc);
2293 break;
2294 case 0x02C:
2295 /* FCMOVLT */
2296 gen_fcmov(ctx, TCG_COND_LT, ra, rb, rc);
2297 break;
2298 case 0x02D:
2299 /* FCMOVGE */
2300 gen_fcmov(ctx, TCG_COND_GE, ra, rb, rc);
2301 break;
2302 case 0x02E:
2303 /* FCMOVLE */
2304 gen_fcmov(ctx, TCG_COND_LE, ra, rb, rc);
2305 break;
2306 case 0x02F:
2307 /* FCMOVGT */
2308 gen_fcmov(ctx, TCG_COND_GT, ra, rb, rc);
2309 break;
2310 case 0x030: /* CVTQL */
2311 case 0x130: /* CVTQL/V */
2312 case 0x530: /* CVTQL/SV */
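/* All three encodings share one implementation: the /V and /SV
   qualifiers only affect which exceptions gen_fp_exc_raise signals,
   and it recovers that from fn11. */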
2313 REQUIRE_REG_31(ra);
2314 vc = dest_fpr(ctx, rc);
2315 vb = load_fpr(ctx, rb);
2316 gen_helper_cvtql(vc, cpu_env, vb);
2317 gen_fp_exc_raise(rc, fn11);
2318 break;
2319 default:
2320 goto invalid_opc;
2321 }
2322 break;
2324 case 0x18:
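/* Miscellaneous instruction group; the function code is carried in
   the displacement field. The trap/memory barriers and prefetch
   hints require nothing from TCG, so most variants are no-ops;
   only RPCC, RC and RS generate code. */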
2325 switch ((uint16_t)disp16) {
2326 case 0x0000:
2327 /* TRAPB */
2328 /* No-op. */
2329 break;
2330 case 0x0400:
2331 /* EXCB */
2332 /* No-op. */
2333 break;
2334 case 0x4000:
2335 /* MB */
2336 /* No-op */
2337 break;
2338 case 0x4400:
2339 /* WMB */
2340 /* No-op */
2341 break;
2342 case 0x8000:
2343 /* FETCH */
2344 /* No-op */
2345 break;
2346 case 0xA000:
2347 /* FETCH_M */
2348 /* No-op */
2349 break;
2350 case 0xC000:
2351 /* RPCC */
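/* In icount mode a cycle-counter read counts as I/O: bracket the
   helper with gen_io_start/gen_io_end and end the TB afterwards
   (EXIT_PC_STALE), as required after an I/O access. */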
2352 va = dest_gpr(ctx, ra);
2353 if (ctx->tb->cflags & CF_USE_ICOUNT) {
2354 gen_io_start();
2355 gen_helper_load_pcc(va, cpu_env);
2356 gen_io_end();
2357 ret = EXIT_PC_STALE;
2358 } else {
2359 gen_helper_load_pcc(va, cpu_env);
2360 }
2361 break;
2362 case 0xE000:
2363 /* RC */
2364 gen_rx(ctx, ra, 0);
2365 break;
2366 case 0xE800:
2367 /* ECB */
2368 break;
2369 case 0xF000:
2370 /* RS */
2371 gen_rx(ctx, ra, 1);
2372 break;
2373 case 0xF800:
2374 /* WH64 */
2375 /* No-op */
2376 break;
2377 case 0xFC00:
2378 /* WH64EN */
2379 /* No-op */
2380 break;
2381 default:
2382 goto invalid_opc;
2383 }
2384 break;
2386 case 0x19:
2387 /* HW_MFPR (PALcode) */
2388 #ifndef CONFIG_USER_ONLY
2389 REQUIRE_TB_FLAG(TB_FLAGS_PAL_MODE);
2390 va = dest_gpr(ctx, ra);
2391 ret = gen_mfpr(ctx, va, insn & 0xffff);
2392 break;
2393 #else
2394 goto invalid_opc;
2395 #endif
2397 case 0x1A:
2398 /* JMP, JSR, RET, JSR_COROUTINE. These only differ by the branch
2399 prediction stack action, which of course we don't implement. */
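/* The target is forced to longword alignment below: Alpha PCs
   always have the low two bits clear. */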
2400 vb = load_gpr(ctx, rb);
2401 tcg_gen_andi_i64(cpu_pc, vb, ~3);
2402 if (ra != 31) {
2403 tcg_gen_movi_i64(ctx->ir[ra], ctx->pc);
2404 }
2405 ret = EXIT_PC_UPDATED;
2406 break;
2408 case 0x1B:
2409 /* HW_LD (PALcode) */
2410 #ifndef CONFIG_USER_ONLY
2411 REQUIRE_TB_FLAG(TB_FLAGS_PAL_MODE);
2412 {
2413 TCGv addr = tcg_temp_new();
2414 vb = load_gpr(ctx, rb);
2415 va = dest_gpr(ctx, ra);
2417 tcg_gen_addi_i64(addr, vb, disp12);
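/* Bits <15:12> of the instruction select the HW_LD variant:
   physical vs. virtual, longword vs. quadword, locked, and the
   alternate-mode/protection-check flavors. */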
2418 switch ((insn >> 12) & 0xF) {
2419 case 0x0:
2420 /* Longword physical access (hw_ldl/p) */
2421 gen_helper_ldl_phys(va, cpu_env, addr);
2422 break;
2423 case 0x1:
2424 /* Quadword physical access (hw_ldq/p) */
2425 gen_helper_ldq_phys(va, cpu_env, addr);
2426 break;
2427 case 0x2:
2428 /* Longword physical access with lock (hw_ldl_l/p) */
2429 gen_helper_ldl_l_phys(va, cpu_env, addr);
2430 break;
2431 case 0x3:
2432 /* Quadword physical access with lock (hw_ldq_l/p) */
2433 gen_helper_ldq_l_phys(va, cpu_env, addr);
2434 break;
2435 case 0x4:
2436 /* Longword virtual PTE fetch (hw_ldl/v) */
2437 goto invalid_opc;
2438 case 0x5:
2439 /* Quadword virtual PTE fetch (hw_ldq/v) */
2440 goto invalid_opc;
2441 break;
2442 case 0x6:
2443 /* Invalid */
2444 goto invalid_opc;
2445 case 0x7:
2446 /* Invalid */
2447 goto invalid_opc;
2448 case 0x8:
2449 /* Longword virtual access (hw_ldl) */
2450 goto invalid_opc;
2451 case 0x9:
2452 /* Quadword virtual access (hw_ldq) */
2453 goto invalid_opc;
2454 case 0xA:
2455 /* Longword virtual access with protection check (hw_ldl/w) */
2456 tcg_gen_qemu_ld_i64(va, addr, MMU_KERNEL_IDX, MO_LESL);
2457 break;
2458 case 0xB:
2459 /* Quadword virtual access with protection check (hw_ldq/w) */
2460 tcg_gen_qemu_ld_i64(va, addr, MMU_KERNEL_IDX, MO_LEQ);
2461 break;
2462 case 0xC:
2463 /* Longword virtual access with alt access mode (hw_ldl/a) */
2464 goto invalid_opc;
2465 case 0xD:
2466 /* Quadword virtual access with alt access mode (hw_ldq/a) */
2467 goto invalid_opc;
2468 case 0xE:
2469 /* Longword virtual access with alternate access mode and
2470 protection checks (hw_ldl/wa) */
2471 tcg_gen_qemu_ld_i64(va, addr, MMU_USER_IDX, MO_LESL);
2472 break;
2473 case 0xF:
2474 /* Quadword virtual access with alternate access mode and
2475 protection checks (hw_ldq/wa) */
2476 tcg_gen_qemu_ld_i64(va, addr, MMU_USER_IDX, MO_LEQ);
2477 break;
2478 }
2479 tcg_temp_free(addr);
2480 break;
2481 }
2482 #else
2483 goto invalid_opc;
2484 #endif
2486 case 0x1C:
2487 vc = dest_gpr(ctx, rc);
2488 if (fn7 == 0x70) {
2489 /* FTOIT */
2490 REQUIRE_TB_FLAG(TB_FLAGS_AMASK_FIX);
2491 REQUIRE_REG_31(rb);
2492 va = load_fpr(ctx, ra);
2493 tcg_gen_mov_i64(vc, va);
2494 break;
2495 } else if (fn7 == 0x78) {
2496 /* FTOIS */
2497 REQUIRE_TB_FLAG(TB_FLAGS_AMASK_FIX);
2498 REQUIRE_REG_31(rb);
2499 t32 = tcg_temp_new_i32();
2500 va = load_fpr(ctx, ra);
2501 gen_helper_s_to_memory(t32, va);
2502 tcg_gen_ext_i32_i64(vc, t32);
2503 tcg_temp_free_i32(t32);
2504 break;
2505 }
2507 vb = load_gpr_lit(ctx, rb, lit, islit);
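/* The remaining 0x1C insns use the ordinary operate format, where
   operand rb may instead be an 8-bit literal; load_gpr_lit picks
   the right source based on islit. */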
2508 switch (fn7) {
2509 case 0x00:
2510 /* SEXTB */
2511 REQUIRE_TB_FLAG(TB_FLAGS_AMASK_BWX);
2512 REQUIRE_REG_31(ra);
2513 tcg_gen_ext8s_i64(vc, vb);
2514 break;
2515 case 0x01:
2516 /* SEXTW */
2517 REQUIRE_TB_FLAG(TB_FLAGS_AMASK_BWX);
2518 REQUIRE_REG_31(ra);
2519 tcg_gen_ext16s_i64(vc, vb);
2520 break;
2521 case 0x30:
2522 /* CTPOP */
2523 REQUIRE_TB_FLAG(TB_FLAGS_AMASK_CIX);
2524 REQUIRE_REG_31(ra);
2525 REQUIRE_NO_LIT;
2526 gen_helper_ctpop(vc, vb);
2527 break;
2528 case 0x31:
2529 /* PERR */
2530 REQUIRE_TB_FLAG(TB_FLAGS_AMASK_MVI);
2531 REQUIRE_NO_LIT;
2532 va = load_gpr(ctx, ra);
2533 gen_helper_perr(vc, va, vb);
2534 break;
2535 case 0x32:
2536 /* CTLZ */
2537 REQUIRE_TB_FLAG(TB_FLAGS_AMASK_CIX);
2538 REQUIRE_REG_31(ra);
2539 REQUIRE_NO_LIT;
2540 gen_helper_ctlz(vc, vb);
2541 break;
2542 case 0x33:
2543 /* CTTZ */
2544 REQUIRE_TB_FLAG(TB_FLAGS_AMASK_CIX);
2545 REQUIRE_REG_31(ra);
2546 REQUIRE_NO_LIT;
2547 gen_helper_cttz(vc, vb);
2548 break;
2549 case 0x34:
2550 /* UNPKBW */
2551 REQUIRE_TB_FLAG(TB_FLAGS_AMASK_MVI);
2552 REQUIRE_REG_31(ra);
2553 REQUIRE_NO_LIT;
2554 gen_helper_unpkbw(vc, vb);
2555 break;
2556 case 0x35:
2557 /* UNPKBL */
2558 REQUIRE_TB_FLAG(TB_FLAGS_AMASK_MVI);
2559 REQUIRE_REG_31(ra);
2560 REQUIRE_NO_LIT;
2561 gen_helper_unpkbl(vc, vb);
2562 break;
2563 case 0x36:
2564 /* PKWB */
2565 REQUIRE_TB_FLAG(TB_FLAGS_AMASK_MVI);
2566 REQUIRE_REG_31(ra);
2567 REQUIRE_NO_LIT;
2568 gen_helper_pkwb(vc, vb);
2569 break;
2570 case 0x37:
2571 /* PKLB */
2572 REQUIRE_TB_FLAG(TB_FLAGS_AMASK_MVI);
2573 REQUIRE_REG_31(ra);
2574 REQUIRE_NO_LIT;
2575 gen_helper_pklb(vc, vb);
2576 break;
2577 case 0x38:
2578 /* MINSB8 */
2579 REQUIRE_TB_FLAG(TB_FLAGS_AMASK_MVI);
2580 va = load_gpr(ctx, ra);
2581 gen_helper_minsb8(vc, va, vb);
2582 break;
2583 case 0x39:
2584 /* MINSW4 */
2585 REQUIRE_TB_FLAG(TB_FLAGS_AMASK_MVI);
2586 va = load_gpr(ctx, ra);
2587 gen_helper_minsw4(vc, va, vb);
2588 break;
2589 case 0x3A:
2590 /* MINUB8 */
2591 REQUIRE_TB_FLAG(TB_FLAGS_AMASK_MVI);
2592 va = load_gpr(ctx, ra);
2593 gen_helper_minub8(vc, va, vb);
2594 break;
2595 case 0x3B:
2596 /* MINUW4 */
2597 REQUIRE_TB_FLAG(TB_FLAGS_AMASK_MVI);
2598 va = load_gpr(ctx, ra);
2599 gen_helper_minuw4(vc, va, vb);
2600 break;
2601 case 0x3C:
2602 /* MAXUB8 */
2603 REQUIRE_TB_FLAG(TB_FLAGS_AMASK_MVI);
2604 va = load_gpr(ctx, ra);
2605 gen_helper_maxub8(vc, va, vb);
2606 break;
2607 case 0x3D:
2608 /* MAXUW4 */
2609 REQUIRE_TB_FLAG(TB_FLAGS_AMASK_MVI);
2610 va = load_gpr(ctx, ra);
2611 gen_helper_maxuw4(vc, va, vb);
2612 break;
2613 case 0x3E:
2614 /* MAXSB8 */
2615 REQUIRE_TB_FLAG(TB_FLAGS_AMASK_MVI);
2616 va = load_gpr(ctx, ra);
2617 gen_helper_maxsb8(vc, va, vb);
2618 break;
2619 case 0x3F:
2620 /* MAXSW4 */
2621 REQUIRE_TB_FLAG(TB_FLAGS_AMASK_MVI);
2622 va = load_gpr(ctx, ra);
2623 gen_helper_maxsw4(vc, va, vb);
2624 break;
2625 default:
2626 goto invalid_opc;
2627 }
2628 break;
2630 case 0x1D:
2631 /* HW_MTPR (PALcode) */
2632 #ifndef CONFIG_USER_ONLY
2633 REQUIRE_TB_FLAG(TB_FLAGS_PAL_MODE);
2634 vb = load_gpr(ctx, rb);
2635 ret = gen_mtpr(ctx, vb, insn & 0xffff);
2636 break;
2637 #else
2638 goto invalid_opc;
2639 #endif
2641 case 0x1E:
2642 /* HW_RET (PALcode) */
2643 #ifndef CONFIG_USER_ONLY
2644 REQUIRE_TB_FLAG(TB_FLAGS_PAL_MODE);
2645 if (rb == 31) {
2646 /* Pre-EV6 CPUs interpreted this as HW_REI, loading the return
2647 address from EXC_ADDR. This turns out to be useful for our
2648 emulation PALcode, so continue to accept it. */
2649 ctx->lit = vb = tcg_temp_new();
2650 tcg_gen_ld_i64(vb, cpu_env, offsetof(CPUAlphaState, exc_addr));
2651 } else {
2652 vb = load_gpr(ctx, rb);
2653 }
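/* Leaving PALcode: clear the interrupt flag and any lock, take the
   new PAL-mode setting from bit 0 of the target, and jump. */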
2654 tmp = tcg_temp_new();
2655 tcg_gen_movi_i64(tmp, 0);
2656 tcg_gen_st8_i64(tmp, cpu_env, offsetof(CPUAlphaState, intr_flag));
2657 tcg_gen_movi_i64(cpu_lock_addr, -1);
2658 tcg_gen_andi_i64(tmp, vb, 1);
2659 tcg_gen_st8_i64(tmp, cpu_env, offsetof(CPUAlphaState, pal_mode));
2660 tcg_gen_andi_i64(cpu_pc, vb, ~3);
2661 ret = EXIT_PC_UPDATED;
2662 break;
2663 #else
2664 goto invalid_opc;
2665 #endif
2667 case 0x1F:
2668 /* HW_ST (PALcode) */
2669 #ifndef CONFIG_USER_ONLY
2670 REQUIRE_TB_FLAG(TB_FLAGS_PAL_MODE);
2671 {
2672 TCGv addr = tcg_temp_new();
2673 va = load_gpr(ctx, ra);
2674 vb = load_gpr(ctx, rb);
2676 tcg_gen_addi_i64(addr, vb, disp12);
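/* Same encoding as HW_LD: bits <15:12> select the variant, but
   only the physical stores (optionally with lock) are implemented. */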
2677 switch ((insn >> 12) & 0xF) {
2678 case 0x0:
2679 /* Longword physical access */
2680 gen_helper_stl_phys(cpu_env, addr, va);
2681 break;
2682 case 0x1:
2683 /* Quadword physical access */
2684 gen_helper_stq_phys(cpu_env, addr, va);
2685 break;
2686 case 0x2:
2687 /* Longword physical access with lock */
2688 gen_helper_stl_c_phys(dest_gpr(ctx, ra), cpu_env, addr, va);
2689 break;
2690 case 0x3:
2691 /* Quadword physical access with lock */
2692 gen_helper_stq_c_phys(dest_gpr(ctx, ra), cpu_env, addr, va);
2693 break;
2694 case 0x4:
2695 /* Longword virtual access */
2696 goto invalid_opc;
2697 case 0x5:
2698 /* Quadword virtual access */
2699 goto invalid_opc;
2700 case 0x6:
2701 /* Invalid */
2702 goto invalid_opc;
2703 case 0x7:
2704 /* Invalid */
2705 goto invalid_opc;
2706 case 0x8:
2707 /* Invalid */
2708 goto invalid_opc;
2709 case 0x9:
2710 /* Invalid */
2711 goto invalid_opc;
2712 case 0xA:
2713 /* Invalid */
2714 goto invalid_opc;
2715 case 0xB:
2716 /* Invalid */
2717 goto invalid_opc;
2718 case 0xC:
2719 /* Longword virtual access with alternate access mode */
2720 goto invalid_opc;
2721 case 0xD:
2722 /* Quadword virtual access with alternate access mode */
2723 goto invalid_opc;
2724 case 0xE:
2725 /* Invalid */
2726 goto invalid_opc;
2727 case 0xF:
2728 /* Invalid */
2729 goto invalid_opc;
2730 }
2731 tcg_temp_free(addr);
2732 break;
2733 }
2734 #else
2735 goto invalid_opc;
2736 #endif
2737 case 0x20:
2738 /* LDF */
2739 gen_load_mem(ctx, &gen_qemu_ldf, ra, rb, disp16, 1, 0);
2740 break;
2741 case 0x21:
2742 /* LDG */
2743 gen_load_mem(ctx, &gen_qemu_ldg, ra, rb, disp16, 1, 0);
2744 break;
2745 case 0x22:
2746 /* LDS */
2747 gen_load_mem(ctx, &gen_qemu_lds, ra, rb, disp16, 1, 0);
2748 break;
2749 case 0x23:
2750 /* LDT */
2751 gen_load_mem(ctx, &tcg_gen_qemu_ld64, ra, rb, disp16, 1, 0);
2752 break;
2753 case 0x24:
2754 /* STF */
2755 gen_store_mem(ctx, &gen_qemu_stf, ra, rb, disp16, 1, 0);
2756 break;
2757 case 0x25:
2758 /* STG */
2759 gen_store_mem(ctx, &gen_qemu_stg, ra, rb, disp16, 1, 0);
2760 break;
2761 case 0x26:
2762 /* STS */
2763 gen_store_mem(ctx, &gen_qemu_sts, ra, rb, disp16, 1, 0);
2764 break;
2765 case 0x27:
2766 /* STT */
2767 gen_store_mem(ctx, &tcg_gen_qemu_st64, ra, rb, disp16, 1, 0);
2768 break;
2769 case 0x28:
2770 /* LDL */
2771 gen_load_mem(ctx, &tcg_gen_qemu_ld32s, ra, rb, disp16, 0, 0);
2772 break;
2773 case 0x29:
2774 /* LDQ */
2775 gen_load_mem(ctx, &tcg_gen_qemu_ld64, ra, rb, disp16, 0, 0);
2776 break;
2777 case 0x2A:
2778 /* LDL_L */
2779 gen_load_mem(ctx, &gen_qemu_ldl_l, ra, rb, disp16, 0, 0);
2780 break;
2781 case 0x2B:
2782 /* LDQ_L */
2783 gen_load_mem(ctx, &gen_qemu_ldq_l, ra, rb, disp16, 0, 0);
2784 break;
2785 case 0x2C:
2786 /* STL */
2787 gen_store_mem(ctx, &tcg_gen_qemu_st32, ra, rb, disp16, 0, 0);
2788 break;
2789 case 0x2D:
2790 /* STQ */
2791 gen_store_mem(ctx, &tcg_gen_qemu_st64, ra, rb, disp16, 0, 0);
2792 break;
2793 case 0x2E:
2794 /* STL_C */
2795 ret = gen_store_conditional(ctx, ra, rb, disp16, 0);
2796 break;
2797 case 0x2F:
2798 /* STQ_C */
2799 ret = gen_store_conditional(ctx, ra, rb, disp16, 1);
2800 break;
2801 case 0x30:
2802 /* BR */
2803 ret = gen_bdirect(ctx, ra, disp21);
2804 break;
2805 case 0x31: /* FBEQ */
2806 ret = gen_fbcond(ctx, TCG_COND_EQ, ra, disp21);
2807 break;
2808 case 0x32: /* FBLT */
2809 ret = gen_fbcond(ctx, TCG_COND_LT, ra, disp21);
2810 break;
2811 case 0x33: /* FBLE */
2812 ret = gen_fbcond(ctx, TCG_COND_LE, ra, disp21);
2813 break;
2814 case 0x34:
2815 /* BSR */
2816 ret = gen_bdirect(ctx, ra, disp21);
2817 break;
2818 case 0x35: /* FBNE */
2819 ret = gen_fbcond(ctx, TCG_COND_NE, ra, disp21);
2820 break;
2821 case 0x36: /* FBGE */
2822 ret = gen_fbcond(ctx, TCG_COND_GE, ra, disp21);
2823 break;
2824 case 0x37: /* FBGT */
2825 ret = gen_fbcond(ctx, TCG_COND_GT, ra, disp21);
2826 break;
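/* Integer conditional branches. The final gen_bcond argument is a
   mask flag: 1 tests only the low bit of ra (BLBC/BLBS), 0 compares
   the whole register against zero. */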
2827 case 0x38:
2828 /* BLBC */
2829 ret = gen_bcond(ctx, TCG_COND_EQ, ra, disp21, 1);
2830 break;
2831 case 0x39:
2832 /* BEQ */
2833 ret = gen_bcond(ctx, TCG_COND_EQ, ra, disp21, 0);
2834 break;
2835 case 0x3A:
2836 /* BLT */
2837 ret = gen_bcond(ctx, TCG_COND_LT, ra, disp21, 0);
2838 break;
2839 case 0x3B:
2840 /* BLE */
2841 ret = gen_bcond(ctx, TCG_COND_LE, ra, disp21, 0);
2842 break;
2843 case 0x3C:
2844 /* BLBS */
2845 ret = gen_bcond(ctx, TCG_COND_NE, ra, disp21, 1);
2846 break;
2847 case 0x3D:
2848 /* BNE */
2849 ret = gen_bcond(ctx, TCG_COND_NE, ra, disp21, 0);
2850 break;
2851 case 0x3E:
2852 /* BGE */
2853 ret = gen_bcond(ctx, TCG_COND_GE, ra, disp21, 0);
2854 break;
2855 case 0x3F:
2856 /* BGT */
2857 ret = gen_bcond(ctx, TCG_COND_GT, ra, disp21, 0);
2858 break;
2859 invalid_opc:
2860 ret = gen_invalid(ctx);
2861 break;
2862 }
2864 return ret;
2865 }
2867 void gen_intermediate_code(CPUAlphaState *env, struct TranslationBlock *tb)
2868 {
2869 AlphaCPU *cpu = alpha_env_get_cpu(env);
2870 CPUState *cs = CPU(cpu);
2871 DisasContext ctx, *ctxp = &ctx;
2872 target_ulong pc_start;
2873 target_ulong pc_mask;
2874 uint32_t insn;
2875 ExitStatus ret;
2876 int num_insns;
2877 int max_insns;
2879 pc_start = tb->pc;
2881 ctx.tb = tb;
2882 ctx.pc = pc_start;
2883 ctx.mem_idx = cpu_mmu_index(env, false);
2884 ctx.implver = env->implver;
2885 ctx.singlestep_enabled = cs->singlestep_enabled;
2887 #ifdef CONFIG_USER_ONLY
2888 ctx.ir = cpu_std_ir;
2889 #else
2890 ctx.palbr = env->palbr;
2891 ctx.ir = (tb->flags & TB_FLAGS_PAL_MODE ? cpu_pal_ir : cpu_std_ir);
2892 #endif
2894 /* ??? Every TB begins with unset rounding mode, to be initialized on
2895 the first fp insn of the TB. Alternately we could define a proper
2896 default for every TB (e.g. QUAL_RM_N or QUAL_RM_D) and make sure
2897 to reset the FP_STATUS to that default at the end of any TB that
2898 changes the default. We could even (gasp) dynamically figure out
2899 what default would be most efficient given the running program. */
2900 ctx.tb_rm = -1;
2901 /* Similarly for flush-to-zero. */
2902 ctx.tb_ftz = -1;
2904 num_insns = 0;
2905 max_insns = tb->cflags & CF_COUNT_MASK;
2906 if (max_insns == 0) {
2907 max_insns = CF_COUNT_MASK;
2908 }
2909 if (max_insns > TCG_MAX_INSNS) {
2910 max_insns = TCG_MAX_INSNS;
2911 }
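/* Within a superpage a TB may run past normal page boundaries;
   otherwise pc_mask stops the translation loop at the end of the
   current page (tested below). */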
2913 if (in_superpage(&ctx, pc_start)) {
2914 pc_mask = (1ULL << 41) - 1;
2915 } else {
2916 pc_mask = ~TARGET_PAGE_MASK;
2917 }
2919 gen_tb_start(tb);
2920 do {
2921 tcg_gen_insn_start(ctx.pc);
2922 num_insns++;
2924 if (unlikely(cpu_breakpoint_test(cs, ctx.pc, BP_ANY))) {
2925 ret = gen_excp(&ctx, EXCP_DEBUG, 0);
2926 /* The address covered by the breakpoint must be included in
2927 [tb->pc, tb->pc + tb->size) in order for it to be
2928 properly cleared -- thus we increment the PC here so that
2929 the logic setting tb->size below does the right thing. */
2930 ctx.pc += 4;
2931 break;
2932 }
2933 if (num_insns == max_insns && (tb->cflags & CF_LAST_IO)) {
2934 gen_io_start();
2935 }
2936 insn = cpu_ldl_code(env, ctx.pc);
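/* ctx.zero, ctx.sink and ctx.lit are allocated on demand inside
   translate_one; mark them unused here and free whichever ones the
   instruction actually touched afterwards. */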
2938 TCGV_UNUSED_I64(ctx.zero);
2939 TCGV_UNUSED_I64(ctx.sink);
2940 TCGV_UNUSED_I64(ctx.lit);
2942 ctx.pc += 4;
2943 ret = translate_one(ctxp, insn);
2945 if (!TCGV_IS_UNUSED_I64(ctx.sink)) {
2946 tcg_gen_discard_i64(ctx.sink);
2947 tcg_temp_free(ctx.sink);
2948 }
2949 if (!TCGV_IS_UNUSED_I64(ctx.zero)) {
2950 tcg_temp_free(ctx.zero);
2951 }
2952 if (!TCGV_IS_UNUSED_I64(ctx.lit)) {
2953 tcg_temp_free(ctx.lit);
2954 }
2956 /* If we reach a page boundary, are single stepping,
2957 or exhaust instruction count, stop generation. */
2958 if (ret == NO_EXIT
2959 && ((ctx.pc & pc_mask) == 0
2960 || tcg_op_buf_full()
2961 || num_insns >= max_insns
2962 || singlestep
2963 || ctx.singlestep_enabled)) {
2964 ret = EXIT_PC_STALE;
2965 }
2966 } while (ret == NO_EXIT);
2968 if (tb->cflags & CF_LAST_IO) {
2969 gen_io_end();
2970 }
2972 switch (ret) {
2973 case EXIT_GOTO_TB:
2974 case EXIT_NORETURN:
2975 break;
2976 case EXIT_PC_STALE:
2977 tcg_gen_movi_i64(cpu_pc, ctx.pc);
2978 /* FALLTHRU */
2979 case EXIT_PC_UPDATED:
2980 if (ctx.singlestep_enabled) {
2981 gen_excp_1(EXCP_DEBUG, 0);
2982 } else {
2983 tcg_gen_exit_tb(0);
2984 }
2985 break;
2986 default:
2987 abort();
2988 }
2990 gen_tb_end(tb, num_insns);
2992 tb->size = ctx.pc - pc_start;
2993 tb->icount = num_insns;
2995 #ifdef DEBUG_DISAS
2996 if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
2997 qemu_log("IN: %s\n", lookup_symbol(pc_start));
2998 log_target_disas(cs, pc_start, ctx.pc - pc_start, 1);
2999 qemu_log("\n");
3000 }
3001 #endif
3002 }
3004 void restore_state_to_opc(CPUAlphaState *env, TranslationBlock *tb,
3005 target_ulong *data)
3006 {
3007 env->pc = data[0];