/*
 * Alpha emulation cpu translation for qemu.
 *
 * Copyright (c) 2007 Jocelyn Mayer
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "cpu.h"
#include "disas/disas.h"
#include "qemu/host-utils.h"
#include "tcg-op.h"
#include "exec/cpu_ldst.h"

#include "exec/helper-proto.h"
#include "exec/helper-gen.h"

#include "trace-tcg.h"

#undef ALPHA_DEBUG_DISAS
#define CONFIG_SOFTFLOAT_INLINE

#ifdef ALPHA_DEBUG_DISAS
#  define LOG_DISAS(...) qemu_log_mask(CPU_LOG_TB_IN_ASM, ## __VA_ARGS__)
#else
#  define LOG_DISAS(...) do { } while (0)
#endif

typedef struct DisasContext DisasContext;
struct DisasContext {
    struct TranslationBlock *tb;
    uint64_t pc;
#ifndef CONFIG_USER_ONLY
    uint64_t palbr;
#endif
    int mem_idx;

    /* Current rounding mode for this TB.  */
    int tb_rm;
    /* Current flush-to-zero setting for this TB.  */
    int tb_ftz;

    /* implver value for this CPU.  */
    int implver;

    /* The set of registers active in the current context.  */
    TCGv *ir;

    /* Temporaries for $31 and $f31 as source and destination.  */
    TCGv zero;
    TCGv sink;
    /* Temporary for immediate constants.  */
    TCGv lit;

    bool singlestep_enabled;
};

/* Return values from translate_one, indicating the state of the TB.
   Note that zero indicates that we are not exiting the TB.  */

typedef enum {
    NO_EXIT,

    /* We have emitted one or more goto_tb.  No fixup required.  */
    EXIT_GOTO_TB,

    /* We are not using a goto_tb (for whatever reason), but have updated
       the PC (for whatever reason), so there's no need to do it again on
       exiting the TB.  */
    EXIT_PC_UPDATED,

    /* We are exiting the TB, but have neither emitted a goto_tb, nor
       updated the PC for the next instruction to be executed.  */
    EXIT_PC_STALE,

    /* We are ending the TB with a noreturn function call, e.g. longjmp.
       No following code will be executed.  */
    EXIT_NORETURN,
} ExitStatus;

/* global register indexes */
static TCGv_ptr cpu_env;
static TCGv cpu_std_ir[31];
static TCGv cpu_fir[31];
static TCGv cpu_pc;
static TCGv cpu_lock_addr;
static TCGv cpu_lock_st_addr;
static TCGv cpu_lock_value;

#ifndef CONFIG_USER_ONLY
static TCGv cpu_pal_ir[31];
#endif

#include "exec/gen-icount.h"

void alpha_translate_init(void)
{
#define DEF_VAR(V)  { &cpu_##V, #V, offsetof(CPUAlphaState, V) }

    typedef struct { TCGv *var; const char *name; int ofs; } GlobalVar;
    static const GlobalVar vars[] = {
        DEF_VAR(pc),
        DEF_VAR(lock_addr),
        DEF_VAR(lock_st_addr),
        DEF_VAR(lock_value),
    };

#undef DEF_VAR

    /* Use the symbolic register names that match the disassembler.  */
    static const char greg_names[31][4] = {
        "v0", "t0", "t1", "t2", "t3", "t4", "t5", "t6",
        "t7", "s0", "s1", "s2", "s3", "s4", "s5", "fp",
        "a0", "a1", "a2", "a3", "a4", "a5", "t8", "t9",
        "t10", "t11", "ra", "t12", "at", "gp", "sp"
    };
    static const char freg_names[31][4] = {
        "f0", "f1", "f2", "f3", "f4", "f5", "f6", "f7",
        "f8", "f9", "f10", "f11", "f12", "f13", "f14", "f15",
        "f16", "f17", "f18", "f19", "f20", "f21", "f22", "f23",
        "f24", "f25", "f26", "f27", "f28", "f29", "f30"
    };
#ifndef CONFIG_USER_ONLY
    static const char shadow_names[8][8] = {
        "pal_t7", "pal_s0", "pal_s1", "pal_s2",
        "pal_s3", "pal_s4", "pal_s5", "pal_t11"
    };
#endif

    static bool done_init = 0;
    int i;

    if (done_init) {
        return;
    }
    done_init = 1;

    cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");

    for (i = 0; i < 31; i++) {
        cpu_std_ir[i] = tcg_global_mem_new_i64(TCG_AREG0,
                                               offsetof(CPUAlphaState, ir[i]),
                                               greg_names[i]);
    }

    for (i = 0; i < 31; i++) {
        cpu_fir[i] = tcg_global_mem_new_i64(TCG_AREG0,
                                            offsetof(CPUAlphaState, fir[i]),
                                            freg_names[i]);
    }

#ifndef CONFIG_USER_ONLY
    memcpy(cpu_pal_ir, cpu_std_ir, sizeof(cpu_pal_ir));
    for (i = 0; i < 8; i++) {
        int r = (i == 7 ? 25 : i + 8);
        cpu_pal_ir[r] = tcg_global_mem_new_i64(TCG_AREG0,
                                               offsetof(CPUAlphaState,
                                                        shadow[i]),
                                               shadow_names[i]);
    }
#endif

    for (i = 0; i < ARRAY_SIZE(vars); ++i) {
        const GlobalVar *v = &vars[i];
        *v->var = tcg_global_mem_new_i64(TCG_AREG0, v->ofs, v->name);
    }
}

static TCGv load_zero(DisasContext *ctx)
{
    if (TCGV_IS_UNUSED_I64(ctx->zero)) {
        ctx->zero = tcg_const_i64(0);
    }
    return ctx->zero;
}

static TCGv dest_sink(DisasContext *ctx)
{
    if (TCGV_IS_UNUSED_I64(ctx->sink)) {
        ctx->sink = tcg_temp_new();
    }
    return ctx->sink;
}

static TCGv load_gpr(DisasContext *ctx, unsigned reg)
{
    if (likely(reg < 31)) {
        return ctx->ir[reg];
    } else {
        return load_zero(ctx);
    }
}

static TCGv load_gpr_lit(DisasContext *ctx, unsigned reg,
                         uint8_t lit, bool islit)
{
    if (islit) {
        ctx->lit = tcg_const_i64(lit);
        return ctx->lit;
    } else if (likely(reg < 31)) {
        return ctx->ir[reg];
    } else {
        return load_zero(ctx);
    }
}

static TCGv dest_gpr(DisasContext *ctx, unsigned reg)
{
    if (likely(reg < 31)) {
        return ctx->ir[reg];
    } else {
        return dest_sink(ctx);
    }
}

static TCGv load_fpr(DisasContext *ctx, unsigned reg)
{
    if (likely(reg < 31)) {
        return cpu_fir[reg];
    } else {
        return load_zero(ctx);
    }
}

static TCGv dest_fpr(DisasContext *ctx, unsigned reg)
{
    if (likely(reg < 31)) {
        return cpu_fir[reg];
    } else {
        return dest_sink(ctx);
    }
}

static void gen_excp_1(int exception, int error_code)
{
    TCGv_i32 tmp1, tmp2;

    tmp1 = tcg_const_i32(exception);
    tmp2 = tcg_const_i32(error_code);
    gen_helper_excp(cpu_env, tmp1, tmp2);
    tcg_temp_free_i32(tmp2);
    tcg_temp_free_i32(tmp1);
}

static ExitStatus gen_excp(DisasContext *ctx, int exception, int error_code)
{
    tcg_gen_movi_i64(cpu_pc, ctx->pc);
    gen_excp_1(exception, error_code);
    return EXIT_NORETURN;
}

static inline ExitStatus gen_invalid(DisasContext *ctx)
{
    return gen_excp(ctx, EXCP_OPCDEC, 0);
}

static inline void gen_qemu_ldf(TCGv t0, TCGv t1, int flags)
{
    TCGv_i32 tmp32 = tcg_temp_new_i32();
    tcg_gen_qemu_ld_i32(tmp32, t1, flags, MO_LEUL);
    gen_helper_memory_to_f(t0, tmp32);
    tcg_temp_free_i32(tmp32);
}

static inline void gen_qemu_ldg(TCGv t0, TCGv t1, int flags)
{
    TCGv tmp = tcg_temp_new();
    tcg_gen_qemu_ld_i64(tmp, t1, flags, MO_LEQ);
    gen_helper_memory_to_g(t0, tmp);
    tcg_temp_free(tmp);
}

static inline void gen_qemu_lds(TCGv t0, TCGv t1, int flags)
{
    TCGv_i32 tmp32 = tcg_temp_new_i32();
    tcg_gen_qemu_ld_i32(tmp32, t1, flags, MO_LEUL);
    gen_helper_memory_to_s(t0, tmp32);
    tcg_temp_free_i32(tmp32);
}

static inline void gen_qemu_ldl_l(TCGv t0, TCGv t1, int flags)
{
    tcg_gen_qemu_ld_i64(t0, t1, flags, MO_LESL);
    tcg_gen_mov_i64(cpu_lock_addr, t1);
    tcg_gen_mov_i64(cpu_lock_value, t0);
}

static inline void gen_qemu_ldq_l(TCGv t0, TCGv t1, int flags)
{
    tcg_gen_qemu_ld_i64(t0, t1, flags, MO_LEQ);
    tcg_gen_mov_i64(cpu_lock_addr, t1);
    tcg_gen_mov_i64(cpu_lock_value, t0);
}

static inline void gen_load_mem(DisasContext *ctx,
                                void (*tcg_gen_qemu_load)(TCGv t0, TCGv t1,
                                                          int flags),
                                int ra, int rb, int32_t disp16, bool fp,
                                bool clear)
{
    TCGv tmp, addr, va;

    /* LDQ_U with ra $31 is UNOP.  Other various loads are forms of
       prefetches, which we can treat as nops.  No worries about
       missed exceptions here.  */
    if (unlikely(ra == 31)) {
        return;
    }

    tmp = tcg_temp_new();
    addr = load_gpr(ctx, rb);

    if (disp16) {
        tcg_gen_addi_i64(tmp, addr, disp16);
        addr = tmp;
    }
    if (clear) {
        tcg_gen_andi_i64(tmp, addr, ~0x7);
        addr = tmp;
    }

    va = (fp ? cpu_fir[ra] : ctx->ir[ra]);
    tcg_gen_qemu_load(va, addr, ctx->mem_idx);

    tcg_temp_free(tmp);
}
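
/* Worked example of the "clear" path above: LDQ_U computes the effective
   address and then zaps the low three bits, so an unaligned address such
   as 0x1003 is loaded from 0x1000.  */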

static inline void gen_qemu_stf(TCGv t0, TCGv t1, int flags)
{
    TCGv_i32 tmp32 = tcg_temp_new_i32();
    gen_helper_f_to_memory(tmp32, t0);
    tcg_gen_qemu_st_i32(tmp32, t1, flags, MO_LEUL);
    tcg_temp_free_i32(tmp32);
}

static inline void gen_qemu_stg(TCGv t0, TCGv t1, int flags)
{
    TCGv tmp = tcg_temp_new();
    gen_helper_g_to_memory(tmp, t0);
    tcg_gen_qemu_st_i64(tmp, t1, flags, MO_LEQ);
    tcg_temp_free(tmp);
}

static inline void gen_qemu_sts(TCGv t0, TCGv t1, int flags)
{
    TCGv_i32 tmp32 = tcg_temp_new_i32();
    gen_helper_s_to_memory(tmp32, t0);
    tcg_gen_qemu_st_i32(tmp32, t1, flags, MO_LEUL);
    tcg_temp_free_i32(tmp32);
}

static inline void gen_store_mem(DisasContext *ctx,
                                 void (*tcg_gen_qemu_store)(TCGv t0, TCGv t1,
                                                            int flags),
                                 int ra, int rb, int32_t disp16, bool fp,
                                 bool clear)
{
    TCGv tmp, addr, va;

    tmp = tcg_temp_new();
    addr = load_gpr(ctx, rb);

    if (disp16) {
        tcg_gen_addi_i64(tmp, addr, disp16);
        addr = tmp;
    }
    if (clear) {
        tcg_gen_andi_i64(tmp, addr, ~0x7);
        addr = tmp;
    }

    va = (fp ? load_fpr(ctx, ra) : load_gpr(ctx, ra));
    tcg_gen_qemu_store(va, addr, ctx->mem_idx);

    tcg_temp_free(tmp);
}

static ExitStatus gen_store_conditional(DisasContext *ctx, int ra, int rb,
                                        int32_t disp16, int quad)
{
    TCGv addr;

    if (ra == 31) {
        /* ??? Don't bother storing anything.  The user can't tell
           the difference, since the zero register always reads zero.  */
        return NO_EXIT;
    }

#if defined(CONFIG_USER_ONLY)
    addr = cpu_lock_st_addr;
#else
    addr = tcg_temp_local_new();
#endif

    tcg_gen_addi_i64(addr, load_gpr(ctx, rb), disp16);

#if defined(CONFIG_USER_ONLY)
    /* ??? This is handled via a complicated version of compare-and-swap
       in the cpu_loop.  Hopefully one day we'll have a real CAS opcode
       in TCG so that this isn't necessary.  */
    return gen_excp(ctx, quad ? EXCP_STQ_C : EXCP_STL_C, ra);
#else
    /* ??? In system mode we are never multi-threaded, so CAS can be
       implemented via a non-atomic load-compare-store sequence.  */
    {
        TCGLabel *lab_fail, *lab_done;
        TCGv val;

        lab_fail = gen_new_label();
        lab_done = gen_new_label();
        tcg_gen_brcond_i64(TCG_COND_NE, addr, cpu_lock_addr, lab_fail);

        val = tcg_temp_new();
        tcg_gen_qemu_ld_i64(val, addr, ctx->mem_idx, quad ? MO_LEQ : MO_LESL);
        tcg_gen_brcond_i64(TCG_COND_NE, val, cpu_lock_value, lab_fail);

        tcg_gen_qemu_st_i64(ctx->ir[ra], addr, ctx->mem_idx,
                            quad ? MO_LEQ : MO_LEUL);
        tcg_gen_movi_i64(ctx->ir[ra], 1);
        tcg_gen_br(lab_done);

        gen_set_label(lab_fail);
        tcg_gen_movi_i64(ctx->ir[ra], 0);

        gen_set_label(lab_done);
        tcg_gen_movi_i64(cpu_lock_addr, -1);

        tcg_temp_free(addr);
        return NO_EXIT;
    }
#endif
}

static bool in_superpage(DisasContext *ctx, int64_t addr)
{
    return ((ctx->tb->flags & TB_FLAGS_USER_MODE) == 0
            && addr < 0
            && ((addr >> 41) & 3) == 2
            && addr >> TARGET_VIRT_ADDR_SPACE_BITS == addr >> 63);
}
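
/* Example, assuming the usual 43-bit virtual address configuration: for
   addr = 0xfffffc0000000000 (the kernel KSEG base), addr is negative,
   bits <42:41> are 2, and everything above bit 42 is a copy of the sign
   bit, so in_superpage() returns true.  */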
455 static bool use_goto_tb(DisasContext *ctx, uint64_t dest)
457 /* Suppress goto_tb in the case of single-steping and IO. */
458 if ((ctx->tb->cflags & CF_LAST_IO)
459 || ctx->singlestep_enabled || singlestep) {
460 return false;
462 /* If the destination is in the superpage, the page perms can't change. */
463 if (in_superpage(ctx, dest)) {
464 return true;
466 /* Check for the dest on the same page as the start of the TB. */
467 return ((ctx->tb->pc ^ dest) & TARGET_PAGE_MASK) == 0;

static ExitStatus gen_bdirect(DisasContext *ctx, int ra, int32_t disp)
{
    uint64_t dest = ctx->pc + (disp << 2);

    if (ra != 31) {
        tcg_gen_movi_i64(ctx->ir[ra], ctx->pc);
    }

    /* Notice branch-to-next; used to initialize RA with the PC.  */
    if (disp == 0) {
        return 0;
    } else if (use_goto_tb(ctx, dest)) {
        tcg_gen_goto_tb(0);
        tcg_gen_movi_i64(cpu_pc, dest);
        tcg_gen_exit_tb((uintptr_t)ctx->tb);
        return EXIT_GOTO_TB;
    } else {
        tcg_gen_movi_i64(cpu_pc, dest);
        return EXIT_PC_UPDATED;
    }
}

static ExitStatus gen_bcond_internal(DisasContext *ctx, TCGCond cond,
                                     TCGv cmp, int32_t disp)
{
    uint64_t dest = ctx->pc + (disp << 2);
    TCGLabel *lab_true = gen_new_label();

    if (use_goto_tb(ctx, dest)) {
        tcg_gen_brcondi_i64(cond, cmp, 0, lab_true);

        tcg_gen_goto_tb(0);
        tcg_gen_movi_i64(cpu_pc, ctx->pc);
        tcg_gen_exit_tb((uintptr_t)ctx->tb);

        gen_set_label(lab_true);
        tcg_gen_goto_tb(1);
        tcg_gen_movi_i64(cpu_pc, dest);
        tcg_gen_exit_tb((uintptr_t)ctx->tb + 1);

        return EXIT_GOTO_TB;
    } else {
        TCGv_i64 z = tcg_const_i64(0);
        TCGv_i64 d = tcg_const_i64(dest);
        TCGv_i64 p = tcg_const_i64(ctx->pc);

        tcg_gen_movcond_i64(cond, cpu_pc, cmp, z, d, p);

        tcg_temp_free_i64(z);
        tcg_temp_free_i64(d);
        tcg_temp_free_i64(p);
        return EXIT_PC_UPDATED;
    }
}

static ExitStatus gen_bcond(DisasContext *ctx, TCGCond cond, int ra,
                            int32_t disp, int mask)
{
    TCGv cmp_tmp;

    if (mask) {
        cmp_tmp = tcg_temp_new();
        tcg_gen_andi_i64(cmp_tmp, load_gpr(ctx, ra), 1);
    } else {
        cmp_tmp = load_gpr(ctx, ra);
    }

    return gen_bcond_internal(ctx, cond, cmp_tmp, disp);
}

/* Fold -0.0 for comparison with COND.  */

static void gen_fold_mzero(TCGCond cond, TCGv dest, TCGv src)
{
    uint64_t mzero = 1ull << 63;

    switch (cond) {
    case TCG_COND_LE:
    case TCG_COND_GT:
        /* For <= or >, the -0.0 value directly compares the way we want.  */
        tcg_gen_mov_i64(dest, src);
        break;

    case TCG_COND_EQ:
    case TCG_COND_NE:
        /* For == or !=, we can simply mask off the sign bit and compare.  */
        tcg_gen_andi_i64(dest, src, mzero - 1);
        break;

    case TCG_COND_GE:
    case TCG_COND_LT:
        /* For >= or <, map -0.0 to +0.0 via comparison and mask.  */
        tcg_gen_setcondi_i64(TCG_COND_NE, dest, src, mzero);
        tcg_gen_neg_i64(dest, dest);
        tcg_gen_and_i64(dest, dest, src);
        break;

    default:
        abort();
    }
}
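
/* Worked example for the GE/LT path above: IEEE -0.0 is the bit pattern
   0x8000000000000000.  setcond yields 0 for that input, the negation
   turns it into an all-zero mask, and the AND maps -0.0 to +0.0; any
   other input yields an all-ones mask and passes through unchanged, so
   a plain signed integer compare then orders the values correctly.  */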

static ExitStatus gen_fbcond(DisasContext *ctx, TCGCond cond, int ra,
                             int32_t disp)
{
    TCGv cmp_tmp = tcg_temp_new();
    gen_fold_mzero(cond, cmp_tmp, load_fpr(ctx, ra));
    return gen_bcond_internal(ctx, cond, cmp_tmp, disp);
}

static void gen_fcmov(DisasContext *ctx, TCGCond cond, int ra, int rb, int rc)
{
    TCGv_i64 va, vb, z;

    z = load_zero(ctx);
    vb = load_fpr(ctx, rb);
    va = tcg_temp_new();
    gen_fold_mzero(cond, va, load_fpr(ctx, ra));

    tcg_gen_movcond_i64(cond, dest_fpr(ctx, rc), va, z, vb, load_fpr(ctx, rc));

    tcg_temp_free(va);
}

#define QUAL_RM_N       0x080   /* Round mode nearest even */
#define QUAL_RM_C       0x000   /* Round mode chopped */
#define QUAL_RM_M       0x040   /* Round mode minus infinity */
#define QUAL_RM_D       0x0c0   /* Round mode dynamic */
#define QUAL_RM_MASK    0x0c0

#define QUAL_U          0x100   /* Underflow enable (fp output) */
#define QUAL_V          0x100   /* Overflow enable (int output) */
#define QUAL_S          0x400   /* Software completion enable */
#define QUAL_I          0x200   /* Inexact detection enable */
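
/* Note: these bits live in the instruction's fn11 function field, so an
   IEEE operation carrying the full /SUI qualifier has QUAL_S | QUAL_U |
   QUAL_I (0x700) set in fn11, while a bare operation has none of them.  */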

static void gen_qual_roundmode(DisasContext *ctx, int fn11)
{
    TCGv_i32 tmp;

    fn11 &= QUAL_RM_MASK;
    if (fn11 == ctx->tb_rm) {
        return;
    }
    ctx->tb_rm = fn11;

    tmp = tcg_temp_new_i32();
    switch (fn11) {
    case QUAL_RM_N:
        tcg_gen_movi_i32(tmp, float_round_nearest_even);
        break;
    case QUAL_RM_C:
        tcg_gen_movi_i32(tmp, float_round_to_zero);
        break;
    case QUAL_RM_M:
        tcg_gen_movi_i32(tmp, float_round_down);
        break;
    case QUAL_RM_D:
        tcg_gen_ld8u_i32(tmp, cpu_env,
                         offsetof(CPUAlphaState, fpcr_dyn_round));
        break;
    }

#if defined(CONFIG_SOFTFLOAT_INLINE)
    /* ??? The "fpu/softfloat.h" interface is to call set_float_rounding_mode.
       With CONFIG_SOFTFLOAT that expands to an out-of-line call that just
       sets the one field.  */
    tcg_gen_st8_i32(tmp, cpu_env,
                    offsetof(CPUAlphaState, fp_status.float_rounding_mode));
#else
    gen_helper_setroundmode(tmp);
#endif

    tcg_temp_free_i32(tmp);
}

static void gen_qual_flushzero(DisasContext *ctx, int fn11)
{
    TCGv_i32 tmp;

    fn11 &= QUAL_U;
    if (fn11 == ctx->tb_ftz) {
        return;
    }
    ctx->tb_ftz = fn11;

    tmp = tcg_temp_new_i32();
    if (fn11) {
        /* Underflow is enabled, use the FPCR setting.  */
        tcg_gen_ld8u_i32(tmp, cpu_env,
                         offsetof(CPUAlphaState, fpcr_flush_to_zero));
    } else {
        /* Underflow is disabled, force flush-to-zero.  */
        tcg_gen_movi_i32(tmp, 1);
    }

#if defined(CONFIG_SOFTFLOAT_INLINE)
    tcg_gen_st8_i32(tmp, cpu_env,
                    offsetof(CPUAlphaState, fp_status.flush_to_zero));
#else
    gen_helper_setflushzero(tmp);
#endif

    tcg_temp_free_i32(tmp);
}

static TCGv gen_ieee_input(DisasContext *ctx, int reg, int fn11, int is_cmp)
{
    TCGv val;

    if (unlikely(reg == 31)) {
        val = load_zero(ctx);
    } else {
        val = cpu_fir[reg];
        if ((fn11 & QUAL_S) == 0) {
            if (is_cmp) {
                gen_helper_ieee_input_cmp(cpu_env, val);
            } else {
                gen_helper_ieee_input(cpu_env, val);
            }
        } else {
#ifndef CONFIG_USER_ONLY
            /* In system mode, raise exceptions for denormals like real
               hardware.  In user mode, proceed as if the OS completion
               handler is handling the denormal as per spec.  */
            gen_helper_ieee_input_s(cpu_env, val);
#endif
        }
    }
    return val;
}

static void gen_fp_exc_raise(int rc, int fn11)
{
    /* ??? We ought to be able to do something with imprecise exceptions.
       E.g. notice we're still in the trap shadow of something within the
       TB and do not generate the code to signal the exception; end the TB
       when an exception is forced to arrive, either by consumption of a
       register value or TRAPB or EXCB.  */
    TCGv_i32 reg, ign;
    uint32_t ignore = 0;

    if (!(fn11 & QUAL_U)) {
        /* Note that QUAL_U == QUAL_V, so ignore either.  */
        ignore |= FPCR_UNF | FPCR_IOV;
    }
    if (!(fn11 & QUAL_I)) {
        ignore |= FPCR_INE;
    }
    ign = tcg_const_i32(ignore);

    /* ??? Pass in the regno of the destination so that the helper can
       set EXC_MASK, which contains a bitmask of destination registers
       that have caused arithmetic traps.  A simple userspace emulation
       does not require this.  We do need it for a guest kernel's entArith,
       or if we were to do something clever with imprecise exceptions.  */
    reg = tcg_const_i32(rc + 32);
    if (fn11 & QUAL_S) {
        gen_helper_fp_exc_raise_s(cpu_env, ign, reg);
    } else {
        gen_helper_fp_exc_raise(cpu_env, ign, reg);
    }

    tcg_temp_free_i32(reg);
    tcg_temp_free_i32(ign);
}

static void gen_cvtlq(TCGv vc, TCGv vb)
{
    TCGv tmp = tcg_temp_new();

    /* The arithmetic right shift here, plus the sign-extended mask below
       yields a sign-extended result without an explicit ext32s_i64.  */
    tcg_gen_sari_i64(tmp, vb, 32);
    tcg_gen_shri_i64(vc, vb, 29);
    tcg_gen_andi_i64(tmp, tmp, (int32_t)0xc0000000);
    tcg_gen_andi_i64(vc, vc, 0x3fffffff);
    tcg_gen_or_i64(vc, vc, tmp);

    tcg_temp_free(tmp);
}
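
/* Bit-layout note for the above: a longword held in an FP register keeps
   its top two bits in register bits <63:62> and its low 30 bits in
   <58:29>.  The two shift/mask pairs move those fields to <31:30> and
   <29:0> respectively, reassembling the 32-bit value, with the arithmetic
   shift supplying the sign extension for free.  */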

static void gen_ieee_arith2(DisasContext *ctx,
                            void (*helper)(TCGv, TCGv_ptr, TCGv),
                            int rb, int rc, int fn11)
{
    TCGv vb;

    gen_qual_roundmode(ctx, fn11);
    gen_qual_flushzero(ctx, fn11);

    vb = gen_ieee_input(ctx, rb, fn11, 0);
    helper(dest_fpr(ctx, rc), cpu_env, vb);

    gen_fp_exc_raise(rc, fn11);
}

#define IEEE_ARITH2(name)                                       \
static inline void glue(gen_, name)(DisasContext *ctx,          \
                                    int rb, int rc, int fn11)   \
{                                                               \
    gen_ieee_arith2(ctx, gen_helper_##name, rb, rc, fn11);      \
}
IEEE_ARITH2(sqrts)
IEEE_ARITH2(sqrtt)
IEEE_ARITH2(cvtst)
IEEE_ARITH2(cvtts)

static void gen_cvttq(DisasContext *ctx, int rb, int rc, int fn11)
{
    TCGv vb, vc;

    /* No need to set flushzero, since we have an integer output.  */
    vb = gen_ieee_input(ctx, rb, fn11, 0);
    vc = dest_fpr(ctx, rc);

    /* Almost all integer conversions use cropped rounding;
       special case that.  */
    if ((fn11 & QUAL_RM_MASK) == QUAL_RM_C) {
        gen_helper_cvttq_c(vc, cpu_env, vb);
    } else {
        gen_qual_roundmode(ctx, fn11);
        gen_helper_cvttq(vc, cpu_env, vb);
    }
    gen_fp_exc_raise(rc, fn11);
}

static void gen_ieee_intcvt(DisasContext *ctx,
                            void (*helper)(TCGv, TCGv_ptr, TCGv),
                            int rb, int rc, int fn11)
{
    TCGv vb, vc;

    gen_qual_roundmode(ctx, fn11);
    vb = load_fpr(ctx, rb);
    vc = dest_fpr(ctx, rc);

    /* The only exception that can be raised by integer conversion
       is inexact.  Thus we only need to worry about exceptions when
       inexact handling is requested.  */
    if (fn11 & QUAL_I) {
        helper(vc, cpu_env, vb);
        gen_fp_exc_raise(rc, fn11);
    } else {
        helper(vc, cpu_env, vb);
    }
}

#define IEEE_INTCVT(name)                                       \
static inline void glue(gen_, name)(DisasContext *ctx,          \
                                    int rb, int rc, int fn11)   \
{                                                               \
    gen_ieee_intcvt(ctx, gen_helper_##name, rb, rc, fn11);      \
}
IEEE_INTCVT(cvtqs)
IEEE_INTCVT(cvtqt)

static void gen_cpy_mask(TCGv vc, TCGv va, TCGv vb, bool inv_a, uint64_t mask)
{
    TCGv vmask = tcg_const_i64(mask);
    TCGv tmp = tcg_temp_new_i64();

    if (inv_a) {
        tcg_gen_andc_i64(tmp, vmask, va);
    } else {
        tcg_gen_and_i64(tmp, va, vmask);
    }

    tcg_gen_andc_i64(vc, vb, vmask);
    tcg_gen_or_i64(vc, vc, tmp);

    tcg_temp_free(vmask);
    tcg_temp_free(tmp);
}

static void gen_ieee_arith3(DisasContext *ctx,
                            void (*helper)(TCGv, TCGv_ptr, TCGv, TCGv),
                            int ra, int rb, int rc, int fn11)
{
    TCGv va, vb, vc;

    gen_qual_roundmode(ctx, fn11);
    gen_qual_flushzero(ctx, fn11);

    va = gen_ieee_input(ctx, ra, fn11, 0);
    vb = gen_ieee_input(ctx, rb, fn11, 0);
    vc = dest_fpr(ctx, rc);
    helper(vc, cpu_env, va, vb);

    gen_fp_exc_raise(rc, fn11);
}

#define IEEE_ARITH3(name)                                               \
static inline void glue(gen_, name)(DisasContext *ctx,                  \
                                    int ra, int rb, int rc, int fn11)   \
{                                                                       \
    gen_ieee_arith3(ctx, gen_helper_##name, ra, rb, rc, fn11);          \
}
IEEE_ARITH3(adds)
IEEE_ARITH3(subs)
IEEE_ARITH3(muls)
IEEE_ARITH3(divs)
IEEE_ARITH3(addt)
IEEE_ARITH3(subt)
IEEE_ARITH3(mult)
IEEE_ARITH3(divt)

static void gen_ieee_compare(DisasContext *ctx,
                             void (*helper)(TCGv, TCGv_ptr, TCGv, TCGv),
                             int ra, int rb, int rc, int fn11)
{
    TCGv va, vb, vc;

    va = gen_ieee_input(ctx, ra, fn11, 1);
    vb = gen_ieee_input(ctx, rb, fn11, 1);
    vc = dest_fpr(ctx, rc);
    helper(vc, cpu_env, va, vb);

    gen_fp_exc_raise(rc, fn11);
}

#define IEEE_CMP3(name)                                                 \
static inline void glue(gen_, name)(DisasContext *ctx,                  \
                                    int ra, int rb, int rc, int fn11)   \
{                                                                       \
    gen_ieee_compare(ctx, gen_helper_##name, ra, rb, rc, fn11);         \
}
IEEE_CMP3(cmptun)
IEEE_CMP3(cmpteq)
IEEE_CMP3(cmptlt)
IEEE_CMP3(cmptle)

static inline uint64_t zapnot_mask(uint8_t lit)
{
    uint64_t mask = 0;
    int i;

    for (i = 0; i < 8; ++i) {
        if ((lit >> i) & 1) {
            mask |= 0xffull << (i * 8);
        }
    }
    return mask;
}
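
/* Worked example: each bit of LIT selects one byte lane to keep, so
   lit = 0x0f expands to the mask 0x00000000ffffffff and lit = 0x80 to
   0xff00000000000000.  */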

/* Implement zapnot with an immediate operand, which expands to some
   form of immediate AND.  This is a basic building block in the
   definition of many of the other byte manipulation instructions.  */
static void gen_zapnoti(TCGv dest, TCGv src, uint8_t lit)
{
    switch (lit) {
    case 0x00:
        tcg_gen_movi_i64(dest, 0);
        break;
    case 0x01:
        tcg_gen_ext8u_i64(dest, src);
        break;
    case 0x03:
        tcg_gen_ext16u_i64(dest, src);
        break;
    case 0x0f:
        tcg_gen_ext32u_i64(dest, src);
        break;
    case 0xff:
        tcg_gen_mov_i64(dest, src);
        break;
    default:
        tcg_gen_andi_i64(dest, src, zapnot_mask(lit));
        break;
    }
}

/* EXTWH, EXTLH, EXTQH */
static void gen_ext_h(DisasContext *ctx, TCGv vc, TCGv va, int rb, bool islit,
                      uint8_t lit, uint8_t byte_mask)
{
    if (islit) {
        tcg_gen_shli_i64(vc, va, (64 - lit * 8) & 0x3f);
    } else {
        TCGv tmp = tcg_temp_new();
        tcg_gen_shli_i64(tmp, load_gpr(ctx, rb), 3);
        tcg_gen_neg_i64(tmp, tmp);
        tcg_gen_andi_i64(tmp, tmp, 0x3f);
        tcg_gen_shl_i64(vc, va, tmp);
        tcg_temp_free(tmp);
    }
    gen_zapnoti(vc, vc, byte_mask);
}

/* EXTBL, EXTWL, EXTLL, EXTQL */
static void gen_ext_l(DisasContext *ctx, TCGv vc, TCGv va, int rb, bool islit,
                      uint8_t lit, uint8_t byte_mask)
{
    if (islit) {
        tcg_gen_shri_i64(vc, va, (lit & 7) * 8);
    } else {
        TCGv tmp = tcg_temp_new();
        tcg_gen_andi_i64(tmp, load_gpr(ctx, rb), 7);
        tcg_gen_shli_i64(tmp, tmp, 3);
        tcg_gen_shr_i64(vc, va, tmp);
        tcg_temp_free(tmp);
    }
    gen_zapnoti(vc, vc, byte_mask);
}
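
/* Worked example: EXTBL with RB = 5 shifts VA right by (5 & 7) * 8 = 40
   bits and then zaps with byte_mask 0x01, leaving byte 5 of VA in the
   low byte of the result.  */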

/* INSWH, INSLH, INSQH */
static void gen_ins_h(DisasContext *ctx, TCGv vc, TCGv va, int rb, bool islit,
                      uint8_t lit, uint8_t byte_mask)
{
    TCGv tmp = tcg_temp_new();

    /* The instruction description has us left-shift the byte mask and extract
       bits <15:8> and apply that zap at the end.  This is equivalent to simply
       performing the zap first and shifting afterward.  */
    gen_zapnoti(tmp, va, byte_mask);

    if (islit) {
        lit &= 7;
        if (unlikely(lit == 0)) {
            tcg_gen_movi_i64(vc, 0);
        } else {
            tcg_gen_shri_i64(vc, tmp, 64 - lit * 8);
        }
    } else {
        TCGv shift = tcg_temp_new();

        /* If (B & 7) == 0, we need to shift by 64 and leave a zero.  Do this
           portably by splitting the shift into two parts: shift_count-1 and 1.
           Arrange for the -1 by using ones-complement instead of
           twos-complement in the negation: ~(B * 8) & 63.  */

        tcg_gen_shli_i64(shift, load_gpr(ctx, rb), 3);
        tcg_gen_not_i64(shift, shift);
        tcg_gen_andi_i64(shift, shift, 0x3f);

        tcg_gen_shr_i64(vc, tmp, shift);
        tcg_gen_shri_i64(vc, vc, 1);
        tcg_temp_free(shift);
    }
    tcg_temp_free(tmp);
}
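
/* Worked example of the split shift above: for B & 7 == 2 the total
   shift should be 64 - 2 * 8 = 48; ~(2 * 8) & 63 = 47, and 47 + 1 = 48.
   For B & 7 == 0, ~0 & 63 = 63, and 63 + 1 = 64 shifts every bit out,
   producing the required zero without relying on an undefined 64-bit
   shift count.  */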

/* INSBL, INSWL, INSLL, INSQL */
static void gen_ins_l(DisasContext *ctx, TCGv vc, TCGv va, int rb, bool islit,
                      uint8_t lit, uint8_t byte_mask)
{
    TCGv tmp = tcg_temp_new();

    /* The instruction description has us left-shift the byte mask
       the same number of byte slots as the data and apply the zap
       at the end.  This is equivalent to simply performing the zap
       first and shifting afterward.  */
    gen_zapnoti(tmp, va, byte_mask);

    if (islit) {
        tcg_gen_shli_i64(vc, tmp, (lit & 7) * 8);
    } else {
        TCGv shift = tcg_temp_new();
        tcg_gen_andi_i64(shift, load_gpr(ctx, rb), 7);
        tcg_gen_shli_i64(shift, shift, 3);
        tcg_gen_shl_i64(vc, tmp, shift);
        tcg_temp_free(shift);
    }
    tcg_temp_free(tmp);
}

/* MSKWH, MSKLH, MSKQH */
static void gen_msk_h(DisasContext *ctx, TCGv vc, TCGv va, int rb, bool islit,
                      uint8_t lit, uint8_t byte_mask)
{
    if (islit) {
        gen_zapnoti(vc, va, ~((byte_mask << (lit & 7)) >> 8));
    } else {
        TCGv shift = tcg_temp_new();
        TCGv mask = tcg_temp_new();

        /* The instruction description is as above, where the byte_mask
           is shifted left, and then we extract bits <15:8>.  This can be
           emulated with a right-shift on the expanded byte mask.  This
           requires extra care because for an input <2:0> == 0 we need a
           shift of 64 bits in order to generate a zero.  This is done by
           splitting the shift into two parts, the variable shift - 1
           followed by a constant 1 shift.  The code we expand below is
           equivalent to ~(B * 8) & 63.  */

        tcg_gen_shli_i64(shift, load_gpr(ctx, rb), 3);
        tcg_gen_not_i64(shift, shift);
        tcg_gen_andi_i64(shift, shift, 0x3f);
        tcg_gen_movi_i64(mask, zapnot_mask(byte_mask));
        tcg_gen_shr_i64(mask, mask, shift);
        tcg_gen_shri_i64(mask, mask, 1);

        tcg_gen_andc_i64(vc, va, mask);

        tcg_temp_free(mask);
        tcg_temp_free(shift);
    }
}

/* MSKBL, MSKWL, MSKLL, MSKQL */
static void gen_msk_l(DisasContext *ctx, TCGv vc, TCGv va, int rb, bool islit,
                      uint8_t lit, uint8_t byte_mask)
{
    if (islit) {
        gen_zapnoti(vc, va, ~(byte_mask << (lit & 7)));
    } else {
        TCGv shift = tcg_temp_new();
        TCGv mask = tcg_temp_new();

        tcg_gen_andi_i64(shift, load_gpr(ctx, rb), 7);
        tcg_gen_shli_i64(shift, shift, 3);
        tcg_gen_movi_i64(mask, zapnot_mask(byte_mask));
        tcg_gen_shl_i64(mask, mask, shift);

        tcg_gen_andc_i64(vc, va, mask);

        tcg_temp_free(mask);
        tcg_temp_free(shift);
    }
}

static void gen_rx(DisasContext *ctx, int ra, int set)
{
    TCGv_i32 tmp;

    if (ra != 31) {
        tcg_gen_ld8u_i64(ctx->ir[ra], cpu_env,
                         offsetof(CPUAlphaState, intr_flag));
    }

    tmp = tcg_const_i32(set);
    tcg_gen_st8_i32(tmp, cpu_env, offsetof(CPUAlphaState, intr_flag));
    tcg_temp_free_i32(tmp);
}

static ExitStatus gen_call_pal(DisasContext *ctx, int palcode)
{
    /* We're emulating OSF/1 PALcode.  Many of these are trivial access
       to internal cpu registers.  */

    /* Unprivileged PAL call */
    if (palcode >= 0x80 && palcode < 0xC0) {
        switch (palcode) {
        case 0x86:
            /* IMB */
            /* No-op inside QEMU.  */
            break;
        case 0x9E:
            /* RDUNIQUE */
            tcg_gen_ld_i64(ctx->ir[IR_V0], cpu_env,
                           offsetof(CPUAlphaState, unique));
            break;
        case 0x9F:
            /* WRUNIQUE */
            tcg_gen_st_i64(ctx->ir[IR_A0], cpu_env,
                           offsetof(CPUAlphaState, unique));
            break;
        default:
            palcode &= 0xbf;
            goto do_call_pal;
        }
        return NO_EXIT;
    }

#ifndef CONFIG_USER_ONLY
    /* Privileged PAL code */
    if (palcode < 0x40 && (ctx->tb->flags & TB_FLAGS_USER_MODE) == 0) {
        switch (palcode) {
        case 0x01:
            /* CFLUSH */
            /* No-op inside QEMU.  */
            break;
        case 0x02:
            /* DRAINA */
            /* No-op inside QEMU.  */
            break;
        case 0x2D:
            /* WRVPTPTR */
            tcg_gen_st_i64(ctx->ir[IR_A0], cpu_env,
                           offsetof(CPUAlphaState, vptptr));
            break;
        case 0x31:
            /* WRVAL */
            tcg_gen_st_i64(ctx->ir[IR_A0], cpu_env,
                           offsetof(CPUAlphaState, sysval));
            break;
        case 0x32:
            /* RDVAL */
            tcg_gen_ld_i64(ctx->ir[IR_V0], cpu_env,
                           offsetof(CPUAlphaState, sysval));
            break;

        case 0x35: {
            /* SWPIPL */
            TCGv tmp;

            /* Note that we already know we're in kernel mode, so we know
               that PS only contains the 3 IPL bits.  */
            tcg_gen_ld8u_i64(ctx->ir[IR_V0], cpu_env,
                             offsetof(CPUAlphaState, ps));

            /* But make sure and store only the 3 IPL bits from the user.  */
            tmp = tcg_temp_new();
            tcg_gen_andi_i64(tmp, ctx->ir[IR_A0], PS_INT_MASK);
            tcg_gen_st8_i64(tmp, cpu_env, offsetof(CPUAlphaState, ps));
            tcg_temp_free(tmp);
            break;
        }

        case 0x36:
            /* RDPS */
            tcg_gen_ld8u_i64(ctx->ir[IR_V0], cpu_env,
                             offsetof(CPUAlphaState, ps));
            break;
        case 0x38:
            /* WRUSP */
            tcg_gen_st_i64(ctx->ir[IR_A0], cpu_env,
                           offsetof(CPUAlphaState, usp));
            break;
        case 0x3A:
            /* RDUSP */
            tcg_gen_ld_i64(ctx->ir[IR_V0], cpu_env,
                           offsetof(CPUAlphaState, usp));
            break;
        case 0x3C:
            /* WHAMI */
            tcg_gen_ld32s_i64(ctx->ir[IR_V0], cpu_env,
                -offsetof(AlphaCPU, env) + offsetof(CPUState, cpu_index));
            break;

        default:
            palcode &= 0x3f;
            goto do_call_pal;
        }
        return NO_EXIT;
    }
#endif
    return gen_invalid(ctx);

 do_call_pal:
#ifdef CONFIG_USER_ONLY
    return gen_excp(ctx, EXCP_CALL_PAL, palcode);
#else
    {
        TCGv tmp = tcg_temp_new();
        uint64_t exc_addr = ctx->pc;
        uint64_t entry = ctx->palbr;

        if (ctx->tb->flags & TB_FLAGS_PAL_MODE) {
            exc_addr |= 1;
        } else {
            tcg_gen_movi_i64(tmp, 1);
            tcg_gen_st8_i64(tmp, cpu_env, offsetof(CPUAlphaState, pal_mode));
        }

        tcg_gen_movi_i64(tmp, exc_addr);
        tcg_gen_st_i64(tmp, cpu_env, offsetof(CPUAlphaState, exc_addr));
        tcg_temp_free(tmp);

        entry += (palcode & 0x80
                  ? 0x2000 + (palcode - 0x80) * 64
                  : 0x1000 + palcode * 64);
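
        /* Worked example: OSF/1 callsys is the unprivileged CALL_PAL 0x83,
           so its entry point is palbr + 0x2000 + (0x83 - 0x80) * 64, i.e.
           palbr + 0x20c0; privileged calls land in the 0x1000 block.  */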

        /* Since the destination is running in PALmode, we don't really
           need the page permissions check.  We'll see the existence of
           the page when we create the TB, and we'll flush all TBs if
           we change the PAL base register.  */
        if (!ctx->singlestep_enabled && !(ctx->tb->cflags & CF_LAST_IO)) {
            tcg_gen_goto_tb(0);
            tcg_gen_movi_i64(cpu_pc, entry);
            tcg_gen_exit_tb((uintptr_t)ctx->tb);
            return EXIT_GOTO_TB;
        } else {
            tcg_gen_movi_i64(cpu_pc, entry);
            return EXIT_PC_UPDATED;
        }
    }
#endif
}

#ifndef CONFIG_USER_ONLY

#define PR_BYTE         0x100000
#define PR_LONG         0x200000

static int cpu_pr_data(int pr)
{
    switch (pr) {
    case  0: return offsetof(CPUAlphaState, ps) | PR_BYTE;
    case  1: return offsetof(CPUAlphaState, fen) | PR_BYTE;
    case  2: return offsetof(CPUAlphaState, pcc_ofs) | PR_LONG;
    case  3: return offsetof(CPUAlphaState, trap_arg0);
    case  4: return offsetof(CPUAlphaState, trap_arg1);
    case  5: return offsetof(CPUAlphaState, trap_arg2);
    case  6: return offsetof(CPUAlphaState, exc_addr);
    case  7: return offsetof(CPUAlphaState, palbr);
    case  8: return offsetof(CPUAlphaState, ptbr);
    case  9: return offsetof(CPUAlphaState, vptptr);
    case 10: return offsetof(CPUAlphaState, unique);
    case 11: return offsetof(CPUAlphaState, sysval);
    case 12: return offsetof(CPUAlphaState, usp);

    case 40 ... 63:
        return offsetof(CPUAlphaState, scratch[pr - 40]);

    case 251:
        return offsetof(CPUAlphaState, alarm_expire);
    }
    return 0;
}
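
/* Note on the encoding above: the returned value is an env offset with an
   optional PR_BYTE/PR_LONG size tag OR'd into high bits.  This relies on
   every CPUAlphaState field offset being well below 0x100000, which
   comfortably holds for this structure.  */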

static ExitStatus gen_mfpr(DisasContext *ctx, TCGv va, int regno)
{
    void (*helper)(TCGv);
    int data;

    switch (regno) {
    case 32 ... 39:
        /* Accessing the "non-shadow" general registers.  */
        regno = regno == 39 ? 25 : regno - 32 + 8;
        tcg_gen_mov_i64(va, cpu_std_ir[regno]);
        break;

    case 250: /* WALLTIME */
        helper = gen_helper_get_walltime;
        goto do_helper;
    case 249: /* VMTIME */
        helper = gen_helper_get_vmtime;
    do_helper:
        if (use_icount) {
            gen_io_start();
            helper(va);
            gen_io_end();
            return EXIT_PC_STALE;
        } else {
            helper(va);
        }
        break;

    default:
        /* The basic registers are data only, and unknown registers
           are read-zero, write-ignore.  */
        data = cpu_pr_data(regno);
        if (data == 0) {
            tcg_gen_movi_i64(va, 0);
        } else if (data & PR_BYTE) {
            tcg_gen_ld8u_i64(va, cpu_env, data & ~PR_BYTE);
        } else if (data & PR_LONG) {
            tcg_gen_ld32s_i64(va, cpu_env, data & ~PR_LONG);
        } else {
            tcg_gen_ld_i64(va, cpu_env, data);
        }
        break;
    }

    return NO_EXIT;
}

static ExitStatus gen_mtpr(DisasContext *ctx, TCGv vb, int regno)
{
    TCGv tmp;
    int data;

    switch (regno) {
    case 255:
        /* TBIA */
        gen_helper_tbia(cpu_env);
        break;

    case 254:
        /* TBIS */
        gen_helper_tbis(cpu_env, vb);
        break;

    case 253:
        /* WAIT */
        tmp = tcg_const_i64(1);
        tcg_gen_st32_i64(tmp, cpu_env, -offsetof(AlphaCPU, env) +
                         offsetof(CPUState, halted));
        return gen_excp(ctx, EXCP_HLT, 0);

    case 252:
        /* HALT */
        gen_helper_halt(vb);
        return EXIT_PC_STALE;

    case 251:
        /* ALARM */
        gen_helper_set_alarm(cpu_env, vb);
        break;

    case 7:
        /* PALBR */
        tcg_gen_st_i64(vb, cpu_env, offsetof(CPUAlphaState, palbr));
        /* Changing the PAL base register implies un-chaining all of the TBs
           that ended with a CALL_PAL.  Since the base register usually only
           changes during boot, flushing everything works well.  */
        gen_helper_tb_flush(cpu_env);
        return EXIT_PC_STALE;

    case 32 ... 39:
        /* Accessing the "non-shadow" general registers.  */
        regno = regno == 39 ? 25 : regno - 32 + 8;
        tcg_gen_mov_i64(cpu_std_ir[regno], vb);
        break;

    default:
        /* The basic registers are data only, and unknown registers
           are read-zero, write-ignore.  */
        data = cpu_pr_data(regno);
        if (data != 0) {
            if (data & PR_BYTE) {
                tcg_gen_st8_i64(vb, cpu_env, data & ~PR_BYTE);
            } else if (data & PR_LONG) {
                tcg_gen_st32_i64(vb, cpu_env, data & ~PR_LONG);
            } else {
                tcg_gen_st_i64(vb, cpu_env, data);
            }
        }
        break;
    }

    return NO_EXIT;
}
#endif /* !USER_ONLY */

#define REQUIRE_NO_LIT                          \
    do {                                        \
        if (real_islit) {                       \
            goto invalid_opc;                   \
        }                                       \
    } while (0)

#define REQUIRE_TB_FLAG(FLAG)                   \
    do {                                        \
        if ((ctx->tb->flags & (FLAG)) == 0) {   \
            goto invalid_opc;                   \
        }                                       \
    } while (0)

#define REQUIRE_REG_31(WHICH)                   \
    do {                                        \
        if (WHICH != 31) {                      \
            goto invalid_opc;                   \
        }                                       \
    } while (0)

static ExitStatus translate_one(DisasContext *ctx, uint32_t insn)
{
    int32_t disp21, disp16, disp12 __attribute__((unused));
    uint16_t fn11;
    uint8_t opc, ra, rb, rc, fpfn, fn7, lit;
    bool islit, real_islit;
    TCGv va, vb, vc, tmp, tmp2;
    TCGv_i32 t32;
    ExitStatus ret;

    /* Decode all instruction fields */
    opc = extract32(insn, 26, 6);
    ra = extract32(insn, 21, 5);
    rb = extract32(insn, 16, 5);
    rc = extract32(insn, 0, 5);
    real_islit = islit = extract32(insn, 12, 1);
    lit = extract32(insn, 13, 8);

    disp21 = sextract32(insn, 0, 21);
    disp16 = sextract32(insn, 0, 16);
    disp12 = sextract32(insn, 0, 12);

    fn11 = extract32(insn, 5, 11);
    fpfn = extract32(insn, 5, 6);
    fn7 = extract32(insn, 5, 7);
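
    /* Decode example: insn 0x47ff041f yields opc = 0x11, ra = rb = rc = 31
       and fn7 = 0x20, i.e. BIS $31,$31,$31 -- the canonical Alpha NOP,
       which the rc == 31 special case under opcode 0x11 below turns into
       no generated code at all.  */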

    if (rb == 31 && !islit) {
        islit = true;
        lit = 0;
    }

    ret = NO_EXIT;
    switch (opc) {
    case 0x00:
        /* CALL_PAL */
        ret = gen_call_pal(ctx, insn & 0x03ffffff);
        break;
    case 0x01:
        /* OPC01 */
        goto invalid_opc;
    case 0x02:
        /* OPC02 */
        goto invalid_opc;
    case 0x03:
        /* OPC03 */
        goto invalid_opc;
    case 0x04:
        /* OPC04 */
        goto invalid_opc;
    case 0x05:
        /* OPC05 */
        goto invalid_opc;
    case 0x06:
        /* OPC06 */
        goto invalid_opc;
    case 0x07:
        /* OPC07 */
        goto invalid_opc;

    case 0x09:
        /* LDAH */
        disp16 = (uint32_t)disp16 << 16;
        /* fall through */
    case 0x08:
        /* LDA */
        va = dest_gpr(ctx, ra);
        /* It's worth special-casing immediate loads.  */
        if (rb == 31) {
            tcg_gen_movi_i64(va, disp16);
        } else {
            tcg_gen_addi_i64(va, load_gpr(ctx, rb), disp16);
        }
        break;

    case 0x0A:
        /* LDBU */
        REQUIRE_TB_FLAG(TB_FLAGS_AMASK_BWX);
        gen_load_mem(ctx, &tcg_gen_qemu_ld8u, ra, rb, disp16, 0, 0);
        break;
    case 0x0B:
        /* LDQ_U */
        gen_load_mem(ctx, &tcg_gen_qemu_ld64, ra, rb, disp16, 0, 1);
        break;
    case 0x0C:
        /* LDWU */
        REQUIRE_TB_FLAG(TB_FLAGS_AMASK_BWX);
        gen_load_mem(ctx, &tcg_gen_qemu_ld16u, ra, rb, disp16, 0, 0);
        break;
    case 0x0D:
        /* STW */
        REQUIRE_TB_FLAG(TB_FLAGS_AMASK_BWX);
        gen_store_mem(ctx, &tcg_gen_qemu_st16, ra, rb, disp16, 0, 0);
        break;
    case 0x0E:
        /* STB */
        REQUIRE_TB_FLAG(TB_FLAGS_AMASK_BWX);
        gen_store_mem(ctx, &tcg_gen_qemu_st8, ra, rb, disp16, 0, 0);
        break;
    case 0x0F:
        /* STQ_U */
        gen_store_mem(ctx, &tcg_gen_qemu_st64, ra, rb, disp16, 0, 1);
        break;

    case 0x10:
        vc = dest_gpr(ctx, rc);
        vb = load_gpr_lit(ctx, rb, lit, islit);

        if (ra == 31) {
            if (fn7 == 0x00) {
                /* Special case ADDL as SEXTL.  */
                tcg_gen_ext32s_i64(vc, vb);
                break;
            }
            if (fn7 == 0x29) {
                /* Special case SUBQ as NEGQ.  */
                tcg_gen_neg_i64(vc, vb);
                break;
            }
        }

        va = load_gpr(ctx, ra);
        switch (fn7) {
        case 0x00:
            /* ADDL */
            tcg_gen_add_i64(vc, va, vb);
            tcg_gen_ext32s_i64(vc, vc);
            break;
        case 0x02:
            /* S4ADDL */
            tmp = tcg_temp_new();
            tcg_gen_shli_i64(tmp, va, 2);
            tcg_gen_add_i64(tmp, tmp, vb);
            tcg_gen_ext32s_i64(vc, tmp);
            tcg_temp_free(tmp);
            break;
        case 0x09:
            /* SUBL */
            tcg_gen_sub_i64(vc, va, vb);
            tcg_gen_ext32s_i64(vc, vc);
            break;
        case 0x0B:
            /* S4SUBL */
            tmp = tcg_temp_new();
            tcg_gen_shli_i64(tmp, va, 2);
            tcg_gen_sub_i64(tmp, tmp, vb);
            tcg_gen_ext32s_i64(vc, tmp);
            tcg_temp_free(tmp);
            break;
        case 0x0F:
            /* CMPBGE */
            if (ra == 31) {
                /* Special case 0 >= X as X == 0.  */
                gen_helper_cmpbe0(vc, vb);
            } else {
                gen_helper_cmpbge(vc, va, vb);
            }
            break;
        case 0x12:
            /* S8ADDL */
            tmp = tcg_temp_new();
            tcg_gen_shli_i64(tmp, va, 3);
            tcg_gen_add_i64(tmp, tmp, vb);
            tcg_gen_ext32s_i64(vc, tmp);
            tcg_temp_free(tmp);
            break;
        case 0x1B:
            /* S8SUBL */
            tmp = tcg_temp_new();
            tcg_gen_shli_i64(tmp, va, 3);
            tcg_gen_sub_i64(tmp, tmp, vb);
            tcg_gen_ext32s_i64(vc, tmp);
            tcg_temp_free(tmp);
            break;
        case 0x1D:
            /* CMPULT */
            tcg_gen_setcond_i64(TCG_COND_LTU, vc, va, vb);
            break;
        case 0x20:
            /* ADDQ */
            tcg_gen_add_i64(vc, va, vb);
            break;
        case 0x22:
            /* S4ADDQ */
            tmp = tcg_temp_new();
            tcg_gen_shli_i64(tmp, va, 2);
            tcg_gen_add_i64(vc, tmp, vb);
            tcg_temp_free(tmp);
            break;
        case 0x29:
            /* SUBQ */
            tcg_gen_sub_i64(vc, va, vb);
            break;
        case 0x2B:
            /* S4SUBQ */
            tmp = tcg_temp_new();
            tcg_gen_shli_i64(tmp, va, 2);
            tcg_gen_sub_i64(vc, tmp, vb);
            tcg_temp_free(tmp);
            break;
        case 0x2D:
            /* CMPEQ */
            tcg_gen_setcond_i64(TCG_COND_EQ, vc, va, vb);
            break;
        case 0x32:
            /* S8ADDQ */
            tmp = tcg_temp_new();
            tcg_gen_shli_i64(tmp, va, 3);
            tcg_gen_add_i64(vc, tmp, vb);
            tcg_temp_free(tmp);
            break;
        case 0x3B:
            /* S8SUBQ */
            tmp = tcg_temp_new();
            tcg_gen_shli_i64(tmp, va, 3);
            tcg_gen_sub_i64(vc, tmp, vb);
            tcg_temp_free(tmp);
            break;
        case 0x3D:
            /* CMPULE */
            tcg_gen_setcond_i64(TCG_COND_LEU, vc, va, vb);
            break;
        case 0x40:
            /* ADDL/V */
            tmp = tcg_temp_new();
            tcg_gen_ext32s_i64(tmp, va);
            tcg_gen_ext32s_i64(vc, vb);
            tcg_gen_add_i64(tmp, tmp, vc);
            tcg_gen_ext32s_i64(vc, tmp);
            gen_helper_check_overflow(cpu_env, vc, tmp);
            tcg_temp_free(tmp);
            break;
        case 0x49:
            /* SUBL/V */
            tmp = tcg_temp_new();
            tcg_gen_ext32s_i64(tmp, va);
            tcg_gen_ext32s_i64(vc, vb);
            tcg_gen_sub_i64(tmp, tmp, vc);
            tcg_gen_ext32s_i64(vc, tmp);
            gen_helper_check_overflow(cpu_env, vc, tmp);
            tcg_temp_free(tmp);
            break;
        case 0x4D:
            /* CMPLT */
            tcg_gen_setcond_i64(TCG_COND_LT, vc, va, vb);
            break;
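
        /* The ADDQ/V and SUBQ/V sequences below use the classic two's
           complement rule: signed addition overflows iff both operands
           have the same sign and the result's sign differs, i.e. bit 63
           of ~(va ^ vb) & (va ^ result); for subtraction the operand
           signs must differ, hence (va ^ vb) & (va ^ result).  */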
        case 0x60:
            /* ADDQ/V */
            tmp = tcg_temp_new();
            tmp2 = tcg_temp_new();
            tcg_gen_eqv_i64(tmp, va, vb);
            tcg_gen_mov_i64(tmp2, va);
            tcg_gen_add_i64(vc, va, vb);
            tcg_gen_xor_i64(tmp2, tmp2, vc);
            tcg_gen_and_i64(tmp, tmp, tmp2);
            tcg_gen_shri_i64(tmp, tmp, 63);
            tcg_gen_movi_i64(tmp2, 0);
            gen_helper_check_overflow(cpu_env, tmp, tmp2);
            tcg_temp_free(tmp);
            tcg_temp_free(tmp2);
            break;
        case 0x69:
            /* SUBQ/V */
            tmp = tcg_temp_new();
            tmp2 = tcg_temp_new();
            tcg_gen_xor_i64(tmp, va, vb);
            tcg_gen_mov_i64(tmp2, va);
            tcg_gen_sub_i64(vc, va, vb);
            tcg_gen_xor_i64(tmp2, tmp2, vc);
            tcg_gen_and_i64(tmp, tmp, tmp2);
            tcg_gen_shri_i64(tmp, tmp, 63);
            tcg_gen_movi_i64(tmp2, 0);
            gen_helper_check_overflow(cpu_env, tmp, tmp2);
            tcg_temp_free(tmp);
            tcg_temp_free(tmp2);
            break;
        case 0x6D:
            /* CMPLE */
            tcg_gen_setcond_i64(TCG_COND_LE, vc, va, vb);
            break;
        default:
            goto invalid_opc;
        }
        break;

    case 0x11:
        if (fn7 == 0x20) {
            if (rc == 31) {
                /* Special case BIS as NOP.  */
                break;
            }
            if (ra == 31) {
                /* Special case BIS as MOV.  */
                vc = dest_gpr(ctx, rc);
                if (islit) {
                    tcg_gen_movi_i64(vc, lit);
                } else {
                    tcg_gen_mov_i64(vc, load_gpr(ctx, rb));
                }
                break;
            }
        }

        vc = dest_gpr(ctx, rc);
        vb = load_gpr_lit(ctx, rb, lit, islit);

        if (fn7 == 0x28 && ra == 31) {
            /* Special case ORNOT as NOT.  */
            tcg_gen_not_i64(vc, vb);
            break;
        }

        va = load_gpr(ctx, ra);
        switch (fn7) {
        case 0x00:
            /* AND */
            tcg_gen_and_i64(vc, va, vb);
            break;
        case 0x08:
            /* BIC */
            tcg_gen_andc_i64(vc, va, vb);
            break;
        case 0x14:
            /* CMOVLBS */
            tmp = tcg_temp_new();
            tcg_gen_andi_i64(tmp, va, 1);
            tcg_gen_movcond_i64(TCG_COND_NE, vc, tmp, load_zero(ctx),
                                vb, load_gpr(ctx, rc));
            tcg_temp_free(tmp);
            break;
        case 0x16:
            /* CMOVLBC */
            tmp = tcg_temp_new();
            tcg_gen_andi_i64(tmp, va, 1);
            tcg_gen_movcond_i64(TCG_COND_EQ, vc, tmp, load_zero(ctx),
                                vb, load_gpr(ctx, rc));
            tcg_temp_free(tmp);
            break;
        case 0x20:
            /* BIS */
            tcg_gen_or_i64(vc, va, vb);
            break;
        case 0x24:
            /* CMOVEQ */
            tcg_gen_movcond_i64(TCG_COND_EQ, vc, va, load_zero(ctx),
                                vb, load_gpr(ctx, rc));
            break;
        case 0x26:
            /* CMOVNE */
            tcg_gen_movcond_i64(TCG_COND_NE, vc, va, load_zero(ctx),
                                vb, load_gpr(ctx, rc));
            break;
        case 0x28:
            /* ORNOT */
            tcg_gen_orc_i64(vc, va, vb);
            break;
        case 0x40:
            /* XOR */
            tcg_gen_xor_i64(vc, va, vb);
            break;
        case 0x44:
            /* CMOVLT */
            tcg_gen_movcond_i64(TCG_COND_LT, vc, va, load_zero(ctx),
                                vb, load_gpr(ctx, rc));
            break;
        case 0x46:
            /* CMOVGE */
            tcg_gen_movcond_i64(TCG_COND_GE, vc, va, load_zero(ctx),
                                vb, load_gpr(ctx, rc));
            break;
        case 0x48:
            /* EQV */
            tcg_gen_eqv_i64(vc, va, vb);
            break;
        case 0x61:
            /* AMASK */
            REQUIRE_REG_31(ra);
            {
                uint64_t amask = ctx->tb->flags >> TB_FLAGS_AMASK_SHIFT;
                tcg_gen_andi_i64(vc, vb, ~amask);
            }
            break;
        case 0x64:
            /* CMOVLE */
            tcg_gen_movcond_i64(TCG_COND_LE, vc, va, load_zero(ctx),
                                vb, load_gpr(ctx, rc));
            break;
        case 0x66:
            /* CMOVGT */
            tcg_gen_movcond_i64(TCG_COND_GT, vc, va, load_zero(ctx),
                                vb, load_gpr(ctx, rc));
            break;
        case 0x6C:
            /* IMPLVER */
            REQUIRE_REG_31(ra);
            tcg_gen_movi_i64(vc, ctx->implver);
            break;
        default:
            goto invalid_opc;
        }
        break;

    case 0x12:
        vc = dest_gpr(ctx, rc);
        va = load_gpr(ctx, ra);
        switch (fn7) {
        case 0x02:
            /* MSKBL */
            gen_msk_l(ctx, vc, va, rb, islit, lit, 0x01);
            break;
        case 0x06:
            /* EXTBL */
            gen_ext_l(ctx, vc, va, rb, islit, lit, 0x01);
            break;
        case 0x0B:
            /* INSBL */
            gen_ins_l(ctx, vc, va, rb, islit, lit, 0x01);
            break;
        case 0x12:
            /* MSKWL */
            gen_msk_l(ctx, vc, va, rb, islit, lit, 0x03);
            break;
        case 0x16:
            /* EXTWL */
            gen_ext_l(ctx, vc, va, rb, islit, lit, 0x03);
            break;
        case 0x1B:
            /* INSWL */
            gen_ins_l(ctx, vc, va, rb, islit, lit, 0x03);
            break;
        case 0x22:
            /* MSKLL */
            gen_msk_l(ctx, vc, va, rb, islit, lit, 0x0f);
            break;
        case 0x26:
            /* EXTLL */
            gen_ext_l(ctx, vc, va, rb, islit, lit, 0x0f);
            break;
        case 0x2B:
            /* INSLL */
            gen_ins_l(ctx, vc, va, rb, islit, lit, 0x0f);
            break;
        case 0x30:
            /* ZAP */
            if (islit) {
                gen_zapnoti(vc, va, ~lit);
            } else {
                gen_helper_zap(vc, va, load_gpr(ctx, rb));
            }
            break;
        case 0x31:
            /* ZAPNOT */
            if (islit) {
                gen_zapnoti(vc, va, lit);
            } else {
                gen_helper_zapnot(vc, va, load_gpr(ctx, rb));
            }
            break;
        case 0x32:
            /* MSKQL */
            gen_msk_l(ctx, vc, va, rb, islit, lit, 0xff);
            break;
        case 0x34:
            /* SRL */
            if (islit) {
                tcg_gen_shri_i64(vc, va, lit & 0x3f);
            } else {
                tmp = tcg_temp_new();
                vb = load_gpr(ctx, rb);
                tcg_gen_andi_i64(tmp, vb, 0x3f);
                tcg_gen_shr_i64(vc, va, tmp);
                tcg_temp_free(tmp);
            }
            break;
        case 0x36:
            /* EXTQL */
            gen_ext_l(ctx, vc, va, rb, islit, lit, 0xff);
            break;
        case 0x39:
            /* SLL */
            if (islit) {
                tcg_gen_shli_i64(vc, va, lit & 0x3f);
            } else {
                tmp = tcg_temp_new();
                vb = load_gpr(ctx, rb);
                tcg_gen_andi_i64(tmp, vb, 0x3f);
                tcg_gen_shl_i64(vc, va, tmp);
                tcg_temp_free(tmp);
            }
            break;
        case 0x3B:
            /* INSQL */
            gen_ins_l(ctx, vc, va, rb, islit, lit, 0xff);
            break;
        case 0x3C:
            /* SRA */
            if (islit) {
                tcg_gen_sari_i64(vc, va, lit & 0x3f);
            } else {
                tmp = tcg_temp_new();
                vb = load_gpr(ctx, rb);
                tcg_gen_andi_i64(tmp, vb, 0x3f);
                tcg_gen_sar_i64(vc, va, tmp);
                tcg_temp_free(tmp);
            }
            break;
        case 0x52:
            /* MSKWH */
            gen_msk_h(ctx, vc, va, rb, islit, lit, 0x03);
            break;
        case 0x57:
            /* INSWH */
            gen_ins_h(ctx, vc, va, rb, islit, lit, 0x03);
            break;
        case 0x5A:
            /* EXTWH */
            gen_ext_h(ctx, vc, va, rb, islit, lit, 0x03);
            break;
        case 0x62:
            /* MSKLH */
            gen_msk_h(ctx, vc, va, rb, islit, lit, 0x0f);
            break;
        case 0x67:
            /* INSLH */
            gen_ins_h(ctx, vc, va, rb, islit, lit, 0x0f);
            break;
        case 0x6A:
            /* EXTLH */
            gen_ext_h(ctx, vc, va, rb, islit, lit, 0x0f);
            break;
        case 0x72:
            /* MSKQH */
            gen_msk_h(ctx, vc, va, rb, islit, lit, 0xff);
            break;
        case 0x77:
            /* INSQH */
            gen_ins_h(ctx, vc, va, rb, islit, lit, 0xff);
            break;
        case 0x7A:
            /* EXTQH */
            gen_ext_h(ctx, vc, va, rb, islit, lit, 0xff);
            break;
        default:
            goto invalid_opc;
        }
        break;

    case 0x13:
        vc = dest_gpr(ctx, rc);
        vb = load_gpr_lit(ctx, rb, lit, islit);
        va = load_gpr(ctx, ra);
        switch (fn7) {
        case 0x00:
            /* MULL */
            tcg_gen_mul_i64(vc, va, vb);
            tcg_gen_ext32s_i64(vc, vc);
            break;
        case 0x20:
            /* MULQ */
            tcg_gen_mul_i64(vc, va, vb);
            break;
        case 0x30:
            /* UMULH */
            tmp = tcg_temp_new();
            tcg_gen_mulu2_i64(tmp, vc, va, vb);
            tcg_temp_free(tmp);
            break;
        case 0x40:
            /* MULL/V */
            tmp = tcg_temp_new();
            tcg_gen_ext32s_i64(tmp, va);
            tcg_gen_ext32s_i64(vc, vb);
            tcg_gen_mul_i64(tmp, tmp, vc);
            tcg_gen_ext32s_i64(vc, tmp);
            gen_helper_check_overflow(cpu_env, vc, tmp);
            tcg_temp_free(tmp);
            break;
        case 0x60:
            /* MULQ/V */
            tmp = tcg_temp_new();
            tmp2 = tcg_temp_new();
            tcg_gen_muls2_i64(vc, tmp, va, vb);
            tcg_gen_sari_i64(tmp2, vc, 63);
            gen_helper_check_overflow(cpu_env, tmp, tmp2);
            tcg_temp_free(tmp);
            tcg_temp_free(tmp2);
            break;
        default:
            goto invalid_opc;
        }
        break;

    case 0x14:
        REQUIRE_TB_FLAG(TB_FLAGS_AMASK_FIX);
        vc = dest_fpr(ctx, rc);
        switch (fpfn) { /* fn11 & 0x3F */
        case 0x04:
            /* ITOFS */
            REQUIRE_REG_31(rb);
            t32 = tcg_temp_new_i32();
            va = load_gpr(ctx, ra);
            tcg_gen_extrl_i64_i32(t32, va);
            gen_helper_memory_to_s(vc, t32);
            tcg_temp_free_i32(t32);
            break;
        case 0x0A:
            /* SQRTF */
            REQUIRE_REG_31(ra);
            vb = load_fpr(ctx, rb);
            gen_helper_sqrtf(vc, cpu_env, vb);
            break;
        case 0x0B:
            /* SQRTS */
            REQUIRE_REG_31(ra);
            gen_sqrts(ctx, rb, rc, fn11);
            break;
        case 0x14:
            /* ITOFF */
            REQUIRE_REG_31(rb);
            t32 = tcg_temp_new_i32();
            va = load_gpr(ctx, ra);
            tcg_gen_extrl_i64_i32(t32, va);
            gen_helper_memory_to_f(vc, t32);
            tcg_temp_free_i32(t32);
            break;
        case 0x24:
            /* ITOFT */
            REQUIRE_REG_31(rb);
            va = load_gpr(ctx, ra);
            tcg_gen_mov_i64(vc, va);
            break;
        case 0x2A:
            /* SQRTG */
            REQUIRE_REG_31(ra);
            vb = load_fpr(ctx, rb);
            gen_helper_sqrtg(vc, cpu_env, vb);
            break;
        case 0x02B:
            /* SQRTT */
            REQUIRE_REG_31(ra);
            gen_sqrtt(ctx, rb, rc, fn11);
            break;
        default:
            goto invalid_opc;
        }
        break;

    case 0x15:
        /* VAX floating point */
        /* XXX: rounding mode and trap are ignored (!) */
        vc = dest_fpr(ctx, rc);
        vb = load_fpr(ctx, rb);
        va = load_fpr(ctx, ra);
        switch (fpfn) { /* fn11 & 0x3F */
        case 0x00:
            /* ADDF */
            gen_helper_addf(vc, cpu_env, va, vb);
            break;
        case 0x01:
            /* SUBF */
            gen_helper_subf(vc, cpu_env, va, vb);
            break;
        case 0x02:
            /* MULF */
            gen_helper_mulf(vc, cpu_env, va, vb);
            break;
        case 0x03:
            /* DIVF */
            gen_helper_divf(vc, cpu_env, va, vb);
            break;
        case 0x1E:
            /* CVTDG -- TODO */
            REQUIRE_REG_31(ra);
            goto invalid_opc;
        case 0x20:
            /* ADDG */
            gen_helper_addg(vc, cpu_env, va, vb);
            break;
        case 0x21:
            /* SUBG */
            gen_helper_subg(vc, cpu_env, va, vb);
            break;
        case 0x22:
            /* MULG */
            gen_helper_mulg(vc, cpu_env, va, vb);
            break;
        case 0x23:
            /* DIVG */
            gen_helper_divg(vc, cpu_env, va, vb);
            break;
        case 0x25:
            /* CMPGEQ */
            gen_helper_cmpgeq(vc, cpu_env, va, vb);
            break;
        case 0x26:
            /* CMPGLT */
            gen_helper_cmpglt(vc, cpu_env, va, vb);
            break;
        case 0x27:
            /* CMPGLE */
            gen_helper_cmpgle(vc, cpu_env, va, vb);
            break;
        case 0x2C:
            /* CVTGF */
            REQUIRE_REG_31(ra);
            gen_helper_cvtgf(vc, cpu_env, vb);
            break;
        case 0x2D:
            /* CVTGD -- TODO */
            REQUIRE_REG_31(ra);
            goto invalid_opc;
        case 0x2F:
            /* CVTGQ */
            REQUIRE_REG_31(ra);
            gen_helper_cvtgq(vc, cpu_env, vb);
            break;
        case 0x3C:
            /* CVTQF */
            REQUIRE_REG_31(ra);
            gen_helper_cvtqf(vc, cpu_env, vb);
            break;
        case 0x3E:
            /* CVTQG */
            REQUIRE_REG_31(ra);
            gen_helper_cvtqg(vc, cpu_env, vb);
            break;
        default:
            goto invalid_opc;
        }
        break;

    case 0x16:
        /* IEEE floating-point */
        switch (fpfn) { /* fn11 & 0x3F */
        case 0x00:
            /* ADDS */
            gen_adds(ctx, ra, rb, rc, fn11);
            break;
        case 0x01:
            /* SUBS */
            gen_subs(ctx, ra, rb, rc, fn11);
            break;
        case 0x02:
            /* MULS */
            gen_muls(ctx, ra, rb, rc, fn11);
            break;
        case 0x03:
            /* DIVS */
            gen_divs(ctx, ra, rb, rc, fn11);
            break;
        case 0x20:
            /* ADDT */
            gen_addt(ctx, ra, rb, rc, fn11);
            break;
        case 0x21:
            /* SUBT */
            gen_subt(ctx, ra, rb, rc, fn11);
            break;
        case 0x22:
            /* MULT */
            gen_mult(ctx, ra, rb, rc, fn11);
            break;
        case 0x23:
            /* DIVT */
            gen_divt(ctx, ra, rb, rc, fn11);
            break;
        case 0x24:
            /* CMPTUN */
            gen_cmptun(ctx, ra, rb, rc, fn11);
            break;
        case 0x25:
            /* CMPTEQ */
            gen_cmpteq(ctx, ra, rb, rc, fn11);
            break;
        case 0x26:
            /* CMPTLT */
            gen_cmptlt(ctx, ra, rb, rc, fn11);
            break;
        case 0x27:
            /* CMPTLE */
            gen_cmptle(ctx, ra, rb, rc, fn11);
            break;
        case 0x2C:
            REQUIRE_REG_31(ra);
            if (fn11 == 0x2AC || fn11 == 0x6AC) {
                /* CVTST */
                gen_cvtst(ctx, rb, rc, fn11);
            } else {
                /* CVTTS */
                gen_cvtts(ctx, rb, rc, fn11);
            }
            break;
        case 0x2F:
            /* CVTTQ */
            REQUIRE_REG_31(ra);
            gen_cvttq(ctx, rb, rc, fn11);
            break;
        case 0x3C:
            /* CVTQS */
            REQUIRE_REG_31(ra);
            gen_cvtqs(ctx, rb, rc, fn11);
            break;
        case 0x3E:
            /* CVTQT */
            REQUIRE_REG_31(ra);
            gen_cvtqt(ctx, rb, rc, fn11);
            break;
        default:
            goto invalid_opc;
        }
        break;
2227 case 0x17:
2228 switch (fn11) {
2229 case 0x010:
2230 /* CVTLQ */
2231 REQUIRE_REG_31(ra);
2232 vc = dest_fpr(ctx, rc);
2233 vb = load_fpr(ctx, rb);
2234 gen_cvtlq(vc, vb);
2235 break;
2236 case 0x020:
2237 /* CPYS */
2238 if (rc == 31) {
2239 /* Special case CPYS as FNOP. */
2240 } else {
2241 vc = dest_fpr(ctx, rc);
2242 va = load_fpr(ctx, ra);
2243 if (ra == rb) {
2244 /* Special case CPYS as FMOV. */
2245 tcg_gen_mov_i64(vc, va);
2246 } else {
2247 vb = load_fpr(ctx, rb);
2248 gen_cpy_mask(vc, va, vb, 0, 0x8000000000000000ULL);
2251 break;
2252 case 0x021:
2253 /* CPYSN */
2254 vc = dest_fpr(ctx, rc);
2255 vb = load_fpr(ctx, rb);
2256 va = load_fpr(ctx, ra);
2257 gen_cpy_mask(vc, va, vb, 1, 0x8000000000000000ULL);
2258 break;
2259 case 0x022:
2260 /* CPYSE */
2261 vc = dest_fpr(ctx, rc);
2262 vb = load_fpr(ctx, rb);
2263 va = load_fpr(ctx, ra);
2264 gen_cpy_mask(vc, va, vb, 0, 0xFFF0000000000000ULL);
2265 break;
2266 case 0x024:
2267 /* MT_FPCR */
2268 va = load_fpr(ctx, ra);
2269 gen_helper_store_fpcr(cpu_env, va);
2270 if (ctx->tb_rm == QUAL_RM_D) {
2271 /* Re-do the copy of the rounding mode to fp_status
2272 the next time we use dynamic rounding. */
2273 ctx->tb_rm = -1;
2275 break;
2276 case 0x025:
2277 /* MF_FPCR */
2278 va = dest_fpr(ctx, ra);
2279 gen_helper_load_fpcr(va, cpu_env);
2280 break;
2281 case 0x02A:
2282 /* FCMOVEQ */
2283 gen_fcmov(ctx, TCG_COND_EQ, ra, rb, rc);
2284 break;
2285 case 0x02B:
2286 /* FCMOVNE */
2287 gen_fcmov(ctx, TCG_COND_NE, ra, rb, rc);
2288 break;
2289 case 0x02C:
2290 /* FCMOVLT */
2291 gen_fcmov(ctx, TCG_COND_LT, ra, rb, rc);
2292 break;
2293 case 0x02D:
2294 /* FCMOVGE */
2295 gen_fcmov(ctx, TCG_COND_GE, ra, rb, rc);
2296 break;
2297 case 0x02E:
2298 /* FCMOVLE */
2299 gen_fcmov(ctx, TCG_COND_LE, ra, rb, rc);
2300 break;
2301 case 0x02F:
2302 /* FCMOVGT */
2303 gen_fcmov(ctx, TCG_COND_GT, ra, rb, rc);
2304 break;
2305 case 0x030: /* CVTQL */
2306 case 0x130: /* CVTQL/V */
2307 case 0x530: /* CVTQL/SV */
2308 REQUIRE_REG_31(ra);
2309 vc = dest_fpr(ctx, rc);
2310 vb = load_fpr(ctx, rb);
2311 gen_helper_cvtql(vc, cpu_env, vb);
2312 gen_fp_exc_raise(rc, fn11);
2313 break;
2314 default:
2315 goto invalid_opc;
2316 }
2317 break;
2319 case 0x18:
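/* Miscellaneous instruction group: the particular operation is encoded
   in the 16-bit displacement field rather than in a function field.
   Most members are barrier or prefetch hints that need no action in
   this translator. */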
2320 switch ((uint16_t)disp16) {
2321 case 0x0000:
2322 /* TRAPB */
2323 /* No-op. */
2324 break;
2325 case 0x0400:
2326 /* EXCB */
2327 /* No-op. */
2328 break;
2329 case 0x4000:
2330 /* MB */
2331 /* No-op */
2332 break;
2333 case 0x4400:
2334 /* WMB */
2335 /* No-op */
2336 break;
2337 case 0x8000:
2338 /* FETCH */
2339 /* No-op */
2340 break;
2341 case 0xA000:
2342 /* FETCH_M */
2343 /* No-op */
2344 break;
2345 case 0xC000:
2346 /* RPCC */
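/* Under icount (CF_USE_ICOUNT), reading the cycle counter is treated
   as an I/O operation: the helper is bracketed with gen_io_start and
   gen_io_end, and the TB is ended (EXIT_PC_STALE) so the main loop
   can act on any icount expiry. */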
2347 va = dest_gpr(ctx, ra);
2348 if (ctx->tb->cflags & CF_USE_ICOUNT) {
2349 gen_io_start();
2350 gen_helper_load_pcc(va, cpu_env);
2351 gen_io_end();
2352 ret = EXIT_PC_STALE;
2353 } else {
2354 gen_helper_load_pcc(va, cpu_env);
2355 }
2356 break;
2357 case 0xE000:
2358 /* RC */
2359 gen_rx(ctx, ra, 0);
2360 break;
2361 case 0xE800:
2362 /* ECB */
2363 break;
2364 case 0xF000:
2365 /* RS */
2366 gen_rx(ctx, ra, 1);
2367 break;
2368 case 0xF800:
2369 /* WH64 */
2370 /* No-op */
2371 break;
2372 case 0xFC00:
2373 /* WH64EN */
2374 /* No-op */
2375 break;
2376 default:
2377 goto invalid_opc;
2378 }
2379 break;
2381 case 0x19:
2382 /* HW_MFPR (PALcode) */
2383 #ifndef CONFIG_USER_ONLY
2384 REQUIRE_TB_FLAG(TB_FLAGS_PAL_MODE);
2385 va = dest_gpr(ctx, ra);
2386 ret = gen_mfpr(ctx, va, insn & 0xffff);
2387 break;
2388 #else
2389 goto invalid_opc;
2390 #endif
2392 case 0x1A:
2393 /* JMP, JSR, RET, JSR_COROUTINE. These only differ by the branch
2394 prediction stack action, which of course we don't implement. */
2395 vb = load_gpr(ctx, rb);
2396 tcg_gen_andi_i64(cpu_pc, vb, ~3);
2397 if (ra != 31) {
2398 tcg_gen_movi_i64(ctx->ir[ra], ctx->pc);
2399 }
2400 ret = EXIT_PC_UPDATED;
2401 break;
2403 case 0x1B:
2404 /* HW_LD (PALcode) */
2405 #ifndef CONFIG_USER_ONLY
2406 REQUIRE_TB_FLAG(TB_FLAGS_PAL_MODE);
2407 {
2408 TCGv addr = tcg_temp_new();
2409 vb = load_gpr(ctx, rb);
2410 va = dest_gpr(ctx, ra);
2412 tcg_gen_addi_i64(addr, vb, disp12);
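/* Bits 15:12 of the instruction select the HW_LD variant: physical,
   physical with lock, virtual, and virtual with alternate access mode
   and/or protection checks. Only the variants needed by the emulation
   PALcode are implemented; the rest decode as invalid. */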
2413 switch ((insn >> 12) & 0xF) {
2414 case 0x0:
2415 /* Longword physical access (hw_ldl/p) */
2416 gen_helper_ldl_phys(va, cpu_env, addr);
2417 break;
2418 case 0x1:
2419 /* Quadword physical access (hw_ldq/p) */
2420 gen_helper_ldq_phys(va, cpu_env, addr);
2421 break;
2422 case 0x2:
2423 /* Longword physical access with lock (hw_ldl_l/p) */
2424 gen_helper_ldl_l_phys(va, cpu_env, addr);
2425 break;
2426 case 0x3:
2427 /* Quadword physical access with lock (hw_ldq_l/p) */
2428 gen_helper_ldq_l_phys(va, cpu_env, addr);
2429 break;
2430 case 0x4:
2431 /* Longword virtual PTE fetch (hw_ldl/v) */
2432 goto invalid_opc;
2433 case 0x5:
2434 /* Quadword virtual PTE fetch (hw_ldq/v) */
2435 goto invalid_opc;
2436 break;
2437 case 0x6:
2438 /* Invalid */
2439 goto invalid_opc;
2440 case 0x7:
2441 /* Invalid */
2442 goto invalid_opc;
2443 case 0x8:
2444 /* Longword virtual access (hw_ldl) */
2445 goto invalid_opc;
2446 case 0x9:
2447 /* Quadword virtual access (hw_ldq) */
2448 goto invalid_opc;
2449 case 0xA:
2450 /* Longword virtual access with protection check (hw_ldl/w) */
2451 tcg_gen_qemu_ld_i64(va, addr, MMU_KERNEL_IDX, MO_LESL);
2452 break;
2453 case 0xB:
2454 /* Quadword virtual access with protection check (hw_ldq/w) */
2455 tcg_gen_qemu_ld_i64(va, addr, MMU_KERNEL_IDX, MO_LEQ);
2456 break;
2457 case 0xC:
2458 /* Longword virtual access with alt access mode (hw_ldl/a) */
2459 goto invalid_opc;
2460 case 0xD:
2461 /* Quadword virtual access with alt access mode (hw_ldq/a) */
2462 goto invalid_opc;
2463 case 0xE:
2464 /* Longword virtual access with alternate access mode and
2465 protection checks (hw_ldl/wa) */
2466 tcg_gen_qemu_ld_i64(va, addr, MMU_USER_IDX, MO_LESL);
2467 break;
2468 case 0xF:
2469 /* Quadword virtual access with alternate access mode and
2470 protection checks (hw_ldq/wa) */
2471 tcg_gen_qemu_ld_i64(va, addr, MMU_USER_IDX, MO_LEQ);
2472 break;
2473 }
2474 tcg_temp_free(addr);
2475 break;
2476 }
2477 #else
2478 goto invalid_opc;
2479 #endif
2481 case 0x1C:
2482 vc = dest_gpr(ctx, rc);
2483 if (fn7 == 0x70) {
2484 /* FTOIT */
2485 REQUIRE_TB_FLAG(TB_FLAGS_AMASK_FIX);
2486 REQUIRE_REG_31(rb);
2487 va = load_fpr(ctx, ra);
2488 tcg_gen_mov_i64(vc, va);
2489 break;
2490 } else if (fn7 == 0x78) {
2491 /* FTOIS */
2492 REQUIRE_TB_FLAG(TB_FLAGS_AMASK_FIX);
2493 REQUIRE_REG_31(rb);
2494 t32 = tcg_temp_new_i32();
2495 va = load_fpr(ctx, ra);
2496 gen_helper_s_to_memory(t32, va);
2497 tcg_gen_ext_i32_i64(vc, t32);
2498 tcg_temp_free_i32(t32);
2499 break;
2500 }
2502 vb = load_gpr_lit(ctx, rb, lit, islit);
2503 switch (fn7) {
2504 case 0x00:
2505 /* SEXTB */
2506 REQUIRE_TB_FLAG(TB_FLAGS_AMASK_BWX);
2507 REQUIRE_REG_31(ra);
2508 tcg_gen_ext8s_i64(vc, vb);
2509 break;
2510 case 0x01:
2511 /* SEXTW */
2512 REQUIRE_TB_FLAG(TB_FLAGS_AMASK_BWX);
2513 REQUIRE_REG_31(ra);
2514 tcg_gen_ext16s_i64(vc, vb);
2515 break;
2516 case 0x30:
2517 /* CTPOP */
2518 REQUIRE_TB_FLAG(TB_FLAGS_AMASK_CIX);
2519 REQUIRE_REG_31(ra);
2520 REQUIRE_NO_LIT;
2521 gen_helper_ctpop(vc, vb);
2522 break;
2523 case 0x31:
2524 /* PERR */
2525 REQUIRE_TB_FLAG(TB_FLAGS_AMASK_MVI);
2526 REQUIRE_NO_LIT;
2527 va = load_gpr(ctx, ra);
2528 gen_helper_perr(vc, va, vb);
2529 break;
2530 case 0x32:
2531 /* CTLZ */
2532 REQUIRE_TB_FLAG(TB_FLAGS_AMASK_CIX);
2533 REQUIRE_REG_31(ra);
2534 REQUIRE_NO_LIT;
2535 gen_helper_ctlz(vc, vb);
2536 break;
2537 case 0x33:
2538 /* CTTZ */
2539 REQUIRE_TB_FLAG(TB_FLAGS_AMASK_CIX);
2540 REQUIRE_REG_31(ra);
2541 REQUIRE_NO_LIT;
2542 gen_helper_cttz(vc, vb);
2543 break;
2544 case 0x34:
2545 /* UNPKBW */
2546 REQUIRE_TB_FLAG(TB_FLAGS_AMASK_MVI);
2547 REQUIRE_REG_31(ra);
2548 REQUIRE_NO_LIT;
2549 gen_helper_unpkbw(vc, vb);
2550 break;
2551 case 0x35:
2552 /* UNPKBL */
2553 REQUIRE_TB_FLAG(TB_FLAGS_AMASK_MVI);
2554 REQUIRE_REG_31(ra);
2555 REQUIRE_NO_LIT;
2556 gen_helper_unpkbl(vc, vb);
2557 break;
2558 case 0x36:
2559 /* PKWB */
2560 REQUIRE_TB_FLAG(TB_FLAGS_AMASK_MVI);
2561 REQUIRE_REG_31(ra);
2562 REQUIRE_NO_LIT;
2563 gen_helper_pkwb(vc, vb);
2564 break;
2565 case 0x37:
2566 /* PKLB */
2567 REQUIRE_TB_FLAG(TB_FLAGS_AMASK_MVI);
2568 REQUIRE_REG_31(ra);
2569 REQUIRE_NO_LIT;
2570 gen_helper_pklb(vc, vb);
2571 break;
2572 case 0x38:
2573 /* MINSB8 */
2574 REQUIRE_TB_FLAG(TB_FLAGS_AMASK_MVI);
2575 va = load_gpr(ctx, ra);
2576 gen_helper_minsb8(vc, va, vb);
2577 break;
2578 case 0x39:
2579 /* MINSW4 */
2580 REQUIRE_TB_FLAG(TB_FLAGS_AMASK_MVI);
2581 va = load_gpr(ctx, ra);
2582 gen_helper_minsw4(vc, va, vb);
2583 break;
2584 case 0x3A:
2585 /* MINUB8 */
2586 REQUIRE_TB_FLAG(TB_FLAGS_AMASK_MVI);
2587 va = load_gpr(ctx, ra);
2588 gen_helper_minub8(vc, va, vb);
2589 break;
2590 case 0x3B:
2591 /* MINUW4 */
2592 REQUIRE_TB_FLAG(TB_FLAGS_AMASK_MVI);
2593 va = load_gpr(ctx, ra);
2594 gen_helper_minuw4(vc, va, vb);
2595 break;
2596 case 0x3C:
2597 /* MAXUB8 */
2598 REQUIRE_TB_FLAG(TB_FLAGS_AMASK_MVI);
2599 va = load_gpr(ctx, ra);
2600 gen_helper_maxub8(vc, va, vb);
2601 break;
2602 case 0x3D:
2603 /* MAXUW4 */
2604 REQUIRE_TB_FLAG(TB_FLAGS_AMASK_MVI);
2605 va = load_gpr(ctx, ra);
2606 gen_helper_maxuw4(vc, va, vb);
2607 break;
2608 case 0x3E:
2609 /* MAXSB8 */
2610 REQUIRE_TB_FLAG(TB_FLAGS_AMASK_MVI);
2611 va = load_gpr(ctx, ra);
2612 gen_helper_maxsb8(vc, va, vb);
2613 break;
2614 case 0x3F:
2615 /* MAXSW4 */
2616 REQUIRE_TB_FLAG(TB_FLAGS_AMASK_MVI);
2617 va = load_gpr(ctx, ra);
2618 gen_helper_maxsw4(vc, va, vb);
2619 break;
2620 default:
2621 goto invalid_opc;
2622 }
2623 break;
2625 case 0x1D:
2626 /* HW_MTPR (PALcode) */
2627 #ifndef CONFIG_USER_ONLY
2628 REQUIRE_TB_FLAG(TB_FLAGS_PAL_MODE);
2629 vb = load_gpr(ctx, rb);
2630 ret = gen_mtpr(ctx, vb, insn & 0xffff);
2631 break;
2632 #else
2633 goto invalid_opc;
2634 #endif
2636 case 0x1E:
2637 /* HW_RET (PALcode) */
2638 #ifndef CONFIG_USER_ONLY
2639 REQUIRE_TB_FLAG(TB_FLAGS_PAL_MODE);
2640 if (rb == 31) {
2641 /* Pre-EV6 CPUs interpreted this as HW_REI, loading the return
2642 address from EXC_ADDR. This turns out to be useful for our
2643 emulation PALcode, so continue to accept it. */
2644 ctx->lit = vb = tcg_temp_new();
2645 tcg_gen_ld_i64(vb, cpu_env, offsetof(CPUAlphaState, exc_addr));
2646 } else {
2647 vb = load_gpr(ctx, rb);
2648 }
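/* Return from PALcode: clear the interrupt flag, cancel any pending
   lock, copy bit 0 of the target address into pal_mode, and jump to
   the target with the low two bits cleared. */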
2649 tmp = tcg_temp_new();
2650 tcg_gen_movi_i64(tmp, 0);
2651 tcg_gen_st8_i64(tmp, cpu_env, offsetof(CPUAlphaState, intr_flag));
2652 tcg_gen_movi_i64(cpu_lock_addr, -1);
2653 tcg_gen_andi_i64(tmp, vb, 1);
2654 tcg_gen_st8_i64(tmp, cpu_env, offsetof(CPUAlphaState, pal_mode));
2655 tcg_gen_andi_i64(cpu_pc, vb, ~3);
2656 ret = EXIT_PC_UPDATED;
2657 break;
2658 #else
2659 goto invalid_opc;
2660 #endif
2662 case 0x1F:
2663 /* HW_ST (PALcode) */
2664 #ifndef CONFIG_USER_ONLY
2665 REQUIRE_TB_FLAG(TB_FLAGS_PAL_MODE);
2666 {
2667 TCGv addr = tcg_temp_new();
2668 va = load_gpr(ctx, ra);
2669 vb = load_gpr(ctx, rb);
2671 tcg_gen_addi_i64(addr, vb, disp12);
2672 switch ((insn >> 12) & 0xF) {
2673 case 0x0:
2674 /* Longword physical access */
2675 gen_helper_stl_phys(cpu_env, addr, va);
2676 break;
2677 case 0x1:
2678 /* Quadword physical access */
2679 gen_helper_stq_phys(cpu_env, addr, va);
2680 break;
2681 case 0x2:
2682 /* Longword physical access with lock */
2683 gen_helper_stl_c_phys(dest_gpr(ctx, ra), cpu_env, addr, va);
2684 break;
2685 case 0x3:
2686 /* Quadword physical access with lock */
2687 gen_helper_stq_c_phys(dest_gpr(ctx, ra), cpu_env, addr, va);
2688 break;
2689 case 0x4:
2690 /* Longword virtual access */
2691 goto invalid_opc;
2692 case 0x5:
2693 /* Quadword virtual access */
2694 goto invalid_opc;
2695 case 0x6:
2696 /* Invalid */
2697 goto invalid_opc;
2698 case 0x7:
2699 /* Invalid */
2700 goto invalid_opc;
2701 case 0x8:
2702 /* Invalid */
2703 goto invalid_opc;
2704 case 0x9:
2705 /* Invalid */
2706 goto invalid_opc;
2707 case 0xA:
2708 /* Invalid */
2709 goto invalid_opc;
2710 case 0xB:
2711 /* Invalid */
2712 goto invalid_opc;
2713 case 0xC:
2714 /* Longword virtual access with alternate access mode */
2715 goto invalid_opc;
2716 case 0xD:
2717 /* Quadword virtual access with alternate access mode */
2718 goto invalid_opc;
2719 case 0xE:
2720 /* Invalid */
2721 goto invalid_opc;
2722 case 0xF:
2723 /* Invalid */
2724 goto invalid_opc;
2725 }
2726 tcg_temp_free(addr);
2727 break;
2728 }
2729 #else
2730 goto invalid_opc;
2731 #endif
2732 case 0x20:
2733 /* LDF */
2734 gen_load_mem(ctx, &gen_qemu_ldf, ra, rb, disp16, 1, 0);
2735 break;
2736 case 0x21:
2737 /* LDG */
2738 gen_load_mem(ctx, &gen_qemu_ldg, ra, rb, disp16, 1, 0);
2739 break;
2740 case 0x22:
2741 /* LDS */
2742 gen_load_mem(ctx, &gen_qemu_lds, ra, rb, disp16, 1, 0);
2743 break;
2744 case 0x23:
2745 /* LDT */
2746 gen_load_mem(ctx, &tcg_gen_qemu_ld64, ra, rb, disp16, 1, 0);
2747 break;
2748 case 0x24:
2749 /* STF */
2750 gen_store_mem(ctx, &gen_qemu_stf, ra, rb, disp16, 1, 0);
2751 break;
2752 case 0x25:
2753 /* STG */
2754 gen_store_mem(ctx, &gen_qemu_stg, ra, rb, disp16, 1, 0);
2755 break;
2756 case 0x26:
2757 /* STS */
2758 gen_store_mem(ctx, &gen_qemu_sts, ra, rb, disp16, 1, 0);
2759 break;
2760 case 0x27:
2761 /* STT */
2762 gen_store_mem(ctx, &tcg_gen_qemu_st64, ra, rb, disp16, 1, 0);
2763 break;
2764 case 0x28:
2765 /* LDL */
2766 gen_load_mem(ctx, &tcg_gen_qemu_ld32s, ra, rb, disp16, 0, 0);
2767 break;
2768 case 0x29:
2769 /* LDQ */
2770 gen_load_mem(ctx, &tcg_gen_qemu_ld64, ra, rb, disp16, 0, 0);
2771 break;
2772 case 0x2A:
2773 /* LDL_L */
2774 gen_load_mem(ctx, &gen_qemu_ldl_l, ra, rb, disp16, 0, 0);
2775 break;
2776 case 0x2B:
2777 /* LDQ_L */
2778 gen_load_mem(ctx, &gen_qemu_ldq_l, ra, rb, disp16, 0, 0);
2779 break;
2780 case 0x2C:
2781 /* STL */
2782 gen_store_mem(ctx, &tcg_gen_qemu_st32, ra, rb, disp16, 0, 0);
2783 break;
2784 case 0x2D:
2785 /* STQ */
2786 gen_store_mem(ctx, &tcg_gen_qemu_st64, ra, rb, disp16, 0, 0);
2787 break;
2788 case 0x2E:
2789 /* STL_C */
2790 ret = gen_store_conditional(ctx, ra, rb, disp16, 0);
2791 break;
2792 case 0x2F:
2793 /* STQ_C */
2794 ret = gen_store_conditional(ctx, ra, rb, disp16, 1);
2795 break;
2796 case 0x30:
2797 /* BR */
2798 ret = gen_bdirect(ctx, ra, disp21);
2799 break;
2800 case 0x31: /* FBEQ */
2801 ret = gen_fbcond(ctx, TCG_COND_EQ, ra, disp21);
2802 break;
2803 case 0x32: /* FBLT */
2804 ret = gen_fbcond(ctx, TCG_COND_LT, ra, disp21);
2805 break;
2806 case 0x33: /* FBLE */
2807 ret = gen_fbcond(ctx, TCG_COND_LE, ra, disp21);
2808 break;
2809 case 0x34:
2810 /* BSR */
2811 ret = gen_bdirect(ctx, ra, disp21);
2812 break;
2813 case 0x35: /* FBNE */
2814 ret = gen_fbcond(ctx, TCG_COND_NE, ra, disp21);
2815 break;
2816 case 0x36: /* FBGE */
2817 ret = gen_fbcond(ctx, TCG_COND_GE, ra, disp21);
2818 break;
2819 case 0x37: /* FBGT */
2820 ret = gen_fbcond(ctx, TCG_COND_GT, ra, disp21);
2821 break;
2822 case 0x38:
2823 /* BLBC */
2824 ret = gen_bcond(ctx, TCG_COND_EQ, ra, disp21, 1);
2825 break;
2826 case 0x39:
2827 /* BEQ */
2828 ret = gen_bcond(ctx, TCG_COND_EQ, ra, disp21, 0);
2829 break;
2830 case 0x3A:
2831 /* BLT */
2832 ret = gen_bcond(ctx, TCG_COND_LT, ra, disp21, 0);
2833 break;
2834 case 0x3B:
2835 /* BLE */
2836 ret = gen_bcond(ctx, TCG_COND_LE, ra, disp21, 0);
2837 break;
2838 case 0x3C:
2839 /* BLBS */
2840 ret = gen_bcond(ctx, TCG_COND_NE, ra, disp21, 1);
2841 break;
2842 case 0x3D:
2843 /* BNE */
2844 ret = gen_bcond(ctx, TCG_COND_NE, ra, disp21, 0);
2845 break;
2846 case 0x3E:
2847 /* BGE */
2848 ret = gen_bcond(ctx, TCG_COND_GE, ra, disp21, 0);
2849 break;
2850 case 0x3F:
2851 /* BGT */
2852 ret = gen_bcond(ctx, TCG_COND_GT, ra, disp21, 0);
2853 break;
2854 invalid_opc:
2855 ret = gen_invalid(ctx);
2856 break;
2857 }
2859 return ret;
2860 }
2862 void gen_intermediate_code(CPUAlphaState *env, struct TranslationBlock *tb)
2863 {
2864 AlphaCPU *cpu = alpha_env_get_cpu(env);
2865 CPUState *cs = CPU(cpu);
2866 DisasContext ctx, *ctxp = &ctx;
2867 target_ulong pc_start;
2868 target_ulong pc_mask;
2869 uint32_t insn;
2870 ExitStatus ret;
2871 int num_insns;
2872 int max_insns;
2874 pc_start = tb->pc;
2876 ctx.tb = tb;
2877 ctx.pc = pc_start;
2878 ctx.mem_idx = cpu_mmu_index(env, false);
2879 ctx.implver = env->implver;
2880 ctx.singlestep_enabled = cs->singlestep_enabled;
2882 #ifdef CONFIG_USER_ONLY
2883 ctx.ir = cpu_std_ir;
2884 #else
2885 ctx.palbr = env->palbr;
2886 ctx.ir = (tb->flags & TB_FLAGS_PAL_MODE ? cpu_pal_ir : cpu_std_ir);
2887 #endif
2889 /* ??? Every TB begins with an unset rounding mode, to be initialized on
2890 the first fp insn of the TB. Alternatively we could define a proper
2891 default for every TB (e.g. QUAL_RM_N or QUAL_RM_D) and make sure
2892 to reset the FP_STATUS to that default at the end of any TB that
2893 changes the default. We could even (gasp) dynamically figure out
2894 what default would be most efficient given the running program. */
2895 ctx.tb_rm = -1;
2896 /* Similarly for flush-to-zero. */
2897 ctx.tb_ftz = -1;
2899 num_insns = 0;
2900 max_insns = tb->cflags & CF_COUNT_MASK;
2901 if (max_insns == 0) {
2902 max_insns = CF_COUNT_MASK;
2903 }
2904 if (max_insns > TCG_MAX_INSNS) {
2905 max_insns = TCG_MAX_INSNS;
2906 }
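/* Within a superpage a TB may run up to the 41-bit region boundary;
   otherwise translation must stop at a guest page boundary. pc_mask
   implements this test in the translation loop below. */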
2908 if (in_superpage(&ctx, pc_start)) {
2909 pc_mask = (1ULL << 41) - 1;
2910 } else {
2911 pc_mask = ~TARGET_PAGE_MASK;
2912 }
2914 gen_tb_start(tb);
2915 do {
2916 tcg_gen_insn_start(ctx.pc);
2917 num_insns++;
2919 if (unlikely(cpu_breakpoint_test(cs, ctx.pc, BP_ANY))) {
2920 ret = gen_excp(&ctx, EXCP_DEBUG, 0);
2921 /* The address covered by the breakpoint must be included in
2922 [tb->pc, tb->pc + tb->size) in order for it to be
2923 properly cleared -- thus we increment the PC here so that
2924 the logic setting tb->size below does the right thing. */
2925 ctx.pc += 4;
2926 break;
2927 }
2928 if (num_insns == max_insns && (tb->cflags & CF_LAST_IO)) {
2929 gen_io_start();
2930 }
2931 insn = cpu_ldl_code(env, ctx.pc);
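/* The zero, sink and lit temporaries are allocated lazily by
   translate_one; mark them unused before each instruction, then free
   whatever was actually allocated afterwards. */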
2933 TCGV_UNUSED_I64(ctx.zero);
2934 TCGV_UNUSED_I64(ctx.sink);
2935 TCGV_UNUSED_I64(ctx.lit);
2937 ctx.pc += 4;
2938 ret = translate_one(ctxp, insn);
2940 if (!TCGV_IS_UNUSED_I64(ctx.sink)) {
2941 tcg_gen_discard_i64(ctx.sink);
2942 tcg_temp_free(ctx.sink);
2943 }
2944 if (!TCGV_IS_UNUSED_I64(ctx.zero)) {
2945 tcg_temp_free(ctx.zero);
2946 }
2947 if (!TCGV_IS_UNUSED_I64(ctx.lit)) {
2948 tcg_temp_free(ctx.lit);
2949 }
2951 /* If we reach a page boundary, are single-stepping,
2952 or exhaust the instruction count, stop generation. */
2953 if (ret == NO_EXIT
2954 && ((ctx.pc & pc_mask) == 0
2955 || tcg_op_buf_full()
2956 || num_insns >= max_insns
2957 || singlestep
2958 || ctx.singlestep_enabled)) {
2959 ret = EXIT_PC_STALE;
2960 }
2961 } while (ret == NO_EXIT);
2963 if (tb->cflags & CF_LAST_IO) {
2964 gen_io_end();
2965 }
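/* Emit the TB epilogue according to how translation ended: goto_tb
   and noreturn exits need nothing further; a stale PC must first be
   written back; then either raise EXCP_DEBUG for single-stepping or
   exit the TB normally. */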
2967 switch (ret) {
2968 case EXIT_GOTO_TB:
2969 case EXIT_NORETURN:
2970 break;
2971 case EXIT_PC_STALE:
2972 tcg_gen_movi_i64(cpu_pc, ctx.pc);
2973 /* FALLTHRU */
2974 case EXIT_PC_UPDATED:
2975 if (ctx.singlestep_enabled) {
2976 gen_excp_1(EXCP_DEBUG, 0);
2977 } else {
2978 tcg_gen_exit_tb(0);
2979 }
2980 break;
2981 default:
2982 abort();
2983 }
2985 gen_tb_end(tb, num_insns);
2987 tb->size = ctx.pc - pc_start;
2988 tb->icount = num_insns;
2990 #ifdef DEBUG_DISAS
2991 if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
2992 qemu_log("IN: %s\n", lookup_symbol(pc_start));
2993 log_target_disas(cs, pc_start, ctx.pc - pc_start, 1);
2994 qemu_log("\n");
2995 }
2996 #endif
2997 }
2999 void restore_state_to_opc(CPUAlphaState *env, TranslationBlock *tb,
3000 target_ulong *data)
3001 {
3002 env->pc = data[0];
3003 }