/*
 *  Alpha emulation cpu translation for qemu.
 *
 *  Copyright (c) 2007 Jocelyn Mayer
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "cpu.h"
#include "disas/disas.h"
#include "qemu/host-utils.h"
#include "exec/exec-all.h"
#include "tcg-op.h"
#include "exec/cpu_ldst.h"

#include "exec/helper-proto.h"
#include "exec/helper-gen.h"

#include "trace-tcg.h"
#include "exec/log.h"

#undef ALPHA_DEBUG_DISAS
#define CONFIG_SOFTFLOAT_INLINE

#ifdef ALPHA_DEBUG_DISAS
#  define LOG_DISAS(...) qemu_log_mask(CPU_LOG_TB_IN_ASM, ## __VA_ARGS__)
#else
#  define LOG_DISAS(...) do { } while (0)
#endif

typedef struct DisasContext DisasContext;
struct DisasContext {
    struct TranslationBlock *tb;
    uint64_t pc;
#ifndef CONFIG_USER_ONLY
    uint64_t palbr;
#endif
    int mem_idx;

    /* Current rounding mode for this TB.  */
    int tb_rm;
    /* Current flush-to-zero setting for this TB.  */
    int tb_ftz;

    /* implver value for this CPU.  */
    int implver;

    /* The set of registers active in the current context.  */
    TCGv *ir;

    /* Temporaries for $31 and $f31 as source and destination.  */
    TCGv zero;
    TCGv sink;
    /* Temporary for immediate constants.  */
    TCGv lit;

    bool singlestep_enabled;
};

/* Return values from translate_one, indicating the state of the TB.
   Note that zero indicates that we are not exiting the TB.  */

typedef enum {
    NO_EXIT,

    /* We have emitted one or more goto_tb.  No fixup required.  */
    EXIT_GOTO_TB,

    /* We are not using a goto_tb (for whatever reason), but have updated
       the PC (for whatever reason), so there's no need to do it again on
       exiting the TB.  */
    EXIT_PC_UPDATED,

    /* We are exiting the TB, but have neither emitted a goto_tb, nor
       updated the PC for the next instruction to be executed.  */
    EXIT_PC_STALE,

    /* We are ending the TB with a noreturn function call, e.g. longjmp.
       No following code will be executed.  */
    EXIT_NORETURN,
} ExitStatus;

/* global register indexes */
static TCGv_env cpu_env;
static TCGv cpu_std_ir[31];
static TCGv cpu_fir[31];
static TCGv cpu_pc;
static TCGv cpu_lock_addr;
static TCGv cpu_lock_st_addr;
static TCGv cpu_lock_value;

#ifndef CONFIG_USER_ONLY
static TCGv cpu_pal_ir[31];
#endif

#include "exec/gen-icount.h"

void alpha_translate_init(void)
{
#define DEF_VAR(V)  { &cpu_##V, #V, offsetof(CPUAlphaState, V) }

    typedef struct { TCGv *var; const char *name; int ofs; } GlobalVar;
    static const GlobalVar vars[] = {
        DEF_VAR(pc),
        DEF_VAR(lock_addr),
        DEF_VAR(lock_st_addr),
        DEF_VAR(lock_value),
    };

#undef DEF_VAR

    /* Use the symbolic register names that match the disassembler.  */
    static const char greg_names[31][4] = {
        "v0", "t0", "t1", "t2", "t3", "t4", "t5", "t6",
        "t7", "s0", "s1", "s2", "s3", "s4", "s5", "fp",
        "a0", "a1", "a2", "a3", "a4", "a5", "t8", "t9",
        "t10", "t11", "ra", "t12", "at", "gp", "sp"
    };
    static const char freg_names[31][4] = {
        "f0", "f1", "f2", "f3", "f4", "f5", "f6", "f7",
        "f8", "f9", "f10", "f11", "f12", "f13", "f14", "f15",
        "f16", "f17", "f18", "f19", "f20", "f21", "f22", "f23",
        "f24", "f25", "f26", "f27", "f28", "f29", "f30"
    };
#ifndef CONFIG_USER_ONLY
    static const char shadow_names[8][8] = {
        "pal_t7", "pal_s0", "pal_s1", "pal_s2",
        "pal_s3", "pal_s4", "pal_s5", "pal_t11"
    };
#endif

    static bool done_init = 0;
    int i;

    if (done_init) {
        return;
    }
    done_init = 1;

    cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");
    tcg_ctx.tcg_env = cpu_env;

    for (i = 0; i < 31; i++) {
        cpu_std_ir[i] = tcg_global_mem_new_i64(cpu_env,
                                               offsetof(CPUAlphaState, ir[i]),
                                               greg_names[i]);
    }

    for (i = 0; i < 31; i++) {
        cpu_fir[i] = tcg_global_mem_new_i64(cpu_env,
                                            offsetof(CPUAlphaState, fir[i]),
                                            freg_names[i]);
    }

#ifndef CONFIG_USER_ONLY
    memcpy(cpu_pal_ir, cpu_std_ir, sizeof(cpu_pal_ir));
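    /* The eight PALmode shadow registers overlay r8-r14 and r25
       (the i == 7 case below).  */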
    for (i = 0; i < 8; i++) {
        int r = (i == 7 ? 25 : i + 8);
        cpu_pal_ir[r] = tcg_global_mem_new_i64(cpu_env,
                                               offsetof(CPUAlphaState,
                                                        shadow[i]),
                                               shadow_names[i]);
    }
#endif

    for (i = 0; i < ARRAY_SIZE(vars); ++i) {
        const GlobalVar *v = &vars[i];
        *v->var = tcg_global_mem_new_i64(cpu_env, v->ofs, v->name);
    }
}

static TCGv load_zero(DisasContext *ctx)
{
    if (TCGV_IS_UNUSED_I64(ctx->zero)) {
        ctx->zero = tcg_const_i64(0);
    }
    return ctx->zero;
}

static TCGv dest_sink(DisasContext *ctx)
{
    if (TCGV_IS_UNUSED_I64(ctx->sink)) {
        ctx->sink = tcg_temp_new();
    }
    return ctx->sink;
}

static TCGv load_gpr(DisasContext *ctx, unsigned reg)
{
    if (likely(reg < 31)) {
        return ctx->ir[reg];
    } else {
        return load_zero(ctx);
    }
}

static TCGv load_gpr_lit(DisasContext *ctx, unsigned reg,
                         uint8_t lit, bool islit)
{
    if (islit) {
        ctx->lit = tcg_const_i64(lit);
        return ctx->lit;
    } else if (likely(reg < 31)) {
        return ctx->ir[reg];
    } else {
        return load_zero(ctx);
    }
}

static TCGv dest_gpr(DisasContext *ctx, unsigned reg)
{
    if (likely(reg < 31)) {
        return ctx->ir[reg];
    } else {
        return dest_sink(ctx);
    }
}

static TCGv load_fpr(DisasContext *ctx, unsigned reg)
{
    if (likely(reg < 31)) {
        return cpu_fir[reg];
    } else {
        return load_zero(ctx);
    }
}

static TCGv dest_fpr(DisasContext *ctx, unsigned reg)
{
    if (likely(reg < 31)) {
        return cpu_fir[reg];
    } else {
        return dest_sink(ctx);
    }
}

static void gen_excp_1(int exception, int error_code)
{
    TCGv_i32 tmp1, tmp2;

    tmp1 = tcg_const_i32(exception);
    tmp2 = tcg_const_i32(error_code);
    gen_helper_excp(cpu_env, tmp1, tmp2);
    tcg_temp_free_i32(tmp2);
    tcg_temp_free_i32(tmp1);
}

static ExitStatus gen_excp(DisasContext *ctx, int exception, int error_code)
{
    tcg_gen_movi_i64(cpu_pc, ctx->pc);
    gen_excp_1(exception, error_code);
    return EXIT_NORETURN;
}

static inline ExitStatus gen_invalid(DisasContext *ctx)
{
    return gen_excp(ctx, EXCP_OPCDEC, 0);
}

static inline void gen_qemu_ldf(TCGv t0, TCGv t1, int flags)
{
    TCGv_i32 tmp32 = tcg_temp_new_i32();
    tcg_gen_qemu_ld_i32(tmp32, t1, flags, MO_LEUL);
    gen_helper_memory_to_f(t0, tmp32);
    tcg_temp_free_i32(tmp32);
}

static inline void gen_qemu_ldg(TCGv t0, TCGv t1, int flags)
{
    TCGv tmp = tcg_temp_new();
    tcg_gen_qemu_ld_i64(tmp, t1, flags, MO_LEQ);
    gen_helper_memory_to_g(t0, tmp);
    tcg_temp_free(tmp);
}

static inline void gen_qemu_lds(TCGv t0, TCGv t1, int flags)
{
    TCGv_i32 tmp32 = tcg_temp_new_i32();
    tcg_gen_qemu_ld_i32(tmp32, t1, flags, MO_LEUL);
    gen_helper_memory_to_s(t0, tmp32);
    tcg_temp_free_i32(tmp32);
}

static inline void gen_qemu_ldl_l(TCGv t0, TCGv t1, int flags)
{
    tcg_gen_qemu_ld_i64(t0, t1, flags, MO_LESL);
    tcg_gen_mov_i64(cpu_lock_addr, t1);
    tcg_gen_mov_i64(cpu_lock_value, t0);
}

static inline void gen_qemu_ldq_l(TCGv t0, TCGv t1, int flags)
{
    tcg_gen_qemu_ld_i64(t0, t1, flags, MO_LEQ);
    tcg_gen_mov_i64(cpu_lock_addr, t1);
    tcg_gen_mov_i64(cpu_lock_value, t0);
}

static inline void gen_load_mem(DisasContext *ctx,
                                void (*tcg_gen_qemu_load)(TCGv t0, TCGv t1,
                                                          int flags),
                                int ra, int rb, int32_t disp16, bool fp,
                                bool clear)
{
    TCGv tmp, addr, va;

    /* LDQ_U with ra $31 is UNOP.  Other various loads are forms of
       prefetches, which we can treat as nops.  No worries about
       missed exceptions here.  */
    if (unlikely(ra == 31)) {
        return;
    }

    tmp = tcg_temp_new();
    addr = load_gpr(ctx, rb);

    if (disp16) {
        tcg_gen_addi_i64(tmp, addr, disp16);
        addr = tmp;
    }
    if (clear) {
        tcg_gen_andi_i64(tmp, addr, ~0x7);
        addr = tmp;
    }

    va = (fp ? cpu_fir[ra] : ctx->ir[ra]);
    tcg_gen_qemu_load(va, addr, ctx->mem_idx);

    tcg_temp_free(tmp);
}

static inline void gen_qemu_stf(TCGv t0, TCGv t1, int flags)
{
    TCGv_i32 tmp32 = tcg_temp_new_i32();
    gen_helper_f_to_memory(tmp32, t0);
    tcg_gen_qemu_st_i32(tmp32, t1, flags, MO_LEUL);
    tcg_temp_free_i32(tmp32);
}

static inline void gen_qemu_stg(TCGv t0, TCGv t1, int flags)
{
    TCGv tmp = tcg_temp_new();
    gen_helper_g_to_memory(tmp, t0);
    tcg_gen_qemu_st_i64(tmp, t1, flags, MO_LEQ);
    tcg_temp_free(tmp);
}

static inline void gen_qemu_sts(TCGv t0, TCGv t1, int flags)
{
    TCGv_i32 tmp32 = tcg_temp_new_i32();
    gen_helper_s_to_memory(tmp32, t0);
    tcg_gen_qemu_st_i32(tmp32, t1, flags, MO_LEUL);
    tcg_temp_free_i32(tmp32);
}

static inline void gen_store_mem(DisasContext *ctx,
                                 void (*tcg_gen_qemu_store)(TCGv t0, TCGv t1,
                                                            int flags),
                                 int ra, int rb, int32_t disp16, bool fp,
                                 bool clear)
{
    TCGv tmp, addr, va;

    tmp = tcg_temp_new();
    addr = load_gpr(ctx, rb);

    if (disp16) {
        tcg_gen_addi_i64(tmp, addr, disp16);
        addr = tmp;
    }
    if (clear) {
        tcg_gen_andi_i64(tmp, addr, ~0x7);
        addr = tmp;
    }

    va = (fp ? load_fpr(ctx, ra) : load_gpr(ctx, ra));
    tcg_gen_qemu_store(va, addr, ctx->mem_idx);

    tcg_temp_free(tmp);
}

static ExitStatus gen_store_conditional(DisasContext *ctx, int ra, int rb,
                                        int32_t disp16, int quad)
{
    TCGv addr;

    if (ra == 31) {
        /* ??? Don't bother storing anything.  The user can't tell
           the difference, since the zero register always reads zero.  */
        return NO_EXIT;
    }

#if defined(CONFIG_USER_ONLY)
    addr = cpu_lock_st_addr;
#else
    addr = tcg_temp_local_new();
#endif

    tcg_gen_addi_i64(addr, load_gpr(ctx, rb), disp16);

#if defined(CONFIG_USER_ONLY)
    /* ??? This is handled via a complicated version of compare-and-swap
       in the cpu_loop.  Hopefully one day we'll have a real CAS opcode
       in TCG so that this isn't necessary.  */
    return gen_excp(ctx, quad ? EXCP_STQ_C : EXCP_STL_C, ra);
#else
    /* ??? In system mode we are never multi-threaded, so CAS can be
       implemented via a non-atomic load-compare-store sequence.  */
    {
        TCGLabel *lab_fail, *lab_done;
        TCGv val;

        lab_fail = gen_new_label();
        lab_done = gen_new_label();
        tcg_gen_brcond_i64(TCG_COND_NE, addr, cpu_lock_addr, lab_fail);

        val = tcg_temp_new();
        tcg_gen_qemu_ld_i64(val, addr, ctx->mem_idx, quad ? MO_LEQ : MO_LESL);
        tcg_gen_brcond_i64(TCG_COND_NE, val, cpu_lock_value, lab_fail);

        tcg_gen_qemu_st_i64(ctx->ir[ra], addr, ctx->mem_idx,
                            quad ? MO_LEQ : MO_LEUL);
        tcg_gen_movi_i64(ctx->ir[ra], 1);
        tcg_gen_br(lab_done);

        gen_set_label(lab_fail);
        tcg_gen_movi_i64(ctx->ir[ra], 0);

        gen_set_label(lab_done);
        tcg_gen_movi_i64(cpu_lock_addr, -1);

        tcg_temp_free(addr);
        return NO_EXIT;
    }
#endif
}
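
/* The superpage is matched when we are in kernel mode and the
   sign-extended address has VA<42:41> == 2.  Its mappings are fixed,
   which is what lets use_goto_tb below treat it specially.  */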
static bool in_superpage(DisasContext *ctx, int64_t addr)
{
#ifndef CONFIG_USER_ONLY
    return ((ctx->tb->flags & TB_FLAGS_USER_MODE) == 0
            && addr >> TARGET_VIRT_ADDR_SPACE_BITS == -1
            && ((addr >> 41) & 3) == 2);
#else
    return false;
#endif
}

static bool use_goto_tb(DisasContext *ctx, uint64_t dest)
{
    /* Suppress goto_tb in the case of single-stepping and IO.  */
    if ((ctx->tb->cflags & CF_LAST_IO)
        || ctx->singlestep_enabled || singlestep) {
        return false;
    }

#ifndef CONFIG_USER_ONLY
    /* If the destination is in the superpage, the page perms can't change.  */
    if (in_superpage(ctx, dest)) {
        return true;
    }
    /* Check for the dest on the same page as the start of the TB.  */
    return ((ctx->tb->pc ^ dest) & TARGET_PAGE_MASK) == 0;
#else
    return true;
#endif
}

static ExitStatus gen_bdirect(DisasContext *ctx, int ra, int32_t disp)
{
    uint64_t dest = ctx->pc + (disp << 2);

    if (ra != 31) {
        tcg_gen_movi_i64(ctx->ir[ra], ctx->pc);
    }

    /* Notice branch-to-next; used to initialize RA with the PC.  */
    if (disp == 0) {
        return 0;
    } else if (use_goto_tb(ctx, dest)) {
        tcg_gen_goto_tb(0);
        tcg_gen_movi_i64(cpu_pc, dest);
        tcg_gen_exit_tb((uintptr_t)ctx->tb);
        return EXIT_GOTO_TB;
    } else {
        tcg_gen_movi_i64(cpu_pc, dest);
        return EXIT_PC_UPDATED;
    }
}

static ExitStatus gen_bcond_internal(DisasContext *ctx, TCGCond cond,
                                     TCGv cmp, int32_t disp)
{
    uint64_t dest = ctx->pc + (disp << 2);
    TCGLabel *lab_true = gen_new_label();

    if (use_goto_tb(ctx, dest)) {
        tcg_gen_brcondi_i64(cond, cmp, 0, lab_true);

        tcg_gen_goto_tb(0);
        tcg_gen_movi_i64(cpu_pc, ctx->pc);
        tcg_gen_exit_tb((uintptr_t)ctx->tb);

        gen_set_label(lab_true);
        tcg_gen_goto_tb(1);
        tcg_gen_movi_i64(cpu_pc, dest);
        tcg_gen_exit_tb((uintptr_t)ctx->tb + 1);

        return EXIT_GOTO_TB;
    } else {
        TCGv_i64 z = tcg_const_i64(0);
        TCGv_i64 d = tcg_const_i64(dest);
        TCGv_i64 p = tcg_const_i64(ctx->pc);

        tcg_gen_movcond_i64(cond, cpu_pc, cmp, z, d, p);

        tcg_temp_free_i64(z);
        tcg_temp_free_i64(d);
        tcg_temp_free_i64(p);
        return EXIT_PC_UPDATED;
    }
}

static ExitStatus gen_bcond(DisasContext *ctx, TCGCond cond, int ra,
                            int32_t disp, int mask)
{
    TCGv cmp_tmp;

    if (mask) {
        cmp_tmp = tcg_temp_new();
        tcg_gen_andi_i64(cmp_tmp, load_gpr(ctx, ra), 1);
    } else {
        cmp_tmp = load_gpr(ctx, ra);
    }

    return gen_bcond_internal(ctx, cond, cmp_tmp, disp);
}

/* Fold -0.0 for comparison with COND.  */

static void gen_fold_mzero(TCGCond cond, TCGv dest, TCGv src)
{
    uint64_t mzero = 1ull << 63;

    switch (cond) {
    case TCG_COND_LE:
    case TCG_COND_GT:
        /* For <= or >, the -0.0 value directly compares the way we want.  */
        tcg_gen_mov_i64(dest, src);
        break;

    case TCG_COND_EQ:
    case TCG_COND_NE:
        /* For == or !=, we can simply mask off the sign bit and compare.  */
        tcg_gen_andi_i64(dest, src, mzero - 1);
        break;

    case TCG_COND_GE:
    case TCG_COND_LT:
        /* For >= or <, map -0.0 to +0.0 via comparison and mask.  */
        tcg_gen_setcondi_i64(TCG_COND_NE, dest, src, mzero);
        tcg_gen_neg_i64(dest, dest);
        tcg_gen_and_i64(dest, dest, src);
        break;

    default:
        abort();
    }
}

static ExitStatus gen_fbcond(DisasContext *ctx, TCGCond cond, int ra,
                             int32_t disp)
{
    TCGv cmp_tmp = tcg_temp_new();
    gen_fold_mzero(cond, cmp_tmp, load_fpr(ctx, ra));
    return gen_bcond_internal(ctx, cond, cmp_tmp, disp);
}

static void gen_fcmov(DisasContext *ctx, TCGCond cond, int ra, int rb, int rc)
{
    TCGv_i64 va, vb, z;

    z = load_zero(ctx);
    vb = load_fpr(ctx, rb);
    va = tcg_temp_new();
    gen_fold_mzero(cond, va, load_fpr(ctx, ra));

    tcg_gen_movcond_i64(cond, dest_fpr(ctx, rc), va, z, vb, load_fpr(ctx, rc));

    tcg_temp_free(va);
}
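
/* Instruction qualifiers taken from the FN11 opcode field.  Note that
   QUAL_U (for fp outputs) and QUAL_V (for integer outputs) share one
   encoding, as they apply to disjoint sets of instructions.  */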
#define QUAL_RM_N       0x080   /* Round mode nearest even */
#define QUAL_RM_C       0x000   /* Round mode chopped */
#define QUAL_RM_M       0x040   /* Round mode minus infinity */
#define QUAL_RM_D       0x0c0   /* Round mode dynamic */
#define QUAL_RM_MASK    0x0c0

#define QUAL_U          0x100   /* Underflow enable (fp output) */
#define QUAL_V          0x100   /* Overflow enable (int output) */
#define QUAL_S          0x400   /* Software completion enable */
#define QUAL_I          0x200   /* Inexact detection enable */

static void gen_qual_roundmode(DisasContext *ctx, int fn11)
{
    TCGv_i32 tmp;

    fn11 &= QUAL_RM_MASK;
    if (fn11 == ctx->tb_rm) {
        return;
    }
    ctx->tb_rm = fn11;

    tmp = tcg_temp_new_i32();
    switch (fn11) {
    case QUAL_RM_N:
        tcg_gen_movi_i32(tmp, float_round_nearest_even);
        break;
    case QUAL_RM_C:
        tcg_gen_movi_i32(tmp, float_round_to_zero);
        break;
    case QUAL_RM_M:
        tcg_gen_movi_i32(tmp, float_round_down);
        break;
    case QUAL_RM_D:
        tcg_gen_ld8u_i32(tmp, cpu_env,
                         offsetof(CPUAlphaState, fpcr_dyn_round));
        break;
    }

#if defined(CONFIG_SOFTFLOAT_INLINE)
    /* ??? The "fpu/softfloat.h" interface is to call set_float_rounding_mode.
       With CONFIG_SOFTFLOAT that expands to an out-of-line call that just
       sets the one field.  */
    tcg_gen_st8_i32(tmp, cpu_env,
                    offsetof(CPUAlphaState, fp_status.float_rounding_mode));
#else
    gen_helper_setroundmode(tmp);
#endif

    tcg_temp_free_i32(tmp);
}

static void gen_qual_flushzero(DisasContext *ctx, int fn11)
{
    TCGv_i32 tmp;

    fn11 &= QUAL_U;
    if (fn11 == ctx->tb_ftz) {
        return;
    }
    ctx->tb_ftz = fn11;

    tmp = tcg_temp_new_i32();
    if (fn11) {
        /* Underflow is enabled, use the FPCR setting.  */
        tcg_gen_ld8u_i32(tmp, cpu_env,
                         offsetof(CPUAlphaState, fpcr_flush_to_zero));
    } else {
        /* Underflow is disabled, force flush-to-zero.  */
        tcg_gen_movi_i32(tmp, 1);
    }

#if defined(CONFIG_SOFTFLOAT_INLINE)
    tcg_gen_st8_i32(tmp, cpu_env,
                    offsetof(CPUAlphaState, fp_status.flush_to_zero));
#else
    gen_helper_setflushzero(tmp);
#endif

    tcg_temp_free_i32(tmp);
}

static TCGv gen_ieee_input(DisasContext *ctx, int reg, int fn11, int is_cmp)
{
    TCGv val;

    if (unlikely(reg == 31)) {
        val = load_zero(ctx);
    } else {
        val = cpu_fir[reg];
        if ((fn11 & QUAL_S) == 0) {
            if (is_cmp) {
                gen_helper_ieee_input_cmp(cpu_env, val);
            } else {
                gen_helper_ieee_input(cpu_env, val);
            }
        } else {
#ifndef CONFIG_USER_ONLY
            /* In system mode, raise exceptions for denormals like real
               hardware.  In user mode, proceed as if the OS completion
               handler is handling the denormal as per spec.  */
            gen_helper_ieee_input_s(cpu_env, val);
#endif
        }
    }
    return val;
}

static void gen_fp_exc_raise(int rc, int fn11)
{
    /* ??? We ought to be able to do something with imprecise exceptions.
       E.g. notice we're still in the trap shadow of something within the
       TB and do not generate the code to signal the exception; end the TB
       when an exception is forced to arrive, either by consumption of a
       register value or TRAPB or EXCB.  */
    TCGv_i32 reg, ign;
    uint32_t ignore = 0;

    if (!(fn11 & QUAL_U)) {
        /* Note that QUAL_U == QUAL_V, so ignore either.  */
        ignore |= FPCR_UNF | FPCR_IOV;
    }
    if (!(fn11 & QUAL_I)) {
        ignore |= FPCR_INE;
    }
    ign = tcg_const_i32(ignore);

    /* ??? Pass in the regno of the destination so that the helper can
       set EXC_MASK, which contains a bitmask of destination registers
       that have caused arithmetic traps.  A simple userspace emulation
       does not require this.  We do need it for a guest kernel's entArith,
       or if we were to do something clever with imprecise exceptions.  */
    reg = tcg_const_i32(rc + 32);
    if (fn11 & QUAL_S) {
        gen_helper_fp_exc_raise_s(cpu_env, ign, reg);
    } else {
        gen_helper_fp_exc_raise(cpu_env, ign, reg);
    }

    tcg_temp_free_i32(reg);
    tcg_temp_free_i32(ign);
}
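
/* CVTLQ: in the register file a longword is kept with bits <31:30> of
   the value at <63:62> and bits <29:0> at <58:29>; reassemble them into
   a sign-extended integer quadword.  */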
static void gen_cvtlq(TCGv vc, TCGv vb)
{
    TCGv tmp = tcg_temp_new();

    /* The arithmetic right shift here, plus the sign-extended mask below
       yields a sign-extended result without an explicit ext32s_i64.  */
    tcg_gen_sari_i64(tmp, vb, 32);
    tcg_gen_shri_i64(vc, vb, 29);
    tcg_gen_andi_i64(tmp, tmp, (int32_t)0xc0000000);
    tcg_gen_andi_i64(vc, vc, 0x3fffffff);
    tcg_gen_or_i64(vc, vc, tmp);

    tcg_temp_free(tmp);
}

static void gen_ieee_arith2(DisasContext *ctx,
                            void (*helper)(TCGv, TCGv_ptr, TCGv),
                            int rb, int rc, int fn11)
{
    TCGv vb;

    gen_qual_roundmode(ctx, fn11);
    gen_qual_flushzero(ctx, fn11);

    vb = gen_ieee_input(ctx, rb, fn11, 0);
    helper(dest_fpr(ctx, rc), cpu_env, vb);

    gen_fp_exc_raise(rc, fn11);
}

#define IEEE_ARITH2(name)                                       \
static inline void glue(gen_, name)(DisasContext *ctx,          \
                                    int rb, int rc, int fn11)   \
{                                                               \
    gen_ieee_arith2(ctx, gen_helper_##name, rb, rc, fn11);      \
}
IEEE_ARITH2(sqrts)
IEEE_ARITH2(sqrtt)
IEEE_ARITH2(cvtst)
IEEE_ARITH2(cvtts)

static void gen_cvttq(DisasContext *ctx, int rb, int rc, int fn11)
{
    TCGv vb, vc;

    /* No need to set flushzero, since we have an integer output.  */
    vb = gen_ieee_input(ctx, rb, fn11, 0);
    vc = dest_fpr(ctx, rc);

    /* Almost all integer conversions use cropped rounding;
       special case that.  */
    if ((fn11 & QUAL_RM_MASK) == QUAL_RM_C) {
        gen_helper_cvttq_c(vc, cpu_env, vb);
    } else {
        gen_qual_roundmode(ctx, fn11);
        gen_helper_cvttq(vc, cpu_env, vb);
    }
    gen_fp_exc_raise(rc, fn11);
}

static void gen_ieee_intcvt(DisasContext *ctx,
                            void (*helper)(TCGv, TCGv_ptr, TCGv),
                            int rb, int rc, int fn11)
{
    TCGv vb, vc;

    gen_qual_roundmode(ctx, fn11);
    vb = load_fpr(ctx, rb);
    vc = dest_fpr(ctx, rc);

    /* The only exception that can be raised by integer conversion
       is inexact.  Thus we only need to worry about exceptions when
       inexact handling is requested.  */
    if (fn11 & QUAL_I) {
        helper(vc, cpu_env, vb);
        gen_fp_exc_raise(rc, fn11);
    } else {
        helper(vc, cpu_env, vb);
    }
}

#define IEEE_INTCVT(name)                                       \
static inline void glue(gen_, name)(DisasContext *ctx,          \
                                    int rb, int rc, int fn11)   \
{                                                               \
    gen_ieee_intcvt(ctx, gen_helper_##name, rb, rc, fn11);      \
}
IEEE_INTCVT(cvtqs)
IEEE_INTCVT(cvtqt)

static void gen_cpy_mask(TCGv vc, TCGv va, TCGv vb, bool inv_a, uint64_t mask)
{
    TCGv vmask = tcg_const_i64(mask);
    TCGv tmp = tcg_temp_new_i64();

    if (inv_a) {
        tcg_gen_andc_i64(tmp, vmask, va);
    } else {
        tcg_gen_and_i64(tmp, va, vmask);
    }

    tcg_gen_andc_i64(vc, vb, vmask);
    tcg_gen_or_i64(vc, vc, tmp);

    tcg_temp_free(vmask);
    tcg_temp_free(tmp);
}

static void gen_ieee_arith3(DisasContext *ctx,
                            void (*helper)(TCGv, TCGv_ptr, TCGv, TCGv),
                            int ra, int rb, int rc, int fn11)
{
    TCGv va, vb, vc;

    gen_qual_roundmode(ctx, fn11);
    gen_qual_flushzero(ctx, fn11);

    va = gen_ieee_input(ctx, ra, fn11, 0);
    vb = gen_ieee_input(ctx, rb, fn11, 0);
    vc = dest_fpr(ctx, rc);
    helper(vc, cpu_env, va, vb);

    gen_fp_exc_raise(rc, fn11);
}

#define IEEE_ARITH3(name)                                               \
static inline void glue(gen_, name)(DisasContext *ctx,                  \
                                    int ra, int rb, int rc, int fn11)   \
{                                                                       \
    gen_ieee_arith3(ctx, gen_helper_##name, ra, rb, rc, fn11);          \
}
IEEE_ARITH3(adds)
IEEE_ARITH3(subs)
IEEE_ARITH3(muls)
IEEE_ARITH3(divs)
IEEE_ARITH3(addt)
IEEE_ARITH3(subt)
IEEE_ARITH3(mult)
IEEE_ARITH3(divt)

static void gen_ieee_compare(DisasContext *ctx,
                             void (*helper)(TCGv, TCGv_ptr, TCGv, TCGv),
                             int ra, int rb, int rc, int fn11)
{
    TCGv va, vb, vc;

    va = gen_ieee_input(ctx, ra, fn11, 1);
    vb = gen_ieee_input(ctx, rb, fn11, 1);
    vc = dest_fpr(ctx, rc);
    helper(vc, cpu_env, va, vb);

    gen_fp_exc_raise(rc, fn11);
}

#define IEEE_CMP3(name)                                                 \
static inline void glue(gen_, name)(DisasContext *ctx,                  \
                                    int ra, int rb, int rc, int fn11)   \
{                                                                       \
    gen_ieee_compare(ctx, gen_helper_##name, ra, rb, rc, fn11);         \
}
IEEE_CMP3(cmptun)
IEEE_CMP3(cmpteq)
IEEE_CMP3(cmptlt)
IEEE_CMP3(cmptle)
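
/* Expand the 8-bit byte mask LIT into a 64-bit mask with 0xff in each
   byte lane whose corresponding bit is set.  */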
static inline uint64_t zapnot_mask(uint8_t lit)
{
    uint64_t mask = 0;
    int i;

    for (i = 0; i < 8; ++i) {
        if ((lit >> i) & 1) {
            mask |= 0xffull << (i * 8);
        }
    }
    return mask;
}

/* Implement zapnot with an immediate operand, which expands to some
   form of immediate AND.  This is a basic building block in the
   definition of many of the other byte manipulation instructions.  */
static void gen_zapnoti(TCGv dest, TCGv src, uint8_t lit)
{
    switch (lit) {
    case 0x00:
        tcg_gen_movi_i64(dest, 0);
        break;
    case 0x01:
        tcg_gen_ext8u_i64(dest, src);
        break;
    case 0x03:
        tcg_gen_ext16u_i64(dest, src);
        break;
    case 0x0f:
        tcg_gen_ext32u_i64(dest, src);
        break;
    case 0xff:
        tcg_gen_mov_i64(dest, src);
        break;
    default:
        tcg_gen_andi_i64(dest, src, zapnot_mask(lit));
        break;
    }
}

/* EXTWH, EXTLH, EXTQH */
static void gen_ext_h(DisasContext *ctx, TCGv vc, TCGv va, int rb, bool islit,
                      uint8_t lit, uint8_t byte_mask)
{
    if (islit) {
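        /* The "& 0x3f" implements the architected modulo-64 shift count,
           producing a shift of 0 when the byte offset is 0.  */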
        tcg_gen_shli_i64(vc, va, (64 - lit * 8) & 0x3f);
    } else {
        TCGv tmp = tcg_temp_new();
        tcg_gen_shli_i64(tmp, load_gpr(ctx, rb), 3);
        tcg_gen_neg_i64(tmp, tmp);
        tcg_gen_andi_i64(tmp, tmp, 0x3f);
        tcg_gen_shl_i64(vc, va, tmp);
        tcg_temp_free(tmp);
    }
    gen_zapnoti(vc, vc, byte_mask);
}

/* EXTBL, EXTWL, EXTLL, EXTQL */
static void gen_ext_l(DisasContext *ctx, TCGv vc, TCGv va, int rb, bool islit,
                      uint8_t lit, uint8_t byte_mask)
{
    if (islit) {
        tcg_gen_shri_i64(vc, va, (lit & 7) * 8);
    } else {
        TCGv tmp = tcg_temp_new();
        tcg_gen_andi_i64(tmp, load_gpr(ctx, rb), 7);
        tcg_gen_shli_i64(tmp, tmp, 3);
        tcg_gen_shr_i64(vc, va, tmp);
        tcg_temp_free(tmp);
    }
    gen_zapnoti(vc, vc, byte_mask);
}

/* INSWH, INSLH, INSQH */
static void gen_ins_h(DisasContext *ctx, TCGv vc, TCGv va, int rb, bool islit,
                      uint8_t lit, uint8_t byte_mask)
{
    TCGv tmp = tcg_temp_new();

    /* The instruction description has us left-shift the byte mask and extract
       bits <15:8> and apply that zap at the end.  This is equivalent to simply
       performing the zap first and shifting afterward.  */
    gen_zapnoti(tmp, va, byte_mask);

    if (islit) {
        lit &= 7;
        if (unlikely(lit == 0)) {
            tcg_gen_movi_i64(vc, 0);
        } else {
            tcg_gen_shri_i64(vc, tmp, 64 - lit * 8);
        }
    } else {
        TCGv shift = tcg_temp_new();

        /* If (B & 7) == 0, we need to shift by 64 and leave a zero.  Do this
           portably by splitting the shift into two parts: shift_count-1 and 1.
           Arrange for the -1 by using ones-complement instead of
           twos-complement in the negation: ~(B * 8) & 63.  */

        tcg_gen_shli_i64(shift, load_gpr(ctx, rb), 3);
        tcg_gen_not_i64(shift, shift);
        tcg_gen_andi_i64(shift, shift, 0x3f);

        tcg_gen_shr_i64(vc, tmp, shift);
        tcg_gen_shri_i64(vc, vc, 1);
        tcg_temp_free(shift);
    }
    tcg_temp_free(tmp);
}

/* INSBL, INSWL, INSLL, INSQL */
static void gen_ins_l(DisasContext *ctx, TCGv vc, TCGv va, int rb, bool islit,
                      uint8_t lit, uint8_t byte_mask)
{
    TCGv tmp = tcg_temp_new();

    /* The instruction description has us left-shift the byte mask
       the same number of byte slots as the data and apply the zap
       at the end.  This is equivalent to simply performing the zap
       first and shifting afterward.  */
    gen_zapnoti(tmp, va, byte_mask);

    if (islit) {
        tcg_gen_shli_i64(vc, tmp, (lit & 7) * 8);
    } else {
        TCGv shift = tcg_temp_new();
        tcg_gen_andi_i64(shift, load_gpr(ctx, rb), 7);
        tcg_gen_shli_i64(shift, shift, 3);
        tcg_gen_shl_i64(vc, tmp, shift);
        tcg_temp_free(shift);
    }
    tcg_temp_free(tmp);
}

/* MSKWH, MSKLH, MSKQH */
static void gen_msk_h(DisasContext *ctx, TCGv vc, TCGv va, int rb, bool islit,
                      uint8_t lit, uint8_t byte_mask)
{
    if (islit) {
        gen_zapnoti(vc, va, ~((byte_mask << (lit & 7)) >> 8));
    } else {
        TCGv shift = tcg_temp_new();
        TCGv mask = tcg_temp_new();

        /* The instruction description is as above, where the byte_mask
           is shifted left, and then we extract bits <15:8>.  This can be
           emulated with a right-shift on the expanded byte mask.  This
           requires extra care because for an input <2:0> == 0 we need a
           shift of 64 bits in order to generate a zero.  This is done by
           splitting the shift into two parts, the variable shift - 1
           followed by a constant 1 shift.  The code we expand below is
           equivalent to ~(B * 8) & 63.  */

        tcg_gen_shli_i64(shift, load_gpr(ctx, rb), 3);
        tcg_gen_not_i64(shift, shift);
        tcg_gen_andi_i64(shift, shift, 0x3f);
        tcg_gen_movi_i64(mask, zapnot_mask(byte_mask));
        tcg_gen_shr_i64(mask, mask, shift);
        tcg_gen_shri_i64(mask, mask, 1);

        tcg_gen_andc_i64(vc, va, mask);

        tcg_temp_free(mask);
        tcg_temp_free(shift);
    }
}

/* MSKBL, MSKWL, MSKLL, MSKQL */
static void gen_msk_l(DisasContext *ctx, TCGv vc, TCGv va, int rb, bool islit,
                      uint8_t lit, uint8_t byte_mask)
{
    if (islit) {
        gen_zapnoti(vc, va, ~(byte_mask << (lit & 7)));
    } else {
        TCGv shift = tcg_temp_new();
        TCGv mask = tcg_temp_new();

        tcg_gen_andi_i64(shift, load_gpr(ctx, rb), 7);
        tcg_gen_shli_i64(shift, shift, 3);
        tcg_gen_movi_i64(mask, zapnot_mask(byte_mask));
        tcg_gen_shl_i64(mask, mask, shift);

        tcg_gen_andc_i64(vc, va, mask);

        tcg_temp_free(mask);
        tcg_temp_free(shift);
    }
}
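
/* Common code for RC and RS: copy the sticky intr_flag into RA, then
   store SET back (0 for RC, 1 for RS).  */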
static void gen_rx(DisasContext *ctx, int ra, int set)
{
    TCGv_i32 tmp;

    if (ra != 31) {
        tcg_gen_ld8u_i64(ctx->ir[ra], cpu_env,
                         offsetof(CPUAlphaState, intr_flag));
    }

    tmp = tcg_const_i32(set);
    tcg_gen_st8_i32(tmp, cpu_env, offsetof(CPUAlphaState, intr_flag));
    tcg_temp_free_i32(tmp);
}

static ExitStatus gen_call_pal(DisasContext *ctx, int palcode)
{
    /* We're emulating OSF/1 PALcode.  Many of these are trivial access
       to internal cpu registers.  */

    /* Unprivileged PAL call */
    if (palcode >= 0x80 && palcode < 0xC0) {
        switch (palcode) {
        case 0x86:
            /* IMB */
            /* No-op inside QEMU.  */
            break;
        case 0x9E:
            /* RDUNIQUE */
            tcg_gen_ld_i64(ctx->ir[IR_V0], cpu_env,
                           offsetof(CPUAlphaState, unique));
            break;
        case 0x9F:
            /* WRUNIQUE */
            tcg_gen_st_i64(ctx->ir[IR_A0], cpu_env,
                           offsetof(CPUAlphaState, unique));
            break;
        default:
            palcode &= 0xbf;
            goto do_call_pal;
        }
        return NO_EXIT;
    }

#ifndef CONFIG_USER_ONLY
    /* Privileged PAL code */
    if (palcode < 0x40 && (ctx->tb->flags & TB_FLAGS_USER_MODE) == 0) {
        switch (palcode) {
        case 0x01:
            /* CFLUSH */
            /* No-op inside QEMU.  */
            break;
        case 0x02:
            /* DRAINA */
            /* No-op inside QEMU.  */
            break;
        case 0x2D:
            /* WRVPTPTR */
            tcg_gen_st_i64(ctx->ir[IR_A0], cpu_env,
                           offsetof(CPUAlphaState, vptptr));
            break;
        case 0x31:
            /* WRVAL */
            tcg_gen_st_i64(ctx->ir[IR_A0], cpu_env,
                           offsetof(CPUAlphaState, sysval));
            break;
        case 0x32:
            /* RDVAL */
            tcg_gen_ld_i64(ctx->ir[IR_V0], cpu_env,
                           offsetof(CPUAlphaState, sysval));
            break;

        case 0x35: {
            /* SWPIPL */
            TCGv tmp;

            /* Note that we already know we're in kernel mode, so we know
               that PS only contains the 3 IPL bits.  */
            tcg_gen_ld8u_i64(ctx->ir[IR_V0], cpu_env,
                             offsetof(CPUAlphaState, ps));

            /* But make sure and store only the 3 IPL bits from the user.  */
            tmp = tcg_temp_new();
            tcg_gen_andi_i64(tmp, ctx->ir[IR_A0], PS_INT_MASK);
            tcg_gen_st8_i64(tmp, cpu_env, offsetof(CPUAlphaState, ps));
            tcg_temp_free(tmp);
            break;
        }

        case 0x36:
            /* RDPS */
            tcg_gen_ld8u_i64(ctx->ir[IR_V0], cpu_env,
                             offsetof(CPUAlphaState, ps));
            break;
        case 0x38:
            /* WRUSP */
            tcg_gen_st_i64(ctx->ir[IR_A0], cpu_env,
                           offsetof(CPUAlphaState, usp));
            break;
        case 0x3A:
            /* RDUSP */
            tcg_gen_ld_i64(ctx->ir[IR_V0], cpu_env,
                           offsetof(CPUAlphaState, usp));
            break;
        case 0x3C:
            /* WHAMI */
            tcg_gen_ld32s_i64(ctx->ir[IR_V0], cpu_env,
                -offsetof(AlphaCPU, env) + offsetof(CPUState, cpu_index));
            break;

        default:
            palcode &= 0x3f;
            goto do_call_pal;
        }
        return NO_EXIT;
    }
#endif
    return gen_invalid(ctx);

 do_call_pal:
#ifdef CONFIG_USER_ONLY
    return gen_excp(ctx, EXCP_CALL_PAL, palcode);
#else
    {
        TCGv tmp = tcg_temp_new();
        uint64_t exc_addr = ctx->pc;
        uint64_t entry = ctx->palbr;

        if (ctx->tb->flags & TB_FLAGS_PAL_MODE) {
            exc_addr |= 1;
        } else {
            tcg_gen_movi_i64(tmp, 1);
            tcg_gen_st8_i64(tmp, cpu_env, offsetof(CPUAlphaState, pal_mode));
        }

        tcg_gen_movi_i64(tmp, exc_addr);
        tcg_gen_st_i64(tmp, cpu_env, offsetof(CPUAlphaState, exc_addr));
        tcg_temp_free(tmp);
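
        /* The emulated PALcode lays out its CALL_PAL vectors 64 bytes
           apart: privileged entries at PALBR + 0x1000, unprivileged
           (palcode >= 0x80) at PALBR + 0x2000.  */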
        entry += (palcode & 0x80
                  ? 0x2000 + (palcode - 0x80) * 64
                  : 0x1000 + palcode * 64);

        /* Since the destination is running in PALmode, we don't really
           need the page permissions check.  We'll see the existence of
           the page when we create the TB, and we'll flush all TBs if
           we change the PAL base register.  */
        if (!ctx->singlestep_enabled && !(ctx->tb->cflags & CF_LAST_IO)) {
            tcg_gen_goto_tb(0);
            tcg_gen_movi_i64(cpu_pc, entry);
            tcg_gen_exit_tb((uintptr_t)ctx->tb);
            return EXIT_GOTO_TB;
        } else {
            tcg_gen_movi_i64(cpu_pc, entry);
            return EXIT_PC_UPDATED;
        }
    }
#endif
}

#ifndef CONFIG_USER_ONLY

#define PR_BYTE         0x100000
#define PR_LONG         0x200000
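
/* Map a processor-register number to its env offset, tagged with
   PR_BYTE/PR_LONG when the field is narrower than 64 bits.  A result
   of 0 means the register is read-as-zero / write-ignore.  */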
static int cpu_pr_data(int pr)
{
    switch (pr) {
    case  0: return offsetof(CPUAlphaState, ps) | PR_BYTE;
    case  1: return offsetof(CPUAlphaState, fen) | PR_BYTE;
    case  2: return offsetof(CPUAlphaState, pcc_ofs) | PR_LONG;
    case  3: return offsetof(CPUAlphaState, trap_arg0);
    case  4: return offsetof(CPUAlphaState, trap_arg1);
    case  5: return offsetof(CPUAlphaState, trap_arg2);
    case  6: return offsetof(CPUAlphaState, exc_addr);
    case  7: return offsetof(CPUAlphaState, palbr);
    case  8: return offsetof(CPUAlphaState, ptbr);
    case  9: return offsetof(CPUAlphaState, vptptr);
    case 10: return offsetof(CPUAlphaState, unique);
    case 11: return offsetof(CPUAlphaState, sysval);
    case 12: return offsetof(CPUAlphaState, usp);

    case 40 ... 63:
        return offsetof(CPUAlphaState, scratch[pr - 40]);

    case 251:
        return offsetof(CPUAlphaState, alarm_expire);
    }
    return 0;
}

static ExitStatus gen_mfpr(DisasContext *ctx, TCGv va, int regno)
{
    void (*helper)(TCGv);
    int data;

    switch (regno) {
    case 32 ... 39:
        /* Accessing the "non-shadow" general registers.  */
        regno = regno == 39 ? 25 : regno - 32 + 8;
        tcg_gen_mov_i64(va, cpu_std_ir[regno]);
        break;

    case 250: /* WALLTIME */
        helper = gen_helper_get_walltime;
        goto do_helper;
    case 249: /* VMTIME */
        helper = gen_helper_get_vmtime;
    do_helper:
        if (use_icount) {
            gen_io_start();
            helper(va);
            gen_io_end();
            return EXIT_PC_STALE;
        } else {
            helper(va);
        }
        break;

    default:
        /* The basic registers are data only, and unknown registers
           are read-zero, write-ignore.  */
        data = cpu_pr_data(regno);
        if (data == 0) {
            tcg_gen_movi_i64(va, 0);
        } else if (data & PR_BYTE) {
            tcg_gen_ld8u_i64(va, cpu_env, data & ~PR_BYTE);
        } else if (data & PR_LONG) {
            tcg_gen_ld32s_i64(va, cpu_env, data & ~PR_LONG);
        } else {
            tcg_gen_ld_i64(va, cpu_env, data);
        }
        break;
    }

    return NO_EXIT;
}

static ExitStatus gen_mtpr(DisasContext *ctx, TCGv vb, int regno)
{
    TCGv tmp;
    int data;

    switch (regno) {
    case 255:
        /* TBIA */
        gen_helper_tbia(cpu_env);
        break;

    case 254:
        /* TBIS */
        gen_helper_tbis(cpu_env, vb);
        break;

    case 253:
        /* WAIT */
        tmp = tcg_const_i64(1);
        tcg_gen_st32_i64(tmp, cpu_env, -offsetof(AlphaCPU, env) +
                         offsetof(CPUState, halted));
        return gen_excp(ctx, EXCP_HLT, 0);

    case 252:
        /* HALT */
        gen_helper_halt(vb);
        return EXIT_PC_STALE;

    case 251:
        /* ALARM */
        gen_helper_set_alarm(cpu_env, vb);
        break;

    case 7:
        /* PALBR */
        tcg_gen_st_i64(vb, cpu_env, offsetof(CPUAlphaState, palbr));
        /* Changing the PAL base register implies un-chaining all of the TBs
           that ended with a CALL_PAL.  Since the base register usually only
           changes during boot, flushing everything works well.  */
        gen_helper_tb_flush(cpu_env);
        return EXIT_PC_STALE;

    case 32 ... 39:
        /* Accessing the "non-shadow" general registers.  */
        regno = regno == 39 ? 25 : regno - 32 + 8;
        tcg_gen_mov_i64(cpu_std_ir[regno], vb);
        break;

    default:
        /* The basic registers are data only, and unknown registers
           are read-zero, write-ignore.  */
        data = cpu_pr_data(regno);
        if (data != 0) {
            if (data & PR_BYTE) {
                tcg_gen_st8_i64(vb, cpu_env, data & ~PR_BYTE);
            } else if (data & PR_LONG) {
                tcg_gen_st32_i64(vb, cpu_env, data & ~PR_LONG);
            } else {
                tcg_gen_st_i64(vb, cpu_env, data);
            }
        }
        break;
    }

    return NO_EXIT;
}
#endif /* !USER_ONLY */

#define REQUIRE_NO_LIT                          \
    do {                                        \
        if (real_islit) {                       \
            goto invalid_opc;                   \
        }                                       \
    } while (0)

#define REQUIRE_TB_FLAG(FLAG)                   \
    do {                                        \
        if ((ctx->tb->flags & (FLAG)) == 0) {   \
            goto invalid_opc;                   \
        }                                       \
    } while (0)

#define REQUIRE_REG_31(WHICH)                   \
    do {                                        \
        if (WHICH != 31) {                      \
            goto invalid_opc;                   \
        }                                       \
    } while (0)

static ExitStatus translate_one(DisasContext *ctx, uint32_t insn)
{
    int32_t disp21, disp16, disp12 __attribute__((unused));
    uint16_t fn11;
    uint8_t opc, ra, rb, rc, fpfn, fn7, lit;
    bool islit, real_islit;
    TCGv va, vb, vc, tmp, tmp2;
    TCGv_i32 t32;
    ExitStatus ret;

    /* Decode all instruction fields */
    opc = extract32(insn, 26, 6);
    ra = extract32(insn, 21, 5);
    rb = extract32(insn, 16, 5);
    rc = extract32(insn, 0, 5);
    real_islit = islit = extract32(insn, 12, 1);
    lit = extract32(insn, 13, 8);

    disp21 = sextract32(insn, 0, 21);
    disp16 = sextract32(insn, 0, 16);
    disp12 = sextract32(insn, 0, 12);

    fn11 = extract32(insn, 5, 11);
    fpfn = extract32(insn, 5, 6);
    fn7 = extract32(insn, 5, 7);
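
    /* An RB operand of $31 with no literal reads as zero; fold it into
       the literal form so the special cases below see a constant 0.  */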
    if (rb == 31 && !islit) {
        islit = true;
        lit = 0;
    }

    ret = NO_EXIT;
    switch (opc) {
    case 0x00:
        /* CALL_PAL */
        ret = gen_call_pal(ctx, insn & 0x03ffffff);
        break;
    case 0x01:
        /* OPC01 */
        goto invalid_opc;
    case 0x02:
        /* OPC02 */
        goto invalid_opc;
    case 0x03:
        /* OPC03 */
        goto invalid_opc;
    case 0x04:
        /* OPC04 */
        goto invalid_opc;
    case 0x05:
        /* OPC05 */
        goto invalid_opc;
    case 0x06:
        /* OPC06 */
        goto invalid_opc;
    case 0x07:
        /* OPC07 */
        goto invalid_opc;

    case 0x09:
        /* LDAH */
        disp16 = (uint32_t)disp16 << 16;
        /* fall through */
    case 0x08:
        /* LDA */
        va = dest_gpr(ctx, ra);
        /* It's worth special-casing immediate loads.  */
        if (rb == 31) {
            tcg_gen_movi_i64(va, disp16);
        } else {
            tcg_gen_addi_i64(va, load_gpr(ctx, rb), disp16);
        }
        break;

    case 0x0A:
        /* LDBU */
        REQUIRE_TB_FLAG(TB_FLAGS_AMASK_BWX);
        gen_load_mem(ctx, &tcg_gen_qemu_ld8u, ra, rb, disp16, 0, 0);
        break;
    case 0x0B:
        /* LDQ_U */
        gen_load_mem(ctx, &tcg_gen_qemu_ld64, ra, rb, disp16, 0, 1);
        break;
    case 0x0C:
        /* LDWU */
        REQUIRE_TB_FLAG(TB_FLAGS_AMASK_BWX);
        gen_load_mem(ctx, &tcg_gen_qemu_ld16u, ra, rb, disp16, 0, 0);
        break;
    case 0x0D:
        /* STW */
        REQUIRE_TB_FLAG(TB_FLAGS_AMASK_BWX);
        gen_store_mem(ctx, &tcg_gen_qemu_st16, ra, rb, disp16, 0, 0);
        break;
    case 0x0E:
        /* STB */
        REQUIRE_TB_FLAG(TB_FLAGS_AMASK_BWX);
        gen_store_mem(ctx, &tcg_gen_qemu_st8, ra, rb, disp16, 0, 0);
        break;
    case 0x0F:
        /* STQ_U */
        gen_store_mem(ctx, &tcg_gen_qemu_st64, ra, rb, disp16, 0, 1);
        break;

    case 0x10:
        vc = dest_gpr(ctx, rc);
        vb = load_gpr_lit(ctx, rb, lit, islit);

        if (ra == 31) {
            if (fn7 == 0x00) {
                /* Special case ADDL as SEXTL.  */
                tcg_gen_ext32s_i64(vc, vb);
                break;
            }
            if (fn7 == 0x29) {
                /* Special case SUBQ as NEGQ.  */
                tcg_gen_neg_i64(vc, vb);
                break;
            }
        }

        va = load_gpr(ctx, ra);
        switch (fn7) {
        case 0x00:
            /* ADDL */
            tcg_gen_add_i64(vc, va, vb);
            tcg_gen_ext32s_i64(vc, vc);
            break;
        case 0x02:
            /* S4ADDL */
            tmp = tcg_temp_new();
            tcg_gen_shli_i64(tmp, va, 2);
            tcg_gen_add_i64(tmp, tmp, vb);
            tcg_gen_ext32s_i64(vc, tmp);
            tcg_temp_free(tmp);
            break;
        case 0x09:
            /* SUBL */
            tcg_gen_sub_i64(vc, va, vb);
            tcg_gen_ext32s_i64(vc, vc);
            break;
        case 0x0B:
            /* S4SUBL */
            tmp = tcg_temp_new();
            tcg_gen_shli_i64(tmp, va, 2);
            tcg_gen_sub_i64(tmp, tmp, vb);
            tcg_gen_ext32s_i64(vc, tmp);
            tcg_temp_free(tmp);
            break;
        case 0x0F:
            /* CMPBGE */
            if (ra == 31) {
                /* Special case 0 >= X as X == 0.  */
                gen_helper_cmpbe0(vc, vb);
            } else {
                gen_helper_cmpbge(vc, va, vb);
            }
            break;
        case 0x12:
            /* S8ADDL */
            tmp = tcg_temp_new();
            tcg_gen_shli_i64(tmp, va, 3);
            tcg_gen_add_i64(tmp, tmp, vb);
            tcg_gen_ext32s_i64(vc, tmp);
            tcg_temp_free(tmp);
            break;
        case 0x1B:
            /* S8SUBL */
            tmp = tcg_temp_new();
            tcg_gen_shli_i64(tmp, va, 3);
            tcg_gen_sub_i64(tmp, tmp, vb);
            tcg_gen_ext32s_i64(vc, tmp);
            tcg_temp_free(tmp);
            break;
        case 0x1D:
            /* CMPULT */
            tcg_gen_setcond_i64(TCG_COND_LTU, vc, va, vb);
            break;
        case 0x20:
            /* ADDQ */
            tcg_gen_add_i64(vc, va, vb);
            break;
        case 0x22:
            /* S4ADDQ */
            tmp = tcg_temp_new();
            tcg_gen_shli_i64(tmp, va, 2);
            tcg_gen_add_i64(vc, tmp, vb);
            tcg_temp_free(tmp);
            break;
        case 0x29:
            /* SUBQ */
            tcg_gen_sub_i64(vc, va, vb);
            break;
        case 0x2B:
            /* S4SUBQ */
            tmp = tcg_temp_new();
            tcg_gen_shli_i64(tmp, va, 2);
            tcg_gen_sub_i64(vc, tmp, vb);
            tcg_temp_free(tmp);
            break;
        case 0x2D:
            /* CMPEQ */
            tcg_gen_setcond_i64(TCG_COND_EQ, vc, va, vb);
            break;
        case 0x32:
            /* S8ADDQ */
            tmp = tcg_temp_new();
            tcg_gen_shli_i64(tmp, va, 3);
            tcg_gen_add_i64(vc, tmp, vb);
            tcg_temp_free(tmp);
            break;
        case 0x3B:
            /* S8SUBQ */
            tmp = tcg_temp_new();
            tcg_gen_shli_i64(tmp, va, 3);
            tcg_gen_sub_i64(vc, tmp, vb);
            tcg_temp_free(tmp);
            break;
        case 0x3D:
            /* CMPULE */
            tcg_gen_setcond_i64(TCG_COND_LEU, vc, va, vb);
            break;
        case 0x40:
            /* ADDL/V */
            tmp = tcg_temp_new();
            tcg_gen_ext32s_i64(tmp, va);
            tcg_gen_ext32s_i64(vc, vb);
            tcg_gen_add_i64(tmp, tmp, vc);
            tcg_gen_ext32s_i64(vc, tmp);
            gen_helper_check_overflow(cpu_env, vc, tmp);
            tcg_temp_free(tmp);
            break;
        case 0x49:
            /* SUBL/V */
            tmp = tcg_temp_new();
            tcg_gen_ext32s_i64(tmp, va);
            tcg_gen_ext32s_i64(vc, vb);
            tcg_gen_sub_i64(tmp, tmp, vc);
            tcg_gen_ext32s_i64(vc, tmp);
            gen_helper_check_overflow(cpu_env, vc, tmp);
            tcg_temp_free(tmp);
            break;
        case 0x4D:
            /* CMPLT */
            tcg_gen_setcond_i64(TCG_COND_LT, vc, va, vb);
            break;
        case 0x60:
            /* ADDQ/V */
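            /* Signed overflow iff the operands have the same sign and the
               result's sign differs: bit 63 of (va EQV vb) & (va ^ vc).  */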
            tmp = tcg_temp_new();
            tmp2 = tcg_temp_new();
            tcg_gen_eqv_i64(tmp, va, vb);
            tcg_gen_mov_i64(tmp2, va);
            tcg_gen_add_i64(vc, va, vb);
            tcg_gen_xor_i64(tmp2, tmp2, vc);
            tcg_gen_and_i64(tmp, tmp, tmp2);
            tcg_gen_shri_i64(tmp, tmp, 63);
            tcg_gen_movi_i64(tmp2, 0);
            gen_helper_check_overflow(cpu_env, tmp, tmp2);
            tcg_temp_free(tmp);
            tcg_temp_free(tmp2);
            break;
        case 0x69:
            /* SUBQ/V */
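            /* Signed overflow iff the operands differ in sign and the
               result's sign differs from the minuend's.  */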
            tmp = tcg_temp_new();
            tmp2 = tcg_temp_new();
            tcg_gen_xor_i64(tmp, va, vb);
            tcg_gen_mov_i64(tmp2, va);
            tcg_gen_sub_i64(vc, va, vb);
            tcg_gen_xor_i64(tmp2, tmp2, vc);
            tcg_gen_and_i64(tmp, tmp, tmp2);
            tcg_gen_shri_i64(tmp, tmp, 63);
            tcg_gen_movi_i64(tmp2, 0);
            gen_helper_check_overflow(cpu_env, tmp, tmp2);
            tcg_temp_free(tmp);
            tcg_temp_free(tmp2);
            break;
        case 0x6D:
            /* CMPLE */
            tcg_gen_setcond_i64(TCG_COND_LE, vc, va, vb);
            break;
        default:
            goto invalid_opc;
        }
        break;

    case 0x11:
        if (fn7 == 0x20) {
            if (rc == 31) {
                /* Special case BIS as NOP.  */
                break;
            }
            if (ra == 31) {
                /* Special case BIS as MOV.  */
                vc = dest_gpr(ctx, rc);
                if (islit) {
                    tcg_gen_movi_i64(vc, lit);
                } else {
                    tcg_gen_mov_i64(vc, load_gpr(ctx, rb));
                }
                break;
            }
        }

        vc = dest_gpr(ctx, rc);
        vb = load_gpr_lit(ctx, rb, lit, islit);

        if (fn7 == 0x28 && ra == 31) {
            /* Special case ORNOT as NOT.  */
            tcg_gen_not_i64(vc, vb);
            break;
        }

        va = load_gpr(ctx, ra);
        switch (fn7) {
        case 0x00:
            /* AND */
            tcg_gen_and_i64(vc, va, vb);
            break;
        case 0x08:
            /* BIC */
            tcg_gen_andc_i64(vc, va, vb);
            break;
        case 0x14:
            /* CMOVLBS */
            tmp = tcg_temp_new();
            tcg_gen_andi_i64(tmp, va, 1);
            tcg_gen_movcond_i64(TCG_COND_NE, vc, tmp, load_zero(ctx),
                                vb, load_gpr(ctx, rc));
            tcg_temp_free(tmp);
            break;
        case 0x16:
            /* CMOVLBC */
            tmp = tcg_temp_new();
            tcg_gen_andi_i64(tmp, va, 1);
            tcg_gen_movcond_i64(TCG_COND_EQ, vc, tmp, load_zero(ctx),
                                vb, load_gpr(ctx, rc));
            tcg_temp_free(tmp);
            break;
        case 0x20:
            /* BIS */
            tcg_gen_or_i64(vc, va, vb);
            break;
        case 0x24:
            /* CMOVEQ */
            tcg_gen_movcond_i64(TCG_COND_EQ, vc, va, load_zero(ctx),
                                vb, load_gpr(ctx, rc));
            break;
        case 0x26:
            /* CMOVNE */
            tcg_gen_movcond_i64(TCG_COND_NE, vc, va, load_zero(ctx),
                                vb, load_gpr(ctx, rc));
            break;
        case 0x28:
            /* ORNOT */
            tcg_gen_orc_i64(vc, va, vb);
            break;
        case 0x40:
            /* XOR */
            tcg_gen_xor_i64(vc, va, vb);
            break;
        case 0x44:
            /* CMOVLT */
            tcg_gen_movcond_i64(TCG_COND_LT, vc, va, load_zero(ctx),
                                vb, load_gpr(ctx, rc));
            break;
        case 0x46:
            /* CMOVGE */
            tcg_gen_movcond_i64(TCG_COND_GE, vc, va, load_zero(ctx),
                                vb, load_gpr(ctx, rc));
            break;
        case 0x48:
            /* EQV */
            tcg_gen_eqv_i64(vc, va, vb);
            break;
        case 0x61:
            /* AMASK */
            REQUIRE_REG_31(ra);
            {
                uint64_t amask = ctx->tb->flags >> TB_FLAGS_AMASK_SHIFT;
                tcg_gen_andi_i64(vc, vb, ~amask);
            }
            break;
        case 0x64:
            /* CMOVLE */
            tcg_gen_movcond_i64(TCG_COND_LE, vc, va, load_zero(ctx),
                                vb, load_gpr(ctx, rc));
            break;
        case 0x66:
            /* CMOVGT */
            tcg_gen_movcond_i64(TCG_COND_GT, vc, va, load_zero(ctx),
                                vb, load_gpr(ctx, rc));
            break;
        case 0x6C:
            /* IMPLVER */
            REQUIRE_REG_31(ra);
            tcg_gen_movi_i64(vc, ctx->implver);
            break;
        default:
            goto invalid_opc;
        }
        break;

    case 0x12:
        vc = dest_gpr(ctx, rc);
        va = load_gpr(ctx, ra);
        switch (fn7) {
        case 0x02:
            /* MSKBL */
            gen_msk_l(ctx, vc, va, rb, islit, lit, 0x01);
            break;
        case 0x06:
            /* EXTBL */
            gen_ext_l(ctx, vc, va, rb, islit, lit, 0x01);
            break;
        case 0x0B:
            /* INSBL */
            gen_ins_l(ctx, vc, va, rb, islit, lit, 0x01);
            break;
        case 0x12:
            /* MSKWL */
            gen_msk_l(ctx, vc, va, rb, islit, lit, 0x03);
            break;
        case 0x16:
            /* EXTWL */
            gen_ext_l(ctx, vc, va, rb, islit, lit, 0x03);
            break;
        case 0x1B:
            /* INSWL */
            gen_ins_l(ctx, vc, va, rb, islit, lit, 0x03);
            break;
        case 0x22:
            /* MSKLL */
            gen_msk_l(ctx, vc, va, rb, islit, lit, 0x0f);
            break;
        case 0x26:
            /* EXTLL */
            gen_ext_l(ctx, vc, va, rb, islit, lit, 0x0f);
            break;
        case 0x2B:
            /* INSLL */
            gen_ins_l(ctx, vc, va, rb, islit, lit, 0x0f);
            break;
        case 0x30:
            /* ZAP */
            if (islit) {
                gen_zapnoti(vc, va, ~lit);
            } else {
                gen_helper_zap(vc, va, load_gpr(ctx, rb));
            }
            break;
        case 0x31:
            /* ZAPNOT */
            if (islit) {
                gen_zapnoti(vc, va, lit);
            } else {
                gen_helper_zapnot(vc, va, load_gpr(ctx, rb));
            }
            break;
        case 0x32:
            /* MSKQL */
            gen_msk_l(ctx, vc, va, rb, islit, lit, 0xff);
            break;
        case 0x34:
            /* SRL */
            if (islit) {
                tcg_gen_shri_i64(vc, va, lit & 0x3f);
            } else {
                tmp = tcg_temp_new();
                vb = load_gpr(ctx, rb);
                tcg_gen_andi_i64(tmp, vb, 0x3f);
                tcg_gen_shr_i64(vc, va, tmp);
                tcg_temp_free(tmp);
            }
            break;
        case 0x36:
            /* EXTQL */
            gen_ext_l(ctx, vc, va, rb, islit, lit, 0xff);
            break;
        case 0x39:
            /* SLL */
            if (islit) {
                tcg_gen_shli_i64(vc, va, lit & 0x3f);
            } else {
                tmp = tcg_temp_new();
                vb = load_gpr(ctx, rb);
                tcg_gen_andi_i64(tmp, vb, 0x3f);
                tcg_gen_shl_i64(vc, va, tmp);
                tcg_temp_free(tmp);
            }
            break;
        case 0x3B:
            /* INSQL */
            gen_ins_l(ctx, vc, va, rb, islit, lit, 0xff);
            break;
        case 0x3C:
            /* SRA */
            if (islit) {
                tcg_gen_sari_i64(vc, va, lit & 0x3f);
            } else {
                tmp = tcg_temp_new();
                vb = load_gpr(ctx, rb);
                tcg_gen_andi_i64(tmp, vb, 0x3f);
                tcg_gen_sar_i64(vc, va, tmp);
                tcg_temp_free(tmp);
            }
            break;
        case 0x52:
            /* MSKWH */
            gen_msk_h(ctx, vc, va, rb, islit, lit, 0x03);
            break;
        case 0x57:
            /* INSWH */
            gen_ins_h(ctx, vc, va, rb, islit, lit, 0x03);
            break;
        case 0x5A:
            /* EXTWH */
            gen_ext_h(ctx, vc, va, rb, islit, lit, 0x03);
            break;
        case 0x62:
            /* MSKLH */
            gen_msk_h(ctx, vc, va, rb, islit, lit, 0x0f);
            break;
        case 0x67:
            /* INSLH */
            gen_ins_h(ctx, vc, va, rb, islit, lit, 0x0f);
            break;
        case 0x6A:
            /* EXTLH */
            gen_ext_h(ctx, vc, va, rb, islit, lit, 0x0f);
            break;
        case 0x72:
            /* MSKQH */
            gen_msk_h(ctx, vc, va, rb, islit, lit, 0xff);
            break;
        case 0x77:
            /* INSQH */
            gen_ins_h(ctx, vc, va, rb, islit, lit, 0xff);
            break;
        case 0x7A:
            /* EXTQH */
            gen_ext_h(ctx, vc, va, rb, islit, lit, 0xff);
            break;
        default:
            goto invalid_opc;
        }
        break;

    case 0x13:
        vc = dest_gpr(ctx, rc);
        vb = load_gpr_lit(ctx, rb, lit, islit);
        va = load_gpr(ctx, ra);
        switch (fn7) {
        case 0x00:
            /* MULL */
            tcg_gen_mul_i64(vc, va, vb);
            tcg_gen_ext32s_i64(vc, vc);
            break;
        case 0x20:
            /* MULQ */
            tcg_gen_mul_i64(vc, va, vb);
            break;
        case 0x30:
            /* UMULH */
            tmp = tcg_temp_new();
            tcg_gen_mulu2_i64(tmp, vc, va, vb);
            tcg_temp_free(tmp);
            break;
        case 0x40:
            /* MULL/V */
            tmp = tcg_temp_new();
            tcg_gen_ext32s_i64(tmp, va);
            tcg_gen_ext32s_i64(vc, vb);
            tcg_gen_mul_i64(tmp, tmp, vc);
            tcg_gen_ext32s_i64(vc, tmp);
            gen_helper_check_overflow(cpu_env, vc, tmp);
            tcg_temp_free(tmp);
            break;
        case 0x60:
            /* MULQ/V */
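            /* Overflow iff the high 64 bits of the 128-bit product are
               not the sign-extension of the low 64 bits.  */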
            tmp = tcg_temp_new();
            tmp2 = tcg_temp_new();
            tcg_gen_muls2_i64(vc, tmp, va, vb);
            tcg_gen_sari_i64(tmp2, vc, 63);
            gen_helper_check_overflow(cpu_env, tmp, tmp2);
            tcg_temp_free(tmp);
            tcg_temp_free(tmp2);
            break;
        default:
            goto invalid_opc;
        }
        break;

    case 0x14:
        REQUIRE_TB_FLAG(TB_FLAGS_AMASK_FIX);
        vc = dest_fpr(ctx, rc);
        switch (fpfn) { /* fn11 & 0x3F */
        case 0x04:
            /* ITOFS */
            REQUIRE_REG_31(rb);
            t32 = tcg_temp_new_i32();
            va = load_gpr(ctx, ra);
            tcg_gen_extrl_i64_i32(t32, va);
            gen_helper_memory_to_s(vc, t32);
            tcg_temp_free_i32(t32);
            break;
        case 0x0A:
            /* SQRTF */
            REQUIRE_REG_31(ra);
            vb = load_fpr(ctx, rb);
            gen_helper_sqrtf(vc, cpu_env, vb);
            break;
        case 0x0B:
            /* SQRTS */
            REQUIRE_REG_31(ra);
            gen_sqrts(ctx, rb, rc, fn11);
            break;
        case 0x14:
            /* ITOFF */
            REQUIRE_REG_31(rb);
            t32 = tcg_temp_new_i32();
            va = load_gpr(ctx, ra);
            tcg_gen_extrl_i64_i32(t32, va);
            gen_helper_memory_to_f(vc, t32);
            tcg_temp_free_i32(t32);
            break;
        case 0x24:
            /* ITOFT */
            REQUIRE_REG_31(rb);
            va = load_gpr(ctx, ra);
            tcg_gen_mov_i64(vc, va);
            break;
        case 0x2A:
            /* SQRTG */
            REQUIRE_REG_31(ra);
            vb = load_fpr(ctx, rb);
            gen_helper_sqrtg(vc, cpu_env, vb);
            break;
        case 0x02B:
            /* SQRTT */
            REQUIRE_REG_31(ra);
            gen_sqrtt(ctx, rb, rc, fn11);
            break;
        default:
            goto invalid_opc;
        }
        break;

    case 0x15:
        /* VAX floating point */
        /* XXX: rounding mode and trap are ignored (!) */
        vc = dest_fpr(ctx, rc);
        vb = load_fpr(ctx, rb);
        va = load_fpr(ctx, ra);
        switch (fpfn) { /* fn11 & 0x3F */
        case 0x00:
            /* ADDF */
            gen_helper_addf(vc, cpu_env, va, vb);
            break;
        case 0x01:
            /* SUBF */
            gen_helper_subf(vc, cpu_env, va, vb);
            break;
        case 0x02:
            /* MULF */
            gen_helper_mulf(vc, cpu_env, va, vb);
            break;
        case 0x03:
            /* DIVF */
            gen_helper_divf(vc, cpu_env, va, vb);
            break;
        case 0x1E:
            /* CVTDG -- TODO */
            REQUIRE_REG_31(ra);
            goto invalid_opc;
        case 0x20:
            /* ADDG */
            gen_helper_addg(vc, cpu_env, va, vb);
            break;
        case 0x21:
            /* SUBG */
            gen_helper_subg(vc, cpu_env, va, vb);
            break;
        case 0x22:
            /* MULG */
            gen_helper_mulg(vc, cpu_env, va, vb);
            break;
        case 0x23:
            /* DIVG */
            gen_helper_divg(vc, cpu_env, va, vb);
            break;
        case 0x25:
            /* CMPGEQ */
            gen_helper_cmpgeq(vc, cpu_env, va, vb);
            break;
        case 0x26:
            /* CMPGLT */
            gen_helper_cmpglt(vc, cpu_env, va, vb);
            break;
        case 0x27:
            /* CMPGLE */
            gen_helper_cmpgle(vc, cpu_env, va, vb);
            break;
        case 0x2C:
            /* CVTGF */
            REQUIRE_REG_31(ra);
            gen_helper_cvtgf(vc, cpu_env, vb);
            break;
        case 0x2D:
            /* CVTGD -- TODO */
            REQUIRE_REG_31(ra);
            goto invalid_opc;
        case 0x2F:
            /* CVTGQ */
            REQUIRE_REG_31(ra);
            gen_helper_cvtgq(vc, cpu_env, vb);
            break;
        case 0x3C:
            /* CVTQF */
            REQUIRE_REG_31(ra);
            gen_helper_cvtqf(vc, cpu_env, vb);
            break;
        case 0x3E:
            /* CVTQG */
            REQUIRE_REG_31(ra);
            gen_helper_cvtqg(vc, cpu_env, vb);
            break;
        default:
            goto invalid_opc;
        }
        break;

    case 0x16:
        /* IEEE floating-point */
        switch (fpfn) { /* fn11 & 0x3F */
        case 0x00:
            /* ADDS */
            gen_adds(ctx, ra, rb, rc, fn11);
            break;
        case 0x01:
            /* SUBS */
            gen_subs(ctx, ra, rb, rc, fn11);
            break;
        case 0x02:
            /* MULS */
            gen_muls(ctx, ra, rb, rc, fn11);
            break;
        case 0x03:
            /* DIVS */
            gen_divs(ctx, ra, rb, rc, fn11);
            break;
        case 0x20:
            /* ADDT */
            gen_addt(ctx, ra, rb, rc, fn11);
            break;
        case 0x21:
            /* SUBT */
            gen_subt(ctx, ra, rb, rc, fn11);
            break;
        case 0x22:
            /* MULT */
            gen_mult(ctx, ra, rb, rc, fn11);
            break;
        case 0x23:
            /* DIVT */
            gen_divt(ctx, ra, rb, rc, fn11);
            break;
        case 0x24:
            /* CMPTUN */
            gen_cmptun(ctx, ra, rb, rc, fn11);
            break;
        case 0x25:
            /* CMPTEQ */
            gen_cmpteq(ctx, ra, rb, rc, fn11);
            break;
        case 0x26:
            /* CMPTLT */
            gen_cmptlt(ctx, ra, rb, rc, fn11);
            break;
        case 0x27:
            /* CMPTLE */
            gen_cmptle(ctx, ra, rb, rc, fn11);
            break;
        case 0x2C:
            REQUIRE_REG_31(ra);
            if (fn11 == 0x2AC || fn11 == 0x6AC) {
                /* CVTST */
                gen_cvtst(ctx, rb, rc, fn11);
            } else {
                /* CVTTS */
                gen_cvtts(ctx, rb, rc, fn11);
            }
            break;
        case 0x2F:
            /* CVTTQ */
            REQUIRE_REG_31(ra);
            gen_cvttq(ctx, rb, rc, fn11);
            break;
        case 0x3C:
            /* CVTQS */
            REQUIRE_REG_31(ra);
            gen_cvtqs(ctx, rb, rc, fn11);
            break;
        case 0x3E:
            /* CVTQT */
            REQUIRE_REG_31(ra);
            gen_cvtqt(ctx, rb, rc, fn11);
            break;
        default:
            goto invalid_opc;
        }
        break;

    case 0x17:
        switch (fn11) {
        case 0x010:
            /* CVTLQ */
            REQUIRE_REG_31(ra);
            vc = dest_fpr(ctx, rc);
            vb = load_fpr(ctx, rb);
            gen_cvtlq(vc, vb);
            break;
        case 0x020:
            /* CPYS */
            if (rc == 31) {
                /* Special case CPYS as FNOP.  */
            } else {
                vc = dest_fpr(ctx, rc);
                va = load_fpr(ctx, ra);
                if (ra == rb) {
                    /* Special case CPYS as FMOV.  */
                    tcg_gen_mov_i64(vc, va);
                } else {
                    vb = load_fpr(ctx, rb);
                    gen_cpy_mask(vc, va, vb, 0, 0x8000000000000000ULL);
                }
            }
            break;
        case 0x021:
            /* CPYSN */
            vc = dest_fpr(ctx, rc);
            vb = load_fpr(ctx, rb);
            va = load_fpr(ctx, ra);
            gen_cpy_mask(vc, va, vb, 1, 0x8000000000000000ULL);
            break;
        case 0x022:
            /* CPYSE */
            vc = dest_fpr(ctx, rc);
            vb = load_fpr(ctx, rb);
            va = load_fpr(ctx, ra);
            gen_cpy_mask(vc, va, vb, 0, 0xFFF0000000000000ULL);
            break;
        case 0x024:
            /* MT_FPCR */
            va = load_fpr(ctx, ra);
            gen_helper_store_fpcr(cpu_env, va);
            if (ctx->tb_rm == QUAL_RM_D) {
                /* Re-do the copy of the rounding mode to fp_status
                   the next time we use dynamic rounding.  */
                ctx->tb_rm = -1;
            }
            break;
        case 0x025:
            /* MF_FPCR */
            va = dest_fpr(ctx, ra);
            gen_helper_load_fpcr(va, cpu_env);
            break;
        case 0x02A:
            /* FCMOVEQ */
            gen_fcmov(ctx, TCG_COND_EQ, ra, rb, rc);
            break;
        case 0x02B:
            /* FCMOVNE */
            gen_fcmov(ctx, TCG_COND_NE, ra, rb, rc);
            break;
        case 0x02C:
            /* FCMOVLT */
            gen_fcmov(ctx, TCG_COND_LT, ra, rb, rc);
            break;
        case 0x02D:
            /* FCMOVGE */
            gen_fcmov(ctx, TCG_COND_GE, ra, rb, rc);
            break;
        case 0x02E:
            /* FCMOVLE */
            gen_fcmov(ctx, TCG_COND_LE, ra, rb, rc);
            break;
        case 0x02F:
            /* FCMOVGT */
            gen_fcmov(ctx, TCG_COND_GT, ra, rb, rc);
            break;
        case 0x030: /* CVTQL */
        case 0x130: /* CVTQL/V */
        case 0x530: /* CVTQL/SV */
            REQUIRE_REG_31(ra);
            vc = dest_fpr(ctx, rc);
            vb = load_fpr(ctx, rb);
            gen_helper_cvtql(vc, cpu_env, vb);
            gen_fp_exc_raise(rc, fn11);
            break;
        default:
            goto invalid_opc;
        }
        break;

    case 0x18:
2330 switch ((uint16_t)disp16) {
2331 case 0x0000:
2332 /* TRAPB */
2333 /* No-op. */
2334 break;
2335 case 0x0400:
2336 /* EXCB */
2337 /* No-op. */
2338 break;
2339 case 0x4000:
2340 /* MB */
2341 tcg_gen_mb(TCG_MO_ALL | TCG_BAR_SC);
2342 break;
2343 case 0x4400:
2344 /* WMB */
2345 tcg_gen_mb(TCG_MO_ST_ST | TCG_BAR_SC);
2346 break;
2347 case 0x8000:
2348 /* FETCH */
2349 /* No-op */
2350 break;
2351 case 0xA000:
2352 /* FETCH_M */
2353 /* No-op */
2354 break;
2355 case 0xC000:
2356 /* RPCC */
2357 va = dest_gpr(ctx, ra);
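            /* Reading the cycle counter counts as an I/O operation when
               icount is active: bracket the helper with gen_io_start and
               gen_io_end, and end the TB afterwards.  */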
            if (ctx->tb->cflags & CF_USE_ICOUNT) {
                gen_io_start();
                gen_helper_load_pcc(va, cpu_env);
                gen_io_end();
                ret = EXIT_PC_STALE;
            } else {
                gen_helper_load_pcc(va, cpu_env);
            }
            break;
        case 0xE000:
            /* RC */
            gen_rx(ctx, ra, 0);
            break;
        case 0xE800:
            /* ECB */
            break;
        case 0xF000:
            /* RS */
            gen_rx(ctx, ra, 1);
            break;
        case 0xF800:
            /* WH64 */
            /* No-op */
            break;
        case 0xFC00:
            /* WH64EN */
            /* No-op */
            break;
        default:
            goto invalid_opc;
        }
        break;

    case 0x19:
        /* HW_MFPR (PALcode) */
#ifndef CONFIG_USER_ONLY
        REQUIRE_TB_FLAG(TB_FLAGS_PAL_MODE);
        va = dest_gpr(ctx, ra);
        ret = gen_mfpr(ctx, va, insn & 0xffff);
        break;
#else
        goto invalid_opc;
#endif

    case 0x1A:
        /* JMP, JSR, RET, JSR_COROUTINE.  These only differ by the branch
           prediction stack action, which of course we don't implement. */
        vb = load_gpr(ctx, rb);
        tcg_gen_andi_i64(cpu_pc, vb, ~3);
        if (ra != 31) {
            tcg_gen_movi_i64(ctx->ir[ra], ctx->pc);
        }
        ret = EXIT_PC_UPDATED;
        break;

    case 0x1B:
        /* HW_LD (PALcode) */
#ifndef CONFIG_USER_ONLY
        REQUIRE_TB_FLAG(TB_FLAGS_PAL_MODE);
        {
            TCGv addr = tcg_temp_new();
            vb = load_gpr(ctx, rb);
            va = dest_gpr(ctx, ra);

            tcg_gen_addi_i64(addr, vb, disp12);
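            /* Bits 15:12 of the instruction select the access variant.  */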
            switch ((insn >> 12) & 0xF) {
            case 0x0:
                /* Longword physical access (hw_ldl/p) */
                gen_helper_ldl_phys(va, cpu_env, addr);
                break;
            case 0x1:
                /* Quadword physical access (hw_ldq/p) */
                gen_helper_ldq_phys(va, cpu_env, addr);
                break;
            case 0x2:
                /* Longword physical access with lock (hw_ldl_l/p) */
                gen_helper_ldl_l_phys(va, cpu_env, addr);
                break;
            case 0x3:
                /* Quadword physical access with lock (hw_ldq_l/p) */
                gen_helper_ldq_l_phys(va, cpu_env, addr);
                break;
            case 0x4:
                /* Longword virtual PTE fetch (hw_ldl/v) */
                goto invalid_opc;
            case 0x5:
                /* Quadword virtual PTE fetch (hw_ldq/v) */
                goto invalid_opc;
            case 0x6:
                /* Invalid */
                goto invalid_opc;
            case 0x7:
                /* Invalid */
                goto invalid_opc;
            case 0x8:
                /* Longword virtual access (hw_ldl) */
                goto invalid_opc;
            case 0x9:
                /* Quadword virtual access (hw_ldq) */
                goto invalid_opc;
            case 0xA:
                /* Longword virtual access with protection check (hw_ldl/w) */
                tcg_gen_qemu_ld_i64(va, addr, MMU_KERNEL_IDX, MO_LESL);
                break;
            case 0xB:
                /* Quadword virtual access with protection check (hw_ldq/w) */
                tcg_gen_qemu_ld_i64(va, addr, MMU_KERNEL_IDX, MO_LEQ);
                break;
            case 0xC:
                /* Longword virtual access with alt access mode (hw_ldl/a) */
                goto invalid_opc;
            case 0xD:
                /* Quadword virtual access with alt access mode (hw_ldq/a) */
                goto invalid_opc;
            case 0xE:
                /* Longword virtual access with alternate access mode and
                   protection checks (hw_ldl/wa) */
                tcg_gen_qemu_ld_i64(va, addr, MMU_USER_IDX, MO_LESL);
                break;
            case 0xF:
                /* Quadword virtual access with alternate access mode and
                   protection checks (hw_ldq/wa) */
                tcg_gen_qemu_ld_i64(va, addr, MMU_USER_IDX, MO_LEQ);
                break;
            }
            tcg_temp_free(addr);
            break;
        }
#else
        goto invalid_opc;
#endif

    case 0x1C:
        vc = dest_gpr(ctx, rc);
        if (fn7 == 0x70) {
            /* FTOIT */
            REQUIRE_TB_FLAG(TB_FLAGS_AMASK_FIX);
            REQUIRE_REG_31(rb);
            va = load_fpr(ctx, ra);
            tcg_gen_mov_i64(vc, va);
            break;
        } else if (fn7 == 0x78) {
            /* FTOIS */
            REQUIRE_TB_FLAG(TB_FLAGS_AMASK_FIX);
            REQUIRE_REG_31(rb);
            t32 = tcg_temp_new_i32();
            va = load_fpr(ctx, ra);
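            /* Convert the register S-format value to its 32-bit memory
               image, then sign-extend it into the integer register.  */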
            gen_helper_s_to_memory(t32, va);
            tcg_gen_ext_i32_i64(vc, t32);
            tcg_temp_free_i32(t32);
            break;
        }

        vb = load_gpr_lit(ctx, rb, lit, islit);
        switch (fn7) {
        case 0x00:
            /* SEXTB */
            REQUIRE_TB_FLAG(TB_FLAGS_AMASK_BWX);
            REQUIRE_REG_31(ra);
            tcg_gen_ext8s_i64(vc, vb);
            break;
        case 0x01:
            /* SEXTW */
            REQUIRE_TB_FLAG(TB_FLAGS_AMASK_BWX);
            REQUIRE_REG_31(ra);
            tcg_gen_ext16s_i64(vc, vb);
            break;
        case 0x30:
            /* CTPOP */
            REQUIRE_TB_FLAG(TB_FLAGS_AMASK_CIX);
            REQUIRE_REG_31(ra);
            REQUIRE_NO_LIT;
            gen_helper_ctpop(vc, vb);
            break;
        case 0x31:
            /* PERR */
            REQUIRE_TB_FLAG(TB_FLAGS_AMASK_MVI);
            REQUIRE_NO_LIT;
            va = load_gpr(ctx, ra);
            gen_helper_perr(vc, va, vb);
            break;
        case 0x32:
            /* CTLZ */
            REQUIRE_TB_FLAG(TB_FLAGS_AMASK_CIX);
            REQUIRE_REG_31(ra);
            REQUIRE_NO_LIT;
            gen_helper_ctlz(vc, vb);
            break;
        case 0x33:
            /* CTTZ */
            REQUIRE_TB_FLAG(TB_FLAGS_AMASK_CIX);
            REQUIRE_REG_31(ra);
            REQUIRE_NO_LIT;
            gen_helper_cttz(vc, vb);
            break;
        case 0x34:
            /* UNPKBW */
            REQUIRE_TB_FLAG(TB_FLAGS_AMASK_MVI);
            REQUIRE_REG_31(ra);
            REQUIRE_NO_LIT;
            gen_helper_unpkbw(vc, vb);
            break;
        case 0x35:
            /* UNPKBL */
            REQUIRE_TB_FLAG(TB_FLAGS_AMASK_MVI);
            REQUIRE_REG_31(ra);
            REQUIRE_NO_LIT;
            gen_helper_unpkbl(vc, vb);
            break;
        case 0x36:
            /* PKWB */
            REQUIRE_TB_FLAG(TB_FLAGS_AMASK_MVI);
            REQUIRE_REG_31(ra);
            REQUIRE_NO_LIT;
            gen_helper_pkwb(vc, vb);
            break;
        case 0x37:
            /* PKLB */
            REQUIRE_TB_FLAG(TB_FLAGS_AMASK_MVI);
            REQUIRE_REG_31(ra);
            REQUIRE_NO_LIT;
            gen_helper_pklb(vc, vb);
            break;
        case 0x38:
            /* MINSB8 */
            REQUIRE_TB_FLAG(TB_FLAGS_AMASK_MVI);
            va = load_gpr(ctx, ra);
            gen_helper_minsb8(vc, va, vb);
            break;
        case 0x39:
            /* MINSW4 */
            REQUIRE_TB_FLAG(TB_FLAGS_AMASK_MVI);
            va = load_gpr(ctx, ra);
            gen_helper_minsw4(vc, va, vb);
            break;
        case 0x3A:
            /* MINUB8 */
            REQUIRE_TB_FLAG(TB_FLAGS_AMASK_MVI);
            va = load_gpr(ctx, ra);
            gen_helper_minub8(vc, va, vb);
            break;
        case 0x3B:
            /* MINUW4 */
            REQUIRE_TB_FLAG(TB_FLAGS_AMASK_MVI);
            va = load_gpr(ctx, ra);
            gen_helper_minuw4(vc, va, vb);
            break;
        case 0x3C:
            /* MAXUB8 */
            REQUIRE_TB_FLAG(TB_FLAGS_AMASK_MVI);
            va = load_gpr(ctx, ra);
            gen_helper_maxub8(vc, va, vb);
            break;
        case 0x3D:
            /* MAXUW4 */
            REQUIRE_TB_FLAG(TB_FLAGS_AMASK_MVI);
            va = load_gpr(ctx, ra);
            gen_helper_maxuw4(vc, va, vb);
            break;
        case 0x3E:
            /* MAXSB8 */
            REQUIRE_TB_FLAG(TB_FLAGS_AMASK_MVI);
            va = load_gpr(ctx, ra);
            gen_helper_maxsb8(vc, va, vb);
            break;
        case 0x3F:
            /* MAXSW4 */
            REQUIRE_TB_FLAG(TB_FLAGS_AMASK_MVI);
            va = load_gpr(ctx, ra);
            gen_helper_maxsw4(vc, va, vb);
            break;
        default:
            goto invalid_opc;
        }
        break;

    case 0x1D:
        /* HW_MTPR (PALcode) */
#ifndef CONFIG_USER_ONLY
        REQUIRE_TB_FLAG(TB_FLAGS_PAL_MODE);
        vb = load_gpr(ctx, rb);
        ret = gen_mtpr(ctx, vb, insn & 0xffff);
        break;
#else
        goto invalid_opc;
#endif

    case 0x1E:
        /* HW_RET (PALcode) */
#ifndef CONFIG_USER_ONLY
        REQUIRE_TB_FLAG(TB_FLAGS_PAL_MODE);
        if (rb == 31) {
            /* Pre-EV6 CPUs interpreted this as HW_REI, loading the return
               address from EXC_ADDR.  This turns out to be useful for our
               emulation PALcode, so continue to accept it. */
            ctx->lit = vb = tcg_temp_new();
            tcg_gen_ld_i64(vb, cpu_env, offsetof(CPUAlphaState, exc_addr));
        } else {
            vb = load_gpr(ctx, rb);
        }
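        /* Clear the interrupt and lock flags, take the new PALmode state
           from bit 0 of the target, and mask the low bits off the PC.  */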
        tmp = tcg_temp_new();
        tcg_gen_movi_i64(tmp, 0);
        tcg_gen_st8_i64(tmp, cpu_env, offsetof(CPUAlphaState, intr_flag));
        tcg_gen_movi_i64(cpu_lock_addr, -1);
        tcg_gen_andi_i64(tmp, vb, 1);
        tcg_gen_st8_i64(tmp, cpu_env, offsetof(CPUAlphaState, pal_mode));
        tcg_gen_andi_i64(cpu_pc, vb, ~3);
        ret = EXIT_PC_UPDATED;
        break;
#else
        goto invalid_opc;
#endif

    case 0x1F:
        /* HW_ST (PALcode) */
#ifndef CONFIG_USER_ONLY
        REQUIRE_TB_FLAG(TB_FLAGS_PAL_MODE);
        {
            TCGv addr = tcg_temp_new();
            va = load_gpr(ctx, ra);
            vb = load_gpr(ctx, rb);

            tcg_gen_addi_i64(addr, vb, disp12);
            switch ((insn >> 12) & 0xF) {
            case 0x0:
                /* Longword physical access */
                gen_helper_stl_phys(cpu_env, addr, va);
                break;
            case 0x1:
                /* Quadword physical access */
                gen_helper_stq_phys(cpu_env, addr, va);
                break;
            case 0x2:
                /* Longword physical access with lock */
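                /* The locked variants write the store-conditional success
                   flag back into ra via dest_gpr.  */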
                gen_helper_stl_c_phys(dest_gpr(ctx, ra), cpu_env, addr, va);
                break;
            case 0x3:
                /* Quadword physical access with lock */
                gen_helper_stq_c_phys(dest_gpr(ctx, ra), cpu_env, addr, va);
                break;
            case 0x4:
                /* Longword virtual access */
                goto invalid_opc;
            case 0x5:
                /* Quadword virtual access */
                goto invalid_opc;
            case 0x6:
                /* Invalid */
                goto invalid_opc;
            case 0x7:
                /* Invalid */
                goto invalid_opc;
            case 0x8:
                /* Invalid */
                goto invalid_opc;
            case 0x9:
                /* Invalid */
                goto invalid_opc;
            case 0xA:
                /* Invalid */
                goto invalid_opc;
            case 0xB:
                /* Invalid */
                goto invalid_opc;
            case 0xC:
                /* Longword virtual access with alternate access mode */
                goto invalid_opc;
            case 0xD:
                /* Quadword virtual access with alternate access mode */
                goto invalid_opc;
            case 0xE:
                /* Invalid */
                goto invalid_opc;
            case 0xF:
                /* Invalid */
                goto invalid_opc;
            }
            tcg_temp_free(addr);
            break;
        }
#else
        goto invalid_opc;
#endif
    case 0x20:
        /* LDF */
        gen_load_mem(ctx, &gen_qemu_ldf, ra, rb, disp16, 1, 0);
        break;
    case 0x21:
        /* LDG */
        gen_load_mem(ctx, &gen_qemu_ldg, ra, rb, disp16, 1, 0);
        break;
    case 0x22:
        /* LDS */
        gen_load_mem(ctx, &gen_qemu_lds, ra, rb, disp16, 1, 0);
        break;
    case 0x23:
        /* LDT */
        gen_load_mem(ctx, &tcg_gen_qemu_ld64, ra, rb, disp16, 1, 0);
        break;
    case 0x24:
        /* STF */
        gen_store_mem(ctx, &gen_qemu_stf, ra, rb, disp16, 1, 0);
        break;
    case 0x25:
        /* STG */
        gen_store_mem(ctx, &gen_qemu_stg, ra, rb, disp16, 1, 0);
        break;
    case 0x26:
        /* STS */
        gen_store_mem(ctx, &gen_qemu_sts, ra, rb, disp16, 1, 0);
        break;
    case 0x27:
        /* STT */
        gen_store_mem(ctx, &tcg_gen_qemu_st64, ra, rb, disp16, 1, 0);
        break;
    case 0x28:
        /* LDL */
        gen_load_mem(ctx, &tcg_gen_qemu_ld32s, ra, rb, disp16, 0, 0);
        break;
    case 0x29:
        /* LDQ */
        gen_load_mem(ctx, &tcg_gen_qemu_ld64, ra, rb, disp16, 0, 0);
        break;
    case 0x2A:
        /* LDL_L */
        gen_load_mem(ctx, &gen_qemu_ldl_l, ra, rb, disp16, 0, 0);
        break;
    case 0x2B:
        /* LDQ_L */
        gen_load_mem(ctx, &gen_qemu_ldq_l, ra, rb, disp16, 0, 0);
        break;
    case 0x2C:
        /* STL */
        gen_store_mem(ctx, &tcg_gen_qemu_st32, ra, rb, disp16, 0, 0);
        break;
    case 0x2D:
        /* STQ */
        gen_store_mem(ctx, &tcg_gen_qemu_st64, ra, rb, disp16, 0, 0);
        break;
    case 0x2E:
        /* STL_C */
        ret = gen_store_conditional(ctx, ra, rb, disp16, 0);
        break;
    case 0x2F:
        /* STQ_C */
        ret = gen_store_conditional(ctx, ra, rb, disp16, 1);
        break;
    case 0x30:
        /* BR */
        ret = gen_bdirect(ctx, ra, disp21);
        break;
    case 0x31: /* FBEQ */
        ret = gen_fbcond(ctx, TCG_COND_EQ, ra, disp21);
        break;
    case 0x32: /* FBLT */
        ret = gen_fbcond(ctx, TCG_COND_LT, ra, disp21);
        break;
    case 0x33: /* FBLE */
        ret = gen_fbcond(ctx, TCG_COND_LE, ra, disp21);
        break;
    case 0x34:
        /* BSR */
        ret = gen_bdirect(ctx, ra, disp21);
        break;
    case 0x35: /* FBNE */
        ret = gen_fbcond(ctx, TCG_COND_NE, ra, disp21);
        break;
    case 0x36: /* FBGE */
        ret = gen_fbcond(ctx, TCG_COND_GE, ra, disp21);
        break;
    case 0x37: /* FBGT */
        ret = gen_fbcond(ctx, TCG_COND_GT, ra, disp21);
        break;
    case 0x38:
        /* BLBC */
        ret = gen_bcond(ctx, TCG_COND_EQ, ra, disp21, 1);
        break;
    case 0x39:
        /* BEQ */
        ret = gen_bcond(ctx, TCG_COND_EQ, ra, disp21, 0);
        break;
    case 0x3A:
        /* BLT */
        ret = gen_bcond(ctx, TCG_COND_LT, ra, disp21, 0);
        break;
    case 0x3B:
        /* BLE */
        ret = gen_bcond(ctx, TCG_COND_LE, ra, disp21, 0);
        break;
    case 0x3C:
        /* BLBS */
        ret = gen_bcond(ctx, TCG_COND_NE, ra, disp21, 1);
        break;
    case 0x3D:
        /* BNE */
        ret = gen_bcond(ctx, TCG_COND_NE, ra, disp21, 0);
        break;
    case 0x3E:
        /* BGE */
        ret = gen_bcond(ctx, TCG_COND_GE, ra, disp21, 0);
        break;
    case 0x3F:
        /* BGT */
        ret = gen_bcond(ctx, TCG_COND_GT, ra, disp21, 0);
        break;
    invalid_opc:
        ret = gen_invalid(ctx);
        break;
    }

    return ret;
}

void gen_intermediate_code(CPUAlphaState *env, struct TranslationBlock *tb)
{
    AlphaCPU *cpu = alpha_env_get_cpu(env);
    CPUState *cs = CPU(cpu);
    DisasContext ctx, *ctxp = &ctx;
    target_ulong pc_start;
    target_ulong pc_mask;
    uint32_t insn;
    ExitStatus ret;
    int num_insns;
    int max_insns;

    pc_start = tb->pc;

    ctx.tb = tb;
    ctx.pc = pc_start;
    ctx.mem_idx = cpu_mmu_index(env, false);
    ctx.implver = env->implver;
    ctx.singlestep_enabled = cs->singlestep_enabled;

#ifdef CONFIG_USER_ONLY
    ctx.ir = cpu_std_ir;
#else
    ctx.palbr = env->palbr;
    ctx.ir = (tb->flags & TB_FLAGS_PAL_MODE ? cpu_pal_ir : cpu_std_ir);
#endif

    /* ??? Every TB begins with unset rounding mode, to be initialized on
       the first fp insn of the TB.  Alternately we could define a proper
       default for every TB (e.g. QUAL_RM_N or QUAL_RM_D) and make sure
       to reset the FP_STATUS to that default at the end of any TB that
       changes the default.  We could even (gasp) dynamically figure out
       what default would be most efficient given the running program. */
    ctx.tb_rm = -1;
    /* Similarly for flush-to-zero. */
    ctx.tb_ftz = -1;

    num_insns = 0;
    max_insns = tb->cflags & CF_COUNT_MASK;
    if (max_insns == 0) {
        max_insns = CF_COUNT_MASK;
    }
    if (max_insns > TCG_MAX_INSNS) {
        max_insns = TCG_MAX_INSNS;
    }
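    /* Within the superpage, translation is linear, so a TB only needs to
       stop at the 2^41 region boundary rather than at every small page.  */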
    if (in_superpage(&ctx, pc_start)) {
        pc_mask = (1ULL << 41) - 1;
    } else {
        pc_mask = ~TARGET_PAGE_MASK;
    }

    gen_tb_start(tb);
    do {
        tcg_gen_insn_start(ctx.pc);
        num_insns++;

        if (unlikely(cpu_breakpoint_test(cs, ctx.pc, BP_ANY))) {
            ret = gen_excp(&ctx, EXCP_DEBUG, 0);
            /* The address covered by the breakpoint must be included in
               [tb->pc, tb->pc + tb->size) in order for it to be properly
               cleared -- thus we increment the PC here so that the logic
               setting tb->size below does the right thing. */
            ctx.pc += 4;
            break;
        }
        if (num_insns == max_insns && (tb->cflags & CF_LAST_IO)) {
            gen_io_start();
        }
        insn = cpu_ldl_code(env, ctx.pc);

        TCGV_UNUSED_I64(ctx.zero);
        TCGV_UNUSED_I64(ctx.sink);
        TCGV_UNUSED_I64(ctx.lit);

        ctx.pc += 4;
        ret = translate_one(ctxp, insn);
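        /* Free any temporaries created for the $31/$f31 zero source,
           the sink destination, or a literal operand.  */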
        if (!TCGV_IS_UNUSED_I64(ctx.sink)) {
            tcg_gen_discard_i64(ctx.sink);
            tcg_temp_free(ctx.sink);
        }
        if (!TCGV_IS_UNUSED_I64(ctx.zero)) {
            tcg_temp_free(ctx.zero);
        }
        if (!TCGV_IS_UNUSED_I64(ctx.lit)) {
            tcg_temp_free(ctx.lit);
        }

        /* If we reach a page boundary, are single stepping,
           or exhaust instruction count, stop generation. */
        if (ret == NO_EXIT
            && ((ctx.pc & pc_mask) == 0
                || tcg_op_buf_full()
                || num_insns >= max_insns
                || singlestep
                || ctx.singlestep_enabled)) {
            ret = EXIT_PC_STALE;
        }
    } while (ret == NO_EXIT);

    if (tb->cflags & CF_LAST_IO) {
        gen_io_end();
    }

    switch (ret) {
    case EXIT_GOTO_TB:
    case EXIT_NORETURN:
        break;
    case EXIT_PC_STALE:
        tcg_gen_movi_i64(cpu_pc, ctx.pc);
        /* FALLTHRU */
    case EXIT_PC_UPDATED:
        if (ctx.singlestep_enabled) {
            gen_excp_1(EXCP_DEBUG, 0);
        } else {
            tcg_gen_exit_tb(0);
        }
        break;
    default:
        abort();
    }

    gen_tb_end(tb, num_insns);

    tb->size = ctx.pc - pc_start;
    tb->icount = num_insns;

#ifdef DEBUG_DISAS
    if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)
        && qemu_log_in_addr_range(pc_start)) {
        qemu_log("IN: %s\n", lookup_symbol(pc_start));
        log_target_disas(cs, pc_start, ctx.pc - pc_start, 1);
        qemu_log("\n");
    }
#endif
}
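/* Called when unwinding after an exception: data[] holds the values
   recorded by tcg_gen_insn_start, which for Alpha is just the PC.  */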
void restore_state_to_opc(CPUAlphaState *env, TranslationBlock *tb,
                          target_ulong *data)
{
    env->pc = data[0];
}