target/alpha/translate.c

/*
 * Alpha emulation cpu translation for qemu.
 *
 * Copyright (c) 2007 Jocelyn Mayer
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "cpu.h"
#include "sysemu/cpus.h"
#include "disas/disas.h"
#include "qemu/host-utils.h"
#include "exec/exec-all.h"
#include "tcg-op.h"
#include "exec/cpu_ldst.h"

#include "exec/helper-proto.h"
#include "exec/helper-gen.h"

#include "trace-tcg.h"
#include "exec/log.h"


#undef ALPHA_DEBUG_DISAS
#define CONFIG_SOFTFLOAT_INLINE

#ifdef ALPHA_DEBUG_DISAS
#  define LOG_DISAS(...) qemu_log_mask(CPU_LOG_TB_IN_ASM, ## __VA_ARGS__)
#else
#  define LOG_DISAS(...) do { } while (0)
#endif

typedef struct DisasContext DisasContext;
struct DisasContext {
    struct TranslationBlock *tb;
    uint64_t pc;
#ifndef CONFIG_USER_ONLY
    uint64_t palbr;
#endif
    int mem_idx;

    /* Current rounding mode for this TB.  */
    int tb_rm;
    /* Current flush-to-zero setting for this TB.  */
    int tb_ftz;

    /* implver value for this CPU.  */
    int implver;

    /* The set of registers active in the current context.  */
    TCGv *ir;

    /* Temporaries for $31 and $f31 as source and destination.  */
    TCGv zero;
    TCGv sink;
    /* Temporary for immediate constants.  */
    TCGv lit;

    bool singlestep_enabled;
};

/* Return values from translate_one, indicating the state of the TB.
   Note that zero indicates that we are not exiting the TB.  */

typedef enum {
    NO_EXIT,

    /* We have emitted one or more goto_tb.  No fixup required.  */
    EXIT_GOTO_TB,

    /* We are not using a goto_tb (for whatever reason), but have updated
       the PC (for whatever reason), so there's no need to do it again on
       exiting the TB.  */
    EXIT_PC_UPDATED,

    /* We are exiting the TB, but have neither emitted a goto_tb, nor
       updated the PC for the next instruction to be executed.  */
    EXIT_PC_STALE,

    /* We are exiting the TB due to page crossing or space constraints.  */
    EXIT_FALLTHRU,

    /* We are ending the TB with a noreturn function call, e.g. longjmp.
       No following code will be executed.  */
    EXIT_NORETURN,
} ExitStatus;

/* global register indexes */
static TCGv_env cpu_env;
static TCGv cpu_std_ir[31];
static TCGv cpu_fir[31];
static TCGv cpu_pc;
static TCGv cpu_lock_addr;
static TCGv cpu_lock_value;

#ifndef CONFIG_USER_ONLY
static TCGv cpu_pal_ir[31];
#endif

#include "exec/gen-icount.h"

void alpha_translate_init(void)
{
#define DEF_VAR(V)  { &cpu_##V, #V, offsetof(CPUAlphaState, V) }

    typedef struct { TCGv *var; const char *name; int ofs; } GlobalVar;
    static const GlobalVar vars[] = {
        DEF_VAR(pc),
        DEF_VAR(lock_addr),
        DEF_VAR(lock_value),
    };

#undef DEF_VAR

    /* Use the symbolic register names that match the disassembler.  */
    static const char greg_names[31][4] = {
        "v0", "t0", "t1", "t2", "t3", "t4", "t5", "t6",
        "t7", "s0", "s1", "s2", "s3", "s4", "s5", "fp",
        "a0", "a1", "a2", "a3", "a4", "a5", "t8", "t9",
        "t10", "t11", "ra", "t12", "at", "gp", "sp"
    };
    static const char freg_names[31][4] = {
        "f0", "f1", "f2", "f3", "f4", "f5", "f6", "f7",
        "f8", "f9", "f10", "f11", "f12", "f13", "f14", "f15",
        "f16", "f17", "f18", "f19", "f20", "f21", "f22", "f23",
        "f24", "f25", "f26", "f27", "f28", "f29", "f30"
    };
#ifndef CONFIG_USER_ONLY
    static const char shadow_names[8][8] = {
        "pal_t7", "pal_s0", "pal_s1", "pal_s2",
        "pal_s3", "pal_s4", "pal_s5", "pal_t11"
    };
#endif

    static bool done_init = 0;
    int i;

    if (done_init) {
        return;
    }
    done_init = 1;

    cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");
    tcg_ctx.tcg_env = cpu_env;

    for (i = 0; i < 31; i++) {
        cpu_std_ir[i] = tcg_global_mem_new_i64(cpu_env,
                                               offsetof(CPUAlphaState, ir[i]),
                                               greg_names[i]);
    }

    for (i = 0; i < 31; i++) {
        cpu_fir[i] = tcg_global_mem_new_i64(cpu_env,
                                            offsetof(CPUAlphaState, fir[i]),
                                            freg_names[i]);
    }

#ifndef CONFIG_USER_ONLY
    memcpy(cpu_pal_ir, cpu_std_ir, sizeof(cpu_pal_ir));
    for (i = 0; i < 8; i++) {
        int r = (i == 7 ? 25 : i + 8);
        cpu_pal_ir[r] = tcg_global_mem_new_i64(cpu_env,
                                               offsetof(CPUAlphaState,
                                                        shadow[i]),
                                               shadow_names[i]);
    }
#endif

    for (i = 0; i < ARRAY_SIZE(vars); ++i) {
        const GlobalVar *v = &vars[i];
        *v->var = tcg_global_mem_new_i64(cpu_env, v->ofs, v->name);
    }
}

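/* The register accessors below implement the Alpha convention that $31
   and $f31 read as zero and discard whatever is written to them: loads
   of register 31 come from a shared zero temporary, and stores to
   register 31 go to a write-only "sink" temporary.  */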
static TCGv load_zero(DisasContext *ctx)
{
    if (TCGV_IS_UNUSED_I64(ctx->zero)) {
        ctx->zero = tcg_const_i64(0);
    }
    return ctx->zero;
}

static TCGv dest_sink(DisasContext *ctx)
{
    if (TCGV_IS_UNUSED_I64(ctx->sink)) {
        ctx->sink = tcg_temp_new();
    }
    return ctx->sink;
}

static void free_context_temps(DisasContext *ctx)
{
    if (!TCGV_IS_UNUSED_I64(ctx->sink)) {
        tcg_gen_discard_i64(ctx->sink);
        tcg_temp_free(ctx->sink);
        TCGV_UNUSED_I64(ctx->sink);
    }
    if (!TCGV_IS_UNUSED_I64(ctx->zero)) {
        tcg_temp_free(ctx->zero);
        TCGV_UNUSED_I64(ctx->zero);
    }
    if (!TCGV_IS_UNUSED_I64(ctx->lit)) {
        tcg_temp_free(ctx->lit);
        TCGV_UNUSED_I64(ctx->lit);
    }
}

static TCGv load_gpr(DisasContext *ctx, unsigned reg)
{
    if (likely(reg < 31)) {
        return ctx->ir[reg];
    } else {
        return load_zero(ctx);
    }
}

static TCGv load_gpr_lit(DisasContext *ctx, unsigned reg,
                         uint8_t lit, bool islit)
{
    if (islit) {
        ctx->lit = tcg_const_i64(lit);
        return ctx->lit;
    } else if (likely(reg < 31)) {
        return ctx->ir[reg];
    } else {
        return load_zero(ctx);
    }
}

static TCGv dest_gpr(DisasContext *ctx, unsigned reg)
{
    if (likely(reg < 31)) {
        return ctx->ir[reg];
    } else {
        return dest_sink(ctx);
    }
}

static TCGv load_fpr(DisasContext *ctx, unsigned reg)
{
    if (likely(reg < 31)) {
        return cpu_fir[reg];
    } else {
        return load_zero(ctx);
    }
}

static TCGv dest_fpr(DisasContext *ctx, unsigned reg)
{
    if (likely(reg < 31)) {
        return cpu_fir[reg];
    } else {
        return dest_sink(ctx);
    }
}

static void gen_excp_1(int exception, int error_code)
{
    TCGv_i32 tmp1, tmp2;

    tmp1 = tcg_const_i32(exception);
    tmp2 = tcg_const_i32(error_code);
    gen_helper_excp(cpu_env, tmp1, tmp2);
    tcg_temp_free_i32(tmp2);
    tcg_temp_free_i32(tmp1);
}

static ExitStatus gen_excp(DisasContext *ctx, int exception, int error_code)
{
    tcg_gen_movi_i64(cpu_pc, ctx->pc);
    gen_excp_1(exception, error_code);
    return EXIT_NORETURN;
}

static inline ExitStatus gen_invalid(DisasContext *ctx)
{
    return gen_excp(ctx, EXCP_OPCDEC, 0);
}

static inline void gen_qemu_ldf(TCGv t0, TCGv t1, int flags)
{
    TCGv_i32 tmp32 = tcg_temp_new_i32();
    tcg_gen_qemu_ld_i32(tmp32, t1, flags, MO_LEUL);
    gen_helper_memory_to_f(t0, tmp32);
    tcg_temp_free_i32(tmp32);
}

static inline void gen_qemu_ldg(TCGv t0, TCGv t1, int flags)
{
    TCGv tmp = tcg_temp_new();
    tcg_gen_qemu_ld_i64(tmp, t1, flags, MO_LEQ);
    gen_helper_memory_to_g(t0, tmp);
    tcg_temp_free(tmp);
}

static inline void gen_qemu_lds(TCGv t0, TCGv t1, int flags)
{
    TCGv_i32 tmp32 = tcg_temp_new_i32();
    tcg_gen_qemu_ld_i32(tmp32, t1, flags, MO_LEUL);
    gen_helper_memory_to_s(t0, tmp32);
    tcg_temp_free_i32(tmp32);
}

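/* Load-locked: besides the memory load itself, record the address and the
   loaded value so that the matching store-conditional below can be
   implemented as a compare-and-swap against that value.  */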
static inline void gen_qemu_ldl_l(TCGv t0, TCGv t1, int flags)
{
    tcg_gen_qemu_ld_i64(t0, t1, flags, MO_LESL);
    tcg_gen_mov_i64(cpu_lock_addr, t1);
    tcg_gen_mov_i64(cpu_lock_value, t0);
}

static inline void gen_qemu_ldq_l(TCGv t0, TCGv t1, int flags)
{
    tcg_gen_qemu_ld_i64(t0, t1, flags, MO_LEQ);
    tcg_gen_mov_i64(cpu_lock_addr, t1);
    tcg_gen_mov_i64(cpu_lock_value, t0);
}

static inline void gen_load_mem(DisasContext *ctx,
                                void (*tcg_gen_qemu_load)(TCGv t0, TCGv t1,
                                                          int flags),
                                int ra, int rb, int32_t disp16, bool fp,
                                bool clear)
{
    TCGv tmp, addr, va;

    /* LDQ_U with ra $31 is UNOP.  Other various loads are forms of
       prefetches, which we can treat as nops.  No worries about
       missed exceptions here.  */
    if (unlikely(ra == 31)) {
        return;
    }

    tmp = tcg_temp_new();
    addr = load_gpr(ctx, rb);

    if (disp16) {
        tcg_gen_addi_i64(tmp, addr, disp16);
        addr = tmp;
    }
    if (clear) {
        tcg_gen_andi_i64(tmp, addr, ~0x7);
        addr = tmp;
    }

    va = (fp ? cpu_fir[ra] : ctx->ir[ra]);
    tcg_gen_qemu_load(va, addr, ctx->mem_idx);

    tcg_temp_free(tmp);
}

static inline void gen_qemu_stf(TCGv t0, TCGv t1, int flags)
{
    TCGv_i32 tmp32 = tcg_temp_new_i32();
    gen_helper_f_to_memory(tmp32, t0);
    tcg_gen_qemu_st_i32(tmp32, t1, flags, MO_LEUL);
    tcg_temp_free_i32(tmp32);
}

static inline void gen_qemu_stg(TCGv t0, TCGv t1, int flags)
{
    TCGv tmp = tcg_temp_new();
    gen_helper_g_to_memory(tmp, t0);
    tcg_gen_qemu_st_i64(tmp, t1, flags, MO_LEQ);
    tcg_temp_free(tmp);
}

static inline void gen_qemu_sts(TCGv t0, TCGv t1, int flags)
{
    TCGv_i32 tmp32 = tcg_temp_new_i32();
    gen_helper_s_to_memory(tmp32, t0);
    tcg_gen_qemu_st_i32(tmp32, t1, flags, MO_LEUL);
    tcg_temp_free_i32(tmp32);
}

static inline void gen_store_mem(DisasContext *ctx,
                                 void (*tcg_gen_qemu_store)(TCGv t0, TCGv t1,
                                                            int flags),
                                 int ra, int rb, int32_t disp16, bool fp,
                                 bool clear)
{
    TCGv tmp, addr, va;

    tmp = tcg_temp_new();
    addr = load_gpr(ctx, rb);

    if (disp16) {
        tcg_gen_addi_i64(tmp, addr, disp16);
        addr = tmp;
    }
    if (clear) {
        tcg_gen_andi_i64(tmp, addr, ~0x7);
        addr = tmp;
    }

    va = (fp ? load_fpr(ctx, ra) : load_gpr(ctx, ra));
    tcg_gen_qemu_store(va, addr, ctx->mem_idx);

    tcg_temp_free(tmp);
}

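/* Store-conditional.  With no host mechanism that pairs exactly with the
   earlier load-locked, emulate it with an atomic compare-and-swap: the
   store succeeds only if memory still holds the value seen by the load.
   Note this can succeed spuriously if another CPU wrote the same value
   back in the meantime, which is an acceptable approximation of LL/SC.  */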
static ExitStatus gen_store_conditional(DisasContext *ctx, int ra, int rb,
                                        int32_t disp16, int mem_idx,
                                        TCGMemOp op)
{
    TCGLabel *lab_fail, *lab_done;
    TCGv addr, val;

    addr = tcg_temp_new_i64();
    tcg_gen_addi_i64(addr, load_gpr(ctx, rb), disp16);
    free_context_temps(ctx);

    lab_fail = gen_new_label();
    lab_done = gen_new_label();
    tcg_gen_brcond_i64(TCG_COND_NE, addr, cpu_lock_addr, lab_fail);
    tcg_temp_free_i64(addr);

    val = tcg_temp_new_i64();
    tcg_gen_atomic_cmpxchg_i64(val, cpu_lock_addr, cpu_lock_value,
                               load_gpr(ctx, ra), mem_idx, op);
    free_context_temps(ctx);

    if (ra != 31) {
        tcg_gen_setcond_i64(TCG_COND_EQ, ctx->ir[ra], val, cpu_lock_value);
    }
    tcg_temp_free_i64(val);
    tcg_gen_br(lab_done);

    gen_set_label(lab_fail);
    if (ra != 31) {
        tcg_gen_movi_i64(ctx->ir[ra], 0);
    }

    gen_set_label(lab_done);
    tcg_gen_movi_i64(cpu_lock_addr, -1);
    return NO_EXIT;
}

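/* The Alpha "superpage" is the kseg-style region in the upper part of the
   kernel virtual address space that maps physical memory directly; its
   mapping can never change, so branches within it may always use goto_tb.  */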
static bool in_superpage(DisasContext *ctx, int64_t addr)
{
#ifndef CONFIG_USER_ONLY
    return ((ctx->tb->flags & TB_FLAGS_USER_MODE) == 0
            && addr >> TARGET_VIRT_ADDR_SPACE_BITS == -1
            && ((addr >> 41) & 3) == 2);
#else
    return false;
#endif
}

static bool use_goto_tb(DisasContext *ctx, uint64_t dest)
{
    /* Suppress goto_tb in the case of single-stepping and IO.  */
    if ((ctx->tb->cflags & CF_LAST_IO)
        || ctx->singlestep_enabled || singlestep) {
        return false;
    }
#ifndef CONFIG_USER_ONLY
    /* If the destination is in the superpage, the page perms can't change.  */
    if (in_superpage(ctx, dest)) {
        return true;
    }
    /* Check for the dest on the same page as the start of the TB.  */
    return ((ctx->tb->pc ^ dest) & TARGET_PAGE_MASK) == 0;
#else
    return true;
#endif
}

static ExitStatus gen_bdirect(DisasContext *ctx, int ra, int32_t disp)
{
    uint64_t dest = ctx->pc + (disp << 2);

    if (ra != 31) {
        tcg_gen_movi_i64(ctx->ir[ra], ctx->pc);
    }

    /* Notice branch-to-next; used to initialize RA with the PC.  */
    if (disp == 0) {
        return 0;
    } else if (use_goto_tb(ctx, dest)) {
        tcg_gen_goto_tb(0);
        tcg_gen_movi_i64(cpu_pc, dest);
        tcg_gen_exit_tb((uintptr_t)ctx->tb);
        return EXIT_GOTO_TB;
    } else {
        tcg_gen_movi_i64(cpu_pc, dest);
        return EXIT_PC_UPDATED;
    }
}

static ExitStatus gen_bcond_internal(DisasContext *ctx, TCGCond cond,
                                     TCGv cmp, int32_t disp)
{
    uint64_t dest = ctx->pc + (disp << 2);
    TCGLabel *lab_true = gen_new_label();

    if (use_goto_tb(ctx, dest)) {
        tcg_gen_brcondi_i64(cond, cmp, 0, lab_true);

        tcg_gen_goto_tb(0);
        tcg_gen_movi_i64(cpu_pc, ctx->pc);
        tcg_gen_exit_tb((uintptr_t)ctx->tb);

        gen_set_label(lab_true);
        tcg_gen_goto_tb(1);
        tcg_gen_movi_i64(cpu_pc, dest);
        tcg_gen_exit_tb((uintptr_t)ctx->tb + 1);

        return EXIT_GOTO_TB;
    } else {
        TCGv_i64 z = tcg_const_i64(0);
        TCGv_i64 d = tcg_const_i64(dest);
        TCGv_i64 p = tcg_const_i64(ctx->pc);

        tcg_gen_movcond_i64(cond, cpu_pc, cmp, z, d, p);

        tcg_temp_free_i64(z);
        tcg_temp_free_i64(d);
        tcg_temp_free_i64(p);
        return EXIT_PC_UPDATED;
    }
}

static ExitStatus gen_bcond(DisasContext *ctx, TCGCond cond, int ra,
                            int32_t disp, int mask)
{
    TCGv cmp_tmp;

    if (mask) {
        cmp_tmp = tcg_temp_new();
        tcg_gen_andi_i64(cmp_tmp, load_gpr(ctx, ra), 1);
    } else {
        cmp_tmp = load_gpr(ctx, ra);
    }

    return gen_bcond_internal(ctx, cond, cmp_tmp, disp);
}

/* Fold -0.0 for comparison with COND.  */

static void gen_fold_mzero(TCGCond cond, TCGv dest, TCGv src)
{
    uint64_t mzero = 1ull << 63;

    switch (cond) {
    case TCG_COND_LE:
    case TCG_COND_GT:
        /* For <= or >, the -0.0 value directly compares the way we want.  */
        tcg_gen_mov_i64(dest, src);
        break;

    case TCG_COND_EQ:
    case TCG_COND_NE:
        /* For == or !=, we can simply mask off the sign bit and compare.  */
        tcg_gen_andi_i64(dest, src, mzero - 1);
        break;

    case TCG_COND_GE:
    case TCG_COND_LT:
        /* For >= or <, map -0.0 to +0.0 via comparison and mask.  */
        tcg_gen_setcondi_i64(TCG_COND_NE, dest, src, mzero);
        tcg_gen_neg_i64(dest, dest);
        tcg_gen_and_i64(dest, dest, src);
        break;

    default:
        abort();
    }
}

static ExitStatus gen_fbcond(DisasContext *ctx, TCGCond cond, int ra,
                             int32_t disp)
{
    TCGv cmp_tmp = tcg_temp_new();
    gen_fold_mzero(cond, cmp_tmp, load_fpr(ctx, ra));
    return gen_bcond_internal(ctx, cond, cmp_tmp, disp);
}

static void gen_fcmov(DisasContext *ctx, TCGCond cond, int ra, int rb, int rc)
{
    TCGv_i64 va, vb, z;

    z = load_zero(ctx);
    vb = load_fpr(ctx, rb);
    va = tcg_temp_new();
    gen_fold_mzero(cond, va, load_fpr(ctx, ra));

    tcg_gen_movcond_i64(cond, dest_fpr(ctx, rc), va, z, vb, load_fpr(ctx, rc));

    tcg_temp_free(va);
}

#define QUAL_RM_N       0x080   /* Round mode nearest even */
#define QUAL_RM_C       0x000   /* Round mode chopped */
#define QUAL_RM_M       0x040   /* Round mode minus infinity */
#define QUAL_RM_D       0x0c0   /* Round mode dynamic */
#define QUAL_RM_MASK    0x0c0

#define QUAL_U          0x100   /* Underflow enable (fp output) */
#define QUAL_V          0x100   /* Overflow enable (int output) */
#define QUAL_S          0x400   /* Software completion enable */
#define QUAL_I          0x200   /* Inexact detection enable */
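
/* The QUAL_* bits above decode the instruction qualifiers held in the fn11
   function field.  The current rounding and flush-to-zero settings are
   cached in ctx->tb_rm / ctx->tb_ftz so that consecutive instructions in a
   TB using the same qualifiers do not each rewrite fp_status.  */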
static void gen_qual_roundmode(DisasContext *ctx, int fn11)
{
    TCGv_i32 tmp;

    fn11 &= QUAL_RM_MASK;
    if (fn11 == ctx->tb_rm) {
        return;
    }
    ctx->tb_rm = fn11;

    tmp = tcg_temp_new_i32();
    switch (fn11) {
    case QUAL_RM_N:
        tcg_gen_movi_i32(tmp, float_round_nearest_even);
        break;
    case QUAL_RM_C:
        tcg_gen_movi_i32(tmp, float_round_to_zero);
        break;
    case QUAL_RM_M:
        tcg_gen_movi_i32(tmp, float_round_down);
        break;
    case QUAL_RM_D:
        tcg_gen_ld8u_i32(tmp, cpu_env,
                         offsetof(CPUAlphaState, fpcr_dyn_round));
        break;
    }

#if defined(CONFIG_SOFTFLOAT_INLINE)
    /* ??? The "fpu/softfloat.h" interface is to call set_float_rounding_mode.
       With CONFIG_SOFTFLOAT that expands to an out-of-line call that just
       sets the one field.  */
    tcg_gen_st8_i32(tmp, cpu_env,
                    offsetof(CPUAlphaState, fp_status.float_rounding_mode));
#else
    gen_helper_setroundmode(tmp);
#endif

    tcg_temp_free_i32(tmp);
}

static void gen_qual_flushzero(DisasContext *ctx, int fn11)
{
    TCGv_i32 tmp;

    fn11 &= QUAL_U;
    if (fn11 == ctx->tb_ftz) {
        return;
    }
    ctx->tb_ftz = fn11;

    tmp = tcg_temp_new_i32();
    if (fn11) {
        /* Underflow is enabled, use the FPCR setting.  */
        tcg_gen_ld8u_i32(tmp, cpu_env,
                         offsetof(CPUAlphaState, fpcr_flush_to_zero));
    } else {
        /* Underflow is disabled, force flush-to-zero.  */
        tcg_gen_movi_i32(tmp, 1);
    }

#if defined(CONFIG_SOFTFLOAT_INLINE)
    tcg_gen_st8_i32(tmp, cpu_env,
                    offsetof(CPUAlphaState, fp_status.flush_to_zero));
#else
    gen_helper_setflushzero(tmp);
#endif

    tcg_temp_free_i32(tmp);
}

static TCGv gen_ieee_input(DisasContext *ctx, int reg, int fn11, int is_cmp)
{
    TCGv val;

    if (unlikely(reg == 31)) {
        val = load_zero(ctx);
    } else {
        val = cpu_fir[reg];
        if ((fn11 & QUAL_S) == 0) {
            if (is_cmp) {
                gen_helper_ieee_input_cmp(cpu_env, val);
            } else {
                gen_helper_ieee_input(cpu_env, val);
            }
        } else {
#ifndef CONFIG_USER_ONLY
            /* In system mode, raise exceptions for denormals like real
               hardware.  In user mode, proceed as if the OS completion
               handler is handling the denormal as per spec.  */
            gen_helper_ieee_input_s(cpu_env, val);
#endif
        }
    }
    return val;
}

static void gen_fp_exc_raise(int rc, int fn11)
{
    /* ??? We ought to be able to do something with imprecise exceptions.
       E.g. notice we're still in the trap shadow of something within the
       TB and do not generate the code to signal the exception; end the TB
       when an exception is forced to arrive, either by consumption of a
       register value or TRAPB or EXCB.  */
    TCGv_i32 reg, ign;
    uint32_t ignore = 0;

    if (!(fn11 & QUAL_U)) {
        /* Note that QUAL_U == QUAL_V, so ignore either.  */
        ignore |= FPCR_UNF | FPCR_IOV;
    }
    if (!(fn11 & QUAL_I)) {
        ignore |= FPCR_INE;
    }
    ign = tcg_const_i32(ignore);

    /* ??? Pass in the regno of the destination so that the helper can
       set EXC_MASK, which contains a bitmask of destination registers
       that have caused arithmetic traps.  A simple userspace emulation
       does not require this.  We do need it for a guest kernel's entArith,
       or if we were to do something clever with imprecise exceptions.  */
    reg = tcg_const_i32(rc + 32);
    if (fn11 & QUAL_S) {
        gen_helper_fp_exc_raise_s(cpu_env, ign, reg);
    } else {
        gen_helper_fp_exc_raise(cpu_env, ign, reg);
    }

    tcg_temp_free_i32(reg);
    tcg_temp_free_i32(ign);
}

static void gen_cvtlq(TCGv vc, TCGv vb)
{
    TCGv tmp = tcg_temp_new();

    /* The arithmetic right shift here, plus the sign-extended mask below
       yields a sign-extended result without an explicit ext32s_i64.  */
    tcg_gen_sari_i64(tmp, vb, 32);
    tcg_gen_shri_i64(vc, vb, 29);
    tcg_gen_andi_i64(tmp, tmp, (int32_t)0xc0000000);
    tcg_gen_andi_i64(vc, vc, 0x3fffffff);
    tcg_gen_or_i64(vc, vc, tmp);

    tcg_temp_free(tmp);
}

static void gen_ieee_arith2(DisasContext *ctx,
                            void (*helper)(TCGv, TCGv_ptr, TCGv),
                            int rb, int rc, int fn11)
{
    TCGv vb;

    gen_qual_roundmode(ctx, fn11);
    gen_qual_flushzero(ctx, fn11);

    vb = gen_ieee_input(ctx, rb, fn11, 0);
    helper(dest_fpr(ctx, rc), cpu_env, vb);

    gen_fp_exc_raise(rc, fn11);
}

#define IEEE_ARITH2(name)                                       \
static inline void glue(gen_, name)(DisasContext *ctx,          \
                                    int rb, int rc, int fn11)   \
{                                                               \
    gen_ieee_arith2(ctx, gen_helper_##name, rb, rc, fn11);      \
}
IEEE_ARITH2(sqrts)
IEEE_ARITH2(sqrtt)
IEEE_ARITH2(cvtst)
IEEE_ARITH2(cvtts)

static void gen_cvttq(DisasContext *ctx, int rb, int rc, int fn11)
{
    TCGv vb, vc;

    /* No need to set flushzero, since we have an integer output.  */
    vb = gen_ieee_input(ctx, rb, fn11, 0);
    vc = dest_fpr(ctx, rc);

    /* Almost all integer conversions use cropped rounding;
       special case that.  */
    if ((fn11 & QUAL_RM_MASK) == QUAL_RM_C) {
        gen_helper_cvttq_c(vc, cpu_env, vb);
    } else {
        gen_qual_roundmode(ctx, fn11);
        gen_helper_cvttq(vc, cpu_env, vb);
    }
    gen_fp_exc_raise(rc, fn11);
}

static void gen_ieee_intcvt(DisasContext *ctx,
                            void (*helper)(TCGv, TCGv_ptr, TCGv),
                            int rb, int rc, int fn11)
{
    TCGv vb, vc;

    gen_qual_roundmode(ctx, fn11);
    vb = load_fpr(ctx, rb);
    vc = dest_fpr(ctx, rc);

    /* The only exception that can be raised by integer conversion
       is inexact.  Thus we only need to worry about exceptions when
       inexact handling is requested.  */
    if (fn11 & QUAL_I) {
        helper(vc, cpu_env, vb);
        gen_fp_exc_raise(rc, fn11);
    } else {
        helper(vc, cpu_env, vb);
    }
}

#define IEEE_INTCVT(name)                                       \
static inline void glue(gen_, name)(DisasContext *ctx,          \
                                    int rb, int rc, int fn11)   \
{                                                               \
    gen_ieee_intcvt(ctx, gen_helper_##name, rb, rc, fn11);      \
}
IEEE_INTCVT(cvtqs)
IEEE_INTCVT(cvtqt)

static void gen_cpy_mask(TCGv vc, TCGv va, TCGv vb, bool inv_a, uint64_t mask)
{
    TCGv vmask = tcg_const_i64(mask);
    TCGv tmp = tcg_temp_new_i64();

    if (inv_a) {
        tcg_gen_andc_i64(tmp, vmask, va);
    } else {
        tcg_gen_and_i64(tmp, va, vmask);
    }

    tcg_gen_andc_i64(vc, vb, vmask);
    tcg_gen_or_i64(vc, vc, tmp);

    tcg_temp_free(vmask);
    tcg_temp_free(tmp);
}

static void gen_ieee_arith3(DisasContext *ctx,
                            void (*helper)(TCGv, TCGv_ptr, TCGv, TCGv),
                            int ra, int rb, int rc, int fn11)
{
    TCGv va, vb, vc;

    gen_qual_roundmode(ctx, fn11);
    gen_qual_flushzero(ctx, fn11);

    va = gen_ieee_input(ctx, ra, fn11, 0);
    vb = gen_ieee_input(ctx, rb, fn11, 0);
    vc = dest_fpr(ctx, rc);
    helper(vc, cpu_env, va, vb);

    gen_fp_exc_raise(rc, fn11);
}

#define IEEE_ARITH3(name)                                               \
static inline void glue(gen_, name)(DisasContext *ctx,                  \
                                    int ra, int rb, int rc, int fn11)   \
{                                                                       \
    gen_ieee_arith3(ctx, gen_helper_##name, ra, rb, rc, fn11);          \
}
IEEE_ARITH3(adds)
IEEE_ARITH3(subs)
IEEE_ARITH3(muls)
IEEE_ARITH3(divs)
IEEE_ARITH3(addt)
IEEE_ARITH3(subt)
IEEE_ARITH3(mult)
IEEE_ARITH3(divt)

static void gen_ieee_compare(DisasContext *ctx,
                             void (*helper)(TCGv, TCGv_ptr, TCGv, TCGv),
                             int ra, int rb, int rc, int fn11)
{
    TCGv va, vb, vc;

    va = gen_ieee_input(ctx, ra, fn11, 1);
    vb = gen_ieee_input(ctx, rb, fn11, 1);
    vc = dest_fpr(ctx, rc);
    helper(vc, cpu_env, va, vb);

    gen_fp_exc_raise(rc, fn11);
}

#define IEEE_CMP3(name)                                                 \
static inline void glue(gen_, name)(DisasContext *ctx,                  \
                                    int ra, int rb, int rc, int fn11)   \
{                                                                       \
    gen_ieee_compare(ctx, gen_helper_##name, ra, rb, rc, fn11);         \
}
IEEE_CMP3(cmptun)
IEEE_CMP3(cmpteq)
IEEE_CMP3(cmptlt)
IEEE_CMP3(cmptle)

static inline uint64_t zapnot_mask(uint8_t lit)
{
    uint64_t mask = 0;
    int i;

    for (i = 0; i < 8; ++i) {
        if ((lit >> i) & 1) {
            mask |= 0xffull << (i * 8);
        }
    }
    return mask;
}

/* Implement zapnot with an immediate operand, which expands to some
   form of immediate AND.  This is a basic building block in the
   definition of many of the other byte manipulation instructions.  */
static void gen_zapnoti(TCGv dest, TCGv src, uint8_t lit)
{
    switch (lit) {
    case 0x00:
        tcg_gen_movi_i64(dest, 0);
        break;
    case 0x01:
        tcg_gen_ext8u_i64(dest, src);
        break;
    case 0x03:
        tcg_gen_ext16u_i64(dest, src);
        break;
    case 0x0f:
        tcg_gen_ext32u_i64(dest, src);
        break;
    case 0xff:
        tcg_gen_mov_i64(dest, src);
        break;
    default:
        tcg_gen_andi_i64(dest, src, zapnot_mask(lit));
        break;
    }
}

/* EXTWH, EXTLH, EXTQH */
static void gen_ext_h(DisasContext *ctx, TCGv vc, TCGv va, int rb, bool islit,
                      uint8_t lit, uint8_t byte_mask)
{
    if (islit) {
        int pos = (64 - lit * 8) & 0x3f;
        int len = cto32(byte_mask) * 8;
        if (pos < len) {
            tcg_gen_deposit_z_i64(vc, va, pos, len - pos);
        } else {
            tcg_gen_movi_i64(vc, 0);
        }
    } else {
        TCGv tmp = tcg_temp_new();
        tcg_gen_shli_i64(tmp, load_gpr(ctx, rb), 3);
        tcg_gen_neg_i64(tmp, tmp);
        tcg_gen_andi_i64(tmp, tmp, 0x3f);
        tcg_gen_shl_i64(vc, va, tmp);
        tcg_temp_free(tmp);
    }
    gen_zapnoti(vc, vc, byte_mask);
}

/* EXTBL, EXTWL, EXTLL, EXTQL */
static void gen_ext_l(DisasContext *ctx, TCGv vc, TCGv va, int rb, bool islit,
                      uint8_t lit, uint8_t byte_mask)
{
    if (islit) {
        int pos = (lit & 7) * 8;
        int len = cto32(byte_mask) * 8;
        if (pos + len >= 64) {
            len = 64 - pos;
        }
        tcg_gen_extract_i64(vc, va, pos, len);
    } else {
        TCGv tmp = tcg_temp_new();
        tcg_gen_andi_i64(tmp, load_gpr(ctx, rb), 7);
        tcg_gen_shli_i64(tmp, tmp, 3);
        tcg_gen_shr_i64(vc, va, tmp);
        tcg_temp_free(tmp);
        gen_zapnoti(vc, vc, byte_mask);
    }
}

/* INSWH, INSLH, INSQH */
static void gen_ins_h(DisasContext *ctx, TCGv vc, TCGv va, int rb, bool islit,
                      uint8_t lit, uint8_t byte_mask)
{
    if (islit) {
        int pos = 64 - (lit & 7) * 8;
        int len = cto32(byte_mask) * 8;
        if (pos < len) {
            tcg_gen_extract_i64(vc, va, pos, len - pos);
        } else {
            tcg_gen_movi_i64(vc, 0);
        }
    } else {
        TCGv tmp = tcg_temp_new();
        TCGv shift = tcg_temp_new();

        /* The instruction description has us left-shift the byte mask
           and extract bits <15:8> and apply that zap at the end.  This
           is equivalent to simply performing the zap first and shifting
           afterward.  */
        gen_zapnoti(tmp, va, byte_mask);

        /* If (B & 7) == 0, we need to shift by 64 and leave a zero.  Do this
           portably by splitting the shift into two parts: shift_count-1 and 1.
           Arrange for the -1 by using ones-complement instead of
           twos-complement in the negation: ~(B * 8) & 63.  */

        tcg_gen_shli_i64(shift, load_gpr(ctx, rb), 3);
        tcg_gen_not_i64(shift, shift);
        tcg_gen_andi_i64(shift, shift, 0x3f);

        tcg_gen_shr_i64(vc, tmp, shift);
        tcg_gen_shri_i64(vc, vc, 1);
        tcg_temp_free(shift);
        tcg_temp_free(tmp);
    }
}

/* INSBL, INSWL, INSLL, INSQL */
static void gen_ins_l(DisasContext *ctx, TCGv vc, TCGv va, int rb, bool islit,
                      uint8_t lit, uint8_t byte_mask)
{
    if (islit) {
        int pos = (lit & 7) * 8;
        int len = cto32(byte_mask) * 8;
        if (pos + len > 64) {
            len = 64 - pos;
        }
        tcg_gen_deposit_z_i64(vc, va, pos, len);
    } else {
        TCGv tmp = tcg_temp_new();
        TCGv shift = tcg_temp_new();

        /* The instruction description has us left-shift the byte mask
           and extract bits <15:8> and apply that zap at the end.  This
           is equivalent to simply performing the zap first and shifting
           afterward.  */
        gen_zapnoti(tmp, va, byte_mask);

        tcg_gen_andi_i64(shift, load_gpr(ctx, rb), 7);
        tcg_gen_shli_i64(shift, shift, 3);
        tcg_gen_shl_i64(vc, tmp, shift);
        tcg_temp_free(shift);
        tcg_temp_free(tmp);
    }
}

/* MSKWH, MSKLH, MSKQH */
static void gen_msk_h(DisasContext *ctx, TCGv vc, TCGv va, int rb, bool islit,
                      uint8_t lit, uint8_t byte_mask)
{
    if (islit) {
        gen_zapnoti(vc, va, ~((byte_mask << (lit & 7)) >> 8));
    } else {
        TCGv shift = tcg_temp_new();
        TCGv mask = tcg_temp_new();

        /* The instruction description is as above, where the byte_mask
           is shifted left, and then we extract bits <15:8>.  This can be
           emulated with a right-shift on the expanded byte mask.  This
           requires extra care because for an input <2:0> == 0 we need a
           shift of 64 bits in order to generate a zero.  This is done by
           splitting the shift into two parts, the variable shift - 1
           followed by a constant 1 shift.  The code we expand below is
           equivalent to ~(B * 8) & 63.  */

        tcg_gen_shli_i64(shift, load_gpr(ctx, rb), 3);
        tcg_gen_not_i64(shift, shift);
        tcg_gen_andi_i64(shift, shift, 0x3f);
        tcg_gen_movi_i64(mask, zapnot_mask(byte_mask));
        tcg_gen_shr_i64(mask, mask, shift);
        tcg_gen_shri_i64(mask, mask, 1);

        tcg_gen_andc_i64(vc, va, mask);

        tcg_temp_free(mask);
        tcg_temp_free(shift);
    }
}

/* MSKBL, MSKWL, MSKLL, MSKQL */
static void gen_msk_l(DisasContext *ctx, TCGv vc, TCGv va, int rb, bool islit,
                      uint8_t lit, uint8_t byte_mask)
{
    if (islit) {
        gen_zapnoti(vc, va, ~(byte_mask << (lit & 7)));
    } else {
        TCGv shift = tcg_temp_new();
        TCGv mask = tcg_temp_new();

        tcg_gen_andi_i64(shift, load_gpr(ctx, rb), 7);
        tcg_gen_shli_i64(shift, shift, 3);
        tcg_gen_movi_i64(mask, zapnot_mask(byte_mask));
        tcg_gen_shl_i64(mask, mask, shift);

        tcg_gen_andc_i64(vc, va, mask);

        tcg_temp_free(mask);
        tcg_temp_free(shift);
    }
}

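/* RC and RS: read the interrupt flag into RA, then clear (RC) or set
   (RS) it.  */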
static void gen_rx(DisasContext *ctx, int ra, int set)
{
    TCGv_i32 tmp;

    if (ra != 31) {
        tcg_gen_ld8u_i64(ctx->ir[ra], cpu_env,
                         offsetof(CPUAlphaState, intr_flag));
    }

    tmp = tcg_const_i32(set);
    tcg_gen_st8_i32(tmp, cpu_env, offsetof(CPUAlphaState, intr_flag));
    tcg_temp_free_i32(tmp);
}

static ExitStatus gen_call_pal(DisasContext *ctx, int palcode)
{
    /* We're emulating OSF/1 PALcode.  Many of these are trivial access
       to internal cpu registers.  */

    /* Unprivileged PAL call */
    if (palcode >= 0x80 && palcode < 0xC0) {
        switch (palcode) {
        case 0x86:
            /* IMB */
            /* No-op inside QEMU.  */
            break;
        case 0x9E:
            /* RDUNIQUE */
            tcg_gen_ld_i64(ctx->ir[IR_V0], cpu_env,
                           offsetof(CPUAlphaState, unique));
            break;
        case 0x9F:
            /* WRUNIQUE */
            tcg_gen_st_i64(ctx->ir[IR_A0], cpu_env,
                           offsetof(CPUAlphaState, unique));
            break;
        default:
            palcode &= 0xbf;
            goto do_call_pal;
        }
        return NO_EXIT;
    }

#ifndef CONFIG_USER_ONLY
    /* Privileged PAL code */
    if (palcode < 0x40 && (ctx->tb->flags & TB_FLAGS_USER_MODE) == 0) {
        TCGv tmp;
        switch (palcode) {
        case 0x01:
            /* CFLUSH */
            /* No-op inside QEMU.  */
            break;
        case 0x02:
            /* DRAINA */
            /* No-op inside QEMU.  */
            break;
        case 0x2D:
            /* WRVPTPTR */
            tcg_gen_st_i64(ctx->ir[IR_A0], cpu_env,
                           offsetof(CPUAlphaState, vptptr));
            break;
        case 0x31:
            /* WRVAL */
            tcg_gen_st_i64(ctx->ir[IR_A0], cpu_env,
                           offsetof(CPUAlphaState, sysval));
            break;
        case 0x32:
            /* RDVAL */
            tcg_gen_ld_i64(ctx->ir[IR_V0], cpu_env,
                           offsetof(CPUAlphaState, sysval));
            break;

        case 0x35:
            /* SWPIPL */
            /* Note that we already know we're in kernel mode, so we know
               that PS only contains the 3 IPL bits.  */
            tcg_gen_ld8u_i64(ctx->ir[IR_V0], cpu_env,
                             offsetof(CPUAlphaState, ps));

            /* But make sure and store only the 3 IPL bits from the user.  */
            tmp = tcg_temp_new();
            tcg_gen_andi_i64(tmp, ctx->ir[IR_A0], PS_INT_MASK);
            tcg_gen_st8_i64(tmp, cpu_env, offsetof(CPUAlphaState, ps));
            tcg_temp_free(tmp);
            break;

        case 0x36:
            /* RDPS */
            tcg_gen_ld8u_i64(ctx->ir[IR_V0], cpu_env,
                             offsetof(CPUAlphaState, ps));
            break;
        case 0x38:
            /* WRUSP */
            tcg_gen_st_i64(ctx->ir[IR_A0], cpu_env,
                           offsetof(CPUAlphaState, usp));
            break;
        case 0x3A:
            /* RDUSP */
            tcg_gen_ld_i64(ctx->ir[IR_V0], cpu_env,
                           offsetof(CPUAlphaState, usp));
            break;
        case 0x3C:
            /* WHAMI */
            tcg_gen_ld32s_i64(ctx->ir[IR_V0], cpu_env,
                -offsetof(AlphaCPU, env) + offsetof(CPUState, cpu_index));
            break;

        case 0x3E:
            /* WTINT */
            tmp = tcg_const_i64(1);
            tcg_gen_st32_i64(tmp, cpu_env, -offsetof(AlphaCPU, env) +
                                           offsetof(CPUState, halted));
            tcg_gen_movi_i64(ctx->ir[IR_V0], 0);
            return gen_excp(ctx, EXCP_HALTED, 0);

        default:
            palcode &= 0x3f;
            goto do_call_pal;
        }
        return NO_EXIT;
    }
#endif
    return gen_invalid(ctx);

 do_call_pal:
#ifdef CONFIG_USER_ONLY
    return gen_excp(ctx, EXCP_CALL_PAL, palcode);
#else
    {
        TCGv tmp = tcg_temp_new();
        uint64_t exc_addr = ctx->pc;
        uint64_t entry = ctx->palbr;

        if (ctx->tb->flags & TB_FLAGS_PAL_MODE) {
            exc_addr |= 1;
        } else {
            tcg_gen_movi_i64(tmp, 1);
            tcg_gen_st8_i64(tmp, cpu_env, offsetof(CPUAlphaState, pal_mode));
        }

        tcg_gen_movi_i64(tmp, exc_addr);
        tcg_gen_st_i64(tmp, cpu_env, offsetof(CPUAlphaState, exc_addr));
        tcg_temp_free(tmp);
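
        /* Compute the PALcode entry point.  In this layout, privileged
           entries live at PALBR + 0x1000 + index * 64 and unprivileged
           entries at PALBR + 0x2000 + index * 64.  */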
        entry += (palcode & 0x80
                  ? 0x2000 + (palcode - 0x80) * 64
                  : 0x1000 + palcode * 64);

        /* Since the destination is running in PALmode, we don't really
           need the page permissions check.  We'll see the existence of
           the page when we create the TB, and we'll flush all TBs if
           we change the PAL base register.  */
        if (!ctx->singlestep_enabled && !(ctx->tb->cflags & CF_LAST_IO)) {
            tcg_gen_goto_tb(0);
            tcg_gen_movi_i64(cpu_pc, entry);
            tcg_gen_exit_tb((uintptr_t)ctx->tb);
            return EXIT_GOTO_TB;
        } else {
            tcg_gen_movi_i64(cpu_pc, entry);
            return EXIT_PC_UPDATED;
        }
    }
#endif
}

#ifndef CONFIG_USER_ONLY

#define PR_BYTE         0x100000
#define PR_LONG         0x200000
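
/* cpu_pr_data returns the CPUAlphaState offset of a processor register,
   optionally tagged with PR_BYTE or PR_LONG to indicate a sub-quadword
   access size.  A return of 0 marks an unknown register, which reads as
   zero and ignores writes.  */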
static int cpu_pr_data(int pr)
{
    switch (pr) {
    case  0: return offsetof(CPUAlphaState, ps) | PR_BYTE;
    case  1: return offsetof(CPUAlphaState, fen) | PR_BYTE;
    case  2: return offsetof(CPUAlphaState, pcc_ofs) | PR_LONG;
    case  3: return offsetof(CPUAlphaState, trap_arg0);
    case  4: return offsetof(CPUAlphaState, trap_arg1);
    case  5: return offsetof(CPUAlphaState, trap_arg2);
    case  6: return offsetof(CPUAlphaState, exc_addr);
    case  7: return offsetof(CPUAlphaState, palbr);
    case  8: return offsetof(CPUAlphaState, ptbr);
    case  9: return offsetof(CPUAlphaState, vptptr);
    case 10: return offsetof(CPUAlphaState, unique);
    case 11: return offsetof(CPUAlphaState, sysval);
    case 12: return offsetof(CPUAlphaState, usp);

    case 40 ... 63:
        return offsetof(CPUAlphaState, scratch[pr - 40]);

    case 251:
        return offsetof(CPUAlphaState, alarm_expire);
    }
    return 0;
}

static ExitStatus gen_mfpr(DisasContext *ctx, TCGv va, int regno)
{
    void (*helper)(TCGv);
    int data;

    switch (regno) {
    case 32 ... 39:
        /* Accessing the "non-shadow" general registers.  */
        regno = regno == 39 ? 25 : regno - 32 + 8;
        tcg_gen_mov_i64(va, cpu_std_ir[regno]);
        break;

    case 250: /* WALLTIME */
        helper = gen_helper_get_walltime;
        goto do_helper;
    case 249: /* VMTIME */
        helper = gen_helper_get_vmtime;
    do_helper:
        if (use_icount) {
            gen_io_start();
            helper(va);
            gen_io_end();
            return EXIT_PC_STALE;
        } else {
            helper(va);
        }
        break;

    default:
        /* The basic registers are data only, and unknown registers
           are read-zero, write-ignore.  */
        data = cpu_pr_data(regno);
        if (data == 0) {
            tcg_gen_movi_i64(va, 0);
        } else if (data & PR_BYTE) {
            tcg_gen_ld8u_i64(va, cpu_env, data & ~PR_BYTE);
        } else if (data & PR_LONG) {
            tcg_gen_ld32s_i64(va, cpu_env, data & ~PR_LONG);
        } else {
            tcg_gen_ld_i64(va, cpu_env, data);
        }
        break;
    }

    return NO_EXIT;
}

static ExitStatus gen_mtpr(DisasContext *ctx, TCGv vb, int regno)
{
    TCGv tmp;
    int data;

    switch (regno) {
    case 255:
        /* TBIA */
        gen_helper_tbia(cpu_env);
        break;

    case 254:
        /* TBIS */
        gen_helper_tbis(cpu_env, vb);
        break;

    case 253:
        /* WAIT */
        tmp = tcg_const_i64(1);
        tcg_gen_st32_i64(tmp, cpu_env, -offsetof(AlphaCPU, env) +
                                       offsetof(CPUState, halted));
        return gen_excp(ctx, EXCP_HALTED, 0);

    case 252:
        /* HALT */
        gen_helper_halt(vb);
        return EXIT_PC_STALE;

    case 251:
        /* ALARM */
        gen_helper_set_alarm(cpu_env, vb);
        break;

    case 7:
        /* PALBR */
        tcg_gen_st_i64(vb, cpu_env, offsetof(CPUAlphaState, palbr));
        /* Changing the PAL base register implies un-chaining all of the TBs
           that ended with a CALL_PAL.  Since the base register usually only
           changes during boot, flushing everything works well.  */
        gen_helper_tb_flush(cpu_env);
        return EXIT_PC_STALE;

    case 32 ... 39:
        /* Accessing the "non-shadow" general registers.  */
        regno = regno == 39 ? 25 : regno - 32 + 8;
        tcg_gen_mov_i64(cpu_std_ir[regno], vb);
        break;

    default:
        /* The basic registers are data only, and unknown registers
           are read-zero, write-ignore.  */
        data = cpu_pr_data(regno);
        if (data != 0) {
            if (data & PR_BYTE) {
                tcg_gen_st8_i64(vb, cpu_env, data & ~PR_BYTE);
            } else if (data & PR_LONG) {
                tcg_gen_st32_i64(vb, cpu_env, data & ~PR_LONG);
            } else {
                tcg_gen_st_i64(vb, cpu_env, data);
            }
        }
        break;
    }

    return NO_EXIT;
}
#endif /* !USER_ONLY */

#define REQUIRE_NO_LIT                          \
    do {                                        \
        if (real_islit) {                       \
            goto invalid_opc;                   \
        }                                       \
    } while (0)

#define REQUIRE_TB_FLAG(FLAG)                   \
    do {                                        \
        if ((ctx->tb->flags & (FLAG)) == 0) {   \
            goto invalid_opc;                   \
        }                                       \
    } while (0)

#define REQUIRE_REG_31(WHICH)                   \
    do {                                        \
        if (WHICH != 31) {                      \
            goto invalid_opc;                   \
        }                                       \
    } while (0)

static ExitStatus translate_one(DisasContext *ctx, uint32_t insn)
{
    int32_t disp21, disp16, disp12 __attribute__((unused));
    uint16_t fn11;
    uint8_t opc, ra, rb, rc, fpfn, fn7, lit;
    bool islit, real_islit;
    TCGv va, vb, vc, tmp, tmp2;
    TCGv_i32 t32;
    ExitStatus ret;

    /* Decode all instruction fields */
    opc = extract32(insn, 26, 6);
    ra = extract32(insn, 21, 5);
    rb = extract32(insn, 16, 5);
    rc = extract32(insn, 0, 5);
    real_islit = islit = extract32(insn, 12, 1);
    lit = extract32(insn, 13, 8);

    disp21 = sextract32(insn, 0, 21);
    disp16 = sextract32(insn, 0, 16);
    disp12 = sextract32(insn, 0, 12);

    fn11 = extract32(insn, 5, 11);
    fpfn = extract32(insn, 5, 6);
    fn7 = extract32(insn, 5, 7);
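
    /* An RB operand of $31 reads as zero, so fold it into the literal
       path; the cases below then need not special-case it.  */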
    if (rb == 31 && !islit) {
        islit = true;
        lit = 0;
    }

    ret = NO_EXIT;
    switch (opc) {
    case 0x00:
        /* CALL_PAL */
        ret = gen_call_pal(ctx, insn & 0x03ffffff);
        break;
    case 0x01:
        /* OPC01 */
        goto invalid_opc;
    case 0x02:
        /* OPC02 */
        goto invalid_opc;
    case 0x03:
        /* OPC03 */
        goto invalid_opc;
    case 0x04:
        /* OPC04 */
        goto invalid_opc;
    case 0x05:
        /* OPC05 */
        goto invalid_opc;
    case 0x06:
        /* OPC06 */
        goto invalid_opc;
    case 0x07:
        /* OPC07 */
        goto invalid_opc;

    case 0x09:
        /* LDAH */
        disp16 = (uint32_t)disp16 << 16;
        /* fall through */
    case 0x08:
        /* LDA */
        va = dest_gpr(ctx, ra);
        /* It's worth special-casing immediate loads.  */
        if (rb == 31) {
            tcg_gen_movi_i64(va, disp16);
        } else {
            tcg_gen_addi_i64(va, load_gpr(ctx, rb), disp16);
        }
        break;

    case 0x0A:
        /* LDBU */
        REQUIRE_TB_FLAG(TB_FLAGS_AMASK_BWX);
        gen_load_mem(ctx, &tcg_gen_qemu_ld8u, ra, rb, disp16, 0, 0);
        break;
    case 0x0B:
        /* LDQ_U */
        gen_load_mem(ctx, &tcg_gen_qemu_ld64, ra, rb, disp16, 0, 1);
        break;
    case 0x0C:
        /* LDWU */
        REQUIRE_TB_FLAG(TB_FLAGS_AMASK_BWX);
        gen_load_mem(ctx, &tcg_gen_qemu_ld16u, ra, rb, disp16, 0, 0);
        break;
    case 0x0D:
        /* STW */
        REQUIRE_TB_FLAG(TB_FLAGS_AMASK_BWX);
        gen_store_mem(ctx, &tcg_gen_qemu_st16, ra, rb, disp16, 0, 0);
        break;
    case 0x0E:
        /* STB */
        REQUIRE_TB_FLAG(TB_FLAGS_AMASK_BWX);
        gen_store_mem(ctx, &tcg_gen_qemu_st8, ra, rb, disp16, 0, 0);
        break;
    case 0x0F:
        /* STQ_U */
        gen_store_mem(ctx, &tcg_gen_qemu_st64, ra, rb, disp16, 0, 1);
        break;

    case 0x10:
        vc = dest_gpr(ctx, rc);
        vb = load_gpr_lit(ctx, rb, lit, islit);

        if (ra == 31) {
            if (fn7 == 0x00) {
                /* Special case ADDL as SEXTL.  */
                tcg_gen_ext32s_i64(vc, vb);
                break;
            }

            if (fn7 == 0x29) {
                /* Special case SUBQ as NEGQ.  */
                tcg_gen_neg_i64(vc, vb);
                break;
            }
        }

        va = load_gpr(ctx, ra);
        switch (fn7) {
        case 0x00:
            /* ADDL */
            tcg_gen_add_i64(vc, va, vb);
            tcg_gen_ext32s_i64(vc, vc);
            break;
        case 0x02:
            /* S4ADDL */
            tmp = tcg_temp_new();
            tcg_gen_shli_i64(tmp, va, 2);
            tcg_gen_add_i64(tmp, tmp, vb);
            tcg_gen_ext32s_i64(vc, tmp);
            tcg_temp_free(tmp);
            break;
        case 0x09:
            /* SUBL */
            tcg_gen_sub_i64(vc, va, vb);
            tcg_gen_ext32s_i64(vc, vc);
            break;
        case 0x0B:
            /* S4SUBL */
            tmp = tcg_temp_new();
            tcg_gen_shli_i64(tmp, va, 2);
            tcg_gen_sub_i64(tmp, tmp, vb);
            tcg_gen_ext32s_i64(vc, tmp);
            tcg_temp_free(tmp);
            break;
        case 0x0F:
            /* CMPBGE */
            if (ra == 31) {
                /* Special case 0 >= X as X == 0.  */
                gen_helper_cmpbe0(vc, vb);
            } else {
                gen_helper_cmpbge(vc, va, vb);
            }
            break;
        case 0x12:
            /* S8ADDL */
            tmp = tcg_temp_new();
            tcg_gen_shli_i64(tmp, va, 3);
            tcg_gen_add_i64(tmp, tmp, vb);
            tcg_gen_ext32s_i64(vc, tmp);
            tcg_temp_free(tmp);
            break;
        case 0x1B:
            /* S8SUBL */
            tmp = tcg_temp_new();
            tcg_gen_shli_i64(tmp, va, 3);
            tcg_gen_sub_i64(tmp, tmp, vb);
            tcg_gen_ext32s_i64(vc, tmp);
            tcg_temp_free(tmp);
            break;
        case 0x1D:
            /* CMPULT */
            tcg_gen_setcond_i64(TCG_COND_LTU, vc, va, vb);
            break;
        case 0x20:
            /* ADDQ */
            tcg_gen_add_i64(vc, va, vb);
            break;
        case 0x22:
            /* S4ADDQ */
            tmp = tcg_temp_new();
            tcg_gen_shli_i64(tmp, va, 2);
            tcg_gen_add_i64(vc, tmp, vb);
            tcg_temp_free(tmp);
            break;
        case 0x29:
            /* SUBQ */
            tcg_gen_sub_i64(vc, va, vb);
            break;
        case 0x2B:
            /* S4SUBQ */
            tmp = tcg_temp_new();
            tcg_gen_shli_i64(tmp, va, 2);
            tcg_gen_sub_i64(vc, tmp, vb);
            tcg_temp_free(tmp);
            break;
        case 0x2D:
            /* CMPEQ */
            tcg_gen_setcond_i64(TCG_COND_EQ, vc, va, vb);
            break;
        case 0x32:
            /* S8ADDQ */
            tmp = tcg_temp_new();
            tcg_gen_shli_i64(tmp, va, 3);
            tcg_gen_add_i64(vc, tmp, vb);
            tcg_temp_free(tmp);
            break;
        case 0x3B:
            /* S8SUBQ */
            tmp = tcg_temp_new();
            tcg_gen_shli_i64(tmp, va, 3);
            tcg_gen_sub_i64(vc, tmp, vb);
            tcg_temp_free(tmp);
            break;
        case 0x3D:
            /* CMPULE */
            tcg_gen_setcond_i64(TCG_COND_LEU, vc, va, vb);
            break;
        case 0x40:
            /* ADDL/V */
            tmp = tcg_temp_new();
            tcg_gen_ext32s_i64(tmp, va);
            tcg_gen_ext32s_i64(vc, vb);
            tcg_gen_add_i64(tmp, tmp, vc);
            tcg_gen_ext32s_i64(vc, tmp);
            gen_helper_check_overflow(cpu_env, vc, tmp);
            tcg_temp_free(tmp);
            break;
        case 0x49:
            /* SUBL/V */
            tmp = tcg_temp_new();
            tcg_gen_ext32s_i64(tmp, va);
            tcg_gen_ext32s_i64(vc, vb);
            tcg_gen_sub_i64(tmp, tmp, vc);
            tcg_gen_ext32s_i64(vc, tmp);
            gen_helper_check_overflow(cpu_env, vc, tmp);
            tcg_temp_free(tmp);
            break;
        case 0x4D:
            /* CMPLT */
            tcg_gen_setcond_i64(TCG_COND_LT, vc, va, vb);
            break;
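        /* For the 64-bit ADDQ/V and SUBQ/V, detect signed overflow with
           the usual bit trick: overflow occurred iff the operands' sign
           bits agree (disagree, for subtraction) while the result's sign
           differs from the first operand's; bit 63 of the computed mask
           holds that predicate.  */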
        case 0x60:
            /* ADDQ/V */
            tmp = tcg_temp_new();
            tmp2 = tcg_temp_new();
            tcg_gen_eqv_i64(tmp, va, vb);
            tcg_gen_mov_i64(tmp2, va);
            tcg_gen_add_i64(vc, va, vb);
            tcg_gen_xor_i64(tmp2, tmp2, vc);
            tcg_gen_and_i64(tmp, tmp, tmp2);
            tcg_gen_shri_i64(tmp, tmp, 63);
            tcg_gen_movi_i64(tmp2, 0);
            gen_helper_check_overflow(cpu_env, tmp, tmp2);
            tcg_temp_free(tmp);
            tcg_temp_free(tmp2);
            break;
        case 0x69:
            /* SUBQ/V */
            tmp = tcg_temp_new();
            tmp2 = tcg_temp_new();
            tcg_gen_xor_i64(tmp, va, vb);
            tcg_gen_mov_i64(tmp2, va);
            tcg_gen_sub_i64(vc, va, vb);
            tcg_gen_xor_i64(tmp2, tmp2, vc);
            tcg_gen_and_i64(tmp, tmp, tmp2);
            tcg_gen_shri_i64(tmp, tmp, 63);
            tcg_gen_movi_i64(tmp2, 0);
            gen_helper_check_overflow(cpu_env, tmp, tmp2);
            tcg_temp_free(tmp);
            tcg_temp_free(tmp2);
            break;
        case 0x6D:
            /* CMPLE */
            tcg_gen_setcond_i64(TCG_COND_LE, vc, va, vb);
            break;
        default:
            goto invalid_opc;
        }
        break;

    case 0x11:
        if (fn7 == 0x20) {
            if (rc == 31) {
                /* Special case BIS as NOP.  */
                break;
            }
            if (ra == 31) {
                /* Special case BIS as MOV.  */
                vc = dest_gpr(ctx, rc);
                if (islit) {
                    tcg_gen_movi_i64(vc, lit);
                } else {
                    tcg_gen_mov_i64(vc, load_gpr(ctx, rb));
                }
                break;
            }
        }

        vc = dest_gpr(ctx, rc);
        vb = load_gpr_lit(ctx, rb, lit, islit);

        if (fn7 == 0x28 && ra == 31) {
            /* Special case ORNOT as NOT.  */
            tcg_gen_not_i64(vc, vb);
            break;
        }

        va = load_gpr(ctx, ra);
        switch (fn7) {
        case 0x00:
            /* AND */
            tcg_gen_and_i64(vc, va, vb);
            break;
        case 0x08:
            /* BIC */
            tcg_gen_andc_i64(vc, va, vb);
            break;
        case 0x14:
            /* CMOVLBS */
            tmp = tcg_temp_new();
            tcg_gen_andi_i64(tmp, va, 1);
            tcg_gen_movcond_i64(TCG_COND_NE, vc, tmp, load_zero(ctx),
                                vb, load_gpr(ctx, rc));
            tcg_temp_free(tmp);
            break;
        case 0x16:
            /* CMOVLBC */
            tmp = tcg_temp_new();
            tcg_gen_andi_i64(tmp, va, 1);
            tcg_gen_movcond_i64(TCG_COND_EQ, vc, tmp, load_zero(ctx),
                                vb, load_gpr(ctx, rc));
            tcg_temp_free(tmp);
            break;
        case 0x20:
            /* BIS */
            tcg_gen_or_i64(vc, va, vb);
            break;
        case 0x24:
            /* CMOVEQ */
            tcg_gen_movcond_i64(TCG_COND_EQ, vc, va, load_zero(ctx),
                                vb, load_gpr(ctx, rc));
            break;
        case 0x26:
            /* CMOVNE */
            tcg_gen_movcond_i64(TCG_COND_NE, vc, va, load_zero(ctx),
                                vb, load_gpr(ctx, rc));
            break;
        case 0x28:
            /* ORNOT */
            tcg_gen_orc_i64(vc, va, vb);
            break;
        case 0x40:
            /* XOR */
            tcg_gen_xor_i64(vc, va, vb);
            break;
        case 0x44:
            /* CMOVLT */
            tcg_gen_movcond_i64(TCG_COND_LT, vc, va, load_zero(ctx),
                                vb, load_gpr(ctx, rc));
            break;
        case 0x46:
            /* CMOVGE */
            tcg_gen_movcond_i64(TCG_COND_GE, vc, va, load_zero(ctx),
                                vb, load_gpr(ctx, rc));
            break;
        case 0x48:
            /* EQV */
            tcg_gen_eqv_i64(vc, va, vb);
            break;
        case 0x61:
            /* AMASK */
            REQUIRE_REG_31(ra);
            {
                uint64_t amask = ctx->tb->flags >> TB_FLAGS_AMASK_SHIFT;
                tcg_gen_andi_i64(vc, vb, ~amask);
            }
            break;
        case 0x64:
            /* CMOVLE */
            tcg_gen_movcond_i64(TCG_COND_LE, vc, va, load_zero(ctx),
                                vb, load_gpr(ctx, rc));
            break;
        case 0x66:
            /* CMOVGT */
            tcg_gen_movcond_i64(TCG_COND_GT, vc, va, load_zero(ctx),
                                vb, load_gpr(ctx, rc));
            break;
        case 0x6C:
            /* IMPLVER */
            REQUIRE_REG_31(ra);
            tcg_gen_movi_i64(vc, ctx->implver);
            break;
        default:
            goto invalid_opc;
        }
        break;

    case 0x12:
        vc = dest_gpr(ctx, rc);
        va = load_gpr(ctx, ra);
        switch (fn7) {
        case 0x02:
            /* MSKBL */
            gen_msk_l(ctx, vc, va, rb, islit, lit, 0x01);
            break;
        case 0x06:
            /* EXTBL */
            gen_ext_l(ctx, vc, va, rb, islit, lit, 0x01);
            break;
        case 0x0B:
            /* INSBL */
            gen_ins_l(ctx, vc, va, rb, islit, lit, 0x01);
            break;
        case 0x12:
            /* MSKWL */
            gen_msk_l(ctx, vc, va, rb, islit, lit, 0x03);
            break;
        case 0x16:
            /* EXTWL */
            gen_ext_l(ctx, vc, va, rb, islit, lit, 0x03);
            break;
        case 0x1B:
            /* INSWL */
            gen_ins_l(ctx, vc, va, rb, islit, lit, 0x03);
            break;
        case 0x22:
            /* MSKLL */
            gen_msk_l(ctx, vc, va, rb, islit, lit, 0x0f);
            break;
        case 0x26:
            /* EXTLL */
            gen_ext_l(ctx, vc, va, rb, islit, lit, 0x0f);
            break;
        case 0x2B:
            /* INSLL */
            gen_ins_l(ctx, vc, va, rb, islit, lit, 0x0f);
            break;
        case 0x30:
            /* ZAP */
            if (islit) {
                gen_zapnoti(vc, va, ~lit);
            } else {
                gen_helper_zap(vc, va, load_gpr(ctx, rb));
            }
            break;
        case 0x31:
            /* ZAPNOT */
            if (islit) {
                gen_zapnoti(vc, va, lit);
            } else {
                gen_helper_zapnot(vc, va, load_gpr(ctx, rb));
            }
            break;
        case 0x32:
            /* MSKQL */
            gen_msk_l(ctx, vc, va, rb, islit, lit, 0xff);
            break;
        case 0x34:
            /* SRL */
            if (islit) {
                tcg_gen_shri_i64(vc, va, lit & 0x3f);
            } else {
                tmp = tcg_temp_new();
                vb = load_gpr(ctx, rb);
                tcg_gen_andi_i64(tmp, vb, 0x3f);
                tcg_gen_shr_i64(vc, va, tmp);
                tcg_temp_free(tmp);
            }
            break;
        case 0x36:
            /* EXTQL */
            gen_ext_l(ctx, vc, va, rb, islit, lit, 0xff);
            break;
        case 0x39:
            /* SLL */
            if (islit) {
                tcg_gen_shli_i64(vc, va, lit & 0x3f);
            } else {
                tmp = tcg_temp_new();
                vb = load_gpr(ctx, rb);
                tcg_gen_andi_i64(tmp, vb, 0x3f);
                tcg_gen_shl_i64(vc, va, tmp);
                tcg_temp_free(tmp);
            }
            break;
        case 0x3B:
            /* INSQL */
            gen_ins_l(ctx, vc, va, rb, islit, lit, 0xff);
            break;
        case 0x3C:
            /* SRA */
            if (islit) {
                tcg_gen_sari_i64(vc, va, lit & 0x3f);
            } else {
                tmp = tcg_temp_new();
                vb = load_gpr(ctx, rb);
                tcg_gen_andi_i64(tmp, vb, 0x3f);
                tcg_gen_sar_i64(vc, va, tmp);
                tcg_temp_free(tmp);
            }
            break;
        case 0x52:
            /* MSKWH */
            gen_msk_h(ctx, vc, va, rb, islit, lit, 0x03);
            break;
        case 0x57:
            /* INSWH */
            gen_ins_h(ctx, vc, va, rb, islit, lit, 0x03);
            break;
        case 0x5A:
            /* EXTWH */
            gen_ext_h(ctx, vc, va, rb, islit, lit, 0x03);
            break;
        case 0x62:
            /* MSKLH */
            gen_msk_h(ctx, vc, va, rb, islit, lit, 0x0f);
            break;
        case 0x67:
            /* INSLH */
            gen_ins_h(ctx, vc, va, rb, islit, lit, 0x0f);
            break;
        case 0x6A:
            /* EXTLH */
            gen_ext_h(ctx, vc, va, rb, islit, lit, 0x0f);
            break;
        case 0x72:
            /* MSKQH */
            gen_msk_h(ctx, vc, va, rb, islit, lit, 0xff);
            break;
        case 0x77:
            /* INSQH */
            gen_ins_h(ctx, vc, va, rb, islit, lit, 0xff);
            break;
        case 0x7A:
            /* EXTQH */
            gen_ext_h(ctx, vc, va, rb, islit, lit, 0xff);
            break;
        default:
            goto invalid_opc;
        }
        break;

    case 0x13:
        vc = dest_gpr(ctx, rc);
        vb = load_gpr_lit(ctx, rb, lit, islit);
        va = load_gpr(ctx, ra);
        switch (fn7) {
        case 0x00:
            /* MULL */
            tcg_gen_mul_i64(vc, va, vb);
            tcg_gen_ext32s_i64(vc, vc);
            break;
        case 0x20:
            /* MULQ */
            tcg_gen_mul_i64(vc, va, vb);
            break;
        case 0x30:
            /* UMULH */
            tmp = tcg_temp_new();
            tcg_gen_mulu2_i64(tmp, vc, va, vb);
            tcg_temp_free(tmp);
            break;
        case 0x40:
            /* MULL/V */
            tmp = tcg_temp_new();
            tcg_gen_ext32s_i64(tmp, va);
            tcg_gen_ext32s_i64(vc, vb);
            tcg_gen_mul_i64(tmp, tmp, vc);
            tcg_gen_ext32s_i64(vc, tmp);
            gen_helper_check_overflow(cpu_env, vc, tmp);
            tcg_temp_free(tmp);
            break;
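        /* MULQ/V: muls2 produces the full 128-bit signed product; the
           multiply overflowed iff the high half is not simply the sign
           extension of the low half.  */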
        case 0x60:
            /* MULQ/V */
            tmp = tcg_temp_new();
            tmp2 = tcg_temp_new();
            tcg_gen_muls2_i64(vc, tmp, va, vb);
            tcg_gen_sari_i64(tmp2, vc, 63);
            gen_helper_check_overflow(cpu_env, tmp, tmp2);
            tcg_temp_free(tmp);
            tcg_temp_free(tmp2);
            break;
        default:
            goto invalid_opc;
        }
        break;

    case 0x14:
        REQUIRE_TB_FLAG(TB_FLAGS_AMASK_FIX);
        vc = dest_fpr(ctx, rc);
        switch (fpfn) { /* fn11 & 0x3F */
        case 0x04:
            /* ITOFS */
            REQUIRE_REG_31(rb);
            t32 = tcg_temp_new_i32();
            va = load_gpr(ctx, ra);
            tcg_gen_extrl_i64_i32(t32, va);
            gen_helper_memory_to_s(vc, t32);
            tcg_temp_free_i32(t32);
            break;
        case 0x0A:
            /* SQRTF */
            REQUIRE_REG_31(ra);
            vb = load_fpr(ctx, rb);
            gen_helper_sqrtf(vc, cpu_env, vb);
            break;
        case 0x0B:
            /* SQRTS */
            REQUIRE_REG_31(ra);
            gen_sqrts(ctx, rb, rc, fn11);
            break;
        case 0x14:
            /* ITOFF */
            REQUIRE_REG_31(rb);
            t32 = tcg_temp_new_i32();
            va = load_gpr(ctx, ra);
            tcg_gen_extrl_i64_i32(t32, va);
            gen_helper_memory_to_f(vc, t32);
            tcg_temp_free_i32(t32);
            break;
        case 0x24:
            /* ITOFT */
            REQUIRE_REG_31(rb);
            va = load_gpr(ctx, ra);
            tcg_gen_mov_i64(vc, va);
            break;
        case 0x2A:
            /* SQRTG */
            REQUIRE_REG_31(ra);
            vb = load_fpr(ctx, rb);
            gen_helper_sqrtg(vc, cpu_env, vb);
            break;
        case 0x02B:
            /* SQRTT */
            REQUIRE_REG_31(ra);
            gen_sqrtt(ctx, rb, rc, fn11);
            break;
        default:
            goto invalid_opc;
        }
        break;

    case 0x15:
        /* VAX floating point */
        /* XXX: rounding mode and trap are ignored (!) */
        vc = dest_fpr(ctx, rc);
        vb = load_fpr(ctx, rb);
        va = load_fpr(ctx, ra);
        switch (fpfn) { /* fn11 & 0x3F */
        case 0x00:
            /* ADDF */
            gen_helper_addf(vc, cpu_env, va, vb);
            break;
        case 0x01:
            /* SUBF */
            gen_helper_subf(vc, cpu_env, va, vb);
            break;
        case 0x02:
            /* MULF */
            gen_helper_mulf(vc, cpu_env, va, vb);
            break;
        case 0x03:
            /* DIVF */
            gen_helper_divf(vc, cpu_env, va, vb);
            break;
        case 0x1E:
            /* CVTDG -- TODO */
            REQUIRE_REG_31(ra);
            goto invalid_opc;
        case 0x20:
            /* ADDG */
            gen_helper_addg(vc, cpu_env, va, vb);
            break;
        case 0x21:
            /* SUBG */
            gen_helper_subg(vc, cpu_env, va, vb);
            break;
        case 0x22:
            /* MULG */
            gen_helper_mulg(vc, cpu_env, va, vb);
            break;
        case 0x23:
            /* DIVG */
            gen_helper_divg(vc, cpu_env, va, vb);
            break;
        case 0x25:
            /* CMPGEQ */
            gen_helper_cmpgeq(vc, cpu_env, va, vb);
            break;
        case 0x26:
            /* CMPGLT */
            gen_helper_cmpglt(vc, cpu_env, va, vb);
            break;
        case 0x27:
            /* CMPGLE */
            gen_helper_cmpgle(vc, cpu_env, va, vb);
            break;
        case 0x2C:
            /* CVTGF */
            REQUIRE_REG_31(ra);
            gen_helper_cvtgf(vc, cpu_env, vb);
            break;
        case 0x2D:
            /* CVTGD -- TODO */
            REQUIRE_REG_31(ra);
            goto invalid_opc;
        case 0x2F:
            /* CVTGQ */
            REQUIRE_REG_31(ra);
            gen_helper_cvtgq(vc, cpu_env, vb);
            break;
        case 0x3C:
            /* CVTQF */
            REQUIRE_REG_31(ra);
            gen_helper_cvtqf(vc, cpu_env, vb);
            break;
        case 0x3E:
            /* CVTQG */
            REQUIRE_REG_31(ra);
            gen_helper_cvtqg(vc, cpu_env, vb);
            break;
        default:
            goto invalid_opc;
        }
        break;

    case 0x16:
        /* IEEE floating-point */
        switch (fpfn) { /* fn11 & 0x3F */
        case 0x00:
            /* ADDS */
            gen_adds(ctx, ra, rb, rc, fn11);
            break;
        case 0x01:
            /* SUBS */
            gen_subs(ctx, ra, rb, rc, fn11);
            break;
        case 0x02:
            /* MULS */
            gen_muls(ctx, ra, rb, rc, fn11);
            break;
        case 0x03:
            /* DIVS */
            gen_divs(ctx, ra, rb, rc, fn11);
            break;
        case 0x20:
            /* ADDT */
            gen_addt(ctx, ra, rb, rc, fn11);
            break;
        case 0x21:
            /* SUBT */
            gen_subt(ctx, ra, rb, rc, fn11);
            break;
        case 0x22:
            /* MULT */
            gen_mult(ctx, ra, rb, rc, fn11);
            break;
        case 0x23:
            /* DIVT */
            gen_divt(ctx, ra, rb, rc, fn11);
            break;
        case 0x24:
            /* CMPTUN */
            gen_cmptun(ctx, ra, rb, rc, fn11);
            break;
        case 0x25:
            /* CMPTEQ */
            gen_cmpteq(ctx, ra, rb, rc, fn11);
            break;
        case 0x26:
            /* CMPTLT */
            gen_cmptlt(ctx, ra, rb, rc, fn11);
            break;
        case 0x27:
            /* CMPTLE */
            gen_cmptle(ctx, ra, rb, rc, fn11);
            break;
        case 0x2C:
            REQUIRE_REG_31(ra);
            if (fn11 == 0x2AC || fn11 == 0x6AC) {
                /* CVTST */
                gen_cvtst(ctx, rb, rc, fn11);
            } else {
                /* CVTTS */
                gen_cvtts(ctx, rb, rc, fn11);
            }
            break;
        case 0x2F:
            /* CVTTQ */
            REQUIRE_REG_31(ra);
            gen_cvttq(ctx, rb, rc, fn11);
            break;
        case 0x3C:
            /* CVTQS */
            REQUIRE_REG_31(ra);
            gen_cvtqs(ctx, rb, rc, fn11);
            break;
        case 0x3E:
            /* CVTQT */
            REQUIRE_REG_31(ra);
            gen_cvtqt(ctx, rb, rc, fn11);
            break;
        default:
            goto invalid_opc;
        }
        break;

2260 case 0x17:
2261 switch (fn11) {
2262 case 0x010:
2263 /* CVTLQ */
2264 REQUIRE_REG_31(ra);
2265 vc = dest_fpr(ctx, rc);
2266 vb = load_fpr(ctx, rb);
2267 gen_cvtlq(vc, vb);
2268 break;
2269 case 0x020:
2270 /* CPYS */
2271 if (rc == 31) {
2272 /* Special case CPYS as FNOP. */
2273 } else {
2274 vc = dest_fpr(ctx, rc);
2275 va = load_fpr(ctx, ra);
2276 if (ra == rb) {
2277 /* Special case CPYS as FMOV. */
2278 tcg_gen_mov_i64(vc, va);
2279 } else {
2280 vb = load_fpr(ctx, rb);
2281 gen_cpy_mask(vc, va, vb, 0, 0x8000000000000000ULL);
2284 break;
2285 case 0x021:
2286 /* CPYSN */
2287 vc = dest_fpr(ctx, rc);
2288 vb = load_fpr(ctx, rb);
2289 va = load_fpr(ctx, ra);
2290 gen_cpy_mask(vc, va, vb, 1, 0x8000000000000000ULL);
2291 break;
2292 case 0x022:
2293 /* CPYSE */
2294 vc = dest_fpr(ctx, rc);
2295 vb = load_fpr(ctx, rb);
2296 va = load_fpr(ctx, ra);
2297 gen_cpy_mask(vc, va, vb, 0, 0xFFF0000000000000ULL);
2298 break;
2299 case 0x024:
2300 /* MT_FPCR */
2301 va = load_fpr(ctx, ra);
2302 gen_helper_store_fpcr(cpu_env, va);
2303 if (ctx->tb_rm == QUAL_RM_D) {
2304 /* Re-do the copy of the rounding mode to fp_status
2305 the next time we use dynamic rounding. */
2306 ctx->tb_rm = -1;
2307 }
2308 break;
2309 case 0x025:
2310 /* MF_FPCR */
2311 va = dest_fpr(ctx, ra);
2312 gen_helper_load_fpcr(va, cpu_env);
2313 break;
2314 case 0x02A:
2315 /* FCMOVEQ */
2316 gen_fcmov(ctx, TCG_COND_EQ, ra, rb, rc);
2317 break;
2318 case 0x02B:
2319 /* FCMOVNE */
2320 gen_fcmov(ctx, TCG_COND_NE, ra, rb, rc);
2321 break;
2322 case 0x02C:
2323 /* FCMOVLT */
2324 gen_fcmov(ctx, TCG_COND_LT, ra, rb, rc);
2325 break;
2326 case 0x02D:
2327 /* FCMOVGE */
2328 gen_fcmov(ctx, TCG_COND_GE, ra, rb, rc);
2329 break;
2330 case 0x02E:
2331 /* FCMOVLE */
2332 gen_fcmov(ctx, TCG_COND_LE, ra, rb, rc);
2333 break;
2334 case 0x02F:
2335 /* FCMOVGT */
2336 gen_fcmov(ctx, TCG_COND_GT, ra, rb, rc);
2337 break;
2338 case 0x030: /* CVTQL */
2339 case 0x130: /* CVTQL/V */
2340 case 0x530: /* CVTQL/SV */
2341 REQUIRE_REG_31(ra);
2342 vc = dest_fpr(ctx, rc);
2343 vb = load_fpr(ctx, rb);
2344 gen_helper_cvtql(vc, cpu_env, vb);
2345 gen_fp_exc_raise(rc, fn11);
2346 break;
2347 default:
2348 goto invalid_opc;
2349 }
2350 break;
2352 case 0x18:
2353 switch ((uint16_t)disp16) {
2354 case 0x0000:
2355 /* TRAPB */
2356 /* No-op. */
2357 break;
2358 case 0x0400:
2359 /* EXCB */
2360 /* No-op. */
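/* Both barriers wait for outstanding traps and exceptions to
   complete; TCG finishes each instruction, delivering any exception
   precisely, before starting the next, so there is nothing to do. */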
2361 break;
2362 case 0x4000:
2363 /* MB */
2364 tcg_gen_mb(TCG_MO_ALL | TCG_BAR_SC);
2365 break;
2366 case 0x4400:
2367 /* WMB */
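/* Unlike the full MB above, WMB need only order stores against
   later stores, hence the narrower TCG_MO_ST_ST constraint. */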
2368 tcg_gen_mb(TCG_MO_ST_ST | TCG_BAR_SC);
2369 break;
2370 case 0x8000:
2371 /* FETCH */
2372 /* No-op */
2373 break;
2374 case 0xA000:
2375 /* FETCH_M */
2376 /* No-op */
2377 break;
2378 case 0xC000:
2379 /* RPCC */
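/* Under icount, reading the cycle counter is an I/O-like operation:
   bracket it with gen_io_start/gen_io_end and end the TB
   (EXIT_PC_STALE) so that the instruction count stays exact. */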
2380 va = dest_gpr(ctx, ra);
2381 if (ctx->tb->cflags & CF_USE_ICOUNT) {
2382 gen_io_start();
2383 gen_helper_load_pcc(va, cpu_env);
2384 gen_io_end();
2385 ret = EXIT_PC_STALE;
2386 } else {
2387 gen_helper_load_pcc(va, cpu_env);
2388 }
2389 break;
2390 case 0xE000:
2391 /* RC */
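/* RC and RS copy intr_flag into Ra, then clear (RC) or set (RS)
   the flag. */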
2392 gen_rx(ctx, ra, 0);
2393 break;
2394 case 0xE800:
2395 /* ECB */
2396 break;
2397 case 0xF000:
2398 /* RS */
2399 gen_rx(ctx, ra, 1);
2400 break;
2401 case 0xF800:
2402 /* WH64 */
2403 /* No-op */
2404 break;
2405 case 0xFC00:
2406 /* WH64EN */
2407 /* No-op */
2408 break;
2409 default:
2410 goto invalid_opc;
2411 }
2412 break;
2414 case 0x19:
2415 /* HW_MFPR (PALcode) */
2416 #ifndef CONFIG_USER_ONLY
2417 REQUIRE_TB_FLAG(TB_FLAGS_PAL_MODE);
2418 va = dest_gpr(ctx, ra);
2419 ret = gen_mfpr(ctx, va, insn & 0xffff);
2420 break;
2421 #else
2422 goto invalid_opc;
2423 #endif
2425 case 0x1A:
2426 /* JMP, JSR, RET, JSR_COROUTINE. These only differ by the branch
2427 prediction stack action, which of course we don't implement. */
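/* The two low bits of the target are discarded, since Alpha PCs are
   always longword aligned; unless ra is $31 it receives the address
   of the next instruction. */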
2428 vb = load_gpr(ctx, rb);
2429 tcg_gen_andi_i64(cpu_pc, vb, ~3);
2430 if (ra != 31) {
2431 tcg_gen_movi_i64(ctx->ir[ra], ctx->pc);
2432 }
2433 ret = EXIT_PC_UPDATED;
2434 break;
2436 case 0x1B:
2437 /* HW_LD (PALcode) */
2438 #ifndef CONFIG_USER_ONLY
2439 REQUIRE_TB_FLAG(TB_FLAGS_PAL_MODE);
2440 {
2441 TCGv addr = tcg_temp_new();
2442 vb = load_gpr(ctx, rb);
2443 va = dest_gpr(ctx, ra);
2445 tcg_gen_addi_i64(addr, vb, disp12);
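/* Insn bits <15:12> select the access type; the physical variants
   use MMU_PHYS_IDX and so bypass address translation entirely. */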
2446 switch ((insn >> 12) & 0xF) {
2447 case 0x0:
2448 /* Longword physical access (hw_ldl/p) */
2449 tcg_gen_qemu_ld_i64(va, addr, MMU_PHYS_IDX, MO_LESL);
2450 break;
2451 case 0x1:
2452 /* Quadword physical access (hw_ldq/p) */
2453 tcg_gen_qemu_ld_i64(va, addr, MMU_PHYS_IDX, MO_LEQ);
2454 break;
2455 case 0x2:
2456 /* Longword physical access with lock (hw_ldl_l/p) */
2457 gen_qemu_ldl_l(va, addr, MMU_PHYS_IDX);
2458 break;
2459 case 0x3:
2460 /* Quadword physical access with lock (hw_ldq_l/p) */
2461 gen_qemu_ldq_l(va, addr, MMU_PHYS_IDX);
2462 break;
2463 case 0x4:
2464 /* Longword virtual PTE fetch (hw_ldl/v) */
2465 goto invalid_opc;
2466 case 0x5:
2467 /* Quadword virtual PTE fetch (hw_ldq/v) */
2468 goto invalid_opc;
2469 break;
2470 case 0x6:
2471 /* Invalid */
2472 goto invalid_opc;
2473 case 0x7:
2474 /* Invalid */
2475 goto invalid_opc;
2476 case 0x8:
2477 /* Longword virtual access (hw_ldl) */
2478 goto invalid_opc;
2479 case 0x9:
2480 /* Quadword virtual access (hw_ldq) */
2481 goto invalid_opc;
2482 case 0xA:
2483 /* Longword virtual access with protection check (hw_ldl/w) */
2484 tcg_gen_qemu_ld_i64(va, addr, MMU_KERNEL_IDX, MO_LESL);
2485 break;
2486 case 0xB:
2487 /* Quadword virtual access with protection check (hw_ldq/w) */
2488 tcg_gen_qemu_ld_i64(va, addr, MMU_KERNEL_IDX, MO_LEQ);
2489 break;
2490 case 0xC:
2491 /* Longword virtual access with alt access mode (hw_ldl/a) */
2492 goto invalid_opc;
2493 case 0xD:
2494 /* Quadword virtual access with alt access mode (hw_ldq/a) */
2495 goto invalid_opc;
2496 case 0xE:
2497 /* Longword virtual access with alternate access mode and
2498 protection checks (hw_ldl/wa) */
2499 tcg_gen_qemu_ld_i64(va, addr, MMU_USER_IDX, MO_LESL);
2500 break;
2501 case 0xF:
2502 /* Quadword virtual access with alternate access mode and
2503 protection checks (hw_ldq/wa) */
2504 tcg_gen_qemu_ld_i64(va, addr, MMU_USER_IDX, MO_LEQ);
2505 break;
2506 }
2507 tcg_temp_free(addr);
2508 break;
2509 }
2510 #else
2511 goto invalid_opc;
2512 #endif
2514 case 0x1C:
2515 vc = dest_gpr(ctx, rc);
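/* FTOIT and FTOIS move FP register bits to an integer register
   without rounding; FTOIS also repacks the S-format value into its
   32-bit memory image, which is then sign-extended. */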
2516 if (fn7 == 0x70) {
2517 /* FTOIT */
2518 REQUIRE_TB_FLAG(TB_FLAGS_AMASK_FIX);
2519 REQUIRE_REG_31(rb);
2520 va = load_fpr(ctx, ra);
2521 tcg_gen_mov_i64(vc, va);
2522 break;
2523 } else if (fn7 == 0x78) {
2524 /* FTOIS */
2525 REQUIRE_TB_FLAG(TB_FLAGS_AMASK_FIX);
2526 REQUIRE_REG_31(rb);
2527 t32 = tcg_temp_new_i32();
2528 va = load_fpr(ctx, ra);
2529 gen_helper_s_to_memory(t32, va);
2530 tcg_gen_ext_i32_i64(vc, t32);
2531 tcg_temp_free_i32(t32);
2532 break;
2533 }
2535 vb = load_gpr_lit(ctx, rb, lit, islit);
2536 switch (fn7) {
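/* The remaining 0x1C ops are gated on AMASK feature bits: BWX for
   the sign extensions, CIX for the count instructions, MVI for the
   multimedia ops. */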
2537 case 0x00:
2538 /* SEXTB */
2539 REQUIRE_TB_FLAG(TB_FLAGS_AMASK_BWX);
2540 REQUIRE_REG_31(ra);
2541 tcg_gen_ext8s_i64(vc, vb);
2542 break;
2543 case 0x01:
2544 /* SEXTW */
2545 REQUIRE_TB_FLAG(TB_FLAGS_AMASK_BWX);
2546 REQUIRE_REG_31(ra);
2547 tcg_gen_ext16s_i64(vc, vb);
2548 break;
2549 case 0x30:
2550 /* CTPOP */
2551 REQUIRE_TB_FLAG(TB_FLAGS_AMASK_CIX);
2552 REQUIRE_REG_31(ra);
2553 REQUIRE_NO_LIT;
2554 tcg_gen_ctpop_i64(vc, vb);
2555 break;
2556 case 0x31:
2557 /* PERR */
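/* PERR sums the absolute differences of the eight byte pairs in va
   and vb -- an MVI primitive intended for motion estimation. */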
2558 REQUIRE_TB_FLAG(TB_FLAGS_AMASK_MVI);
2559 REQUIRE_NO_LIT;
2560 va = load_gpr(ctx, ra);
2561 gen_helper_perr(vc, va, vb);
2562 break;
2563 case 0x32:
2564 /* CTLZ */
2565 REQUIRE_TB_FLAG(TB_FLAGS_AMASK_CIX);
2566 REQUIRE_REG_31(ra);
2567 REQUIRE_NO_LIT;
2568 tcg_gen_clzi_i64(vc, vb, 64);
2569 break;
2570 case 0x33:
2571 /* CTTZ */
2572 REQUIRE_TB_FLAG(TB_FLAGS_AMASK_CIX);
2573 REQUIRE_REG_31(ra);
2574 REQUIRE_NO_LIT;
2575 tcg_gen_ctzi_i64(vc, vb, 64);
2576 break;
2577 case 0x34:
2578 /* UNPKBW */
2579 REQUIRE_TB_FLAG(TB_FLAGS_AMASK_MVI);
2580 REQUIRE_REG_31(ra);
2581 REQUIRE_NO_LIT;
2582 gen_helper_unpkbw(vc, vb);
2583 break;
2584 case 0x35:
2585 /* UNPKBL */
2586 REQUIRE_TB_FLAG(TB_FLAGS_AMASK_MVI);
2587 REQUIRE_REG_31(ra);
2588 REQUIRE_NO_LIT;
2589 gen_helper_unpkbl(vc, vb);
2590 break;
2591 case 0x36:
2592 /* PKWB */
2593 REQUIRE_TB_FLAG(TB_FLAGS_AMASK_MVI);
2594 REQUIRE_REG_31(ra);
2595 REQUIRE_NO_LIT;
2596 gen_helper_pkwb(vc, vb);
2597 break;
2598 case 0x37:
2599 /* PKLB */
2600 REQUIRE_TB_FLAG(TB_FLAGS_AMASK_MVI);
2601 REQUIRE_REG_31(ra);
2602 REQUIRE_NO_LIT;
2603 gen_helper_pklb(vc, vb);
2604 break;
2605 case 0x38:
2606 /* MINSB8 */
2607 REQUIRE_TB_FLAG(TB_FLAGS_AMASK_MVI);
2608 va = load_gpr(ctx, ra);
2609 gen_helper_minsb8(vc, va, vb);
2610 break;
2611 case 0x39:
2612 /* MINSW4 */
2613 REQUIRE_TB_FLAG(TB_FLAGS_AMASK_MVI);
2614 va = load_gpr(ctx, ra);
2615 gen_helper_minsw4(vc, va, vb);
2616 break;
2617 case 0x3A:
2618 /* MINUB8 */
2619 REQUIRE_TB_FLAG(TB_FLAGS_AMASK_MVI);
2620 va = load_gpr(ctx, ra);
2621 gen_helper_minub8(vc, va, vb);
2622 break;
2623 case 0x3B:
2624 /* MINUW4 */
2625 REQUIRE_TB_FLAG(TB_FLAGS_AMASK_MVI);
2626 va = load_gpr(ctx, ra);
2627 gen_helper_minuw4(vc, va, vb);
2628 break;
2629 case 0x3C:
2630 /* MAXUB8 */
2631 REQUIRE_TB_FLAG(TB_FLAGS_AMASK_MVI);
2632 va = load_gpr(ctx, ra);
2633 gen_helper_maxub8(vc, va, vb);
2634 break;
2635 case 0x3D:
2636 /* MAXUW4 */
2637 REQUIRE_TB_FLAG(TB_FLAGS_AMASK_MVI);
2638 va = load_gpr(ctx, ra);
2639 gen_helper_maxuw4(vc, va, vb);
2640 break;
2641 case 0x3E:
2642 /* MAXSB8 */
2643 REQUIRE_TB_FLAG(TB_FLAGS_AMASK_MVI);
2644 va = load_gpr(ctx, ra);
2645 gen_helper_maxsb8(vc, va, vb);
2646 break;
2647 case 0x3F:
2648 /* MAXSW4 */
2649 REQUIRE_TB_FLAG(TB_FLAGS_AMASK_MVI);
2650 va = load_gpr(ctx, ra);
2651 gen_helper_maxsw4(vc, va, vb);
2652 break;
2653 default:
2654 goto invalid_opc;
2655 }
2656 break;
2658 case 0x1D:
2659 /* HW_MTPR (PALcode) */
2660 #ifndef CONFIG_USER_ONLY
2661 REQUIRE_TB_FLAG(TB_FLAGS_PAL_MODE);
2662 vb = load_gpr(ctx, rb);
2663 ret = gen_mtpr(ctx, vb, insn & 0xffff);
2664 break;
2665 #else
2666 goto invalid_opc;
2667 #endif
2669 case 0x1E:
2670 /* HW_RET (PALcode) */
2671 #ifndef CONFIG_USER_ONLY
2672 REQUIRE_TB_FLAG(TB_FLAGS_PAL_MODE);
2673 if (rb == 31) {
2674 /* Pre-EV6 CPUs interpreted this as HW_REI, loading the return
2675 address from EXC_ADDR. This turns out to be useful for our
2676 emulation PALcode, so continue to accept it. */
2677 ctx->lit = vb = tcg_temp_new();
2678 tcg_gen_ld_i64(vb, cpu_env, offsetof(CPUAlphaState, exc_addr));
2679 } else {
2680 vb = load_gpr(ctx, rb);
2681 }
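/* Leaving PALcode: clear intr_flag, break any load-locked
   reservation, and let bit 0 of the target select the new PAL mode
   before it is masked out of the PC. */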
2682 tmp = tcg_temp_new();
2683 tcg_gen_movi_i64(tmp, 0);
2684 tcg_gen_st8_i64(tmp, cpu_env, offsetof(CPUAlphaState, intr_flag));
2685 tcg_gen_movi_i64(cpu_lock_addr, -1);
2686 tcg_gen_andi_i64(tmp, vb, 1);
2687 tcg_gen_st8_i64(tmp, cpu_env, offsetof(CPUAlphaState, pal_mode));
2688 tcg_gen_andi_i64(cpu_pc, vb, ~3);
2689 ret = EXIT_PC_UPDATED;
2690 break;
2691 #else
2692 goto invalid_opc;
2693 #endif
2695 case 0x1F:
2696 /* HW_ST (PALcode) */
2697 #ifndef CONFIG_USER_ONLY
2698 REQUIRE_TB_FLAG(TB_FLAGS_PAL_MODE);
2699 {
2700 switch ((insn >> 12) & 0xF) {
2701 case 0x0:
2702 /* Longword physical access */
2703 va = load_gpr(ctx, ra);
2704 vb = load_gpr(ctx, rb);
2705 tmp = tcg_temp_new();
2706 tcg_gen_addi_i64(tmp, vb, disp12);
2707 tcg_gen_qemu_st_i64(va, tmp, MMU_PHYS_IDX, MO_LESL);
2708 tcg_temp_free(tmp);
2709 break;
2710 case 0x1:
2711 /* Quadword physical access */
2712 va = load_gpr(ctx, ra);
2713 vb = load_gpr(ctx, rb);
2714 tmp = tcg_temp_new();
2715 tcg_gen_addi_i64(tmp, vb, disp12);
2716 tcg_gen_qemu_st_i64(va, tmp, MMU_PHYS_IDX, MO_LEQ);
2717 tcg_temp_free(tmp);
2718 break;
2719 case 0x2:
2720 /* Longword physical access with lock */
2721 ret = gen_store_conditional(ctx, ra, rb, disp12,
2722 MMU_PHYS_IDX, MO_LESL);
2723 break;
2724 case 0x3:
2725 /* Quadword physical access with lock */
2726 ret = gen_store_conditional(ctx, ra, rb, disp12,
2727 MMU_PHYS_IDX, MO_LEQ);
2728 break;
2729 case 0x4:
2730 /* Longword virtual access */
2731 goto invalid_opc;
2732 case 0x5:
2733 /* Quadword virtual access */
2734 goto invalid_opc;
2735 case 0x6:
2736 /* Invalid */
2737 goto invalid_opc;
2738 case 0x7:
2739 /* Invalid */
2740 goto invalid_opc;
2741 case 0x8:
2742 /* Invalid */
2743 goto invalid_opc;
2744 case 0x9:
2745 /* Invalid */
2746 goto invalid_opc;
2747 case 0xA:
2748 /* Invalid */
2749 goto invalid_opc;
2750 case 0xB:
2751 /* Invalid */
2752 goto invalid_opc;
2753 case 0xC:
2754 /* Longword virtual access with alternate access mode */
2755 goto invalid_opc;
2756 case 0xD:
2757 /* Quadword virtual access with alternate access mode */
2758 goto invalid_opc;
2759 case 0xE:
2760 /* Invalid */
2761 goto invalid_opc;
2762 case 0xF:
2763 /* Invalid */
2764 goto invalid_opc;
2765 }
2766 break;
2767 }
2768 #else
2769 goto invalid_opc;
2770 #endif
2771 case 0x20:
2772 /* LDF */
2773 gen_load_mem(ctx, &gen_qemu_ldf, ra, rb, disp16, 1, 0);
2774 break;
2775 case 0x21:
2776 /* LDG */
2777 gen_load_mem(ctx, &gen_qemu_ldg, ra, rb, disp16, 1, 0);
2778 break;
2779 case 0x22:
2780 /* LDS */
2781 gen_load_mem(ctx, &gen_qemu_lds, ra, rb, disp16, 1, 0);
2782 break;
2783 case 0x23:
2784 /* LDT */
2785 gen_load_mem(ctx, &tcg_gen_qemu_ld64, ra, rb, disp16, 1, 0);
2786 break;
2787 case 0x24:
2788 /* STF */
2789 gen_store_mem(ctx, &gen_qemu_stf, ra, rb, disp16, 1, 0);
2790 break;
2791 case 0x25:
2792 /* STG */
2793 gen_store_mem(ctx, &gen_qemu_stg, ra, rb, disp16, 1, 0);
2794 break;
2795 case 0x26:
2796 /* STS */
2797 gen_store_mem(ctx, &gen_qemu_sts, ra, rb, disp16, 1, 0);
2798 break;
2799 case 0x27:
2800 /* STT */
2801 gen_store_mem(ctx, &tcg_gen_qemu_st64, ra, rb, disp16, 1, 0);
2802 break;
2803 case 0x28:
2804 /* LDL */
2805 gen_load_mem(ctx, &tcg_gen_qemu_ld32s, ra, rb, disp16, 0, 0);
2806 break;
2807 case 0x29:
2808 /* LDQ */
2809 gen_load_mem(ctx, &tcg_gen_qemu_ld64, ra, rb, disp16, 0, 0);
2810 break;
2811 case 0x2A:
2812 /* LDL_L */
2813 gen_load_mem(ctx, &gen_qemu_ldl_l, ra, rb, disp16, 0, 0);
2814 break;
2815 case 0x2B:
2816 /* LDQ_L */
2817 gen_load_mem(ctx, &gen_qemu_ldq_l, ra, rb, disp16, 0, 0);
2818 break;
2819 case 0x2C:
2820 /* STL */
2821 gen_store_mem(ctx, &tcg_gen_qemu_st32, ra, rb, disp16, 0, 0);
2822 break;
2823 case 0x2D:
2824 /* STQ */
2825 gen_store_mem(ctx, &tcg_gen_qemu_st64, ra, rb, disp16, 0, 0);
2826 break;
2827 case 0x2E:
2828 /* STL_C */
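/* Store-conditional: gen_store_conditional fails unless the address
   matches cpu_lock_addr, and it models the architectural lock_flag
   with a compare-and-swap against cpu_lock_value; Ra receives the
   0/1 success result. */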
2829 ret = gen_store_conditional(ctx, ra, rb, disp16,
2830 ctx->mem_idx, MO_LESL);
2831 break;
2832 case 0x2F:
2833 /* STQ_C */
2834 ret = gen_store_conditional(ctx, ra, rb, disp16,
2835 ctx->mem_idx, MO_LEQ);
2836 break;
2837 case 0x30:
2838 /* BR */
2839 ret = gen_bdirect(ctx, ra, disp21);
2840 break;
2841 case 0x31: /* FBEQ */
2842 ret = gen_fbcond(ctx, TCG_COND_EQ, ra, disp21);
2843 break;
2844 case 0x32: /* FBLT */
2845 ret = gen_fbcond(ctx, TCG_COND_LT, ra, disp21);
2846 break;
2847 case 0x33: /* FBLE */
2848 ret = gen_fbcond(ctx, TCG_COND_LE, ra, disp21);
2849 break;
2850 case 0x34:
2851 /* BSR */
2852 ret = gen_bdirect(ctx, ra, disp21);
2853 break;
2854 case 0x35: /* FBNE */
2855 ret = gen_fbcond(ctx, TCG_COND_NE, ra, disp21);
2856 break;
2857 case 0x36: /* FBGE */
2858 ret = gen_fbcond(ctx, TCG_COND_GE, ra, disp21);
2859 break;
2860 case 0x37: /* FBGT */
2861 ret = gen_fbcond(ctx, TCG_COND_GT, ra, disp21);
2862 break;
2863 case 0x38:
2864 /* BLBC */
2865 ret = gen_bcond(ctx, TCG_COND_EQ, ra, disp21, 1);
2866 break;
2867 case 0x39:
2868 /* BEQ */
2869 ret = gen_bcond(ctx, TCG_COND_EQ, ra, disp21, 0);
2870 break;
2871 case 0x3A:
2872 /* BLT */
2873 ret = gen_bcond(ctx, TCG_COND_LT, ra, disp21, 0);
2874 break;
2875 case 0x3B:
2876 /* BLE */
2877 ret = gen_bcond(ctx, TCG_COND_LE, ra, disp21, 0);
2878 break;
2879 case 0x3C:
2880 /* BLBS */
2881 ret = gen_bcond(ctx, TCG_COND_NE, ra, disp21, 1);
2882 break;
2883 case 0x3D:
2884 /* BNE */
2885 ret = gen_bcond(ctx, TCG_COND_NE, ra, disp21, 0);
2886 break;
2887 case 0x3E:
2888 /* BGE */
2889 ret = gen_bcond(ctx, TCG_COND_GE, ra, disp21, 0);
2890 break;
2891 case 0x3F:
2892 /* BGT */
2893 ret = gen_bcond(ctx, TCG_COND_GT, ra, disp21, 0);
2894 break;
2895 invalid_opc:
2896 ret = gen_invalid(ctx);
2897 break;
2898 }
2900 return ret;
2901 }
2903 void gen_intermediate_code(CPUAlphaState *env, struct TranslationBlock *tb)
2904 {
2905 AlphaCPU *cpu = alpha_env_get_cpu(env);
2906 CPUState *cs = CPU(cpu);
2907 DisasContext ctx, *ctxp = &ctx;
2908 target_ulong pc_start;
2909 target_ulong pc_mask;
2910 uint32_t insn;
2911 ExitStatus ret;
2912 int num_insns;
2913 int max_insns;
2915 pc_start = tb->pc;
2917 ctx.tb = tb;
2918 ctx.pc = pc_start;
2919 ctx.mem_idx = cpu_mmu_index(env, false);
2920 ctx.implver = env->implver;
2921 ctx.singlestep_enabled = cs->singlestep_enabled;
2923 #ifdef CONFIG_USER_ONLY
2924 ctx.ir = cpu_std_ir;
2925 #else
2926 ctx.palbr = env->palbr;
2927 ctx.ir = (tb->flags & TB_FLAGS_PAL_MODE ? cpu_pal_ir : cpu_std_ir);
2928 #endif
2930 /* ??? Every TB begins with unset rounding mode, to be initialized on
2931 the first fp insn of the TB. Alternatively we could define a proper
2932 default for every TB (e.g. QUAL_RM_N or QUAL_RM_D) and make sure
2933 to reset the FP_STATUS to that default at the end of any TB that
2934 changes the default. We could even (gasp) dynamically figure out
2935 what default would be most efficient given the running program. */
2936 ctx.tb_rm = -1;
2937 /* Similarly for flush-to-zero. */
2938 ctx.tb_ftz = -1;
2940 TCGV_UNUSED_I64(ctx.zero);
2941 TCGV_UNUSED_I64(ctx.sink);
2942 TCGV_UNUSED_I64(ctx.lit);
2944 num_insns = 0;
2945 max_insns = tb->cflags & CF_COUNT_MASK;
2946 if (max_insns == 0) {
2947 max_insns = CF_COUNT_MASK;
2948 }
2949 if (max_insns > TCG_MAX_INSNS) {
2950 max_insns = TCG_MAX_INSNS;
2951 }
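/* A TB normally must not cross a page boundary; superpages are
   linearly mapped, so there only the 41-bit region limit applies. */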
2953 if (in_superpage(&ctx, pc_start)) {
2954 pc_mask = (1ULL << 41) - 1;
2955 } else {
2956 pc_mask = ~TARGET_PAGE_MASK;
2957 }
2959 gen_tb_start(tb);
2960 do {
2961 tcg_gen_insn_start(ctx.pc);
2962 num_insns++;
2964 if (unlikely(cpu_breakpoint_test(cs, ctx.pc, BP_ANY))) {
2965 ret = gen_excp(&ctx, EXCP_DEBUG, 0);
2966 /* The address covered by the breakpoint must be included in
2967 [tb->pc, tb->pc + tb->size) in order for it to be
2968 properly cleared -- thus we increment the PC here so that
2969 the logic setting tb->size below does the right thing. */
2970 ctx.pc += 4;
2971 break;
2972 }
2973 if (num_insns == max_insns && (tb->cflags & CF_LAST_IO)) {
2974 gen_io_start();
2975 }
2976 insn = cpu_ldl_code(env, ctx.pc);
2978 ctx.pc += 4;
2979 ret = translate_one(ctxp, insn);
2980 free_context_temps(ctxp);
2982 /* If we reach a page boundary, fill the op buffer, exhaust
2983 the instruction count, or are single stepping, stop generation. */
2984 if (ret == NO_EXIT
2985 && ((ctx.pc & pc_mask) == 0
2986 || tcg_op_buf_full()
2987 || num_insns >= max_insns
2988 || singlestep
2989 || ctx.singlestep_enabled)) {
2990 ret = EXIT_FALLTHRU;
2991 }
2992 } while (ret == NO_EXIT);
2994 if (tb->cflags & CF_LAST_IO) {
2995 gen_io_end();
2996 }
2998 switch (ret) {
2999 case EXIT_GOTO_TB:
3000 case EXIT_NORETURN:
3001 break;
3002 case EXIT_FALLTHRU:
3003 if (use_goto_tb(&ctx, ctx.pc)) {
3004 tcg_gen_goto_tb(0);
3005 tcg_gen_movi_i64(cpu_pc, ctx.pc);
3006 tcg_gen_exit_tb((uintptr_t)ctx.tb);
3007 }
3008 /* FALLTHRU */
3009 case EXIT_PC_STALE:
3010 tcg_gen_movi_i64(cpu_pc, ctx.pc);
3011 /* FALLTHRU */
3012 case EXIT_PC_UPDATED:
3013 if (ctx.singlestep_enabled) {
3014 gen_excp_1(EXCP_DEBUG, 0);
3015 } else {
3016 tcg_gen_exit_tb(0);
3017 }
3018 break;
3019 default:
3020 g_assert_not_reached();
3021 }
3023 gen_tb_end(tb, num_insns);
3025 tb->size = ctx.pc - pc_start;
3026 tb->icount = num_insns;
3028 #ifdef DEBUG_DISAS
3029 if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)
3030 && qemu_log_in_addr_range(pc_start)) {
3031 qemu_log_lock();
3032 qemu_log("IN: %s\n", lookup_symbol(pc_start));
3033 log_target_disas(cs, pc_start, ctx.pc - pc_start, 1);
3034 qemu_log("\n");
3035 qemu_log_unlock();
3036 }
3037 #endif
3038 }
3040 void restore_state_to_opc(CPUAlphaState *env, TranslationBlock *tb,
3041 target_ulong *data)
3042 {
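/* tcg_gen_insn_start() in gen_intermediate_code records only the PC
   per instruction, so the PC is all there is to restore. */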
3043 env->pc = data[0];
3044 }