/*
 * Alpha emulation cpu translation for qemu.
 *
 * Copyright (c) 2007 Jocelyn Mayer
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "cpu.h"
#include "sysemu/cpus.h"
#include "disas/disas.h"
#include "qemu/host-utils.h"
#include "exec/exec-all.h"
#include "tcg-op.h"
#include "exec/cpu_ldst.h"
#include "exec/helper-proto.h"
#include "exec/helper-gen.h"
#include "trace-tcg.h"
#include "exec/log.h"

#undef ALPHA_DEBUG_DISAS
#define CONFIG_SOFTFLOAT_INLINE

#ifdef ALPHA_DEBUG_DISAS
#  define LOG_DISAS(...) qemu_log_mask(CPU_LOG_TB_IN_ASM, ## __VA_ARGS__)
#else
#  define LOG_DISAS(...) do { } while (0)
#endif
typedef struct DisasContext DisasContext;
struct DisasContext {
    struct TranslationBlock *tb;
    uint64_t pc;
#ifndef CONFIG_USER_ONLY
    uint64_t palbr;
#endif
    uint32_t tbflags;
    int mem_idx;

    /* implver and amask values for this CPU.  */
    int implver;
    int amask;

    /* Current rounding mode for this TB.  */
    int tb_rm;
    /* Current flush-to-zero setting for this TB.  */
    int tb_ftz;

    /* The set of registers active in the current context.  */
    TCGv *ir;

    /* Temporaries for $31 and $f31 as source and destination.  */
    TCGv zero;
    TCGv sink;
    /* Temporary for immediate constants.  */
    TCGv lit;

    bool singlestep_enabled;
};
/* Return values from translate_one, indicating the state of the TB.
   Note that zero indicates that we are not exiting the TB.  */

typedef enum {
    NO_EXIT,

    /* We have emitted one or more goto_tb.  No fixup required.  */
    EXIT_GOTO_TB,

    /* We are not using a goto_tb (for whatever reason), but have updated
       the PC (for whatever reason), so there's no need to do it again on
       exiting the TB.  */
    EXIT_PC_UPDATED,
    EXIT_PC_UPDATED_NOCHAIN,

    /* We are exiting the TB, but have neither emitted a goto_tb, nor
       updated the PC for the next instruction to be executed.  */
    EXIT_PC_STALE,

    /* We are exiting the TB due to page crossing or space constraints.  */
    EXIT_FALLTHRU,

    /* We are ending the TB with a noreturn function call, e.g. longjmp.
       No following code will be executed.  */
    EXIT_NORETURN,
} ExitStatus;
/* global register indexes */
static TCGv_env cpu_env;
static TCGv cpu_std_ir[31];
static TCGv cpu_fir[31];
static TCGv cpu_pc;
static TCGv cpu_lock_addr;
static TCGv cpu_lock_value;

#ifndef CONFIG_USER_ONLY
static TCGv cpu_pal_ir[31];
#endif

#include "exec/gen-icount.h"
void alpha_translate_init(void)
{
#define DEF_VAR(V)  { &cpu_##V, #V, offsetof(CPUAlphaState, V) }

    typedef struct { TCGv *var; const char *name; int ofs; } GlobalVar;
    static const GlobalVar vars[] = {
        DEF_VAR(pc),
        DEF_VAR(lock_addr),
        DEF_VAR(lock_value),
    };

#undef DEF_VAR

    /* Use the symbolic register names that match the disassembler.  */
    static const char greg_names[31][4] = {
        "v0", "t0", "t1", "t2", "t3", "t4", "t5", "t6",
        "t7", "s0", "s1", "s2", "s3", "s4", "s5", "fp",
        "a0", "a1", "a2", "a3", "a4", "a5", "t8", "t9",
        "t10", "t11", "ra", "t12", "at", "gp", "sp"
    };
    static const char freg_names[31][4] = {
        "f0", "f1", "f2", "f3", "f4", "f5", "f6", "f7",
        "f8", "f9", "f10", "f11", "f12", "f13", "f14", "f15",
        "f16", "f17", "f18", "f19", "f20", "f21", "f22", "f23",
        "f24", "f25", "f26", "f27", "f28", "f29", "f30"
    };
#ifndef CONFIG_USER_ONLY
    static const char shadow_names[8][8] = {
        "pal_t7", "pal_s0", "pal_s1", "pal_s2",
        "pal_s3", "pal_s4", "pal_s5", "pal_t11"
    };
#endif

    static bool done_init = 0;
    int i;

    if (done_init) {
        return;
    }
    done_init = 1;

    cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");
    tcg_ctx.tcg_env = cpu_env;

    for (i = 0; i < 31; i++) {
        cpu_std_ir[i] = tcg_global_mem_new_i64(cpu_env,
                                               offsetof(CPUAlphaState, ir[i]),
                                               greg_names[i]);
    }

    for (i = 0; i < 31; i++) {
        cpu_fir[i] = tcg_global_mem_new_i64(cpu_env,
                                            offsetof(CPUAlphaState, fir[i]),
                                            freg_names[i]);
    }

#ifndef CONFIG_USER_ONLY
    memcpy(cpu_pal_ir, cpu_std_ir, sizeof(cpu_pal_ir));
    for (i = 0; i < 8; i++) {
        int r = (i == 7 ? 25 : i + 8);
        cpu_pal_ir[r] = tcg_global_mem_new_i64(cpu_env,
                                               offsetof(CPUAlphaState,
                                                        shadow[i]),
                                               shadow_names[i]);
    }
#endif

    for (i = 0; i < ARRAY_SIZE(vars); ++i) {
        const GlobalVar *v = &vars[i];
        *v->var = tcg_global_mem_new_i64(cpu_env, v->ofs, v->name);
    }
}
static TCGv load_zero(DisasContext *ctx)
{
    if (TCGV_IS_UNUSED_I64(ctx->zero)) {
        ctx->zero = tcg_const_i64(0);
    }
    return ctx->zero;
}

static TCGv dest_sink(DisasContext *ctx)
{
    if (TCGV_IS_UNUSED_I64(ctx->sink)) {
        ctx->sink = tcg_temp_new();
    }
    return ctx->sink;
}

static void free_context_temps(DisasContext *ctx)
{
    if (!TCGV_IS_UNUSED_I64(ctx->sink)) {
        tcg_gen_discard_i64(ctx->sink);
        tcg_temp_free(ctx->sink);
        TCGV_UNUSED_I64(ctx->sink);
    }
    if (!TCGV_IS_UNUSED_I64(ctx->zero)) {
        tcg_temp_free(ctx->zero);
        TCGV_UNUSED_I64(ctx->zero);
    }
    if (!TCGV_IS_UNUSED_I64(ctx->lit)) {
        tcg_temp_free(ctx->lit);
        TCGV_UNUSED_I64(ctx->lit);
    }
}
static TCGv load_gpr(DisasContext *ctx, unsigned reg)
{
    if (likely(reg < 31)) {
        return ctx->ir[reg];
    } else {
        return load_zero(ctx);
    }
}

static TCGv load_gpr_lit(DisasContext *ctx, unsigned reg,
                         uint8_t lit, bool islit)
{
    if (islit) {
        ctx->lit = tcg_const_i64(lit);
        return ctx->lit;
    } else if (likely(reg < 31)) {
        return ctx->ir[reg];
    } else {
        return load_zero(ctx);
    }
}

static TCGv dest_gpr(DisasContext *ctx, unsigned reg)
{
    if (likely(reg < 31)) {
        return ctx->ir[reg];
    } else {
        return dest_sink(ctx);
    }
}

static TCGv load_fpr(DisasContext *ctx, unsigned reg)
{
    if (likely(reg < 31)) {
        return cpu_fir[reg];
    } else {
        return load_zero(ctx);
    }
}

static TCGv dest_fpr(DisasContext *ctx, unsigned reg)
{
    if (likely(reg < 31)) {
        return cpu_fir[reg];
    } else {
        return dest_sink(ctx);
    }
}
static int get_flag_ofs(unsigned shift)
{
    int ofs = offsetof(CPUAlphaState, flags);
#ifdef HOST_WORDS_BIGENDIAN
    ofs += 3 - (shift / 8);
#else
    ofs += shift / 8;
#endif
    return ofs;
}

static void ld_flag_byte(TCGv val, unsigned shift)
{
    tcg_gen_ld8u_i64(val, cpu_env, get_flag_ofs(shift));
}

static void st_flag_byte(TCGv val, unsigned shift)
{
    tcg_gen_st8_i64(val, cpu_env, get_flag_ofs(shift));
}
static void gen_excp_1(int exception, int error_code)
{
    TCGv_i32 tmp1, tmp2;

    tmp1 = tcg_const_i32(exception);
    tmp2 = tcg_const_i32(error_code);
    gen_helper_excp(cpu_env, tmp1, tmp2);
    tcg_temp_free_i32(tmp2);
    tcg_temp_free_i32(tmp1);
}

static ExitStatus gen_excp(DisasContext *ctx, int exception, int error_code)
{
    tcg_gen_movi_i64(cpu_pc, ctx->pc);
    gen_excp_1(exception, error_code);
    return EXIT_NORETURN;
}

static inline ExitStatus gen_invalid(DisasContext *ctx)
{
    return gen_excp(ctx, EXCP_OPCDEC, 0);
}
static inline void gen_qemu_ldf(TCGv t0, TCGv t1, int flags)
{
    TCGv_i32 tmp32 = tcg_temp_new_i32();
    tcg_gen_qemu_ld_i32(tmp32, t1, flags, MO_LEUL);
    gen_helper_memory_to_f(t0, tmp32);
    tcg_temp_free_i32(tmp32);
}

static inline void gen_qemu_ldg(TCGv t0, TCGv t1, int flags)
{
    TCGv tmp = tcg_temp_new();
    tcg_gen_qemu_ld_i64(tmp, t1, flags, MO_LEQ);
    gen_helper_memory_to_g(t0, tmp);
    tcg_temp_free(tmp);
}

static inline void gen_qemu_lds(TCGv t0, TCGv t1, int flags)
{
    TCGv_i32 tmp32 = tcg_temp_new_i32();
    tcg_gen_qemu_ld_i32(tmp32, t1, flags, MO_LEUL);
    gen_helper_memory_to_s(t0, tmp32);
    tcg_temp_free_i32(tmp32);
}

static inline void gen_qemu_ldl_l(TCGv t0, TCGv t1, int flags)
{
    tcg_gen_qemu_ld_i64(t0, t1, flags, MO_LESL);
    tcg_gen_mov_i64(cpu_lock_addr, t1);
    tcg_gen_mov_i64(cpu_lock_value, t0);
}

static inline void gen_qemu_ldq_l(TCGv t0, TCGv t1, int flags)
{
    tcg_gen_qemu_ld_i64(t0, t1, flags, MO_LEQ);
    tcg_gen_mov_i64(cpu_lock_addr, t1);
    tcg_gen_mov_i64(cpu_lock_value, t0);
}

static inline void gen_load_mem(DisasContext *ctx,
                                void (*tcg_gen_qemu_load)(TCGv t0, TCGv t1,
                                                          int flags),
                                int ra, int rb, int32_t disp16, bool fp,
                                bool clear)
{
    TCGv tmp, addr, va;

    /* LDQ_U with ra $31 is UNOP.  Other various loads are forms of
       prefetches, which we can treat as nops.  No worries about
       missed exceptions here.  */
    if (unlikely(ra == 31)) {
        return;
    }

    tmp = tcg_temp_new();
    addr = load_gpr(ctx, rb);

    if (disp16) {
        tcg_gen_addi_i64(tmp, addr, disp16);
        addr = tmp;
    }
    if (clear) {
        tcg_gen_andi_i64(tmp, addr, ~0x7);
        addr = tmp;
    }

    va = (fp ? cpu_fir[ra] : ctx->ir[ra]);
    tcg_gen_qemu_load(va, addr, ctx->mem_idx);

    tcg_temp_free(tmp);
}
static inline void gen_qemu_stf(TCGv t0, TCGv t1, int flags)
{
    TCGv_i32 tmp32 = tcg_temp_new_i32();
    gen_helper_f_to_memory(tmp32, t0);
    tcg_gen_qemu_st_i32(tmp32, t1, flags, MO_LEUL);
    tcg_temp_free_i32(tmp32);
}

static inline void gen_qemu_stg(TCGv t0, TCGv t1, int flags)
{
    TCGv tmp = tcg_temp_new();
    gen_helper_g_to_memory(tmp, t0);
    tcg_gen_qemu_st_i64(tmp, t1, flags, MO_LEQ);
    tcg_temp_free(tmp);
}

static inline void gen_qemu_sts(TCGv t0, TCGv t1, int flags)
{
    TCGv_i32 tmp32 = tcg_temp_new_i32();
    gen_helper_s_to_memory(tmp32, t0);
    tcg_gen_qemu_st_i32(tmp32, t1, flags, MO_LEUL);
    tcg_temp_free_i32(tmp32);
}

static inline void gen_store_mem(DisasContext *ctx,
                                 void (*tcg_gen_qemu_store)(TCGv t0, TCGv t1,
                                                            int flags),
                                 int ra, int rb, int32_t disp16, bool fp,
                                 bool clear)
{
    TCGv tmp, addr, va;

    tmp = tcg_temp_new();
    addr = load_gpr(ctx, rb);

    if (disp16) {
        tcg_gen_addi_i64(tmp, addr, disp16);
        addr = tmp;
    }
    if (clear) {
        tcg_gen_andi_i64(tmp, addr, ~0x7);
        addr = tmp;
    }

    va = (fp ? load_fpr(ctx, ra) : load_gpr(ctx, ra));
    tcg_gen_qemu_store(va, addr, ctx->mem_idx);

    tcg_temp_free(tmp);
}
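/* (Added note) On the store-conditional expansion below: an LDx_L records
   its address and loaded data in the lock_addr/lock_value globals, and the
   matching STx_C is emulated with an atomic compare-and-swap.  If memory
   still holds lock_value, the cmpxchg installs the new data and returns the
   unchanged value, so the setcond writes 1 to ra; any intervening store
   makes the comparison fail and ra receives 0.  Resetting lock_addr to -1
   afterwards guarantees that a stray STx_C without a preceding LDx_L always
   fails.  */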
static ExitStatus gen_store_conditional(DisasContext *ctx, int ra, int rb,
                                        int32_t disp16, int mem_idx,
                                        TCGMemOp op)
{
    TCGLabel *lab_fail, *lab_done;
    TCGv addr, val;

    addr = tcg_temp_new_i64();
    tcg_gen_addi_i64(addr, load_gpr(ctx, rb), disp16);
    free_context_temps(ctx);

    lab_fail = gen_new_label();
    lab_done = gen_new_label();
    tcg_gen_brcond_i64(TCG_COND_NE, addr, cpu_lock_addr, lab_fail);
    tcg_temp_free_i64(addr);

    val = tcg_temp_new_i64();
    tcg_gen_atomic_cmpxchg_i64(val, cpu_lock_addr, cpu_lock_value,
                               load_gpr(ctx, ra), mem_idx, op);
    free_context_temps(ctx);

    if (ra != 31) {
        tcg_gen_setcond_i64(TCG_COND_EQ, ctx->ir[ra], val, cpu_lock_value);
    }
    tcg_temp_free_i64(val);
    tcg_gen_br(lab_done);

    gen_set_label(lab_fail);
    if (ra != 31) {
        tcg_gen_movi_i64(ctx->ir[ra], 0);
    }

    gen_set_label(lab_done);
    tcg_gen_movi_i64(cpu_lock_addr, -1);
    return NO_EXIT;
}
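/* (Added note) The superpage test below accepts kernel-mode addresses that
   are properly sign-extended from TARGET_VIRT_ADDR_SPACE_BITS and whose
   bits <42:41> equal 2 -- e.g. the direct-mapped KSEG region beginning at
   0xfffffc0000000000.  Such pages never change protection, which is what
   lets use_goto_tb chain across page boundaries there.  */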
static bool in_superpage(DisasContext *ctx, int64_t addr)
{
#ifndef CONFIG_USER_ONLY
    return ((ctx->tbflags & ENV_FLAG_PS_USER) == 0
            && addr >> TARGET_VIRT_ADDR_SPACE_BITS == -1
            && ((addr >> 41) & 3) == 2);
#else
    return false;
#endif
}
static bool use_exit_tb(DisasContext *ctx)
{
    return ((ctx->tb->cflags & CF_LAST_IO)
            || ctx->singlestep_enabled
            || singlestep);
}

static bool use_goto_tb(DisasContext *ctx, uint64_t dest)
{
    /* Suppress goto_tb in the case of single-stepping and IO.  */
    if (unlikely(use_exit_tb(ctx))) {
        return false;
    }
#ifndef CONFIG_USER_ONLY
    /* If the destination is in the superpage, the page perms can't change.  */
    if (in_superpage(ctx, dest)) {
        return true;
    }
    /* Check for the dest on the same page as the start of the TB.  */
    return ((ctx->tb->pc ^ dest) & TARGET_PAGE_MASK) == 0;
#else
    return true;
#endif
}
static ExitStatus gen_bdirect(DisasContext *ctx, int ra, int32_t disp)
{
    uint64_t dest = ctx->pc + (disp << 2);

    if (ra != 31) {
        tcg_gen_movi_i64(ctx->ir[ra], ctx->pc);
    }

    /* Notice branch-to-next; used to initialize RA with the PC.  */
    if (disp == 0) {
        return 0;
    } else if (use_goto_tb(ctx, dest)) {
        tcg_gen_goto_tb(0);
        tcg_gen_movi_i64(cpu_pc, dest);
        tcg_gen_exit_tb((uintptr_t)ctx->tb);
        return EXIT_GOTO_TB;
    } else {
        tcg_gen_movi_i64(cpu_pc, dest);
        return EXIT_PC_UPDATED;
    }
}

static ExitStatus gen_bcond_internal(DisasContext *ctx, TCGCond cond,
                                     TCGv cmp, int32_t disp)
{
    uint64_t dest = ctx->pc + (disp << 2);
    TCGLabel *lab_true = gen_new_label();

    if (use_goto_tb(ctx, dest)) {
        tcg_gen_brcondi_i64(cond, cmp, 0, lab_true);

        tcg_gen_goto_tb(0);
        tcg_gen_movi_i64(cpu_pc, ctx->pc);
        tcg_gen_exit_tb((uintptr_t)ctx->tb);

        gen_set_label(lab_true);
        tcg_gen_goto_tb(1);
        tcg_gen_movi_i64(cpu_pc, dest);
        tcg_gen_exit_tb((uintptr_t)ctx->tb + 1);

        return EXIT_GOTO_TB;
    } else {
        TCGv_i64 z = tcg_const_i64(0);
        TCGv_i64 d = tcg_const_i64(dest);
        TCGv_i64 p = tcg_const_i64(ctx->pc);

        tcg_gen_movcond_i64(cond, cpu_pc, cmp, z, d, p);

        tcg_temp_free_i64(z);
        tcg_temp_free_i64(d);
        tcg_temp_free_i64(p);
        return EXIT_PC_UPDATED;
    }
}

static ExitStatus gen_bcond(DisasContext *ctx, TCGCond cond, int ra,
                            int32_t disp, int mask)
{
    if (mask) {
        TCGv tmp = tcg_temp_new();
        ExitStatus ret;

        tcg_gen_andi_i64(tmp, load_gpr(ctx, ra), 1);
        ret = gen_bcond_internal(ctx, cond, tmp, disp);
        tcg_temp_free(tmp);
        return ret;
    }
    return gen_bcond_internal(ctx, cond, load_gpr(ctx, ra), disp);
}
/* Fold -0.0 for comparison with COND.  */

static void gen_fold_mzero(TCGCond cond, TCGv dest, TCGv src)
{
    uint64_t mzero = 1ull << 63;

    switch (cond) {
    case TCG_COND_LE:
    case TCG_COND_GT:
        /* For <= or >, the -0.0 value directly compares the way we want.  */
        tcg_gen_mov_i64(dest, src);
        break;

    case TCG_COND_EQ:
    case TCG_COND_NE:
        /* For == or !=, we can simply mask off the sign bit and compare.  */
        tcg_gen_andi_i64(dest, src, mzero - 1);
        break;

    case TCG_COND_GE:
    case TCG_COND_LT:
        /* For >= or <, map -0.0 to +0.0 via comparison and mask.  */
        tcg_gen_setcondi_i64(TCG_COND_NE, dest, src, mzero);
        tcg_gen_neg_i64(dest, dest);
        tcg_gen_and_i64(dest, dest, src);
        break;
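
        /* (Added note) Worked through: when src == 0x8000000000000000
           (-0.0), the setcond yields 0, the negation keeps it 0, and the
           AND produces +0.0; for any other src the setcond/neg pair yields
           an all-ones mask and the AND passes src through unchanged.  */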

    default:
        abort();
    }
}

static ExitStatus gen_fbcond(DisasContext *ctx, TCGCond cond, int ra,
                             int32_t disp)
{
    TCGv cmp_tmp = tcg_temp_new();
    ExitStatus ret;

    gen_fold_mzero(cond, cmp_tmp, load_fpr(ctx, ra));
    ret = gen_bcond_internal(ctx, cond, cmp_tmp, disp);
    tcg_temp_free(cmp_tmp);
    return ret;
}

static void gen_fcmov(DisasContext *ctx, TCGCond cond, int ra, int rb, int rc)
{
    TCGv_i64 va, vb, z;

    z = load_zero(ctx);
    vb = load_fpr(ctx, rb);
    va = tcg_temp_new();
    gen_fold_mzero(cond, va, load_fpr(ctx, ra));

    tcg_gen_movcond_i64(cond, dest_fpr(ctx, rc), va, z, vb, load_fpr(ctx, rc));

    tcg_temp_free(va);
}
#define QUAL_RM_N       0x080   /* Round mode nearest even */
#define QUAL_RM_C       0x000   /* Round mode chopped */
#define QUAL_RM_M       0x040   /* Round mode minus infinity */
#define QUAL_RM_D       0x0c0   /* Round mode dynamic */
#define QUAL_RM_MASK    0x0c0

#define QUAL_U          0x100   /* Underflow enable (fp output) */
#define QUAL_V          0x100   /* Overflow enable (int output) */
#define QUAL_S          0x400   /* Software completion enable */
#define QUAL_I          0x200   /* Inexact detection enable */

static void gen_qual_roundmode(DisasContext *ctx, int fn11)
{
    TCGv_i32 tmp;

    fn11 &= QUAL_RM_MASK;
    if (fn11 == ctx->tb_rm) {
        return;
    }
    ctx->tb_rm = fn11;

    tmp = tcg_temp_new_i32();
    switch (fn11) {
    case QUAL_RM_N:
        tcg_gen_movi_i32(tmp, float_round_nearest_even);
        break;
    case QUAL_RM_C:
        tcg_gen_movi_i32(tmp, float_round_to_zero);
        break;
    case QUAL_RM_M:
        tcg_gen_movi_i32(tmp, float_round_down);
        break;
    case QUAL_RM_D:
        tcg_gen_ld8u_i32(tmp, cpu_env,
                         offsetof(CPUAlphaState, fpcr_dyn_round));
        break;
    }

#if defined(CONFIG_SOFTFLOAT_INLINE)
    /* ??? The "fpu/softfloat.h" interface is to call set_float_rounding_mode.
       With CONFIG_SOFTFLOAT that expands to an out-of-line call that just
       sets the one field.  */
    tcg_gen_st8_i32(tmp, cpu_env,
                    offsetof(CPUAlphaState, fp_status.float_rounding_mode));
#else
    gen_helper_setroundmode(tmp);
#endif

    tcg_temp_free_i32(tmp);
}

static void gen_qual_flushzero(DisasContext *ctx, int fn11)
{
    TCGv_i32 tmp;

    fn11 &= QUAL_U;
    if (fn11 == ctx->tb_ftz) {
        return;
    }
    ctx->tb_ftz = fn11;

    tmp = tcg_temp_new_i32();
    if (fn11) {
        /* Underflow is enabled, use the FPCR setting.  */
        tcg_gen_ld8u_i32(tmp, cpu_env,
                         offsetof(CPUAlphaState, fpcr_flush_to_zero));
    } else {
        /* Underflow is disabled, force flush-to-zero.  */
        tcg_gen_movi_i32(tmp, 1);
    }

#if defined(CONFIG_SOFTFLOAT_INLINE)
    tcg_gen_st8_i32(tmp, cpu_env,
                    offsetof(CPUAlphaState, fp_status.flush_to_zero));
#else
    gen_helper_setflushzero(tmp);
#endif

    tcg_temp_free_i32(tmp);
}
static TCGv gen_ieee_input(DisasContext *ctx, int reg, int fn11, int is_cmp)
{
    TCGv val;

    if (unlikely(reg == 31)) {
        val = load_zero(ctx);
    } else {
        val = cpu_fir[reg];
        if ((fn11 & QUAL_S) == 0) {
            if (is_cmp) {
                gen_helper_ieee_input_cmp(cpu_env, val);
            } else {
                gen_helper_ieee_input(cpu_env, val);
            }
        } else {
#ifndef CONFIG_USER_ONLY
            /* In system mode, raise exceptions for denormals like real
               hardware.  In user mode, proceed as if the OS completion
               handler is handling the denormal as per spec.  */
            gen_helper_ieee_input_s(cpu_env, val);
#endif
        }
    }
    return val;
}

static void gen_fp_exc_raise(int rc, int fn11)
{
    /* ??? We ought to be able to do something with imprecise exceptions.
       E.g. notice we're still in the trap shadow of something within the
       TB and do not generate the code to signal the exception; end the TB
       when an exception is forced to arrive, either by consumption of a
       register value or TRAPB or EXCB.  */
    TCGv_i32 reg, ign;
    uint32_t ignore = 0;

    if (!(fn11 & QUAL_U)) {
        /* Note that QUAL_U == QUAL_V, so ignore either.  */
        ignore |= FPCR_UNF | FPCR_IOV;
    }
    if (!(fn11 & QUAL_I)) {
        ignore |= FPCR_INE;
    }
    ign = tcg_const_i32(ignore);

    /* ??? Pass in the regno of the destination so that the helper can
       set EXC_MASK, which contains a bitmask of destination registers
       that have caused arithmetic traps.  A simple userspace emulation
       does not require this.  We do need it for a guest kernel's entArith,
       or if we were to do something clever with imprecise exceptions.  */
    reg = tcg_const_i32(rc + 32);
    if (fn11 & QUAL_S) {
        gen_helper_fp_exc_raise_s(cpu_env, ign, reg);
    } else {
        gen_helper_fp_exc_raise(cpu_env, ign, reg);
    }

    tcg_temp_free_i32(reg);
    tcg_temp_free_i32(ign);
}
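/* (Added note) CVTLQ below unpacks the Alpha "longword in FP register"
   format, in which bits <31:30> of the 32-bit value live in register bits
   <63:62> and bits <29:0> live in register bits <58:29>.  The shri moves
   the low 30 bits into place, the sari extracts and sign-extends the high
   part, and the deposit merges them into a canonical sign-extended
   64-bit result.  */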
static void gen_cvtlq(TCGv vc, TCGv vb)
{
    TCGv tmp = tcg_temp_new();

    /* The arithmetic right shift here, plus the sign-extended mask below
       yields a sign-extended result without an explicit ext32s_i64.  */
    tcg_gen_shri_i64(tmp, vb, 29);
    tcg_gen_sari_i64(vc, vb, 32);
    tcg_gen_deposit_i64(vc, vc, tmp, 0, 30);

    tcg_temp_free(tmp);
}
static void gen_ieee_arith2(DisasContext *ctx,
                            void (*helper)(TCGv, TCGv_ptr, TCGv),
                            int rb, int rc, int fn11)
{
    TCGv vb;

    gen_qual_roundmode(ctx, fn11);
    gen_qual_flushzero(ctx, fn11);

    vb = gen_ieee_input(ctx, rb, fn11, 0);
    helper(dest_fpr(ctx, rc), cpu_env, vb);

    gen_fp_exc_raise(rc, fn11);
}

#define IEEE_ARITH2(name)                                       \
static inline void glue(gen_, name)(DisasContext *ctx,          \
                                    int rb, int rc, int fn11)   \
{                                                               \
    gen_ieee_arith2(ctx, gen_helper_##name, rb, rc, fn11);      \
}
IEEE_ARITH2(sqrts)
IEEE_ARITH2(sqrtt)
IEEE_ARITH2(cvtst)
IEEE_ARITH2(cvtts)

static void gen_cvttq(DisasContext *ctx, int rb, int rc, int fn11)
{
    TCGv vb, vc;

    /* No need to set flushzero, since we have an integer output.  */
    vb = gen_ieee_input(ctx, rb, fn11, 0);
    vc = dest_fpr(ctx, rc);

    /* Almost all integer conversions use cropped rounding;
       special case that.  */
    if ((fn11 & QUAL_RM_MASK) == QUAL_RM_C) {
        gen_helper_cvttq_c(vc, cpu_env, vb);
    } else {
        gen_qual_roundmode(ctx, fn11);
        gen_helper_cvttq(vc, cpu_env, vb);
    }
    gen_fp_exc_raise(rc, fn11);
}

static void gen_ieee_intcvt(DisasContext *ctx,
                            void (*helper)(TCGv, TCGv_ptr, TCGv),
                            int rb, int rc, int fn11)
{
    TCGv vb, vc;

    gen_qual_roundmode(ctx, fn11);
    vb = load_fpr(ctx, rb);
    vc = dest_fpr(ctx, rc);

    /* The only exception that can be raised by integer conversion
       is inexact.  Thus we only need to worry about exceptions when
       inexact handling is requested.  */
    if (fn11 & QUAL_I) {
        helper(vc, cpu_env, vb);
        gen_fp_exc_raise(rc, fn11);
    } else {
        helper(vc, cpu_env, vb);
    }
}

#define IEEE_INTCVT(name)                                       \
static inline void glue(gen_, name)(DisasContext *ctx,          \
                                    int rb, int rc, int fn11)   \
{                                                               \
    gen_ieee_intcvt(ctx, gen_helper_##name, rb, rc, fn11);      \
}
IEEE_INTCVT(cvtqs)
IEEE_INTCVT(cvtqt)
static void gen_cpy_mask(TCGv vc, TCGv va, TCGv vb, bool inv_a, uint64_t mask)
{
    TCGv vmask = tcg_const_i64(mask);
    TCGv tmp = tcg_temp_new_i64();

    if (inv_a) {
        tcg_gen_andc_i64(tmp, vmask, va);
    } else {
        tcg_gen_and_i64(tmp, va, vmask);
    }

    tcg_gen_andc_i64(vc, vb, vmask);
    tcg_gen_or_i64(vc, vc, tmp);

    tcg_temp_free(vmask);
    tcg_temp_free(tmp);
}

static void gen_ieee_arith3(DisasContext *ctx,
                            void (*helper)(TCGv, TCGv_ptr, TCGv, TCGv),
                            int ra, int rb, int rc, int fn11)
{
    TCGv va, vb, vc;

    gen_qual_roundmode(ctx, fn11);
    gen_qual_flushzero(ctx, fn11);

    va = gen_ieee_input(ctx, ra, fn11, 0);
    vb = gen_ieee_input(ctx, rb, fn11, 0);
    vc = dest_fpr(ctx, rc);
    helper(vc, cpu_env, va, vb);

    gen_fp_exc_raise(rc, fn11);
}

#define IEEE_ARITH3(name)                                               \
static inline void glue(gen_, name)(DisasContext *ctx,                  \
                                    int ra, int rb, int rc, int fn11)   \
{                                                                       \
    gen_ieee_arith3(ctx, gen_helper_##name, ra, rb, rc, fn11);          \
}
IEEE_ARITH3(adds)
IEEE_ARITH3(subs)
IEEE_ARITH3(muls)
IEEE_ARITH3(divs)
IEEE_ARITH3(addt)
IEEE_ARITH3(subt)
IEEE_ARITH3(mult)
IEEE_ARITH3(divt)

static void gen_ieee_compare(DisasContext *ctx,
                             void (*helper)(TCGv, TCGv_ptr, TCGv, TCGv),
                             int ra, int rb, int rc, int fn11)
{
    TCGv va, vb, vc;

    va = gen_ieee_input(ctx, ra, fn11, 1);
    vb = gen_ieee_input(ctx, rb, fn11, 1);
    vc = dest_fpr(ctx, rc);
    helper(vc, cpu_env, va, vb);

    gen_fp_exc_raise(rc, fn11);
}

#define IEEE_CMP3(name)                                                 \
static inline void glue(gen_, name)(DisasContext *ctx,                  \
                                    int ra, int rb, int rc, int fn11)   \
{                                                                       \
    gen_ieee_compare(ctx, gen_helper_##name, ra, rb, rc, fn11);         \
}
IEEE_CMP3(cmptun)
IEEE_CMP3(cmpteq)
IEEE_CMP3(cmptlt)
IEEE_CMP3(cmptle)
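
/* (Added note) Worked example for zapnot_mask below: each bit of LIT
   selects one byte lane, so lit == 0x03 expands to 0x000000000000ffff and
   lit == 0xf0 expands to 0xffffffff00000000.  */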
static inline uint64_t zapnot_mask(uint8_t lit)
{
    uint64_t mask = 0;
    int i;

    for (i = 0; i < 8; ++i) {
        if ((lit >> i) & 1) {
            mask |= 0xffull << (i * 8);
        }
    }
    return mask;
}

/* Implement zapnot with an immediate operand, which expands to some
   form of immediate AND.  This is a basic building block in the
   definition of many of the other byte manipulation instructions.  */
static void gen_zapnoti(TCGv dest, TCGv src, uint8_t lit)
{
    switch (lit) {
    case 0x00:
        tcg_gen_movi_i64(dest, 0);
        break;
    case 0x01:
        tcg_gen_ext8u_i64(dest, src);
        break;
    case 0x03:
        tcg_gen_ext16u_i64(dest, src);
        break;
    case 0x0f:
        tcg_gen_ext32u_i64(dest, src);
        break;
    case 0xff:
        tcg_gen_mov_i64(dest, src);
        break;
    default:
        tcg_gen_andi_i64(dest, src, zapnot_mask(lit));
        break;
    }
}
/* EXTWH, EXTLH, EXTQH */
static void gen_ext_h(DisasContext *ctx, TCGv vc, TCGv va, int rb, bool islit,
                      uint8_t lit, uint8_t byte_mask)
{
    if (islit) {
        int pos = (64 - lit * 8) & 0x3f;
        int len = cto32(byte_mask) * 8;
        if (pos < len) {
            tcg_gen_deposit_z_i64(vc, va, pos, len - pos);
        } else {
            tcg_gen_movi_i64(vc, 0);
        }
    } else {
        TCGv tmp = tcg_temp_new();
        tcg_gen_shli_i64(tmp, load_gpr(ctx, rb), 3);
        tcg_gen_neg_i64(tmp, tmp);
        tcg_gen_andi_i64(tmp, tmp, 0x3f);
        tcg_gen_shl_i64(vc, va, tmp);
        tcg_temp_free(tmp);
    }
    gen_zapnoti(vc, vc, byte_mask);
}

/* EXTBL, EXTWL, EXTLL, EXTQL */
static void gen_ext_l(DisasContext *ctx, TCGv vc, TCGv va, int rb, bool islit,
                      uint8_t lit, uint8_t byte_mask)
{
    if (islit) {
        int pos = (lit & 7) * 8;
        int len = cto32(byte_mask) * 8;
        if (pos + len >= 64) {
            len = 64 - pos;
        }
        tcg_gen_extract_i64(vc, va, pos, len);
    } else {
        TCGv tmp = tcg_temp_new();
        tcg_gen_andi_i64(tmp, load_gpr(ctx, rb), 7);
        tcg_gen_shli_i64(tmp, tmp, 3);
        tcg_gen_shr_i64(vc, va, tmp);
        tcg_temp_free(tmp);
        gen_zapnoti(vc, vc, byte_mask);
    }
}
/* INSWH, INSLH, INSQH */
static void gen_ins_h(DisasContext *ctx, TCGv vc, TCGv va, int rb, bool islit,
                      uint8_t lit, uint8_t byte_mask)
{
    if (islit) {
        int pos = 64 - (lit & 7) * 8;
        int len = cto32(byte_mask) * 8;
        if (pos < len) {
            tcg_gen_extract_i64(vc, va, pos, len - pos);
        } else {
            tcg_gen_movi_i64(vc, 0);
        }
    } else {
        TCGv tmp = tcg_temp_new();
        TCGv shift = tcg_temp_new();

        /* The instruction description has us left-shift the byte mask
           and extract bits <15:8> and apply that zap at the end.  This
           is equivalent to simply performing the zap first and shifting
           afterward.  */
        gen_zapnoti(tmp, va, byte_mask);

        /* If (B & 7) == 0, we need to shift by 64 and leave a zero.  Do this
           portably by splitting the shift into two parts: shift_count-1 and 1.
           Arrange for the -1 by using ones-complement instead of
           twos-complement in the negation: ~(B * 8) & 63.  */
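        /* (Added note) Numeric check: for (B & 7) == 1 the negation gives
           ~8 & 63 == 55, and 55 + 1 == 56 == 64 - 8, the desired shift;
           for (B & 7) == 0 it gives 63, and 63 + 1 == 64, which the split
           shift turns into the required all-zero result.  */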
        tcg_gen_shli_i64(shift, load_gpr(ctx, rb), 3);
        tcg_gen_not_i64(shift, shift);
        tcg_gen_andi_i64(shift, shift, 0x3f);

        tcg_gen_shr_i64(vc, tmp, shift);
        tcg_gen_shri_i64(vc, vc, 1);
        tcg_temp_free(shift);
        tcg_temp_free(tmp);
    }
}

/* INSBL, INSWL, INSLL, INSQL */
static void gen_ins_l(DisasContext *ctx, TCGv vc, TCGv va, int rb, bool islit,
                      uint8_t lit, uint8_t byte_mask)
{
    if (islit) {
        int pos = (lit & 7) * 8;
        int len = cto32(byte_mask) * 8;
        if (pos + len > 64) {
            len = 64 - pos;
        }
        tcg_gen_deposit_z_i64(vc, va, pos, len);
    } else {
        TCGv tmp = tcg_temp_new();
        TCGv shift = tcg_temp_new();

        /* The instruction description has us left-shift the byte mask
           and extract bits <15:8> and apply that zap at the end.  This
           is equivalent to simply performing the zap first and shifting
           afterward.  */
        gen_zapnoti(tmp, va, byte_mask);

        tcg_gen_andi_i64(shift, load_gpr(ctx, rb), 7);
        tcg_gen_shli_i64(shift, shift, 3);
        tcg_gen_shl_i64(vc, tmp, shift);
        tcg_temp_free(shift);
        tcg_temp_free(tmp);
    }
}
/* MSKWH, MSKLH, MSKQH */
static void gen_msk_h(DisasContext *ctx, TCGv vc, TCGv va, int rb, bool islit,
                      uint8_t lit, uint8_t byte_mask)
{
    if (islit) {
        gen_zapnoti(vc, va, ~((byte_mask << (lit & 7)) >> 8));
    } else {
        TCGv shift = tcg_temp_new();
        TCGv mask = tcg_temp_new();

        /* The instruction description is as above, where the byte_mask
           is shifted left, and then we extract bits <15:8>.  This can be
           emulated with a right-shift on the expanded byte mask.  This
           requires extra care because for an input <2:0> == 0 we need a
           shift of 64 bits in order to generate a zero.  This is done by
           splitting the shift into two parts, the variable shift - 1
           followed by a constant 1 shift.  The code we expand below is
           equivalent to ~(B * 8) & 63.  */

        tcg_gen_shli_i64(shift, load_gpr(ctx, rb), 3);
        tcg_gen_not_i64(shift, shift);
        tcg_gen_andi_i64(shift, shift, 0x3f);
        tcg_gen_movi_i64(mask, zapnot_mask(byte_mask));
        tcg_gen_shr_i64(mask, mask, shift);
        tcg_gen_shri_i64(mask, mask, 1);

        tcg_gen_andc_i64(vc, va, mask);

        tcg_temp_free(mask);
        tcg_temp_free(shift);
    }
}

/* MSKBL, MSKWL, MSKLL, MSKQL */
static void gen_msk_l(DisasContext *ctx, TCGv vc, TCGv va, int rb, bool islit,
                      uint8_t lit, uint8_t byte_mask)
{
    if (islit) {
        gen_zapnoti(vc, va, ~(byte_mask << (lit & 7)));
    } else {
        TCGv shift = tcg_temp_new();
        TCGv mask = tcg_temp_new();

        tcg_gen_andi_i64(shift, load_gpr(ctx, rb), 7);
        tcg_gen_shli_i64(shift, shift, 3);
        tcg_gen_movi_i64(mask, zapnot_mask(byte_mask));
        tcg_gen_shl_i64(mask, mask, shift);

        tcg_gen_andc_i64(vc, va, mask);

        tcg_temp_free(mask);
        tcg_temp_free(shift);
    }
}
static void gen_rx(DisasContext *ctx, int ra, int set)
{
    TCGv tmp;

    if (ra != 31) {
        ld_flag_byte(ctx->ir[ra], ENV_FLAG_RX_SHIFT);
    }

    /* Store the new flag value, not ctx->ir[ra]: the temporary holding
       SET is what must be written back, and ra may be $31.  */
    tmp = tcg_const_i64(set);
    st_flag_byte(tmp, ENV_FLAG_RX_SHIFT);
    tcg_temp_free(tmp);
}
static ExitStatus gen_call_pal(DisasContext *ctx, int palcode)
{
    /* We're emulating OSF/1 PALcode.  Many of these are trivial access
       to internal cpu registers.  */

    /* Unprivileged PAL call */
    if (palcode >= 0x80 && palcode < 0xC0) {
        switch (palcode) {
        case 0x86:
            /* IMB */
            /* No-op inside QEMU.  */
            break;
        case 0x9E:
            /* RDUNIQUE */
            tcg_gen_ld_i64(ctx->ir[IR_V0], cpu_env,
                           offsetof(CPUAlphaState, unique));
            break;
        case 0x9F:
            /* WRUNIQUE */
            tcg_gen_st_i64(ctx->ir[IR_A0], cpu_env,
                           offsetof(CPUAlphaState, unique));
            break;
        default:
            palcode &= 0xbf;
            goto do_call_pal;
        }
        return NO_EXIT;
    }

#ifndef CONFIG_USER_ONLY
    /* Privileged PAL code */
    if (palcode < 0x40 && (ctx->tbflags & ENV_FLAG_PS_USER) == 0) {
        switch (palcode) {
        case 0x01:
            /* CFLUSH */
            /* No-op inside QEMU.  */
            break;
        case 0x02:
            /* DRAINA */
            /* No-op inside QEMU.  */
            break;
        case 0x2D:
            /* WRVPTPTR */
            tcg_gen_st_i64(ctx->ir[IR_A0], cpu_env,
                           offsetof(CPUAlphaState, vptptr));
            break;
        case 0x31:
            /* WRVAL */
            tcg_gen_st_i64(ctx->ir[IR_A0], cpu_env,
                           offsetof(CPUAlphaState, sysval));
            break;
        case 0x32:
            /* RDVAL */
            tcg_gen_ld_i64(ctx->ir[IR_V0], cpu_env,
                           offsetof(CPUAlphaState, sysval));
            break;

        case 0x35:
            /* SWPIPL */
            /* Note that we already know we're in kernel mode, so we know
               that PS only contains the 3 IPL bits.  */
            ld_flag_byte(ctx->ir[IR_V0], ENV_FLAG_PS_SHIFT);

            /* But make sure and store only the 3 IPL bits from the user.  */
            {
                TCGv tmp = tcg_temp_new();
                tcg_gen_andi_i64(tmp, ctx->ir[IR_A0], PS_INT_MASK);
                st_flag_byte(tmp, ENV_FLAG_PS_SHIFT);
                tcg_temp_free(tmp);
            }

            /* Allow interrupts to be recognized right away.  */
            tcg_gen_movi_i64(cpu_pc, ctx->pc);
            return EXIT_PC_UPDATED_NOCHAIN;

        case 0x36:
            /* RDPS */
            ld_flag_byte(ctx->ir[IR_V0], ENV_FLAG_PS_SHIFT);
            break;

        case 0x38:
            /* WRUSP */
            tcg_gen_st_i64(ctx->ir[IR_A0], cpu_env,
                           offsetof(CPUAlphaState, usp));
            break;
        case 0x3A:
            /* RDUSP */
            tcg_gen_ld_i64(ctx->ir[IR_V0], cpu_env,
                           offsetof(CPUAlphaState, usp));
            break;
        case 0x3C:
            /* WHAMI */
            tcg_gen_ld32s_i64(ctx->ir[IR_V0], cpu_env,
                -offsetof(AlphaCPU, env) + offsetof(CPUState, cpu_index));
            break;
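
            /* (Added note) The negative offsetof above walks from the
               CPUAlphaState that cpu_env points at back to the enclosing
               AlphaCPU, then forward to the cpu_index field of its parent
               CPUState -- a container_of computation done in offsets so it
               can be expressed as a single TCG load.  */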
        case 0x3E:
            /* WTINT */
            {
                TCGv_i32 tmp = tcg_const_i32(1);
                tcg_gen_st_i32(tmp, cpu_env, -offsetof(AlphaCPU, env) +
                                             offsetof(CPUState, halted));
                tcg_temp_free_i32(tmp);
            }
            tcg_gen_movi_i64(ctx->ir[IR_V0], 0);
            return gen_excp(ctx, EXCP_HALTED, 0);

        default:
            palcode &= 0x3f;
            goto do_call_pal;
        }
        return NO_EXIT;
    }
#endif
    return gen_invalid(ctx);

 do_call_pal:
#ifdef CONFIG_USER_ONLY
    return gen_excp(ctx, EXCP_CALL_PAL, palcode);
#else
    {
        TCGv tmp = tcg_temp_new();
        uint64_t exc_addr = ctx->pc;
        uint64_t entry = ctx->palbr;

        if (ctx->tbflags & ENV_FLAG_PAL_MODE) {
            exc_addr |= 1;
        } else {
            tcg_gen_movi_i64(tmp, 1);
            st_flag_byte(tmp, ENV_FLAG_PAL_SHIFT);
        }

        tcg_gen_movi_i64(tmp, exc_addr);
        tcg_gen_st_i64(tmp, cpu_env, offsetof(CPUAlphaState, exc_addr));
        tcg_temp_free(tmp);

        entry += (palcode & 0x80
                  ? 0x2000 + (palcode - 0x80) * 64
                  : 0x1000 + palcode * 64);
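
        /* (Added note) Each PALcode entry point occupies a 64-byte slot:
           unprivileged calls dispatch past palbr + 0x2000 and privileged
           calls past palbr + 0x1000.  For example, CALL_PAL 0x83 lands at
           palbr + 0x2000 + 3 * 64 = palbr + 0x20c0.  */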
        /* Since the destination is running in PALmode, we don't really
           need the page permissions check.  We'll see the existence of
           the page when we create the TB, and we'll flush all TBs if
           we change the PAL base register.  */
        if (!use_exit_tb(ctx)) {
            tcg_gen_goto_tb(0);
            tcg_gen_movi_i64(cpu_pc, entry);
            tcg_gen_exit_tb((uintptr_t)ctx->tb);
            return EXIT_GOTO_TB;
        } else {
            tcg_gen_movi_i64(cpu_pc, entry);
            return EXIT_PC_UPDATED;
        }
    }
#endif
}
#ifndef CONFIG_USER_ONLY

#define PR_LONG         0x200000

static int cpu_pr_data(int pr)
{
    switch (pr) {
    case  2: return offsetof(CPUAlphaState, pcc_ofs) | PR_LONG;
    case  3: return offsetof(CPUAlphaState, trap_arg0);
    case  4: return offsetof(CPUAlphaState, trap_arg1);
    case  5: return offsetof(CPUAlphaState, trap_arg2);
    case  6: return offsetof(CPUAlphaState, exc_addr);
    case  7: return offsetof(CPUAlphaState, palbr);
    case  8: return offsetof(CPUAlphaState, ptbr);
    case  9: return offsetof(CPUAlphaState, vptptr);
    case 10: return offsetof(CPUAlphaState, unique);
    case 11: return offsetof(CPUAlphaState, sysval);
    case 12: return offsetof(CPUAlphaState, usp);

    case 40 ... 63:
        return offsetof(CPUAlphaState, scratch[pr - 40]);

    case 251:
        return offsetof(CPUAlphaState, alarm_expire);
    }
    return 0;
}
static ExitStatus gen_mfpr(DisasContext *ctx, TCGv va, int regno)
{
    void (*helper)(TCGv);
    int data;

    switch (regno) {
    case 32 ... 39:
        /* Accessing the "non-shadow" general registers.  */
        regno = regno == 39 ? 25 : regno - 32 + 8;
        tcg_gen_mov_i64(va, cpu_std_ir[regno]);
        break;

    case 250: /* WALLTIME */
        helper = gen_helper_get_walltime;
        goto do_helper;
    case 249: /* VMTIME */
        helper = gen_helper_get_vmtime;
    do_helper:
        if (use_icount) {
            gen_io_start();
            helper(va);
            gen_io_end();
            return EXIT_PC_STALE;
        } else {
            helper(va);
        }
        break;

    case 0: /* PS */
        ld_flag_byte(va, ENV_FLAG_PS_SHIFT);
        break;
    case 1: /* FEN */
        ld_flag_byte(va, ENV_FLAG_FEN_SHIFT);
        break;

    default:
        /* The basic registers are data only, and unknown registers
           are read-zero, write-ignore.  */
        data = cpu_pr_data(regno);
        if (data == 0) {
            tcg_gen_movi_i64(va, 0);
        } else if (data & PR_LONG) {
            tcg_gen_ld32s_i64(va, cpu_env, data & ~PR_LONG);
        } else {
            tcg_gen_ld_i64(va, cpu_env, data);
        }
        break;
    }

    return NO_EXIT;
}
static ExitStatus gen_mtpr(DisasContext *ctx, TCGv vb, int regno)
{
    int data;

    switch (regno) {
    case 255:
        /* TBIA */
        gen_helper_tbia(cpu_env);
        break;

    case 254:
        /* TBIS */
        gen_helper_tbis(cpu_env, vb);
        break;

    case 253:
        /* WAIT */
        {
            TCGv_i32 tmp = tcg_const_i32(1);
            tcg_gen_st_i32(tmp, cpu_env, -offsetof(AlphaCPU, env) +
                                         offsetof(CPUState, halted));
            tcg_temp_free_i32(tmp);
        }
        return gen_excp(ctx, EXCP_HALTED, 0);

    case 252:
        /* HALT */
        gen_helper_halt(vb);
        return EXIT_PC_STALE;

    case 251:
        /* ALARM */
        gen_helper_set_alarm(cpu_env, vb);
        break;

    case 7:
        /* PALBR */
        tcg_gen_st_i64(vb, cpu_env, offsetof(CPUAlphaState, palbr));
        /* Changing the PAL base register implies un-chaining all of the TBs
           that ended with a CALL_PAL.  Since the base register usually only
           changes during boot, flushing everything works well.  */
        gen_helper_tb_flush(cpu_env);
        return EXIT_PC_STALE;

    case 32 ... 39:
        /* Accessing the "non-shadow" general registers.  */
        regno = regno == 39 ? 25 : regno - 32 + 8;
        tcg_gen_mov_i64(cpu_std_ir[regno], vb);
        break;

    case 0: /* PS */
        st_flag_byte(vb, ENV_FLAG_PS_SHIFT);
        break;
    case 1: /* FEN */
        st_flag_byte(vb, ENV_FLAG_FEN_SHIFT);
        break;

    default:
        /* The basic registers are data only, and unknown registers
           are read-zero, write-ignore.  */
        data = cpu_pr_data(regno);
        if (data != 0) {
            if (data & PR_LONG) {
                tcg_gen_st32_i64(vb, cpu_env, data & ~PR_LONG);
            } else {
                tcg_gen_st_i64(vb, cpu_env, data);
            }
        }
        break;
    }

    return NO_EXIT;
}
#endif /* !USER_ONLY*/
#define REQUIRE_NO_LIT                          \
    do {                                        \
        if (real_islit) {                       \
            goto invalid_opc;                   \
        }                                       \
    } while (0)

#define REQUIRE_AMASK(FLAG)                     \
    do {                                        \
        if ((ctx->amask & AMASK_##FLAG) == 0) { \
            goto invalid_opc;                   \
        }                                       \
    } while (0)

#define REQUIRE_TB_FLAG(FLAG)                   \
    do {                                        \
        if ((ctx->tbflags & (FLAG)) == 0) {     \
            goto invalid_opc;                   \
        }                                       \
    } while (0)

#define REQUIRE_REG_31(WHICH)                   \
    do {                                        \
        if (WHICH != 31) {                      \
            goto invalid_opc;                   \
        }                                       \
    } while (0)
static ExitStatus translate_one(DisasContext *ctx, uint32_t insn)
{
    int32_t disp21, disp16, disp12 __attribute__((unused));
    uint16_t fn11;
    uint8_t opc, ra, rb, rc, fpfn, fn7, lit;
    bool islit, real_islit;
    TCGv va, vb, vc, tmp, tmp2;
    TCGv_i32 t32;
    ExitStatus ret;

    /* Decode all instruction fields */
    opc = extract32(insn, 26, 6);
    ra = extract32(insn, 21, 5);
    rb = extract32(insn, 16, 5);
    rc = extract32(insn, 0, 5);
    real_islit = islit = extract32(insn, 12, 1);
    lit = extract32(insn, 13, 8);

    disp21 = sextract32(insn, 0, 21);
    disp16 = sextract32(insn, 0, 16);
    disp12 = sextract32(insn, 0, 12);

    fn11 = extract32(insn, 5, 11);
    fpfn = extract32(insn, 5, 6);
    fn7 = extract32(insn, 5, 7);
    if (rb == 31 && !islit) {
        islit = true;
        lit = 0;
    }

    ret = NO_EXIT;
    switch (opc) {
    case 0x00:
        /* CALL_PAL */
        ret = gen_call_pal(ctx, insn & 0x03ffffff);
        break;
    case 0x01:
        /* OPC01 */
        goto invalid_opc;
    case 0x02:
        /* OPC02 */
        goto invalid_opc;
    case 0x03:
        /* OPC03 */
        goto invalid_opc;
    case 0x04:
        /* OPC04 */
        goto invalid_opc;
    case 0x05:
        /* OPC05 */
        goto invalid_opc;
    case 0x06:
        /* OPC06 */
        goto invalid_opc;
    case 0x07:
        /* OPC07 */
        goto invalid_opc;

    case 0x09:
        /* LDAH */
        disp16 = (uint32_t)disp16 << 16;
        /* fall through */
    case 0x08:
        /* LDA */
        va = dest_gpr(ctx, ra);
        /* It's worth special-casing immediate loads.  */
        if (rb == 31) {
            tcg_gen_movi_i64(va, disp16);
        } else {
            tcg_gen_addi_i64(va, load_gpr(ctx, rb), disp16);
        }
        break;

    case 0x0A:
        /* LDBU */
        REQUIRE_AMASK(BWX);
        gen_load_mem(ctx, &tcg_gen_qemu_ld8u, ra, rb, disp16, 0, 0);
        break;
    case 0x0B:
        /* LDQ_U */
        gen_load_mem(ctx, &tcg_gen_qemu_ld64, ra, rb, disp16, 0, 1);
        break;
    case 0x0C:
        /* LDWU */
        REQUIRE_AMASK(BWX);
        gen_load_mem(ctx, &tcg_gen_qemu_ld16u, ra, rb, disp16, 0, 0);
        break;
    case 0x0D:
        /* STW */
        REQUIRE_AMASK(BWX);
        gen_store_mem(ctx, &tcg_gen_qemu_st16, ra, rb, disp16, 0, 0);
        break;
    case 0x0E:
        /* STB */
        REQUIRE_AMASK(BWX);
        gen_store_mem(ctx, &tcg_gen_qemu_st8, ra, rb, disp16, 0, 0);
        break;
    case 0x0F:
        /* STQ_U */
        gen_store_mem(ctx, &tcg_gen_qemu_st64, ra, rb, disp16, 0, 1);
        break;
    case 0x10:
        vc = dest_gpr(ctx, rc);
        vb = load_gpr_lit(ctx, rb, lit, islit);

        if (ra == 31) {
            if (fn7 == 0x00) {
                /* Special case ADDL as SEXTL.  */
                tcg_gen_ext32s_i64(vc, vb);
                break;
            }
            if (fn7 == 0x29) {
                /* Special case SUBQ as NEGQ.  */
                tcg_gen_neg_i64(vc, vb);
                break;
            }
        }

        va = load_gpr(ctx, ra);
        switch (fn7) {
        case 0x00:
            /* ADDL */
            tcg_gen_add_i64(vc, va, vb);
            tcg_gen_ext32s_i64(vc, vc);
            break;
        case 0x02:
            /* S4ADDL */
            tmp = tcg_temp_new();
            tcg_gen_shli_i64(tmp, va, 2);
            tcg_gen_add_i64(tmp, tmp, vb);
            tcg_gen_ext32s_i64(vc, tmp);
            tcg_temp_free(tmp);
            break;
        case 0x09:
            /* SUBL */
            tcg_gen_sub_i64(vc, va, vb);
            tcg_gen_ext32s_i64(vc, vc);
            break;
        case 0x0B:
            /* S4SUBL */
            tmp = tcg_temp_new();
            tcg_gen_shli_i64(tmp, va, 2);
            tcg_gen_sub_i64(tmp, tmp, vb);
            tcg_gen_ext32s_i64(vc, tmp);
            tcg_temp_free(tmp);
            break;
        case 0x0F:
            /* CMPBGE */
            if (ra == 31) {
                /* Special case 0 >= X as X == 0.  */
                gen_helper_cmpbe0(vc, vb);
            } else {
                gen_helper_cmpbge(vc, va, vb);
            }
            break;
        case 0x12:
            /* S8ADDL */
            tmp = tcg_temp_new();
            tcg_gen_shli_i64(tmp, va, 3);
            tcg_gen_add_i64(tmp, tmp, vb);
            tcg_gen_ext32s_i64(vc, tmp);
            tcg_temp_free(tmp);
            break;
        case 0x1B:
            /* S8SUBL */
            tmp = tcg_temp_new();
            tcg_gen_shli_i64(tmp, va, 3);
            tcg_gen_sub_i64(tmp, tmp, vb);
            tcg_gen_ext32s_i64(vc, tmp);
            tcg_temp_free(tmp);
            break;
        case 0x1D:
            /* CMPULT */
            tcg_gen_setcond_i64(TCG_COND_LTU, vc, va, vb);
            break;
        case 0x20:
            /* ADDQ */
            tcg_gen_add_i64(vc, va, vb);
            break;
        case 0x22:
            /* S4ADDQ */
            tmp = tcg_temp_new();
            tcg_gen_shli_i64(tmp, va, 2);
            tcg_gen_add_i64(vc, tmp, vb);
            tcg_temp_free(tmp);
            break;
        case 0x29:
            /* SUBQ */
            tcg_gen_sub_i64(vc, va, vb);
            break;
        case 0x2B:
            /* S4SUBQ */
            tmp = tcg_temp_new();
            tcg_gen_shli_i64(tmp, va, 2);
            tcg_gen_sub_i64(vc, tmp, vb);
            tcg_temp_free(tmp);
            break;
        case 0x2D:
            /* CMPEQ */
            tcg_gen_setcond_i64(TCG_COND_EQ, vc, va, vb);
            break;
        case 0x32:
            /* S8ADDQ */
            tmp = tcg_temp_new();
            tcg_gen_shli_i64(tmp, va, 3);
            tcg_gen_add_i64(vc, tmp, vb);
            tcg_temp_free(tmp);
            break;
        case 0x3B:
            /* S8SUBQ */
            tmp = tcg_temp_new();
            tcg_gen_shli_i64(tmp, va, 3);
            tcg_gen_sub_i64(vc, tmp, vb);
            tcg_temp_free(tmp);
            break;
        case 0x3D:
            /* CMPULE */
            tcg_gen_setcond_i64(TCG_COND_LEU, vc, va, vb);
            break;
        case 0x40:
            /* ADDL/V */
            tmp = tcg_temp_new();
            tcg_gen_ext32s_i64(tmp, va);
            tcg_gen_ext32s_i64(vc, vb);
            tcg_gen_add_i64(tmp, tmp, vc);
            tcg_gen_ext32s_i64(vc, tmp);
            gen_helper_check_overflow(cpu_env, vc, tmp);
            tcg_temp_free(tmp);
            break;
        case 0x49:
            /* SUBL/V */
            tmp = tcg_temp_new();
            tcg_gen_ext32s_i64(tmp, va);
            tcg_gen_ext32s_i64(vc, vb);
            tcg_gen_sub_i64(tmp, tmp, vc);
            tcg_gen_ext32s_i64(vc, tmp);
            gen_helper_check_overflow(cpu_env, vc, tmp);
            tcg_temp_free(tmp);
            break;
        case 0x4D:
            /* CMPLT */
            tcg_gen_setcond_i64(TCG_COND_LT, vc, va, vb);
            break;
        case 0x60:
            /* ADDQ/V */
            tmp = tcg_temp_new();
            tmp2 = tcg_temp_new();
            tcg_gen_eqv_i64(tmp, va, vb);
            tcg_gen_mov_i64(tmp2, va);
            tcg_gen_add_i64(vc, va, vb);
            tcg_gen_xor_i64(tmp2, tmp2, vc);
            tcg_gen_and_i64(tmp, tmp, tmp2);
            tcg_gen_shri_i64(tmp, tmp, 63);
            tcg_gen_movi_i64(tmp2, 0);
            gen_helper_check_overflow(cpu_env, tmp, tmp2);
            tcg_temp_free(tmp);
            tcg_temp_free(tmp2);
            break;
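
            /* (Added note) The ADDQ/V sequence is the classic signed
               overflow test: overflow occurred iff the operands had equal
               sign bits (va EQV vb) and the result's sign differs from
               va's (va XOR vc); bit 63 of the conjunction is then compared
               against zero by the helper.  SUBQ/V below is the same test
               with the operand signs required to differ, hence XOR in
               place of EQV.  */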
        case 0x69:
            /* SUBQ/V */
            tmp = tcg_temp_new();
            tmp2 = tcg_temp_new();
            tcg_gen_xor_i64(tmp, va, vb);
            tcg_gen_mov_i64(tmp2, va);
            tcg_gen_sub_i64(vc, va, vb);
            tcg_gen_xor_i64(tmp2, tmp2, vc);
            tcg_gen_and_i64(tmp, tmp, tmp2);
            tcg_gen_shri_i64(tmp, tmp, 63);
            tcg_gen_movi_i64(tmp2, 0);
            gen_helper_check_overflow(cpu_env, tmp, tmp2);
            tcg_temp_free(tmp);
            tcg_temp_free(tmp2);
            break;
        case 0x6D:
            /* CMPLE */
            tcg_gen_setcond_i64(TCG_COND_LE, vc, va, vb);
            break;
        default:
            goto invalid_opc;
        }
        break;
    case 0x11:
        if (fn7 == 0x20) {
            if (rc == 31) {
                /* Special case BIS as NOP.  */
                break;
            }
            if (ra == 31) {
                /* Special case BIS as MOV.  */
                vc = dest_gpr(ctx, rc);
                if (islit) {
                    tcg_gen_movi_i64(vc, lit);
                } else {
                    tcg_gen_mov_i64(vc, load_gpr(ctx, rb));
                }
                break;
            }
        }

        vc = dest_gpr(ctx, rc);
        vb = load_gpr_lit(ctx, rb, lit, islit);

        if (fn7 == 0x28 && ra == 31) {
            /* Special case ORNOT as NOT.  */
            tcg_gen_not_i64(vc, vb);
            break;
        }

        va = load_gpr(ctx, ra);
        switch (fn7) {
        case 0x00:
            /* AND */
            tcg_gen_and_i64(vc, va, vb);
            break;
        case 0x08:
            /* BIC */
            tcg_gen_andc_i64(vc, va, vb);
            break;
        case 0x14:
            /* CMOVLBS */
            tmp = tcg_temp_new();
            tcg_gen_andi_i64(tmp, va, 1);
            tcg_gen_movcond_i64(TCG_COND_NE, vc, tmp, load_zero(ctx),
                                vb, load_gpr(ctx, rc));
            tcg_temp_free(tmp);
            break;
        case 0x16:
            /* CMOVLBC */
            tmp = tcg_temp_new();
            tcg_gen_andi_i64(tmp, va, 1);
            tcg_gen_movcond_i64(TCG_COND_EQ, vc, tmp, load_zero(ctx),
                                vb, load_gpr(ctx, rc));
            tcg_temp_free(tmp);
            break;
        case 0x20:
            /* BIS */
            tcg_gen_or_i64(vc, va, vb);
            break;
        case 0x24:
            /* CMOVEQ */
            tcg_gen_movcond_i64(TCG_COND_EQ, vc, va, load_zero(ctx),
                                vb, load_gpr(ctx, rc));
            break;
        case 0x26:
            /* CMOVNE */
            tcg_gen_movcond_i64(TCG_COND_NE, vc, va, load_zero(ctx),
                                vb, load_gpr(ctx, rc));
            break;
        case 0x28:
            /* ORNOT */
            tcg_gen_orc_i64(vc, va, vb);
            break;
        case 0x40:
            /* XOR */
            tcg_gen_xor_i64(vc, va, vb);
            break;
        case 0x44:
            /* CMOVLT */
            tcg_gen_movcond_i64(TCG_COND_LT, vc, va, load_zero(ctx),
                                vb, load_gpr(ctx, rc));
            break;
        case 0x46:
            /* CMOVGE */
            tcg_gen_movcond_i64(TCG_COND_GE, vc, va, load_zero(ctx),
                                vb, load_gpr(ctx, rc));
            break;
        case 0x48:
            /* EQV */
            tcg_gen_eqv_i64(vc, va, vb);
            break;
        case 0x61:
            /* AMASK */
            REQUIRE_REG_31(ra);
            tcg_gen_andi_i64(vc, vb, ~ctx->amask);
            break;
        case 0x64:
            /* CMOVLE */
            tcg_gen_movcond_i64(TCG_COND_LE, vc, va, load_zero(ctx),
                                vb, load_gpr(ctx, rc));
            break;
        case 0x66:
            /* CMOVGT */
            tcg_gen_movcond_i64(TCG_COND_GT, vc, va, load_zero(ctx),
                                vb, load_gpr(ctx, rc));
            break;
        case 0x6C:
            /* IMPLVER */
            REQUIRE_REG_31(ra);
            tcg_gen_movi_i64(vc, ctx->implver);
            break;
        default:
            goto invalid_opc;
        }
        break;
    case 0x12:
        vc = dest_gpr(ctx, rc);
        va = load_gpr(ctx, ra);
        switch (fn7) {
        case 0x02:
            /* MSKBL */
            gen_msk_l(ctx, vc, va, rb, islit, lit, 0x01);
            break;
        case 0x06:
            /* EXTBL */
            gen_ext_l(ctx, vc, va, rb, islit, lit, 0x01);
            break;
        case 0x0B:
            /* INSBL */
            gen_ins_l(ctx, vc, va, rb, islit, lit, 0x01);
            break;
        case 0x12:
            /* MSKWL */
            gen_msk_l(ctx, vc, va, rb, islit, lit, 0x03);
            break;
        case 0x16:
            /* EXTWL */
            gen_ext_l(ctx, vc, va, rb, islit, lit, 0x03);
            break;
        case 0x1B:
            /* INSWL */
            gen_ins_l(ctx, vc, va, rb, islit, lit, 0x03);
            break;
        case 0x22:
            /* MSKLL */
            gen_msk_l(ctx, vc, va, rb, islit, lit, 0x0f);
            break;
        case 0x26:
            /* EXTLL */
            gen_ext_l(ctx, vc, va, rb, islit, lit, 0x0f);
            break;
        case 0x2B:
            /* INSLL */
            gen_ins_l(ctx, vc, va, rb, islit, lit, 0x0f);
            break;
        case 0x30:
            /* ZAP */
            if (islit) {
                gen_zapnoti(vc, va, ~lit);
            } else {
                gen_helper_zap(vc, va, load_gpr(ctx, rb));
            }
            break;
        case 0x31:
            /* ZAPNOT */
            if (islit) {
                gen_zapnoti(vc, va, lit);
            } else {
                gen_helper_zapnot(vc, va, load_gpr(ctx, rb));
            }
            break;
        case 0x32:
            /* MSKQL */
            gen_msk_l(ctx, vc, va, rb, islit, lit, 0xff);
            break;
        case 0x34:
            /* SRL */
            if (islit) {
                tcg_gen_shri_i64(vc, va, lit & 0x3f);
            } else {
                tmp = tcg_temp_new();
                vb = load_gpr(ctx, rb);
                tcg_gen_andi_i64(tmp, vb, 0x3f);
                tcg_gen_shr_i64(vc, va, tmp);
                tcg_temp_free(tmp);
            }
            break;
        case 0x36:
            /* EXTQL */
            gen_ext_l(ctx, vc, va, rb, islit, lit, 0xff);
            break;
        case 0x39:
            /* SLL */
            if (islit) {
                tcg_gen_shli_i64(vc, va, lit & 0x3f);
            } else {
                tmp = tcg_temp_new();
                vb = load_gpr(ctx, rb);
                tcg_gen_andi_i64(tmp, vb, 0x3f);
                tcg_gen_shl_i64(vc, va, tmp);
                tcg_temp_free(tmp);
            }
            break;
        case 0x3B:
            /* INSQL */
            gen_ins_l(ctx, vc, va, rb, islit, lit, 0xff);
            break;
        case 0x3C:
            /* SRA */
            if (islit) {
                tcg_gen_sari_i64(vc, va, lit & 0x3f);
            } else {
                tmp = tcg_temp_new();
                vb = load_gpr(ctx, rb);
                tcg_gen_andi_i64(tmp, vb, 0x3f);
                tcg_gen_sar_i64(vc, va, tmp);
                tcg_temp_free(tmp);
            }
            break;
        case 0x52:
            /* MSKWH */
            gen_msk_h(ctx, vc, va, rb, islit, lit, 0x03);
            break;
        case 0x57:
            /* INSWH */
            gen_ins_h(ctx, vc, va, rb, islit, lit, 0x03);
            break;
        case 0x5A:
            /* EXTWH */
            gen_ext_h(ctx, vc, va, rb, islit, lit, 0x03);
            break;
        case 0x62:
            /* MSKLH */
            gen_msk_h(ctx, vc, va, rb, islit, lit, 0x0f);
            break;
        case 0x67:
            /* INSLH */
            gen_ins_h(ctx, vc, va, rb, islit, lit, 0x0f);
            break;
        case 0x6A:
            /* EXTLH */
            gen_ext_h(ctx, vc, va, rb, islit, lit, 0x0f);
            break;
        case 0x72:
            /* MSKQH */
            gen_msk_h(ctx, vc, va, rb, islit, lit, 0xff);
            break;
        case 0x77:
            /* INSQH */
            gen_ins_h(ctx, vc, va, rb, islit, lit, 0xff);
            break;
        case 0x7A:
            /* EXTQH */
            gen_ext_h(ctx, vc, va, rb, islit, lit, 0xff);
            break;
        default:
            goto invalid_opc;
        }
        break;
    case 0x13:
        vc = dest_gpr(ctx, rc);
        vb = load_gpr_lit(ctx, rb, lit, islit);
        va = load_gpr(ctx, ra);
        switch (fn7) {
        case 0x00:
            /* MULL */
            tcg_gen_mul_i64(vc, va, vb);
            tcg_gen_ext32s_i64(vc, vc);
            break;
        case 0x20:
            /* MULQ */
            tcg_gen_mul_i64(vc, va, vb);
            break;
        case 0x30:
            /* UMULH */
            tmp = tcg_temp_new();
            tcg_gen_mulu2_i64(tmp, vc, va, vb);
            tcg_temp_free(tmp);
            break;
        case 0x40:
            /* MULL/V */
            tmp = tcg_temp_new();
            tcg_gen_ext32s_i64(tmp, va);
            tcg_gen_ext32s_i64(vc, vb);
            tcg_gen_mul_i64(tmp, tmp, vc);
            tcg_gen_ext32s_i64(vc, tmp);
            gen_helper_check_overflow(cpu_env, vc, tmp);
            tcg_temp_free(tmp);
            break;
        case 0x60:
            /* MULQ/V */
            tmp = tcg_temp_new();
            tmp2 = tcg_temp_new();
            tcg_gen_muls2_i64(vc, tmp, va, vb);
            tcg_gen_sari_i64(tmp2, vc, 63);
            gen_helper_check_overflow(cpu_env, tmp, tmp2);
            tcg_temp_free(tmp);
            tcg_temp_free(tmp2);
            break;
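
            /* (Added note) muls2 produces the full 128-bit signed product
               (vc = low half, tmp = high half).  The multiply overflowed
               iff the high half is not simply the sign-extension of the
               low half, i.e. tmp != vc >> 63, which is exactly the pair
               handed to the overflow helper.  */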
        default:
            goto invalid_opc;
        }
        break;
    case 0x14:
        REQUIRE_AMASK(FIX);
        vc = dest_fpr(ctx, rc);
        switch (fpfn) { /* fn11 & 0x3F */
        case 0x04:
            /* ITOFS */
            REQUIRE_REG_31(rb);
            t32 = tcg_temp_new_i32();
            va = load_gpr(ctx, ra);
            tcg_gen_extrl_i64_i32(t32, va);
            gen_helper_memory_to_s(vc, t32);
            tcg_temp_free_i32(t32);
            break;
        case 0x0A:
            /* SQRTF */
            REQUIRE_REG_31(ra);
            vb = load_fpr(ctx, rb);
            gen_helper_sqrtf(vc, cpu_env, vb);
            break;
        case 0x0B:
            /* SQRTS */
            REQUIRE_REG_31(ra);
            gen_sqrts(ctx, rb, rc, fn11);
            break;
        case 0x14:
            /* ITOFF */
            REQUIRE_REG_31(rb);
            t32 = tcg_temp_new_i32();
            va = load_gpr(ctx, ra);
            tcg_gen_extrl_i64_i32(t32, va);
            gen_helper_memory_to_f(vc, t32);
            tcg_temp_free_i32(t32);
            break;
        case 0x24:
            /* ITOFT */
            REQUIRE_REG_31(rb);
            va = load_gpr(ctx, ra);
            tcg_gen_mov_i64(vc, va);
            break;
        case 0x2A:
            /* SQRTG */
            REQUIRE_REG_31(ra);
            vb = load_fpr(ctx, rb);
            gen_helper_sqrtg(vc, cpu_env, vb);
            break;
        case 0x02B:
            /* SQRTT */
            REQUIRE_REG_31(ra);
            gen_sqrtt(ctx, rb, rc, fn11);
            break;
        default:
            goto invalid_opc;
        }
        break;
    case 0x15:
        /* VAX floating point */
        /* XXX: rounding mode and trap are ignored (!) */
        vc = dest_fpr(ctx, rc);
        vb = load_fpr(ctx, rb);
        va = load_fpr(ctx, ra);
        switch (fpfn) { /* fn11 & 0x3F */
        case 0x00:
            /* ADDF */
            gen_helper_addf(vc, cpu_env, va, vb);
            break;
        case 0x01:
            /* SUBF */
            gen_helper_subf(vc, cpu_env, va, vb);
            break;
        case 0x02:
            /* MULF */
            gen_helper_mulf(vc, cpu_env, va, vb);
            break;
        case 0x03:
            /* DIVF */
            gen_helper_divf(vc, cpu_env, va, vb);
            break;
        case 0x1E:
            /* CVTDG -- TODO */
            REQUIRE_REG_31(ra);
            goto invalid_opc;
        case 0x20:
            /* ADDG */
            gen_helper_addg(vc, cpu_env, va, vb);
            break;
        case 0x21:
            /* SUBG */
            gen_helper_subg(vc, cpu_env, va, vb);
            break;
        case 0x22:
            /* MULG */
            gen_helper_mulg(vc, cpu_env, va, vb);
            break;
        case 0x23:
            /* DIVG */
            gen_helper_divg(vc, cpu_env, va, vb);
            break;
        case 0x25:
            /* CMPGEQ */
            gen_helper_cmpgeq(vc, cpu_env, va, vb);
            break;
        case 0x26:
            /* CMPGLT */
            gen_helper_cmpglt(vc, cpu_env, va, vb);
            break;
        case 0x27:
            /* CMPGLE */
            gen_helper_cmpgle(vc, cpu_env, va, vb);
            break;
        case 0x2C:
            /* CVTGF */
            REQUIRE_REG_31(ra);
            gen_helper_cvtgf(vc, cpu_env, vb);
            break;
        case 0x2D:
            /* CVTGD -- TODO */
            REQUIRE_REG_31(ra);
            goto invalid_opc;
        case 0x2F:
            /* CVTGQ */
            REQUIRE_REG_31(ra);
            gen_helper_cvtgq(vc, cpu_env, vb);
            break;
        case 0x3C:
            /* CVTQF */
            REQUIRE_REG_31(ra);
            gen_helper_cvtqf(vc, cpu_env, vb);
            break;
        case 0x3E:
            /* CVTQG */
            REQUIRE_REG_31(ra);
            gen_helper_cvtqg(vc, cpu_env, vb);
            break;
        default:
            goto invalid_opc;
        }
        break;
    case 0x16:
        /* IEEE floating-point */
        switch (fpfn) { /* fn11 & 0x3F */
        case 0x00:
            /* ADDS */
            gen_adds(ctx, ra, rb, rc, fn11);
            break;
        case 0x01:
            /* SUBS */
            gen_subs(ctx, ra, rb, rc, fn11);
            break;
        case 0x02:
            /* MULS */
            gen_muls(ctx, ra, rb, rc, fn11);
            break;
        case 0x03:
            /* DIVS */
            gen_divs(ctx, ra, rb, rc, fn11);
            break;
        case 0x20:
            /* ADDT */
            gen_addt(ctx, ra, rb, rc, fn11);
            break;
        case 0x21:
            /* SUBT */
            gen_subt(ctx, ra, rb, rc, fn11);
            break;
        case 0x22:
            /* MULT */
            gen_mult(ctx, ra, rb, rc, fn11);
            break;
        case 0x23:
            /* DIVT */
            gen_divt(ctx, ra, rb, rc, fn11);
            break;
        case 0x24:
            /* CMPTUN */
            gen_cmptun(ctx, ra, rb, rc, fn11);
            break;
        case 0x25:
            /* CMPTEQ */
            gen_cmpteq(ctx, ra, rb, rc, fn11);
            break;
        case 0x26:
            /* CMPTLT */
            gen_cmptlt(ctx, ra, rb, rc, fn11);
            break;
        case 0x27:
            /* CMPTLE */
            gen_cmptle(ctx, ra, rb, rc, fn11);
            break;
        case 0x2C:
            REQUIRE_REG_31(ra);
            if (fn11 == 0x2AC || fn11 == 0x6AC) {
                /* CVTST */
                gen_cvtst(ctx, rb, rc, fn11);
            } else {
                /* CVTTS */
                gen_cvtts(ctx, rb, rc, fn11);
            }
            break;
        case 0x2F:
            /* CVTTQ */
            REQUIRE_REG_31(ra);
            gen_cvttq(ctx, rb, rc, fn11);
            break;
        case 0x3C:
            /* CVTQS */
            REQUIRE_REG_31(ra);
            gen_cvtqs(ctx, rb, rc, fn11);
            break;
        case 0x3E:
            /* CVTQT */
            REQUIRE_REG_31(ra);
            gen_cvtqt(ctx, rb, rc, fn11);
            break;
        default:
            goto invalid_opc;
        }
        break;
    case 0x17:
        switch (fn11) {
        case 0x010:
            /* CVTLQ */
            REQUIRE_REG_31(ra);
            vc = dest_fpr(ctx, rc);
            vb = load_fpr(ctx, rb);
            gen_cvtlq(vc, vb);
            break;
        case 0x020:
            /* CPYS */
            if (rc == 31) {
                /* Special case CPYS as FNOP. */
            } else {
                vc = dest_fpr(ctx, rc);
                va = load_fpr(ctx, ra);
                if (ra == rb) {
                    /* Special case CPYS as FMOV. */
                    tcg_gen_mov_i64(vc, va);
                } else {
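                    /* The mask passed to gen_cpy_mask selects only the
                       sign bit, so vc takes the sign of va and the
                       exponent/fraction of vb. */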
                    vb = load_fpr(ctx, rb);
                    gen_cpy_mask(vc, va, vb, 0, 0x8000000000000000ULL);
                }
            }
            break;
        case 0x021:
            /* CPYSN */
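            /* As CPYS, but with inv_a set so that the complement of
               va's sign bit is copied. */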
            vc = dest_fpr(ctx, rc);
            vb = load_fpr(ctx, rb);
            va = load_fpr(ctx, ra);
            gen_cpy_mask(vc, va, vb, 1, 0x8000000000000000ULL);
            break;
        case 0x022:
            /* CPYSE */
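            /* Copy sign and exponent: the mask covers the top twelve
               bits of a T-format value (0xFFF << 52). */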
            vc = dest_fpr(ctx, rc);
            vb = load_fpr(ctx, rb);
            va = load_fpr(ctx, ra);
            gen_cpy_mask(vc, va, vb, 0, 0xFFF0000000000000ULL);
            break;
        case 0x024:
            /* MT_FPCR */
            va = load_fpr(ctx, ra);
            gen_helper_store_fpcr(cpu_env, va);
            if (ctx->tb_rm == QUAL_RM_D) {
                /* Re-do the copy of the rounding mode to fp_status
                   the next time we use dynamic rounding. */
                ctx->tb_rm = -1;
            }
            break;
        case 0x025:
            /* MF_FPCR */
            va = dest_fpr(ctx, ra);
            gen_helper_load_fpcr(va, cpu_env);
            break;
        case 0x02A:
            /* FCMOVEQ */
            gen_fcmov(ctx, TCG_COND_EQ, ra, rb, rc);
            break;
        case 0x02B:
            /* FCMOVNE */
            gen_fcmov(ctx, TCG_COND_NE, ra, rb, rc);
            break;
        case 0x02C:
            /* FCMOVLT */
            gen_fcmov(ctx, TCG_COND_LT, ra, rb, rc);
            break;
        case 0x02D:
            /* FCMOVGE */
            gen_fcmov(ctx, TCG_COND_GE, ra, rb, rc);
            break;
        case 0x02E:
            /* FCMOVLE */
            gen_fcmov(ctx, TCG_COND_LE, ra, rb, rc);
            break;
        case 0x02F:
            /* FCMOVGT */
            gen_fcmov(ctx, TCG_COND_GT, ra, rb, rc);
            break;
        case 0x030: /* CVTQL */
        case 0x130: /* CVTQL/V */
        case 0x530: /* CVTQL/SV */
            REQUIRE_REG_31(ra);
            vc = dest_fpr(ctx, rc);
            vb = load_fpr(ctx, rb);
            gen_helper_cvtql(vc, cpu_env, vb);
            gen_fp_exc_raise(rc, fn11);
            break;
        default:
            goto invalid_opc;
        }
        break;

    case 0x18:
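        /* Miscellaneous instructions: the function code is encoded in
           the 16-bit displacement field rather than in fn7/fn11. */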
        switch ((uint16_t)disp16) {
        case 0x0000:
            /* TRAPB */
            /* No-op. */
            break;
        case 0x0400:
            /* EXCB */
            /* No-op. */
            break;
        case 0x4000:
            /* MB */
            tcg_gen_mb(TCG_MO_ALL | TCG_BAR_SC);
            break;
        case 0x4400:
            /* WMB */
            tcg_gen_mb(TCG_MO_ST_ST | TCG_BAR_SC);
            break;
        case 0x8000:
            /* FETCH */
            /* No-op */
            break;
        case 0xA000:
            /* FETCH_M */
            /* No-op */
            break;
        case 0xC000:
            /* RPCC */
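            /* With icount enabled, reading the cycle counter counts as
               an I/O operation: bracket it with gen_io_start/gen_io_end
               and end the TB so the instruction count stays exact. */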
            va = dest_gpr(ctx, ra);
            if (ctx->tb->cflags & CF_USE_ICOUNT) {
                gen_io_start();
                gen_helper_load_pcc(va, cpu_env);
                gen_io_end();
                ret = EXIT_PC_STALE;
            } else {
                gen_helper_load_pcc(va, cpu_env);
            }
            break;
        case 0xE000:
            /* RC */
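            /* gen_rx (defined earlier) returns the interrupt flag in
               ra, then stores the given value: RC clears the flag, RS
               below sets it. */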
            gen_rx(ctx, ra, 0);
            break;
        case 0xE800:
            /* ECB */
            break;
        case 0xF000:
            /* RS */
            gen_rx(ctx, ra, 1);
            break;
        case 0xF800:
            /* WH64 */
            /* No-op */
            break;
        case 0xFC00:
            /* WH64EN */
            /* No-op */
            break;
        default:
            goto invalid_opc;
        }
        break;

    case 0x19:
        /* HW_MFPR (PALcode) */
#ifndef CONFIG_USER_ONLY
        REQUIRE_TB_FLAG(ENV_FLAG_PAL_MODE);
        va = dest_gpr(ctx, ra);
        ret = gen_mfpr(ctx, va, insn & 0xffff);
        break;
#else
        goto invalid_opc;
#endif

    case 0x1A:
        /* JMP, JSR, RET, JSR_COROUTINE.  These only differ by the branch
           prediction stack action, which of course we don't implement. */
        vb = load_gpr(ctx, rb);
        tcg_gen_andi_i64(cpu_pc, vb, ~3);
        if (ra != 31) {
            tcg_gen_movi_i64(ctx->ir[ra], ctx->pc);
        }
        ret = EXIT_PC_UPDATED;
        break;

    case 0x1B:
        /* HW_LD (PALcode) */
#ifndef CONFIG_USER_ONLY
        REQUIRE_TB_FLAG(ENV_FLAG_PAL_MODE);
        {
            TCGv addr = tcg_temp_new();
            vb = load_gpr(ctx, rb);
            va = dest_gpr(ctx, ra);

            tcg_gen_addi_i64(addr, vb, disp12);
            switch ((insn >> 12) & 0xF) {
            case 0x0:
                /* Longword physical access (hw_ldl/p) */
                tcg_gen_qemu_ld_i64(va, addr, MMU_PHYS_IDX, MO_LESL);
                break;
            case 0x1:
                /* Quadword physical access (hw_ldq/p) */
                tcg_gen_qemu_ld_i64(va, addr, MMU_PHYS_IDX, MO_LEQ);
                break;
            case 0x2:
                /* Longword physical access with lock (hw_ldl_l/p) */
                gen_qemu_ldl_l(va, addr, MMU_PHYS_IDX);
                break;
            case 0x3:
                /* Quadword physical access with lock (hw_ldq_l/p) */
                gen_qemu_ldq_l(va, addr, MMU_PHYS_IDX);
                break;
            case 0x4:
                /* Longword virtual PTE fetch (hw_ldl/v) */
                goto invalid_opc;
            case 0x5:
                /* Quadword virtual PTE fetch (hw_ldq/v) */
                goto invalid_opc;
            case 0x6:
                /* Invalid */
                goto invalid_opc;
            case 0x7:
                /* Invalid */
                goto invalid_opc;
            case 0x8:
                /* Longword virtual access (hw_ldl) */
                goto invalid_opc;
            case 0x9:
                /* Quadword virtual access (hw_ldq) */
                goto invalid_opc;
            case 0xA:
                /* Longword virtual access with protection check (hw_ldl/w) */
                tcg_gen_qemu_ld_i64(va, addr, MMU_KERNEL_IDX, MO_LESL);
                break;
            case 0xB:
                /* Quadword virtual access with protection check (hw_ldq/w) */
                tcg_gen_qemu_ld_i64(va, addr, MMU_KERNEL_IDX, MO_LEQ);
                break;
            case 0xC:
                /* Longword virtual access with alt access mode (hw_ldl/a) */
                goto invalid_opc;
            case 0xD:
                /* Quadword virtual access with alt access mode (hw_ldq/a) */
                goto invalid_opc;
            case 0xE:
                /* Longword virtual access with alternate access mode and
                   protection checks (hw_ldl/wa) */
                tcg_gen_qemu_ld_i64(va, addr, MMU_USER_IDX, MO_LESL);
                break;
            case 0xF:
                /* Quadword virtual access with alternate access mode and
                   protection checks (hw_ldq/wa) */
                tcg_gen_qemu_ld_i64(va, addr, MMU_USER_IDX, MO_LEQ);
                break;
            }
            tcg_temp_free(addr);
            break;
        }
#else
        goto invalid_opc;
#endif

    case 0x1C:
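        /* Sign-extension, count, and multimedia (MVI) operations.
           FTOIT/FTOIS are handled first because their source is an FP
           register rather than rb or a literal. */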
        vc = dest_gpr(ctx, rc);
        if (fn7 == 0x70) {
            /* FTOIT */
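            /* A raw 64-bit move of the FPR bits; no format conversion. */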
            REQUIRE_AMASK(FIX);
            REQUIRE_REG_31(rb);
            va = load_fpr(ctx, ra);
            tcg_gen_mov_i64(vc, va);
            break;
        } else if (fn7 == 0x78) {
            /* FTOIS */
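            /* Convert the register form of the S-format value back to
               its 32-bit memory image, then sign-extend into vc. */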
            REQUIRE_AMASK(FIX);
            REQUIRE_REG_31(rb);
            t32 = tcg_temp_new_i32();
            va = load_fpr(ctx, ra);
            gen_helper_s_to_memory(t32, va);
            tcg_gen_ext_i32_i64(vc, t32);
            tcg_temp_free_i32(t32);
            break;
        }

        vb = load_gpr_lit(ctx, rb, lit, islit);
        switch (fn7) {
        case 0x00:
            /* SEXTB */
            REQUIRE_AMASK(BWX);
            REQUIRE_REG_31(ra);
            tcg_gen_ext8s_i64(vc, vb);
            break;
        case 0x01:
            /* SEXTW */
            REQUIRE_AMASK(BWX);
            REQUIRE_REG_31(ra);
            tcg_gen_ext16s_i64(vc, vb);
            break;
        case 0x30:
            /* CTPOP */
            REQUIRE_AMASK(CIX);
            REQUIRE_REG_31(ra);
            REQUIRE_NO_LIT;
            tcg_gen_ctpop_i64(vc, vb);
            break;
        case 0x31:
            /* PERR */
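            /* Sum of the absolute differences of the eight byte pairs
               (a motion-estimation primitive from the MVI extension). */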
            REQUIRE_AMASK(MVI);
            REQUIRE_NO_LIT;
            va = load_gpr(ctx, ra);
            gen_helper_perr(vc, va, vb);
            break;
        case 0x32:
            /* CTLZ */
            REQUIRE_AMASK(CIX);
            REQUIRE_REG_31(ra);
            REQUIRE_NO_LIT;
            tcg_gen_clzi_i64(vc, vb, 64);
            break;
        case 0x33:
            /* CTTZ */
            REQUIRE_AMASK(CIX);
            REQUIRE_REG_31(ra);
            REQUIRE_NO_LIT;
            tcg_gen_ctzi_i64(vc, vb, 64);
            break;
        case 0x34:
            /* UNPKBW */
            REQUIRE_AMASK(MVI);
            REQUIRE_REG_31(ra);
            REQUIRE_NO_LIT;
            gen_helper_unpkbw(vc, vb);
            break;
        case 0x35:
            /* UNPKBL */
            REQUIRE_AMASK(MVI);
            REQUIRE_REG_31(ra);
            REQUIRE_NO_LIT;
            gen_helper_unpkbl(vc, vb);
            break;
        case 0x36:
            /* PKWB */
            REQUIRE_AMASK(MVI);
            REQUIRE_REG_31(ra);
            REQUIRE_NO_LIT;
            gen_helper_pkwb(vc, vb);
            break;
        case 0x37:
            /* PKLB */
            REQUIRE_AMASK(MVI);
            REQUIRE_REG_31(ra);
            REQUIRE_NO_LIT;
            gen_helper_pklb(vc, vb);
            break;
        case 0x38:
            /* MINSB8 */
            REQUIRE_AMASK(MVI);
            va = load_gpr(ctx, ra);
            gen_helper_minsb8(vc, va, vb);
            break;
        case 0x39:
            /* MINSW4 */
            REQUIRE_AMASK(MVI);
            va = load_gpr(ctx, ra);
            gen_helper_minsw4(vc, va, vb);
            break;
        case 0x3A:
            /* MINUB8 */
            REQUIRE_AMASK(MVI);
            va = load_gpr(ctx, ra);
            gen_helper_minub8(vc, va, vb);
            break;
        case 0x3B:
            /* MINUW4 */
            REQUIRE_AMASK(MVI);
            va = load_gpr(ctx, ra);
            gen_helper_minuw4(vc, va, vb);
            break;
        case 0x3C:
            /* MAXUB8 */
            REQUIRE_AMASK(MVI);
            va = load_gpr(ctx, ra);
            gen_helper_maxub8(vc, va, vb);
            break;
        case 0x3D:
            /* MAXUW4 */
            REQUIRE_AMASK(MVI);
            va = load_gpr(ctx, ra);
            gen_helper_maxuw4(vc, va, vb);
            break;
        case 0x3E:
            /* MAXSB8 */
            REQUIRE_AMASK(MVI);
            va = load_gpr(ctx, ra);
            gen_helper_maxsb8(vc, va, vb);
            break;
        case 0x3F:
            /* MAXSW4 */
            REQUIRE_AMASK(MVI);
            va = load_gpr(ctx, ra);
            gen_helper_maxsw4(vc, va, vb);
            break;
        default:
            goto invalid_opc;
        }
        break;

    case 0x1D:
        /* HW_MTPR (PALcode) */
#ifndef CONFIG_USER_ONLY
        REQUIRE_TB_FLAG(ENV_FLAG_PAL_MODE);
        vb = load_gpr(ctx, rb);
        ret = gen_mtpr(ctx, vb, insn & 0xffff);
        break;
#else
        goto invalid_opc;
#endif

    case 0x1E:
        /* HW_RET (PALcode) */
#ifndef CONFIG_USER_ONLY
        REQUIRE_TB_FLAG(ENV_FLAG_PAL_MODE);
        if (rb == 31) {
            /* Pre-EV6 CPUs interpreted this as HW_REI, loading the return
               address from EXC_ADDR.  This turns out to be useful for our
               emulation PALcode, so continue to accept it. */
            ctx->lit = vb = tcg_temp_new();
            tcg_gen_ld_i64(vb, cpu_env, offsetof(CPUAlphaState, exc_addr));
        } else {
            vb = load_gpr(ctx, rb);
        }
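        /* Leaving PALcode: drop any load-locked reservation, clear the
           interrupt (RX) flag, and take the new PAL-mode state from
           bit 0 of the target address. */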
        tcg_gen_movi_i64(cpu_lock_addr, -1);
        tmp = tcg_temp_new();
        tcg_gen_movi_i64(tmp, 0);
        st_flag_byte(tmp, ENV_FLAG_RX_SHIFT);
        tcg_gen_andi_i64(tmp, vb, 1);
        st_flag_byte(tmp, ENV_FLAG_PAL_SHIFT);
        tcg_temp_free(tmp);
        tcg_gen_andi_i64(cpu_pc, vb, ~3);
        /* Allow interrupts to be recognized right away. */
        ret = EXIT_PC_UPDATED_NOCHAIN;
        break;
#else
        goto invalid_opc;
#endif

    case 0x1F:
        /* HW_ST (PALcode) */
#ifndef CONFIG_USER_ONLY
        REQUIRE_TB_FLAG(ENV_FLAG_PAL_MODE);
        switch ((insn >> 12) & 0xF) {
        case 0x0:
            /* Longword physical access */
            va = load_gpr(ctx, ra);
            vb = load_gpr(ctx, rb);
            tmp = tcg_temp_new();
            tcg_gen_addi_i64(tmp, vb, disp12);
            tcg_gen_qemu_st_i64(va, tmp, MMU_PHYS_IDX, MO_LESL);
            tcg_temp_free(tmp);
            break;
        case 0x1:
            /* Quadword physical access */
            va = load_gpr(ctx, ra);
            vb = load_gpr(ctx, rb);
            tmp = tcg_temp_new();
            tcg_gen_addi_i64(tmp, vb, disp12);
            tcg_gen_qemu_st_i64(va, tmp, MMU_PHYS_IDX, MO_LEQ);
            tcg_temp_free(tmp);
            break;
        case 0x2:
            /* Longword physical access with lock */
            ret = gen_store_conditional(ctx, ra, rb, disp12,
                                        MMU_PHYS_IDX, MO_LESL);
            break;
        case 0x3:
            /* Quadword physical access with lock */
            ret = gen_store_conditional(ctx, ra, rb, disp12,
                                        MMU_PHYS_IDX, MO_LEQ);
            break;
        case 0x4:
            /* Longword virtual access */
            goto invalid_opc;
        case 0x5:
            /* Quadword virtual access */
            goto invalid_opc;
        case 0x6:
            /* Invalid */
            goto invalid_opc;
        case 0x7:
            /* Invalid */
            goto invalid_opc;
        case 0x8:
            /* Invalid */
            goto invalid_opc;
        case 0x9:
            /* Invalid */
            goto invalid_opc;
        case 0xA:
            /* Invalid */
            goto invalid_opc;
        case 0xB:
            /* Invalid */
            goto invalid_opc;
        case 0xC:
            /* Longword virtual access with alternate access mode */
            goto invalid_opc;
        case 0xD:
            /* Quadword virtual access with alternate access mode */
            goto invalid_opc;
        case 0xE:
            /* Invalid */
            goto invalid_opc;
        case 0xF:
            /* Invalid */
            goto invalid_opc;
        }
        break;
#else
        goto invalid_opc;
#endif
    case 0x20:
        /* LDF */
        gen_load_mem(ctx, &gen_qemu_ldf, ra, rb, disp16, 1, 0);
        break;
    case 0x21:
        /* LDG */
        gen_load_mem(ctx, &gen_qemu_ldg, ra, rb, disp16, 1, 0);
        break;
    case 0x22:
        /* LDS */
        gen_load_mem(ctx, &gen_qemu_lds, ra, rb, disp16, 1, 0);
        break;
    case 0x23:
        /* LDT */
        gen_load_mem(ctx, &tcg_gen_qemu_ld64, ra, rb, disp16, 1, 0);
        break;
    case 0x24:
        /* STF */
        gen_store_mem(ctx, &gen_qemu_stf, ra, rb, disp16, 1, 0);
        break;
    case 0x25:
        /* STG */
        gen_store_mem(ctx, &gen_qemu_stg, ra, rb, disp16, 1, 0);
        break;
    case 0x26:
        /* STS */
        gen_store_mem(ctx, &gen_qemu_sts, ra, rb, disp16, 1, 0);
        break;
    case 0x27:
        /* STT */
        gen_store_mem(ctx, &tcg_gen_qemu_st64, ra, rb, disp16, 1, 0);
        break;
    case 0x28:
        /* LDL */
        gen_load_mem(ctx, &tcg_gen_qemu_ld32s, ra, rb, disp16, 0, 0);
        break;
    case 0x29:
        /* LDQ */
        gen_load_mem(ctx, &tcg_gen_qemu_ld64, ra, rb, disp16, 0, 0);
        break;
    case 0x2A:
        /* LDL_L */
        gen_load_mem(ctx, &gen_qemu_ldl_l, ra, rb, disp16, 0, 0);
        break;
    case 0x2B:
        /* LDQ_L */
        gen_load_mem(ctx, &gen_qemu_ldq_l, ra, rb, disp16, 0, 0);
        break;
    case 0x2C:
        /* STL */
        gen_store_mem(ctx, &tcg_gen_qemu_st32, ra, rb, disp16, 0, 0);
        break;
    case 0x2D:
        /* STQ */
        gen_store_mem(ctx, &tcg_gen_qemu_st64, ra, rb, disp16, 0, 0);
        break;
    case 0x2E:
        /* STL_C */
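        /* The store succeeds only if the reservation established by a
           preceding LDL_L/LDQ_L is still intact; gen_store_conditional
           checks this against cpu_lock_addr/cpu_lock_value. */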
        ret = gen_store_conditional(ctx, ra, rb, disp16,
                                    ctx->mem_idx, MO_LESL);
        break;
    case 0x2F:
        /* STQ_C */
        ret = gen_store_conditional(ctx, ra, rb, disp16,
                                    ctx->mem_idx, MO_LEQ);
        break;
    case 0x30:
        /* BR */
        ret = gen_bdirect(ctx, ra, disp21);
        break;
    case 0x31: /* FBEQ */
        ret = gen_fbcond(ctx, TCG_COND_EQ, ra, disp21);
        break;
    case 0x32: /* FBLT */
        ret = gen_fbcond(ctx, TCG_COND_LT, ra, disp21);
        break;
    case 0x33: /* FBLE */
        ret = gen_fbcond(ctx, TCG_COND_LE, ra, disp21);
        break;
    case 0x34:
        /* BSR */
        ret = gen_bdirect(ctx, ra, disp21);
        break;
    case 0x35: /* FBNE */
        ret = gen_fbcond(ctx, TCG_COND_NE, ra, disp21);
        break;
    case 0x36: /* FBGE */
        ret = gen_fbcond(ctx, TCG_COND_GE, ra, disp21);
        break;
    case 0x37: /* FBGT */
        ret = gen_fbcond(ctx, TCG_COND_GT, ra, disp21);
        break;
    case 0x38:
        /* BLBC */
        ret = gen_bcond(ctx, TCG_COND_EQ, ra, disp21, 1);
        break;
    case 0x39:
        /* BEQ */
        ret = gen_bcond(ctx, TCG_COND_EQ, ra, disp21, 0);
        break;
    case 0x3A:
        /* BLT */
        ret = gen_bcond(ctx, TCG_COND_LT, ra, disp21, 0);
        break;
    case 0x3B:
        /* BLE */
        ret = gen_bcond(ctx, TCG_COND_LE, ra, disp21, 0);
        break;
    case 0x3C:
        /* BLBS */
        ret = gen_bcond(ctx, TCG_COND_NE, ra, disp21, 1);
        break;
    case 0x3D:
        /* BNE */
        ret = gen_bcond(ctx, TCG_COND_NE, ra, disp21, 0);
        break;
    case 0x3E:
        /* BGE */
        ret = gen_bcond(ctx, TCG_COND_GE, ra, disp21, 0);
        break;
    case 0x3F:
        /* BGT */
        ret = gen_bcond(ctx, TCG_COND_GT, ra, disp21, 0);
        break;
    invalid_opc:
        ret = gen_invalid(ctx);
        break;
    }

    return ret;
}

void gen_intermediate_code(CPUState *cs, struct TranslationBlock *tb)
{
    CPUAlphaState *env = cs->env_ptr;
    DisasContext ctx, *ctxp = &ctx;
    target_ulong pc_start;
    target_ulong pc_mask;
    uint32_t insn;
    ExitStatus ret;
    int num_insns;
    int max_insns;

    pc_start = tb->pc;

    ctx.tb = tb;
    ctx.pc = pc_start;
    ctx.tbflags = tb->flags;
    ctx.mem_idx = cpu_mmu_index(env, false);
    ctx.implver = env->implver;
    ctx.amask = env->amask;
    ctx.singlestep_enabled = cs->singlestep_enabled;

#ifdef CONFIG_USER_ONLY
    ctx.ir = cpu_std_ir;
#else
    ctx.palbr = env->palbr;
    ctx.ir = (ctx.tbflags & ENV_FLAG_PAL_MODE ? cpu_pal_ir : cpu_std_ir);
#endif

    /* ??? Every TB begins with unset rounding mode, to be initialized on
       the first fp insn of the TB.  Alternately we could define a proper
       default for every TB (e.g. QUAL_RM_N or QUAL_RM_D) and make sure
       to reset the FP_STATUS to that default at the end of any TB that
       changes the default.  We could even (gasp) dynamically figure out
       what default would be most efficient given the running program. */
    ctx.tb_rm = -1;
    /* Similarly for flush-to-zero. */
    ctx.tb_ftz = -1;

    TCGV_UNUSED_I64(ctx.zero);
    TCGV_UNUSED_I64(ctx.sink);
    TCGV_UNUSED_I64(ctx.lit);

    num_insns = 0;
    max_insns = tb->cflags & CF_COUNT_MASK;
    if (max_insns == 0) {
        max_insns = CF_COUNT_MASK;
    }
    if (max_insns > TCG_MAX_INSNS) {
        max_insns = TCG_MAX_INSNS;
    }
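    /* A superpage is covered by a single 2**41-byte TLB entry, so a TB
       may cross ordinary page boundaries there; otherwise stop at the
       end of the current page. */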
    if (in_superpage(&ctx, pc_start)) {
        pc_mask = (1ULL << 41) - 1;
    } else {
        pc_mask = ~TARGET_PAGE_MASK;
    }

    gen_tb_start(tb);
    tcg_clear_temp_count();

    do {
        tcg_gen_insn_start(ctx.pc);
        num_insns++;

        if (unlikely(cpu_breakpoint_test(cs, ctx.pc, BP_ANY))) {
            ret = gen_excp(&ctx, EXCP_DEBUG, 0);
            /* The address covered by the breakpoint must be included in
               [tb->pc, tb->pc + tb->size) in order for it to be properly
               cleared -- thus we increment the PC here so that the logic
               setting tb->size below does the right thing. */
            ctx.pc += 4;
            break;
        }
        if (num_insns == max_insns && (tb->cflags & CF_LAST_IO)) {
            gen_io_start();
        }
        insn = cpu_ldl_code(env, ctx.pc);

        ctx.pc += 4;
        ret = translate_one(ctxp, insn);
        free_context_temps(ctxp);

        if (tcg_check_temp_count()) {
            qemu_log("TCG temporary leak before "TARGET_FMT_lx"\n", ctx.pc);
        }

        /* If we reach a page boundary, are single stepping,
           or exhaust instruction count, stop generation. */
        if (ret == NO_EXIT
            && ((ctx.pc & pc_mask) == 0
                || tcg_op_buf_full()
                || num_insns >= max_insns
                || singlestep
                || ctx.singlestep_enabled)) {
            ret = EXIT_FALLTHRU;
        }
    } while (ret == NO_EXIT);

    if (tb->cflags & CF_LAST_IO) {
        gen_io_end();
    }

    switch (ret) {
    case EXIT_GOTO_TB:
    case EXIT_NORETURN:
        break;
    case EXIT_FALLTHRU:
        if (use_goto_tb(&ctx, ctx.pc)) {
            tcg_gen_goto_tb(0);
            tcg_gen_movi_i64(cpu_pc, ctx.pc);
            tcg_gen_exit_tb((uintptr_t)ctx.tb);
        }
        /* FALLTHRU */
    case EXIT_PC_STALE:
        tcg_gen_movi_i64(cpu_pc, ctx.pc);
        /* FALLTHRU */
    case EXIT_PC_UPDATED:
        if (!use_exit_tb(&ctx)) {
            tcg_gen_lookup_and_goto_ptr(cpu_pc);
            break;
        }
        /* FALLTHRU */
    case EXIT_PC_UPDATED_NOCHAIN:
        if (ctx.singlestep_enabled) {
            gen_excp_1(EXCP_DEBUG, 0);
        } else {
            tcg_gen_exit_tb(0);
        }
        break;
    default:
        g_assert_not_reached();
    }

    gen_tb_end(tb, num_insns);

    tb->size = ctx.pc - pc_start;
    tb->icount = num_insns;

#ifdef DEBUG_DISAS
    if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)
        && qemu_log_in_addr_range(pc_start)) {
        qemu_log_lock();
        qemu_log("IN: %s\n", lookup_symbol(pc_start));
        log_target_disas(cs, pc_start, ctx.pc - pc_start, 1);
        qemu_log("\n");
        qemu_log_unlock();
    }
#endif
}

void restore_state_to_opc(CPUAlphaState *env, TranslationBlock *tb,
                          target_ulong *data)
{
    env->pc = data[0];
}