[qemu/ar7.git] / target-alpha / translate.c
1 /*
2 * Alpha emulation cpu translation for qemu.
4 * Copyright (c) 2007 Jocelyn Mayer
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
20 #include "cpu.h"
21 #include "disas/disas.h"
22 #include "qemu/host-utils.h"
23 #include "tcg-op.h"
24 #include "exec/cpu_ldst.h"
26 #include "exec/helper-proto.h"
27 #include "exec/helper-gen.h"
29 #include "trace-tcg.h"
32 #undef ALPHA_DEBUG_DISAS
33 #define CONFIG_SOFTFLOAT_INLINE
35 #ifdef ALPHA_DEBUG_DISAS
36 # define LOG_DISAS(...) qemu_log_mask(CPU_LOG_TB_IN_ASM, ## __VA_ARGS__)
37 #else
38 # define LOG_DISAS(...) do { } while (0)
39 #endif
41 typedef struct DisasContext DisasContext;
42 struct DisasContext {
43 struct TranslationBlock *tb;
44 uint64_t pc;
45 int mem_idx;
47 /* Current rounding mode for this TB. */
48 int tb_rm;
49 /* Current flush-to-zero setting for this TB. */
50 int tb_ftz;
52 /* implver value for this CPU. */
53 int implver;
55 /* Temporaries for $31 and $f31 as source and destination. */
56 TCGv zero;
57 TCGv sink;
58 /* Temporary for immediate constants. */
59 TCGv lit;
61 bool singlestep_enabled;
64 /* Return values from translate_one, indicating the state of the TB.
65 Note that zero indicates that we are not exiting the TB. */
67 typedef enum {
68 NO_EXIT,
70 /* We have emitted one or more goto_tb. No fixup required. */
71 EXIT_GOTO_TB,
73 /* We are not using a goto_tb (for whatever reason), but have updated
74 the PC (for whatever reason), so there's no need to do it again on
75 exiting the TB. */
76 EXIT_PC_UPDATED,
78 /* We are exiting the TB, but have neither emitted a goto_tb, nor
79 updated the PC for the next instruction to be executed. */
80 EXIT_PC_STALE,
82 /* We are ending the TB with a noreturn function call, e.g. longjmp.
83 No following code will be executed. */
84 EXIT_NORETURN,
85 } ExitStatus;
87 /* global register indexes */
88 static TCGv_ptr cpu_env;
89 static TCGv cpu_ir[31];
90 static TCGv cpu_fir[31];
91 static TCGv cpu_pc;
92 static TCGv cpu_lock_addr;
93 static TCGv cpu_lock_st_addr;
94 static TCGv cpu_lock_value;
96 #include "exec/gen-icount.h"
98 void alpha_translate_init(void)
100 #define DEF_VAR(V) { &cpu_##V, #V, offsetof(CPUAlphaState, V) }
102 typedef struct { TCGv *var; const char *name; int ofs; } GlobalVar;
103 static const GlobalVar vars[] = {
104 DEF_VAR(pc),
105 DEF_VAR(lock_addr),
106 DEF_VAR(lock_st_addr),
107 DEF_VAR(lock_value),
110 #undef DEF_VAR
112 /* Use the symbolic register names that match the disassembler. */
113 static const char greg_names[31][4] = {
114 "v0", "t0", "t1", "t2", "t3", "t4", "t5", "t6",
115 "t7", "s0", "s1", "s2", "s3", "s4", "s5", "fp",
116 "a0", "a1", "a2", "a3", "a4", "a5", "t8", "t9",
117 "t10", "t11", "ra", "t12", "at", "gp", "sp"
119 static const char freg_names[31][4] = {
120 "f0", "f1", "f2", "f3", "f4", "f5", "f6", "f7",
121 "f8", "f9", "f10", "f11", "f12", "f13", "f14", "f15",
122 "f16", "f17", "f18", "f19", "f20", "f21", "f22", "f23",
123 "f24", "f25", "f26", "f27", "f28", "f29", "f30"
126     static bool done_init = false;
127 int i;
129 if (done_init) {
130 return;
132     done_init = true;
134 cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");
136 for (i = 0; i < 31; i++) {
137 cpu_ir[i] = tcg_global_mem_new_i64(TCG_AREG0,
138 offsetof(CPUAlphaState, ir[i]),
139 greg_names[i]);
142 for (i = 0; i < 31; i++) {
143 cpu_fir[i] = tcg_global_mem_new_i64(TCG_AREG0,
144 offsetof(CPUAlphaState, fir[i]),
145 freg_names[i]);
148 for (i = 0; i < ARRAY_SIZE(vars); ++i) {
149 const GlobalVar *v = &vars[i];
150 *v->var = tcg_global_mem_new_i64(TCG_AREG0, v->ofs, v->name);
154 static TCGv load_zero(DisasContext *ctx)
156 if (TCGV_IS_UNUSED_I64(ctx->zero)) {
157 ctx->zero = tcg_const_i64(0);
159 return ctx->zero;
162 static TCGv dest_sink(DisasContext *ctx)
164 if (TCGV_IS_UNUSED_I64(ctx->sink)) {
165 ctx->sink = tcg_temp_new();
167 return ctx->sink;
170 static TCGv load_gpr(DisasContext *ctx, unsigned reg)
172 if (likely(reg < 31)) {
173 return cpu_ir[reg];
174 } else {
175 return load_zero(ctx);
179 static TCGv load_gpr_lit(DisasContext *ctx, unsigned reg,
180 uint8_t lit, bool islit)
182 if (islit) {
183 ctx->lit = tcg_const_i64(lit);
184 return ctx->lit;
185 } else if (likely(reg < 31)) {
186 return cpu_ir[reg];
187 } else {
188 return load_zero(ctx);
192 static TCGv dest_gpr(DisasContext *ctx, unsigned reg)
194 if (likely(reg < 31)) {
195 return cpu_ir[reg];
196 } else {
197 return dest_sink(ctx);
201 static TCGv load_fpr(DisasContext *ctx, unsigned reg)
203 if (likely(reg < 31)) {
204 return cpu_fir[reg];
205 } else {
206 return load_zero(ctx);
210 static TCGv dest_fpr(DisasContext *ctx, unsigned reg)
212 if (likely(reg < 31)) {
213 return cpu_fir[reg];
214 } else {
215 return dest_sink(ctx);
219 static void gen_excp_1(int exception, int error_code)
221 TCGv_i32 tmp1, tmp2;
223 tmp1 = tcg_const_i32(exception);
224 tmp2 = tcg_const_i32(error_code);
225 gen_helper_excp(cpu_env, tmp1, tmp2);
226 tcg_temp_free_i32(tmp2);
227 tcg_temp_free_i32(tmp1);
230 static ExitStatus gen_excp(DisasContext *ctx, int exception, int error_code)
232 tcg_gen_movi_i64(cpu_pc, ctx->pc);
233 gen_excp_1(exception, error_code);
234 return EXIT_NORETURN;
237 static inline ExitStatus gen_invalid(DisasContext *ctx)
239 return gen_excp(ctx, EXCP_OPCDEC, 0);
242 static inline void gen_qemu_ldf(TCGv t0, TCGv t1, int flags)
244 TCGv_i32 tmp32 = tcg_temp_new_i32();
245 tcg_gen_qemu_ld_i32(tmp32, t1, flags, MO_LEUL);
246 gen_helper_memory_to_f(t0, tmp32);
247 tcg_temp_free_i32(tmp32);
250 static inline void gen_qemu_ldg(TCGv t0, TCGv t1, int flags)
252 TCGv tmp = tcg_temp_new();
253 tcg_gen_qemu_ld_i64(tmp, t1, flags, MO_LEQ);
254 gen_helper_memory_to_g(t0, tmp);
255 tcg_temp_free(tmp);
258 static inline void gen_qemu_lds(TCGv t0, TCGv t1, int flags)
260 TCGv_i32 tmp32 = tcg_temp_new_i32();
261 tcg_gen_qemu_ld_i32(tmp32, t1, flags, MO_LEUL);
262 gen_helper_memory_to_s(t0, tmp32);
263 tcg_temp_free_i32(tmp32);
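/* The load-locked variants below also record the address and the value
   just loaded in cpu_lock_addr/cpu_lock_value, so that the matching
   store-conditional emitted by gen_store_conditional can check that the
   reservation is still intact before performing its store. */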
266 static inline void gen_qemu_ldl_l(TCGv t0, TCGv t1, int flags)
268 tcg_gen_qemu_ld_i64(t0, t1, flags, MO_LESL);
269 tcg_gen_mov_i64(cpu_lock_addr, t1);
270 tcg_gen_mov_i64(cpu_lock_value, t0);
273 static inline void gen_qemu_ldq_l(TCGv t0, TCGv t1, int flags)
275 tcg_gen_qemu_ld_i64(t0, t1, flags, MO_LEQ);
276 tcg_gen_mov_i64(cpu_lock_addr, t1);
277 tcg_gen_mov_i64(cpu_lock_value, t0);
280 static inline void gen_load_mem(DisasContext *ctx,
281 void (*tcg_gen_qemu_load)(TCGv t0, TCGv t1,
282 int flags),
283 int ra, int rb, int32_t disp16, bool fp,
284 bool clear)
286 TCGv tmp, addr, va;
288 /* LDQ_U with ra $31 is UNOP. Other various loads are forms of
289 prefetches, which we can treat as nops. No worries about
290 missed exceptions here. */
291 if (unlikely(ra == 31)) {
292 return;
295 tmp = tcg_temp_new();
296 addr = load_gpr(ctx, rb);
298 if (disp16) {
299 tcg_gen_addi_i64(tmp, addr, disp16);
300 addr = tmp;
302 if (clear) {
303 tcg_gen_andi_i64(tmp, addr, ~0x7);
304 addr = tmp;
307 va = (fp ? cpu_fir[ra] : cpu_ir[ra]);
308 tcg_gen_qemu_load(va, addr, ctx->mem_idx);
310 tcg_temp_free(tmp);
313 static inline void gen_qemu_stf(TCGv t0, TCGv t1, int flags)
315 TCGv_i32 tmp32 = tcg_temp_new_i32();
316 gen_helper_f_to_memory(tmp32, t0);
317 tcg_gen_qemu_st_i32(tmp32, t1, flags, MO_LEUL);
318 tcg_temp_free_i32(tmp32);
321 static inline void gen_qemu_stg(TCGv t0, TCGv t1, int flags)
323 TCGv tmp = tcg_temp_new();
324 gen_helper_g_to_memory(tmp, t0);
325 tcg_gen_qemu_st_i64(tmp, t1, flags, MO_LEQ);
326 tcg_temp_free(tmp);
329 static inline void gen_qemu_sts(TCGv t0, TCGv t1, int flags)
331 TCGv_i32 tmp32 = tcg_temp_new_i32();
332 gen_helper_s_to_memory(tmp32, t0);
333 tcg_gen_qemu_st_i32(tmp32, t1, flags, MO_LEUL);
334 tcg_temp_free_i32(tmp32);
337 static inline void gen_store_mem(DisasContext *ctx,
338 void (*tcg_gen_qemu_store)(TCGv t0, TCGv t1,
339 int flags),
340 int ra, int rb, int32_t disp16, bool fp,
341 bool clear)
343 TCGv tmp, addr, va;
345 tmp = tcg_temp_new();
346 addr = load_gpr(ctx, rb);
348 if (disp16) {
349 tcg_gen_addi_i64(tmp, addr, disp16);
350 addr = tmp;
352 if (clear) {
353 tcg_gen_andi_i64(tmp, addr, ~0x7);
354 addr = tmp;
357 va = (fp ? load_fpr(ctx, ra) : load_gpr(ctx, ra));
358 tcg_gen_qemu_store(va, addr, ctx->mem_idx);
360 tcg_temp_free(tmp);
363 static ExitStatus gen_store_conditional(DisasContext *ctx, int ra, int rb,
364 int32_t disp16, int quad)
366 TCGv addr;
368 if (ra == 31) {
369 /* ??? Don't bother storing anything. The user can't tell
370 the difference, since the zero register always reads zero. */
371 return NO_EXIT;
374 #if defined(CONFIG_USER_ONLY)
375 addr = cpu_lock_st_addr;
376 #else
377 addr = tcg_temp_local_new();
378 #endif
380 tcg_gen_addi_i64(addr, load_gpr(ctx, rb), disp16);
382 #if defined(CONFIG_USER_ONLY)
383 /* ??? This is handled via a complicated version of compare-and-swap
384 in the cpu_loop. Hopefully one day we'll have a real CAS opcode
385 in TCG so that this isn't necessary. */
386 return gen_excp(ctx, quad ? EXCP_STQ_C : EXCP_STL_C, ra);
387 #else
388 /* ??? In system mode we are never multi-threaded, so CAS can be
389 implemented via a non-atomic load-compare-store sequence. */
391 TCGLabel *lab_fail, *lab_done;
392 TCGv val;
394 lab_fail = gen_new_label();
395 lab_done = gen_new_label();
396 tcg_gen_brcond_i64(TCG_COND_NE, addr, cpu_lock_addr, lab_fail);
398 val = tcg_temp_new();
399 tcg_gen_qemu_ld_i64(val, addr, ctx->mem_idx, quad ? MO_LEQ : MO_LESL);
400 tcg_gen_brcond_i64(TCG_COND_NE, val, cpu_lock_value, lab_fail);
402 tcg_gen_qemu_st_i64(cpu_ir[ra], addr, ctx->mem_idx,
403 quad ? MO_LEQ : MO_LEUL);
404 tcg_gen_movi_i64(cpu_ir[ra], 1);
405 tcg_gen_br(lab_done);
407 gen_set_label(lab_fail);
408 tcg_gen_movi_i64(cpu_ir[ra], 0);
410 gen_set_label(lab_done);
411 tcg_gen_movi_i64(cpu_lock_addr, -1);
413 tcg_temp_free(addr);
414 return NO_EXIT;
416 #endif
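/* Identify addresses in the kernel superpage (KSEG) segment, e.g.
   0xfffffc0000000000: a negative address whose bits <42:41> are 10 and
   which is properly sign-extended from the implemented virtual address
   width.  Such mappings are fixed, so branches within them can always
   be chained. */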
419 static bool in_superpage(DisasContext *ctx, int64_t addr)
421 return ((ctx->tb->flags & TB_FLAGS_USER_MODE) == 0
422 && addr < 0
423 && ((addr >> 41) & 3) == 2
424 && addr >> TARGET_VIRT_ADDR_SPACE_BITS == addr >> 63);
427 static bool use_goto_tb(DisasContext *ctx, uint64_t dest)
429     /* Suppress goto_tb in the case of single-stepping and IO. */
430 if ((ctx->tb->cflags & CF_LAST_IO)
431 || ctx->singlestep_enabled || singlestep) {
432 return false;
434 /* If the destination is in the superpage, the page perms can't change. */
435 if (in_superpage(ctx, dest)) {
436 return true;
438 /* Check for the dest on the same page as the start of the TB. */
439 return ((ctx->tb->pc ^ dest) & TARGET_PAGE_MASK) == 0;
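/* Direct branches: the 21-bit displacement counts instructions, hence
   the "disp << 2" when forming the byte address of the target, and RA
   (when not $31) receives the address of the following instruction. */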
442 static ExitStatus gen_bdirect(DisasContext *ctx, int ra, int32_t disp)
444 uint64_t dest = ctx->pc + (disp << 2);
446 if (ra != 31) {
447 tcg_gen_movi_i64(cpu_ir[ra], ctx->pc);
450 /* Notice branch-to-next; used to initialize RA with the PC. */
451 if (disp == 0) {
452         return NO_EXIT;
453 } else if (use_goto_tb(ctx, dest)) {
454 tcg_gen_goto_tb(0);
455 tcg_gen_movi_i64(cpu_pc, dest);
456 tcg_gen_exit_tb((uintptr_t)ctx->tb);
457 return EXIT_GOTO_TB;
458 } else {
459 tcg_gen_movi_i64(cpu_pc, dest);
460 return EXIT_PC_UPDATED;
464 static ExitStatus gen_bcond_internal(DisasContext *ctx, TCGCond cond,
465 TCGv cmp, int32_t disp)
467 uint64_t dest = ctx->pc + (disp << 2);
468 TCGLabel *lab_true = gen_new_label();
470 if (use_goto_tb(ctx, dest)) {
471 tcg_gen_brcondi_i64(cond, cmp, 0, lab_true);
473 tcg_gen_goto_tb(0);
474 tcg_gen_movi_i64(cpu_pc, ctx->pc);
475 tcg_gen_exit_tb((uintptr_t)ctx->tb);
477 gen_set_label(lab_true);
478 tcg_gen_goto_tb(1);
479 tcg_gen_movi_i64(cpu_pc, dest);
480 tcg_gen_exit_tb((uintptr_t)ctx->tb + 1);
482 return EXIT_GOTO_TB;
483 } else {
484 TCGv_i64 z = tcg_const_i64(0);
485 TCGv_i64 d = tcg_const_i64(dest);
486 TCGv_i64 p = tcg_const_i64(ctx->pc);
488 tcg_gen_movcond_i64(cond, cpu_pc, cmp, z, d, p);
490 tcg_temp_free_i64(z);
491 tcg_temp_free_i64(d);
492 tcg_temp_free_i64(p);
493 return EXIT_PC_UPDATED;
497 static ExitStatus gen_bcond(DisasContext *ctx, TCGCond cond, int ra,
498 int32_t disp, int mask)
500 TCGv cmp_tmp;
502 if (mask) {
503 cmp_tmp = tcg_temp_new();
504 tcg_gen_andi_i64(cmp_tmp, load_gpr(ctx, ra), 1);
505 } else {
506 cmp_tmp = load_gpr(ctx, ra);
509 return gen_bcond_internal(ctx, cond, cmp_tmp, disp);
512 /* Fold -0.0 for comparison with COND. */
514 static void gen_fold_mzero(TCGCond cond, TCGv dest, TCGv src)
516 uint64_t mzero = 1ull << 63;
518 switch (cond) {
519 case TCG_COND_LE:
520 case TCG_COND_GT:
521 /* For <= or >, the -0.0 value directly compares the way we want. */
522 tcg_gen_mov_i64(dest, src);
523 break;
525 case TCG_COND_EQ:
526 case TCG_COND_NE:
527 /* For == or !=, we can simply mask off the sign bit and compare. */
528 tcg_gen_andi_i64(dest, src, mzero - 1);
529 break;
531 case TCG_COND_GE:
532 case TCG_COND_LT:
533 /* For >= or <, map -0.0 to +0.0 via comparison and mask. */
534 tcg_gen_setcondi_i64(TCG_COND_NE, dest, src, mzero);
535 tcg_gen_neg_i64(dest, dest);
536 tcg_gen_and_i64(dest, dest, src);
537 break;
539 default:
540 abort();
544 static ExitStatus gen_fbcond(DisasContext *ctx, TCGCond cond, int ra,
545 int32_t disp)
547 TCGv cmp_tmp = tcg_temp_new();
548 gen_fold_mzero(cond, cmp_tmp, load_fpr(ctx, ra));
549 return gen_bcond_internal(ctx, cond, cmp_tmp, disp);
552 static void gen_fcmov(DisasContext *ctx, TCGCond cond, int ra, int rb, int rc)
554 TCGv_i64 va, vb, z;
556 z = load_zero(ctx);
557 vb = load_fpr(ctx, rb);
558 va = tcg_temp_new();
559 gen_fold_mzero(cond, va, load_fpr(ctx, ra));
561 tcg_gen_movcond_i64(cond, dest_fpr(ctx, rc), va, z, vb, load_fpr(ctx, rc));
563 tcg_temp_free(va);
566 #define QUAL_RM_N 0x080 /* Round mode nearest even */
567 #define QUAL_RM_C 0x000 /* Round mode chopped */
568 #define QUAL_RM_M 0x040 /* Round mode minus infinity */
569 #define QUAL_RM_D 0x0c0 /* Round mode dynamic */
570 #define QUAL_RM_MASK 0x0c0
572 #define QUAL_U 0x100 /* Underflow enable (fp output) */
573 #define QUAL_V 0x100 /* Overflow enable (int output) */
574 #define QUAL_S 0x400 /* Software completion enable */
575 #define QUAL_I 0x200 /* Inexact detection enable */
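/* An IEEE opcode's fn11 field packs the rounding-mode choice in bits
   <7:6> and the trap qualifiers (/U or /V, /I, /S) in the bits defined
   above.  The rounding and flush-to-zero settings are cached per TB in
   tb_rm/tb_ftz so that a run of instructions with identical qualifiers
   only stores to fp_status once. */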
577 static void gen_qual_roundmode(DisasContext *ctx, int fn11)
579 TCGv_i32 tmp;
581 fn11 &= QUAL_RM_MASK;
582 if (fn11 == ctx->tb_rm) {
583 return;
585 ctx->tb_rm = fn11;
587 tmp = tcg_temp_new_i32();
588 switch (fn11) {
589 case QUAL_RM_N:
590 tcg_gen_movi_i32(tmp, float_round_nearest_even);
591 break;
592 case QUAL_RM_C:
593 tcg_gen_movi_i32(tmp, float_round_to_zero);
594 break;
595 case QUAL_RM_M:
596 tcg_gen_movi_i32(tmp, float_round_down);
597 break;
598 case QUAL_RM_D:
599 tcg_gen_ld8u_i32(tmp, cpu_env,
600 offsetof(CPUAlphaState, fpcr_dyn_round));
601 break;
604 #if defined(CONFIG_SOFTFLOAT_INLINE)
605 /* ??? The "fpu/softfloat.h" interface is to call set_float_rounding_mode.
606 With CONFIG_SOFTFLOAT that expands to an out-of-line call that just
607 sets the one field. */
608 tcg_gen_st8_i32(tmp, cpu_env,
609 offsetof(CPUAlphaState, fp_status.float_rounding_mode));
610 #else
611 gen_helper_setroundmode(tmp);
612 #endif
614 tcg_temp_free_i32(tmp);
617 static void gen_qual_flushzero(DisasContext *ctx, int fn11)
619 TCGv_i32 tmp;
621 fn11 &= QUAL_U;
622 if (fn11 == ctx->tb_ftz) {
623 return;
625 ctx->tb_ftz = fn11;
627 tmp = tcg_temp_new_i32();
628 if (fn11) {
629 /* Underflow is enabled, use the FPCR setting. */
630 tcg_gen_ld8u_i32(tmp, cpu_env,
631 offsetof(CPUAlphaState, fpcr_flush_to_zero));
632 } else {
633 /* Underflow is disabled, force flush-to-zero. */
634 tcg_gen_movi_i32(tmp, 1);
637 #if defined(CONFIG_SOFTFLOAT_INLINE)
638 tcg_gen_st8_i32(tmp, cpu_env,
639 offsetof(CPUAlphaState, fp_status.flush_to_zero));
640 #else
641 gen_helper_setflushzero(tmp);
642 #endif
644 tcg_temp_free_i32(tmp);
647 static TCGv gen_ieee_input(DisasContext *ctx, int reg, int fn11, int is_cmp)
649 TCGv val;
651 if (unlikely(reg == 31)) {
652 val = load_zero(ctx);
653 } else {
654 val = cpu_fir[reg];
655 if ((fn11 & QUAL_S) == 0) {
656 if (is_cmp) {
657 gen_helper_ieee_input_cmp(cpu_env, val);
658 } else {
659 gen_helper_ieee_input(cpu_env, val);
663 return val;
666 static void gen_fp_exc_clear(void)
668 #if defined(CONFIG_SOFTFLOAT_INLINE)
669 TCGv_i32 zero = tcg_const_i32(0);
670 tcg_gen_st8_i32(zero, cpu_env,
671 offsetof(CPUAlphaState, fp_status.float_exception_flags));
672 tcg_temp_free_i32(zero);
673 #else
674 gen_helper_fp_exc_clear(cpu_env);
675 #endif
678 static void gen_fp_exc_raise_ignore(int rc, int fn11, int ignore)
680 /* ??? We ought to be able to do something with imprecise exceptions.
681 E.g. notice we're still in the trap shadow of something within the
682 TB and do not generate the code to signal the exception; end the TB
683 when an exception is forced to arrive, either by consumption of a
684 register value or TRAPB or EXCB. */
685 TCGv_i32 exc = tcg_temp_new_i32();
686 TCGv_i32 reg;
688 #if defined(CONFIG_SOFTFLOAT_INLINE)
689 tcg_gen_ld8u_i32(exc, cpu_env,
690 offsetof(CPUAlphaState, fp_status.float_exception_flags));
691 #else
692 gen_helper_fp_exc_get(exc, cpu_env);
693 #endif
695 if (ignore) {
696 tcg_gen_andi_i32(exc, exc, ~ignore);
699 /* ??? Pass in the regno of the destination so that the helper can
700 set EXC_MASK, which contains a bitmask of destination registers
701 that have caused arithmetic traps. A simple userspace emulation
702 does not require this. We do need it for a guest kernel's entArith,
703 or if we were to do something clever with imprecise exceptions. */
704 reg = tcg_const_i32(rc + 32);
706 if (fn11 & QUAL_S) {
707 gen_helper_fp_exc_raise_s(cpu_env, exc, reg);
708 } else {
709 gen_helper_fp_exc_raise(cpu_env, exc, reg);
712 tcg_temp_free_i32(reg);
713 tcg_temp_free_i32(exc);
716 static inline void gen_fp_exc_raise(int rc, int fn11)
718 gen_fp_exc_raise_ignore(rc, fn11, fn11 & QUAL_I ? 0 : float_flag_inexact);
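/* CVTLQ/CVTQL convert between a canonical 64-bit integer and the format
   Alpha uses to hold a longword in an FP register: bits <31:30> of the
   longword are kept at <63:62> and bits <29:0> at <58:29>. */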
721 static void gen_fcvtlq(TCGv vc, TCGv vb)
723 TCGv tmp = tcg_temp_new();
725 /* The arithmetic right shift here, plus the sign-extended mask below
726 yields a sign-extended result without an explicit ext32s_i64. */
727 tcg_gen_sari_i64(tmp, vb, 32);
728 tcg_gen_shri_i64(vc, vb, 29);
729 tcg_gen_andi_i64(tmp, tmp, (int32_t)0xc0000000);
730 tcg_gen_andi_i64(vc, vc, 0x3fffffff);
731 tcg_gen_or_i64(vc, vc, tmp);
733 tcg_temp_free(tmp);
736 static void gen_fcvtql(TCGv vc, TCGv vb)
738 TCGv tmp = tcg_temp_new();
740 tcg_gen_andi_i64(tmp, vb, (int32_t)0xc0000000);
741 tcg_gen_andi_i64(vc, vb, 0x3FFFFFFF);
742 tcg_gen_shli_i64(tmp, tmp, 32);
743 tcg_gen_shli_i64(vc, vc, 29);
744 tcg_gen_or_i64(vc, vc, tmp);
746 tcg_temp_free(tmp);
749 static void gen_ieee_arith2(DisasContext *ctx,
750 void (*helper)(TCGv, TCGv_ptr, TCGv),
751 int rb, int rc, int fn11)
753 TCGv vb;
755 gen_qual_roundmode(ctx, fn11);
756 gen_qual_flushzero(ctx, fn11);
757 gen_fp_exc_clear();
759 vb = gen_ieee_input(ctx, rb, fn11, 0);
760 helper(dest_fpr(ctx, rc), cpu_env, vb);
762 gen_fp_exc_raise(rc, fn11);
765 #define IEEE_ARITH2(name) \
766 static inline void glue(gen_f, name)(DisasContext *ctx, \
767 int rb, int rc, int fn11) \
769 gen_ieee_arith2(ctx, gen_helper_##name, rb, rc, fn11); \
771 IEEE_ARITH2(sqrts)
772 IEEE_ARITH2(sqrtt)
773 IEEE_ARITH2(cvtst)
774 IEEE_ARITH2(cvtts)
776 static void gen_fcvttq(DisasContext *ctx, int rb, int rc, int fn11)
778 TCGv vb, vc;
779 int ignore = 0;
781 /* No need to set flushzero, since we have an integer output. */
782 gen_fp_exc_clear();
783 vb = gen_ieee_input(ctx, rb, fn11, 0);
784 vc = dest_fpr(ctx, rc);
786 /* Almost all integer conversions use cropped rounding, and most
787 also do not have integer overflow enabled. Special case that. */
788 switch (fn11) {
789 case QUAL_RM_C:
790 gen_helper_cvttq_c(vc, cpu_env, vb);
791 break;
792 case QUAL_V | QUAL_RM_C:
793 case QUAL_S | QUAL_V | QUAL_RM_C:
794 ignore = float_flag_inexact;
795 /* FALLTHRU */
796 case QUAL_S | QUAL_V | QUAL_I | QUAL_RM_C:
797 gen_helper_cvttq_svic(vc, cpu_env, vb);
798 break;
799 default:
800 gen_qual_roundmode(ctx, fn11);
801 gen_helper_cvttq(vc, cpu_env, vb);
802 ignore |= (fn11 & QUAL_V ? 0 : float_flag_overflow);
803 ignore |= (fn11 & QUAL_I ? 0 : float_flag_inexact);
804 break;
807 gen_fp_exc_raise_ignore(rc, fn11, ignore);
810 static void gen_ieee_intcvt(DisasContext *ctx,
811 void (*helper)(TCGv, TCGv_ptr, TCGv),
812 int rb, int rc, int fn11)
814 TCGv vb, vc;
816 gen_qual_roundmode(ctx, fn11);
817 vb = load_fpr(ctx, rb);
818 vc = dest_fpr(ctx, rc);
820 /* The only exception that can be raised by integer conversion
821 is inexact. Thus we only need to worry about exceptions when
822 inexact handling is requested. */
823 if (fn11 & QUAL_I) {
824 gen_fp_exc_clear();
825 helper(vc, cpu_env, vb);
826 gen_fp_exc_raise(rc, fn11);
827 } else {
828 helper(vc, cpu_env, vb);
832 #define IEEE_INTCVT(name) \
833 static inline void glue(gen_f, name)(DisasContext *ctx, \
834 int rb, int rc, int fn11) \
836 gen_ieee_intcvt(ctx, gen_helper_##name, rb, rc, fn11); \
838 IEEE_INTCVT(cvtqs)
839 IEEE_INTCVT(cvtqt)
841 static void gen_cpy_mask(TCGv vc, TCGv va, TCGv vb, bool inv_a, uint64_t mask)
843 TCGv vmask = tcg_const_i64(mask);
844 TCGv tmp = tcg_temp_new_i64();
846 if (inv_a) {
847 tcg_gen_andc_i64(tmp, vmask, va);
848 } else {
849 tcg_gen_and_i64(tmp, va, vmask);
852 tcg_gen_andc_i64(vc, vb, vmask);
853 tcg_gen_or_i64(vc, vc, tmp);
855 tcg_temp_free(vmask);
856 tcg_temp_free(tmp);
859 static void gen_ieee_arith3(DisasContext *ctx,
860 void (*helper)(TCGv, TCGv_ptr, TCGv, TCGv),
861 int ra, int rb, int rc, int fn11)
863 TCGv va, vb, vc;
865 gen_qual_roundmode(ctx, fn11);
866 gen_qual_flushzero(ctx, fn11);
867 gen_fp_exc_clear();
869 va = gen_ieee_input(ctx, ra, fn11, 0);
870 vb = gen_ieee_input(ctx, rb, fn11, 0);
871 vc = dest_fpr(ctx, rc);
872 helper(vc, cpu_env, va, vb);
874 gen_fp_exc_raise(rc, fn11);
877 #define IEEE_ARITH3(name) \
878 static inline void glue(gen_f, name)(DisasContext *ctx, \
879 int ra, int rb, int rc, int fn11) \
881 gen_ieee_arith3(ctx, gen_helper_##name, ra, rb, rc, fn11); \
883 IEEE_ARITH3(adds)
884 IEEE_ARITH3(subs)
885 IEEE_ARITH3(muls)
886 IEEE_ARITH3(divs)
887 IEEE_ARITH3(addt)
888 IEEE_ARITH3(subt)
889 IEEE_ARITH3(mult)
890 IEEE_ARITH3(divt)
892 static void gen_ieee_compare(DisasContext *ctx,
893 void (*helper)(TCGv, TCGv_ptr, TCGv, TCGv),
894 int ra, int rb, int rc, int fn11)
896 TCGv va, vb, vc;
898 gen_fp_exc_clear();
900 va = gen_ieee_input(ctx, ra, fn11, 1);
901 vb = gen_ieee_input(ctx, rb, fn11, 1);
902 vc = dest_fpr(ctx, rc);
903 helper(vc, cpu_env, va, vb);
905 gen_fp_exc_raise(rc, fn11);
908 #define IEEE_CMP3(name) \
909 static inline void glue(gen_f, name)(DisasContext *ctx, \
910 int ra, int rb, int rc, int fn11) \
912 gen_ieee_compare(ctx, gen_helper_##name, ra, rb, rc, fn11); \
914 IEEE_CMP3(cmptun)
915 IEEE_CMP3(cmpteq)
916 IEEE_CMP3(cmptlt)
917 IEEE_CMP3(cmptle)
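/* Expand the 8-bit byte mask of ZAPNOT into the corresponding 64-bit
   bit mask; e.g. a mask of 0x0f selects the low four bytes and yields
   0x00000000ffffffff. */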
919 static inline uint64_t zapnot_mask(uint8_t lit)
921 uint64_t mask = 0;
922 int i;
924 for (i = 0; i < 8; ++i) {
925 if ((lit >> i) & 1) {
926 mask |= 0xffull << (i * 8);
929 return mask;
932 /* Implement zapnot with an immediate operand, which expands to some
933 form of immediate AND. This is a basic building block in the
934 definition of many of the other byte manipulation instructions. */
935 static void gen_zapnoti(TCGv dest, TCGv src, uint8_t lit)
937 switch (lit) {
938 case 0x00:
939 tcg_gen_movi_i64(dest, 0);
940 break;
941 case 0x01:
942 tcg_gen_ext8u_i64(dest, src);
943 break;
944 case 0x03:
945 tcg_gen_ext16u_i64(dest, src);
946 break;
947 case 0x0f:
948 tcg_gen_ext32u_i64(dest, src);
949 break;
950 case 0xff:
951 tcg_gen_mov_i64(dest, src);
952 break;
953 default:
954 tcg_gen_andi_i64(dest, src, zapnot_mask(lit));
955 break;
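/* The EXTx/INSx/MSKx generators below implement the byte-manipulation
   instructions as shift-plus-zapnot sequences.  The byte_mask argument
   is the implicit ZAPNOT mask for the data size: 0x01 for byte, 0x03
   for word, 0x0f for longword and 0xff for quadword. */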
959 /* EXTWH, EXTLH, EXTQH */
960 static void gen_ext_h(DisasContext *ctx, TCGv vc, TCGv va, int rb, bool islit,
961 uint8_t lit, uint8_t byte_mask)
963 if (islit) {
964 tcg_gen_shli_i64(vc, va, (64 - lit * 8) & 0x3f);
965 } else {
966 TCGv tmp = tcg_temp_new();
967 tcg_gen_shli_i64(tmp, load_gpr(ctx, rb), 3);
968 tcg_gen_neg_i64(tmp, tmp);
969 tcg_gen_andi_i64(tmp, tmp, 0x3f);
970 tcg_gen_shl_i64(vc, va, tmp);
971 tcg_temp_free(tmp);
973 gen_zapnoti(vc, vc, byte_mask);
976 /* EXTBL, EXTWL, EXTLL, EXTQL */
977 static void gen_ext_l(DisasContext *ctx, TCGv vc, TCGv va, int rb, bool islit,
978 uint8_t lit, uint8_t byte_mask)
980 if (islit) {
981 tcg_gen_shri_i64(vc, va, (lit & 7) * 8);
982 } else {
983 TCGv tmp = tcg_temp_new();
984 tcg_gen_andi_i64(tmp, load_gpr(ctx, rb), 7);
985 tcg_gen_shli_i64(tmp, tmp, 3);
986 tcg_gen_shr_i64(vc, va, tmp);
987 tcg_temp_free(tmp);
989 gen_zapnoti(vc, vc, byte_mask);
992 /* INSWH, INSLH, INSQH */
993 static void gen_ins_h(DisasContext *ctx, TCGv vc, TCGv va, int rb, bool islit,
994 uint8_t lit, uint8_t byte_mask)
996 TCGv tmp = tcg_temp_new();
998 /* The instruction description has us left-shift the byte mask and extract
999 bits <15:8> and apply that zap at the end. This is equivalent to simply
1000 performing the zap first and shifting afterward. */
1001 gen_zapnoti(tmp, va, byte_mask);
1003 if (islit) {
1004 lit &= 7;
1005 if (unlikely(lit == 0)) {
1006 tcg_gen_movi_i64(vc, 0);
1007 } else {
1008 tcg_gen_shri_i64(vc, tmp, 64 - lit * 8);
1010 } else {
1011 TCGv shift = tcg_temp_new();
1013 /* If (B & 7) == 0, we need to shift by 64 and leave a zero. Do this
1014 portably by splitting the shift into two parts: shift_count-1 and 1.
1015 Arrange for the -1 by using ones-complement instead of
1016 twos-complement in the negation: ~(B * 8) & 63. */
1018 tcg_gen_shli_i64(shift, load_gpr(ctx, rb), 3);
1019 tcg_gen_not_i64(shift, shift);
1020 tcg_gen_andi_i64(shift, shift, 0x3f);
1022 tcg_gen_shr_i64(vc, tmp, shift);
1023 tcg_gen_shri_i64(vc, vc, 1);
1024 tcg_temp_free(shift);
1026 tcg_temp_free(tmp);
1029 /* INSBL, INSWL, INSLL, INSQL */
1030 static void gen_ins_l(DisasContext *ctx, TCGv vc, TCGv va, int rb, bool islit,
1031 uint8_t lit, uint8_t byte_mask)
1033 TCGv tmp = tcg_temp_new();
1035 /* The instruction description has us left-shift the byte mask
1036 the same number of byte slots as the data and apply the zap
1037 at the end. This is equivalent to simply performing the zap
1038 first and shifting afterward. */
1039 gen_zapnoti(tmp, va, byte_mask);
1041 if (islit) {
1042 tcg_gen_shli_i64(vc, tmp, (lit & 7) * 8);
1043 } else {
1044 TCGv shift = tcg_temp_new();
1045 tcg_gen_andi_i64(shift, load_gpr(ctx, rb), 7);
1046 tcg_gen_shli_i64(shift, shift, 3);
1047 tcg_gen_shl_i64(vc, tmp, shift);
1048 tcg_temp_free(shift);
1050 tcg_temp_free(tmp);
1053 /* MSKWH, MSKLH, MSKQH */
1054 static void gen_msk_h(DisasContext *ctx, TCGv vc, TCGv va, int rb, bool islit,
1055 uint8_t lit, uint8_t byte_mask)
1057 if (islit) {
1058 gen_zapnoti(vc, va, ~((byte_mask << (lit & 7)) >> 8));
1059 } else {
1060 TCGv shift = tcg_temp_new();
1061 TCGv mask = tcg_temp_new();
1063 /* The instruction description is as above, where the byte_mask
1064 is shifted left, and then we extract bits <15:8>. This can be
1065 emulated with a right-shift on the expanded byte mask. This
1066 requires extra care because for an input <2:0> == 0 we need a
1067 shift of 64 bits in order to generate a zero. This is done by
1068 splitting the shift into two parts, the variable shift - 1
1069 followed by a constant 1 shift. The code we expand below is
1070 equivalent to ~(B * 8) & 63. */
1072 tcg_gen_shli_i64(shift, load_gpr(ctx, rb), 3);
1073 tcg_gen_not_i64(shift, shift);
1074 tcg_gen_andi_i64(shift, shift, 0x3f);
1076         tcg_gen_movi_i64(mask, zapnot_mask(byte_mask));
1076 tcg_gen_shr_i64(mask, mask, shift);
1077 tcg_gen_shri_i64(mask, mask, 1);
1079 tcg_gen_andc_i64(vc, va, mask);
1081 tcg_temp_free(mask);
1082 tcg_temp_free(shift);
1086 /* MSKBL, MSKWL, MSKLL, MSKQL */
1087 static void gen_msk_l(DisasContext *ctx, TCGv vc, TCGv va, int rb, bool islit,
1088 uint8_t lit, uint8_t byte_mask)
1090 if (islit) {
1091 gen_zapnoti(vc, va, ~(byte_mask << (lit & 7)));
1092 } else {
1093 TCGv shift = tcg_temp_new();
1094 TCGv mask = tcg_temp_new();
1096 tcg_gen_andi_i64(shift, load_gpr(ctx, rb), 7);
1097 tcg_gen_shli_i64(shift, shift, 3);
1098 tcg_gen_movi_i64(mask, zapnot_mask(byte_mask));
1099 tcg_gen_shl_i64(mask, mask, shift);
1101 tcg_gen_andc_i64(vc, va, mask);
1103 tcg_temp_free(mask);
1104 tcg_temp_free(shift);
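/* RC and RS read the per-cpu interrupt flag into Ra and then clear or
   set it; gen_rx implements both, with SET providing the new value. */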
1108 static void gen_rx(int ra, int set)
1110 TCGv_i32 tmp;
1112 if (ra != 31) {
1113 tcg_gen_ld8u_i64(cpu_ir[ra], cpu_env, offsetof(CPUAlphaState, intr_flag));
1116 tmp = tcg_const_i32(set);
1117 tcg_gen_st8_i32(tmp, cpu_env, offsetof(CPUAlphaState, intr_flag));
1118 tcg_temp_free_i32(tmp);
1121 static ExitStatus gen_call_pal(DisasContext *ctx, int palcode)
1123 /* We're emulating OSF/1 PALcode. Many of these are trivial access
1124 to internal cpu registers. */
1126 /* Unprivileged PAL call */
1127 if (palcode >= 0x80 && palcode < 0xC0) {
1128 switch (palcode) {
1129 case 0x86:
1130 /* IMB */
1131 /* No-op inside QEMU. */
1132 break;
1133 case 0x9E:
1134 /* RDUNIQUE */
1135 tcg_gen_ld_i64(cpu_ir[IR_V0], cpu_env,
1136 offsetof(CPUAlphaState, unique));
1137 break;
1138 case 0x9F:
1139 /* WRUNIQUE */
1140 tcg_gen_st_i64(cpu_ir[IR_A0], cpu_env,
1141 offsetof(CPUAlphaState, unique));
1142 break;
1143 default:
1144 palcode &= 0xbf;
1145 goto do_call_pal;
1147 return NO_EXIT;
1150 #ifndef CONFIG_USER_ONLY
1151 /* Privileged PAL code */
1152 if (palcode < 0x40 && (ctx->tb->flags & TB_FLAGS_USER_MODE) == 0) {
1153 switch (palcode) {
1154 case 0x01:
1155 /* CFLUSH */
1156 /* No-op inside QEMU. */
1157 break;
1158 case 0x02:
1159 /* DRAINA */
1160 /* No-op inside QEMU. */
1161 break;
1162 case 0x2D:
1163 /* WRVPTPTR */
1164 tcg_gen_st_i64(cpu_ir[IR_A0], cpu_env,
1165 offsetof(CPUAlphaState, vptptr));
1166 break;
1167 case 0x31:
1168 /* WRVAL */
1169 tcg_gen_st_i64(cpu_ir[IR_A0], cpu_env,
1170 offsetof(CPUAlphaState, sysval));
1171 break;
1172 case 0x32:
1173 /* RDVAL */
1174 tcg_gen_ld_i64(cpu_ir[IR_V0], cpu_env,
1175 offsetof(CPUAlphaState, sysval));
1176 break;
1178 case 0x35: {
1179 /* SWPIPL */
1180 TCGv tmp;
1182 /* Note that we already know we're in kernel mode, so we know
1183 that PS only contains the 3 IPL bits. */
1184 tcg_gen_ld8u_i64(cpu_ir[IR_V0], cpu_env,
1185 offsetof(CPUAlphaState, ps));
1187         /* But make sure to store only the 3 IPL bits from the user. */
1188 tmp = tcg_temp_new();
1189 tcg_gen_andi_i64(tmp, cpu_ir[IR_A0], PS_INT_MASK);
1190 tcg_gen_st8_i64(tmp, cpu_env, offsetof(CPUAlphaState, ps));
1191 tcg_temp_free(tmp);
1192 break;
1195 case 0x36:
1196 /* RDPS */
1197 tcg_gen_ld8u_i64(cpu_ir[IR_V0], cpu_env,
1198 offsetof(CPUAlphaState, ps));
1199 break;
1200 case 0x38:
1201 /* WRUSP */
1202 tcg_gen_st_i64(cpu_ir[IR_A0], cpu_env,
1203 offsetof(CPUAlphaState, usp));
1204 break;
1205 case 0x3A:
1206 /* RDUSP */
1207 tcg_gen_ld_i64(cpu_ir[IR_V0], cpu_env,
1208 offsetof(CPUAlphaState, usp));
1209 break;
1210 case 0x3C:
1211 /* WHAMI */
1212 tcg_gen_ld32s_i64(cpu_ir[IR_V0], cpu_env,
1213 -offsetof(AlphaCPU, env) + offsetof(CPUState, cpu_index));
1214 break;
1216 default:
1217 palcode &= 0x3f;
1218 goto do_call_pal;
1220 return NO_EXIT;
1222 #endif
1223 return gen_invalid(ctx);
1225 do_call_pal:
1226 #ifdef CONFIG_USER_ONLY
1227 return gen_excp(ctx, EXCP_CALL_PAL, palcode);
1228 #else
1230 TCGv pc = tcg_const_i64(ctx->pc);
1231 TCGv entry = tcg_const_i64(palcode & 0x80
1232 ? 0x2000 + (palcode - 0x80) * 64
1233 : 0x1000 + palcode * 64);
1235 gen_helper_call_pal(cpu_env, pc, entry);
1237 tcg_temp_free(entry);
1238 tcg_temp_free(pc);
1240 /* Since the destination is running in PALmode, we don't really
1241 need the page permissions check. We'll see the existence of
1242 the page when we create the TB, and we'll flush all TBs if
1243 we change the PAL base register. */
1244 if (!ctx->singlestep_enabled && !(ctx->tb->cflags & CF_LAST_IO)) {
1245 tcg_gen_goto_tb(0);
1246 tcg_gen_exit_tb((uintptr_t)ctx->tb);
1247 return EXIT_GOTO_TB;
1250 return EXIT_PC_UPDATED;
1252 #endif
1255 #ifndef CONFIG_USER_ONLY
1257 #define PR_BYTE 0x100000
1258 #define PR_LONG 0x200000
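/* Map a processor-register number onto its offset within CPUAlphaState,
   tagged with PR_BYTE or PR_LONG when the underlying field is narrower
   than 64 bits.  The offsets are far smaller than 0x100000, so the tags
   cannot collide with a real offset; zero means "unimplemented". */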
1260 static int cpu_pr_data(int pr)
1262 switch (pr) {
1263 case 0: return offsetof(CPUAlphaState, ps) | PR_BYTE;
1264 case 1: return offsetof(CPUAlphaState, fen) | PR_BYTE;
1265 case 2: return offsetof(CPUAlphaState, pcc_ofs) | PR_LONG;
1266 case 3: return offsetof(CPUAlphaState, trap_arg0);
1267 case 4: return offsetof(CPUAlphaState, trap_arg1);
1268 case 5: return offsetof(CPUAlphaState, trap_arg2);
1269 case 6: return offsetof(CPUAlphaState, exc_addr);
1270 case 7: return offsetof(CPUAlphaState, palbr);
1271 case 8: return offsetof(CPUAlphaState, ptbr);
1272 case 9: return offsetof(CPUAlphaState, vptptr);
1273 case 10: return offsetof(CPUAlphaState, unique);
1274 case 11: return offsetof(CPUAlphaState, sysval);
1275 case 12: return offsetof(CPUAlphaState, usp);
1277 case 32 ... 39:
1278 return offsetof(CPUAlphaState, shadow[pr - 32]);
1279 case 40 ... 63:
1280 return offsetof(CPUAlphaState, scratch[pr - 40]);
1282 case 251:
1283 return offsetof(CPUAlphaState, alarm_expire);
1285 return 0;
1288 static ExitStatus gen_mfpr(DisasContext *ctx, TCGv va, int regno)
1290 int data = cpu_pr_data(regno);
1292 /* Special help for VMTIME and WALLTIME. */
1293 if (regno == 250 || regno == 249) {
1294 void (*helper)(TCGv) = gen_helper_get_walltime;
1295 if (regno == 249) {
1296 helper = gen_helper_get_vmtime;
1298 if (ctx->tb->cflags & CF_USE_ICOUNT) {
1299 gen_io_start();
1300 helper(va);
1301 gen_io_end();
1302 return EXIT_PC_STALE;
1303 } else {
1304 helper(va);
1305 return NO_EXIT;
1309 /* The basic registers are data only, and unknown registers
1310 are read-zero, write-ignore. */
1311 if (data == 0) {
1312 tcg_gen_movi_i64(va, 0);
1313 } else if (data & PR_BYTE) {
1314 tcg_gen_ld8u_i64(va, cpu_env, data & ~PR_BYTE);
1315 } else if (data & PR_LONG) {
1316 tcg_gen_ld32s_i64(va, cpu_env, data & ~PR_LONG);
1317 } else {
1318 tcg_gen_ld_i64(va, cpu_env, data);
1320 return NO_EXIT;
1323 static ExitStatus gen_mtpr(DisasContext *ctx, TCGv vb, int regno)
1325 TCGv tmp;
1326 int data;
1328 switch (regno) {
1329 case 255:
1330 /* TBIA */
1331 gen_helper_tbia(cpu_env);
1332 break;
1334 case 254:
1335 /* TBIS */
1336 gen_helper_tbis(cpu_env, vb);
1337 break;
1339 case 253:
1340 /* WAIT */
1341 tmp = tcg_const_i64(1);
1342 tcg_gen_st32_i64(tmp, cpu_env, -offsetof(AlphaCPU, env) +
1343 offsetof(CPUState, halted));
1344 return gen_excp(ctx, EXCP_HLT, 0);
1346 case 252:
1347 /* HALT */
1348 gen_helper_halt(vb);
1349 return EXIT_PC_STALE;
1351 case 251:
1352 /* ALARM */
1353 gen_helper_set_alarm(cpu_env, vb);
1354 break;
1356 case 7:
1357 /* PALBR */
1358 tcg_gen_st_i64(vb, cpu_env, offsetof(CPUAlphaState, palbr));
1359 /* Changing the PAL base register implies un-chaining all of the TBs
1360 that ended with a CALL_PAL. Since the base register usually only
1361 changes during boot, flushing everything works well. */
1362 gen_helper_tb_flush(cpu_env);
1363 return EXIT_PC_STALE;
1365 default:
1366 /* The basic registers are data only, and unknown registers
1367 are read-zero, write-ignore. */
1368 data = cpu_pr_data(regno);
1369 if (data != 0) {
1370 if (data & PR_BYTE) {
1371 tcg_gen_st8_i64(vb, cpu_env, data & ~PR_BYTE);
1372 } else if (data & PR_LONG) {
1373 tcg_gen_st32_i64(vb, cpu_env, data & ~PR_LONG);
1374 } else {
1375 tcg_gen_st_i64(vb, cpu_env, data);
1378 break;
1381 return NO_EXIT;
1383 #endif /* !USER_ONLY */
1385 #define REQUIRE_TB_FLAG(FLAG) \
1386 do { \
1387 if ((ctx->tb->flags & (FLAG)) == 0) { \
1388 goto invalid_opc; \
1390 } while (0)
1392 #define REQUIRE_REG_31(WHICH) \
1393 do { \
1394 if (WHICH != 31) { \
1395 goto invalid_opc; \
1397 } while (0)
1399 static ExitStatus translate_one(DisasContext *ctx, uint32_t insn)
1401 int32_t disp21, disp16, disp12 __attribute__((unused));
1402 uint16_t fn11;
1403 uint8_t opc, ra, rb, rc, fpfn, fn7, lit;
1404 bool islit;
1405 TCGv va, vb, vc, tmp;
1406 TCGv_i32 t32;
1407 ExitStatus ret;
1409 /* Decode all instruction fields */
1410 opc = extract32(insn, 26, 6);
1411 ra = extract32(insn, 21, 5);
1412 rb = extract32(insn, 16, 5);
1413 rc = extract32(insn, 0, 5);
1414 islit = extract32(insn, 12, 1);
1415 lit = extract32(insn, 13, 8);
1417 disp21 = sextract32(insn, 0, 21);
1418 disp16 = sextract32(insn, 0, 16);
1419 disp12 = sextract32(insn, 0, 12);
1421 fn11 = extract32(insn, 5, 11);
1422 fpfn = extract32(insn, 5, 6);
1423 fn7 = extract32(insn, 5, 7);
1425 if (rb == 31 && !islit) {
1426 islit = true;
1427 lit = 0;
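/* An rb of $31 always reads as zero, so fold it into the literal path
   here; the operate-format cases below can then treat register and
   literal operands uniformly via load_gpr_lit. */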
1430 ret = NO_EXIT;
1431 switch (opc) {
1432 case 0x00:
1433 /* CALL_PAL */
1434 ret = gen_call_pal(ctx, insn & 0x03ffffff);
1435 break;
1436 case 0x01:
1437 /* OPC01 */
1438 goto invalid_opc;
1439 case 0x02:
1440 /* OPC02 */
1441 goto invalid_opc;
1442 case 0x03:
1443 /* OPC03 */
1444 goto invalid_opc;
1445 case 0x04:
1446 /* OPC04 */
1447 goto invalid_opc;
1448 case 0x05:
1449 /* OPC05 */
1450 goto invalid_opc;
1451 case 0x06:
1452 /* OPC06 */
1453 goto invalid_opc;
1454 case 0x07:
1455 /* OPC07 */
1456 goto invalid_opc;
1458 case 0x09:
1459 /* LDAH */
1460 disp16 = (uint32_t)disp16 << 16;
1461 /* fall through */
1462 case 0x08:
1463 /* LDA */
1464 va = dest_gpr(ctx, ra);
1465 /* It's worth special-casing immediate loads. */
1466 if (rb == 31) {
1467 tcg_gen_movi_i64(va, disp16);
1468 } else {
1469 tcg_gen_addi_i64(va, load_gpr(ctx, rb), disp16);
1471 break;
1473 case 0x0A:
1474 /* LDBU */
1475 REQUIRE_TB_FLAG(TB_FLAGS_AMASK_BWX);
1476 gen_load_mem(ctx, &tcg_gen_qemu_ld8u, ra, rb, disp16, 0, 0);
1477 break;
1478 case 0x0B:
1479 /* LDQ_U */
1480 gen_load_mem(ctx, &tcg_gen_qemu_ld64, ra, rb, disp16, 0, 1);
1481 break;
1482 case 0x0C:
1483 /* LDWU */
1484 REQUIRE_TB_FLAG(TB_FLAGS_AMASK_BWX);
1485 gen_load_mem(ctx, &tcg_gen_qemu_ld16u, ra, rb, disp16, 0, 0);
1486 break;
1487 case 0x0D:
1488 /* STW */
1489 REQUIRE_TB_FLAG(TB_FLAGS_AMASK_BWX);
1490 gen_store_mem(ctx, &tcg_gen_qemu_st16, ra, rb, disp16, 0, 0);
1491 break;
1492 case 0x0E:
1493 /* STB */
1494 REQUIRE_TB_FLAG(TB_FLAGS_AMASK_BWX);
1495 gen_store_mem(ctx, &tcg_gen_qemu_st8, ra, rb, disp16, 0, 0);
1496 break;
1497 case 0x0F:
1498 /* STQ_U */
1499 gen_store_mem(ctx, &tcg_gen_qemu_st64, ra, rb, disp16, 0, 1);
1500 break;
1502 case 0x10:
1503 vc = dest_gpr(ctx, rc);
1504 vb = load_gpr_lit(ctx, rb, lit, islit);
1506 if (ra == 31) {
1507 if (fn7 == 0x00) {
1508 /* Special case ADDL as SEXTL. */
1509 tcg_gen_ext32s_i64(vc, vb);
1510 break;
1512 if (fn7 == 0x29) {
1513 /* Special case SUBQ as NEGQ. */
1514 tcg_gen_neg_i64(vc, vb);
1515 break;
1519 va = load_gpr(ctx, ra);
1520 switch (fn7) {
1521 case 0x00:
1522 /* ADDL */
1523 tcg_gen_add_i64(vc, va, vb);
1524 tcg_gen_ext32s_i64(vc, vc);
1525 break;
1526 case 0x02:
1527 /* S4ADDL */
1528 tmp = tcg_temp_new();
1529 tcg_gen_shli_i64(tmp, va, 2);
1530 tcg_gen_add_i64(tmp, tmp, vb);
1531 tcg_gen_ext32s_i64(vc, tmp);
1532 tcg_temp_free(tmp);
1533 break;
1534 case 0x09:
1535 /* SUBL */
1536 tcg_gen_sub_i64(vc, va, vb);
1537 tcg_gen_ext32s_i64(vc, vc);
1538 break;
1539 case 0x0B:
1540 /* S4SUBL */
1541 tmp = tcg_temp_new();
1542 tcg_gen_shli_i64(tmp, va, 2);
1543 tcg_gen_sub_i64(tmp, tmp, vb);
1544 tcg_gen_ext32s_i64(vc, tmp);
1545 tcg_temp_free(tmp);
1546 break;
1547 case 0x0F:
1548 /* CMPBGE */
1549 gen_helper_cmpbge(vc, va, vb);
1550 break;
1551 case 0x12:
1552 /* S8ADDL */
1553 tmp = tcg_temp_new();
1554 tcg_gen_shli_i64(tmp, va, 3);
1555 tcg_gen_add_i64(tmp, tmp, vb);
1556 tcg_gen_ext32s_i64(vc, tmp);
1557 tcg_temp_free(tmp);
1558 break;
1559 case 0x1B:
1560 /* S8SUBL */
1561 tmp = tcg_temp_new();
1562 tcg_gen_shli_i64(tmp, va, 3);
1563 tcg_gen_sub_i64(tmp, tmp, vb);
1564 tcg_gen_ext32s_i64(vc, tmp);
1565 tcg_temp_free(tmp);
1566 break;
1567 case 0x1D:
1568 /* CMPULT */
1569 tcg_gen_setcond_i64(TCG_COND_LTU, vc, va, vb);
1570 break;
1571 case 0x20:
1572 /* ADDQ */
1573 tcg_gen_add_i64(vc, va, vb);
1574 break;
1575 case 0x22:
1576 /* S4ADDQ */
1577 tmp = tcg_temp_new();
1578 tcg_gen_shli_i64(tmp, va, 2);
1579 tcg_gen_add_i64(vc, tmp, vb);
1580 tcg_temp_free(tmp);
1581 break;
1582 case 0x29:
1583 /* SUBQ */
1584 tcg_gen_sub_i64(vc, va, vb);
1585 break;
1586 case 0x2B:
1587 /* S4SUBQ */
1588 tmp = tcg_temp_new();
1589 tcg_gen_shli_i64(tmp, va, 2);
1590 tcg_gen_sub_i64(vc, tmp, vb);
1591 tcg_temp_free(tmp);
1592 break;
1593 case 0x2D:
1594 /* CMPEQ */
1595 tcg_gen_setcond_i64(TCG_COND_EQ, vc, va, vb);
1596 break;
1597 case 0x32:
1598 /* S8ADDQ */
1599 tmp = tcg_temp_new();
1600 tcg_gen_shli_i64(tmp, va, 3);
1601 tcg_gen_add_i64(vc, tmp, vb);
1602 tcg_temp_free(tmp);
1603 break;
1604 case 0x3B:
1605 /* S8SUBQ */
1606 tmp = tcg_temp_new();
1607 tcg_gen_shli_i64(tmp, va, 3);
1608 tcg_gen_sub_i64(vc, tmp, vb);
1609 tcg_temp_free(tmp);
1610 break;
1611 case 0x3D:
1612 /* CMPULE */
1613 tcg_gen_setcond_i64(TCG_COND_LEU, vc, va, vb);
1614 break;
1615 case 0x40:
1616 /* ADDL/V */
1617 gen_helper_addlv(vc, cpu_env, va, vb);
1618 break;
1619 case 0x49:
1620 /* SUBL/V */
1621 gen_helper_sublv(vc, cpu_env, va, vb);
1622 break;
1623 case 0x4D:
1624 /* CMPLT */
1625 tcg_gen_setcond_i64(TCG_COND_LT, vc, va, vb);
1626 break;
1627 case 0x60:
1628 /* ADDQ/V */
1629 gen_helper_addqv(vc, cpu_env, va, vb);
1630 break;
1631 case 0x69:
1632 /* SUBQ/V */
1633 gen_helper_subqv(vc, cpu_env, va, vb);
1634 break;
1635 case 0x6D:
1636 /* CMPLE */
1637 tcg_gen_setcond_i64(TCG_COND_LE, vc, va, vb);
1638 break;
1639 default:
1640 goto invalid_opc;
1642 break;
1644 case 0x11:
1645 if (fn7 == 0x20) {
1646 if (rc == 31) {
1647 /* Special case BIS as NOP. */
1648 break;
1650 if (ra == 31) {
1651 /* Special case BIS as MOV. */
1652 vc = dest_gpr(ctx, rc);
1653 if (islit) {
1654 tcg_gen_movi_i64(vc, lit);
1655 } else {
1656 tcg_gen_mov_i64(vc, load_gpr(ctx, rb));
1658 break;
1662 vc = dest_gpr(ctx, rc);
1663 vb = load_gpr_lit(ctx, rb, lit, islit);
1665 if (fn7 == 0x28 && ra == 31) {
1666 /* Special case ORNOT as NOT. */
1667 tcg_gen_not_i64(vc, vb);
1668 break;
1671 va = load_gpr(ctx, ra);
1672 switch (fn7) {
1673 case 0x00:
1674 /* AND */
1675 tcg_gen_and_i64(vc, va, vb);
1676 break;
1677 case 0x08:
1678 /* BIC */
1679 tcg_gen_andc_i64(vc, va, vb);
1680 break;
1681 case 0x14:
1682 /* CMOVLBS */
1683 tmp = tcg_temp_new();
1684 tcg_gen_andi_i64(tmp, va, 1);
1685 tcg_gen_movcond_i64(TCG_COND_NE, vc, tmp, load_zero(ctx),
1686 vb, load_gpr(ctx, rc));
1687 tcg_temp_free(tmp);
1688 break;
1689 case 0x16:
1690 /* CMOVLBC */
1691 tmp = tcg_temp_new();
1692 tcg_gen_andi_i64(tmp, va, 1);
1693 tcg_gen_movcond_i64(TCG_COND_EQ, vc, tmp, load_zero(ctx),
1694 vb, load_gpr(ctx, rc));
1695 tcg_temp_free(tmp);
1696 break;
1697 case 0x20:
1698 /* BIS */
1699 tcg_gen_or_i64(vc, va, vb);
1700 break;
1701 case 0x24:
1702 /* CMOVEQ */
1703 tcg_gen_movcond_i64(TCG_COND_EQ, vc, va, load_zero(ctx),
1704 vb, load_gpr(ctx, rc));
1705 break;
1706 case 0x26:
1707 /* CMOVNE */
1708 tcg_gen_movcond_i64(TCG_COND_NE, vc, va, load_zero(ctx),
1709 vb, load_gpr(ctx, rc));
1710 break;
1711 case 0x28:
1712 /* ORNOT */
1713 tcg_gen_orc_i64(vc, va, vb);
1714 break;
1715 case 0x40:
1716 /* XOR */
1717 tcg_gen_xor_i64(vc, va, vb);
1718 break;
1719 case 0x44:
1720 /* CMOVLT */
1721 tcg_gen_movcond_i64(TCG_COND_LT, vc, va, load_zero(ctx),
1722 vb, load_gpr(ctx, rc));
1723 break;
1724 case 0x46:
1725 /* CMOVGE */
1726 tcg_gen_movcond_i64(TCG_COND_GE, vc, va, load_zero(ctx),
1727 vb, load_gpr(ctx, rc));
1728 break;
1729 case 0x48:
1730 /* EQV */
1731 tcg_gen_eqv_i64(vc, va, vb);
1732 break;
1733 case 0x61:
1734 /* AMASK */
1735 REQUIRE_REG_31(ra);
1737 uint64_t amask = ctx->tb->flags >> TB_FLAGS_AMASK_SHIFT;
1738 tcg_gen_andi_i64(vc, vb, ~amask);
1740 break;
1741 case 0x64:
1742 /* CMOVLE */
1743 tcg_gen_movcond_i64(TCG_COND_LE, vc, va, load_zero(ctx),
1744 vb, load_gpr(ctx, rc));
1745 break;
1746 case 0x66:
1747 /* CMOVGT */
1748 tcg_gen_movcond_i64(TCG_COND_GT, vc, va, load_zero(ctx),
1749 vb, load_gpr(ctx, rc));
1750 break;
1751 case 0x6C:
1752 /* IMPLVER */
1753 REQUIRE_REG_31(ra);
1754 tcg_gen_movi_i64(vc, ctx->implver);
1755 break;
1756 default:
1757 goto invalid_opc;
1759 break;
1761 case 0x12:
1762 vc = dest_gpr(ctx, rc);
1763 va = load_gpr(ctx, ra);
1764 switch (fn7) {
1765 case 0x02:
1766 /* MSKBL */
1767 gen_msk_l(ctx, vc, va, rb, islit, lit, 0x01);
1768 break;
1769 case 0x06:
1770 /* EXTBL */
1771 gen_ext_l(ctx, vc, va, rb, islit, lit, 0x01);
1772 break;
1773 case 0x0B:
1774 /* INSBL */
1775 gen_ins_l(ctx, vc, va, rb, islit, lit, 0x01);
1776 break;
1777 case 0x12:
1778 /* MSKWL */
1779 gen_msk_l(ctx, vc, va, rb, islit, lit, 0x03);
1780 break;
1781 case 0x16:
1782 /* EXTWL */
1783 gen_ext_l(ctx, vc, va, rb, islit, lit, 0x03);
1784 break;
1785 case 0x1B:
1786 /* INSWL */
1787 gen_ins_l(ctx, vc, va, rb, islit, lit, 0x03);
1788 break;
1789 case 0x22:
1790 /* MSKLL */
1791 gen_msk_l(ctx, vc, va, rb, islit, lit, 0x0f);
1792 break;
1793 case 0x26:
1794 /* EXTLL */
1795 gen_ext_l(ctx, vc, va, rb, islit, lit, 0x0f);
1796 break;
1797 case 0x2B:
1798 /* INSLL */
1799 gen_ins_l(ctx, vc, va, rb, islit, lit, 0x0f);
1800 break;
1801 case 0x30:
1802 /* ZAP */
1803 if (islit) {
1804 gen_zapnoti(vc, va, ~lit);
1805 } else {
1806 gen_helper_zap(vc, va, load_gpr(ctx, rb));
1808 break;
1809 case 0x31:
1810 /* ZAPNOT */
1811 if (islit) {
1812 gen_zapnoti(vc, va, lit);
1813 } else {
1814 gen_helper_zapnot(vc, va, load_gpr(ctx, rb));
1816 break;
1817 case 0x32:
1818 /* MSKQL */
1819 gen_msk_l(ctx, vc, va, rb, islit, lit, 0xff);
1820 break;
1821 case 0x34:
1822 /* SRL */
1823 if (islit) {
1824 tcg_gen_shri_i64(vc, va, lit & 0x3f);
1825 } else {
1826 tmp = tcg_temp_new();
1827 vb = load_gpr(ctx, rb);
1828 tcg_gen_andi_i64(tmp, vb, 0x3f);
1829 tcg_gen_shr_i64(vc, va, tmp);
1830 tcg_temp_free(tmp);
1832 break;
1833 case 0x36:
1834 /* EXTQL */
1835 gen_ext_l(ctx, vc, va, rb, islit, lit, 0xff);
1836 break;
1837 case 0x39:
1838 /* SLL */
1839 if (islit) {
1840 tcg_gen_shli_i64(vc, va, lit & 0x3f);
1841 } else {
1842 tmp = tcg_temp_new();
1843 vb = load_gpr(ctx, rb);
1844 tcg_gen_andi_i64(tmp, vb, 0x3f);
1845 tcg_gen_shl_i64(vc, va, tmp);
1846 tcg_temp_free(tmp);
1848 break;
1849 case 0x3B:
1850 /* INSQL */
1851 gen_ins_l(ctx, vc, va, rb, islit, lit, 0xff);
1852 break;
1853 case 0x3C:
1854 /* SRA */
1855 if (islit) {
1856 tcg_gen_sari_i64(vc, va, lit & 0x3f);
1857 } else {
1858 tmp = tcg_temp_new();
1859 vb = load_gpr(ctx, rb);
1860 tcg_gen_andi_i64(tmp, vb, 0x3f);
1861 tcg_gen_sar_i64(vc, va, tmp);
1862 tcg_temp_free(tmp);
1864 break;
1865 case 0x52:
1866 /* MSKWH */
1867 gen_msk_h(ctx, vc, va, rb, islit, lit, 0x03);
1868 break;
1869 case 0x57:
1870 /* INSWH */
1871 gen_ins_h(ctx, vc, va, rb, islit, lit, 0x03);
1872 break;
1873 case 0x5A:
1874 /* EXTWH */
1875 gen_ext_h(ctx, vc, va, rb, islit, lit, 0x03);
1876 break;
1877 case 0x62:
1878 /* MSKLH */
1879 gen_msk_h(ctx, vc, va, rb, islit, lit, 0x0f);
1880 break;
1881 case 0x67:
1882 /* INSLH */
1883 gen_ins_h(ctx, vc, va, rb, islit, lit, 0x0f);
1884 break;
1885 case 0x6A:
1886 /* EXTLH */
1887 gen_ext_h(ctx, vc, va, rb, islit, lit, 0x0f);
1888 break;
1889 case 0x72:
1890 /* MSKQH */
1891 gen_msk_h(ctx, vc, va, rb, islit, lit, 0xff);
1892 break;
1893 case 0x77:
1894 /* INSQH */
1895 gen_ins_h(ctx, vc, va, rb, islit, lit, 0xff);
1896 break;
1897 case 0x7A:
1898 /* EXTQH */
1899 gen_ext_h(ctx, vc, va, rb, islit, lit, 0xff);
1900 break;
1901 default:
1902 goto invalid_opc;
1904 break;
1906 case 0x13:
1907 vc = dest_gpr(ctx, rc);
1908 vb = load_gpr_lit(ctx, rb, lit, islit);
1909 va = load_gpr(ctx, ra);
1910 switch (fn7) {
1911 case 0x00:
1912 /* MULL */
1913 tcg_gen_mul_i64(vc, va, vb);
1914 tcg_gen_ext32s_i64(vc, vc);
1915 break;
1916 case 0x20:
1917 /* MULQ */
1918 tcg_gen_mul_i64(vc, va, vb);
1919 break;
1920 case 0x30:
1921 /* UMULH */
1922 tmp = tcg_temp_new();
1923 tcg_gen_mulu2_i64(tmp, vc, va, vb);
1924 tcg_temp_free(tmp);
1925 break;
1926 case 0x40:
1927 /* MULL/V */
1928 gen_helper_mullv(vc, cpu_env, va, vb);
1929 break;
1930 case 0x60:
1931 /* MULQ/V */
1932 gen_helper_mulqv(vc, cpu_env, va, vb);
1933 break;
1934 default:
1935 goto invalid_opc;
1937 break;
1939 case 0x14:
1940 REQUIRE_TB_FLAG(TB_FLAGS_AMASK_FIX);
1941 vc = dest_fpr(ctx, rc);
1942 switch (fpfn) { /* fn11 & 0x3F */
1943 case 0x04:
1944 /* ITOFS */
1945 REQUIRE_REG_31(rb);
1946 t32 = tcg_temp_new_i32();
1947 va = load_gpr(ctx, ra);
1948 tcg_gen_trunc_i64_i32(t32, va);
1949 gen_helper_memory_to_s(vc, t32);
1950 tcg_temp_free_i32(t32);
1951 break;
1952 case 0x0A:
1953 /* SQRTF */
1954 REQUIRE_REG_31(ra);
1955 vb = load_fpr(ctx, rb);
1956 gen_helper_sqrtf(vc, cpu_env, vb);
1957 break;
1958 case 0x0B:
1959 /* SQRTS */
1960 REQUIRE_REG_31(ra);
1961 gen_fsqrts(ctx, rb, rc, fn11);
1962 break;
1963 case 0x14:
1964 /* ITOFF */
1965 REQUIRE_REG_31(rb);
1966 t32 = tcg_temp_new_i32();
1967 va = load_gpr(ctx, ra);
1968 tcg_gen_trunc_i64_i32(t32, va);
1969 gen_helper_memory_to_f(vc, t32);
1970 tcg_temp_free_i32(t32);
1971 break;
1972 case 0x24:
1973 /* ITOFT */
1974 REQUIRE_REG_31(rb);
1975 va = load_gpr(ctx, ra);
1976 tcg_gen_mov_i64(vc, va);
1977 break;
1978 case 0x2A:
1979 /* SQRTG */
1980 REQUIRE_REG_31(ra);
1981 vb = load_fpr(ctx, rb);
1982 gen_helper_sqrtg(vc, cpu_env, vb);
1983 break;
1984 case 0x02B:
1985 /* SQRTT */
1986 REQUIRE_REG_31(ra);
1987 gen_fsqrtt(ctx, rb, rc, fn11);
1988 break;
1989 default:
1990 goto invalid_opc;
1992 break;
1994 case 0x15:
1995 /* VAX floating point */
1996 /* XXX: rounding mode and trap are ignored (!) */
1997 vc = dest_fpr(ctx, rc);
1998 vb = load_fpr(ctx, rb);
1999 va = load_fpr(ctx, ra);
2000 switch (fpfn) { /* fn11 & 0x3F */
2001 case 0x00:
2002 /* ADDF */
2003 gen_helper_addf(vc, cpu_env, va, vb);
2004 break;
2005 case 0x01:
2006 /* SUBF */
2007 gen_helper_subf(vc, cpu_env, va, vb);
2008 break;
2009 case 0x02:
2010 /* MULF */
2011 gen_helper_mulf(vc, cpu_env, va, vb);
2012 break;
2013 case 0x03:
2014 /* DIVF */
2015 gen_helper_divf(vc, cpu_env, va, vb);
2016 break;
2017 case 0x1E:
2018 /* CVTDG -- TODO */
2019 REQUIRE_REG_31(ra);
2020 goto invalid_opc;
2021 case 0x20:
2022 /* ADDG */
2023 gen_helper_addg(vc, cpu_env, va, vb);
2024 break;
2025 case 0x21:
2026 /* SUBG */
2027 gen_helper_subg(vc, cpu_env, va, vb);
2028 break;
2029 case 0x22:
2030 /* MULG */
2031 gen_helper_mulg(vc, cpu_env, va, vb);
2032 break;
2033 case 0x23:
2034 /* DIVG */
2035 gen_helper_divg(vc, cpu_env, va, vb);
2036 break;
2037 case 0x25:
2038 /* CMPGEQ */
2039 gen_helper_cmpgeq(vc, cpu_env, va, vb);
2040 break;
2041 case 0x26:
2042 /* CMPGLT */
2043 gen_helper_cmpglt(vc, cpu_env, va, vb);
2044 break;
2045 case 0x27:
2046 /* CMPGLE */
2047 gen_helper_cmpgle(vc, cpu_env, va, vb);
2048 break;
2049 case 0x2C:
2050 /* CVTGF */
2051 REQUIRE_REG_31(ra);
2052 gen_helper_cvtgf(vc, cpu_env, vb);
2053 break;
2054 case 0x2D:
2055 /* CVTGD -- TODO */
2056 REQUIRE_REG_31(ra);
2057 goto invalid_opc;
2058 case 0x2F:
2059 /* CVTGQ */
2060 REQUIRE_REG_31(ra);
2061 gen_helper_cvtgq(vc, cpu_env, vb);
2062 break;
2063 case 0x3C:
2064 /* CVTQF */
2065 REQUIRE_REG_31(ra);
2066 gen_helper_cvtqf(vc, cpu_env, vb);
2067 break;
2068 case 0x3E:
2069 /* CVTQG */
2070 REQUIRE_REG_31(ra);
2071 gen_helper_cvtqg(vc, cpu_env, vb);
2072 break;
2073 default:
2074 goto invalid_opc;
2076 break;
2078 case 0x16:
2079 /* IEEE floating-point */
2080 switch (fpfn) { /* fn11 & 0x3F */
2081 case 0x00:
2082 /* ADDS */
2083 gen_fadds(ctx, ra, rb, rc, fn11);
2084 break;
2085 case 0x01:
2086 /* SUBS */
2087 gen_fsubs(ctx, ra, rb, rc, fn11);
2088 break;
2089 case 0x02:
2090 /* MULS */
2091 gen_fmuls(ctx, ra, rb, rc, fn11);
2092 break;
2093 case 0x03:
2094 /* DIVS */
2095 gen_fdivs(ctx, ra, rb, rc, fn11);
2096 break;
2097 case 0x20:
2098 /* ADDT */
2099 gen_faddt(ctx, ra, rb, rc, fn11);
2100 break;
2101 case 0x21:
2102 /* SUBT */
2103 gen_fsubt(ctx, ra, rb, rc, fn11);
2104 break;
2105 case 0x22:
2106 /* MULT */
2107 gen_fmult(ctx, ra, rb, rc, fn11);
2108 break;
2109 case 0x23:
2110 /* DIVT */
2111 gen_fdivt(ctx, ra, rb, rc, fn11);
2112 break;
2113 case 0x24:
2114 /* CMPTUN */
2115 gen_fcmptun(ctx, ra, rb, rc, fn11);
2116 break;
2117 case 0x25:
2118 /* CMPTEQ */
2119 gen_fcmpteq(ctx, ra, rb, rc, fn11);
2120 break;
2121 case 0x26:
2122 /* CMPTLT */
2123 gen_fcmptlt(ctx, ra, rb, rc, fn11);
2124 break;
2125 case 0x27:
2126 /* CMPTLE */
2127 gen_fcmptle(ctx, ra, rb, rc, fn11);
2128 break;
2129 case 0x2C:
2130 REQUIRE_REG_31(ra);
2131 if (fn11 == 0x2AC || fn11 == 0x6AC) {
2132 /* CVTST */
2133 gen_fcvtst(ctx, rb, rc, fn11);
2134 } else {
2135 /* CVTTS */
2136 gen_fcvtts(ctx, rb, rc, fn11);
2138 break;
2139 case 0x2F:
2140 /* CVTTQ */
2141 REQUIRE_REG_31(ra);
2142 gen_fcvttq(ctx, rb, rc, fn11);
2143 break;
2144 case 0x3C:
2145 /* CVTQS */
2146 REQUIRE_REG_31(ra);
2147 gen_fcvtqs(ctx, rb, rc, fn11);
2148 break;
2149 case 0x3E:
2150 /* CVTQT */
2151 REQUIRE_REG_31(ra);
2152 gen_fcvtqt(ctx, rb, rc, fn11);
2153 break;
2154 default:
2155 goto invalid_opc;
2157 break;
2159 case 0x17:
2160 switch (fn11) {
2161 case 0x010:
2162 /* CVTLQ */
2163 REQUIRE_REG_31(ra);
2164 vc = dest_fpr(ctx, rc);
2165 vb = load_fpr(ctx, rb);
2166 gen_fcvtlq(vc, vb);
2167 break;
2168 case 0x020:
2169 /* CPYS */
2170 if (rc == 31) {
2171 /* Special case CPYS as FNOP. */
2172 } else {
2173 vc = dest_fpr(ctx, rc);
2174 va = load_fpr(ctx, ra);
2175 if (ra == rb) {
2176 /* Special case CPYS as FMOV. */
2177 tcg_gen_mov_i64(vc, va);
2178 } else {
2179 vb = load_fpr(ctx, rb);
2180 gen_cpy_mask(vc, va, vb, 0, 0x8000000000000000ULL);
2183 break;
2184 case 0x021:
2185 /* CPYSN */
2186 vc = dest_fpr(ctx, rc);
2187 vb = load_fpr(ctx, rb);
2188 va = load_fpr(ctx, ra);
2189 gen_cpy_mask(vc, va, vb, 1, 0x8000000000000000ULL);
2190 break;
2191 case 0x022:
2192 /* CPYSE */
2193 vc = dest_fpr(ctx, rc);
2194 vb = load_fpr(ctx, rb);
2195 va = load_fpr(ctx, ra);
2196 gen_cpy_mask(vc, va, vb, 0, 0xFFF0000000000000ULL);
2197 break;
2198 case 0x024:
2199 /* MT_FPCR */
2200 va = load_fpr(ctx, ra);
2201 gen_helper_store_fpcr(cpu_env, va);
2202 break;
2203 case 0x025:
2204 /* MF_FPCR */
2205 va = dest_fpr(ctx, ra);
2206 gen_helper_load_fpcr(va, cpu_env);
2207 break;
2208 case 0x02A:
2209 /* FCMOVEQ */
2210 gen_fcmov(ctx, TCG_COND_EQ, ra, rb, rc);
2211 break;
2212 case 0x02B:
2213 /* FCMOVNE */
2214 gen_fcmov(ctx, TCG_COND_NE, ra, rb, rc);
2215 break;
2216 case 0x02C:
2217 /* FCMOVLT */
2218 gen_fcmov(ctx, TCG_COND_LT, ra, rb, rc);
2219 break;
2220 case 0x02D:
2221 /* FCMOVGE */
2222 gen_fcmov(ctx, TCG_COND_GE, ra, rb, rc);
2223 break;
2224 case 0x02E:
2225 /* FCMOVLE */
2226 gen_fcmov(ctx, TCG_COND_LE, ra, rb, rc);
2227 break;
2228 case 0x02F:
2229 /* FCMOVGT */
2230 gen_fcmov(ctx, TCG_COND_GT, ra, rb, rc);
2231 break;
2232 case 0x030:
2233 /* CVTQL */
2234 REQUIRE_REG_31(ra);
2235 vc = dest_fpr(ctx, rc);
2236 vb = load_fpr(ctx, rb);
2237 gen_fcvtql(vc, vb);
2238 break;
2239 case 0x130:
2240 /* CVTQL/V */
2241 case 0x530:
2242 /* CVTQL/SV */
2243 REQUIRE_REG_31(ra);
2244 /* ??? I'm pretty sure there's nothing that /sv needs to do that
2245 /v doesn't do. The only thing I can think is that /sv is a
2246 valid instruction merely for completeness in the ISA. */
2247 vc = dest_fpr(ctx, rc);
2248 vb = load_fpr(ctx, rb);
2249 gen_helper_fcvtql_v_input(cpu_env, vb);
2250 gen_fcvtql(vc, vb);
2251 break;
2252 default:
2253 goto invalid_opc;
2255 break;
2257 case 0x18:
2258 switch ((uint16_t)disp16) {
2259 case 0x0000:
2260 /* TRAPB */
2261 /* No-op. */
2262 break;
2263 case 0x0400:
2264 /* EXCB */
2265 /* No-op. */
2266 break;
2267 case 0x4000:
2268 /* MB */
2269 /* No-op */
2270 break;
2271 case 0x4400:
2272 /* WMB */
2273 /* No-op */
2274 break;
2275 case 0x8000:
2276 /* FETCH */
2277 /* No-op */
2278 break;
2279 case 0xA000:
2280 /* FETCH_M */
2281 /* No-op */
2282 break;
2283 case 0xC000:
2284 /* RPCC */
2285 va = dest_gpr(ctx, ra);
2286 if (ctx->tb->cflags & CF_USE_ICOUNT) {
2287 gen_io_start();
2288 gen_helper_load_pcc(va, cpu_env);
2289 gen_io_end();
2290 ret = EXIT_PC_STALE;
2291 } else {
2292 gen_helper_load_pcc(va, cpu_env);
2294 break;
2295 case 0xE000:
2296 /* RC */
2297 gen_rx(ra, 0);
2298 break;
2299 case 0xE800:
2300 /* ECB */
2301 break;
2302 case 0xF000:
2303 /* RS */
2304 gen_rx(ra, 1);
2305 break;
2306 case 0xF800:
2307 /* WH64 */
2308 /* No-op */
2309 break;
2310 default:
2311 goto invalid_opc;
2313 break;
2315 case 0x19:
2316 /* HW_MFPR (PALcode) */
2317 #ifndef CONFIG_USER_ONLY
2318 REQUIRE_TB_FLAG(TB_FLAGS_PAL_MODE);
2319 va = dest_gpr(ctx, ra);
2320 ret = gen_mfpr(ctx, va, insn & 0xffff);
2321 break;
2322 #else
2323 goto invalid_opc;
2324 #endif
2326 case 0x1A:
2327 /* JMP, JSR, RET, JSR_COROUTINE. These only differ by the branch
2328 prediction stack action, which of course we don't implement. */
2329 vb = load_gpr(ctx, rb);
2330 tcg_gen_andi_i64(cpu_pc, vb, ~3);
2331 if (ra != 31) {
2332 tcg_gen_movi_i64(cpu_ir[ra], ctx->pc);
2334 ret = EXIT_PC_UPDATED;
2335 break;
2337 case 0x1B:
2338 /* HW_LD (PALcode) */
2339 #ifndef CONFIG_USER_ONLY
2340 REQUIRE_TB_FLAG(TB_FLAGS_PAL_MODE);
2342 TCGv addr = tcg_temp_new();
2343 vb = load_gpr(ctx, rb);
2344 va = dest_gpr(ctx, ra);
2346 tcg_gen_addi_i64(addr, vb, disp12);
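/* Bits <15:12> of the insn select the HW_LD variant; only the physical
   and protection-check forms are handled here, everything else is
   treated as an illegal opcode. */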
2347 switch ((insn >> 12) & 0xF) {
2348 case 0x0:
2349 /* Longword physical access (hw_ldl/p) */
2350 gen_helper_ldl_phys(va, cpu_env, addr);
2351 break;
2352 case 0x1:
2353 /* Quadword physical access (hw_ldq/p) */
2354 gen_helper_ldq_phys(va, cpu_env, addr);
2355 break;
2356 case 0x2:
2357 /* Longword physical access with lock (hw_ldl_l/p) */
2358 gen_helper_ldl_l_phys(va, cpu_env, addr);
2359 break;
2360 case 0x3:
2361 /* Quadword physical access with lock (hw_ldq_l/p) */
2362 gen_helper_ldq_l_phys(va, cpu_env, addr);
2363 break;
2364 case 0x4:
2365 /* Longword virtual PTE fetch (hw_ldl/v) */
2366 goto invalid_opc;
2367 case 0x5:
2368 /* Quadword virtual PTE fetch (hw_ldq/v) */
2369 goto invalid_opc;
2370 break;
2371 case 0x6:
2372 /* Invalid */
2373 goto invalid_opc;
2374 case 0x7:
2375 /* Invalid */
2376 goto invalid_opc;
2377 case 0x8:
2378 /* Longword virtual access (hw_ldl) */
2379 goto invalid_opc;
2380 case 0x9:
2381 /* Quadword virtual access (hw_ldq) */
2382 goto invalid_opc;
2383 case 0xA:
2384 /* Longword virtual access with protection check (hw_ldl/w) */
2385 tcg_gen_qemu_ld_i64(va, addr, MMU_KERNEL_IDX, MO_LESL);
2386 break;
2387 case 0xB:
2388 /* Quadword virtual access with protection check (hw_ldq/w) */
2389 tcg_gen_qemu_ld_i64(va, addr, MMU_KERNEL_IDX, MO_LEQ);
2390 break;
2391 case 0xC:
2392 /* Longword virtual access with alt access mode (hw_ldl/a) */
2393 goto invalid_opc;
2394 case 0xD:
2395 /* Quadword virtual access with alt access mode (hw_ldq/a) */
2396 goto invalid_opc;
2397 case 0xE:
2398 /* Longword virtual access with alternate access mode and
2399 protection checks (hw_ldl/wa) */
2400 tcg_gen_qemu_ld_i64(va, addr, MMU_USER_IDX, MO_LESL);
2401 break;
2402 case 0xF:
2403 /* Quadword virtual access with alternate access mode and
2404 protection checks (hw_ldq/wa) */
2405 tcg_gen_qemu_ld_i64(va, addr, MMU_USER_IDX, MO_LEQ);
2406 break;
2408 tcg_temp_free(addr);
2409 break;
2411 #else
2412 goto invalid_opc;
2413 #endif
2415 case 0x1C:
2416 vc = dest_gpr(ctx, rc);
2417 if (fn7 == 0x70) {
2418 /* FTOIT */
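/* FTOIT appears to be a plain 64-bit bit copy, the fpr and gpr both
   holding the register-format image; FTOIS below first converts to the
   32-bit S-format memory image and then sign-extends. */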
2419 REQUIRE_TB_FLAG(TB_FLAGS_AMASK_FIX);
2420 REQUIRE_REG_31(rb);
2421 va = load_fpr(ctx, ra);
2422 tcg_gen_mov_i64(vc, va);
2423 break;
2424 } else if (fn7 == 0x78) {
2425 /* FTOIS */
2426 REQUIRE_TB_FLAG(TB_FLAGS_AMASK_FIX);
2427 REQUIRE_REG_31(rb);
2428 t32 = tcg_temp_new_i32();
2429 va = load_fpr(ctx, ra);
2430 gen_helper_s_to_memory(t32, va);
2431 tcg_gen_ext_i32_i64(vc, t32);
2432 tcg_temp_free_i32(t32);
2433 break;
2436 vb = load_gpr_lit(ctx, rb, lit, islit);
2437 switch (fn7) {
2438 case 0x00:
2439 /* SEXTB */
2440 REQUIRE_TB_FLAG(TB_FLAGS_AMASK_BWX);
2441 REQUIRE_REG_31(ra);
2442 tcg_gen_ext8s_i64(vc, vb);
2443 break;
2444 case 0x01:
2445 /* SEXTW */
2446 REQUIRE_TB_FLAG(TB_FLAGS_AMASK_BWX);
2447 REQUIRE_REG_31(ra);
2448 tcg_gen_ext16s_i64(vc, vb);
2449 break;
2450 case 0x30:
2451 /* CTPOP */
2452 REQUIRE_TB_FLAG(TB_FLAGS_AMASK_CIX);
2453 REQUIRE_REG_31(ra);
2454 gen_helper_ctpop(vc, vb);
2455 break;
2456 case 0x31:
2457 /* PERR */
2458 REQUIRE_TB_FLAG(TB_FLAGS_AMASK_MVI);
2459 va = load_gpr(ctx, ra);
2460 gen_helper_perr(vc, va, vb);
2461 break;
2462 case 0x32:
2463 /* CTLZ */
2464 REQUIRE_TB_FLAG(TB_FLAGS_AMASK_CIX);
2465 REQUIRE_REG_31(ra);
2466 gen_helper_ctlz(vc, vb);
2467 break;
2468 case 0x33:
2469 /* CTTZ */
2470 REQUIRE_TB_FLAG(TB_FLAGS_AMASK_CIX);
2471 REQUIRE_REG_31(ra);
2472 gen_helper_cttz(vc, vb);
2473 break;
2474 case 0x34:
2475 /* UNPKBW */
2476 REQUIRE_TB_FLAG(TB_FLAGS_AMASK_MVI);
2477 REQUIRE_REG_31(ra);
2478 gen_helper_unpkbw(vc, vb);
2479 break;
2480 case 0x35:
2481 /* UNPKBL */
2482 REQUIRE_TB_FLAG(TB_FLAGS_AMASK_MVI);
2483 REQUIRE_REG_31(ra);
2484 gen_helper_unpkbl(vc, vb);
2485 break;
2486 case 0x36:
2487 /* PKWB */
2488 REQUIRE_TB_FLAG(TB_FLAGS_AMASK_MVI);
2489 REQUIRE_REG_31(ra);
2490 gen_helper_pkwb(vc, vb);
2491 break;
2492 case 0x37:
2493 /* PKLB */
2494 REQUIRE_TB_FLAG(TB_FLAGS_AMASK_MVI);
2495 REQUIRE_REG_31(ra);
2496 gen_helper_pklb(vc, vb);
2497 break;
2498 case 0x38:
2499 /* MINSB8 */
2500 REQUIRE_TB_FLAG(TB_FLAGS_AMASK_MVI);
2501 va = load_gpr(ctx, ra);
2502 gen_helper_minsb8(vc, va, vb);
2503 break;
2504 case 0x39:
2505 /* MINSW4 */
2506 REQUIRE_TB_FLAG(TB_FLAGS_AMASK_MVI);
2507 va = load_gpr(ctx, ra);
2508 gen_helper_minsw4(vc, va, vb);
2509 break;
2510 case 0x3A:
2511 /* MINUB8 */
2512 REQUIRE_TB_FLAG(TB_FLAGS_AMASK_MVI);
2513 va = load_gpr(ctx, ra);
2514 gen_helper_minub8(vc, va, vb);
2515 break;
2516 case 0x3B:
2517 /* MINUW4 */
2518 REQUIRE_TB_FLAG(TB_FLAGS_AMASK_MVI);
2519 va = load_gpr(ctx, ra);
2520 gen_helper_minuw4(vc, va, vb);
2521 break;
2522 case 0x3C:
2523 /* MAXUB8 */
2524 REQUIRE_TB_FLAG(TB_FLAGS_AMASK_MVI);
2525 va = load_gpr(ctx, ra);
2526 gen_helper_maxub8(vc, va, vb);
2527 break;
2528 case 0x3D:
2529 /* MAXUW4 */
2530 REQUIRE_TB_FLAG(TB_FLAGS_AMASK_MVI);
2531 va = load_gpr(ctx, ra);
2532 gen_helper_maxuw4(vc, va, vb);
2533 break;
2534 case 0x3E:
2535 /* MAXSB8 */
2536 REQUIRE_TB_FLAG(TB_FLAGS_AMASK_MVI);
2537 va = load_gpr(ctx, ra);
2538 gen_helper_maxsb8(vc, va, vb);
2539 break;
2540 case 0x3F:
2541 /* MAXSW4 */
2542 REQUIRE_TB_FLAG(TB_FLAGS_AMASK_MVI);
2543 va = load_gpr(ctx, ra);
2544 gen_helper_maxsw4(vc, va, vb);
2545 break;
2546 default:
2547 goto invalid_opc;
2549 break;
2551 case 0x1D:
2552 /* HW_MTPR (PALcode) */
2553 #ifndef CONFIG_USER_ONLY
2554 REQUIRE_TB_FLAG(TB_FLAGS_PAL_MODE);
2555 vb = load_gpr(ctx, rb);
2556 ret = gen_mtpr(ctx, vb, insn & 0xffff);
2557 break;
2558 #else
2559 goto invalid_opc;
2560 #endif
2562 case 0x1E:
2563 /* HW_RET (PALcode) */
2564 #ifndef CONFIG_USER_ONLY
2565 REQUIRE_TB_FLAG(TB_FLAGS_PAL_MODE);
2566 if (rb == 31) {
2567 /* Pre-EV6 CPUs interpreted this as HW_REI, loading the return
2568 address from EXC_ADDR. This turns out to be useful for our
2569 emulation PALcode, so continue to accept it. */
2570 tmp = tcg_temp_new();
2571 tcg_gen_ld_i64(tmp, cpu_env, offsetof(CPUAlphaState, exc_addr));
2572 gen_helper_hw_ret(cpu_env, tmp);
2573 tcg_temp_free(tmp);
2574 } else {
2575 gen_helper_hw_ret(cpu_env, load_gpr(ctx, rb));
2577 ret = EXIT_PC_UPDATED;
2578 break;
2579 #else
2580 goto invalid_opc;
2581 #endif
2583 case 0x1F:
2584 /* HW_ST (PALcode) */
2585 #ifndef CONFIG_USER_ONLY
2586 REQUIRE_TB_FLAG(TB_FLAGS_PAL_MODE);
2588 TCGv addr = tcg_temp_new();
2589 va = load_gpr(ctx, ra);
2590 vb = load_gpr(ctx, rb);
2592 tcg_gen_addi_i64(addr, vb, disp12);
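/* As with HW_LD, bits <15:12> select the variant; only the physical
   stores (with or without lock) are implemented, the virtual forms
   fault as illegal opcodes. */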
2593 switch ((insn >> 12) & 0xF) {
2594 case 0x0:
2595 /* Longword physical access */
2596 gen_helper_stl_phys(cpu_env, addr, va);
2597 break;
2598 case 0x1:
2599 /* Quadword physical access */
2600 gen_helper_stq_phys(cpu_env, addr, va);
2601 break;
2602 case 0x2:
2603 /* Longword physical access with lock */
2604 gen_helper_stl_c_phys(dest_gpr(ctx, ra), cpu_env, addr, va);
2605 break;
2606 case 0x3:
2607 /* Quadword physical access with lock */
2608 gen_helper_stq_c_phys(dest_gpr(ctx, ra), cpu_env, addr, va);
2609 break;
2610 case 0x4:
2611 /* Longword virtual access */
2612 goto invalid_opc;
2613 case 0x5:
2614 /* Quadword virtual access */
2615 goto invalid_opc;
2616 case 0x6:
2617 /* Invalid */
2618 goto invalid_opc;
2619 case 0x7:
2620 /* Invalid */
2621 goto invalid_opc;
2622 case 0x8:
2623 /* Invalid */
2624 goto invalid_opc;
2625 case 0x9:
2626 /* Invalid */
2627 goto invalid_opc;
2628 case 0xA:
2629 /* Invalid */
2630 goto invalid_opc;
2631 case 0xB:
2632 /* Invalid */
2633 goto invalid_opc;
2634 case 0xC:
2635 /* Longword virtual access with alternate access mode */
2636 goto invalid_opc;
2637 case 0xD:
2638 /* Quadword virtual access with alternate access mode */
2639 goto invalid_opc;
2640 case 0xE:
2641 /* Invalid */
2642 goto invalid_opc;
2643 case 0xF:
2644 /* Invalid */
2645 goto invalid_opc;
2647 tcg_temp_free(addr);
2648 break;
2650 #else
2651 goto invalid_opc;
2652 #endif
2653 case 0x20:
2654 /* LDF */
2655 gen_load_mem(ctx, &gen_qemu_ldf, ra, rb, disp16, 1, 0);
2656 break;
2657 case 0x21:
2658 /* LDG */
2659 gen_load_mem(ctx, &gen_qemu_ldg, ra, rb, disp16, 1, 0);
2660 break;
2661 case 0x22:
2662 /* LDS */
2663 gen_load_mem(ctx, &gen_qemu_lds, ra, rb, disp16, 1, 0);
2664 break;
2665 case 0x23:
2666 /* LDT */
2667 gen_load_mem(ctx, &tcg_gen_qemu_ld64, ra, rb, disp16, 1, 0);
2668 break;
2669 case 0x24:
2670 /* STF */
2671 gen_store_mem(ctx, &gen_qemu_stf, ra, rb, disp16, 1, 0);
2672 break;
2673 case 0x25:
2674 /* STG */
2675 gen_store_mem(ctx, &gen_qemu_stg, ra, rb, disp16, 1, 0);
2676 break;
2677 case 0x26:
2678 /* STS */
2679 gen_store_mem(ctx, &gen_qemu_sts, ra, rb, disp16, 1, 0);
2680 break;
2681 case 0x27:
2682 /* STT */
2683 gen_store_mem(ctx, &tcg_gen_qemu_st64, ra, rb, disp16, 1, 0);
2684 break;
2685 case 0x28:
2686 /* LDL */
2687 gen_load_mem(ctx, &tcg_gen_qemu_ld32s, ra, rb, disp16, 0, 0);
2688 break;
2689 case 0x29:
2690 /* LDQ */
2691 gen_load_mem(ctx, &tcg_gen_qemu_ld64, ra, rb, disp16, 0, 0);
2692 break;
2693 case 0x2A:
2694 /* LDL_L */
2695 gen_load_mem(ctx, &gen_qemu_ldl_l, ra, rb, disp16, 0, 0);
2696 break;
2697 case 0x2B:
2698 /* LDQ_L */
2699 gen_load_mem(ctx, &gen_qemu_ldq_l, ra, rb, disp16, 0, 0);
2700 break;
2701 case 0x2C:
2702 /* STL */
2703 gen_store_mem(ctx, &tcg_gen_qemu_st32, ra, rb, disp16, 0, 0);
2704 break;
2705 case 0x2D:
2706 /* STQ */
2707 gen_store_mem(ctx, &tcg_gen_qemu_st64, ra, rb, disp16, 0, 0);
2708 break;
2709 case 0x2E:
2710 /* STL_C */
2711 ret = gen_store_conditional(ctx, ra, rb, disp16, 0);
2712 break;
2713 case 0x2F:
2714 /* STQ_C */
2715 ret = gen_store_conditional(ctx, ra, rb, disp16, 1);
2716 break;
2717 case 0x30:
2718 /* BR */
2719 ret = gen_bdirect(ctx, ra, disp21);
2720 break;
2721 case 0x31: /* FBEQ */
2722 ret = gen_fbcond(ctx, TCG_COND_EQ, ra, disp21);
2723 break;
2724 case 0x32: /* FBLT */
2725 ret = gen_fbcond(ctx, TCG_COND_LT, ra, disp21);
2726 break;
2727 case 0x33: /* FBLE */
2728 ret = gen_fbcond(ctx, TCG_COND_LE, ra, disp21);
2729 break;
2730 case 0x34:
2731 /* BSR */
2732 ret = gen_bdirect(ctx, ra, disp21);
2733 break;
2734 case 0x35: /* FBNE */
2735 ret = gen_fbcond(ctx, TCG_COND_NE, ra, disp21);
2736 break;
2737 case 0x36: /* FBGE */
2738 ret = gen_fbcond(ctx, TCG_COND_GE, ra, disp21);
2739 break;
2740 case 0x37: /* FBGT */
2741 ret = gen_fbcond(ctx, TCG_COND_GT, ra, disp21);
2742 break;
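/* For BLBC/BLBS the final argument to gen_bcond is nonzero, which
   presumably masks Ra to its low bit before the compare, so BLBC takes
   the branch when the low bit is clear and BLBS when it is set. */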
2743 case 0x38:
2744 /* BLBC */
2745 ret = gen_bcond(ctx, TCG_COND_EQ, ra, disp21, 1);
2746 break;
2747 case 0x39:
2748 /* BEQ */
2749 ret = gen_bcond(ctx, TCG_COND_EQ, ra, disp21, 0);
2750 break;
2751 case 0x3A:
2752 /* BLT */
2753 ret = gen_bcond(ctx, TCG_COND_LT, ra, disp21, 0);
2754 break;
2755 case 0x3B:
2756 /* BLE */
2757 ret = gen_bcond(ctx, TCG_COND_LE, ra, disp21, 0);
2758 break;
2759 case 0x3C:
2760 /* BLBS */
2761 ret = gen_bcond(ctx, TCG_COND_NE, ra, disp21, 1);
2762 break;
2763 case 0x3D:
2764 /* BNE */
2765 ret = gen_bcond(ctx, TCG_COND_NE, ra, disp21, 0);
2766 break;
2767 case 0x3E:
2768 /* BGE */
2769 ret = gen_bcond(ctx, TCG_COND_GE, ra, disp21, 0);
2770 break;
2771 case 0x3F:
2772 /* BGT */
2773 ret = gen_bcond(ctx, TCG_COND_GT, ra, disp21, 0);
2774 break;
2775 invalid_opc:
2776 ret = gen_invalid(ctx);
2777 break;
2780 return ret;
2783 static inline void gen_intermediate_code_internal(AlphaCPU *cpu,
2784 TranslationBlock *tb,
2785 bool search_pc)
2787 CPUState *cs = CPU(cpu);
2788 CPUAlphaState *env = &cpu->env;
2789 DisasContext ctx, *ctxp = &ctx;
2790 target_ulong pc_start;
2791 target_ulong pc_mask;
2792 uint32_t insn;
2793 CPUBreakpoint *bp;
2794 int j, lj = -1;
2795 ExitStatus ret;
2796 int num_insns;
2797 int max_insns;
2799 pc_start = tb->pc;
2801 ctx.tb = tb;
2802 ctx.pc = pc_start;
2803 ctx.mem_idx = cpu_mmu_index(env);
2804 ctx.implver = env->implver;
2805 ctx.singlestep_enabled = cs->singlestep_enabled;
2807 /* ??? Every TB begins with unset rounding mode, to be initialized on
2808 the first fp insn of the TB. Alternatively we could define a proper
2809 default for every TB (e.g. QUAL_RM_N or QUAL_RM_D) and make sure
2810 to reset the FP_STATUS to that default at the end of any TB that
2811 changes the default. We could even (gasp) dynamically figure out
2812 what default would be most efficient given the running program. */
2813 ctx.tb_rm = -1;
2814 /* Similarly for flush-to-zero. */
2815 ctx.tb_ftz = -1;
2817 num_insns = 0;
2818 max_insns = tb->cflags & CF_COUNT_MASK;
2819 if (max_insns == 0) {
2820 max_insns = CF_COUNT_MASK;
2823 if (in_superpage(&ctx, pc_start)) {
2824 pc_mask = (1ULL << 41) - 1;
2825 } else {
2826 pc_mask = ~TARGET_PAGE_MASK;
2829 gen_tb_start(tb);
2830 do {
2831 if (unlikely(!QTAILQ_EMPTY(&cs->breakpoints))) {
2832 QTAILQ_FOREACH(bp, &cs->breakpoints, entry) {
2833 if (bp->pc == ctx.pc) {
2834 gen_excp(&ctx, EXCP_DEBUG, 0);
2835 break;
2839 if (search_pc) {
2840 j = tcg_op_buf_count();
2841 if (lj < j) {
2842 lj++;
2843 while (lj < j) {
2844 tcg_ctx.gen_opc_instr_start[lj++] = 0;
2847 tcg_ctx.gen_opc_pc[lj] = ctx.pc;
2848 tcg_ctx.gen_opc_instr_start[lj] = 1;
2849 tcg_ctx.gen_opc_icount[lj] = num_insns;
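/* In search_pc mode we record, at the op index that starts each guest
   insn, the guest PC and instruction count, so that restore_state_to_opc()
   below can map a faulting op back onto a guest PC. */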
2851 if (num_insns + 1 == max_insns && (tb->cflags & CF_LAST_IO)) {
2852 gen_io_start();
2854 insn = cpu_ldl_code(env, ctx.pc);
2855 num_insns++;
2857 if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP | CPU_LOG_TB_OP_OPT))) {
2858 tcg_gen_debug_insn_start(ctx.pc);
2861 TCGV_UNUSED_I64(ctx.zero);
2862 TCGV_UNUSED_I64(ctx.sink);
2863 TCGV_UNUSED_I64(ctx.lit);
2865 ctx.pc += 4;
2866 ret = translate_one(ctxp, insn);
2868 if (!TCGV_IS_UNUSED_I64(ctx.sink)) {
2869 tcg_gen_discard_i64(ctx.sink);
2870 tcg_temp_free(ctx.sink);
2872 if (!TCGV_IS_UNUSED_I64(ctx.zero)) {
2873 tcg_temp_free(ctx.zero);
2875 if (!TCGV_IS_UNUSED_I64(ctx.lit)) {
2876 tcg_temp_free(ctx.lit);
2879 /* If we reach a page boundary, are single stepping,
2880 or exhaust instruction count, stop generation. */
2881 if (ret == NO_EXIT
2882 && ((ctx.pc & pc_mask) == 0
2883 || tcg_op_buf_full()
2884 || num_insns >= max_insns
2885 || singlestep
2886 || ctx.singlestep_enabled)) {
2887 ret = EXIT_PC_STALE;
2889 } while (ret == NO_EXIT);
2891 if (tb->cflags & CF_LAST_IO) {
2892 gen_io_end();
2895 switch (ret) {
2896 case EXIT_GOTO_TB:
2897 case EXIT_NORETURN:
2898 break;
2899 case EXIT_PC_STALE:
2900 tcg_gen_movi_i64(cpu_pc, ctx.pc);
2901 /* FALLTHRU */
2902 case EXIT_PC_UPDATED:
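/* When single-stepping, end the TB with a debug exception so the
   debugger regains control; otherwise a plain exit_tb suffices. */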
2903 if (ctx.singlestep_enabled) {
2904 gen_excp_1(EXCP_DEBUG, 0);
2905 } else {
2906 tcg_gen_exit_tb(0);
2908 break;
2909 default:
2910 abort();
2913 gen_tb_end(tb, num_insns);
2915 if (search_pc) {
2916 j = tcg_op_buf_count();
2917 lj++;
2918 while (lj <= j) {
2919 tcg_ctx.gen_opc_instr_start[lj++] = 0;
2921 } else {
2922 tb->size = ctx.pc - pc_start;
2923 tb->icount = num_insns;
2926 #ifdef DEBUG_DISAS
2927 if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
2928 qemu_log("IN: %s\n", lookup_symbol(pc_start));
2929 log_target_disas(env, pc_start, ctx.pc - pc_start, 1);
2930 qemu_log("\n");
2932 #endif
2935 void gen_intermediate_code (CPUAlphaState *env, struct TranslationBlock *tb)
2937 gen_intermediate_code_internal(alpha_env_get_cpu(env), tb, false);
2940 void gen_intermediate_code_pc (CPUAlphaState *env, struct TranslationBlock *tb)
2942 gen_intermediate_code_internal(alpha_env_get_cpu(env), tb, true);
2945 void restore_state_to_opc(CPUAlphaState *env, TranslationBlock *tb, int pc_pos)
2947 env->pc = tcg_ctx.gen_opc_pc[pc_pos];