/*
 * Alpha emulation cpu translation for qemu.
 *
 * Copyright (c) 2007 Jocelyn Mayer
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "cpu.h"
#include "disas/disas.h"
#include "qemu/host-utils.h"
#include "tcg-op.h"
#include "exec/cpu_ldst.h"

#include "exec/helper-proto.h"
#include "exec/helper-gen.h"

#undef ALPHA_DEBUG_DISAS
#define CONFIG_SOFTFLOAT_INLINE

#ifdef ALPHA_DEBUG_DISAS
# define LOG_DISAS(...) qemu_log_mask(CPU_LOG_TB_IN_ASM, ## __VA_ARGS__)
#else
# define LOG_DISAS(...) do { } while (0)
#endif

typedef struct DisasContext DisasContext;
struct DisasContext {
    struct TranslationBlock *tb;
    uint64_t pc;
    int mem_idx;

    /* Current rounding mode for this TB.  */
    int tb_rm;
    /* Current flush-to-zero setting for this TB.  */
    int tb_ftz;

    /* implver value for this CPU.  */
    int implver;

    /* Temporaries for $31 and $f31 as source and destination.  */
    TCGv zero;
    TCGv sink;
    /* Temporary for immediate constants.  */
    TCGv lit;

    bool singlestep_enabled;
};

/* Return values from translate_one, indicating the state of the TB.
   Note that zero indicates that we are not exiting the TB.  */

typedef enum {
    NO_EXIT,

    /* We have emitted one or more goto_tb.  No fixup required.  */
    EXIT_GOTO_TB,

    /* We are not using a goto_tb (for whatever reason), but have updated
       the PC (for whatever reason), so there's no need to do it again on
       exiting the TB.  */
    EXIT_PC_UPDATED,

    /* We are exiting the TB, but have neither emitted a goto_tb, nor
       updated the PC for the next instruction to be executed.  */
    EXIT_PC_STALE,

    /* We are ending the TB with a noreturn function call, e.g. longjmp.
       No following code will be executed.  */
    EXIT_NORETURN,
} ExitStatus;

/* global register indexes */
static TCGv_ptr cpu_env;
static TCGv cpu_ir[31];
static TCGv cpu_fir[31];
static TCGv cpu_pc;
static TCGv cpu_lock_addr;
static TCGv cpu_lock_st_addr;
static TCGv cpu_lock_value;

#include "exec/gen-icount.h"

void alpha_translate_init(void)
{
#define DEF_VAR(V)  { &cpu_##V, #V, offsetof(CPUAlphaState, V) }

    typedef struct { TCGv *var; const char *name; int ofs; } GlobalVar;
    static const GlobalVar vars[] = {
        DEF_VAR(pc),
        DEF_VAR(lock_addr),
        DEF_VAR(lock_st_addr),
        DEF_VAR(lock_value),
    };

#undef DEF_VAR

    /* Use the symbolic register names that match the disassembler.  */
    static const char greg_names[31][4] = {
        "v0", "t0", "t1", "t2", "t3", "t4", "t5", "t6",
        "t7", "s0", "s1", "s2", "s3", "s4", "s5", "fp",
        "a0", "a1", "a2", "a3", "a4", "a5", "t8", "t9",
        "t10", "t11", "ra", "t12", "at", "gp", "sp"
    };
    static const char freg_names[31][4] = {
        "f0", "f1", "f2", "f3", "f4", "f5", "f6", "f7",
        "f8", "f9", "f10", "f11", "f12", "f13", "f14", "f15",
        "f16", "f17", "f18", "f19", "f20", "f21", "f22", "f23",
        "f24", "f25", "f26", "f27", "f28", "f29", "f30"
    };

    static bool done_init = false;
    int i;

    if (done_init) {
        return;
    }
    done_init = true;

    cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");

    for (i = 0; i < 31; i++) {
        cpu_ir[i] = tcg_global_mem_new_i64(TCG_AREG0,
                                           offsetof(CPUAlphaState, ir[i]),
                                           greg_names[i]);
    }

    for (i = 0; i < 31; i++) {
        cpu_fir[i] = tcg_global_mem_new_i64(TCG_AREG0,
                                            offsetof(CPUAlphaState, fir[i]),
                                            freg_names[i]);
    }

    for (i = 0; i < ARRAY_SIZE(vars); ++i) {
        const GlobalVar *v = &vars[i];
        *v->var = tcg_global_mem_new_i64(TCG_AREG0, v->ofs, v->name);
    }
}

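/* Accessors implementing the architectural zero registers.  $31 and $f31
   always read as zero: a read materializes a zero temporary, while a write
   is redirected to a scratch "sink" whose value is simply discarded.  The
   temporaries live in DisasContext and are allocated lazily on first use.  */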
static TCGv load_zero(DisasContext *ctx)
{
    if (TCGV_IS_UNUSED_I64(ctx->zero)) {
        ctx->zero = tcg_const_i64(0);
    }
    return ctx->zero;
}

static TCGv dest_sink(DisasContext *ctx)
{
    if (TCGV_IS_UNUSED_I64(ctx->sink)) {
        ctx->sink = tcg_temp_new();
    }
    return ctx->sink;
}

static TCGv load_gpr(DisasContext *ctx, unsigned reg)
{
    if (likely(reg < 31)) {
        return cpu_ir[reg];
    } else {
        return load_zero(ctx);
    }
}

static TCGv load_gpr_lit(DisasContext *ctx, unsigned reg,
                         uint8_t lit, bool islit)
{
    if (islit) {
        ctx->lit = tcg_const_i64(lit);
        return ctx->lit;
    } else if (likely(reg < 31)) {
        return cpu_ir[reg];
    } else {
        return load_zero(ctx);
    }
}

static TCGv dest_gpr(DisasContext *ctx, unsigned reg)
{
    if (likely(reg < 31)) {
        return cpu_ir[reg];
    } else {
        return dest_sink(ctx);
    }
}

static TCGv load_fpr(DisasContext *ctx, unsigned reg)
{
    if (likely(reg < 31)) {
        return cpu_fir[reg];
    } else {
        return load_zero(ctx);
    }
}

static TCGv dest_fpr(DisasContext *ctx, unsigned reg)
{
    if (likely(reg < 31)) {
        return cpu_fir[reg];
    } else {
        return dest_sink(ctx);
    }
}

static void gen_excp_1(int exception, int error_code)
{
    TCGv_i32 tmp1, tmp2;

    tmp1 = tcg_const_i32(exception);
    tmp2 = tcg_const_i32(error_code);
    gen_helper_excp(cpu_env, tmp1, tmp2);
    tcg_temp_free_i32(tmp2);
    tcg_temp_free_i32(tmp1);
}

static ExitStatus gen_excp(DisasContext *ctx, int exception, int error_code)
{
    tcg_gen_movi_i64(cpu_pc, ctx->pc);
    gen_excp_1(exception, error_code);
    return EXIT_NORETURN;
}

static inline ExitStatus gen_invalid(DisasContext *ctx)
{
    return gen_excp(ctx, EXCP_OPCDEC, 0);
}

static inline void gen_qemu_ldf(TCGv t0, TCGv t1, int flags)
{
    TCGv_i32 tmp32 = tcg_temp_new_i32();
    tcg_gen_qemu_ld_i32(tmp32, t1, flags, MO_LEUL);
    gen_helper_memory_to_f(t0, tmp32);
    tcg_temp_free_i32(tmp32);
}

static inline void gen_qemu_ldg(TCGv t0, TCGv t1, int flags)
{
    TCGv tmp = tcg_temp_new();
    tcg_gen_qemu_ld_i64(tmp, t1, flags, MO_LEQ);
    gen_helper_memory_to_g(t0, tmp);
    tcg_temp_free(tmp);
}

static inline void gen_qemu_lds(TCGv t0, TCGv t1, int flags)
{
    TCGv_i32 tmp32 = tcg_temp_new_i32();
    tcg_gen_qemu_ld_i32(tmp32, t1, flags, MO_LEUL);
    gen_helper_memory_to_s(t0, tmp32);
    tcg_temp_free_i32(tmp32);
}

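/* Load-locked.  In addition to the normal load, remember the address and
   the value just loaded, so that a later store-conditional can verify that
   the location still holds what the lock observed.  */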
static inline void gen_qemu_ldl_l(TCGv t0, TCGv t1, int flags)
{
    tcg_gen_qemu_ld_i64(t0, t1, flags, MO_LESL);
    tcg_gen_mov_i64(cpu_lock_addr, t1);
    tcg_gen_mov_i64(cpu_lock_value, t0);
}

static inline void gen_qemu_ldq_l(TCGv t0, TCGv t1, int flags)
{
    tcg_gen_qemu_ld_i64(t0, t1, flags, MO_LEQ);
    tcg_gen_mov_i64(cpu_lock_addr, t1);
    tcg_gen_mov_i64(cpu_lock_value, t0);
}

static inline void gen_load_mem(DisasContext *ctx,
                                void (*tcg_gen_qemu_load)(TCGv t0, TCGv t1,
                                                          int flags),
                                int ra, int rb, int32_t disp16, bool fp,
                                bool clear)
{
    TCGv tmp, addr, va;

    /* LDQ_U with ra $31 is UNOP.  Other various loads are forms of
       prefetches, which we can treat as nops.  No worries about
       missed exceptions here.  */
    if (unlikely(ra == 31)) {
        return;
    }

    tmp = tcg_temp_new();
    addr = load_gpr(ctx, rb);

    if (disp16) {
        tcg_gen_addi_i64(tmp, addr, disp16);
        addr = tmp;
    }
    if (clear) {
        tcg_gen_andi_i64(tmp, addr, ~0x7);
        addr = tmp;
    }

    va = (fp ? cpu_fir[ra] : cpu_ir[ra]);
    tcg_gen_qemu_load(va, addr, ctx->mem_idx);

    tcg_temp_free(tmp);
}

static inline void gen_qemu_stf(TCGv t0, TCGv t1, int flags)
{
    TCGv_i32 tmp32 = tcg_temp_new_i32();
    gen_helper_f_to_memory(tmp32, t0);
    tcg_gen_qemu_st_i32(tmp32, t1, flags, MO_LEUL);
    tcg_temp_free_i32(tmp32);
}

static inline void gen_qemu_stg(TCGv t0, TCGv t1, int flags)
{
    TCGv tmp = tcg_temp_new();
    gen_helper_g_to_memory(tmp, t0);
    tcg_gen_qemu_st_i64(tmp, t1, flags, MO_LEQ);
    tcg_temp_free(tmp);
}

static inline void gen_qemu_sts(TCGv t0, TCGv t1, int flags)
{
    TCGv_i32 tmp32 = tcg_temp_new_i32();
    gen_helper_s_to_memory(tmp32, t0);
    tcg_gen_qemu_st_i32(tmp32, t1, flags, MO_LEUL);
    tcg_temp_free_i32(tmp32);
}

static inline void gen_store_mem(DisasContext *ctx,
                                 void (*tcg_gen_qemu_store)(TCGv t0, TCGv t1,
                                                            int flags),
                                 int ra, int rb, int32_t disp16, bool fp,
                                 bool clear)
{
    TCGv tmp, addr, va;

    tmp = tcg_temp_new();
    addr = load_gpr(ctx, rb);

    if (disp16) {
        tcg_gen_addi_i64(tmp, addr, disp16);
        addr = tmp;
    }
    if (clear) {
        tcg_gen_andi_i64(tmp, addr, ~0x7);
        addr = tmp;
    }

    va = (fp ? load_fpr(ctx, ra) : load_gpr(ctx, ra));
    tcg_gen_qemu_store(va, addr, ctx->mem_idx);

    tcg_temp_free(tmp);
}

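/* Store-conditional (STL_C/STQ_C).  The store succeeds, writing 1 into Ra,
   only if the effective address matches cpu_lock_addr; in system mode the
   emulation additionally reloads the location and compares it against
   cpu_lock_value before committing the store.  */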
static ExitStatus gen_store_conditional(DisasContext *ctx, int ra, int rb,
                                        int32_t disp16, int quad)
{
    TCGv addr;

    if (ra == 31) {
        /* ??? Don't bother storing anything.  The user can't tell
           the difference, since the zero register always reads zero.  */
        return NO_EXIT;
    }

#if defined(CONFIG_USER_ONLY)
    addr = cpu_lock_st_addr;
#else
    addr = tcg_temp_local_new();
#endif

    tcg_gen_addi_i64(addr, load_gpr(ctx, rb), disp16);

#if defined(CONFIG_USER_ONLY)
    /* ??? This is handled via a complicated version of compare-and-swap
       in the cpu_loop.  Hopefully one day we'll have a real CAS opcode
       in TCG so that this isn't necessary.  */
    return gen_excp(ctx, quad ? EXCP_STQ_C : EXCP_STL_C, ra);
#else
    /* ??? In system mode we are never multi-threaded, so CAS can be
       implemented via a non-atomic load-compare-store sequence.  */
    {
        int lab_fail, lab_done;
        TCGv val;

        lab_fail = gen_new_label();
        lab_done = gen_new_label();
        tcg_gen_brcond_i64(TCG_COND_NE, addr, cpu_lock_addr, lab_fail);

        val = tcg_temp_new();
        tcg_gen_qemu_ld_i64(val, addr, ctx->mem_idx, quad ? MO_LEQ : MO_LESL);
        tcg_gen_brcond_i64(TCG_COND_NE, val, cpu_lock_value, lab_fail);

        tcg_gen_qemu_st_i64(cpu_ir[ra], addr, ctx->mem_idx,
                            quad ? MO_LEQ : MO_LEUL);
        tcg_gen_movi_i64(cpu_ir[ra], 1);
        tcg_gen_br(lab_done);

        gen_set_label(lab_fail);
        tcg_gen_movi_i64(cpu_ir[ra], 0);

        gen_set_label(lab_done);
        tcg_gen_movi_i64(cpu_lock_addr, -1);

        tcg_temp_free(addr);
        return NO_EXIT;
    }
#endif
}

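/* Return true for an address within the superpage (the kseg region): a
   properly sign-extended negative address whose bits <42:41> are 10.
   Such mappings are fixed, so their page permissions can never change.  */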
static bool in_superpage(DisasContext *ctx, int64_t addr)
{
    return ((ctx->tb->flags & TB_FLAGS_USER_MODE) == 0
            && addr < 0
            && ((addr >> 41) & 3) == 2
            && addr >> TARGET_VIRT_ADDR_SPACE_BITS == addr >> 63);
}

static bool use_goto_tb(DisasContext *ctx, uint64_t dest)
{
    /* Suppress goto_tb in the case of single-stepping and IO.  */
    if ((ctx->tb->cflags & CF_LAST_IO)
        || ctx->singlestep_enabled || singlestep) {
        return false;
    }
    /* If the destination is in the superpage, the page perms can't change.  */
    if (in_superpage(ctx, dest)) {
        return true;
    }
    /* Check for the dest on the same page as the start of the TB.  */
    return ((ctx->tb->pc ^ dest) & TARGET_PAGE_MASK) == 0;
}

static ExitStatus gen_bdirect(DisasContext *ctx, int ra, int32_t disp)
{
    uint64_t dest = ctx->pc + (disp << 2);

    if (ra != 31) {
        tcg_gen_movi_i64(cpu_ir[ra], ctx->pc);
    }

    /* Notice branch-to-next; used to initialize RA with the PC.  */
    if (disp == 0) {
        return 0;
    } else if (use_goto_tb(ctx, dest)) {
        tcg_gen_goto_tb(0);
        tcg_gen_movi_i64(cpu_pc, dest);
        tcg_gen_exit_tb((uintptr_t)ctx->tb);
        return EXIT_GOTO_TB;
    } else {
        tcg_gen_movi_i64(cpu_pc, dest);
        return EXIT_PC_UPDATED;
    }
}

static ExitStatus gen_bcond_internal(DisasContext *ctx, TCGCond cond,
                                     TCGv cmp, int32_t disp)
{
    uint64_t dest = ctx->pc + (disp << 2);
    int lab_true = gen_new_label();

    if (use_goto_tb(ctx, dest)) {
        tcg_gen_brcondi_i64(cond, cmp, 0, lab_true);

        tcg_gen_goto_tb(0);
        tcg_gen_movi_i64(cpu_pc, ctx->pc);
        tcg_gen_exit_tb((uintptr_t)ctx->tb);

        gen_set_label(lab_true);
        tcg_gen_goto_tb(1);
        tcg_gen_movi_i64(cpu_pc, dest);
        tcg_gen_exit_tb((uintptr_t)ctx->tb + 1);

        return EXIT_GOTO_TB;
    } else {
        TCGv_i64 z = tcg_const_i64(0);
        TCGv_i64 d = tcg_const_i64(dest);
        TCGv_i64 p = tcg_const_i64(ctx->pc);

        tcg_gen_movcond_i64(cond, cpu_pc, cmp, z, d, p);

        tcg_temp_free_i64(z);
        tcg_temp_free_i64(d);
        tcg_temp_free_i64(p);
        return EXIT_PC_UPDATED;
    }
}

static ExitStatus gen_bcond(DisasContext *ctx, TCGCond cond, int ra,
                            int32_t disp, int mask)
{
    TCGv cmp_tmp;

    if (mask) {
        cmp_tmp = tcg_temp_new();
        tcg_gen_andi_i64(cmp_tmp, load_gpr(ctx, ra), 1);
    } else {
        cmp_tmp = load_gpr(ctx, ra);
    }

    return gen_bcond_internal(ctx, cond, cmp_tmp, disp);
}

/* Fold -0.0 for comparison with COND.  */

static void gen_fold_mzero(TCGCond cond, TCGv dest, TCGv src)
{
    uint64_t mzero = 1ull << 63;

    switch (cond) {
    case TCG_COND_LE:
    case TCG_COND_GT:
        /* For <= or >, the -0.0 value directly compares the way we want.  */
        tcg_gen_mov_i64(dest, src);
        break;

    case TCG_COND_EQ:
    case TCG_COND_NE:
        /* For == or !=, we can simply mask off the sign bit and compare.  */
        tcg_gen_andi_i64(dest, src, mzero - 1);
        break;

    case TCG_COND_GE:
    case TCG_COND_LT:
        /* For >= or <, map -0.0 to +0.0 via comparison and mask.  */
        tcg_gen_setcondi_i64(TCG_COND_NE, dest, src, mzero);
        tcg_gen_neg_i64(dest, dest);
        tcg_gen_and_i64(dest, dest, src);
        break;

    default:
        abort();
    }
}

static ExitStatus gen_fbcond(DisasContext *ctx, TCGCond cond, int ra,
                             int32_t disp)
{
    TCGv cmp_tmp = tcg_temp_new();
    gen_fold_mzero(cond, cmp_tmp, load_fpr(ctx, ra));
    return gen_bcond_internal(ctx, cond, cmp_tmp, disp);
}

static void gen_fcmov(DisasContext *ctx, TCGCond cond, int ra, int rb, int rc)
{
    TCGv_i64 va, vb, z;

    z = load_zero(ctx);
    vb = load_fpr(ctx, rb);
    va = tcg_temp_new();
    gen_fold_mzero(cond, va, load_fpr(ctx, ra));

    tcg_gen_movcond_i64(cond, dest_fpr(ctx, rc), va, z, vb, load_fpr(ctx, rc));

    tcg_temp_free(va);
}

#define QUAL_RM_N       0x080   /* Round mode nearest even */
#define QUAL_RM_C       0x000   /* Round mode chopped */
#define QUAL_RM_M       0x040   /* Round mode minus infinity */
#define QUAL_RM_D       0x0c0   /* Round mode dynamic */
#define QUAL_RM_MASK    0x0c0

#define QUAL_U          0x100   /* Underflow enable (fp output) */
#define QUAL_V          0x100   /* Overflow enable (int output) */
#define QUAL_S          0x400   /* Software completion enable */
#define QUAL_I          0x200   /* Inexact detection enable */

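/* The qualifiers above come straight from the fn11 field of IEEE
   floating-point instructions.  Note that QUAL_U and QUAL_V share an
   encoding: the bit means underflow-enable for instructions with an FP
   output and overflow-enable for those with an integer output.  */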
static void gen_qual_roundmode(DisasContext *ctx, int fn11)
{
    TCGv_i32 tmp;

    fn11 &= QUAL_RM_MASK;
    if (fn11 == ctx->tb_rm) {
        return;
    }
    ctx->tb_rm = fn11;

    tmp = tcg_temp_new_i32();
    switch (fn11) {
    case QUAL_RM_N:
        tcg_gen_movi_i32(tmp, float_round_nearest_even);
        break;
    case QUAL_RM_C:
        tcg_gen_movi_i32(tmp, float_round_to_zero);
        break;
    case QUAL_RM_M:
        tcg_gen_movi_i32(tmp, float_round_down);
        break;
    case QUAL_RM_D:
        tcg_gen_ld8u_i32(tmp, cpu_env,
                         offsetof(CPUAlphaState, fpcr_dyn_round));
        break;
    }

#if defined(CONFIG_SOFTFLOAT_INLINE)
    /* ??? The "fpu/softfloat.h" interface is to call set_float_rounding_mode.
       With CONFIG_SOFTFLOAT that expands to an out-of-line call that just
       sets the one field.  */
    tcg_gen_st8_i32(tmp, cpu_env,
                    offsetof(CPUAlphaState, fp_status.float_rounding_mode));
#else
    gen_helper_setroundmode(tmp);
#endif

    tcg_temp_free_i32(tmp);
}

static void gen_qual_flushzero(DisasContext *ctx, int fn11)
{
    TCGv_i32 tmp;

    fn11 &= QUAL_U;
    if (fn11 == ctx->tb_ftz) {
        return;
    }
    ctx->tb_ftz = fn11;

    tmp = tcg_temp_new_i32();
    if (fn11) {
        /* Underflow is enabled, use the FPCR setting.  */
        tcg_gen_ld8u_i32(tmp, cpu_env,
                         offsetof(CPUAlphaState, fpcr_flush_to_zero));
    } else {
        /* Underflow is disabled, force flush-to-zero.  */
        tcg_gen_movi_i32(tmp, 1);
    }

#if defined(CONFIG_SOFTFLOAT_INLINE)
    tcg_gen_st8_i32(tmp, cpu_env,
                    offsetof(CPUAlphaState, fp_status.flush_to_zero));
#else
    gen_helper_setflushzero(tmp);
#endif

    tcg_temp_free_i32(tmp);
}

static TCGv gen_ieee_input(DisasContext *ctx, int reg, int fn11, int is_cmp)
{
    TCGv val;

    if (unlikely(reg == 31)) {
        val = load_zero(ctx);
    } else {
        val = cpu_fir[reg];
        if ((fn11 & QUAL_S) == 0) {
            if (is_cmp) {
                gen_helper_ieee_input_cmp(cpu_env, val);
            } else {
                gen_helper_ieee_input(cpu_env, val);
            }
        }
    }
    return val;
}

static void gen_fp_exc_clear(void)
{
#if defined(CONFIG_SOFTFLOAT_INLINE)
    TCGv_i32 zero = tcg_const_i32(0);
    tcg_gen_st8_i32(zero, cpu_env,
                    offsetof(CPUAlphaState, fp_status.float_exception_flags));
    tcg_temp_free_i32(zero);
#else
    gen_helper_fp_exc_clear(cpu_env);
#endif
}

static void gen_fp_exc_raise_ignore(int rc, int fn11, int ignore)
{
    /* ??? We ought to be able to do something with imprecise exceptions.
       E.g. notice we're still in the trap shadow of something within the
       TB and do not generate the code to signal the exception; end the TB
       when an exception is forced to arrive, either by consumption of a
       register value or TRAPB or EXCB.  */
    TCGv_i32 exc = tcg_temp_new_i32();
    TCGv_i32 reg;

#if defined(CONFIG_SOFTFLOAT_INLINE)
    tcg_gen_ld8u_i32(exc, cpu_env,
                     offsetof(CPUAlphaState, fp_status.float_exception_flags));
#else
    gen_helper_fp_exc_get(exc, cpu_env);
#endif

    if (ignore) {
        tcg_gen_andi_i32(exc, exc, ~ignore);
    }

    /* ??? Pass in the regno of the destination so that the helper can
       set EXC_MASK, which contains a bitmask of destination registers
       that have caused arithmetic traps.  A simple userspace emulation
       does not require this.  We do need it for a guest kernel's entArith,
       or if we were to do something clever with imprecise exceptions.  */
    reg = tcg_const_i32(rc + 32);

    if (fn11 & QUAL_S) {
        gen_helper_fp_exc_raise_s(cpu_env, exc, reg);
    } else {
        gen_helper_fp_exc_raise(cpu_env, exc, reg);
    }

    tcg_temp_free_i32(reg);
    tcg_temp_free_i32(exc);
}

static inline void gen_fp_exc_raise(int rc, int fn11)
{
    gen_fp_exc_raise_ignore(rc, fn11, fn11 & QUAL_I ? 0 : float_flag_inexact);
}

static void gen_fcvtlq(TCGv vc, TCGv vb)
{
    TCGv tmp = tcg_temp_new();

    /* The arithmetic right shift here, plus the sign-extended mask below
       yields a sign-extended result without an explicit ext32s_i64.  */
    tcg_gen_sari_i64(tmp, vb, 32);
    tcg_gen_shri_i64(vc, vb, 29);
    tcg_gen_andi_i64(tmp, tmp, (int32_t)0xc0000000);
    tcg_gen_andi_i64(vc, vc, 0x3fffffff);
    tcg_gen_or_i64(vc, vc, tmp);

    tcg_temp_free(tmp);
}

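/* The inverse of gen_fcvtlq: spread a canonical 32-bit longword into the
   in-register longword layout, placing value bits <31:30> at bits <63:62>
   and bits <29:0> at bits <58:29>.  */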
static void gen_fcvtql(TCGv vc, TCGv vb)
{
    TCGv tmp = tcg_temp_new();

    tcg_gen_andi_i64(tmp, vb, (int32_t)0xc0000000);
    tcg_gen_andi_i64(vc, vb, 0x3FFFFFFF);
    tcg_gen_shli_i64(tmp, tmp, 32);
    tcg_gen_shli_i64(vc, vc, 29);
    tcg_gen_or_i64(vc, vc, tmp);

    tcg_temp_free(tmp);
}

static void gen_ieee_arith2(DisasContext *ctx,
                            void (*helper)(TCGv, TCGv_ptr, TCGv),
                            int rb, int rc, int fn11)
{
    TCGv vb;

    gen_qual_roundmode(ctx, fn11);
    gen_qual_flushzero(ctx, fn11);
    gen_fp_exc_clear();

    vb = gen_ieee_input(ctx, rb, fn11, 0);
    helper(dest_fpr(ctx, rc), cpu_env, vb);

    gen_fp_exc_raise(rc, fn11);
}

#define IEEE_ARITH2(name)                                       \
static inline void glue(gen_f, name)(DisasContext *ctx,         \
                                     int rb, int rc, int fn11)  \
{                                                               \
    gen_ieee_arith2(ctx, gen_helper_##name, rb, rc, fn11);      \
}
IEEE_ARITH2(sqrts)
IEEE_ARITH2(sqrtt)
IEEE_ARITH2(cvtst)
IEEE_ARITH2(cvtts)

static void gen_fcvttq(DisasContext *ctx, int rb, int rc, int fn11)
{
    TCGv vb, vc;
    int ignore = 0;

    /* No need to set flushzero, since we have an integer output.  */
    gen_fp_exc_clear();
    vb = gen_ieee_input(ctx, rb, fn11, 0);
    vc = dest_fpr(ctx, rc);

    /* Almost all integer conversions use cropped rounding, and most
       also do not have integer overflow enabled.  Special case that.  */
    switch (fn11) {
    case QUAL_RM_C:
        gen_helper_cvttq_c(vc, cpu_env, vb);
        break;
    case QUAL_V | QUAL_RM_C:
    case QUAL_S | QUAL_V | QUAL_RM_C:
        ignore = float_flag_inexact;
        /* FALLTHRU */
    case QUAL_S | QUAL_V | QUAL_I | QUAL_RM_C:
        gen_helper_cvttq_svic(vc, cpu_env, vb);
        break;
    default:
        gen_qual_roundmode(ctx, fn11);
        gen_helper_cvttq(vc, cpu_env, vb);
        ignore |= (fn11 & QUAL_V ? 0 : float_flag_overflow);
        ignore |= (fn11 & QUAL_I ? 0 : float_flag_inexact);
        break;
    }

    gen_fp_exc_raise_ignore(rc, fn11, ignore);
}

static void gen_ieee_intcvt(DisasContext *ctx,
                            void (*helper)(TCGv, TCGv_ptr, TCGv),
                            int rb, int rc, int fn11)
{
    TCGv vb, vc;

    gen_qual_roundmode(ctx, fn11);
    vb = load_fpr(ctx, rb);
    vc = dest_fpr(ctx, rc);

    /* The only exception that can be raised by integer conversion
       is inexact.  Thus we only need to worry about exceptions when
       inexact handling is requested.  */
    if (fn11 & QUAL_I) {
        gen_fp_exc_clear();
        helper(vc, cpu_env, vb);
        gen_fp_exc_raise(rc, fn11);
    } else {
        helper(vc, cpu_env, vb);
    }
}

#define IEEE_INTCVT(name)                                       \
static inline void glue(gen_f, name)(DisasContext *ctx,         \
                                     int rb, int rc, int fn11)  \
{                                                               \
    gen_ieee_intcvt(ctx, gen_helper_##name, rb, rc, fn11);      \
}
IEEE_INTCVT(cvtqs)
IEEE_INTCVT(cvtqt)

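/* Copy the bits of VA selected by MASK, optionally complementing VA, into
   the remaining bits of VB.  With the sign-bit mask this implements CPYS
   and CPYSN; with the sign+exponent mask it implements CPYSE.  */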
static void gen_cpy_mask(TCGv vc, TCGv va, TCGv vb, bool inv_a, uint64_t mask)
{
    TCGv vmask = tcg_const_i64(mask);
    TCGv tmp = tcg_temp_new_i64();

    if (inv_a) {
        tcg_gen_andc_i64(tmp, vmask, va);
    } else {
        tcg_gen_and_i64(tmp, va, vmask);
    }

    tcg_gen_andc_i64(vc, vb, vmask);
    tcg_gen_or_i64(vc, vc, tmp);

    tcg_temp_free(vmask);
    tcg_temp_free(tmp);
}

static void gen_ieee_arith3(DisasContext *ctx,
                            void (*helper)(TCGv, TCGv_ptr, TCGv, TCGv),
                            int ra, int rb, int rc, int fn11)
{
    TCGv va, vb, vc;

    gen_qual_roundmode(ctx, fn11);
    gen_qual_flushzero(ctx, fn11);
    gen_fp_exc_clear();

    va = gen_ieee_input(ctx, ra, fn11, 0);
    vb = gen_ieee_input(ctx, rb, fn11, 0);
    vc = dest_fpr(ctx, rc);
    helper(vc, cpu_env, va, vb);

    gen_fp_exc_raise(rc, fn11);
}

#define IEEE_ARITH3(name)                                               \
static inline void glue(gen_f, name)(DisasContext *ctx,                 \
                                     int ra, int rb, int rc, int fn11)  \
{                                                                       \
    gen_ieee_arith3(ctx, gen_helper_##name, ra, rb, rc, fn11);          \
}
IEEE_ARITH3(adds)
IEEE_ARITH3(subs)
IEEE_ARITH3(muls)
IEEE_ARITH3(divs)
IEEE_ARITH3(addt)
IEEE_ARITH3(subt)
IEEE_ARITH3(mult)
IEEE_ARITH3(divt)

static void gen_ieee_compare(DisasContext *ctx,
                             void (*helper)(TCGv, TCGv_ptr, TCGv, TCGv),
                             int ra, int rb, int rc, int fn11)
{
    TCGv va, vb, vc;

    gen_fp_exc_clear();

    va = gen_ieee_input(ctx, ra, fn11, 1);
    vb = gen_ieee_input(ctx, rb, fn11, 1);
    vc = dest_fpr(ctx, rc);
    helper(vc, cpu_env, va, vb);

    gen_fp_exc_raise(rc, fn11);
}

#define IEEE_CMP3(name)                                                 \
static inline void glue(gen_f, name)(DisasContext *ctx,                 \
                                     int ra, int rb, int rc, int fn11)  \
{                                                                       \
    gen_ieee_compare(ctx, gen_helper_##name, ra, rb, rc, fn11);         \
}
IEEE_CMP3(cmptun)
IEEE_CMP3(cmpteq)
IEEE_CMP3(cmptlt)
IEEE_CMP3(cmptle)

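/* Expand the 8-bit byte-select literal used by ZAPNOT into the equivalent
   64-bit mask: bit I of LIT selects byte I of the operand.  For example,
   zapnot_mask(0x0f) == 0x00000000ffffffffull, i.e. the low longword.  */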
static inline uint64_t zapnot_mask(uint8_t lit)
{
    uint64_t mask = 0;
    int i;

    for (i = 0; i < 8; ++i) {
        if ((lit >> i) & 1) {
            mask |= 0xffull << (i * 8);
        }
    }
    return mask;
}

/* Implement zapnot with an immediate operand, which expands to some
   form of immediate AND.  This is a basic building block in the
   definition of many of the other byte manipulation instructions.  */
static void gen_zapnoti(TCGv dest, TCGv src, uint8_t lit)
{
    switch (lit) {
    case 0x00:
        tcg_gen_movi_i64(dest, 0);
        break;
    case 0x01:
        tcg_gen_ext8u_i64(dest, src);
        break;
    case 0x03:
        tcg_gen_ext16u_i64(dest, src);
        break;
    case 0x0f:
        tcg_gen_ext32u_i64(dest, src);
        break;
    case 0xff:
        tcg_gen_mov_i64(dest, src);
        break;
    default:
        tcg_gen_andi_i64(dest, src, zapnot_mask(lit));
        break;
    }
}

/* EXTWH, EXTLH, EXTQH */
static void gen_ext_h(DisasContext *ctx, TCGv vc, TCGv va, int rb, bool islit,
                      uint8_t lit, uint8_t byte_mask)
{
    if (islit) {
        tcg_gen_shli_i64(vc, va, (64 - lit * 8) & 0x3f);
    } else {
        TCGv tmp = tcg_temp_new();
        tcg_gen_shli_i64(tmp, load_gpr(ctx, rb), 3);
        tcg_gen_neg_i64(tmp, tmp);
        tcg_gen_andi_i64(tmp, tmp, 0x3f);
        tcg_gen_shl_i64(vc, va, tmp);
        tcg_temp_free(tmp);
    }
    gen_zapnoti(vc, vc, byte_mask);
}

/* EXTBL, EXTWL, EXTLL, EXTQL */
static void gen_ext_l(DisasContext *ctx, TCGv vc, TCGv va, int rb, bool islit,
                      uint8_t lit, uint8_t byte_mask)
{
    if (islit) {
        tcg_gen_shri_i64(vc, va, (lit & 7) * 8);
    } else {
        TCGv tmp = tcg_temp_new();
        tcg_gen_andi_i64(tmp, load_gpr(ctx, rb), 7);
        tcg_gen_shli_i64(tmp, tmp, 3);
        tcg_gen_shr_i64(vc, va, tmp);
        tcg_temp_free(tmp);
    }
    gen_zapnoti(vc, vc, byte_mask);
}

/* INSWH, INSLH, INSQH */
static void gen_ins_h(DisasContext *ctx, TCGv vc, TCGv va, int rb, bool islit,
                      uint8_t lit, uint8_t byte_mask)
{
    TCGv tmp = tcg_temp_new();

    /* The instruction description has us left-shift the byte mask and extract
       bits <15:8> and apply that zap at the end.  This is equivalent to simply
       performing the zap first and shifting afterward.  */
    gen_zapnoti(tmp, va, byte_mask);

    if (islit) {
        lit &= 7;
        if (unlikely(lit == 0)) {
            tcg_gen_movi_i64(vc, 0);
        } else {
            tcg_gen_shri_i64(vc, tmp, 64 - lit * 8);
        }
    } else {
        TCGv shift = tcg_temp_new();

        /* If (B & 7) == 0, we need to shift by 64 and leave a zero.  Do this
           portably by splitting the shift into two parts: shift_count-1 and 1.
           Arrange for the -1 by using ones-complement instead of
           twos-complement in the negation: ~(B * 8) & 63.  */

        tcg_gen_shli_i64(shift, load_gpr(ctx, rb), 3);
        tcg_gen_not_i64(shift, shift);
        tcg_gen_andi_i64(shift, shift, 0x3f);

        tcg_gen_shr_i64(vc, tmp, shift);
        tcg_gen_shri_i64(vc, vc, 1);
        tcg_temp_free(shift);
    }
    tcg_temp_free(tmp);
}

/* INSBL, INSWL, INSLL, INSQL */
static void gen_ins_l(DisasContext *ctx, TCGv vc, TCGv va, int rb, bool islit,
                      uint8_t lit, uint8_t byte_mask)
{
    TCGv tmp = tcg_temp_new();

    /* The instruction description has us left-shift the byte mask
       the same number of byte slots as the data and apply the zap
       at the end.  This is equivalent to simply performing the zap
       first and shifting afterward.  */
    gen_zapnoti(tmp, va, byte_mask);

    if (islit) {
        tcg_gen_shli_i64(vc, tmp, (lit & 7) * 8);
    } else {
        TCGv shift = tcg_temp_new();
        tcg_gen_andi_i64(shift, load_gpr(ctx, rb), 7);
        tcg_gen_shli_i64(shift, shift, 3);
        tcg_gen_shl_i64(vc, tmp, shift);
        tcg_temp_free(shift);
    }
    tcg_temp_free(tmp);
}

/* MSKWH, MSKLH, MSKQH */
static void gen_msk_h(DisasContext *ctx, TCGv vc, TCGv va, int rb, bool islit,
                      uint8_t lit, uint8_t byte_mask)
{
    if (islit) {
        gen_zapnoti(vc, va, ~((byte_mask << (lit & 7)) >> 8));
    } else {
        TCGv shift = tcg_temp_new();
        TCGv mask = tcg_temp_new();

        /* The instruction description is as above, where the byte_mask
           is shifted left, and then we extract bits <15:8>.  This can be
           emulated with a right-shift on the expanded byte mask.  This
           requires extra care because for an input <2:0> == 0 we need a
           shift of 64 bits in order to generate a zero.  This is done by
           splitting the shift into two parts, the variable shift - 1
           followed by a constant 1 shift.  The code we expand below is
           equivalent to ~(B * 8) & 63.  */

        tcg_gen_shli_i64(shift, load_gpr(ctx, rb), 3);
        tcg_gen_not_i64(shift, shift);
        tcg_gen_andi_i64(shift, shift, 0x3f);
        tcg_gen_movi_i64(mask, zapnot_mask(byte_mask));
        tcg_gen_shr_i64(mask, mask, shift);
        tcg_gen_shri_i64(mask, mask, 1);

        tcg_gen_andc_i64(vc, va, mask);

        tcg_temp_free(mask);
        tcg_temp_free(shift);
    }
}

/* MSKBL, MSKWL, MSKLL, MSKQL */
static void gen_msk_l(DisasContext *ctx, TCGv vc, TCGv va, int rb, bool islit,
                      uint8_t lit, uint8_t byte_mask)
{
    if (islit) {
        gen_zapnoti(vc, va, ~(byte_mask << (lit & 7)));
    } else {
        TCGv shift = tcg_temp_new();
        TCGv mask = tcg_temp_new();

        tcg_gen_andi_i64(shift, load_gpr(ctx, rb), 7);
        tcg_gen_shli_i64(shift, shift, 3);
        tcg_gen_movi_i64(mask, zapnot_mask(byte_mask));
        tcg_gen_shl_i64(mask, mask, shift);

        tcg_gen_andc_i64(vc, va, mask);

        tcg_temp_free(mask);
        tcg_temp_free(shift);
    }
}

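/* RC and RS (see the 0x18 opcode group below): read the per-cpu interrupt
   flag into Ra, then leave it cleared (SET == 0) or set (SET == 1).  */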
static void gen_rx(int ra, int set)
{
    TCGv_i32 tmp;

    if (ra != 31) {
        tcg_gen_ld8u_i64(cpu_ir[ra], cpu_env, offsetof(CPUAlphaState, intr_flag));
    }

    tmp = tcg_const_i32(set);
    tcg_gen_st8_i32(tmp, cpu_env, offsetof(CPUAlphaState, intr_flag));
    tcg_temp_free_i32(tmp);
}

static ExitStatus gen_call_pal(DisasContext *ctx, int palcode)
{
    /* We're emulating OSF/1 PALcode.  Many of these are trivial access
       to internal cpu registers.  */

    /* Unprivileged PAL call */
    if (palcode >= 0x80 && palcode < 0xC0) {
        switch (palcode) {
        case 0x86:
            /* IMB */
            /* No-op inside QEMU.  */
            break;
        case 0x9E:
            /* RDUNIQUE */
            tcg_gen_ld_i64(cpu_ir[IR_V0], cpu_env,
                           offsetof(CPUAlphaState, unique));
            break;
        case 0x9F:
            /* WRUNIQUE */
            tcg_gen_st_i64(cpu_ir[IR_A0], cpu_env,
                           offsetof(CPUAlphaState, unique));
            break;
        default:
            palcode &= 0xbf;
            goto do_call_pal;
        }
        return NO_EXIT;
    }

#ifndef CONFIG_USER_ONLY
    /* Privileged PAL code */
    if (palcode < 0x40 && (ctx->tb->flags & TB_FLAGS_USER_MODE) == 0) {
        switch (palcode) {
        case 0x01:
            /* CFLUSH */
            /* No-op inside QEMU.  */
            break;
        case 0x02:
            /* DRAINA */
            /* No-op inside QEMU.  */
            break;
        case 0x2D:
            /* WRVPTPTR */
            tcg_gen_st_i64(cpu_ir[IR_A0], cpu_env,
                           offsetof(CPUAlphaState, vptptr));
            break;
        case 0x31:
            /* WRVAL */
            tcg_gen_st_i64(cpu_ir[IR_A0], cpu_env,
                           offsetof(CPUAlphaState, sysval));
            break;
        case 0x32:
            /* RDVAL */
            tcg_gen_ld_i64(cpu_ir[IR_V0], cpu_env,
                           offsetof(CPUAlphaState, sysval));
            break;

        case 0x35: {
            /* SWPIPL */
            TCGv tmp;

            /* Note that we already know we're in kernel mode, so we know
               that PS only contains the 3 IPL bits.  */
            tcg_gen_ld8u_i64(cpu_ir[IR_V0], cpu_env,
                             offsetof(CPUAlphaState, ps));

            /* But make sure and store only the 3 IPL bits from the user.  */
            tmp = tcg_temp_new();
            tcg_gen_andi_i64(tmp, cpu_ir[IR_A0], PS_INT_MASK);
            tcg_gen_st8_i64(tmp, cpu_env, offsetof(CPUAlphaState, ps));
            tcg_temp_free(tmp);
            break;
        }

        case 0x36:
            /* RDPS */
            tcg_gen_ld8u_i64(cpu_ir[IR_V0], cpu_env,
                             offsetof(CPUAlphaState, ps));
            break;
        case 0x38:
            /* WRUSP */
            tcg_gen_st_i64(cpu_ir[IR_A0], cpu_env,
                           offsetof(CPUAlphaState, usp));
            break;
        case 0x3A:
            /* RDUSP */
            tcg_gen_ld_i64(cpu_ir[IR_V0], cpu_env,
                           offsetof(CPUAlphaState, usp));
            break;
        case 0x3C:
            /* WHAMI */
            tcg_gen_ld32s_i64(cpu_ir[IR_V0], cpu_env,
                -offsetof(AlphaCPU, env) + offsetof(CPUState, cpu_index));
            break;

        default:
            palcode &= 0x3f;
            goto do_call_pal;
        }
        return NO_EXIT;
    }
#endif
    return gen_invalid(ctx);

do_call_pal:
#ifdef CONFIG_USER_ONLY
    return gen_excp(ctx, EXCP_CALL_PAL, palcode);
#else
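    /* In system mode, dispatch directly to the emulated PALcode body.
       Entry points are 64 bytes apart: privileged calls vector from
       offset 0x1000 and unprivileged calls from offset 0x2000, offsets
       which the call_pal helper applies relative to the PAL base.  */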
    {
        TCGv pc = tcg_const_i64(ctx->pc);
        TCGv entry = tcg_const_i64(palcode & 0x80
                                   ? 0x2000 + (palcode - 0x80) * 64
                                   : 0x1000 + palcode * 64);

        gen_helper_call_pal(cpu_env, pc, entry);

        tcg_temp_free(entry);
        tcg_temp_free(pc);

        /* Since the destination is running in PALmode, we don't really
           need the page permissions check.  We'll see the existence of
           the page when we create the TB, and we'll flush all TBs if
           we change the PAL base register.  */
        if (!ctx->singlestep_enabled && !(ctx->tb->cflags & CF_LAST_IO)) {
            tcg_gen_goto_tb(0);
            tcg_gen_exit_tb((uintptr_t)ctx->tb);
            return EXIT_GOTO_TB;
        }

        return EXIT_PC_UPDATED;
    }
#endif
}

#ifndef CONFIG_USER_ONLY

#define PR_BYTE         0x100000
#define PR_LONG         0x200000

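/* Map an Alpha processor-register number to its offset within CPUAlphaState.
   Fields narrower than 64 bits are tagged with PR_BYTE or PR_LONG so that
   callers generate a load/store of the proper width; a return of 0 marks
   the register as unassigned (read-zero, write-ignore).  */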
static int cpu_pr_data(int pr)
{
    switch (pr) {
    case 0: return offsetof(CPUAlphaState, ps) | PR_BYTE;
    case 1: return offsetof(CPUAlphaState, fen) | PR_BYTE;
    case 2: return offsetof(CPUAlphaState, pcc_ofs) | PR_LONG;
    case 3: return offsetof(CPUAlphaState, trap_arg0);
    case 4: return offsetof(CPUAlphaState, trap_arg1);
    case 5: return offsetof(CPUAlphaState, trap_arg2);
    case 6: return offsetof(CPUAlphaState, exc_addr);
    case 7: return offsetof(CPUAlphaState, palbr);
    case 8: return offsetof(CPUAlphaState, ptbr);
    case 9: return offsetof(CPUAlphaState, vptptr);
    case 10: return offsetof(CPUAlphaState, unique);
    case 11: return offsetof(CPUAlphaState, sysval);
    case 12: return offsetof(CPUAlphaState, usp);

    case 32 ... 39:
        return offsetof(CPUAlphaState, shadow[pr - 32]);
    case 40 ... 63:
        return offsetof(CPUAlphaState, scratch[pr - 40]);

    case 251:
        return offsetof(CPUAlphaState, alarm_expire);
    }
    return 0;
}

static ExitStatus gen_mfpr(TCGv va, int regno)
{
    int data = cpu_pr_data(regno);

    /* Special help for VMTIME and WALLTIME.  */
    if (regno == 250 || regno == 249) {
        void (*helper)(TCGv) = gen_helper_get_walltime;
        if (regno == 249) {
            helper = gen_helper_get_vmtime;
        }
        if (use_icount) {
            gen_io_start();
            helper(va);
            gen_io_end();
            return EXIT_PC_STALE;
        } else {
            helper(va);
            return NO_EXIT;
        }
    }

    /* The basic registers are data only, and unknown registers
       are read-zero, write-ignore.  */
    if (data == 0) {
        tcg_gen_movi_i64(va, 0);
    } else if (data & PR_BYTE) {
        tcg_gen_ld8u_i64(va, cpu_env, data & ~PR_BYTE);
    } else if (data & PR_LONG) {
        tcg_gen_ld32s_i64(va, cpu_env, data & ~PR_LONG);
    } else {
        tcg_gen_ld_i64(va, cpu_env, data);
    }
    return NO_EXIT;
}

static ExitStatus gen_mtpr(DisasContext *ctx, TCGv vb, int regno)
{
    TCGv tmp;
    int data;

    switch (regno) {
    case 255:
        /* TBIA */
        gen_helper_tbia(cpu_env);
        break;

    case 254:
        /* TBIS */
        gen_helper_tbis(cpu_env, vb);
        break;

    case 253:
        /* WAIT */
        tmp = tcg_const_i64(1);
        tcg_gen_st32_i64(tmp, cpu_env, -offsetof(AlphaCPU, env) +
                                       offsetof(CPUState, halted));
        return gen_excp(ctx, EXCP_HLT, 0);

    case 252:
        /* HALT */
        gen_helper_halt(vb);
        return EXIT_PC_STALE;

    case 251:
        /* ALARM */
        gen_helper_set_alarm(cpu_env, vb);
        break;

    case 7:
        /* PALBR */
        tcg_gen_st_i64(vb, cpu_env, offsetof(CPUAlphaState, palbr));
        /* Changing the PAL base register implies un-chaining all of the TBs
           that ended with a CALL_PAL.  Since the base register usually only
           changes during boot, flushing everything works well.  */
        gen_helper_tb_flush(cpu_env);
        return EXIT_PC_STALE;

    default:
        /* The basic registers are data only, and unknown registers
           are read-zero, write-ignore.  */
        data = cpu_pr_data(regno);
        if (data != 0) {
            if (data & PR_BYTE) {
                tcg_gen_st8_i64(vb, cpu_env, data & ~PR_BYTE);
            } else if (data & PR_LONG) {
                tcg_gen_st32_i64(vb, cpu_env, data & ~PR_LONG);
            } else {
                tcg_gen_st_i64(vb, cpu_env, data);
            }
        }
        break;
    }

    return NO_EXIT;
}

#endif /* !USER_ONLY */

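/* Decode-time assertions used throughout translate_one: bail out to the
   invalid-opcode path unless the TB was built with FLAG set, or unless
   the given register field really is $31.  */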
#define REQUIRE_TB_FLAG(FLAG)                   \
    do {                                        \
        if ((ctx->tb->flags & (FLAG)) == 0) {   \
            goto invalid_opc;                   \
        }                                       \
    } while (0)

#define REQUIRE_REG_31(WHICH)                   \
    do {                                        \
        if (WHICH != 31) {                      \
            goto invalid_opc;                   \
        }                                       \
    } while (0)

static ExitStatus translate_one(DisasContext *ctx, uint32_t insn)
{
    int32_t disp21, disp16, disp12 __attribute__((unused));
    uint16_t fn11;
    uint8_t opc, ra, rb, rc, fpfn, fn7, lit;
    bool islit;
    TCGv va, vb, vc, tmp;
    TCGv_i32 t32;
    ExitStatus ret;

    /* Decode all instruction fields */
    opc = extract32(insn, 26, 6);
    ra = extract32(insn, 21, 5);
    rb = extract32(insn, 16, 5);
    rc = extract32(insn, 0, 5);
    islit = extract32(insn, 12, 1);
    lit = extract32(insn, 13, 8);

    disp21 = sextract32(insn, 0, 21);
    disp16 = sextract32(insn, 0, 16);
    disp12 = sextract32(insn, 0, 12);

    fn11 = extract32(insn, 5, 11);
    fpfn = extract32(insn, 5, 6);
    fn7 = extract32(insn, 5, 7);

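    /* Canonicalize an unset literal with rb == $31 into a zero literal,
       so the register-or-literal loads below need only test ISLIT.  */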
    if (rb == 31 && !islit) {
        islit = true;
        lit = 0;
    }

    ret = NO_EXIT;
    switch (opc) {
    case 0x00:
        /* CALL_PAL */
        ret = gen_call_pal(ctx, insn & 0x03ffffff);
        break;
    case 0x01:
        /* OPC01 */
        goto invalid_opc;
    case 0x02:
        /* OPC02 */
        goto invalid_opc;
    case 0x03:
        /* OPC03 */
        goto invalid_opc;
    case 0x04:
        /* OPC04 */
        goto invalid_opc;
    case 0x05:
        /* OPC05 */
        goto invalid_opc;
    case 0x06:
        /* OPC06 */
        goto invalid_opc;
    case 0x07:
        /* OPC07 */
        goto invalid_opc;

    case 0x09:
        /* LDAH */
        disp16 = (uint32_t)disp16 << 16;
        /* fall through */
    case 0x08:
        /* LDA */
        va = dest_gpr(ctx, ra);
        /* It's worth special-casing immediate loads.  */
        if (rb == 31) {
            tcg_gen_movi_i64(va, disp16);
        } else {
            tcg_gen_addi_i64(va, load_gpr(ctx, rb), disp16);
        }
        break;

    case 0x0A:
        /* LDBU */
        REQUIRE_TB_FLAG(TB_FLAGS_AMASK_BWX);
        gen_load_mem(ctx, &tcg_gen_qemu_ld8u, ra, rb, disp16, 0, 0);
        break;
    case 0x0B:
        /* LDQ_U */
        gen_load_mem(ctx, &tcg_gen_qemu_ld64, ra, rb, disp16, 0, 1);
        break;
    case 0x0C:
        /* LDWU */
        REQUIRE_TB_FLAG(TB_FLAGS_AMASK_BWX);
        gen_load_mem(ctx, &tcg_gen_qemu_ld16u, ra, rb, disp16, 0, 0);
        break;
    case 0x0D:
        /* STW */
        REQUIRE_TB_FLAG(TB_FLAGS_AMASK_BWX);
        gen_store_mem(ctx, &tcg_gen_qemu_st16, ra, rb, disp16, 0, 0);
        break;
    case 0x0E:
        /* STB */
        REQUIRE_TB_FLAG(TB_FLAGS_AMASK_BWX);
        gen_store_mem(ctx, &tcg_gen_qemu_st8, ra, rb, disp16, 0, 0);
        break;
    case 0x0F:
        /* STQ_U */
        gen_store_mem(ctx, &tcg_gen_qemu_st64, ra, rb, disp16, 0, 1);
        break;

    case 0x10:
        vc = dest_gpr(ctx, rc);
        vb = load_gpr_lit(ctx, rb, lit, islit);

        if (ra == 31) {
            if (fn7 == 0x00) {
                /* Special case ADDL as SEXTL.  */
                tcg_gen_ext32s_i64(vc, vb);
                break;
            }
            if (fn7 == 0x29) {
                /* Special case SUBQ as NEGQ.  */
                tcg_gen_neg_i64(vc, vb);
                break;
            }
        }

        va = load_gpr(ctx, ra);
        switch (fn7) {
        case 0x00:
            /* ADDL */
            tcg_gen_add_i64(vc, va, vb);
            tcg_gen_ext32s_i64(vc, vc);
            break;
        case 0x02:
            /* S4ADDL */
            tmp = tcg_temp_new();
            tcg_gen_shli_i64(tmp, va, 2);
            tcg_gen_add_i64(tmp, tmp, vb);
            tcg_gen_ext32s_i64(vc, tmp);
            tcg_temp_free(tmp);
            break;
        case 0x09:
            /* SUBL */
            tcg_gen_sub_i64(vc, va, vb);
            tcg_gen_ext32s_i64(vc, vc);
            break;
        case 0x0B:
            /* S4SUBL */
            tmp = tcg_temp_new();
            tcg_gen_shli_i64(tmp, va, 2);
            tcg_gen_sub_i64(tmp, tmp, vb);
            tcg_gen_ext32s_i64(vc, tmp);
            tcg_temp_free(tmp);
            break;
        case 0x0F:
            /* CMPBGE */
            gen_helper_cmpbge(vc, va, vb);
            break;
        case 0x12:
            /* S8ADDL */
            tmp = tcg_temp_new();
            tcg_gen_shli_i64(tmp, va, 3);
            tcg_gen_add_i64(tmp, tmp, vb);
            tcg_gen_ext32s_i64(vc, tmp);
            tcg_temp_free(tmp);
            break;
        case 0x1B:
            /* S8SUBL */
            tmp = tcg_temp_new();
            tcg_gen_shli_i64(tmp, va, 3);
            tcg_gen_sub_i64(tmp, tmp, vb);
            tcg_gen_ext32s_i64(vc, tmp);
            tcg_temp_free(tmp);
            break;
        case 0x1D:
            /* CMPULT */
            tcg_gen_setcond_i64(TCG_COND_LTU, vc, va, vb);
            break;
        case 0x20:
            /* ADDQ */
            tcg_gen_add_i64(vc, va, vb);
            break;
        case 0x22:
            /* S4ADDQ */
            tmp = tcg_temp_new();
            tcg_gen_shli_i64(tmp, va, 2);
            tcg_gen_add_i64(vc, tmp, vb);
            tcg_temp_free(tmp);
            break;
        case 0x29:
            /* SUBQ */
            tcg_gen_sub_i64(vc, va, vb);
            break;
        case 0x2B:
            /* S4SUBQ */
            tmp = tcg_temp_new();
            tcg_gen_shli_i64(tmp, va, 2);
            tcg_gen_sub_i64(vc, tmp, vb);
            tcg_temp_free(tmp);
            break;
        case 0x2D:
            /* CMPEQ */
            tcg_gen_setcond_i64(TCG_COND_EQ, vc, va, vb);
            break;
        case 0x32:
            /* S8ADDQ */
            tmp = tcg_temp_new();
            tcg_gen_shli_i64(tmp, va, 3);
            tcg_gen_add_i64(vc, tmp, vb);
            tcg_temp_free(tmp);
            break;
        case 0x3B:
            /* S8SUBQ */
            tmp = tcg_temp_new();
            tcg_gen_shli_i64(tmp, va, 3);
            tcg_gen_sub_i64(vc, tmp, vb);
            tcg_temp_free(tmp);
            break;
        case 0x3D:
            /* CMPULE */
            tcg_gen_setcond_i64(TCG_COND_LEU, vc, va, vb);
            break;
        case 0x40:
            /* ADDL/V */
            gen_helper_addlv(vc, cpu_env, va, vb);
            break;
        case 0x49:
            /* SUBL/V */
            gen_helper_sublv(vc, cpu_env, va, vb);
            break;
        case 0x4D:
            /* CMPLT */
            tcg_gen_setcond_i64(TCG_COND_LT, vc, va, vb);
            break;
        case 0x60:
            /* ADDQ/V */
            gen_helper_addqv(vc, cpu_env, va, vb);
            break;
        case 0x69:
            /* SUBQ/V */
            gen_helper_subqv(vc, cpu_env, va, vb);
            break;
        case 0x6D:
            /* CMPLE */
            tcg_gen_setcond_i64(TCG_COND_LE, vc, va, vb);
            break;
        default:
            goto invalid_opc;
        }
        break;

    case 0x11:
        if (fn7 == 0x20) {
            if (rc == 31) {
                /* Special case BIS as NOP.  */
                break;
            }
            if (ra == 31) {
                /* Special case BIS as MOV.  */
                vc = dest_gpr(ctx, rc);
                if (islit) {
                    tcg_gen_movi_i64(vc, lit);
                } else {
                    tcg_gen_mov_i64(vc, load_gpr(ctx, rb));
                }
                break;
            }
        }

        vc = dest_gpr(ctx, rc);
        vb = load_gpr_lit(ctx, rb, lit, islit);

        if (fn7 == 0x28 && ra == 31) {
            /* Special case ORNOT as NOT.  */
            tcg_gen_not_i64(vc, vb);
            break;
        }

        va = load_gpr(ctx, ra);
        switch (fn7) {
        case 0x00:
            /* AND */
            tcg_gen_and_i64(vc, va, vb);
            break;
        case 0x08:
            /* BIC */
            tcg_gen_andc_i64(vc, va, vb);
            break;
        case 0x14:
            /* CMOVLBS */
            tmp = tcg_temp_new();
            tcg_gen_andi_i64(tmp, va, 1);
            tcg_gen_movcond_i64(TCG_COND_NE, vc, tmp, load_zero(ctx),
                                vb, load_gpr(ctx, rc));
            tcg_temp_free(tmp);
            break;
        case 0x16:
            /* CMOVLBC */
            tmp = tcg_temp_new();
            tcg_gen_andi_i64(tmp, va, 1);
            tcg_gen_movcond_i64(TCG_COND_EQ, vc, tmp, load_zero(ctx),
                                vb, load_gpr(ctx, rc));
            tcg_temp_free(tmp);
            break;
        case 0x20:
            /* BIS */
            tcg_gen_or_i64(vc, va, vb);
            break;
        case 0x24:
            /* CMOVEQ */
            tcg_gen_movcond_i64(TCG_COND_EQ, vc, va, load_zero(ctx),
                                vb, load_gpr(ctx, rc));
            break;
        case 0x26:
            /* CMOVNE */
            tcg_gen_movcond_i64(TCG_COND_NE, vc, va, load_zero(ctx),
                                vb, load_gpr(ctx, rc));
            break;
        case 0x28:
            /* ORNOT */
            tcg_gen_orc_i64(vc, va, vb);
            break;
        case 0x40:
            /* XOR */
            tcg_gen_xor_i64(vc, va, vb);
            break;
        case 0x44:
            /* CMOVLT */
            tcg_gen_movcond_i64(TCG_COND_LT, vc, va, load_zero(ctx),
                                vb, load_gpr(ctx, rc));
            break;
        case 0x46:
            /* CMOVGE */
            tcg_gen_movcond_i64(TCG_COND_GE, vc, va, load_zero(ctx),
                                vb, load_gpr(ctx, rc));
            break;
        case 0x48:
            /* EQV */
            tcg_gen_eqv_i64(vc, va, vb);
            break;
        case 0x61:
            /* AMASK */
            REQUIRE_REG_31(ra);
            {
                uint64_t amask = ctx->tb->flags >> TB_FLAGS_AMASK_SHIFT;
                tcg_gen_andi_i64(vc, vb, ~amask);
            }
            break;
        case 0x64:
            /* CMOVLE */
            tcg_gen_movcond_i64(TCG_COND_LE, vc, va, load_zero(ctx),
                                vb, load_gpr(ctx, rc));
            break;
        case 0x66:
            /* CMOVGT */
            tcg_gen_movcond_i64(TCG_COND_GT, vc, va, load_zero(ctx),
                                vb, load_gpr(ctx, rc));
            break;
        case 0x6C:
            /* IMPLVER */
            REQUIRE_REG_31(ra);
            tcg_gen_movi_i64(vc, ctx->implver);
            break;
        default:
            goto invalid_opc;
        }
        break;

    case 0x12:
        vc = dest_gpr(ctx, rc);
        va = load_gpr(ctx, ra);
        switch (fn7) {
        case 0x02:
            /* MSKBL */
            gen_msk_l(ctx, vc, va, rb, islit, lit, 0x01);
            break;
        case 0x06:
            /* EXTBL */
            gen_ext_l(ctx, vc, va, rb, islit, lit, 0x01);
            break;
        case 0x0B:
            /* INSBL */
            gen_ins_l(ctx, vc, va, rb, islit, lit, 0x01);
            break;
        case 0x12:
            /* MSKWL */
            gen_msk_l(ctx, vc, va, rb, islit, lit, 0x03);
            break;
        case 0x16:
            /* EXTWL */
            gen_ext_l(ctx, vc, va, rb, islit, lit, 0x03);
            break;
        case 0x1B:
            /* INSWL */
            gen_ins_l(ctx, vc, va, rb, islit, lit, 0x03);
            break;
        case 0x22:
            /* MSKLL */
            gen_msk_l(ctx, vc, va, rb, islit, lit, 0x0f);
            break;
        case 0x26:
            /* EXTLL */
            gen_ext_l(ctx, vc, va, rb, islit, lit, 0x0f);
            break;
        case 0x2B:
            /* INSLL */
            gen_ins_l(ctx, vc, va, rb, islit, lit, 0x0f);
            break;
        case 0x30:
            /* ZAP */
            if (islit) {
                gen_zapnoti(vc, va, ~lit);
            } else {
                gen_helper_zap(vc, va, load_gpr(ctx, rb));
            }
            break;
        case 0x31:
            /* ZAPNOT */
            if (islit) {
                gen_zapnoti(vc, va, lit);
            } else {
                gen_helper_zapnot(vc, va, load_gpr(ctx, rb));
            }
            break;
        case 0x32:
            /* MSKQL */
            gen_msk_l(ctx, vc, va, rb, islit, lit, 0xff);
            break;
        case 0x34:
            /* SRL */
            if (islit) {
                tcg_gen_shri_i64(vc, va, lit & 0x3f);
            } else {
                tmp = tcg_temp_new();
                vb = load_gpr(ctx, rb);
                tcg_gen_andi_i64(tmp, vb, 0x3f);
                tcg_gen_shr_i64(vc, va, tmp);
                tcg_temp_free(tmp);
            }
            break;
        case 0x36:
            /* EXTQL */
            gen_ext_l(ctx, vc, va, rb, islit, lit, 0xff);
            break;
        case 0x39:
            /* SLL */
            if (islit) {
                tcg_gen_shli_i64(vc, va, lit & 0x3f);
            } else {
                tmp = tcg_temp_new();
                vb = load_gpr(ctx, rb);
                tcg_gen_andi_i64(tmp, vb, 0x3f);
                tcg_gen_shl_i64(vc, va, tmp);
                tcg_temp_free(tmp);
            }
            break;
        case 0x3B:
            /* INSQL */
            gen_ins_l(ctx, vc, va, rb, islit, lit, 0xff);
            break;
        case 0x3C:
            /* SRA */
            if (islit) {
                tcg_gen_sari_i64(vc, va, lit & 0x3f);
            } else {
                tmp = tcg_temp_new();
                vb = load_gpr(ctx, rb);
                tcg_gen_andi_i64(tmp, vb, 0x3f);
                tcg_gen_sar_i64(vc, va, tmp);
                tcg_temp_free(tmp);
            }
            break;
        case 0x52:
            /* MSKWH */
            gen_msk_h(ctx, vc, va, rb, islit, lit, 0x03);
            break;
        case 0x57:
            /* INSWH */
            gen_ins_h(ctx, vc, va, rb, islit, lit, 0x03);
            break;
        case 0x5A:
            /* EXTWH */
            gen_ext_h(ctx, vc, va, rb, islit, lit, 0x03);
            break;
        case 0x62:
            /* MSKLH */
            gen_msk_h(ctx, vc, va, rb, islit, lit, 0x0f);
            break;
        case 0x67:
            /* INSLH */
            gen_ins_h(ctx, vc, va, rb, islit, lit, 0x0f);
            break;
        case 0x6A:
            /* EXTLH */
            gen_ext_h(ctx, vc, va, rb, islit, lit, 0x0f);
            break;
        case 0x72:
            /* MSKQH */
            gen_msk_h(ctx, vc, va, rb, islit, lit, 0xff);
            break;
        case 0x77:
            /* INSQH */
            gen_ins_h(ctx, vc, va, rb, islit, lit, 0xff);
            break;
        case 0x7A:
            /* EXTQH */
            gen_ext_h(ctx, vc, va, rb, islit, lit, 0xff);
            break;
        default:
            goto invalid_opc;
        }
        break;

    case 0x13:
        vc = dest_gpr(ctx, rc);
        vb = load_gpr_lit(ctx, rb, lit, islit);
        va = load_gpr(ctx, ra);
        switch (fn7) {
        case 0x00:
            /* MULL */
            tcg_gen_mul_i64(vc, va, vb);
            tcg_gen_ext32s_i64(vc, vc);
            break;
        case 0x20:
            /* MULQ */
            tcg_gen_mul_i64(vc, va, vb);
            break;
        case 0x30:
            /* UMULH */
            tmp = tcg_temp_new();
            tcg_gen_mulu2_i64(tmp, vc, va, vb);
            tcg_temp_free(tmp);
            break;
        case 0x40:
            /* MULL/V */
            gen_helper_mullv(vc, cpu_env, va, vb);
            break;
        case 0x60:
            /* MULQ/V */
            gen_helper_mulqv(vc, cpu_env, va, vb);
            break;
        default:
            goto invalid_opc;
        }
        break;

    case 0x14:
        REQUIRE_TB_FLAG(TB_FLAGS_AMASK_FIX);
        vc = dest_fpr(ctx, rc);
        switch (fpfn) { /* fn11 & 0x3F */
        case 0x04:
            /* ITOFS */
            REQUIRE_REG_31(rb);
            t32 = tcg_temp_new_i32();
            va = load_gpr(ctx, ra);
            tcg_gen_trunc_i64_i32(t32, va);
            gen_helper_memory_to_s(vc, t32);
            tcg_temp_free_i32(t32);
            break;
        case 0x0A:
            /* SQRTF */
            REQUIRE_REG_31(ra);
            vb = load_fpr(ctx, rb);
            gen_helper_sqrtf(vc, cpu_env, vb);
            break;
        case 0x0B:
            /* SQRTS */
            REQUIRE_REG_31(ra);
            gen_fsqrts(ctx, rb, rc, fn11);
            break;
        case 0x14:
            /* ITOFF */
            REQUIRE_REG_31(rb);
            t32 = tcg_temp_new_i32();
            va = load_gpr(ctx, ra);
            tcg_gen_trunc_i64_i32(t32, va);
            gen_helper_memory_to_f(vc, t32);
            tcg_temp_free_i32(t32);
            break;
        case 0x24:
            /* ITOFT */
            REQUIRE_REG_31(rb);
            va = load_gpr(ctx, ra);
            tcg_gen_mov_i64(vc, va);
            break;
        case 0x2A:
            /* SQRTG */
            REQUIRE_REG_31(ra);
            vb = load_fpr(ctx, rb);
            gen_helper_sqrtg(vc, cpu_env, vb);
            break;
        case 0x02B:
            /* SQRTT */
            REQUIRE_REG_31(ra);
            gen_fsqrtt(ctx, rb, rc, fn11);
            break;
        default:
            goto invalid_opc;
        }
        break;

    case 0x15:
        /* VAX floating point */
        /* XXX: rounding mode and trap are ignored (!) */
        vc = dest_fpr(ctx, rc);
        vb = load_fpr(ctx, rb);
        va = load_fpr(ctx, ra);
        switch (fpfn) { /* fn11 & 0x3F */
        case 0x00:
            /* ADDF */
            gen_helper_addf(vc, cpu_env, va, vb);
            break;
        case 0x01:
            /* SUBF */
            gen_helper_subf(vc, cpu_env, va, vb);
            break;
        case 0x02:
            /* MULF */
            gen_helper_mulf(vc, cpu_env, va, vb);
            break;
        case 0x03:
            /* DIVF */
            gen_helper_divf(vc, cpu_env, va, vb);
            break;
        case 0x1E:
            /* CVTDG -- TODO */
            REQUIRE_REG_31(ra);
            goto invalid_opc;
        case 0x20:
            /* ADDG */
            gen_helper_addg(vc, cpu_env, va, vb);
            break;
        case 0x21:
            /* SUBG */
            gen_helper_subg(vc, cpu_env, va, vb);
            break;
        case 0x22:
            /* MULG */
            gen_helper_mulg(vc, cpu_env, va, vb);
            break;
        case 0x23:
            /* DIVG */
            gen_helper_divg(vc, cpu_env, va, vb);
            break;
        case 0x25:
            /* CMPGEQ */
            gen_helper_cmpgeq(vc, cpu_env, va, vb);
            break;
        case 0x26:
            /* CMPGLT */
            gen_helper_cmpglt(vc, cpu_env, va, vb);
            break;
        case 0x27:
            /* CMPGLE */
            gen_helper_cmpgle(vc, cpu_env, va, vb);
            break;
        case 0x2C:
            /* CVTGF */
            REQUIRE_REG_31(ra);
            gen_helper_cvtgf(vc, cpu_env, vb);
            break;
        case 0x2D:
            /* CVTGD -- TODO */
            REQUIRE_REG_31(ra);
            goto invalid_opc;
        case 0x2F:
            /* CVTGQ */
            REQUIRE_REG_31(ra);
            gen_helper_cvtgq(vc, cpu_env, vb);
            break;
        case 0x3C:
            /* CVTQF */
            REQUIRE_REG_31(ra);
            gen_helper_cvtqf(vc, cpu_env, vb);
            break;
        case 0x3E:
            /* CVTQG */
            REQUIRE_REG_31(ra);
            gen_helper_cvtqg(vc, cpu_env, vb);
            break;
        default:
            goto invalid_opc;
        }
        break;

    case 0x16:
        /* IEEE floating-point */
        switch (fpfn) { /* fn11 & 0x3F */
        case 0x00:
            /* ADDS */
            gen_fadds(ctx, ra, rb, rc, fn11);
            break;
        case 0x01:
            /* SUBS */
            gen_fsubs(ctx, ra, rb, rc, fn11);
            break;
        case 0x02:
            /* MULS */
            gen_fmuls(ctx, ra, rb, rc, fn11);
            break;
        case 0x03:
            /* DIVS */
            gen_fdivs(ctx, ra, rb, rc, fn11);
            break;
        case 0x20:
            /* ADDT */
            gen_faddt(ctx, ra, rb, rc, fn11);
            break;
        case 0x21:
            /* SUBT */
            gen_fsubt(ctx, ra, rb, rc, fn11);
            break;
        case 0x22:
            /* MULT */
            gen_fmult(ctx, ra, rb, rc, fn11);
            break;
        case 0x23:
            /* DIVT */
            gen_fdivt(ctx, ra, rb, rc, fn11);
            break;
        case 0x24:
            /* CMPTUN */
            gen_fcmptun(ctx, ra, rb, rc, fn11);
            break;
        case 0x25:
            /* CMPTEQ */
            gen_fcmpteq(ctx, ra, rb, rc, fn11);
            break;
        case 0x26:
            /* CMPTLT */
            gen_fcmptlt(ctx, ra, rb, rc, fn11);
            break;
        case 0x27:
            /* CMPTLE */
            gen_fcmptle(ctx, ra, rb, rc, fn11);
            break;
        case 0x2C:
            REQUIRE_REG_31(ra);
            if (fn11 == 0x2AC || fn11 == 0x6AC) {
                /* CVTST */
                gen_fcvtst(ctx, rb, rc, fn11);
            } else {
                /* CVTTS */
                gen_fcvtts(ctx, rb, rc, fn11);
            }
            break;
        case 0x2F:
            /* CVTTQ */
            REQUIRE_REG_31(ra);
            gen_fcvttq(ctx, rb, rc, fn11);
            break;
        case 0x3C:
            /* CVTQS */
            REQUIRE_REG_31(ra);
            gen_fcvtqs(ctx, rb, rc, fn11);
            break;
        case 0x3E:
            /* CVTQT */
            REQUIRE_REG_31(ra);
            gen_fcvtqt(ctx, rb, rc, fn11);
            break;
        default:
            goto invalid_opc;
        }
        break;

    case 0x17:
        switch (fn11) {
        case 0x010:
            /* CVTLQ */
            REQUIRE_REG_31(ra);
            vc = dest_fpr(ctx, rc);
            vb = load_fpr(ctx, rb);
            gen_fcvtlq(vc, vb);
            break;
        case 0x020:
            /* CPYS */
            if (rc == 31) {
                /* Special case CPYS as FNOP.  */
            } else {
                vc = dest_fpr(ctx, rc);
                va = load_fpr(ctx, ra);
                if (ra == rb) {
                    /* Special case CPYS as FMOV.  */
                    tcg_gen_mov_i64(vc, va);
                } else {
                    vb = load_fpr(ctx, rb);
                    gen_cpy_mask(vc, va, vb, 0, 0x8000000000000000ULL);
                }
            }
            break;
        case 0x021:
            /* CPYSN */
            vc = dest_fpr(ctx, rc);
            vb = load_fpr(ctx, rb);
            va = load_fpr(ctx, ra);
            gen_cpy_mask(vc, va, vb, 1, 0x8000000000000000ULL);
            break;
        case 0x022:
            /* CPYSE */
            vc = dest_fpr(ctx, rc);
            vb = load_fpr(ctx, rb);
            va = load_fpr(ctx, ra);
            gen_cpy_mask(vc, va, vb, 0, 0xFFF0000000000000ULL);
            break;
        case 0x024:
            /* MT_FPCR */
            va = load_fpr(ctx, ra);
            gen_helper_store_fpcr(cpu_env, va);
            break;
        case 0x025:
            /* MF_FPCR */
            va = dest_fpr(ctx, ra);
            gen_helper_load_fpcr(va, cpu_env);
            break;
        case 0x02A:
            /* FCMOVEQ */
            gen_fcmov(ctx, TCG_COND_EQ, ra, rb, rc);
            break;
        case 0x02B:
            /* FCMOVNE */
            gen_fcmov(ctx, TCG_COND_NE, ra, rb, rc);
            break;
        case 0x02C:
            /* FCMOVLT */
            gen_fcmov(ctx, TCG_COND_LT, ra, rb, rc);
            break;
        case 0x02D:
            /* FCMOVGE */
            gen_fcmov(ctx, TCG_COND_GE, ra, rb, rc);
            break;
        case 0x02E:
            /* FCMOVLE */
            gen_fcmov(ctx, TCG_COND_LE, ra, rb, rc);
            break;
        case 0x02F:
            /* FCMOVGT */
            gen_fcmov(ctx, TCG_COND_GT, ra, rb, rc);
            break;
        case 0x030:
            /* CVTQL */
            REQUIRE_REG_31(ra);
            vc = dest_fpr(ctx, rc);
            vb = load_fpr(ctx, rb);
            gen_fcvtql(vc, vb);
            break;
        case 0x130:
            /* CVTQL/V */
        case 0x530:
            /* CVTQL/SV */
            REQUIRE_REG_31(ra);
            /* ??? I'm pretty sure there's nothing that /sv needs to do that
               /v doesn't do.  The only thing I can think is that /sv is a
               valid instruction merely for completeness in the ISA.  */
            vc = dest_fpr(ctx, rc);
            vb = load_fpr(ctx, rb);
            gen_helper_fcvtql_v_input(cpu_env, vb);
            gen_fcvtql(vc, vb);
            break;
        default:
            goto invalid_opc;
        }
        break;

    case 0x18:
        switch ((uint16_t)disp16) {
        case 0x0000:
            /* TRAPB */
            /* No-op.  */
            break;
        case 0x0400:
            /* EXCB */
            /* No-op.  */
            break;
        case 0x4000:
            /* MB */
            /* No-op */
            break;
        case 0x4400:
            /* WMB */
            /* No-op */
            break;
        case 0x8000:
            /* FETCH */
            /* No-op */
            break;
        case 0xA000:
            /* FETCH_M */
            /* No-op */
            break;
        case 0xC000:
            /* RPCC */
            va = dest_gpr(ctx, ra);
            if (use_icount) {
                gen_io_start();
                gen_helper_load_pcc(va, cpu_env);
                gen_io_end();
                ret = EXIT_PC_STALE;
            } else {
                gen_helper_load_pcc(va, cpu_env);
            }
            break;
        case 0xE000:
            /* RC */
            gen_rx(ra, 0);
            break;
        case 0xE800:
            /* ECB */
            break;
        case 0xF000:
            /* RS */
            gen_rx(ra, 1);
            break;
        case 0xF800:
            /* WH64 */
            /* No-op */
            break;
        default:
            goto invalid_opc;
        }
        break;

2312 case 0x19:
2313 /* HW_MFPR (PALcode) */
2314 #ifndef CONFIG_USER_ONLY
2315 REQUIRE_TB_FLAG(TB_FLAGS_PAL_MODE);
2316 va = dest_gpr(ctx, ra);
2317 ret = gen_mfpr(va, insn & 0xffff);
2318 break;
2319 #else
2320 goto invalid_opc;
2321 #endif
2323 case 0x1A:
2324 /* JMP, JSR, RET, JSR_COROUTINE. These only differ by the branch
2325 prediction stack action, which of course we don't implement. */
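/* The low two bits of the target are discarded (instructions are
   4-byte aligned); when ra is not $31 it receives the return address,
   i.e. the PC of the following instruction. */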
2326 vb = load_gpr(ctx, rb);
2327 tcg_gen_andi_i64(cpu_pc, vb, ~3);
2328 if (ra != 31) {
2329 tcg_gen_movi_i64(cpu_ir[ra], ctx->pc);
2330 }
2331 ret = EXIT_PC_UPDATED;
2332 break;
2334 case 0x1B:
2335 /* HW_LD (PALcode) */
2336 #ifndef CONFIG_USER_ONLY
2337 REQUIRE_TB_FLAG(TB_FLAGS_PAL_MODE);
2338 {
2339 TCGv addr = tcg_temp_new();
2340 vb = load_gpr(ctx, rb);
2341 va = dest_gpr(ctx, ra);
2343 tcg_gen_addi_i64(addr, vb, disp12);
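/* Bits <15:12> of the instruction select the HW_LD variant
   (physical vs virtual, locked, alternate access mode); the
   unimplemented forms fall through to invalid_opc. */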
2344 switch ((insn >> 12) & 0xF) {
2345 case 0x0:
2346 /* Longword physical access (hw_ldl/p) */
2347 gen_helper_ldl_phys(va, cpu_env, addr);
2348 break;
2349 case 0x1:
2350 /* Quadword physical access (hw_ldq/p) */
2351 gen_helper_ldq_phys(va, cpu_env, addr);
2352 break;
2353 case 0x2:
2354 /* Longword physical access with lock (hw_ldl_l/p) */
2355 gen_helper_ldl_l_phys(va, cpu_env, addr);
2356 break;
2357 case 0x3:
2358 /* Quadword physical access with lock (hw_ldq_l/p) */
2359 gen_helper_ldq_l_phys(va, cpu_env, addr);
2360 break;
2361 case 0x4:
2362 /* Longword virtual PTE fetch (hw_ldl/v) */
2363 goto invalid_opc;
2364 case 0x5:
2365 /* Quadword virtual PTE fetch (hw_ldq/v) */
2366 goto invalid_opc;
2368 case 0x6:
2369 /* Invalid */
2370 goto invalid_opc;
2371 case 0x7:
2372 /* Invalid */
2373 goto invalid_opc;
2374 case 0x8:
2375 /* Longword virtual access (hw_ldl) */
2376 goto invalid_opc;
2377 case 0x9:
2378 /* Quadword virtual access (hw_ldq) */
2379 goto invalid_opc;
2380 case 0xA:
2381 /* Longword virtual access with protection check (hw_ldl/w) */
2382 tcg_gen_qemu_ld_i64(va, addr, MMU_KERNEL_IDX, MO_LESL);
2383 break;
2384 case 0xB:
2385 /* Quadword virtual access with protection check (hw_ldq/w) */
2386 tcg_gen_qemu_ld_i64(va, addr, MMU_KERNEL_IDX, MO_LEQ);
2387 break;
2388 case 0xC:
2389 /* Longword virtual access with alt access mode (hw_ldl/a)*/
2390 goto invalid_opc;
2391 case 0xD:
2392 /* Quadword virtual access with alt access mode (hw_ldq/a) */
2393 goto invalid_opc;
2394 case 0xE:
2395 /* Longword virtual access with alternate access mode and
2396 protection checks (hw_ldl/wa) */
2397 tcg_gen_qemu_ld_i64(va, addr, MMU_USER_IDX, MO_LESL);
2398 break;
2399 case 0xF:
2400 /* Quadword virtual access with alternate access mode and
2401 protection checks (hw_ldq/wa) */
2402 tcg_gen_qemu_ld_i64(va, addr, MMU_USER_IDX, MO_LEQ);
2403 break;
2404 }
2405 tcg_temp_free(addr);
2406 break;
2407 }
2408 #else
2409 goto invalid_opc;
2410 #endif
2412 case 0x1C:
2413 vc = dest_gpr(ctx, rc);
2414 if (fn7 == 0x70) {
2415 /* FTOIT */
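/* FTOIT transfers the raw 64-bit FP register image to an integer
   register; both register files are 64 bits, so a move suffices. */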
2416 REQUIRE_TB_FLAG(TB_FLAGS_AMASK_FIX);
2417 REQUIRE_REG_31(rb);
2418 va = load_fpr(ctx, ra);
2419 tcg_gen_mov_i64(vc, va);
2420 break;
2421 } else if (fn7 == 0x78) {
2422 /* FTOIS */
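/* FTOIS converts the register S-float format to its 32-bit memory
   image, then sign-extends the result into the integer register. */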
2423 REQUIRE_TB_FLAG(TB_FLAGS_AMASK_FIX);
2424 REQUIRE_REG_31(rb);
2425 t32 = tcg_temp_new_i32();
2426 va = load_fpr(ctx, ra);
2427 gen_helper_s_to_memory(t32, va);
2428 tcg_gen_ext_i32_i64(vc, t32);
2429 tcg_temp_free_i32(t32);
2430 break;
2431 }
2433 vb = load_gpr_lit(ctx, rb, lit, islit);
2434 switch (fn7) {
2435 case 0x00:
2436 /* SEXTB */
2437 REQUIRE_TB_FLAG(TB_FLAGS_AMASK_BWX);
2438 REQUIRE_REG_31(ra);
2439 tcg_gen_ext8s_i64(vc, vb);
2440 break;
2441 case 0x01:
2442 /* SEXTW */
2443 REQUIRE_TB_FLAG(TB_FLAGS_AMASK_BWX);
2444 REQUIRE_REG_31(ra);
2445 tcg_gen_ext16s_i64(vc, vb);
2446 break;
2447 case 0x30:
2448 /* CTPOP */
2449 REQUIRE_TB_FLAG(TB_FLAGS_AMASK_CIX);
2450 REQUIRE_REG_31(ra);
2451 gen_helper_ctpop(vc, vb);
2452 break;
2453 case 0x31:
2454 /* PERR */
2455 REQUIRE_TB_FLAG(TB_FLAGS_AMASK_MVI);
2456 va = load_gpr(ctx, ra);
2457 gen_helper_perr(vc, va, vb);
2458 break;
2459 case 0x32:
2460 /* CTLZ */
2461 REQUIRE_TB_FLAG(TB_FLAGS_AMASK_CIX);
2462 REQUIRE_REG_31(ra);
2463 gen_helper_ctlz(vc, vb);
2464 break;
2465 case 0x33:
2466 /* CTTZ */
2467 REQUIRE_TB_FLAG(TB_FLAGS_AMASK_CIX);
2468 REQUIRE_REG_31(ra);
2469 gen_helper_cttz(vc, vb);
2470 break;
2471 case 0x34:
2472 /* UNPKBW */
2473 REQUIRE_TB_FLAG(TB_FLAGS_AMASK_MVI);
2474 REQUIRE_REG_31(ra);
2475 gen_helper_unpkbw(vc, vb);
2476 break;
2477 case 0x35:
2478 /* UNPKBL */
2479 REQUIRE_TB_FLAG(TB_FLAGS_AMASK_MVI);
2480 REQUIRE_REG_31(ra);
2481 gen_helper_unpkbl(vc, vb);
2482 break;
2483 case 0x36:
2484 /* PKWB */
2485 REQUIRE_TB_FLAG(TB_FLAGS_AMASK_MVI);
2486 REQUIRE_REG_31(ra);
2487 gen_helper_pkwb(vc, vb);
2488 break;
2489 case 0x37:
2490 /* PKLB */
2491 REQUIRE_TB_FLAG(TB_FLAGS_AMASK_MVI);
2492 REQUIRE_REG_31(ra);
2493 gen_helper_pklb(vc, vb);
2494 break;
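/* The MVI min/max operations below treat the 64-bit register as
   eight signed/unsigned bytes (xxB8) or four words (xxW4) and take
   the per-lane minimum or maximum. */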
2495 case 0x38:
2496 /* MINSB8 */
2497 REQUIRE_TB_FLAG(TB_FLAGS_AMASK_MVI);
2498 va = load_gpr(ctx, ra);
2499 gen_helper_minsb8(vc, va, vb);
2500 break;
2501 case 0x39:
2502 /* MINSW4 */
2503 REQUIRE_TB_FLAG(TB_FLAGS_AMASK_MVI);
2504 va = load_gpr(ctx, ra);
2505 gen_helper_minsw4(vc, va, vb);
2506 break;
2507 case 0x3A:
2508 /* MINUB8 */
2509 REQUIRE_TB_FLAG(TB_FLAGS_AMASK_MVI);
2510 va = load_gpr(ctx, ra);
2511 gen_helper_minub8(vc, va, vb);
2512 break;
2513 case 0x3B:
2514 /* MINUW4 */
2515 REQUIRE_TB_FLAG(TB_FLAGS_AMASK_MVI);
2516 va = load_gpr(ctx, ra);
2517 gen_helper_minuw4(vc, va, vb);
2518 break;
2519 case 0x3C:
2520 /* MAXUB8 */
2521 REQUIRE_TB_FLAG(TB_FLAGS_AMASK_MVI);
2522 va = load_gpr(ctx, ra);
2523 gen_helper_maxub8(vc, va, vb);
2524 break;
2525 case 0x3D:
2526 /* MAXUW4 */
2527 REQUIRE_TB_FLAG(TB_FLAGS_AMASK_MVI);
2528 va = load_gpr(ctx, ra);
2529 gen_helper_maxuw4(vc, va, vb);
2530 break;
2531 case 0x3E:
2532 /* MAXSB8 */
2533 REQUIRE_TB_FLAG(TB_FLAGS_AMASK_MVI);
2534 va = load_gpr(ctx, ra);
2535 gen_helper_maxsb8(vc, va, vb);
2536 break;
2537 case 0x3F:
2538 /* MAXSW4 */
2539 REQUIRE_TB_FLAG(TB_FLAGS_AMASK_MVI);
2540 va = load_gpr(ctx, ra);
2541 gen_helper_maxsw4(vc, va, vb);
2542 break;
2543 default:
2544 goto invalid_opc;
2545 }
2546 break;
2548 case 0x1D:
2549 /* HW_MTPR (PALcode) */
2550 #ifndef CONFIG_USER_ONLY
2551 REQUIRE_TB_FLAG(TB_FLAGS_PAL_MODE);
2552 vb = load_gpr(ctx, rb);
2553 ret = gen_mtpr(ctx, vb, insn & 0xffff);
2554 break;
2555 #else
2556 goto invalid_opc;
2557 #endif
2559 case 0x1E:
2560 /* HW_RET (PALcode) */
2561 #ifndef CONFIG_USER_ONLY
2562 REQUIRE_TB_FLAG(TB_FLAGS_PAL_MODE);
2563 if (rb == 31) {
2564 /* Pre-EV6 CPUs interpreted this as HW_REI, loading the return
2565 address from EXC_ADDR. This turns out to be useful for our
2566 emulation PALcode, so continue to accept it. */
2567 tmp = tcg_temp_new();
2568 tcg_gen_ld_i64(tmp, cpu_env, offsetof(CPUAlphaState, exc_addr));
2569 gen_helper_hw_ret(cpu_env, tmp);
2570 tcg_temp_free(tmp);
2571 } else {
2572 gen_helper_hw_ret(cpu_env, load_gpr(ctx, rb));
2573 }
2574 ret = EXIT_PC_UPDATED;
2575 break;
2576 #else
2577 goto invalid_opc;
2578 #endif
2580 case 0x1F:
2581 /* HW_ST (PALcode) */
2582 #ifndef CONFIG_USER_ONLY
2583 REQUIRE_TB_FLAG(TB_FLAGS_PAL_MODE);
2584 {
2585 TCGv addr = tcg_temp_new();
2586 va = load_gpr(ctx, ra);
2587 vb = load_gpr(ctx, rb);
2589 tcg_gen_addi_i64(addr, vb, disp12);
2590 switch ((insn >> 12) & 0xF) {
2591 case 0x0:
2592 /* Longword physical access */
2593 gen_helper_stl_phys(cpu_env, addr, va);
2594 break;
2595 case 0x1:
2596 /* Quadword physical access */
2597 gen_helper_stq_phys(cpu_env, addr, va);
2598 break;
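/* The locked variants are the physical store-conditionals used by
   PALcode: the helper stores only if the lock is still valid and
   writes the 0/1 success flag back to ra. */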
2599 case 0x2:
2600 /* Longword physical access with lock */
2601 gen_helper_stl_c_phys(dest_gpr(ctx, ra), cpu_env, addr, va);
2602 break;
2603 case 0x3:
2604 /* Quadword physical access with lock */
2605 gen_helper_stq_c_phys(dest_gpr(ctx, ra), cpu_env, addr, va);
2606 break;
2607 case 0x4:
2608 /* Longword virtual access */
2609 goto invalid_opc;
2610 case 0x5:
2611 /* Quadword virtual access */
2612 goto invalid_opc;
2613 case 0x6:
2614 /* Invalid */
2615 goto invalid_opc;
2616 case 0x7:
2617 /* Invalid */
2618 goto invalid_opc;
2619 case 0x8:
2620 /* Invalid */
2621 goto invalid_opc;
2622 case 0x9:
2623 /* Invalid */
2624 goto invalid_opc;
2625 case 0xA:
2626 /* Invalid */
2627 goto invalid_opc;
2628 case 0xB:
2629 /* Invalid */
2630 goto invalid_opc;
2631 case 0xC:
2632 /* Longword virtual access with alternate access mode */
2633 goto invalid_opc;
2634 case 0xD:
2635 /* Quadword virtual access with alternate access mode */
2636 goto invalid_opc;
2637 case 0xE:
2638 /* Invalid */
2639 goto invalid_opc;
2640 case 0xF:
2641 /* Invalid */
2642 goto invalid_opc;
2643 }
2644 tcg_temp_free(addr);
2645 break;
2646 }
2647 #else
2648 goto invalid_opc;
2649 #endif
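/* Opcodes 0x20-0x27 are the FP loads and stores (VAX F/G and IEEE
   S/T formats); 0x28-0x2F are the integer forms, including the
   load-locked/store-conditional pairs. */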
2650 case 0x20:
2651 /* LDF */
2652 gen_load_mem(ctx, &gen_qemu_ldf, ra, rb, disp16, 1, 0);
2653 break;
2654 case 0x21:
2655 /* LDG */
2656 gen_load_mem(ctx, &gen_qemu_ldg, ra, rb, disp16, 1, 0);
2657 break;
2658 case 0x22:
2659 /* LDS */
2660 gen_load_mem(ctx, &gen_qemu_lds, ra, rb, disp16, 1, 0);
2661 break;
2662 case 0x23:
2663 /* LDT */
2664 gen_load_mem(ctx, &tcg_gen_qemu_ld64, ra, rb, disp16, 1, 0);
2665 break;
2666 case 0x24:
2667 /* STF */
2668 gen_store_mem(ctx, &gen_qemu_stf, ra, rb, disp16, 1, 0);
2669 break;
2670 case 0x25:
2671 /* STG */
2672 gen_store_mem(ctx, &gen_qemu_stg, ra, rb, disp16, 1, 0);
2673 break;
2674 case 0x26:
2675 /* STS */
2676 gen_store_mem(ctx, &gen_qemu_sts, ra, rb, disp16, 1, 0);
2677 break;
2678 case 0x27:
2679 /* STT */
2680 gen_store_mem(ctx, &tcg_gen_qemu_st64, ra, rb, disp16, 1, 0);
2681 break;
2682 case 0x28:
2683 /* LDL */
2684 gen_load_mem(ctx, &tcg_gen_qemu_ld32s, ra, rb, disp16, 0, 0);
2685 break;
2686 case 0x29:
2687 /* LDQ */
2688 gen_load_mem(ctx, &tcg_gen_qemu_ld64, ra, rb, disp16, 0, 0);
2689 break;
2690 case 0x2A:
2691 /* LDL_L */
2692 gen_load_mem(ctx, &gen_qemu_ldl_l, ra, rb, disp16, 0, 0);
2693 break;
2694 case 0x2B:
2695 /* LDQ_L */
2696 gen_load_mem(ctx, &gen_qemu_ldq_l, ra, rb, disp16, 0, 0);
2697 break;
2698 case 0x2C:
2699 /* STL */
2700 gen_store_mem(ctx, &tcg_gen_qemu_st32, ra, rb, disp16, 0, 0);
2701 break;
2702 case 0x2D:
2703 /* STQ */
2704 gen_store_mem(ctx, &tcg_gen_qemu_st64, ra, rb, disp16, 0, 0);
2705 break;
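/* STL_C/STQ_C succeed only if the lock from a prior LDL_L/LDQ_L is
   still intact; ra receives the 0/1 success flag and the final
   argument selects longword vs quadword. */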
2706 case 0x2E:
2707 /* STL_C */
2708 ret = gen_store_conditional(ctx, ra, rb, disp16, 0);
2709 break;
2710 case 0x2F:
2711 /* STQ_C */
2712 ret = gen_store_conditional(ctx, ra, rb, disp16, 1);
2713 break;
2714 case 0x30:
2715 /* BR */
2716 ret = gen_bdirect(ctx, ra, disp21);
2717 break;
2718 case 0x31: /* FBEQ */
2719 ret = gen_fbcond(ctx, TCG_COND_EQ, ra, disp21);
2720 break;
2721 case 0x32: /* FBLT */
2722 ret = gen_fbcond(ctx, TCG_COND_LT, ra, disp21);
2723 break;
2724 case 0x33: /* FBLE */
2725 ret = gen_fbcond(ctx, TCG_COND_LE, ra, disp21);
2726 break;
2727 case 0x34:
2728 /* BSR */
2729 ret = gen_bdirect(ctx, ra, disp21);
2730 break;
2731 case 0x35: /* FBNE */
2732 ret = gen_fbcond(ctx, TCG_COND_NE, ra, disp21);
2733 break;
2734 case 0x36: /* FBGE */
2735 ret = gen_fbcond(ctx, TCG_COND_GE, ra, disp21);
2736 break;
2737 case 0x37: /* FBGT */
2738 ret = gen_fbcond(ctx, TCG_COND_GT, ra, disp21);
2739 break;
2740 case 0x38:
2741 /* BLBC */
2742 ret = gen_bcond(ctx, TCG_COND_EQ, ra, disp21, 1);
2743 break;
2744 case 0x39:
2745 /* BEQ */
2746 ret = gen_bcond(ctx, TCG_COND_EQ, ra, disp21, 0);
2747 break;
2748 case 0x3A:
2749 /* BLT */
2750 ret = gen_bcond(ctx, TCG_COND_LT, ra, disp21, 0);
2751 break;
2752 case 0x3B:
2753 /* BLE */
2754 ret = gen_bcond(ctx, TCG_COND_LE, ra, disp21, 0);
2755 break;
2756 case 0x3C:
2757 /* BLBS */
2758 ret = gen_bcond(ctx, TCG_COND_NE, ra, disp21, 1);
2759 break;
2760 case 0x3D:
2761 /* BNE */
2762 ret = gen_bcond(ctx, TCG_COND_NE, ra, disp21, 0);
2763 break;
2764 case 0x3E:
2765 /* BGE */
2766 ret = gen_bcond(ctx, TCG_COND_GE, ra, disp21, 0);
2767 break;
2768 case 0x3F:
2769 /* BGT */
2770 ret = gen_bcond(ctx, TCG_COND_GT, ra, disp21, 0);
2771 break;
2772 invalid_opc:
2773 ret = gen_invalid(ctx);
2774 break;
2775 }
2777 return ret;
2778 }
2780 static inline void gen_intermediate_code_internal(AlphaCPU *cpu,
2781 TranslationBlock *tb,
2782 bool search_pc)
2783 {
2784 CPUState *cs = CPU(cpu);
2785 CPUAlphaState *env = &cpu->env;
2786 DisasContext ctx, *ctxp = &ctx;
2787 target_ulong pc_start;
2788 target_ulong pc_mask;
2789 uint32_t insn;
2790 uint16_t *gen_opc_end;
2791 CPUBreakpoint *bp;
2792 int j, lj = -1;
2793 ExitStatus ret;
2794 int num_insns;
2795 int max_insns;
2797 pc_start = tb->pc;
2798 gen_opc_end = tcg_ctx.gen_opc_buf + OPC_MAX_SIZE;
2800 ctx.tb = tb;
2801 ctx.pc = pc_start;
2802 ctx.mem_idx = cpu_mmu_index(env);
2803 ctx.implver = env->implver;
2804 ctx.singlestep_enabled = cs->singlestep_enabled;
2806 /* ??? Every TB begins with unset rounding mode, to be initialized on
2807 the first fp insn of the TB. Alternately we could define a proper
2808 default for every TB (e.g. QUAL_RM_N or QUAL_RM_D) and make sure
2809 to reset the FP_STATUS to that default at the end of any TB that
2810 changes the default. We could even (gasp) dynamically figure out
2811 what default would be most efficient given the running program. */
2812 ctx.tb_rm = -1;
2813 /* Similarly for flush-to-zero. */
2814 ctx.tb_ftz = -1;
2816 num_insns = 0;
2817 max_insns = tb->cflags & CF_COUNT_MASK;
2818 if (max_insns == 0) {
2819 max_insns = CF_COUNT_MASK;
2820 }
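/* Translation normally stops at a target page boundary. Within the
   unpaged superpage region there is no per-page protection, so the
   mask lets a TB run up to the 41-bit region boundary instead. */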
2822 if (in_superpage(&ctx, pc_start)) {
2823 pc_mask = (1ULL << 41) - 1;
2824 } else {
2825 pc_mask = ~TARGET_PAGE_MASK;
2826 }
2828 gen_tb_start();
2829 do {
2830 if (unlikely(!QTAILQ_EMPTY(&cs->breakpoints))) {
2831 QTAILQ_FOREACH(bp, &cs->breakpoints, entry) {
2832 if (bp->pc == ctx.pc) {
2833 gen_excp(&ctx, EXCP_DEBUG, 0);
2834 break;
2835 }
2836 }
2837 }
2838 if (search_pc) {
2839 j = tcg_ctx.gen_opc_ptr - tcg_ctx.gen_opc_buf;
2840 if (lj < j) {
2841 lj++;
2842 while (lj < j)
2843 tcg_ctx.gen_opc_instr_start[lj++] = 0;
2844 }
2845 tcg_ctx.gen_opc_pc[lj] = ctx.pc;
2846 tcg_ctx.gen_opc_instr_start[lj] = 1;
2847 tcg_ctx.gen_opc_icount[lj] = num_insns;
2848 }
2849 if (num_insns + 1 == max_insns && (tb->cflags & CF_LAST_IO)) {
2850 gen_io_start();
2851 }
2852 insn = cpu_ldl_code(env, ctx.pc);
2853 num_insns++;
2855 if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP | CPU_LOG_TB_OP_OPT))) {
2856 tcg_gen_debug_insn_start(ctx.pc);
2857 }
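/* ctx.zero, ctx.sink and ctx.lit are created on demand when an
   operand is $31/$f31 or a literal; they must be discarded and
   freed again after each instruction. */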
2859 TCGV_UNUSED_I64(ctx.zero);
2860 TCGV_UNUSED_I64(ctx.sink);
2861 TCGV_UNUSED_I64(ctx.lit);
2863 ctx.pc += 4;
2864 ret = translate_one(ctxp, insn);
2866 if (!TCGV_IS_UNUSED_I64(ctx.sink)) {
2867 tcg_gen_discard_i64(ctx.sink);
2868 tcg_temp_free(ctx.sink);
2869 }
2870 if (!TCGV_IS_UNUSED_I64(ctx.zero)) {
2871 tcg_temp_free(ctx.zero);
2872 }
2873 if (!TCGV_IS_UNUSED_I64(ctx.lit)) {
2874 tcg_temp_free(ctx.lit);
2875 }
2877 /* If we reach a page boundary, are single stepping,
2878 or exhaust instruction count, stop generation. */
2879 if (ret == NO_EXIT
2880 && ((ctx.pc & pc_mask) == 0
2881 || tcg_ctx.gen_opc_ptr >= gen_opc_end
2882 || num_insns >= max_insns
2883 || singlestep
2884 || ctx.singlestep_enabled)) {
2885 ret = EXIT_PC_STALE;
2886 }
2887 } while (ret == NO_EXIT);
2889 if (tb->cflags & CF_LAST_IO) {
2890 gen_io_end();
2891 }
2893 switch (ret) {
2894 case EXIT_GOTO_TB:
2895 case EXIT_NORETURN:
2896 break;
2897 case EXIT_PC_STALE:
2898 tcg_gen_movi_i64(cpu_pc, ctx.pc);
2899 /* FALLTHRU */
2900 case EXIT_PC_UPDATED:
2901 if (ctx.singlestep_enabled) {
2902 gen_excp_1(EXCP_DEBUG, 0);
2903 } else {
2904 tcg_gen_exit_tb(0);
2905 }
2906 break;
2907 default:
2908 abort();
2909 }
2911 gen_tb_end(tb, num_insns);
2912 *tcg_ctx.gen_opc_ptr = INDEX_op_end;
2913 if (search_pc) {
2914 j = tcg_ctx.gen_opc_ptr - tcg_ctx.gen_opc_buf;
2915 lj++;
2916 while (lj <= j)
2917 tcg_ctx.gen_opc_instr_start[lj++] = 0;
2918 } else {
2919 tb->size = ctx.pc - pc_start;
2920 tb->icount = num_insns;
2921 }
2923 #ifdef DEBUG_DISAS
2924 if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
2925 qemu_log("IN: %s\n", lookup_symbol(pc_start));
2926 log_target_disas(env, pc_start, ctx.pc - pc_start, 1);
2927 qemu_log("\n");
2928 }
2929 #endif
2930 }
2932 void gen_intermediate_code (CPUAlphaState *env, struct TranslationBlock *tb)
2933 {
2934 gen_intermediate_code_internal(alpha_env_get_cpu(env), tb, false);
2935 }
2937 void gen_intermediate_code_pc (CPUAlphaState *env, struct TranslationBlock *tb)
2938 {
2939 gen_intermediate_code_internal(alpha_env_get_cpu(env), tb, true);
2940 }
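/* Called after a fault inside a TB: the gen_opc_pc entry recorded
   for the faulting op restores env->pc to the guest instruction
   that trapped. */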
2942 void restore_state_to_opc(CPUAlphaState *env, TranslationBlock *tb, int pc_pos)
2943 {
2944 env->pc = tcg_ctx.gen_opc_pc[pc_pos];
2945 }