/*
 *  Alpha emulation cpu translation for qemu.
 *
 *  Copyright (c) 2007 Jocelyn Mayer
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "cpu.h"
#include "disas/disas.h"
#include "qemu/host-utils.h"
#include "tcg-op.h"
#include "exec/cpu_ldst.h"

#include "exec/helper-proto.h"
#include "exec/helper-gen.h"

#include "trace-tcg.h"

#undef ALPHA_DEBUG_DISAS
#define CONFIG_SOFTFLOAT_INLINE

#ifdef ALPHA_DEBUG_DISAS
#  define LOG_DISAS(...) qemu_log_mask(CPU_LOG_TB_IN_ASM, ## __VA_ARGS__)
#else
#  define LOG_DISAS(...) do { } while (0)
#endif

typedef struct DisasContext DisasContext;
struct DisasContext {
    struct TranslationBlock *tb;
    uint64_t pc;
    int mem_idx;

    /* Current rounding mode for this TB.  */
    int tb_rm;
    /* Current flush-to-zero setting for this TB.  */
    int tb_ftz;

    /* implver value for this CPU.  */
    int implver;

    /* Temporaries for $31 and $f31 as source and destination.  */
    TCGv zero;
    TCGv sink;
    /* Temporary for immediate constants.  */
    TCGv lit;

    bool singlestep_enabled;
};

/* Return values from translate_one, indicating the state of the TB.
   Note that zero indicates that we are not exiting the TB.  */

typedef enum {
    NO_EXIT,

    /* We have emitted one or more goto_tb.  No fixup required.  */
    EXIT_GOTO_TB,

    /* We are not using a goto_tb (for whatever reason), but have updated
       the PC (for whatever reason), so there's no need to do it again on
       exiting the TB.  */
    EXIT_PC_UPDATED,

    /* We are exiting the TB, but have neither emitted a goto_tb, nor
       updated the PC for the next instruction to be executed.  */
    EXIT_PC_STALE,

    /* We are ending the TB with a noreturn function call, e.g. longjmp.
       No following code will be executed.  */
    EXIT_NORETURN,
} ExitStatus;

/* global register indexes */
static TCGv_ptr cpu_env;
static TCGv cpu_ir[31];
static TCGv cpu_fir[31];
static TCGv cpu_pc;
static TCGv cpu_lock_addr;
static TCGv cpu_lock_st_addr;
static TCGv cpu_lock_value;

#include "exec/gen-icount.h"

void alpha_translate_init(void)
{
#define DEF_VAR(V)  { &cpu_##V, #V, offsetof(CPUAlphaState, V) }

    typedef struct { TCGv *var; const char *name; int ofs; } GlobalVar;
    static const GlobalVar vars[] = {
        DEF_VAR(pc),
        DEF_VAR(lock_addr),
        DEF_VAR(lock_st_addr),
        DEF_VAR(lock_value),
    };

#undef DEF_VAR

    /* Use the symbolic register names that match the disassembler.  */
    static const char greg_names[31][4] = {
        "v0", "t0", "t1", "t2", "t3", "t4", "t5", "t6",
        "t7", "s0", "s1", "s2", "s3", "s4", "s5", "fp",
        "a0", "a1", "a2", "a3", "a4", "a5", "t8", "t9",
        "t10", "t11", "ra", "t12", "at", "gp", "sp"
    };
    static const char freg_names[31][4] = {
        "f0", "f1", "f2", "f3", "f4", "f5", "f6", "f7",
        "f8", "f9", "f10", "f11", "f12", "f13", "f14", "f15",
        "f16", "f17", "f18", "f19", "f20", "f21", "f22", "f23",
        "f24", "f25", "f26", "f27", "f28", "f29", "f30"
    };

    static bool done_init = false;
    int i;

    if (done_init) {
        return;
    }
    done_init = true;

    cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");

    for (i = 0; i < 31; i++) {
        cpu_ir[i] = tcg_global_mem_new_i64(TCG_AREG0,
                                           offsetof(CPUAlphaState, ir[i]),
                                           greg_names[i]);
    }

    for (i = 0; i < 31; i++) {
        cpu_fir[i] = tcg_global_mem_new_i64(TCG_AREG0,
                                            offsetof(CPUAlphaState, fir[i]),
                                            freg_names[i]);
    }

    for (i = 0; i < ARRAY_SIZE(vars); ++i) {
        const GlobalVar *v = &vars[i];
        *v->var = tcg_global_mem_new_i64(TCG_AREG0, v->ofs, v->name);
    }
}

static TCGv load_zero(DisasContext *ctx)
{
    if (TCGV_IS_UNUSED_I64(ctx->zero)) {
        ctx->zero = tcg_const_i64(0);
    }
    return ctx->zero;
}

static TCGv dest_sink(DisasContext *ctx)
{
    if (TCGV_IS_UNUSED_I64(ctx->sink)) {
        ctx->sink = tcg_temp_new();
    }
    return ctx->sink;
}

static TCGv load_gpr(DisasContext *ctx, unsigned reg)
{
    if (likely(reg < 31)) {
        return cpu_ir[reg];
    } else {
        return load_zero(ctx);
    }
}

static TCGv load_gpr_lit(DisasContext *ctx, unsigned reg,
                         uint8_t lit, bool islit)
{
    if (islit) {
        ctx->lit = tcg_const_i64(lit);
        return ctx->lit;
    } else if (likely(reg < 31)) {
        return cpu_ir[reg];
    } else {
        return load_zero(ctx);
    }
}

static TCGv dest_gpr(DisasContext *ctx, unsigned reg)
{
    if (likely(reg < 31)) {
        return cpu_ir[reg];
    } else {
        return dest_sink(ctx);
    }
}

static TCGv load_fpr(DisasContext *ctx, unsigned reg)
{
    if (likely(reg < 31)) {
        return cpu_fir[reg];
    } else {
        return load_zero(ctx);
    }
}

static TCGv dest_fpr(DisasContext *ctx, unsigned reg)
{
    if (likely(reg < 31)) {
        return cpu_fir[reg];
    } else {
        return dest_sink(ctx);
    }
}

static void gen_excp_1(int exception, int error_code)
{
    TCGv_i32 tmp1, tmp2;

    tmp1 = tcg_const_i32(exception);
    tmp2 = tcg_const_i32(error_code);
    gen_helper_excp(cpu_env, tmp1, tmp2);
    tcg_temp_free_i32(tmp2);
    tcg_temp_free_i32(tmp1);
}

static ExitStatus gen_excp(DisasContext *ctx, int exception, int error_code)
{
    tcg_gen_movi_i64(cpu_pc, ctx->pc);
    gen_excp_1(exception, error_code);
    return EXIT_NORETURN;
}

static inline ExitStatus gen_invalid(DisasContext *ctx)
{
    return gen_excp(ctx, EXCP_OPCDEC, 0);
}

static inline void gen_qemu_ldf(TCGv t0, TCGv t1, int flags)
{
    TCGv_i32 tmp32 = tcg_temp_new_i32();
    tcg_gen_qemu_ld_i32(tmp32, t1, flags, MO_LEUL);
    gen_helper_memory_to_f(t0, tmp32);
    tcg_temp_free_i32(tmp32);
}

static inline void gen_qemu_ldg(TCGv t0, TCGv t1, int flags)
{
    TCGv tmp = tcg_temp_new();
    tcg_gen_qemu_ld_i64(tmp, t1, flags, MO_LEQ);
    gen_helper_memory_to_g(t0, tmp);
    tcg_temp_free(tmp);
}

static inline void gen_qemu_lds(TCGv t0, TCGv t1, int flags)
{
    TCGv_i32 tmp32 = tcg_temp_new_i32();
    tcg_gen_qemu_ld_i32(tmp32, t1, flags, MO_LEUL);
    gen_helper_memory_to_s(t0, tmp32);
    tcg_temp_free_i32(tmp32);
}

static inline void gen_qemu_ldl_l(TCGv t0, TCGv t1, int flags)
{
    tcg_gen_qemu_ld_i64(t0, t1, flags, MO_LESL);
    tcg_gen_mov_i64(cpu_lock_addr, t1);
    tcg_gen_mov_i64(cpu_lock_value, t0);
}

static inline void gen_qemu_ldq_l(TCGv t0, TCGv t1, int flags)
{
    tcg_gen_qemu_ld_i64(t0, t1, flags, MO_LEQ);
    tcg_gen_mov_i64(cpu_lock_addr, t1);
    tcg_gen_mov_i64(cpu_lock_value, t0);
}

static inline void gen_load_mem(DisasContext *ctx,
                                void (*tcg_gen_qemu_load)(TCGv t0, TCGv t1,
                                                          int flags),
                                int ra, int rb, int32_t disp16, bool fp,
                                bool clear)
{
    TCGv tmp, addr, va;

    /* LDQ_U with ra $31 is UNOP.  Other various loads are forms of
       prefetches, which we can treat as nops.  No worries about
       missed exceptions here.  */
    if (unlikely(ra == 31)) {
        return;
    }

    tmp = tcg_temp_new();
    addr = load_gpr(ctx, rb);

    if (disp16) {
        tcg_gen_addi_i64(tmp, addr, disp16);
        addr = tmp;
    }
    if (clear) {
        tcg_gen_andi_i64(tmp, addr, ~0x7);
        addr = tmp;
    }

    va = (fp ? cpu_fir[ra] : cpu_ir[ra]);
    tcg_gen_qemu_load(va, addr, ctx->mem_idx);

    tcg_temp_free(tmp);
}

static inline void gen_qemu_stf(TCGv t0, TCGv t1, int flags)
{
    TCGv_i32 tmp32 = tcg_temp_new_i32();
    gen_helper_f_to_memory(tmp32, t0);
    tcg_gen_qemu_st_i32(tmp32, t1, flags, MO_LEUL);
    tcg_temp_free_i32(tmp32);
}

static inline void gen_qemu_stg(TCGv t0, TCGv t1, int flags)
{
    TCGv tmp = tcg_temp_new();
    gen_helper_g_to_memory(tmp, t0);
    tcg_gen_qemu_st_i64(tmp, t1, flags, MO_LEQ);
    tcg_temp_free(tmp);
}

static inline void gen_qemu_sts(TCGv t0, TCGv t1, int flags)
{
    TCGv_i32 tmp32 = tcg_temp_new_i32();
    gen_helper_s_to_memory(tmp32, t0);
    tcg_gen_qemu_st_i32(tmp32, t1, flags, MO_LEUL);
    tcg_temp_free_i32(tmp32);
}

static inline void gen_store_mem(DisasContext *ctx,
                                 void (*tcg_gen_qemu_store)(TCGv t0, TCGv t1,
                                                            int flags),
                                 int ra, int rb, int32_t disp16, bool fp,
                                 bool clear)
{
    TCGv tmp, addr, va;

    tmp = tcg_temp_new();
    addr = load_gpr(ctx, rb);

    if (disp16) {
        tcg_gen_addi_i64(tmp, addr, disp16);
        addr = tmp;
    }
    if (clear) {
        tcg_gen_andi_i64(tmp, addr, ~0x7);
        addr = tmp;
    }

    va = (fp ? load_fpr(ctx, ra) : load_gpr(ctx, ra));
    tcg_gen_qemu_store(va, addr, ctx->mem_idx);

    tcg_temp_free(tmp);
}

static ExitStatus gen_store_conditional(DisasContext *ctx, int ra, int rb,
                                        int32_t disp16, int quad)
{
    TCGv addr;

    if (ra == 31) {
        /* ??? Don't bother storing anything.  The user can't tell
           the difference, since the zero register always reads zero.  */
        return NO_EXIT;
    }

#if defined(CONFIG_USER_ONLY)
    addr = cpu_lock_st_addr;
#else
    addr = tcg_temp_local_new();
#endif

    tcg_gen_addi_i64(addr, load_gpr(ctx, rb), disp16);

#if defined(CONFIG_USER_ONLY)
    /* ??? This is handled via a complicated version of compare-and-swap
       in the cpu_loop.  Hopefully one day we'll have a real CAS opcode
       in TCG so that this isn't necessary.  */
    return gen_excp(ctx, quad ? EXCP_STQ_C : EXCP_STL_C, ra);
#else
    /* ??? In system mode we are never multi-threaded, so CAS can be
       implemented via a non-atomic load-compare-store sequence.  */
    {
        TCGLabel *lab_fail, *lab_done;
        TCGv val;

        lab_fail = gen_new_label();
        lab_done = gen_new_label();
        tcg_gen_brcond_i64(TCG_COND_NE, addr, cpu_lock_addr, lab_fail);

        val = tcg_temp_new();
        tcg_gen_qemu_ld_i64(val, addr, ctx->mem_idx, quad ? MO_LEQ : MO_LESL);
        tcg_gen_brcond_i64(TCG_COND_NE, val, cpu_lock_value, lab_fail);

        tcg_gen_qemu_st_i64(cpu_ir[ra], addr, ctx->mem_idx,
                            quad ? MO_LEQ : MO_LEUL);
        tcg_gen_movi_i64(cpu_ir[ra], 1);
        tcg_gen_br(lab_done);

        gen_set_label(lab_fail);
        tcg_gen_movi_i64(cpu_ir[ra], 0);

        gen_set_label(lab_done);
        tcg_gen_movi_i64(cpu_lock_addr, -1);

        tcg_temp_free(addr);
        return NO_EXIT;
    }
#endif
}

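/* The region tested for below is the Alpha kernel superpage (KSEG):
   with the 43-bit virtual address space QEMU implements, this is the
   sign-extended range 0xfffffc0000000000 ... 0xfffffdffffffffff, which
   maps physical memory directly and so can never change its
   translation or protection.  */
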
static bool in_superpage(DisasContext *ctx, int64_t addr)
{
    return ((ctx->tb->flags & TB_FLAGS_USER_MODE) == 0
            && addr < 0
            && ((addr >> 41) & 3) == 2
            && addr >> TARGET_VIRT_ADDR_SPACE_BITS == addr >> 63);
}

static bool use_goto_tb(DisasContext *ctx, uint64_t dest)
{
    /* Suppress goto_tb in the case of single-stepping and IO.  */
    if ((ctx->tb->cflags & CF_LAST_IO)
        || ctx->singlestep_enabled || singlestep) {
        return false;
    }
    /* If the destination is in the superpage, the page perms can't change.  */
    if (in_superpage(ctx, dest)) {
        return true;
    }
    /* Check for the dest on the same page as the start of the TB.  */
    return ((ctx->tb->pc ^ dest) & TARGET_PAGE_MASK) == 0;
}

static ExitStatus gen_bdirect(DisasContext *ctx, int ra, int32_t disp)
{
    uint64_t dest = ctx->pc + (disp << 2);

    if (ra != 31) {
        tcg_gen_movi_i64(cpu_ir[ra], ctx->pc);
    }

    /* Notice branch-to-next; used to initialize RA with the PC.  */
    if (disp == 0) {
        return 0;
    } else if (use_goto_tb(ctx, dest)) {
        tcg_gen_goto_tb(0);
        tcg_gen_movi_i64(cpu_pc, dest);
        tcg_gen_exit_tb((uintptr_t)ctx->tb);
        return EXIT_GOTO_TB;
    } else {
        tcg_gen_movi_i64(cpu_pc, dest);
        return EXIT_PC_UPDATED;
    }
}

static ExitStatus gen_bcond_internal(DisasContext *ctx, TCGCond cond,
                                     TCGv cmp, int32_t disp)
{
    uint64_t dest = ctx->pc + (disp << 2);
    TCGLabel *lab_true = gen_new_label();

    if (use_goto_tb(ctx, dest)) {
        tcg_gen_brcondi_i64(cond, cmp, 0, lab_true);

        tcg_gen_goto_tb(0);
        tcg_gen_movi_i64(cpu_pc, ctx->pc);
        tcg_gen_exit_tb((uintptr_t)ctx->tb);

        gen_set_label(lab_true);
        tcg_gen_goto_tb(1);
        tcg_gen_movi_i64(cpu_pc, dest);
        tcg_gen_exit_tb((uintptr_t)ctx->tb + 1);

        return EXIT_GOTO_TB;
    } else {
        TCGv_i64 z = tcg_const_i64(0);
        TCGv_i64 d = tcg_const_i64(dest);
        TCGv_i64 p = tcg_const_i64(ctx->pc);

        tcg_gen_movcond_i64(cond, cpu_pc, cmp, z, d, p);

        tcg_temp_free_i64(z);
        tcg_temp_free_i64(d);
        tcg_temp_free_i64(p);
        return EXIT_PC_UPDATED;
    }
}

static ExitStatus gen_bcond(DisasContext *ctx, TCGCond cond, int ra,
                            int32_t disp, int mask)
{
    TCGv cmp_tmp;

    if (mask) {
        cmp_tmp = tcg_temp_new();
        tcg_gen_andi_i64(cmp_tmp, load_gpr(ctx, ra), 1);
    } else {
        cmp_tmp = load_gpr(ctx, ra);
    }

    return gen_bcond_internal(ctx, cond, cmp_tmp, disp);
}

/* Fold -0.0 for comparison with COND.  */

static void gen_fold_mzero(TCGCond cond, TCGv dest, TCGv src)
{
    uint64_t mzero = 1ull << 63;

    switch (cond) {
    case TCG_COND_LE:
    case TCG_COND_GT:
        /* For <= or >, the -0.0 value directly compares the way we want.  */
        tcg_gen_mov_i64(dest, src);
        break;

    case TCG_COND_EQ:
    case TCG_COND_NE:
        /* For == or !=, we can simply mask off the sign bit and compare.  */
        tcg_gen_andi_i64(dest, src, mzero - 1);
        break;

    case TCG_COND_GE:
    case TCG_COND_LT:
        /* For >= or <, map -0.0 to +0.0 via comparison and mask.  */
        tcg_gen_setcondi_i64(TCG_COND_NE, dest, src, mzero);
        tcg_gen_neg_i64(dest, dest);
        tcg_gen_and_i64(dest, dest, src);
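        /* Concretely: src == -0.0 makes the setcond produce 0, the
           negation stays 0, and the AND yields +0.0; any other src
           compares unequal to mzero, the negated flag becomes an
           all-ones mask, and src passes through unchanged.  */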
        break;

    default:
        abort();
    }
}

static ExitStatus gen_fbcond(DisasContext *ctx, TCGCond cond, int ra,
                             int32_t disp)
{
    TCGv cmp_tmp = tcg_temp_new();
    gen_fold_mzero(cond, cmp_tmp, load_fpr(ctx, ra));
    return gen_bcond_internal(ctx, cond, cmp_tmp, disp);
}

static void gen_fcmov(DisasContext *ctx, TCGCond cond, int ra, int rb, int rc)
{
    TCGv_i64 va, vb, z;

    z = load_zero(ctx);
    vb = load_fpr(ctx, rb);
    va = tcg_temp_new();
    gen_fold_mzero(cond, va, load_fpr(ctx, ra));

    tcg_gen_movcond_i64(cond, dest_fpr(ctx, rc), va, z, vb, load_fpr(ctx, rc));

    tcg_temp_free(va);
}

#define QUAL_RM_N       0x080   /* Round mode nearest even */
#define QUAL_RM_C       0x000   /* Round mode chopped */
#define QUAL_RM_M       0x040   /* Round mode minus infinity */
#define QUAL_RM_D       0x0c0   /* Round mode dynamic */
#define QUAL_RM_MASK    0x0c0

#define QUAL_U          0x100   /* Underflow enable (fp output) */
#define QUAL_V          0x100   /* Overflow enable (int output) */
#define QUAL_S          0x400   /* Software completion enable */
#define QUAL_I          0x200   /* Inexact detection enable */
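
/* These qualifiers decode the fn11 function field of IEEE FP opcodes.
   For example, ADDS/SUI has fn11 == 0x780: software completion (QUAL_S),
   underflow (QUAL_U) and inexact (QUAL_I) enabled, with the default
   round-to-nearest mode (QUAL_RM_N).  */
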
static void gen_qual_roundmode(DisasContext *ctx, int fn11)
{
    TCGv_i32 tmp;

    fn11 &= QUAL_RM_MASK;
    if (fn11 == ctx->tb_rm) {
        return;
    }
    ctx->tb_rm = fn11;

    tmp = tcg_temp_new_i32();
    switch (fn11) {
    case QUAL_RM_N:
        tcg_gen_movi_i32(tmp, float_round_nearest_even);
        break;
    case QUAL_RM_C:
        tcg_gen_movi_i32(tmp, float_round_to_zero);
        break;
    case QUAL_RM_M:
        tcg_gen_movi_i32(tmp, float_round_down);
        break;
    case QUAL_RM_D:
        tcg_gen_ld8u_i32(tmp, cpu_env,
                         offsetof(CPUAlphaState, fpcr_dyn_round));
        break;
    }

#if defined(CONFIG_SOFTFLOAT_INLINE)
    /* ??? The "fpu/softfloat.h" interface is to call set_float_rounding_mode.
       With CONFIG_SOFTFLOAT that expands to an out-of-line call that just
       sets the one field.  */
    tcg_gen_st8_i32(tmp, cpu_env,
                    offsetof(CPUAlphaState, fp_status.float_rounding_mode));
#else
    gen_helper_setroundmode(tmp);
#endif

    tcg_temp_free_i32(tmp);
}

static void gen_qual_flushzero(DisasContext *ctx, int fn11)
{
    TCGv_i32 tmp;

    fn11 &= QUAL_U;
    if (fn11 == ctx->tb_ftz) {
        return;
    }
    ctx->tb_ftz = fn11;

    tmp = tcg_temp_new_i32();
    if (fn11) {
        /* Underflow is enabled, use the FPCR setting.  */
        tcg_gen_ld8u_i32(tmp, cpu_env,
                         offsetof(CPUAlphaState, fpcr_flush_to_zero));
    } else {
        /* Underflow is disabled, force flush-to-zero.  */
        tcg_gen_movi_i32(tmp, 1);
    }

#if defined(CONFIG_SOFTFLOAT_INLINE)
    tcg_gen_st8_i32(tmp, cpu_env,
                    offsetof(CPUAlphaState, fp_status.flush_to_zero));
#else
    gen_helper_setflushzero(tmp);
#endif

    tcg_temp_free_i32(tmp);
}

static TCGv gen_ieee_input(DisasContext *ctx, int reg, int fn11, int is_cmp)
{
    TCGv val;

    if (unlikely(reg == 31)) {
        val = load_zero(ctx);
    } else {
        val = cpu_fir[reg];
        if ((fn11 & QUAL_S) == 0) {
            if (is_cmp) {
                gen_helper_ieee_input_cmp(cpu_env, val);
            } else {
                gen_helper_ieee_input(cpu_env, val);
            }
        } else {
#ifndef CONFIG_USER_ONLY
            /* In system mode, raise exceptions for denormals like real
               hardware.  In user mode, proceed as if the OS completion
               handler is handling the denormal as per spec.  */
            gen_helper_ieee_input_s(cpu_env, val);
#endif
        }
    }
    return val;
}

static void gen_fp_exc_raise(int rc, int fn11)
{
    /* ??? We ought to be able to do something with imprecise exceptions.
       E.g. notice we're still in the trap shadow of something within the
       TB and do not generate the code to signal the exception; end the TB
       when an exception is forced to arrive, either by consumption of a
       register value or TRAPB or EXCB.  */
    TCGv_i32 reg, ign;
    uint32_t ignore = 0;

    if (!(fn11 & QUAL_U)) {
        /* Note that QUAL_U == QUAL_V, so ignore either.  */
        ignore |= FPCR_UNF | FPCR_IOV;
    }
    if (!(fn11 & QUAL_I)) {
        ignore |= FPCR_INE;
    }
    ign = tcg_const_i32(ignore);

    /* ??? Pass in the regno of the destination so that the helper can
       set EXC_MASK, which contains a bitmask of destination registers
       that have caused arithmetic traps.  A simple userspace emulation
       does not require this.  We do need it for a guest kernel's entArith,
       or if we were to do something clever with imprecise exceptions.  */
    reg = tcg_const_i32(rc + 32);
    if (fn11 & QUAL_S) {
        gen_helper_fp_exc_raise_s(cpu_env, ign, reg);
    } else {
        gen_helper_fp_exc_raise(cpu_env, ign, reg);
    }

    tcg_temp_free_i32(reg);
    tcg_temp_free_i32(ign);
}

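/* CVTLQ's input is a 32-bit value stored in FP register format: bits
   <31:30> of the integer sit in register bits <63:62> and bits <29:0>
   in bits <58:29>; the shifts below reassemble it.  */
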
static void gen_cvtlq(TCGv vc, TCGv vb)
{
    TCGv tmp = tcg_temp_new();

    /* The arithmetic right shift here, plus the sign-extended mask below
       yields a sign-extended result without an explicit ext32s_i64.  */
    tcg_gen_sari_i64(tmp, vb, 32);
    tcg_gen_shri_i64(vc, vb, 29);
    tcg_gen_andi_i64(tmp, tmp, (int32_t)0xc0000000);
    tcg_gen_andi_i64(vc, vc, 0x3fffffff);
    tcg_gen_or_i64(vc, vc, tmp);

    tcg_temp_free(tmp);
}

static void gen_ieee_arith2(DisasContext *ctx,
                            void (*helper)(TCGv, TCGv_ptr, TCGv),
                            int rb, int rc, int fn11)
{
    TCGv vb;

    gen_qual_roundmode(ctx, fn11);
    gen_qual_flushzero(ctx, fn11);

    vb = gen_ieee_input(ctx, rb, fn11, 0);
    helper(dest_fpr(ctx, rc), cpu_env, vb);

    gen_fp_exc_raise(rc, fn11);
}

#define IEEE_ARITH2(name)                                       \
static inline void glue(gen_, name)(DisasContext *ctx,          \
                                    int rb, int rc, int fn11)   \
{                                                               \
    gen_ieee_arith2(ctx, gen_helper_##name, rb, rc, fn11);      \
}
IEEE_ARITH2(sqrts)
IEEE_ARITH2(sqrtt)
IEEE_ARITH2(cvtst)
IEEE_ARITH2(cvtts)

static void gen_cvttq(DisasContext *ctx, int rb, int rc, int fn11)
{
    TCGv vb, vc;

    /* No need to set flushzero, since we have an integer output.  */
    vb = gen_ieee_input(ctx, rb, fn11, 0);
    vc = dest_fpr(ctx, rc);

    /* Almost all integer conversions use cropped rounding;
       special case that.  */
    if ((fn11 & QUAL_RM_MASK) == QUAL_RM_C) {
        gen_helper_cvttq_c(vc, cpu_env, vb);
    } else {
        gen_qual_roundmode(ctx, fn11);
        gen_helper_cvttq(vc, cpu_env, vb);
    }
    gen_fp_exc_raise(rc, fn11);
}

static void gen_ieee_intcvt(DisasContext *ctx,
                            void (*helper)(TCGv, TCGv_ptr, TCGv),
                            int rb, int rc, int fn11)
{
    TCGv vb, vc;

    gen_qual_roundmode(ctx, fn11);
    vb = load_fpr(ctx, rb);
    vc = dest_fpr(ctx, rc);

    /* The only exception that can be raised by integer conversion
       is inexact.  Thus we only need to worry about exceptions when
       inexact handling is requested.  */
    if (fn11 & QUAL_I) {
        helper(vc, cpu_env, vb);
        gen_fp_exc_raise(rc, fn11);
    } else {
        helper(vc, cpu_env, vb);
    }
}

#define IEEE_INTCVT(name)                                       \
static inline void glue(gen_, name)(DisasContext *ctx,          \
                                    int rb, int rc, int fn11)   \
{                                                               \
    gen_ieee_intcvt(ctx, gen_helper_##name, rb, rc, fn11);      \
}
IEEE_INTCVT(cvtqs)
IEEE_INTCVT(cvtqt)

static void gen_cpy_mask(TCGv vc, TCGv va, TCGv vb, bool inv_a, uint64_t mask)
{
    TCGv vmask = tcg_const_i64(mask);
    TCGv tmp = tcg_temp_new_i64();

    if (inv_a) {
        tcg_gen_andc_i64(tmp, vmask, va);
    } else {
        tcg_gen_and_i64(tmp, va, vmask);
    }

    tcg_gen_andc_i64(vc, vb, vmask);
    tcg_gen_or_i64(vc, vc, tmp);

    tcg_temp_free(vmask);
    tcg_temp_free(tmp);
}

static void gen_ieee_arith3(DisasContext *ctx,
                            void (*helper)(TCGv, TCGv_ptr, TCGv, TCGv),
                            int ra, int rb, int rc, int fn11)
{
    TCGv va, vb, vc;

    gen_qual_roundmode(ctx, fn11);
    gen_qual_flushzero(ctx, fn11);

    va = gen_ieee_input(ctx, ra, fn11, 0);
    vb = gen_ieee_input(ctx, rb, fn11, 0);
    vc = dest_fpr(ctx, rc);
    helper(vc, cpu_env, va, vb);

    gen_fp_exc_raise(rc, fn11);
}

#define IEEE_ARITH3(name)                                               \
static inline void glue(gen_, name)(DisasContext *ctx,                  \
                                    int ra, int rb, int rc, int fn11)   \
{                                                                       \
    gen_ieee_arith3(ctx, gen_helper_##name, ra, rb, rc, fn11);          \
}
IEEE_ARITH3(adds)
IEEE_ARITH3(subs)
IEEE_ARITH3(muls)
IEEE_ARITH3(divs)
IEEE_ARITH3(addt)
IEEE_ARITH3(subt)
IEEE_ARITH3(mult)
IEEE_ARITH3(divt)

static void gen_ieee_compare(DisasContext *ctx,
                             void (*helper)(TCGv, TCGv_ptr, TCGv, TCGv),
                             int ra, int rb, int rc, int fn11)
{
    TCGv va, vb, vc;

    va = gen_ieee_input(ctx, ra, fn11, 1);
    vb = gen_ieee_input(ctx, rb, fn11, 1);
    vc = dest_fpr(ctx, rc);
    helper(vc, cpu_env, va, vb);

    gen_fp_exc_raise(rc, fn11);
}

#define IEEE_CMP3(name)                                                 \
static inline void glue(gen_, name)(DisasContext *ctx,                  \
                                    int ra, int rb, int rc, int fn11)   \
{                                                                       \
    gen_ieee_compare(ctx, gen_helper_##name, ra, rb, rc, fn11);         \
}
IEEE_CMP3(cmptun)
IEEE_CMP3(cmpteq)
IEEE_CMP3(cmptlt)
IEEE_CMP3(cmptle)

static inline uint64_t zapnot_mask(uint8_t lit)
{
    uint64_t mask = 0;
    int i;

    for (i = 0; i < 8; ++i) {
        if ((lit >> i) & 1) {
            mask |= 0xffull << (i * 8);
        }
    }
    return mask;
}

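/* For example, zapnot_mask(0x0f) == 0x00000000ffffffffull, keeping
   only the low four byte lanes; the common masks are special-cased
   as extensions or moves in gen_zapnoti below.  */
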
/* Implement zapnot with an immediate operand, which expands to some
   form of immediate AND.  This is a basic building block in the
   definition of many of the other byte manipulation instructions.  */
static void gen_zapnoti(TCGv dest, TCGv src, uint8_t lit)
{
    switch (lit) {
    case 0x00:
        tcg_gen_movi_i64(dest, 0);
        break;
    case 0x01:
        tcg_gen_ext8u_i64(dest, src);
        break;
    case 0x03:
        tcg_gen_ext16u_i64(dest, src);
        break;
    case 0x0f:
        tcg_gen_ext32u_i64(dest, src);
        break;
    case 0xff:
        tcg_gen_mov_i64(dest, src);
        break;
    default:
        tcg_gen_andi_i64(dest, src, zapnot_mask(lit));
        break;
    }
}

/* EXTWH, EXTLH, EXTQH */
static void gen_ext_h(DisasContext *ctx, TCGv vc, TCGv va, int rb, bool islit,
                      uint8_t lit, uint8_t byte_mask)
{
    if (islit) {
        tcg_gen_shli_i64(vc, va, (64 - lit * 8) & 0x3f);
    } else {
        TCGv tmp = tcg_temp_new();
        tcg_gen_shli_i64(tmp, load_gpr(ctx, rb), 3);
        tcg_gen_neg_i64(tmp, tmp);
        tcg_gen_andi_i64(tmp, tmp, 0x3f);
        tcg_gen_shl_i64(vc, va, tmp);
        tcg_temp_free(tmp);
    }
    gen_zapnoti(vc, vc, byte_mask);
}

/* EXTBL, EXTWL, EXTLL, EXTQL */
static void gen_ext_l(DisasContext *ctx, TCGv vc, TCGv va, int rb, bool islit,
                      uint8_t lit, uint8_t byte_mask)
{
    if (islit) {
        tcg_gen_shri_i64(vc, va, (lit & 7) * 8);
    } else {
        TCGv tmp = tcg_temp_new();
        tcg_gen_andi_i64(tmp, load_gpr(ctx, rb), 7);
        tcg_gen_shli_i64(tmp, tmp, 3);
        tcg_gen_shr_i64(vc, va, tmp);
        tcg_temp_free(tmp);
    }
    gen_zapnoti(vc, vc, byte_mask);
}

/* INSWH, INSLH, INSQH */
static void gen_ins_h(DisasContext *ctx, TCGv vc, TCGv va, int rb, bool islit,
                      uint8_t lit, uint8_t byte_mask)
{
    TCGv tmp = tcg_temp_new();

    /* The instruction description has us left-shift the byte mask and extract
       bits <15:8> and apply that zap at the end.  This is equivalent to simply
       performing the zap first and shifting afterward.  */
    gen_zapnoti(tmp, va, byte_mask);

    if (islit) {
        lit &= 7;
        if (unlikely(lit == 0)) {
            tcg_gen_movi_i64(vc, 0);
        } else {
            tcg_gen_shri_i64(vc, tmp, 64 - lit * 8);
        }
    } else {
        TCGv shift = tcg_temp_new();

        /* If (B & 7) == 0, we need to shift by 64 and leave a zero.  Do this
           portably by splitting the shift into two parts: shift_count-1 and 1.
           Arrange for the -1 by using ones-complement instead of
           twos-complement in the negation: ~(B * 8) & 63.  */
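        /* For example, B & 7 == 2 gives shift = ~16 & 63 = 47; the two
           shifts below then total 48 == 64 - 2 * 8, while B & 7 == 0
           totals 64 and leaves the required zero.  */
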
        tcg_gen_shli_i64(shift, load_gpr(ctx, rb), 3);
        tcg_gen_not_i64(shift, shift);
        tcg_gen_andi_i64(shift, shift, 0x3f);

        tcg_gen_shr_i64(vc, tmp, shift);
        tcg_gen_shri_i64(vc, vc, 1);
        tcg_temp_free(shift);
    }
    tcg_temp_free(tmp);
}

/* INSBL, INSWL, INSLL, INSQL */
static void gen_ins_l(DisasContext *ctx, TCGv vc, TCGv va, int rb, bool islit,
                      uint8_t lit, uint8_t byte_mask)
{
    TCGv tmp = tcg_temp_new();

    /* The instruction description has us left-shift the byte mask
       the same number of byte slots as the data and apply the zap
       at the end.  This is equivalent to simply performing the zap
       first and shifting afterward.  */
    gen_zapnoti(tmp, va, byte_mask);

    if (islit) {
        tcg_gen_shli_i64(vc, tmp, (lit & 7) * 8);
    } else {
        TCGv shift = tcg_temp_new();
        tcg_gen_andi_i64(shift, load_gpr(ctx, rb), 7);
        tcg_gen_shli_i64(shift, shift, 3);
        tcg_gen_shl_i64(vc, tmp, shift);
        tcg_temp_free(shift);
    }
    tcg_temp_free(tmp);
}

/* MSKWH, MSKLH, MSKQH */
static void gen_msk_h(DisasContext *ctx, TCGv vc, TCGv va, int rb, bool islit,
                      uint8_t lit, uint8_t byte_mask)
{
    if (islit) {
        gen_zapnoti(vc, va, ~((byte_mask << (lit & 7)) >> 8));
    } else {
        TCGv shift = tcg_temp_new();
        TCGv mask = tcg_temp_new();

        /* The instruction description is as above, where the byte_mask
           is shifted left, and then we extract bits <15:8>.  This can be
           emulated with a right-shift on the expanded byte mask.  This
           requires extra care because for an input <2:0> == 0 we need a
           shift of 64 bits in order to generate a zero.  This is done by
           splitting the shift into two parts, the variable shift - 1
           followed by a constant 1 shift.  The code we expand below is
           equivalent to ~(B * 8) & 63.  */

        tcg_gen_shli_i64(shift, load_gpr(ctx, rb), 3);
        tcg_gen_not_i64(shift, shift);
        tcg_gen_andi_i64(shift, shift, 0x3f);
        tcg_gen_movi_i64(mask, zapnot_mask(byte_mask));
        tcg_gen_shr_i64(mask, mask, shift);
        tcg_gen_shri_i64(mask, mask, 1);

        tcg_gen_andc_i64(vc, va, mask);

        tcg_temp_free(mask);
        tcg_temp_free(shift);
    }
}

/* MSKBL, MSKWL, MSKLL, MSKQL */
static void gen_msk_l(DisasContext *ctx, TCGv vc, TCGv va, int rb, bool islit,
                      uint8_t lit, uint8_t byte_mask)
{
    if (islit) {
        gen_zapnoti(vc, va, ~(byte_mask << (lit & 7)));
    } else {
        TCGv shift = tcg_temp_new();
        TCGv mask = tcg_temp_new();

        tcg_gen_andi_i64(shift, load_gpr(ctx, rb), 7);
        tcg_gen_shli_i64(shift, shift, 3);
        tcg_gen_movi_i64(mask, zapnot_mask(byte_mask));
        tcg_gen_shl_i64(mask, mask, shift);

        tcg_gen_andc_i64(vc, va, mask);

        tcg_temp_free(mask);
        tcg_temp_free(shift);
    }
}

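/* RC and RS: read the current interrupt flag into Ra, then leave it
   cleared (RC) or set (RS).  */
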
static void gen_rx(int ra, int set)
{
    TCGv_i32 tmp;

    if (ra != 31) {
        tcg_gen_ld8u_i64(cpu_ir[ra], cpu_env,
                         offsetof(CPUAlphaState, intr_flag));
    }

    tmp = tcg_const_i32(set);
    tcg_gen_st8_i32(tmp, cpu_env, offsetof(CPUAlphaState, intr_flag));
    tcg_temp_free_i32(tmp);
}

static ExitStatus gen_call_pal(DisasContext *ctx, int palcode)
{
    /* We're emulating OSF/1 PALcode.  Many of these are trivial access
       to internal cpu registers.  */

    /* Unprivileged PAL call */
    if (palcode >= 0x80 && palcode < 0xC0) {
        switch (palcode) {
        case 0x86:
            /* IMB */
            /* No-op inside QEMU.  */
            break;
        case 0x9E:
            /* RDUNIQUE */
            tcg_gen_ld_i64(cpu_ir[IR_V0], cpu_env,
                           offsetof(CPUAlphaState, unique));
            break;
        case 0x9F:
            /* WRUNIQUE */
            tcg_gen_st_i64(cpu_ir[IR_A0], cpu_env,
                           offsetof(CPUAlphaState, unique));
            break;
        default:
            palcode &= 0xbf;
            goto do_call_pal;
        }
        return NO_EXIT;
    }

#ifndef CONFIG_USER_ONLY
    /* Privileged PAL code */
    if (palcode < 0x40 && (ctx->tb->flags & TB_FLAGS_USER_MODE) == 0) {
        switch (palcode) {
        case 0x01:
            /* CFLUSH */
            /* No-op inside QEMU.  */
            break;
        case 0x02:
            /* DRAINA */
            /* No-op inside QEMU.  */
            break;
        case 0x2D:
            /* WRVPTPTR */
            tcg_gen_st_i64(cpu_ir[IR_A0], cpu_env,
                           offsetof(CPUAlphaState, vptptr));
            break;
        case 0x31:
            /* WRVAL */
            tcg_gen_st_i64(cpu_ir[IR_A0], cpu_env,
                           offsetof(CPUAlphaState, sysval));
            break;
        case 0x32:
            /* RDVAL */
            tcg_gen_ld_i64(cpu_ir[IR_V0], cpu_env,
                           offsetof(CPUAlphaState, sysval));
            break;

        case 0x35: {
            /* SWPIPL */
            TCGv tmp;

            /* Note that we already know we're in kernel mode, so we know
               that PS only contains the 3 IPL bits.  */
            tcg_gen_ld8u_i64(cpu_ir[IR_V0], cpu_env,
                             offsetof(CPUAlphaState, ps));

            /* But make sure and store only the 3 IPL bits from the user.  */
            tmp = tcg_temp_new();
            tcg_gen_andi_i64(tmp, cpu_ir[IR_A0], PS_INT_MASK);
            tcg_gen_st8_i64(tmp, cpu_env, offsetof(CPUAlphaState, ps));
            tcg_temp_free(tmp);
            break;
        }

        case 0x36:
            /* RDPS */
            tcg_gen_ld8u_i64(cpu_ir[IR_V0], cpu_env,
                             offsetof(CPUAlphaState, ps));
            break;
        case 0x38:
            /* WRUSP */
            tcg_gen_st_i64(cpu_ir[IR_A0], cpu_env,
                           offsetof(CPUAlphaState, usp));
            break;
        case 0x3A:
            /* RDUSP */
            tcg_gen_ld_i64(cpu_ir[IR_V0], cpu_env,
                           offsetof(CPUAlphaState, usp));
            break;
        case 0x3C:
            /* WHAMI */
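            /* env is embedded in AlphaCPU, so stepping back by
               offsetof(AlphaCPU, env) and forward to cpu_index reads
               the CPU's index out of the parent CPUState.  */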
            tcg_gen_ld32s_i64(cpu_ir[IR_V0], cpu_env,
                -offsetof(AlphaCPU, env) + offsetof(CPUState, cpu_index));
            break;

        default:
            palcode &= 0x3f;
            goto do_call_pal;
        }
        return NO_EXIT;
    }
#endif
    return gen_invalid(ctx);

 do_call_pal:
#ifdef CONFIG_USER_ONLY
    return gen_excp(ctx, EXCP_CALL_PAL, palcode);
#else
    {
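        /* Dispatch to the OSF/1 PALcode entry points: unprivileged calls
           vector to offset 0x2000 + (palcode - 0x80) * 64 and privileged
           calls to 0x1000 + palcode * 64, relative to the PAL base.  */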
        TCGv pc = tcg_const_i64(ctx->pc);
        TCGv entry = tcg_const_i64(palcode & 0x80
                                   ? 0x2000 + (palcode - 0x80) * 64
                                   : 0x1000 + palcode * 64);

        gen_helper_call_pal(cpu_env, pc, entry);

        tcg_temp_free(entry);
        tcg_temp_free(pc);

        /* Since the destination is running in PALmode, we don't really
           need the page permissions check.  We'll see the existence of
           the page when we create the TB, and we'll flush all TBs if
           we change the PAL base register.  */
        if (!ctx->singlestep_enabled && !(ctx->tb->cflags & CF_LAST_IO)) {
            tcg_gen_goto_tb(0);
            tcg_gen_exit_tb((uintptr_t)ctx->tb);
            return EXIT_GOTO_TB;
        }

        return EXIT_PC_UPDATED;
    }
#endif
}

#ifndef CONFIG_USER_ONLY

#define PR_BYTE         0x100000
#define PR_LONG         0x200000
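
/* cpu_pr_data maps a processor-register number to its env offset,
   with the access width tagged in the high bits: PR_BYTE marks 8-bit
   fields and PR_LONG 32-bit fields; untagged offsets are full 64-bit
   fields.  A return of 0 means the register is unknown.  */
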
static int cpu_pr_data(int pr)
{
    switch (pr) {
    case  0: return offsetof(CPUAlphaState, ps) | PR_BYTE;
    case  1: return offsetof(CPUAlphaState, fen) | PR_BYTE;
    case  2: return offsetof(CPUAlphaState, pcc_ofs) | PR_LONG;
    case  3: return offsetof(CPUAlphaState, trap_arg0);
    case  4: return offsetof(CPUAlphaState, trap_arg1);
    case  5: return offsetof(CPUAlphaState, trap_arg2);
    case  6: return offsetof(CPUAlphaState, exc_addr);
    case  7: return offsetof(CPUAlphaState, palbr);
    case  8: return offsetof(CPUAlphaState, ptbr);
    case  9: return offsetof(CPUAlphaState, vptptr);
    case 10: return offsetof(CPUAlphaState, unique);
    case 11: return offsetof(CPUAlphaState, sysval);
    case 12: return offsetof(CPUAlphaState, usp);

    case 32 ... 39:
        return offsetof(CPUAlphaState, shadow[pr - 32]);
    case 40 ... 63:
        return offsetof(CPUAlphaState, scratch[pr - 40]);

    case 251:
        return offsetof(CPUAlphaState, alarm_expire);
    }
    return 0;
}

static ExitStatus gen_mfpr(DisasContext *ctx, TCGv va, int regno)
{
    int data = cpu_pr_data(regno);

    /* Special help for VMTIME and WALLTIME.  */
    if (regno == 250 || regno == 249) {
        void (*helper)(TCGv) = gen_helper_get_walltime;
        if (regno == 249) {
            helper = gen_helper_get_vmtime;
        }
        if (ctx->tb->cflags & CF_USE_ICOUNT) {
            gen_io_start();
            helper(va);
            gen_io_end();
            return EXIT_PC_STALE;
        } else {
            helper(va);
            return NO_EXIT;
        }
    }

    /* The basic registers are data only, and unknown registers
       are read-zero, write-ignore.  */
    if (data == 0) {
        tcg_gen_movi_i64(va, 0);
    } else if (data & PR_BYTE) {
        tcg_gen_ld8u_i64(va, cpu_env, data & ~PR_BYTE);
    } else if (data & PR_LONG) {
        tcg_gen_ld32s_i64(va, cpu_env, data & ~PR_LONG);
    } else {
        tcg_gen_ld_i64(va, cpu_env, data);
    }
    return NO_EXIT;
}

static ExitStatus gen_mtpr(DisasContext *ctx, TCGv vb, int regno)
{
    TCGv tmp;
    int data;

    switch (regno) {
    case 255:
        /* TBIA */
        gen_helper_tbia(cpu_env);
        break;

    case 254:
        /* TBIS */
        gen_helper_tbis(cpu_env, vb);
        break;

    case 253:
        /* WAIT */
        tmp = tcg_const_i64(1);
        tcg_gen_st32_i64(tmp, cpu_env, -offsetof(AlphaCPU, env) +
                         offsetof(CPUState, halted));
        return gen_excp(ctx, EXCP_HLT, 0);

    case 252:
        /* HALT */
        gen_helper_halt(vb);
        return EXIT_PC_STALE;

    case 251:
        /* ALARM */
        gen_helper_set_alarm(cpu_env, vb);
        break;

    case 7:
        /* PALBR */
        tcg_gen_st_i64(vb, cpu_env, offsetof(CPUAlphaState, palbr));
        /* Changing the PAL base register implies un-chaining all of the TBs
           that ended with a CALL_PAL.  Since the base register usually only
           changes during boot, flushing everything works well.  */
        gen_helper_tb_flush(cpu_env);
        return EXIT_PC_STALE;

    default:
        /* The basic registers are data only, and unknown registers
           are read-zero, write-ignore.  */
        data = cpu_pr_data(regno);
        if (data != 0) {
            if (data & PR_BYTE) {
                tcg_gen_st8_i64(vb, cpu_env, data & ~PR_BYTE);
            } else if (data & PR_LONG) {
                tcg_gen_st32_i64(vb, cpu_env, data & ~PR_LONG);
            } else {
                tcg_gen_st_i64(vb, cpu_env, data);
            }
        }
        break;
    }

    return NO_EXIT;
}

#endif /* !USER_ONLY */

#define REQUIRE_NO_LIT                          \
    do {                                        \
        if (real_islit) {                       \
            goto invalid_opc;                   \
        }                                       \
    } while (0)

#define REQUIRE_TB_FLAG(FLAG)                   \
    do {                                        \
        if ((ctx->tb->flags & (FLAG)) == 0) {   \
            goto invalid_opc;                   \
        }                                       \
    } while (0)

#define REQUIRE_REG_31(WHICH)                   \
    do {                                        \
        if (WHICH != 31) {                      \
            goto invalid_opc;                   \
        }                                       \
    } while (0)

static ExitStatus translate_one(DisasContext *ctx, uint32_t insn)
{
    int32_t disp21, disp16, disp12 __attribute__((unused));
    uint16_t fn11;
    uint8_t opc, ra, rb, rc, fpfn, fn7, lit;
    bool islit, real_islit;
    TCGv va, vb, vc, tmp, tmp2;
    TCGv_i32 t32;
    ExitStatus ret;

    /* Decode all instruction fields */
    opc = extract32(insn, 26, 6);
    ra = extract32(insn, 21, 5);
    rb = extract32(insn, 16, 5);
    rc = extract32(insn, 0, 5);
    real_islit = islit = extract32(insn, 12, 1);
    lit = extract32(insn, 13, 8);

    disp21 = sextract32(insn, 0, 21);
    disp16 = sextract32(insn, 0, 16);
    disp12 = sextract32(insn, 0, 12);

    fn11 = extract32(insn, 5, 11);
    fpfn = extract32(insn, 5, 6);
    fn7 = extract32(insn, 5, 7);

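    /* $31 reads as zero, so an operate instruction with rb == 31 and no
       literal can be treated as having a literal 0; this saves the
       operand helpers a special case.  */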
    if (rb == 31 && !islit) {
        islit = true;
        lit = 0;
    }

    ret = NO_EXIT;
    switch (opc) {
    case 0x00:
        /* CALL_PAL */
        ret = gen_call_pal(ctx, insn & 0x03ffffff);
        break;
    case 0x01:
        /* OPC01 */
        goto invalid_opc;
    case 0x02:
        /* OPC02 */
        goto invalid_opc;
    case 0x03:
        /* OPC03 */
        goto invalid_opc;
    case 0x04:
        /* OPC04 */
        goto invalid_opc;
    case 0x05:
        /* OPC05 */
        goto invalid_opc;
    case 0x06:
        /* OPC06 */
        goto invalid_opc;
    case 0x07:
        /* OPC07 */
        goto invalid_opc;

    case 0x09:
        /* LDAH */
        disp16 = (uint32_t)disp16 << 16;
        /* fall through */
    case 0x08:
        /* LDA */
        va = dest_gpr(ctx, ra);
        /* It's worth special-casing immediate loads.  */
        if (rb == 31) {
            tcg_gen_movi_i64(va, disp16);
        } else {
            tcg_gen_addi_i64(va, load_gpr(ctx, rb), disp16);
        }
        break;

    case 0x0A:
        /* LDBU */
        REQUIRE_TB_FLAG(TB_FLAGS_AMASK_BWX);
        gen_load_mem(ctx, &tcg_gen_qemu_ld8u, ra, rb, disp16, 0, 0);
        break;
    case 0x0B:
        /* LDQ_U */
        gen_load_mem(ctx, &tcg_gen_qemu_ld64, ra, rb, disp16, 0, 1);
        break;
    case 0x0C:
        /* LDWU */
        REQUIRE_TB_FLAG(TB_FLAGS_AMASK_BWX);
        gen_load_mem(ctx, &tcg_gen_qemu_ld16u, ra, rb, disp16, 0, 0);
        break;
    case 0x0D:
        /* STW */
        REQUIRE_TB_FLAG(TB_FLAGS_AMASK_BWX);
        gen_store_mem(ctx, &tcg_gen_qemu_st16, ra, rb, disp16, 0, 0);
        break;
    case 0x0E:
        /* STB */
        REQUIRE_TB_FLAG(TB_FLAGS_AMASK_BWX);
        gen_store_mem(ctx, &tcg_gen_qemu_st8, ra, rb, disp16, 0, 0);
        break;
    case 0x0F:
        /* STQ_U */
        gen_store_mem(ctx, &tcg_gen_qemu_st64, ra, rb, disp16, 0, 1);
        break;

    case 0x10:
        vc = dest_gpr(ctx, rc);
        vb = load_gpr_lit(ctx, rb, lit, islit);

        if (ra == 31) {
            if (fn7 == 0x00) {
                /* Special case ADDL as SEXTL.  */
                tcg_gen_ext32s_i64(vc, vb);
                break;
            }
            if (fn7 == 0x29) {
                /* Special case SUBQ as NEGQ.  */
                tcg_gen_neg_i64(vc, vb);
                break;
            }
        }

        va = load_gpr(ctx, ra);
        switch (fn7) {
        case 0x00:
            /* ADDL */
            tcg_gen_add_i64(vc, va, vb);
            tcg_gen_ext32s_i64(vc, vc);
            break;
        case 0x02:
            /* S4ADDL */
            tmp = tcg_temp_new();
            tcg_gen_shli_i64(tmp, va, 2);
            tcg_gen_add_i64(tmp, tmp, vb);
            tcg_gen_ext32s_i64(vc, tmp);
            tcg_temp_free(tmp);
            break;
        case 0x09:
            /* SUBL */
            tcg_gen_sub_i64(vc, va, vb);
            tcg_gen_ext32s_i64(vc, vc);
            break;
        case 0x0B:
            /* S4SUBL */
            tmp = tcg_temp_new();
            tcg_gen_shli_i64(tmp, va, 2);
            tcg_gen_sub_i64(tmp, tmp, vb);
            tcg_gen_ext32s_i64(vc, tmp);
            tcg_temp_free(tmp);
            break;
        case 0x0F:
            /* CMPBGE */
            gen_helper_cmpbge(vc, va, vb);
            break;
        case 0x12:
            /* S8ADDL */
            tmp = tcg_temp_new();
            tcg_gen_shli_i64(tmp, va, 3);
            tcg_gen_add_i64(tmp, tmp, vb);
            tcg_gen_ext32s_i64(vc, tmp);
            tcg_temp_free(tmp);
            break;
        case 0x1B:
            /* S8SUBL */
            tmp = tcg_temp_new();
            tcg_gen_shli_i64(tmp, va, 3);
            tcg_gen_sub_i64(tmp, tmp, vb);
            tcg_gen_ext32s_i64(vc, tmp);
            tcg_temp_free(tmp);
            break;
        case 0x1D:
            /* CMPULT */
            tcg_gen_setcond_i64(TCG_COND_LTU, vc, va, vb);
            break;
        case 0x20:
            /* ADDQ */
            tcg_gen_add_i64(vc, va, vb);
            break;
        case 0x22:
            /* S4ADDQ */
            tmp = tcg_temp_new();
            tcg_gen_shli_i64(tmp, va, 2);
            tcg_gen_add_i64(vc, tmp, vb);
            tcg_temp_free(tmp);
            break;
        case 0x29:
            /* SUBQ */
            tcg_gen_sub_i64(vc, va, vb);
            break;
        case 0x2B:
            /* S4SUBQ */
            tmp = tcg_temp_new();
            tcg_gen_shli_i64(tmp, va, 2);
            tcg_gen_sub_i64(vc, tmp, vb);
            tcg_temp_free(tmp);
            break;
        case 0x2D:
            /* CMPEQ */
            tcg_gen_setcond_i64(TCG_COND_EQ, vc, va, vb);
            break;
        case 0x32:
            /* S8ADDQ */
            tmp = tcg_temp_new();
            tcg_gen_shli_i64(tmp, va, 3);
            tcg_gen_add_i64(vc, tmp, vb);
            tcg_temp_free(tmp);
            break;
        case 0x3B:
            /* S8SUBQ */
            tmp = tcg_temp_new();
            tcg_gen_shli_i64(tmp, va, 3);
            tcg_gen_sub_i64(vc, tmp, vb);
            tcg_temp_free(tmp);
            break;
        case 0x3D:
            /* CMPULE */
            tcg_gen_setcond_i64(TCG_COND_LEU, vc, va, vb);
            break;
        case 0x40:
            /* ADDL/V */
            tmp = tcg_temp_new();
            tcg_gen_ext32s_i64(tmp, va);
            tcg_gen_ext32s_i64(vc, vb);
            tcg_gen_add_i64(tmp, tmp, vc);
            tcg_gen_ext32s_i64(vc, tmp);
            gen_helper_check_overflow(cpu_env, vc, tmp);
            tcg_temp_free(tmp);
            break;
        case 0x49:
            /* SUBL/V */
            tmp = tcg_temp_new();
            tcg_gen_ext32s_i64(tmp, va);
            tcg_gen_ext32s_i64(vc, vb);
            tcg_gen_sub_i64(tmp, tmp, vc);
            tcg_gen_ext32s_i64(vc, tmp);
            gen_helper_check_overflow(cpu_env, vc, tmp);
            tcg_temp_free(tmp);
            break;
        case 0x4D:
            /* CMPLT */
            tcg_gen_setcond_i64(TCG_COND_LT, vc, va, vb);
            break;
        case 0x60:
            /* ADDQ/V */
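            /* Signed overflow occurred iff the operands have the same
               sign (eqv sets bit 63) while the result's sign differs
               from them (xor sets bit 63); bit 63 of tmp is exactly
               that predicate, shifted down for the helper.  */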
            tmp = tcg_temp_new();
            tmp2 = tcg_temp_new();
            tcg_gen_eqv_i64(tmp, va, vb);
            tcg_gen_mov_i64(tmp2, va);
            tcg_gen_add_i64(vc, va, vb);
            tcg_gen_xor_i64(tmp2, tmp2, vc);
            tcg_gen_and_i64(tmp, tmp, tmp2);
            tcg_gen_shri_i64(tmp, tmp, 63);
            tcg_gen_movi_i64(tmp2, 0);
            gen_helper_check_overflow(cpu_env, tmp, tmp2);
            tcg_temp_free(tmp);
            tcg_temp_free(tmp2);
            break;
        case 0x69:
            /* SUBQ/V */
            tmp = tcg_temp_new();
            tmp2 = tcg_temp_new();
            tcg_gen_xor_i64(tmp, va, vb);
            tcg_gen_mov_i64(tmp2, va);
            tcg_gen_sub_i64(vc, va, vb);
            tcg_gen_xor_i64(tmp2, tmp2, vc);
            tcg_gen_and_i64(tmp, tmp, tmp2);
            tcg_gen_shri_i64(tmp, tmp, 63);
            tcg_gen_movi_i64(tmp2, 0);
            gen_helper_check_overflow(cpu_env, tmp, tmp2);
            tcg_temp_free(tmp);
            tcg_temp_free(tmp2);
            break;
        case 0x6D:
            /* CMPLE */
            tcg_gen_setcond_i64(TCG_COND_LE, vc, va, vb);
            break;
        default:
            goto invalid_opc;
        }
        break;

    case 0x11:
        if (fn7 == 0x20) {
            if (rc == 31) {
                /* Special case BIS as NOP.  */
                break;
            }
            if (ra == 31) {
                /* Special case BIS as MOV.  */
                vc = dest_gpr(ctx, rc);
                if (islit) {
                    tcg_gen_movi_i64(vc, lit);
                } else {
                    tcg_gen_mov_i64(vc, load_gpr(ctx, rb));
                }
                break;
            }
        }

        vc = dest_gpr(ctx, rc);
        vb = load_gpr_lit(ctx, rb, lit, islit);

        if (fn7 == 0x28 && ra == 31) {
            /* Special case ORNOT as NOT.  */
            tcg_gen_not_i64(vc, vb);
            break;
        }

        va = load_gpr(ctx, ra);
        switch (fn7) {
        case 0x00:
            /* AND */
            tcg_gen_and_i64(vc, va, vb);
            break;
        case 0x08:
            /* BIC */
            tcg_gen_andc_i64(vc, va, vb);
            break;
        case 0x14:
            /* CMOVLBS */
            tmp = tcg_temp_new();
            tcg_gen_andi_i64(tmp, va, 1);
            tcg_gen_movcond_i64(TCG_COND_NE, vc, tmp, load_zero(ctx),
                                vb, load_gpr(ctx, rc));
            tcg_temp_free(tmp);
            break;
        case 0x16:
            /* CMOVLBC */
            tmp = tcg_temp_new();
            tcg_gen_andi_i64(tmp, va, 1);
            tcg_gen_movcond_i64(TCG_COND_EQ, vc, tmp, load_zero(ctx),
                                vb, load_gpr(ctx, rc));
            tcg_temp_free(tmp);
            break;
        case 0x20:
            /* BIS */
            tcg_gen_or_i64(vc, va, vb);
            break;
        case 0x24:
            /* CMOVEQ */
            tcg_gen_movcond_i64(TCG_COND_EQ, vc, va, load_zero(ctx),
                                vb, load_gpr(ctx, rc));
            break;
        case 0x26:
            /* CMOVNE */
            tcg_gen_movcond_i64(TCG_COND_NE, vc, va, load_zero(ctx),
                                vb, load_gpr(ctx, rc));
            break;
        case 0x28:
            /* ORNOT */
            tcg_gen_orc_i64(vc, va, vb);
            break;
        case 0x40:
            /* XOR */
            tcg_gen_xor_i64(vc, va, vb);
            break;
        case 0x44:
            /* CMOVLT */
            tcg_gen_movcond_i64(TCG_COND_LT, vc, va, load_zero(ctx),
                                vb, load_gpr(ctx, rc));
            break;
        case 0x46:
            /* CMOVGE */
            tcg_gen_movcond_i64(TCG_COND_GE, vc, va, load_zero(ctx),
                                vb, load_gpr(ctx, rc));
            break;
        case 0x48:
            /* EQV */
            tcg_gen_eqv_i64(vc, va, vb);
            break;
        case 0x61:
            /* AMASK */
            REQUIRE_REG_31(ra);
            {
                uint64_t amask = ctx->tb->flags >> TB_FLAGS_AMASK_SHIFT;
                tcg_gen_andi_i64(vc, vb, ~amask);
            }
            break;
        case 0x64:
            /* CMOVLE */
            tcg_gen_movcond_i64(TCG_COND_LE, vc, va, load_zero(ctx),
                                vb, load_gpr(ctx, rc));
            break;
        case 0x66:
            /* CMOVGT */
            tcg_gen_movcond_i64(TCG_COND_GT, vc, va, load_zero(ctx),
                                vb, load_gpr(ctx, rc));
            break;
        case 0x6C:
            /* IMPLVER */
            REQUIRE_REG_31(ra);
            tcg_gen_movi_i64(vc, ctx->implver);
            break;
        default:
            goto invalid_opc;
        }
        break;

    case 0x12:
        vc = dest_gpr(ctx, rc);
        va = load_gpr(ctx, ra);
        switch (fn7) {
        case 0x02:
            /* MSKBL */
            gen_msk_l(ctx, vc, va, rb, islit, lit, 0x01);
            break;
        case 0x06:
            /* EXTBL */
            gen_ext_l(ctx, vc, va, rb, islit, lit, 0x01);
            break;
        case 0x0B:
            /* INSBL */
            gen_ins_l(ctx, vc, va, rb, islit, lit, 0x01);
            break;
        case 0x12:
            /* MSKWL */
            gen_msk_l(ctx, vc, va, rb, islit, lit, 0x03);
            break;
        case 0x16:
            /* EXTWL */
            gen_ext_l(ctx, vc, va, rb, islit, lit, 0x03);
            break;
        case 0x1B:
            /* INSWL */
            gen_ins_l(ctx, vc, va, rb, islit, lit, 0x03);
            break;
        case 0x22:
            /* MSKLL */
            gen_msk_l(ctx, vc, va, rb, islit, lit, 0x0f);
            break;
        case 0x26:
            /* EXTLL */
            gen_ext_l(ctx, vc, va, rb, islit, lit, 0x0f);
            break;
        case 0x2B:
            /* INSLL */
            gen_ins_l(ctx, vc, va, rb, islit, lit, 0x0f);
            break;
        case 0x30:
            /* ZAP */
            if (islit) {
                gen_zapnoti(vc, va, ~lit);
            } else {
                gen_helper_zap(vc, va, load_gpr(ctx, rb));
            }
            break;
        case 0x31:
            /* ZAPNOT */
            if (islit) {
                gen_zapnoti(vc, va, lit);
            } else {
                gen_helper_zapnot(vc, va, load_gpr(ctx, rb));
            }
            break;
        case 0x32:
            /* MSKQL */
            gen_msk_l(ctx, vc, va, rb, islit, lit, 0xff);
            break;
        case 0x34:
            /* SRL */
            if (islit) {
                tcg_gen_shri_i64(vc, va, lit & 0x3f);
            } else {
                tmp = tcg_temp_new();
                vb = load_gpr(ctx, rb);
                tcg_gen_andi_i64(tmp, vb, 0x3f);
                tcg_gen_shr_i64(vc, va, tmp);
                tcg_temp_free(tmp);
            }
            break;
        case 0x36:
            /* EXTQL */
            gen_ext_l(ctx, vc, va, rb, islit, lit, 0xff);
            break;
        case 0x39:
            /* SLL */
            if (islit) {
                tcg_gen_shli_i64(vc, va, lit & 0x3f);
            } else {
                tmp = tcg_temp_new();
                vb = load_gpr(ctx, rb);
                tcg_gen_andi_i64(tmp, vb, 0x3f);
                tcg_gen_shl_i64(vc, va, tmp);
                tcg_temp_free(tmp);
            }
            break;
        case 0x3B:
            /* INSQL */
            gen_ins_l(ctx, vc, va, rb, islit, lit, 0xff);
            break;
        case 0x3C:
            /* SRA */
            if (islit) {
                tcg_gen_sari_i64(vc, va, lit & 0x3f);
            } else {
                tmp = tcg_temp_new();
                vb = load_gpr(ctx, rb);
                tcg_gen_andi_i64(tmp, vb, 0x3f);
                tcg_gen_sar_i64(vc, va, tmp);
                tcg_temp_free(tmp);
            }
            break;
        case 0x52:
            /* MSKWH */
            gen_msk_h(ctx, vc, va, rb, islit, lit, 0x03);
            break;
        case 0x57:
            /* INSWH */
            gen_ins_h(ctx, vc, va, rb, islit, lit, 0x03);
            break;
        case 0x5A:
            /* EXTWH */
            gen_ext_h(ctx, vc, va, rb, islit, lit, 0x03);
            break;
        case 0x62:
            /* MSKLH */
            gen_msk_h(ctx, vc, va, rb, islit, lit, 0x0f);
            break;
        case 0x67:
            /* INSLH */
            gen_ins_h(ctx, vc, va, rb, islit, lit, 0x0f);
            break;
        case 0x6A:
            /* EXTLH */
            gen_ext_h(ctx, vc, va, rb, islit, lit, 0x0f);
            break;
        case 0x72:
            /* MSKQH */
            gen_msk_h(ctx, vc, va, rb, islit, lit, 0xff);
            break;
        case 0x77:
            /* INSQH */
            gen_ins_h(ctx, vc, va, rb, islit, lit, 0xff);
            break;
        case 0x7A:
            /* EXTQH */
            gen_ext_h(ctx, vc, va, rb, islit, lit, 0xff);
            break;
        default:
            goto invalid_opc;
        }
        break;

    case 0x13:
        vc = dest_gpr(ctx, rc);
        vb = load_gpr_lit(ctx, rb, lit, islit);
        va = load_gpr(ctx, ra);
        switch (fn7) {
        case 0x00:
            /* MULL */
            tcg_gen_mul_i64(vc, va, vb);
            tcg_gen_ext32s_i64(vc, vc);
            break;
        case 0x20:
            /* MULQ */
            tcg_gen_mul_i64(vc, va, vb);
            break;
        case 0x30:
            /* UMULH */
            tmp = tcg_temp_new();
            tcg_gen_mulu2_i64(tmp, vc, va, vb);
            tcg_temp_free(tmp);
            break;
        case 0x40:
            /* MULL/V */
            tmp = tcg_temp_new();
            tcg_gen_ext32s_i64(tmp, va);
            tcg_gen_ext32s_i64(vc, vb);
            tcg_gen_mul_i64(tmp, tmp, vc);
            tcg_gen_ext32s_i64(vc, tmp);
            gen_helper_check_overflow(cpu_env, vc, tmp);
            tcg_temp_free(tmp);
            break;
        case 0x60:
            /* MULQ/V */
            tmp = tcg_temp_new();
            tmp2 = tcg_temp_new();
            tcg_gen_muls2_i64(vc, tmp, va, vb);
            tcg_gen_sari_i64(tmp2, vc, 63);
            gen_helper_check_overflow(cpu_env, tmp, tmp2);
            tcg_temp_free(tmp);
            tcg_temp_free(tmp2);
            break;
        default:
            goto invalid_opc;
        }
        break;

    case 0x14:
        REQUIRE_TB_FLAG(TB_FLAGS_AMASK_FIX);
        vc = dest_fpr(ctx, rc);
        switch (fpfn) { /* fn11 & 0x3F */
        case 0x04:
            /* ITOFS */
            REQUIRE_REG_31(rb);
            t32 = tcg_temp_new_i32();
            va = load_gpr(ctx, ra);
            tcg_gen_trunc_i64_i32(t32, va);
            gen_helper_memory_to_s(vc, t32);
            tcg_temp_free_i32(t32);
            break;
        case 0x0A:
            /* SQRTF */
            REQUIRE_REG_31(ra);
            vb = load_fpr(ctx, rb);
            gen_helper_sqrtf(vc, cpu_env, vb);
            break;
        case 0x0B:
            /* SQRTS */
            REQUIRE_REG_31(ra);
            gen_sqrts(ctx, rb, rc, fn11);
            break;
        case 0x14:
            /* ITOFF */
            REQUIRE_REG_31(rb);
            t32 = tcg_temp_new_i32();
            va = load_gpr(ctx, ra);
            tcg_gen_trunc_i64_i32(t32, va);
            gen_helper_memory_to_f(vc, t32);
            tcg_temp_free_i32(t32);
            break;
        case 0x24:
            /* ITOFT */
            REQUIRE_REG_31(rb);
            va = load_gpr(ctx, ra);
            tcg_gen_mov_i64(vc, va);
            break;
        case 0x2A:
            /* SQRTG */
            REQUIRE_REG_31(ra);
            vb = load_fpr(ctx, rb);
            gen_helper_sqrtg(vc, cpu_env, vb);
            break;
        case 0x2B:
            /* SQRTT */
            REQUIRE_REG_31(ra);
            gen_sqrtt(ctx, rb, rc, fn11);
            break;
        default:
            goto invalid_opc;
        }
        break;

    case 0x15:
        /* VAX floating point */
        /* XXX: rounding mode and trap are ignored (!) */
        vc = dest_fpr(ctx, rc);
        vb = load_fpr(ctx, rb);
        va = load_fpr(ctx, ra);
        switch (fpfn) { /* fn11 & 0x3F */
        case 0x00:
            /* ADDF */
            gen_helper_addf(vc, cpu_env, va, vb);
            break;
        case 0x01:
            /* SUBF */
            gen_helper_subf(vc, cpu_env, va, vb);
            break;
        case 0x02:
            /* MULF */
            gen_helper_mulf(vc, cpu_env, va, vb);
            break;
        case 0x03:
            /* DIVF */
            gen_helper_divf(vc, cpu_env, va, vb);
            break;
        case 0x1E:
            /* CVTDG -- TODO */
            REQUIRE_REG_31(ra);
            goto invalid_opc;
        case 0x20:
            /* ADDG */
            gen_helper_addg(vc, cpu_env, va, vb);
            break;
        case 0x21:
            /* SUBG */
            gen_helper_subg(vc, cpu_env, va, vb);
            break;
        case 0x22:
            /* MULG */
            gen_helper_mulg(vc, cpu_env, va, vb);
            break;
        case 0x23:
            /* DIVG */
            gen_helper_divg(vc, cpu_env, va, vb);
            break;
        case 0x25:
            /* CMPGEQ */
            gen_helper_cmpgeq(vc, cpu_env, va, vb);
            break;
        case 0x26:
            /* CMPGLT */
            gen_helper_cmpglt(vc, cpu_env, va, vb);
            break;
        case 0x27:
            /* CMPGLE */
            gen_helper_cmpgle(vc, cpu_env, va, vb);
            break;
        case 0x2C:
            /* CVTGF */
            REQUIRE_REG_31(ra);
            gen_helper_cvtgf(vc, cpu_env, vb);
            break;
        case 0x2D:
            /* CVTGD -- TODO */
            REQUIRE_REG_31(ra);
            goto invalid_opc;
        case 0x2F:
            /* CVTGQ */
            REQUIRE_REG_31(ra);
            gen_helper_cvtgq(vc, cpu_env, vb);
            break;
        case 0x3C:
            /* CVTQF */
            REQUIRE_REG_31(ra);
            gen_helper_cvtqf(vc, cpu_env, vb);
            break;
        case 0x3E:
            /* CVTQG */
            REQUIRE_REG_31(ra);
            gen_helper_cvtqg(vc, cpu_env, vb);
            break;
        default:
            goto invalid_opc;
        }
        break;

    case 0x16:
        /* IEEE floating-point */
        switch (fpfn) { /* fn11 & 0x3F */
        case 0x00:
            /* ADDS */
            gen_adds(ctx, ra, rb, rc, fn11);
            break;
        case 0x01:
            /* SUBS */
            gen_subs(ctx, ra, rb, rc, fn11);
            break;
        case 0x02:
            /* MULS */
            gen_muls(ctx, ra, rb, rc, fn11);
            break;
        case 0x03:
            /* DIVS */
            gen_divs(ctx, ra, rb, rc, fn11);
            break;
        case 0x20:
            /* ADDT */
            gen_addt(ctx, ra, rb, rc, fn11);
            break;
        case 0x21:
            /* SUBT */
            gen_subt(ctx, ra, rb, rc, fn11);
            break;
        case 0x22:
            /* MULT */
            gen_mult(ctx, ra, rb, rc, fn11);
            break;
        case 0x23:
            /* DIVT */
            gen_divt(ctx, ra, rb, rc, fn11);
            break;
        case 0x24:
            /* CMPTUN */
            gen_cmptun(ctx, ra, rb, rc, fn11);
            break;
        case 0x25:
            /* CMPTEQ */
            gen_cmpteq(ctx, ra, rb, rc, fn11);
            break;
        case 0x26:
            /* CMPTLT */
            gen_cmptlt(ctx, ra, rb, rc, fn11);
            break;
        case 0x27:
            /* CMPTLE */
            gen_cmptle(ctx, ra, rb, rc, fn11);
            break;
        case 0x2C:
            REQUIRE_REG_31(ra);
            if (fn11 == 0x2AC || fn11 == 0x6AC) {
                /* CVTST */
                gen_cvtst(ctx, rb, rc, fn11);
            } else {
                /* CVTTS */
                gen_cvtts(ctx, rb, rc, fn11);
            }
            break;
        case 0x2F:
            /* CVTTQ */
            REQUIRE_REG_31(ra);
            gen_cvttq(ctx, rb, rc, fn11);
            break;
        case 0x3C:
            /* CVTQS */
            REQUIRE_REG_31(ra);
            gen_cvtqs(ctx, rb, rc, fn11);
            break;
        case 0x3E:
            /* CVTQT */
            REQUIRE_REG_31(ra);
            gen_cvtqt(ctx, rb, rc, fn11);
            break;
        default:
            goto invalid_opc;
        }
        break;

    case 0x17:
        switch (fn11) {
        case 0x010:
            /* CVTLQ */
            REQUIRE_REG_31(ra);
            vc = dest_fpr(ctx, rc);
            vb = load_fpr(ctx, rb);
            gen_cvtlq(vc, vb);
            break;
        case 0x020:
            /* CPYS */
            if (rc == 31) {
                /* Special case CPYS as FNOP.  */
            } else {
                vc = dest_fpr(ctx, rc);
                va = load_fpr(ctx, ra);
                if (ra == rb) {
                    /* Special case CPYS as FMOV.  */
                    tcg_gen_mov_i64(vc, va);
                } else {
                    vb = load_fpr(ctx, rb);
                    gen_cpy_mask(vc, va, vb, 0, 0x8000000000000000ULL);
                }
            }
            break;
        case 0x021:
            /* CPYSN */
            vc = dest_fpr(ctx, rc);
            vb = load_fpr(ctx, rb);
            va = load_fpr(ctx, ra);
            gen_cpy_mask(vc, va, vb, 1, 0x8000000000000000ULL);
            break;
        case 0x022:
            /* CPYSE */
            vc = dest_fpr(ctx, rc);
            vb = load_fpr(ctx, rb);
            va = load_fpr(ctx, ra);
            gen_cpy_mask(vc, va, vb, 0, 0xFFF0000000000000ULL);
            break;
        case 0x024:
            /* MT_FPCR */
            va = load_fpr(ctx, ra);
            gen_helper_store_fpcr(cpu_env, va);
            if (ctx->tb_rm == QUAL_RM_D) {
                /* Re-do the copy of the rounding mode to fp_status
                   the next time we use dynamic rounding.  */
                ctx->tb_rm = -1;
            }
            break;
        case 0x025:
            /* MF_FPCR */
            va = dest_fpr(ctx, ra);
            gen_helper_load_fpcr(va, cpu_env);
            break;
        case 0x02A:
            /* FCMOVEQ */
            gen_fcmov(ctx, TCG_COND_EQ, ra, rb, rc);
            break;
        case 0x02B:
            /* FCMOVNE */
            gen_fcmov(ctx, TCG_COND_NE, ra, rb, rc);
            break;
        case 0x02C:
            /* FCMOVLT */
            gen_fcmov(ctx, TCG_COND_LT, ra, rb, rc);
            break;
        case 0x02D:
            /* FCMOVGE */
            gen_fcmov(ctx, TCG_COND_GE, ra, rb, rc);
            break;
        case 0x02E:
            /* FCMOVLE */
            gen_fcmov(ctx, TCG_COND_LE, ra, rb, rc);
            break;
        case 0x02F:
            /* FCMOVGT */
            gen_fcmov(ctx, TCG_COND_GT, ra, rb, rc);
            break;
        case 0x030: /* CVTQL */
        case 0x130: /* CVTQL/V */
        case 0x530: /* CVTQL/SV */
            REQUIRE_REG_31(ra);
            vc = dest_fpr(ctx, rc);
            vb = load_fpr(ctx, rb);
            gen_helper_cvtql(vc, cpu_env, vb);
            gen_fp_exc_raise(rc, fn11);
            break;
        default:
            goto invalid_opc;
        }
        break;

2258 case 0x18:
2259 switch ((uint16_t)disp16) {
2260 case 0x0000:
2261 /* TRAPB */
2262 /* No-op. */
2263 break;
2264 case 0x0400:
2265 /* EXCB */
2266 /* No-op. */
2267 break;
2268 case 0x4000:
2269 /* MB */
2270 /* No-op */
2271 break;
2272 case 0x4400:
2273 /* WMB */
2274 /* No-op */
2275 break;
2276 case 0x8000:
2277 /* FETCH */
2278 /* No-op */
2279 break;
2280 case 0xA000:
2281 /* FETCH_M */
2282 /* No-op */
2283 break;
2284 case 0xC000:
2285 /* RPCC */
2286 va = dest_gpr(ctx, ra);
2287 if (ctx->tb->cflags & CF_USE_ICOUNT) {
2288 gen_io_start();
2289 gen_helper_load_pcc(va, cpu_env);
2290 gen_io_end();
2291 ret = EXIT_PC_STALE;
2292 } else {
2293 gen_helper_load_pcc(va, cpu_env);
2295 break;
2296 case 0xE000:
2297 /* RC */
2298 gen_rx(ra, 0);
2299 break;
2300 case 0xE800:
2301 /* ECB */
2302 break;
2303 case 0xF000:
2304 /* RS */
2305 gen_rx(ra, 1);
2306 break;
2307 case 0xF800:
2308 /* WH64 */
2309 /* No-op */
2310 break;
2311 case 0xFC00:
2312 /* WH64EN */
2313 /* No-op */
2314 break;
2315 default:
2316 goto invalid_opc;
2317 }
2318 break;
2320 case 0x19:
2321 /* HW_MFPR (PALcode) */
2322 #ifndef CONFIG_USER_ONLY
2323 REQUIRE_TB_FLAG(TB_FLAGS_PAL_MODE);
2324 va = dest_gpr(ctx, ra);
2325 ret = gen_mfpr(ctx, va, insn & 0xffff);
2326 break;
2327 #else
2328 goto invalid_opc;
2329 #endif
2331 case 0x1A:
2332 /* JMP, JSR, RET, JSR_COROUTINE. These only differ by the branch
2333 prediction stack action, which of course we don't implement. */
2334 vb = load_gpr(ctx, rb);
2335 tcg_gen_andi_i64(cpu_pc, vb, ~3);
2336 if (ra != 31) {
2337 tcg_gen_movi_i64(cpu_ir[ra], ctx->pc);
2338 }
2339 ret = EXIT_PC_UPDATED;
2340 break;
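/* Worked example: "ret $31,($26),1" lands here with ra == 31 and
   rb == 26, so cpu_pc becomes GPR[26] & ~3 (the low bits only steer the
   branch-prediction hint) and no return address is written; a plain
   "jsr $26,($27)" would additionally store the next PC into $26. */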
2342 case 0x1B:
2343 /* HW_LD (PALcode) */
2344 #ifndef CONFIG_USER_ONLY
2345 REQUIRE_TB_FLAG(TB_FLAGS_PAL_MODE);
2346 {
2347 TCGv addr = tcg_temp_new();
2348 vb = load_gpr(ctx, rb);
2349 va = dest_gpr(ctx, ra);
2351 tcg_gen_addi_i64(addr, vb, disp12);
2352 switch ((insn >> 12) & 0xF) {
2353 case 0x0:
2354 /* Longword physical access (hw_ldl/p) */
2355 gen_helper_ldl_phys(va, cpu_env, addr);
2356 break;
2357 case 0x1:
2358 /* Quadword physical access (hw_ldq/p) */
2359 gen_helper_ldq_phys(va, cpu_env, addr);
2360 break;
2361 case 0x2:
2362 /* Longword physical access with lock (hw_ldl_l/p) */
2363 gen_helper_ldl_l_phys(va, cpu_env, addr);
2364 break;
2365 case 0x3:
2366 /* Quadword physical access with lock (hw_ldq_l/p) */
2367 gen_helper_ldq_l_phys(va, cpu_env, addr);
2368 break;
2369 case 0x4:
2370 /* Longword virtual PTE fetch (hw_ldl/v) */
2371 goto invalid_opc;
2372 case 0x5:
2373 /* Quadword virtual PTE fetch (hw_ldq/v) */
2374 goto invalid_opc;
2376 case 0x6:
2377 /* Invalid */
2378 goto invalid_opc;
2379 case 0x7:
2380 /* Invalid */
2381 goto invalid_opc;
2382 case 0x8:
2383 /* Longword virtual access (hw_ldl) */
2384 goto invalid_opc;
2385 case 0x9:
2386 /* Quadword virtual access (hw_ldq) */
2387 goto invalid_opc;
2388 case 0xA:
2389 /* Longword virtual access with protection check (hw_ldl/w) */
2390 tcg_gen_qemu_ld_i64(va, addr, MMU_KERNEL_IDX, MO_LESL);
2391 break;
2392 case 0xB:
2393 /* Quadword virtual access with protection check (hw_ldq/w) */
2394 tcg_gen_qemu_ld_i64(va, addr, MMU_KERNEL_IDX, MO_LEQ);
2395 break;
2396 case 0xC:
2397 /* Longword virtual access with alt access mode (hw_ldl/a) */
2398 goto invalid_opc;
2399 case 0xD:
2400 /* Quadword virtual access with alt access mode (hw_ldq/a) */
2401 goto invalid_opc;
2402 case 0xE:
2403 /* Longword virtual access with alternate access mode and
2404 protection checks (hw_ldl/wa) */
2405 tcg_gen_qemu_ld_i64(va, addr, MMU_USER_IDX, MO_LESL);
2406 break;
2407 case 0xF:
2408 /* Quadword virtual access with alternate access mode and
2409 protection checks (hw_ldq/wa) */
2410 tcg_gen_qemu_ld_i64(va, addr, MMU_USER_IDX, MO_LEQ);
2411 break;
2412 }
2413 tcg_temp_free(addr);
2414 break;
2415 }
2416 #else
2417 goto invalid_opc;
2418 #endif
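/* For reference: insn bits <15:12> select the HW_LD variant decoded in
   the switch above, and disp12 is the sign-extended 12-bit displacement;
   only the physical and protection-check ("/w", "/wa") forms are
   implemented here, the rest decode as invalid. */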
2420 case 0x1C:
2421 vc = dest_gpr(ctx, rc);
2422 if (fn7 == 0x70) {
2423 /* FTOIT */
2424 REQUIRE_TB_FLAG(TB_FLAGS_AMASK_FIX);
2425 REQUIRE_REG_31(rb);
2426 va = load_fpr(ctx, ra);
2427 tcg_gen_mov_i64(vc, va);
2428 break;
2429 } else if (fn7 == 0x78) {
2430 /* FTOIS */
2431 REQUIRE_TB_FLAG(TB_FLAGS_AMASK_FIX);
2432 REQUIRE_REG_31(rb);
2433 t32 = tcg_temp_new_i32();
2434 va = load_fpr(ctx, ra);
2435 gen_helper_s_to_memory(t32, va);
2436 tcg_gen_ext_i32_i64(vc, t32);
2437 tcg_temp_free_i32(t32);
2438 break;
2439 }
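/* FTOIT/FTOIS (FIX extension) move raw FP register bits straight to an
   integer register, avoiding a store/reload through memory; FTOIS also
   repacks the S-float register format into its 32-bit memory image and
   sign-extends it.  The remaining 0x1C ops below instead take vb as a
   register or an 8-bit literal. */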
2441 vb = load_gpr_lit(ctx, rb, lit, islit);
2442 switch (fn7) {
2443 case 0x00:
2444 /* SEXTB */
2445 REQUIRE_TB_FLAG(TB_FLAGS_AMASK_BWX);
2446 REQUIRE_REG_31(ra);
2447 tcg_gen_ext8s_i64(vc, vb);
2448 break;
2449 case 0x01:
2450 /* SEXTW */
2451 REQUIRE_TB_FLAG(TB_FLAGS_AMASK_BWX);
2452 REQUIRE_REG_31(ra);
2453 tcg_gen_ext16s_i64(vc, vb);
2454 break;
2455 case 0x30:
2456 /* CTPOP */
2457 REQUIRE_TB_FLAG(TB_FLAGS_AMASK_CIX);
2458 REQUIRE_REG_31(ra);
2459 REQUIRE_NO_LIT;
2460 gen_helper_ctpop(vc, vb);
2461 break;
2462 case 0x31:
2463 /* PERR */
2464 REQUIRE_TB_FLAG(TB_FLAGS_AMASK_MVI);
2465 REQUIRE_NO_LIT;
2466 va = load_gpr(ctx, ra);
2467 gen_helper_perr(vc, va, vb);
2468 break;
2469 case 0x32:
2470 /* CTLZ */
2471 REQUIRE_TB_FLAG(TB_FLAGS_AMASK_CIX);
2472 REQUIRE_REG_31(ra);
2473 REQUIRE_NO_LIT;
2474 gen_helper_ctlz(vc, vb);
2475 break;
2476 case 0x33:
2477 /* CTTZ */
2478 REQUIRE_TB_FLAG(TB_FLAGS_AMASK_CIX);
2479 REQUIRE_REG_31(ra);
2480 REQUIRE_NO_LIT;
2481 gen_helper_cttz(vc, vb);
2482 break;
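/* CTPOP/CTLZ/CTTZ (CIX extension) share one shape: ra must be R31 and
   no literal form is defined, so only vb feeds the helper.  A rough C
   equivalent of CTPOP (illustrative only) would be:
       vc = __builtin_popcountll(vb);  */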
2483 case 0x34:
2484 /* UNPKBW */
2485 REQUIRE_TB_FLAG(TB_FLAGS_AMASK_MVI);
2486 REQUIRE_REG_31(ra);
2487 REQUIRE_NO_LIT;
2488 gen_helper_unpkbw(vc, vb);
2489 break;
2490 case 0x35:
2491 /* UNPKBL */
2492 REQUIRE_TB_FLAG(TB_FLAGS_AMASK_MVI);
2493 REQUIRE_REG_31(ra);
2494 REQUIRE_NO_LIT;
2495 gen_helper_unpkbl(vc, vb);
2496 break;
2497 case 0x36:
2498 /* PKWB */
2499 REQUIRE_TB_FLAG(TB_FLAGS_AMASK_MVI);
2500 REQUIRE_REG_31(ra);
2501 REQUIRE_NO_LIT;
2502 gen_helper_pkwb(vc, vb);
2503 break;
2504 case 0x37:
2505 /* PKLB */
2506 REQUIRE_TB_FLAG(TB_FLAGS_AMASK_MVI);
2507 REQUIRE_REG_31(ra);
2508 REQUIRE_NO_LIT;
2509 gen_helper_pklb(vc, vb);
2510 break;
2511 case 0x38:
2512 /* MINSB8 */
2513 REQUIRE_TB_FLAG(TB_FLAGS_AMASK_MVI);
2514 va = load_gpr(ctx, ra);
2515 gen_helper_minsb8(vc, va, vb);
2516 break;
2517 case 0x39:
2518 /* MINSW4 */
2519 REQUIRE_TB_FLAG(TB_FLAGS_AMASK_MVI);
2520 va = load_gpr(ctx, ra);
2521 gen_helper_minsw4(vc, va, vb);
2522 break;
2523 case 0x3A:
2524 /* MINUB8 */
2525 REQUIRE_TB_FLAG(TB_FLAGS_AMASK_MVI);
2526 va = load_gpr(ctx, ra);
2527 gen_helper_minub8(vc, va, vb);
2528 break;
2529 case 0x3B:
2530 /* MINUW4 */
2531 REQUIRE_TB_FLAG(TB_FLAGS_AMASK_MVI);
2532 va = load_gpr(ctx, ra);
2533 gen_helper_minuw4(vc, va, vb);
2534 break;
2535 case 0x3C:
2536 /* MAXUB8 */
2537 REQUIRE_TB_FLAG(TB_FLAGS_AMASK_MVI);
2538 va = load_gpr(ctx, ra);
2539 gen_helper_maxub8(vc, va, vb);
2540 break;
2541 case 0x3D:
2542 /* MAXUW4 */
2543 REQUIRE_TB_FLAG(TB_FLAGS_AMASK_MVI);
2544 va = load_gpr(ctx, ra);
2545 gen_helper_maxuw4(vc, va, vb);
2546 break;
2547 case 0x3E:
2548 /* MAXSB8 */
2549 REQUIRE_TB_FLAG(TB_FLAGS_AMASK_MVI);
2550 va = load_gpr(ctx, ra);
2551 gen_helper_maxsb8(vc, va, vb);
2552 break;
2553 case 0x3F:
2554 /* MAXSW4 */
2555 REQUIRE_TB_FLAG(TB_FLAGS_AMASK_MVI);
2556 va = load_gpr(ctx, ra);
2557 gen_helper_maxsw4(vc, va, vb);
2558 break;
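/* The min/max group (MVI extension) is lane-wise SIMD within a single
   64-bit register: the B8 forms compare eight bytes, the W4 forms four
   16-bit words, in signed (S) and unsigned (U) flavours.  MINUB8, for
   instance, keeps the smaller of each corresponding byte of va and vb. */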
2559 default:
2560 goto invalid_opc;
2561 }
2562 break;
2564 case 0x1D:
2565 /* HW_MTPR (PALcode) */
2566 #ifndef CONFIG_USER_ONLY
2567 REQUIRE_TB_FLAG(TB_FLAGS_PAL_MODE);
2568 vb = load_gpr(ctx, rb);
2569 ret = gen_mtpr(ctx, vb, insn & 0xffff);
2570 break;
2571 #else
2572 goto invalid_opc;
2573 #endif
2575 case 0x1E:
2576 /* HW_RET (PALcode) */
2577 #ifndef CONFIG_USER_ONLY
2578 REQUIRE_TB_FLAG(TB_FLAGS_PAL_MODE);
2579 if (rb == 31) {
2580 /* Pre-EV6 CPUs interpreted this as HW_REI, loading the return
2581 address from EXC_ADDR. This turns out to be useful for our
2582 emulation PALcode, so continue to accept it. */
2583 tmp = tcg_temp_new();
2584 tcg_gen_ld_i64(tmp, cpu_env, offsetof(CPUAlphaState, exc_addr));
2585 gen_helper_hw_ret(cpu_env, tmp);
2586 tcg_temp_free(tmp);
2587 } else {
2588 gen_helper_hw_ret(cpu_env, load_gpr(ctx, rb));
2589 }
2590 ret = EXIT_PC_UPDATED;
2591 break;
2592 #else
2593 goto invalid_opc;
2594 #endif
2596 case 0x1F:
2597 /* HW_ST (PALcode) */
2598 #ifndef CONFIG_USER_ONLY
2599 REQUIRE_TB_FLAG(TB_FLAGS_PAL_MODE);
2600 {
2601 TCGv addr = tcg_temp_new();
2602 va = load_gpr(ctx, ra);
2603 vb = load_gpr(ctx, rb);
2605 tcg_gen_addi_i64(addr, vb, disp12);
2606 switch ((insn >> 12) & 0xF) {
2607 case 0x0:
2608 /* Longword physical access */
2609 gen_helper_stl_phys(cpu_env, addr, va);
2610 break;
2611 case 0x1:
2612 /* Quadword physical access */
2613 gen_helper_stq_phys(cpu_env, addr, va);
2614 break;
2615 case 0x2:
2616 /* Longword physical access with lock */
2617 gen_helper_stl_c_phys(dest_gpr(ctx, ra), cpu_env, addr, va);
2618 break;
2619 case 0x3:
2620 /* Quadword physical access with lock */
2621 gen_helper_stq_c_phys(dest_gpr(ctx, ra), cpu_env, addr, va);
2622 break;
2623 case 0x4:
2624 /* Longword virtual access */
2625 goto invalid_opc;
2626 case 0x5:
2627 /* Quadword virtual access */
2628 goto invalid_opc;
2629 case 0x6:
2630 /* Invalid */
2631 goto invalid_opc;
2632 case 0x7:
2633 /* Invalid */
2634 goto invalid_opc;
2635 case 0x8:
2636 /* Invalid */
2637 goto invalid_opc;
2638 case 0x9:
2639 /* Invalid */
2640 goto invalid_opc;
2641 case 0xA:
2642 /* Invalid */
2643 goto invalid_opc;
2644 case 0xB:
2645 /* Invalid */
2646 goto invalid_opc;
2647 case 0xC:
2648 /* Longword virtual access with alternate access mode */
2649 goto invalid_opc;
2650 case 0xD:
2651 /* Quadword virtual access with alternate access mode */
2652 goto invalid_opc;
2653 case 0xE:
2654 /* Invalid */
2655 goto invalid_opc;
2656 case 0xF:
2657 /* Invalid */
2658 goto invalid_opc;
2659 }
2660 tcg_temp_free(addr);
2661 break;
2662 }
2663 #else
2664 goto invalid_opc;
2665 #endif
2666 case 0x20:
2667 /* LDF */
2668 gen_load_mem(ctx, &gen_qemu_ldf, ra, rb, disp16, 1, 0);
2669 break;
2670 case 0x21:
2671 /* LDG */
2672 gen_load_mem(ctx, &gen_qemu_ldg, ra, rb, disp16, 1, 0);
2673 break;
2674 case 0x22:
2675 /* LDS */
2676 gen_load_mem(ctx, &gen_qemu_lds, ra, rb, disp16, 1, 0);
2677 break;
2678 case 0x23:
2679 /* LDT */
2680 gen_load_mem(ctx, &tcg_gen_qemu_ld64, ra, rb, disp16, 1, 0);
2681 break;
2682 case 0x24:
2683 /* STF */
2684 gen_store_mem(ctx, &gen_qemu_stf, ra, rb, disp16, 1, 0);
2685 break;
2686 case 0x25:
2687 /* STG */
2688 gen_store_mem(ctx, &gen_qemu_stg, ra, rb, disp16, 1, 0);
2689 break;
2690 case 0x26:
2691 /* STS */
2692 gen_store_mem(ctx, &gen_qemu_sts, ra, rb, disp16, 1, 0);
2693 break;
2694 case 0x27:
2695 /* STT */
2696 gen_store_mem(ctx, &tcg_gen_qemu_st64, ra, rb, disp16, 1, 0);
2697 break;
2698 case 0x28:
2699 /* LDL */
2700 gen_load_mem(ctx, &tcg_gen_qemu_ld32s, ra, rb, disp16, 0, 0);
2701 break;
2702 case 0x29:
2703 /* LDQ */
2704 gen_load_mem(ctx, &tcg_gen_qemu_ld64, ra, rb, disp16, 0, 0);
2705 break;
2706 case 0x2A:
2707 /* LDL_L */
2708 gen_load_mem(ctx, &gen_qemu_ldl_l, ra, rb, disp16, 0, 0);
2709 break;
2710 case 0x2B:
2711 /* LDQ_L */
2712 gen_load_mem(ctx, &gen_qemu_ldq_l, ra, rb, disp16, 0, 0);
2713 break;
2714 case 0x2C:
2715 /* STL */
2716 gen_store_mem(ctx, &tcg_gen_qemu_st32, ra, rb, disp16, 0, 0);
2717 break;
2718 case 0x2D:
2719 /* STQ */
2720 gen_store_mem(ctx, &tcg_gen_qemu_st64, ra, rb, disp16, 0, 0);
2721 break;
2722 case 0x2E:
2723 /* STL_C */
2724 ret = gen_store_conditional(ctx, ra, rb, disp16, 0);
2725 break;
2726 case 0x2F:
2727 /* STQ_C */
2728 ret = gen_store_conditional(ctx, ra, rb, disp16, 1);
2729 break;
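/* The load-locked/store-conditional pairs work through the cpu_lock_*
   globals: LDL_L/LDQ_L record the address and loaded value, and
   gen_store_conditional emits the matching conditional store, returning
   an ExitStatus because the sequence may end the TB (the details differ
   between user and system emulation). */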
2730 case 0x30:
2731 /* BR */
2732 ret = gen_bdirect(ctx, ra, disp21);
2733 break;
2734 case 0x31: /* FBEQ */
2735 ret = gen_fbcond(ctx, TCG_COND_EQ, ra, disp21);
2736 break;
2737 case 0x32: /* FBLT */
2738 ret = gen_fbcond(ctx, TCG_COND_LT, ra, disp21);
2739 break;
2740 case 0x33: /* FBLE */
2741 ret = gen_fbcond(ctx, TCG_COND_LE, ra, disp21);
2742 break;
2743 case 0x34:
2744 /* BSR */
2745 ret = gen_bdirect(ctx, ra, disp21);
2746 break;
2747 case 0x35: /* FBNE */
2748 ret = gen_fbcond(ctx, TCG_COND_NE, ra, disp21);
2749 break;
2750 case 0x36: /* FBGE */
2751 ret = gen_fbcond(ctx, TCG_COND_GE, ra, disp21);
2752 break;
2753 case 0x37: /* FBGT */
2754 ret = gen_fbcond(ctx, TCG_COND_GT, ra, disp21);
2755 break;
2756 case 0x38:
2757 /* BLBC */
2758 ret = gen_bcond(ctx, TCG_COND_EQ, ra, disp21, 1);
2759 break;
2760 case 0x39:
2761 /* BEQ */
2762 ret = gen_bcond(ctx, TCG_COND_EQ, ra, disp21, 0);
2763 break;
2764 case 0x3A:
2765 /* BLT */
2766 ret = gen_bcond(ctx, TCG_COND_LT, ra, disp21, 0);
2767 break;
2768 case 0x3B:
2769 /* BLE */
2770 ret = gen_bcond(ctx, TCG_COND_LE, ra, disp21, 0);
2771 break;
2772 case 0x3C:
2773 /* BLBS */
2774 ret = gen_bcond(ctx, TCG_COND_NE, ra, disp21, 1);
2775 break;
2776 case 0x3D:
2777 /* BNE */
2778 ret = gen_bcond(ctx, TCG_COND_NE, ra, disp21, 0);
2779 break;
2780 case 0x3E:
2781 /* BGE */
2782 ret = gen_bcond(ctx, TCG_COND_GE, ra, disp21, 0);
2783 break;
2784 case 0x3F:
2785 /* BGT */
2786 ret = gen_bcond(ctx, TCG_COND_GT, ra, disp21, 0);
2787 break;
2788 invalid_opc:
2789 ret = gen_invalid(ctx);
2790 break;
2791 }
2793 return ret;
2794 }
2796 static inline void gen_intermediate_code_internal(AlphaCPU *cpu,
2797 TranslationBlock *tb,
2798 bool search_pc)
2799 {
2800 CPUState *cs = CPU(cpu);
2801 CPUAlphaState *env = &cpu->env;
2802 DisasContext ctx, *ctxp = &ctx;
2803 target_ulong pc_start;
2804 target_ulong pc_mask;
2805 uint32_t insn;
2806 CPUBreakpoint *bp;
2807 int j, lj = -1;
2808 ExitStatus ret;
2809 int num_insns;
2810 int max_insns;
2812 pc_start = tb->pc;
2814 ctx.tb = tb;
2815 ctx.pc = pc_start;
2816 ctx.mem_idx = cpu_mmu_index(env);
2817 ctx.implver = env->implver;
2818 ctx.singlestep_enabled = cs->singlestep_enabled;
2820 /* ??? Every TB begins with unset rounding mode, to be initialized on
2821 the first fp insn of the TB. Alternatively we could define a proper
2822 default for every TB (e.g. QUAL_RM_N or QUAL_RM_D) and make sure
2823 to reset the FP_STATUS to that default at the end of any TB that
2824 changes the default. We could even (gasp) dynamically figure out
2825 what default would be most efficient given the running program. */
2826 ctx.tb_rm = -1;
2827 /* Similarly for flush-to-zero. */
2828 ctx.tb_ftz = -1;
2830 num_insns = 0;
2831 max_insns = tb->cflags & CF_COUNT_MASK;
2832 if (max_insns == 0) {
2833 max_insns = CF_COUNT_MASK;
2834 }
2836 if (in_superpage(&ctx, pc_start)) {
2837 pc_mask = (1ULL << 41) - 1;
2838 } else {
2839 pc_mask = ~TARGET_PAGE_MASK;
2840 }
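/* A superpage mapping is linear, so a TB there can safely run up to the
   41-bit region boundary rather than stopping at every TARGET_PAGE --
   presumably the reason for the (1ULL << 41) - 1 mask above. */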
2842 gen_tb_start(tb);
2843 do {
2844 if (unlikely(!QTAILQ_EMPTY(&cs->breakpoints))) {
2845 QTAILQ_FOREACH(bp, &cs->breakpoints, entry) {
2846 if (bp->pc == ctx.pc) {
2847 gen_excp(&ctx, EXCP_DEBUG, 0);
2848 break;
2849 }
2850 }
2851 }
2852 if (search_pc) {
2853 j = tcg_op_buf_count();
2854 if (lj < j) {
2855 lj++;
2856 while (lj < j) {
2857 tcg_ctx.gen_opc_instr_start[lj++] = 0;
2858 }
2859 }
2860 tcg_ctx.gen_opc_pc[lj] = ctx.pc;
2861 tcg_ctx.gen_opc_instr_start[lj] = 1;
2862 tcg_ctx.gen_opc_icount[lj] = num_insns;
2863 }
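/* In search_pc mode the three gen_opc_* arrays record, per TCG op, the
   guest PC, an "instruction starts here" flag and the icount, so that
   restore_state_to_opc (at the end of this file) can map a faulting op
   back to a precise guest PC. */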
2864 if (num_insns + 1 == max_insns && (tb->cflags & CF_LAST_IO)) {
2865 gen_io_start();
2866 }
2867 insn = cpu_ldl_code(env, ctx.pc);
2868 num_insns++;
2870 if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP | CPU_LOG_TB_OP_OPT))) {
2871 tcg_gen_debug_insn_start(ctx.pc);
2872 }
2874 TCGV_UNUSED_I64(ctx.zero);
2875 TCGV_UNUSED_I64(ctx.sink);
2876 TCGV_UNUSED_I64(ctx.lit);
2878 ctx.pc += 4;
2879 ret = translate_one(ctxp, insn);
2881 if (!TCGV_IS_UNUSED_I64(ctx.sink)) {
2882 tcg_gen_discard_i64(ctx.sink);
2883 tcg_temp_free(ctx.sink);
2884 }
2885 if (!TCGV_IS_UNUSED_I64(ctx.zero)) {
2886 tcg_temp_free(ctx.zero);
2887 }
2888 if (!TCGV_IS_UNUSED_I64(ctx.lit)) {
2889 tcg_temp_free(ctx.lit);
2890 }
2892 /* If we reach a page boundary, are single stepping,
2893 or exhaust instruction count, stop generation. */
2894 if (ret == NO_EXIT
2895 && ((ctx.pc & pc_mask) == 0
2896 || tcg_op_buf_full()
2897 || num_insns >= max_insns
2898 || singlestep
2899 || ctx.singlestep_enabled)) {
2900 ret = EXIT_PC_STALE;
2901 }
2902 } while (ret == NO_EXIT);
2904 if (tb->cflags & CF_LAST_IO) {
2905 gen_io_end();
2906 }
2908 switch (ret) {
2909 case EXIT_GOTO_TB:
2910 case EXIT_NORETURN:
2911 break;
2912 case EXIT_PC_STALE:
2913 tcg_gen_movi_i64(cpu_pc, ctx.pc);
2914 /* FALLTHRU */
2915 case EXIT_PC_UPDATED:
2916 if (ctx.singlestep_enabled) {
2917 gen_excp_1(EXCP_DEBUG, 0);
2918 } else {
2919 tcg_gen_exit_tb(0);
2920 }
2921 break;
2922 default:
2923 abort();
2924 }
2926 gen_tb_end(tb, num_insns);
2928 if (search_pc) {
2929 j = tcg_op_buf_count();
2930 lj++;
2931 while (lj <= j) {
2932 tcg_ctx.gen_opc_instr_start[lj++] = 0;
2933 }
2934 } else {
2935 tb->size = ctx.pc - pc_start;
2936 tb->icount = num_insns;
2937 }
2939 #ifdef DEBUG_DISAS
2940 if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
2941 qemu_log("IN: %s\n", lookup_symbol(pc_start));
2942 log_target_disas(cs, pc_start, ctx.pc - pc_start, 1);
2943 qemu_log("\n");
2944 }
2945 #endif
2946 }
2948 void gen_intermediate_code (CPUAlphaState *env, struct TranslationBlock *tb)
2949 {
2950 gen_intermediate_code_internal(alpha_env_get_cpu(env), tb, false);
2951 }
2953 void gen_intermediate_code_pc (CPUAlphaState *env, struct TranslationBlock *tb)
2954 {
2955 gen_intermediate_code_internal(alpha_env_get_cpu(env), tb, true);
2956 }
2958 void restore_state_to_opc(CPUAlphaState *env, TranslationBlock *tb, int pc_pos)
2959 {
2960 env->pc = tcg_ctx.gen_opc_pc[pc_pos];
2961 }