/*
 *  Alpha emulation cpu translation for qemu.
 *
 *  Copyright (c) 2007 Jocelyn Mayer
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "cpu.h"
#include "disas/disas.h"
#include "qemu/host-utils.h"
#include "tcg-op.h"

#include "helper.h"
#define GEN_HELPER 1
#include "helper.h"

#undef ALPHA_DEBUG_DISAS
#define CONFIG_SOFTFLOAT_INLINE

#ifdef ALPHA_DEBUG_DISAS
#  define LOG_DISAS(...) qemu_log_mask(CPU_LOG_TB_IN_ASM, ## __VA_ARGS__)
#else
#  define LOG_DISAS(...) do { } while (0)
#endif
typedef struct DisasContext DisasContext;
struct DisasContext {
    struct TranslationBlock *tb;
    uint64_t pc;
    int mem_idx;

    /* Current rounding mode for this TB.  */
    int tb_rm;
    /* Current flush-to-zero setting for this TB.  */
    int tb_ftz;

    /* implver value for this CPU.  */
    int implver;

    bool singlestep_enabled;
};
/* Return values from translate_one, indicating the state of the TB.
   Note that zero indicates that we are not exiting the TB.  */

typedef enum {
    NO_EXIT,

    /* We have emitted one or more goto_tb.  No fixup required.  */
    EXIT_GOTO_TB,

    /* We are not using a goto_tb (for whatever reason), but have updated
       the PC (for whatever reason), so there's no need to do it again on
       exiting the TB.  */
    EXIT_PC_UPDATED,

    /* We are exiting the TB, but have neither emitted a goto_tb, nor
       updated the PC for the next instruction to be executed.  */
    EXIT_PC_STALE,

    /* We are ending the TB with a noreturn function call, e.g. longjmp.
       No following code will be executed.  */
    EXIT_NORETURN,
} ExitStatus;
/* global register indexes */
static TCGv_ptr cpu_env;
static TCGv cpu_ir[31];
static TCGv cpu_fir[31];
static TCGv cpu_pc;
static TCGv cpu_lock_addr;
static TCGv cpu_lock_st_addr;
static TCGv cpu_lock_value;
static TCGv cpu_unique;
#ifndef CONFIG_USER_ONLY
static TCGv cpu_sysval;
static TCGv cpu_usp;
#endif

/* register names */
static char cpu_reg_names[10*4+21*5 + 10*5+21*6];
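/* Sizing of cpu_reg_names above: "ir0".."ir9" take 4 bytes each with the
   NUL, "ir10".."ir30" take 5; "fir0".."fir9" take 5 and "fir10".."fir30"
   take 6 -- hence 10*4+21*5 + 10*5+21*6.  */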
#include "exec/gen-icount.h"
void alpha_translate_init(void)
{
    int i;
    char *p;
    static int done_init = 0;

    if (done_init) {
        return;
    }

    cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");

    p = cpu_reg_names;
    for (i = 0; i < 31; i++) {
        sprintf(p, "ir%d", i);
        cpu_ir[i] = tcg_global_mem_new_i64(TCG_AREG0,
                                           offsetof(CPUAlphaState, ir[i]), p);
        p += (i < 10) ? 4 : 5;

        sprintf(p, "fir%d", i);
        cpu_fir[i] = tcg_global_mem_new_i64(TCG_AREG0,
                                            offsetof(CPUAlphaState, fir[i]), p);
        p += (i < 10) ? 5 : 6;
    }

    cpu_pc = tcg_global_mem_new_i64(TCG_AREG0,
                                    offsetof(CPUAlphaState, pc), "pc");

    cpu_lock_addr = tcg_global_mem_new_i64(TCG_AREG0,
                                           offsetof(CPUAlphaState, lock_addr),
                                           "lock_addr");
    cpu_lock_st_addr = tcg_global_mem_new_i64(TCG_AREG0,
                                              offsetof(CPUAlphaState, lock_st_addr),
                                              "lock_st_addr");
    cpu_lock_value = tcg_global_mem_new_i64(TCG_AREG0,
                                            offsetof(CPUAlphaState, lock_value),
                                            "lock_value");

    cpu_unique = tcg_global_mem_new_i64(TCG_AREG0,
                                        offsetof(CPUAlphaState, unique), "unique");
#ifndef CONFIG_USER_ONLY
    cpu_sysval = tcg_global_mem_new_i64(TCG_AREG0,
                                        offsetof(CPUAlphaState, sysval), "sysval");
    cpu_usp = tcg_global_mem_new_i64(TCG_AREG0,
                                     offsetof(CPUAlphaState, usp), "usp");
#endif

    done_init = 1;
}
static void gen_excp_1(int exception, int error_code)
{
    TCGv_i32 tmp1, tmp2;

    tmp1 = tcg_const_i32(exception);
    tmp2 = tcg_const_i32(error_code);
    gen_helper_excp(cpu_env, tmp1, tmp2);
    tcg_temp_free_i32(tmp2);
    tcg_temp_free_i32(tmp1);
}

static ExitStatus gen_excp(DisasContext *ctx, int exception, int error_code)
{
    tcg_gen_movi_i64(cpu_pc, ctx->pc);
    gen_excp_1(exception, error_code);
    return EXIT_NORETURN;
}

static inline ExitStatus gen_invalid(DisasContext *ctx)
{
    return gen_excp(ctx, EXCP_OPCDEC, 0);
}
static inline void gen_qemu_ldf(TCGv t0, TCGv t1, int flags)
{
    TCGv_i32 tmp32 = tcg_temp_new_i32();
    tcg_gen_qemu_ld_i32(tmp32, t1, flags, MO_LEUL);
    gen_helper_memory_to_f(t0, tmp32);
    tcg_temp_free_i32(tmp32);
}

static inline void gen_qemu_ldg(TCGv t0, TCGv t1, int flags)
{
    TCGv tmp = tcg_temp_new();
    tcg_gen_qemu_ld_i64(tmp, t1, flags, MO_LEQ);
    gen_helper_memory_to_g(t0, tmp);
    tcg_temp_free(tmp);
}

static inline void gen_qemu_lds(TCGv t0, TCGv t1, int flags)
{
    TCGv_i32 tmp32 = tcg_temp_new_i32();
    tcg_gen_qemu_ld_i32(tmp32, t1, flags, MO_LEUL);
    gen_helper_memory_to_s(t0, tmp32);
    tcg_temp_free_i32(tmp32);
}

static inline void gen_qemu_ldl_l(TCGv t0, TCGv t1, int flags)
{
    tcg_gen_qemu_ld_i64(t0, t1, flags, MO_LESL);
    tcg_gen_mov_i64(cpu_lock_addr, t1);
    tcg_gen_mov_i64(cpu_lock_value, t0);
}

static inline void gen_qemu_ldq_l(TCGv t0, TCGv t1, int flags)
{
    tcg_gen_qemu_ld_i64(t0, t1, flags, MO_LEQ);
    tcg_gen_mov_i64(cpu_lock_addr, t1);
    tcg_gen_mov_i64(cpu_lock_value, t0);
}
static inline void gen_load_mem(DisasContext *ctx,
                                void (*tcg_gen_qemu_load)(TCGv t0, TCGv t1,
                                                          int flags),
                                int ra, int rb, int32_t disp16, int fp,
                                int clear)
{
    TCGv addr, va;

    /* LDQ_U with ra $31 is UNOP.  Other various loads are forms of
       prefetches, which we can treat as nops.  No worries about
       missed exceptions here.  */
    if (unlikely(ra == 31)) {
        return;
    }

    addr = tcg_temp_new();
    if (rb != 31) {
        tcg_gen_addi_i64(addr, cpu_ir[rb], disp16);
        if (clear) {
            tcg_gen_andi_i64(addr, addr, ~0x7);
        }
    } else {
        if (clear) {
            disp16 &= ~0x7;
        }
        tcg_gen_movi_i64(addr, disp16);
    }

    va = (fp ? cpu_fir[ra] : cpu_ir[ra]);
    tcg_gen_qemu_load(va, addr, ctx->mem_idx);

    tcg_temp_free(addr);
}
static inline void gen_qemu_stf(TCGv t0, TCGv t1, int flags)
{
    TCGv_i32 tmp32 = tcg_temp_new_i32();
    gen_helper_f_to_memory(tmp32, t0);
    tcg_gen_qemu_st_i32(tmp32, t1, flags, MO_LEUL);
    tcg_temp_free_i32(tmp32);
}

static inline void gen_qemu_stg(TCGv t0, TCGv t1, int flags)
{
    TCGv tmp = tcg_temp_new();
    gen_helper_g_to_memory(tmp, t0);
    tcg_gen_qemu_st_i64(tmp, t1, flags, MO_LEQ);
    tcg_temp_free(tmp);
}

static inline void gen_qemu_sts(TCGv t0, TCGv t1, int flags)
{
    TCGv_i32 tmp32 = tcg_temp_new_i32();
    gen_helper_s_to_memory(tmp32, t0);
    tcg_gen_qemu_st_i32(tmp32, t1, flags, MO_LEUL);
    tcg_temp_free_i32(tmp32);
}
static inline void gen_store_mem(DisasContext *ctx,
                                 void (*tcg_gen_qemu_store)(TCGv t0, TCGv t1,
                                                            int flags),
                                 int ra, int rb, int32_t disp16, int fp,
                                 int clear)
{
    TCGv addr, va;

    addr = tcg_temp_new();
    if (rb != 31) {
        tcg_gen_addi_i64(addr, cpu_ir[rb], disp16);
        if (clear) {
            tcg_gen_andi_i64(addr, addr, ~0x7);
        }
    } else {
        if (clear) {
            disp16 &= ~0x7;
        }
        tcg_gen_movi_i64(addr, disp16);
    }

    if (ra == 31) {
        va = tcg_const_i64(0);
    } else {
        va = (fp ? cpu_fir[ra] : cpu_ir[ra]);
    }
    tcg_gen_qemu_store(va, addr, ctx->mem_idx);

    tcg_temp_free(addr);
    if (ra == 31) {
        tcg_temp_free(va);
    }
}
static ExitStatus gen_store_conditional(DisasContext *ctx, int ra, int rb,
                                        int32_t disp16, int quad)
{
    TCGv addr;

    if (ra == 31) {
        /* ??? Don't bother storing anything.  The user can't tell
           the difference, since the zero register always reads zero.  */
        return NO_EXIT;
    }

#if defined(CONFIG_USER_ONLY)
    addr = cpu_lock_st_addr;
#else
    addr = tcg_temp_local_new();
#endif

    if (rb != 31) {
        tcg_gen_addi_i64(addr, cpu_ir[rb], disp16);
    } else {
        tcg_gen_movi_i64(addr, disp16);
    }

#if defined(CONFIG_USER_ONLY)
    /* ??? This is handled via a complicated version of compare-and-swap
       in the cpu_loop.  Hopefully one day we'll have a real CAS opcode
       in TCG so that this isn't necessary.  */
    return gen_excp(ctx, quad ? EXCP_STQ_C : EXCP_STL_C, ra);
#else
    /* ??? In system mode we are never multi-threaded, so CAS can be
       implemented via a non-atomic load-compare-store sequence.  */
    {
        int lab_fail, lab_done;
        TCGv val;

        lab_fail = gen_new_label();
        lab_done = gen_new_label();
        tcg_gen_brcond_i64(TCG_COND_NE, addr, cpu_lock_addr, lab_fail);

        val = tcg_temp_new();
        tcg_gen_qemu_ld_i64(val, addr, ctx->mem_idx, quad ? MO_LEQ : MO_LESL);
        tcg_gen_brcond_i64(TCG_COND_NE, val, cpu_lock_value, lab_fail);

        tcg_gen_qemu_st_i64(cpu_ir[ra], addr, ctx->mem_idx,
                            quad ? MO_LEQ : MO_LEUL);
        tcg_gen_movi_i64(cpu_ir[ra], 1);
        tcg_gen_br(lab_done);

        gen_set_label(lab_fail);
        tcg_gen_movi_i64(cpu_ir[ra], 0);

        gen_set_label(lab_done);
        tcg_gen_movi_i64(cpu_lock_addr, -1);

        tcg_temp_free(addr);
        return NO_EXIT;
    }
#endif
}
static bool in_superpage(DisasContext *ctx, int64_t addr)
{
    return ((ctx->tb->flags & TB_FLAGS_USER_MODE) == 0
            && addr < 0
            && ((addr >> 41) & 3) == 2
            && addr >> TARGET_VIRT_ADDR_SPACE_BITS == addr >> 63);
}
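/* Example: the kernel KSEG address 0xfffffc0000000000 satisfies all of
   the tests above -- it is negative, (addr >> 41) & 3 == 2, and every
   bit from TARGET_VIRT_ADDR_SPACE_BITS upward equals the sign bit.  */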
static bool use_goto_tb(DisasContext *ctx, uint64_t dest)
{
    /* Suppress goto_tb in the case of single-stepping and IO.  */
    if (ctx->singlestep_enabled || (ctx->tb->cflags & CF_LAST_IO)) {
        return false;
    }
    /* If the destination is in the superpage, the page perms can't change.  */
    if (in_superpage(ctx, dest)) {
        return true;
    }
    /* Check for the dest on the same page as the start of the TB.  */
    return ((ctx->tb->pc ^ dest) & TARGET_PAGE_MASK) == 0;
}
static ExitStatus gen_bdirect(DisasContext *ctx, int ra, int32_t disp)
{
    uint64_t dest = ctx->pc + (disp << 2);

    if (ra != 31) {
        tcg_gen_movi_i64(cpu_ir[ra], ctx->pc);
    }

    /* Notice branch-to-next; used to initialize RA with the PC.  */
    if (disp == 0) {
        return 0;
    } else if (use_goto_tb(ctx, dest)) {
        tcg_gen_goto_tb(0);
        tcg_gen_movi_i64(cpu_pc, dest);
        tcg_gen_exit_tb((uintptr_t)ctx->tb);
        return EXIT_GOTO_TB;
    } else {
        tcg_gen_movi_i64(cpu_pc, dest);
        return EXIT_PC_UPDATED;
    }
}
static ExitStatus gen_bcond_internal(DisasContext *ctx, TCGCond cond,
                                     TCGv cmp, int32_t disp)
{
    uint64_t dest = ctx->pc + (disp << 2);
    int lab_true = gen_new_label();

    if (use_goto_tb(ctx, dest)) {
        tcg_gen_brcondi_i64(cond, cmp, 0, lab_true);

        tcg_gen_goto_tb(0);
        tcg_gen_movi_i64(cpu_pc, ctx->pc);
        tcg_gen_exit_tb((uintptr_t)ctx->tb);

        gen_set_label(lab_true);
        tcg_gen_goto_tb(1);
        tcg_gen_movi_i64(cpu_pc, dest);
        tcg_gen_exit_tb((uintptr_t)ctx->tb + 1);

        return EXIT_GOTO_TB;
    } else {
        TCGv_i64 z = tcg_const_i64(0);
        TCGv_i64 d = tcg_const_i64(dest);
        TCGv_i64 p = tcg_const_i64(ctx->pc);

        tcg_gen_movcond_i64(cond, cpu_pc, cmp, z, d, p);

        tcg_temp_free_i64(z);
        tcg_temp_free_i64(d);
        tcg_temp_free_i64(p);
        return EXIT_PC_UPDATED;
    }
}
static ExitStatus gen_bcond(DisasContext *ctx, TCGCond cond, int ra,
                            int32_t disp, int mask)
{
    TCGv cmp_tmp;

    if (unlikely(ra == 31)) {
        cmp_tmp = tcg_const_i64(0);
    } else {
        cmp_tmp = tcg_temp_new();
        if (mask) {
            tcg_gen_andi_i64(cmp_tmp, cpu_ir[ra], 1);
        } else {
            tcg_gen_mov_i64(cmp_tmp, cpu_ir[ra]);
        }
    }

    return gen_bcond_internal(ctx, cond, cmp_tmp, disp);
}
/* Fold -0.0 for comparison with COND.  */

static void gen_fold_mzero(TCGCond cond, TCGv dest, TCGv src)
{
    uint64_t mzero = 1ull << 63;

    switch (cond) {
    case TCG_COND_LE:
    case TCG_COND_GT:
        /* For <= or >, the -0.0 value directly compares the way we want.  */
        tcg_gen_mov_i64(dest, src);
        break;

    case TCG_COND_EQ:
    case TCG_COND_NE:
        /* For == or !=, we can simply mask off the sign bit and compare.  */
        tcg_gen_andi_i64(dest, src, mzero - 1);
        break;

    case TCG_COND_GE:
    case TCG_COND_LT:
        /* For >= or <, map -0.0 to +0.0 via comparison and mask.  */
        tcg_gen_setcondi_i64(TCG_COND_NE, dest, src, mzero);
        tcg_gen_neg_i64(dest, dest);
        tcg_gen_and_i64(dest, dest, src);
        break;

    default:
        abort();
    }
}
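/* Example: -0.0 is the bit pattern 0x8000000000000000, i.e. mzero.  For
   EQ/NE, anding with (mzero - 1) clears only the sign bit, folding -0.0
   into +0.0.  For GE/LT, setcond/neg builds an all-ones mask for any
   value other than mzero, so the final and maps -0.0 to 0 and leaves
   every other value unchanged.  */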
static ExitStatus gen_fbcond(DisasContext *ctx, TCGCond cond, int ra,
                             int32_t disp)
{
    TCGv cmp_tmp;

    if (unlikely(ra == 31)) {
        /* Very uncommon case, but easier to optimize it to an integer
           comparison than continuing with the floating point comparison.  */
        return gen_bcond(ctx, cond, ra, disp, 0);
    }

    cmp_tmp = tcg_temp_new();
    gen_fold_mzero(cond, cmp_tmp, cpu_fir[ra]);
    return gen_bcond_internal(ctx, cond, cmp_tmp, disp);
}
static void gen_cmov(TCGCond cond, int ra, int rb, int rc,
                     int islit, uint8_t lit, int mask)
{
    TCGv_i64 c1, z, v1;

    if (unlikely(rc == 31)) {
        return;
    }

    if (ra == 31) {
        /* Very uncommon case - Do not bother to optimize.  */
        c1 = tcg_const_i64(0);
    } else if (mask) {
        c1 = tcg_const_i64(1);
        tcg_gen_and_i64(c1, c1, cpu_ir[ra]);
    } else {
        c1 = cpu_ir[ra];
    }
    if (islit) {
        v1 = tcg_const_i64(lit);
    } else {
        v1 = cpu_ir[rb];
    }
    z = tcg_const_i64(0);

    tcg_gen_movcond_i64(cond, cpu_ir[rc], c1, z, v1, cpu_ir[rc]);

    tcg_temp_free_i64(z);
    if (ra == 31 || mask) {
        tcg_temp_free_i64(c1);
    }
    if (islit) {
        tcg_temp_free_i64(v1);
    }
}
static void gen_fcmov(TCGCond cond, int ra, int rb, int rc)
{
    TCGv_i64 c1, z, v1;

    if (unlikely(rc == 31)) {
        return;
    }

    c1 = tcg_temp_new_i64();
    if (unlikely(ra == 31)) {
        tcg_gen_movi_i64(c1, 0);
    } else {
        gen_fold_mzero(cond, c1, cpu_fir[ra]);
    }
    if (rb == 31) {
        v1 = tcg_const_i64(0);
    } else {
        v1 = cpu_fir[rb];
    }
    z = tcg_const_i64(0);

    tcg_gen_movcond_i64(cond, cpu_fir[rc], c1, z, v1, cpu_fir[rc]);

    tcg_temp_free_i64(z);
    tcg_temp_free_i64(c1);
    if (rb == 31) {
        tcg_temp_free_i64(v1);
    }
}
#define QUAL_RM_N       0x080   /* Round mode nearest even */
#define QUAL_RM_C       0x000   /* Round mode chopped */
#define QUAL_RM_M       0x040   /* Round mode minus infinity */
#define QUAL_RM_D       0x0c0   /* Round mode dynamic */
#define QUAL_RM_MASK    0x0c0

#define QUAL_U          0x100   /* Underflow enable (fp output) */
#define QUAL_V          0x100   /* Overflow enable (int output) */
#define QUAL_S          0x400   /* Software completion enable */
#define QUAL_I          0x200   /* Inexact detection enable */
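/* For reference: an IEEE opcode with all software qualifiers, such as
   ADDS/SUI, sets QUAL_S | QUAL_U | QUAL_I in fn11 and uses QUAL_RM_N
   (round to nearest even) as its rounding-mode bits.  */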
static void gen_qual_roundmode(DisasContext *ctx, int fn11)
{
    TCGv_i32 tmp;

    fn11 &= QUAL_RM_MASK;
    if (fn11 == ctx->tb_rm) {
        return;
    }
    ctx->tb_rm = fn11;

    tmp = tcg_temp_new_i32();
    switch (fn11) {
    case QUAL_RM_N:
        tcg_gen_movi_i32(tmp, float_round_nearest_even);
        break;
    case QUAL_RM_C:
        tcg_gen_movi_i32(tmp, float_round_to_zero);
        break;
    case QUAL_RM_M:
        tcg_gen_movi_i32(tmp, float_round_down);
        break;
    case QUAL_RM_D:
        tcg_gen_ld8u_i32(tmp, cpu_env,
                         offsetof(CPUAlphaState, fpcr_dyn_round));
        break;
    }

#if defined(CONFIG_SOFTFLOAT_INLINE)
    /* ??? The "fpu/softfloat.h" interface is to call set_float_rounding_mode.
       With CONFIG_SOFTFLOAT that expands to an out-of-line call that just
       sets the one field.  */
    tcg_gen_st8_i32(tmp, cpu_env,
                    offsetof(CPUAlphaState, fp_status.float_rounding_mode));
#else
    gen_helper_setroundmode(tmp);
#endif

    tcg_temp_free_i32(tmp);
}
static void gen_qual_flushzero(DisasContext *ctx, int fn11)
{
    TCGv_i32 tmp;

    fn11 &= QUAL_U;
    if (fn11 == ctx->tb_ftz) {
        return;
    }
    ctx->tb_ftz = fn11;

    tmp = tcg_temp_new_i32();
    if (fn11) {
        /* Underflow is enabled, use the FPCR setting.  */
        tcg_gen_ld8u_i32(tmp, cpu_env,
                         offsetof(CPUAlphaState, fpcr_flush_to_zero));
    } else {
        /* Underflow is disabled, force flush-to-zero.  */
        tcg_gen_movi_i32(tmp, 1);
    }

#if defined(CONFIG_SOFTFLOAT_INLINE)
    tcg_gen_st8_i32(tmp, cpu_env,
                    offsetof(CPUAlphaState, fp_status.flush_to_zero));
#else
    gen_helper_setflushzero(tmp);
#endif

    tcg_temp_free_i32(tmp);
}
static TCGv gen_ieee_input(int reg, int fn11, int is_cmp)
{
    TCGv val;
    if (reg == 31) {
        val = tcg_const_i64(0);
    } else {
        if ((fn11 & QUAL_S) == 0) {
            if (is_cmp) {
                gen_helper_ieee_input_cmp(cpu_env, cpu_fir[reg]);
            } else {
                gen_helper_ieee_input(cpu_env, cpu_fir[reg]);
            }
        }
        val = tcg_temp_new();
        tcg_gen_mov_i64(val, cpu_fir[reg]);
    }
    return val;
}
static void gen_fp_exc_clear(void)
{
#if defined(CONFIG_SOFTFLOAT_INLINE)
    TCGv_i32 zero = tcg_const_i32(0);
    tcg_gen_st8_i32(zero, cpu_env,
                    offsetof(CPUAlphaState, fp_status.float_exception_flags));
    tcg_temp_free_i32(zero);
#else
    gen_helper_fp_exc_clear(cpu_env);
#endif
}
static void gen_fp_exc_raise_ignore(int rc, int fn11, int ignore)
{
    /* ??? We ought to be able to do something with imprecise exceptions.
       E.g. notice we're still in the trap shadow of something within the
       TB and do not generate the code to signal the exception; end the TB
       when an exception is forced to arrive, either by consumption of a
       register value or TRAPB or EXCB.  */
    TCGv_i32 exc = tcg_temp_new_i32();
    TCGv_i32 reg;

#if defined(CONFIG_SOFTFLOAT_INLINE)
    tcg_gen_ld8u_i32(exc, cpu_env,
                     offsetof(CPUAlphaState, fp_status.float_exception_flags));
#else
    gen_helper_fp_exc_get(exc, cpu_env);
#endif

    if (ignore) {
        tcg_gen_andi_i32(exc, exc, ~ignore);
    }

    /* ??? Pass in the regno of the destination so that the helper can
       set EXC_MASK, which contains a bitmask of destination registers
       that have caused arithmetic traps.  A simple userspace emulation
       does not require this.  We do need it for a guest kernel's entArith,
       or if we were to do something clever with imprecise exceptions.  */
    reg = tcg_const_i32(rc + 32);

    if (fn11 & QUAL_S) {
        gen_helper_fp_exc_raise_s(cpu_env, exc, reg);
    } else {
        gen_helper_fp_exc_raise(cpu_env, exc, reg);
    }

    tcg_temp_free_i32(reg);
    tcg_temp_free_i32(exc);
}

static inline void gen_fp_exc_raise(int rc, int fn11)
{
    gen_fp_exc_raise_ignore(rc, fn11, fn11 & QUAL_I ? 0 : float_flag_inexact);
}
static void gen_fcvtlq(int rb, int rc)
{
    if (unlikely(rc == 31)) {
        return;
    }
    if (unlikely(rb == 31)) {
        tcg_gen_movi_i64(cpu_fir[rc], 0);
    } else {
        TCGv tmp = tcg_temp_new();

        /* The arithmetic right shift here, plus the sign-extended mask below
           yields a sign-extended result without an explicit ext32s_i64.  */
        tcg_gen_sari_i64(tmp, cpu_fir[rb], 32);
        tcg_gen_shri_i64(cpu_fir[rc], cpu_fir[rb], 29);
        tcg_gen_andi_i64(tmp, tmp, (int32_t)0xc0000000);
        tcg_gen_andi_i64(cpu_fir[rc], cpu_fir[rc], 0x3fffffff);
        tcg_gen_or_i64(cpu_fir[rc], cpu_fir[rc], tmp);

        tcg_temp_free(tmp);
    }
}

static void gen_fcvtql(int rb, int rc)
{
    if (unlikely(rc == 31)) {
        return;
    }
    if (unlikely(rb == 31)) {
        tcg_gen_movi_i64(cpu_fir[rc], 0);
    } else {
        TCGv tmp = tcg_temp_new();

        tcg_gen_andi_i64(tmp, cpu_fir[rb], 0xC0000000);
        tcg_gen_andi_i64(cpu_fir[rc], cpu_fir[rb], 0x3FFFFFFF);
        tcg_gen_shli_i64(tmp, tmp, 32);
        tcg_gen_shli_i64(cpu_fir[rc], cpu_fir[rc], 29);
        tcg_gen_or_i64(cpu_fir[rc], cpu_fir[rc], tmp);

        tcg_temp_free(tmp);
    }
}
static void gen_fcvtql_v(DisasContext *ctx, int rb, int rc)
{
    if (rb != 31) {
        int lab = gen_new_label();
        TCGv tmp = tcg_temp_new();

        tcg_gen_ext32s_i64(tmp, cpu_fir[rb]);
        tcg_gen_brcond_i64(TCG_COND_EQ, tmp, cpu_fir[rb], lab);
        gen_excp(ctx, EXCP_ARITH, EXC_M_IOV);

        gen_set_label(lab);
    }
    gen_fcvtql(rb, rc);
}
#define FARITH2(name)                                                   \
    static inline void glue(gen_f, name)(int rb, int rc)                \
    {                                                                   \
        if (unlikely(rc == 31)) {                                       \
            return;                                                     \
        }                                                               \
        if (rb != 31) {                                                 \
            gen_helper_ ## name(cpu_fir[rc], cpu_env, cpu_fir[rb]);     \
        } else {                                                        \
            TCGv tmp = tcg_const_i64(0);                                \
            gen_helper_ ## name(cpu_fir[rc], cpu_env, tmp);             \
            tcg_temp_free(tmp);                                         \
        }                                                               \
    }

/* ??? VAX instruction qualifiers ignored.  */
FARITH2(sqrtf)
FARITH2(sqrtg)
FARITH2(cvtgf)
FARITH2(cvtgq)
FARITH2(cvtqf)
FARITH2(cvtqg)
static void gen_ieee_arith2(DisasContext *ctx,
                            void (*helper)(TCGv, TCGv_ptr, TCGv),
                            int rb, int rc, int fn11)
{
    TCGv vb;

    /* ??? This is wrong: the instruction is not a nop, it still may
       raise exceptions.  */
    if (unlikely(rc == 31)) {
        return;
    }

    gen_qual_roundmode(ctx, fn11);
    gen_qual_flushzero(ctx, fn11);
    gen_fp_exc_clear();

    vb = gen_ieee_input(rb, fn11, 0);
    helper(cpu_fir[rc], cpu_env, vb);
    tcg_temp_free(vb);

    gen_fp_exc_raise(rc, fn11);
}

#define IEEE_ARITH2(name)                                               \
    static inline void glue(gen_f, name)(DisasContext *ctx,             \
                                         int rb, int rc, int fn11)      \
    {                                                                   \
        gen_ieee_arith2(ctx, gen_helper_##name, rb, rc, fn11);          \
    }
IEEE_ARITH2(sqrts)
IEEE_ARITH2(sqrtt)
IEEE_ARITH2(cvtst)
IEEE_ARITH2(cvtts)
static void gen_fcvttq(DisasContext *ctx, int rb, int rc, int fn11)
{
    TCGv vb;
    int ignore = 0;

    /* ??? This is wrong: the instruction is not a nop, it still may
       raise exceptions.  */
    if (unlikely(rc == 31)) {
        return;
    }

    /* No need to set flushzero, since we have an integer output.  */
    gen_fp_exc_clear();
    vb = gen_ieee_input(rb, fn11, 0);

    /* Almost all integer conversions use cropped rounding, and most
       also do not have integer overflow enabled.  Special case that.  */
    switch (fn11) {
    case QUAL_RM_C:
        gen_helper_cvttq_c(cpu_fir[rc], cpu_env, vb);
        break;
    case QUAL_V | QUAL_RM_C:
    case QUAL_S | QUAL_V | QUAL_RM_C:
        ignore = float_flag_inexact;
        /* FALLTHRU */
    case QUAL_S | QUAL_V | QUAL_I | QUAL_RM_C:
        gen_helper_cvttq_svic(cpu_fir[rc], cpu_env, vb);
        break;
    default:
        gen_qual_roundmode(ctx, fn11);
        gen_helper_cvttq(cpu_fir[rc], cpu_env, vb);
        ignore |= (fn11 & QUAL_V ? 0 : float_flag_overflow);
        ignore |= (fn11 & QUAL_I ? 0 : float_flag_inexact);
        break;
    }
    tcg_temp_free(vb);

    gen_fp_exc_raise_ignore(rc, fn11, ignore);
}
static void gen_ieee_intcvt(DisasContext *ctx,
                            void (*helper)(TCGv, TCGv_ptr, TCGv),
                            int rb, int rc, int fn11)
{
    TCGv vb;

    /* ??? This is wrong: the instruction is not a nop, it still may
       raise exceptions.  */
    if (unlikely(rc == 31)) {
        return;
    }

    gen_qual_roundmode(ctx, fn11);

    if (rb == 31) {
        vb = tcg_const_i64(0);
    } else {
        vb = cpu_fir[rb];
    }

    /* The only exception that can be raised by integer conversion
       is inexact.  Thus we only need to worry about exceptions when
       inexact handling is requested.  */
    if (fn11 & QUAL_I) {
        gen_fp_exc_clear();
        helper(cpu_fir[rc], cpu_env, vb);
        gen_fp_exc_raise(rc, fn11);
    } else {
        helper(cpu_fir[rc], cpu_env, vb);
    }

    if (rb == 31) {
        tcg_temp_free(vb);
    }
}

#define IEEE_INTCVT(name)                                               \
    static inline void glue(gen_f, name)(DisasContext *ctx,             \
                                         int rb, int rc, int fn11)      \
    {                                                                   \
        gen_ieee_intcvt(ctx, gen_helper_##name, rb, rc, fn11);          \
    }
IEEE_INTCVT(cvtqs)
IEEE_INTCVT(cvtqt)
static void gen_cpys_internal(int ra, int rb, int rc, int inv_a, uint64_t mask)
{
    TCGv va, vb, vmask;
    int za = 0, zb = 0;

    if (unlikely(rc == 31)) {
        return;
    }

    vmask = tcg_const_i64(mask);

    TCGV_UNUSED_I64(va);
    if (ra == 31) {
        if (inv_a) {
            va = vmask;
        } else {
            za = 1;
        }
    } else {
        va = tcg_temp_new_i64();
        tcg_gen_mov_i64(va, cpu_fir[ra]);
        if (inv_a) {
            tcg_gen_andc_i64(va, vmask, va);
        } else {
            tcg_gen_and_i64(va, va, vmask);
        }
    }

    TCGV_UNUSED_I64(vb);
    if (rb == 31) {
        zb = 1;
    } else {
        vb = tcg_temp_new_i64();
        tcg_gen_andc_i64(vb, cpu_fir[rb], vmask);
    }

    switch (za << 1 | zb) {
    case 0 | 0:
        tcg_gen_or_i64(cpu_fir[rc], va, vb);
        break;
    case 0 | 1:
        tcg_gen_mov_i64(cpu_fir[rc], va);
        break;
    case 2 | 0:
        tcg_gen_mov_i64(cpu_fir[rc], vb);
        break;
    case 2 | 1:
        tcg_gen_movi_i64(cpu_fir[rc], 0);
        break;
    }

    tcg_temp_free(vmask);
    if (ra != 31) {
        tcg_temp_free(va);
    }
    if (rb != 31) {
        tcg_temp_free(vb);
    }
}

static inline void gen_fcpys(int ra, int rb, int rc)
{
    gen_cpys_internal(ra, rb, rc, 0, 0x8000000000000000ULL);
}

static inline void gen_fcpysn(int ra, int rb, int rc)
{
    gen_cpys_internal(ra, rb, rc, 1, 0x8000000000000000ULL);
}

static inline void gen_fcpyse(int ra, int rb, int rc)
{
    gen_cpys_internal(ra, rb, rc, 0, 0xFFF0000000000000ULL);
}
#define FARITH3(name)                                                   \
    static inline void glue(gen_f, name)(int ra, int rb, int rc)        \
    {                                                                   \
        TCGv va, vb;                                                    \
                                                                        \
        if (unlikely(rc == 31)) {                                       \
            return;                                                     \
        }                                                               \
        if (ra == 31) {                                                 \
            va = tcg_const_i64(0);                                      \
        } else {                                                        \
            va = cpu_fir[ra];                                           \
        }                                                               \
        if (rb == 31) {                                                 \
            vb = tcg_const_i64(0);                                      \
        } else {                                                        \
            vb = cpu_fir[rb];                                           \
        }                                                               \
                                                                        \
        gen_helper_ ## name(cpu_fir[rc], cpu_env, va, vb);              \
                                                                        \
        if (ra == 31) {                                                 \
            tcg_temp_free(va);                                          \
        }                                                               \
        if (rb == 31) {                                                 \
            tcg_temp_free(vb);                                          \
        }                                                               \
    }

/* ??? VAX instruction qualifiers ignored.  */
FARITH3(addf)
FARITH3(subf)
FARITH3(mulf)
FARITH3(divf)
FARITH3(addg)
FARITH3(subg)
FARITH3(mulg)
FARITH3(divg)
FARITH3(cmpgeq)
FARITH3(cmpglt)
FARITH3(cmpgle)
static void gen_ieee_arith3(DisasContext *ctx,
                            void (*helper)(TCGv, TCGv_ptr, TCGv, TCGv),
                            int ra, int rb, int rc, int fn11)
{
    TCGv va, vb;

    /* ??? This is wrong: the instruction is not a nop, it still may
       raise exceptions.  */
    if (unlikely(rc == 31)) {
        return;
    }

    gen_qual_roundmode(ctx, fn11);
    gen_qual_flushzero(ctx, fn11);
    gen_fp_exc_clear();

    va = gen_ieee_input(ra, fn11, 0);
    vb = gen_ieee_input(rb, fn11, 0);
    helper(cpu_fir[rc], cpu_env, va, vb);
    tcg_temp_free(va);
    tcg_temp_free(vb);

    gen_fp_exc_raise(rc, fn11);
}

#define IEEE_ARITH3(name)                                               \
    static inline void glue(gen_f, name)(DisasContext *ctx,             \
                                         int ra, int rb, int rc, int fn11) \
    {                                                                   \
        gen_ieee_arith3(ctx, gen_helper_##name, ra, rb, rc, fn11);      \
    }
IEEE_ARITH3(adds)
IEEE_ARITH3(subs)
IEEE_ARITH3(muls)
IEEE_ARITH3(divs)
IEEE_ARITH3(addt)
IEEE_ARITH3(subt)
IEEE_ARITH3(mult)
IEEE_ARITH3(divt)
static void gen_ieee_compare(DisasContext *ctx,
                             void (*helper)(TCGv, TCGv_ptr, TCGv, TCGv),
                             int ra, int rb, int rc, int fn11)
{
    TCGv va, vb;

    /* ??? This is wrong: the instruction is not a nop, it still may
       raise exceptions.  */
    if (unlikely(rc == 31)) {
        return;
    }

    gen_fp_exc_clear();

    va = gen_ieee_input(ra, fn11, 1);
    vb = gen_ieee_input(rb, fn11, 1);
    helper(cpu_fir[rc], cpu_env, va, vb);
    tcg_temp_free(va);
    tcg_temp_free(vb);

    gen_fp_exc_raise(rc, fn11);
}

#define IEEE_CMP3(name)                                                 \
    static inline void glue(gen_f, name)(DisasContext *ctx,             \
                                         int ra, int rb, int rc, int fn11) \
    {                                                                   \
        gen_ieee_compare(ctx, gen_helper_##name, ra, rb, rc, fn11);     \
    }
IEEE_CMP3(cmptun)
IEEE_CMP3(cmpteq)
IEEE_CMP3(cmptlt)
IEEE_CMP3(cmptle)
static inline uint64_t zapnot_mask(uint8_t lit)
{
    uint64_t mask = 0;
    int i;

    for (i = 0; i < 8; ++i) {
        if ((lit >> i) & 1)
            mask |= 0xffull << (i * 8);
    }
    return mask;
}
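/* Example: zapnot_mask(0x0f) == 0x00000000ffffffffull -- each set bit in
   the literal preserves the corresponding byte of the operand.  */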
/* Implement zapnot with an immediate operand, which expands to some
   form of immediate AND.  This is a basic building block in the
   definition of many of the other byte manipulation instructions.  */
static void gen_zapnoti(TCGv dest, TCGv src, uint8_t lit)
{
    switch (lit) {
    case 0x00:
        tcg_gen_movi_i64(dest, 0);
        break;
    case 0x01:
        tcg_gen_ext8u_i64(dest, src);
        break;
    case 0x03:
        tcg_gen_ext16u_i64(dest, src);
        break;
    case 0x0f:
        tcg_gen_ext32u_i64(dest, src);
        break;
    case 0xff:
        tcg_gen_mov_i64(dest, src);
        break;
    default:
        tcg_gen_andi_i64(dest, src, zapnot_mask(lit));
        break;
    }
}
static inline void gen_zapnot(int ra, int rb, int rc, int islit, uint8_t lit)
{
    if (unlikely(rc == 31))
        return;
    else if (unlikely(ra == 31))
        tcg_gen_movi_i64(cpu_ir[rc], 0);
    else if (islit)
        gen_zapnoti(cpu_ir[rc], cpu_ir[ra], lit);
    else
        gen_helper_zapnot(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
}

static inline void gen_zap(int ra, int rb, int rc, int islit, uint8_t lit)
{
    if (unlikely(rc == 31))
        return;
    else if (unlikely(ra == 31))
        tcg_gen_movi_i64(cpu_ir[rc], 0);
    else if (islit)
        gen_zapnoti(cpu_ir[rc], cpu_ir[ra], ~lit);
    else
        gen_helper_zap(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
}
/* EXTWH, EXTLH, EXTQH */
static void gen_ext_h(int ra, int rb, int rc, int islit,
                      uint8_t lit, uint8_t byte_mask)
{
    if (unlikely(rc == 31))
        return;
    else if (unlikely(ra == 31))
        tcg_gen_movi_i64(cpu_ir[rc], 0);
    else {
        if (islit) {
            lit = (64 - (lit & 7) * 8) & 0x3f;
            tcg_gen_shli_i64(cpu_ir[rc], cpu_ir[ra], lit);
        } else {
            TCGv tmp1 = tcg_temp_new();
            tcg_gen_andi_i64(tmp1, cpu_ir[rb], 7);
            tcg_gen_shli_i64(tmp1, tmp1, 3);
            tcg_gen_neg_i64(tmp1, tmp1);
            tcg_gen_andi_i64(tmp1, tmp1, 0x3f);
            tcg_gen_shl_i64(cpu_ir[rc], cpu_ir[ra], tmp1);
            tcg_temp_free(tmp1);
        }
        gen_zapnoti(cpu_ir[rc], cpu_ir[rc], byte_mask);
    }
}
/* EXTBL, EXTWL, EXTLL, EXTQL */
static void gen_ext_l(int ra, int rb, int rc, int islit,
                      uint8_t lit, uint8_t byte_mask)
{
    if (unlikely(rc == 31))
        return;
    else if (unlikely(ra == 31))
        tcg_gen_movi_i64(cpu_ir[rc], 0);
    else {
        if (islit) {
            tcg_gen_shri_i64(cpu_ir[rc], cpu_ir[ra], (lit & 7) * 8);
        } else {
            TCGv tmp = tcg_temp_new();
            tcg_gen_andi_i64(tmp, cpu_ir[rb], 7);
            tcg_gen_shli_i64(tmp, tmp, 3);
            tcg_gen_shr_i64(cpu_ir[rc], cpu_ir[ra], tmp);
            tcg_temp_free(tmp);
        }
        gen_zapnoti(cpu_ir[rc], cpu_ir[rc], byte_mask);
    }
}
/* INSWH, INSLH, INSQH */
static void gen_ins_h(int ra, int rb, int rc, int islit,
                      uint8_t lit, uint8_t byte_mask)
{
    if (unlikely(rc == 31))
        return;
    else if (unlikely(ra == 31) || (islit && (lit & 7) == 0))
        tcg_gen_movi_i64(cpu_ir[rc], 0);
    else {
        TCGv tmp = tcg_temp_new();

        /* The instruction description has us left-shift the byte mask
           and extract bits <15:8> and apply that zap at the end.  This
           is equivalent to simply performing the zap first and shifting
           afterward.  */
        gen_zapnoti(tmp, cpu_ir[ra], byte_mask);

        if (islit) {
            /* Note that we have handled the lit==0 case above.  */
            tcg_gen_shri_i64(cpu_ir[rc], tmp, 64 - (lit & 7) * 8);
        } else {
            TCGv shift = tcg_temp_new();

            /* If (B & 7) == 0, we need to shift by 64 and leave a zero.
               Do this portably by splitting the shift into two parts:
               shift_count-1 and 1.  Arrange for the -1 by using
               ones-complement instead of twos-complement in the negation:
               ~((B & 7) * 8) & 63.  */

            tcg_gen_andi_i64(shift, cpu_ir[rb], 7);
            tcg_gen_shli_i64(shift, shift, 3);
            tcg_gen_not_i64(shift, shift);
            tcg_gen_andi_i64(shift, shift, 0x3f);

            tcg_gen_shr_i64(cpu_ir[rc], tmp, shift);
            tcg_gen_shri_i64(cpu_ir[rc], cpu_ir[rc], 1);
            tcg_temp_free(shift);
        }
        tcg_temp_free(tmp);
    }
}
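/* Example of the split shift above: for (B & 7) == 0 the computed count
   is ~0 & 0x3f == 63, and the following constant shift by 1 makes the
   total 64, so the result is all zeros without ever issuing a shift
   count of 64 itself.  */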
/* INSBL, INSWL, INSLL, INSQL */
static void gen_ins_l(int ra, int rb, int rc, int islit,
                      uint8_t lit, uint8_t byte_mask)
{
    if (unlikely(rc == 31))
        return;
    else if (unlikely(ra == 31))
        tcg_gen_movi_i64(cpu_ir[rc], 0);
    else {
        TCGv tmp = tcg_temp_new();

        /* The instruction description has us left-shift the byte mask
           the same number of byte slots as the data and apply the zap
           at the end.  This is equivalent to simply performing the zap
           first and shifting afterward.  */
        gen_zapnoti(tmp, cpu_ir[ra], byte_mask);

        if (islit) {
            tcg_gen_shli_i64(cpu_ir[rc], tmp, (lit & 7) * 8);
        } else {
            TCGv shift = tcg_temp_new();
            tcg_gen_andi_i64(shift, cpu_ir[rb], 7);
            tcg_gen_shli_i64(shift, shift, 3);
            tcg_gen_shl_i64(cpu_ir[rc], tmp, shift);
            tcg_temp_free(shift);
        }
        tcg_temp_free(tmp);
    }
}
/* MSKWH, MSKLH, MSKQH */
static void gen_msk_h(int ra, int rb, int rc, int islit,
                      uint8_t lit, uint8_t byte_mask)
{
    if (unlikely(rc == 31))
        return;
    else if (unlikely(ra == 31))
        tcg_gen_movi_i64(cpu_ir[rc], 0);
    else if (islit) {
        gen_zapnoti(cpu_ir[rc], cpu_ir[ra], ~((byte_mask << (lit & 7)) >> 8));
    } else {
        TCGv shift = tcg_temp_new();
        TCGv mask = tcg_temp_new();

        /* The instruction description is as above, where the byte_mask
           is shifted left, and then we extract bits <15:8>.  This can be
           emulated with a right-shift on the expanded byte mask.  This
           requires extra care because for an input <2:0> == 0 we need a
           shift of 64 bits in order to generate a zero.  This is done by
           splitting the shift into two parts, the variable shift - 1
           followed by a constant 1 shift.  The code we expand below is
           equivalent to ~((B & 7) * 8) & 63.  */

        tcg_gen_andi_i64(shift, cpu_ir[rb], 7);
        tcg_gen_shli_i64(shift, shift, 3);
        tcg_gen_not_i64(shift, shift);
        tcg_gen_andi_i64(shift, shift, 0x3f);
        tcg_gen_movi_i64(mask, zapnot_mask(byte_mask));
        tcg_gen_shr_i64(mask, mask, shift);
        tcg_gen_shri_i64(mask, mask, 1);

        tcg_gen_andc_i64(cpu_ir[rc], cpu_ir[ra], mask);

        tcg_temp_free(mask);
        tcg_temp_free(shift);
    }
}
/* MSKBL, MSKWL, MSKLL, MSKQL */
static void gen_msk_l(int ra, int rb, int rc, int islit,
                      uint8_t lit, uint8_t byte_mask)
{
    if (unlikely(rc == 31))
        return;
    else if (unlikely(ra == 31))
        tcg_gen_movi_i64(cpu_ir[rc], 0);
    else if (islit) {
        gen_zapnoti(cpu_ir[rc], cpu_ir[ra], ~(byte_mask << (lit & 7)));
    } else {
        TCGv shift = tcg_temp_new();
        TCGv mask = tcg_temp_new();

        tcg_gen_andi_i64(shift, cpu_ir[rb], 7);
        tcg_gen_shli_i64(shift, shift, 3);
        tcg_gen_movi_i64(mask, zapnot_mask(byte_mask));
        tcg_gen_shl_i64(mask, mask, shift);

        tcg_gen_andc_i64(cpu_ir[rc], cpu_ir[ra], mask);

        tcg_temp_free(mask);
        tcg_temp_free(shift);
    }
}
/* Code to call arith3 helpers */
#define ARITH3(name)                                                    \
    static inline void glue(gen_, name)(int ra, int rb, int rc, int islit, \
                                        uint8_t lit)                    \
    {                                                                   \
        if (unlikely(rc == 31))                                         \
            return;                                                     \
                                                                        \
        if (ra != 31) {                                                 \
            if (islit) {                                                \
                TCGv tmp = tcg_const_i64(lit);                          \
                gen_helper_ ## name(cpu_ir[rc], cpu_ir[ra], tmp);       \
                tcg_temp_free(tmp);                                     \
            } else                                                      \
                gen_helper_ ## name(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]); \
        } else {                                                        \
            TCGv tmp1 = tcg_const_i64(0);                               \
            if (islit) {                                                \
                TCGv tmp2 = tcg_const_i64(lit);                         \
                gen_helper_ ## name(cpu_ir[rc], tmp1, tmp2);            \
                tcg_temp_free(tmp2);                                    \
            } else                                                      \
                gen_helper_ ## name(cpu_ir[rc], tmp1, cpu_ir[rb]);      \
            tcg_temp_free(tmp1);                                        \
        }                                                               \
    }
ARITH3(cmpbge)
ARITH3(minub8)
ARITH3(minsb8)
ARITH3(minuw4)
ARITH3(minsw4)
ARITH3(maxub8)
ARITH3(maxsb8)
ARITH3(maxuw4)
ARITH3(maxsw4)
ARITH3(perr)
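/* Each ARITH3(name) expansion defines a gen_<name>(ra, rb, rc, islit, lit)
   wrapper around the corresponding helper; e.g. ARITH3(cmpbge) provides
   the gen_cmpbge used by the CMPBGE case in translate_one below.  */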
/* Code to call arith3 helpers */
#define ARITH3_EX(name)                                                 \
    static inline void glue(gen_, name)(int ra, int rb, int rc,         \
                                        int islit, uint8_t lit)         \
    {                                                                   \
        if (unlikely(rc == 31)) {                                       \
            return;                                                     \
        }                                                               \
        if (ra != 31) {                                                 \
            if (islit) {                                                \
                TCGv tmp = tcg_const_i64(lit);                          \
                gen_helper_ ## name(cpu_ir[rc], cpu_env,                \
                                    cpu_ir[ra], tmp);                   \
                tcg_temp_free(tmp);                                     \
            } else {                                                    \
                gen_helper_ ## name(cpu_ir[rc], cpu_env,                \
                                    cpu_ir[ra], cpu_ir[rb]);            \
            }                                                           \
        } else {                                                        \
            TCGv tmp1 = tcg_const_i64(0);                               \
            if (islit) {                                                \
                TCGv tmp2 = tcg_const_i64(lit);                         \
                gen_helper_ ## name(cpu_ir[rc], cpu_env, tmp1, tmp2);   \
                tcg_temp_free(tmp2);                                    \
            } else {                                                    \
                gen_helper_ ## name(cpu_ir[rc], cpu_env, tmp1, cpu_ir[rb]); \
            }                                                           \
            tcg_temp_free(tmp1);                                        \
        }                                                               \
    }
ARITH3_EX(addlv)
ARITH3_EX(sublv)
ARITH3_EX(addqv)
ARITH3_EX(subqv)
ARITH3_EX(mullv)
ARITH3_EX(mulqv)
#define MVIOP2(name)                                            \
    static inline void glue(gen_, name)(int rb, int rc)         \
    {                                                           \
        if (unlikely(rc == 31))                                 \
            return;                                             \
        if (unlikely(rb == 31))                                 \
            tcg_gen_movi_i64(cpu_ir[rc], 0);                    \
        else                                                    \
            gen_helper_ ## name(cpu_ir[rc], cpu_ir[rb]);        \
    }
MVIOP2(pklb)
MVIOP2(pkwb)
MVIOP2(unpkbl)
MVIOP2(unpkbw)
static void gen_cmp(TCGCond cond, int ra, int rb, int rc,
                    int islit, uint8_t lit)
{
    TCGv va, vb;

    if (unlikely(rc == 31)) {
        return;
    }

    if (ra == 31) {
        va = tcg_const_i64(0);
    } else {
        va = cpu_ir[ra];
    }
    if (islit) {
        vb = tcg_const_i64(lit);
    } else {
        vb = cpu_ir[rb];
    }

    tcg_gen_setcond_i64(cond, cpu_ir[rc], va, vb);

    if (ra == 31) {
        tcg_temp_free(va);
    }
    if (islit) {
        tcg_temp_free(vb);
    }
}
static void gen_rx(int ra, int set)
{
    TCGv_i32 tmp;

    if (ra != 31) {
        tcg_gen_ld8u_i64(cpu_ir[ra], cpu_env, offsetof(CPUAlphaState, intr_flag));
    }

    tmp = tcg_const_i32(set);
    tcg_gen_st8_i32(tmp, cpu_env, offsetof(CPUAlphaState, intr_flag));
    tcg_temp_free_i32(tmp);
}
static ExitStatus gen_call_pal(DisasContext *ctx, int palcode)
{
    /* We're emulating OSF/1 PALcode.  Many of these are trivial access
       to internal cpu registers.  */

    /* Unprivileged PAL call */
    if (palcode >= 0x80 && palcode < 0xC0) {
        switch (palcode) {
        case 0x86:
            /* IMB */
            /* No-op inside QEMU.  */
            break;
        case 0x9E:
            /* RDUNIQUE */
            tcg_gen_mov_i64(cpu_ir[IR_V0], cpu_unique);
            break;
        case 0x9F:
            /* WRUNIQUE */
            tcg_gen_mov_i64(cpu_unique, cpu_ir[IR_A0]);
            break;
        default:
            palcode &= 0xbf;
            goto do_call_pal;
        }
        return NO_EXIT;
    }

#ifndef CONFIG_USER_ONLY
    /* Privileged PAL code */
    if (palcode < 0x40 && (ctx->tb->flags & TB_FLAGS_USER_MODE) == 0) {
        switch (palcode) {
        case 0x01:
            /* CFLUSH */
            /* No-op inside QEMU.  */
            break;
        case 0x02:
            /* DRAINA */
            /* No-op inside QEMU.  */
            break;
        case 0x2D:
            /* WRVPTPTR */
            tcg_gen_st_i64(cpu_ir[IR_A0], cpu_env, offsetof(CPUAlphaState, vptptr));
            break;
        case 0x31:
            /* WRVAL */
            tcg_gen_mov_i64(cpu_sysval, cpu_ir[IR_A0]);
            break;
        case 0x32:
            /* RDVAL */
            tcg_gen_mov_i64(cpu_ir[IR_V0], cpu_sysval);
            break;

        case 0x35: {
            /* SWPIPL */
            TCGv tmp;

            /* Note that we already know we're in kernel mode, so we know
               that PS only contains the 3 IPL bits.  */
            tcg_gen_ld8u_i64(cpu_ir[IR_V0], cpu_env, offsetof(CPUAlphaState, ps));

            /* But make sure and store only the 3 IPL bits from the user.  */
            tmp = tcg_temp_new();
            tcg_gen_andi_i64(tmp, cpu_ir[IR_A0], PS_INT_MASK);
            tcg_gen_st8_i64(tmp, cpu_env, offsetof(CPUAlphaState, ps));
            tcg_temp_free(tmp);
            break;
        }

        case 0x36:
            /* RDPS */
            tcg_gen_ld8u_i64(cpu_ir[IR_V0], cpu_env, offsetof(CPUAlphaState, ps));
            break;
        case 0x38:
            /* WRUSP */
            tcg_gen_mov_i64(cpu_usp, cpu_ir[IR_A0]);
            break;
        case 0x3A:
            /* RDUSP */
            tcg_gen_mov_i64(cpu_ir[IR_V0], cpu_usp);
            break;
        case 0x3C:
            /* WHAMI */
            tcg_gen_ld32s_i64(cpu_ir[IR_V0], cpu_env,
                -offsetof(AlphaCPU, env) + offsetof(CPUState, cpu_index));
            break;

        default:
            palcode &= 0x3f;
            goto do_call_pal;
        }
        return NO_EXIT;
    }
#endif
    return gen_invalid(ctx);

 do_call_pal:
#ifdef CONFIG_USER_ONLY
    return gen_excp(ctx, EXCP_CALL_PAL, palcode);
#else
    {
        TCGv pc = tcg_const_i64(ctx->pc);
        TCGv entry = tcg_const_i64(palcode & 0x80
                                   ? 0x2000 + (palcode - 0x80) * 64
                                   : 0x1000 + palcode * 64);

        gen_helper_call_pal(cpu_env, pc, entry);

        tcg_temp_free(entry);
        tcg_temp_free(pc);

        /* Since the destination is running in PALmode, we don't really
           need the page permissions check.  We'll see the existence of
           the page when we create the TB, and we'll flush all TBs if
           we change the PAL base register.  */
        if (!ctx->singlestep_enabled && !(ctx->tb->cflags & CF_LAST_IO)) {
            tcg_gen_goto_tb(0);
            tcg_gen_exit_tb((uintptr_t)ctx->tb);
            return EXIT_GOTO_TB;
        }

        return EXIT_PC_UPDATED;
    }
#endif
}
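/* Example of the entry computation in do_call_pal above: CALL_PAL 0x83
   (callsys under the emulated OSF/1 PALcode) is an unprivileged call,
   so it vectors to offset 0x2000 + (0x83 - 0x80) * 64 == 0x20c0 from
   the PAL base.  */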
#ifndef CONFIG_USER_ONLY

#define PR_BYTE         0x100000
#define PR_LONG         0x200000

static int cpu_pr_data(int pr)
{
    switch (pr) {
    case  0: return offsetof(CPUAlphaState, ps) | PR_BYTE;
    case  1: return offsetof(CPUAlphaState, fen) | PR_BYTE;
    case  2: return offsetof(CPUAlphaState, pcc_ofs) | PR_LONG;
    case  3: return offsetof(CPUAlphaState, trap_arg0);
    case  4: return offsetof(CPUAlphaState, trap_arg1);
    case  5: return offsetof(CPUAlphaState, trap_arg2);
    case  6: return offsetof(CPUAlphaState, exc_addr);
    case  7: return offsetof(CPUAlphaState, palbr);
    case  8: return offsetof(CPUAlphaState, ptbr);
    case  9: return offsetof(CPUAlphaState, vptptr);
    case 10: return offsetof(CPUAlphaState, unique);
    case 11: return offsetof(CPUAlphaState, sysval);
    case 12: return offsetof(CPUAlphaState, usp);

    case 32 ... 39:
        return offsetof(CPUAlphaState, shadow[pr - 32]);
    case 40 ... 63:
        return offsetof(CPUAlphaState, scratch[pr - 40]);

    case 251:
        return offsetof(CPUAlphaState, alarm_expire);
    }
    return 0;
}
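/* PR_BYTE and PR_LONG sit well above any real CPUAlphaState offset, so
   they can be or'ed into the return value as access-size tags; e.g.
   cpu_pr_data(2) marks pcc_ofs as a 32-bit field, and gen_mfpr/gen_mtpr
   below strip the tag to pick an 8-bit, 32-bit or 64-bit access.  */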
static ExitStatus gen_mfpr(int ra, int regno)
{
    int data = cpu_pr_data(regno);

    /* In our emulated PALcode, these processor registers have no
       side effects from reading.  */
    if (ra == 31) {
        return NO_EXIT;
    }

    /* Special help for VMTIME and WALLTIME.  */
    if (regno == 250 || regno == 249) {
        void (*helper)(TCGv) = gen_helper_get_walltime;
        if (regno == 249) {
            helper = gen_helper_get_vmtime;
        }
        if (use_icount) {
            gen_io_start();
            helper(cpu_ir[ra]);
            gen_io_end();
            return EXIT_PC_STALE;
        } else {
            helper(cpu_ir[ra]);
            return NO_EXIT;
        }
    }

    /* The basic registers are data only, and unknown registers
       are read-zero, write-ignore.  */
    if (data == 0) {
        tcg_gen_movi_i64(cpu_ir[ra], 0);
    } else if (data & PR_BYTE) {
        tcg_gen_ld8u_i64(cpu_ir[ra], cpu_env, data & ~PR_BYTE);
    } else if (data & PR_LONG) {
        tcg_gen_ld32s_i64(cpu_ir[ra], cpu_env, data & ~PR_LONG);
    } else {
        tcg_gen_ld_i64(cpu_ir[ra], cpu_env, data);
    }
    return NO_EXIT;
}
static ExitStatus gen_mtpr(DisasContext *ctx, int rb, int regno)
{
    TCGv tmp;
    int data;

    if (rb == 31) {
        tmp = tcg_const_i64(0);
    } else {
        tmp = cpu_ir[rb];
    }

    switch (regno) {
    case 255:
        /* TBIA */
        gen_helper_tbia(cpu_env);
        break;

    case 254:
        /* TBIS */
        gen_helper_tbis(cpu_env, tmp);
        break;

    case 253:
        /* WAIT */
        tmp = tcg_const_i64(1);
        tcg_gen_st32_i64(tmp, cpu_env, -offsetof(AlphaCPU, env) +
                         offsetof(CPUState, halted));
        return gen_excp(ctx, EXCP_HLT, 0);

    case 252:
        /* HALT */
        gen_helper_halt(tmp);
        return EXIT_PC_STALE;

    case 251:
        /* ALARM */
        gen_helper_set_alarm(cpu_env, tmp);
        break;

    case 7:
        /* PALBR */
        tcg_gen_st_i64(tmp, cpu_env, offsetof(CPUAlphaState, palbr));
        /* Changing the PAL base register implies un-chaining all of the TBs
           that ended with a CALL_PAL.  Since the base register usually only
           changes during boot, flushing everything works well.  */
        gen_helper_tb_flush(cpu_env);
        return EXIT_PC_STALE;

    default:
        /* The basic registers are data only, and unknown registers
           are read-zero, write-ignore.  */
        data = cpu_pr_data(regno);
        if (data != 0) {
            if (data & PR_BYTE) {
                tcg_gen_st8_i64(tmp, cpu_env, data & ~PR_BYTE);
            } else if (data & PR_LONG) {
                tcg_gen_st32_i64(tmp, cpu_env, data & ~PR_LONG);
            } else {
                tcg_gen_st_i64(tmp, cpu_env, data);
            }
        }
        break;
    }

    if (rb == 31) {
        tcg_temp_free(tmp);
    }

    return NO_EXIT;
}
#endif /* !USER_ONLY */
static ExitStatus translate_one(DisasContext *ctx, uint32_t insn)
{
    uint32_t palcode;
    int32_t disp21, disp16;
#ifndef CONFIG_USER_ONLY
    int32_t disp12;
#endif
    uint16_t fn11;
    uint8_t opc, ra, rb, rc, fpfn, fn7, islit, real_islit;
    uint8_t lit;
    ExitStatus ret;

    /* Decode all instruction fields */
    opc = insn >> 26;
    ra = (insn >> 21) & 0x1F;
    rb = (insn >> 16) & 0x1F;
    rc = insn & 0x1F;
    real_islit = islit = (insn >> 12) & 1;
    if (rb == 31 && !islit) {
        islit = 1;
        lit = 0;
    } else
        lit = (insn >> 13) & 0xFF;
    palcode = insn & 0x03FFFFFF;
    disp21 = ((int32_t)((insn & 0x001FFFFF) << 11)) >> 11;
    disp16 = (int16_t)(insn & 0x0000FFFF);
#ifndef CONFIG_USER_ONLY
    disp12 = (int32_t)((insn & 0x00000FFF) << 20) >> 20;
#endif
    fn11 = (insn >> 5) & 0x000007FF;
    fpfn = fn11 & 0x3F;
    fn7 = (insn >> 5) & 0x0000007F;
    LOG_DISAS("opc %02x ra %2d rb %2d rc %2d disp16 %6d\n",
              opc, ra, rb, rc, disp16);
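    /* Example of the disp21 decode above: the 21-bit branch displacement
       is moved to the top of a 32-bit value and arithmetically shifted
       back, sign-extending it, so a raw field of 0x1FFFFF decodes to -1.  */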
    ret = NO_EXIT;
    switch (opc) {
    case 0x00:
        /* CALL_PAL */
        ret = gen_call_pal(ctx, palcode);
        break;
    case 0x01:
        /* OPC01 */
        goto invalid_opc;
    case 0x02:
        /* OPC02 */
        goto invalid_opc;
    case 0x03:
        /* OPC03 */
        goto invalid_opc;
    case 0x04:
        /* OPC04 */
        goto invalid_opc;
    case 0x05:
        /* OPC05 */
        goto invalid_opc;
    case 0x06:
        /* OPC06 */
        goto invalid_opc;
    case 0x07:
        /* OPC07 */
        goto invalid_opc;
    case 0x08:
        /* LDA */
        if (likely(ra != 31)) {
            if (rb != 31)
                tcg_gen_addi_i64(cpu_ir[ra], cpu_ir[rb], disp16);
            else
                tcg_gen_movi_i64(cpu_ir[ra], disp16);
        }
        break;
    case 0x09:
        /* LDAH */
        if (likely(ra != 31)) {
            if (rb != 31)
                tcg_gen_addi_i64(cpu_ir[ra], cpu_ir[rb], disp16 << 16);
            else
                tcg_gen_movi_i64(cpu_ir[ra], disp16 << 16);
        }
        break;
    case 0x0A:
        /* LDBU */
        if (ctx->tb->flags & TB_FLAGS_AMASK_BWX) {
            gen_load_mem(ctx, &tcg_gen_qemu_ld8u, ra, rb, disp16, 0, 0);
            break;
        }
        goto invalid_opc;
    case 0x0B:
        /* LDQ_U */
        gen_load_mem(ctx, &tcg_gen_qemu_ld64, ra, rb, disp16, 0, 1);
        break;
    case 0x0C:
        /* LDWU */
        if (ctx->tb->flags & TB_FLAGS_AMASK_BWX) {
            gen_load_mem(ctx, &tcg_gen_qemu_ld16u, ra, rb, disp16, 0, 0);
            break;
        }
        goto invalid_opc;
    case 0x0D:
        /* STW */
        gen_store_mem(ctx, &tcg_gen_qemu_st16, ra, rb, disp16, 0, 0);
        break;
    case 0x0E:
        /* STB */
        gen_store_mem(ctx, &tcg_gen_qemu_st8, ra, rb, disp16, 0, 0);
        break;
    case 0x0F:
        /* STQ_U */
        gen_store_mem(ctx, &tcg_gen_qemu_st64, ra, rb, disp16, 0, 1);
        break;
    case 0x10:
        switch (fn7) {
        case 0x00:
            /* ADDL */
            if (likely(rc != 31)) {
                if (ra != 31) {
                    if (islit) {
                        tcg_gen_addi_i64(cpu_ir[rc], cpu_ir[ra], lit);
                        tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rc]);
                    } else {
                        tcg_gen_add_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
                        tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rc]);
                    }
                } else {
                    if (islit)
                        tcg_gen_movi_i64(cpu_ir[rc], lit);
                    else
                        tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rb]);
                }
            }
            break;
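            /* ADDL produces a longword result sign-extended to 64 bits --
               hence the ext32s after the add; e.g. 0x7fffffff + 1 yields
               0xffffffff80000000.  */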
        case 0x02:
            /* S4ADDL */
            if (likely(rc != 31)) {
                if (ra != 31) {
                    TCGv tmp = tcg_temp_new();
                    tcg_gen_shli_i64(tmp, cpu_ir[ra], 2);
                    if (islit)
                        tcg_gen_addi_i64(tmp, tmp, lit);
                    else
                        tcg_gen_add_i64(tmp, tmp, cpu_ir[rb]);
                    tcg_gen_ext32s_i64(cpu_ir[rc], tmp);
                    tcg_temp_free(tmp);
                } else {
                    if (islit)
                        tcg_gen_movi_i64(cpu_ir[rc], lit);
                    else
                        tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rb]);
                }
            }
            break;
        case 0x09:
            /* SUBL */
            if (likely(rc != 31)) {
                if (ra != 31) {
                    if (islit)
                        tcg_gen_subi_i64(cpu_ir[rc], cpu_ir[ra], lit);
                    else
                        tcg_gen_sub_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
                    tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rc]);
                } else {
                    if (islit)
                        tcg_gen_movi_i64(cpu_ir[rc], -lit);
                    else {
                        tcg_gen_neg_i64(cpu_ir[rc], cpu_ir[rb]);
                        tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rc]);
                    }
                }
            }
            break;
        case 0x0B:
            /* S4SUBL */
            if (likely(rc != 31)) {
                if (ra != 31) {
                    TCGv tmp = tcg_temp_new();
                    tcg_gen_shli_i64(tmp, cpu_ir[ra], 2);
                    if (islit)
                        tcg_gen_subi_i64(tmp, tmp, lit);
                    else
                        tcg_gen_sub_i64(tmp, tmp, cpu_ir[rb]);
                    tcg_gen_ext32s_i64(cpu_ir[rc], tmp);
                    tcg_temp_free(tmp);
                } else {
                    if (islit)
                        tcg_gen_movi_i64(cpu_ir[rc], -lit);
                    else {
                        tcg_gen_neg_i64(cpu_ir[rc], cpu_ir[rb]);
                        tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rc]);
                    }
                }
            }
            break;
        case 0x0F:
            /* CMPBGE */
            gen_cmpbge(ra, rb, rc, islit, lit);
            break;
        case 0x12:
            /* S8ADDL */
            if (likely(rc != 31)) {
                if (ra != 31) {
                    TCGv tmp = tcg_temp_new();
                    tcg_gen_shli_i64(tmp, cpu_ir[ra], 3);
                    if (islit)
                        tcg_gen_addi_i64(tmp, tmp, lit);
                    else
                        tcg_gen_add_i64(tmp, tmp, cpu_ir[rb]);
                    tcg_gen_ext32s_i64(cpu_ir[rc], tmp);
                    tcg_temp_free(tmp);
                } else {
                    if (islit)
                        tcg_gen_movi_i64(cpu_ir[rc], lit);
                    else
                        tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rb]);
                }
            }
            break;
        case 0x1B:
            /* S8SUBL */
            if (likely(rc != 31)) {
                if (ra != 31) {
                    TCGv tmp = tcg_temp_new();
                    tcg_gen_shli_i64(tmp, cpu_ir[ra], 3);
                    if (islit)
                        tcg_gen_subi_i64(tmp, tmp, lit);
                    else
                        tcg_gen_sub_i64(tmp, tmp, cpu_ir[rb]);
                    tcg_gen_ext32s_i64(cpu_ir[rc], tmp);
                    tcg_temp_free(tmp);
                } else {
                    if (islit)
                        tcg_gen_movi_i64(cpu_ir[rc], -lit);
                    else {
                        tcg_gen_neg_i64(cpu_ir[rc], cpu_ir[rb]);
                        tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rc]);
                    }
                }
            }
            break;
        case 0x1D:
            /* CMPULT */
            gen_cmp(TCG_COND_LTU, ra, rb, rc, islit, lit);
            break;
        case 0x20:
            /* ADDQ */
            if (likely(rc != 31)) {
                if (ra != 31) {
                    if (islit)
                        tcg_gen_addi_i64(cpu_ir[rc], cpu_ir[ra], lit);
                    else
                        tcg_gen_add_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
                } else {
                    if (islit)
                        tcg_gen_movi_i64(cpu_ir[rc], lit);
                    else
                        tcg_gen_mov_i64(cpu_ir[rc], cpu_ir[rb]);
                }
            }
            break;
        case 0x22:
            /* S4ADDQ */
            if (likely(rc != 31)) {
                if (ra != 31) {
                    TCGv tmp = tcg_temp_new();
                    tcg_gen_shli_i64(tmp, cpu_ir[ra], 2);
                    if (islit)
                        tcg_gen_addi_i64(cpu_ir[rc], tmp, lit);
                    else
                        tcg_gen_add_i64(cpu_ir[rc], tmp, cpu_ir[rb]);
                    tcg_temp_free(tmp);
                } else {
                    if (islit)
                        tcg_gen_movi_i64(cpu_ir[rc], lit);
                    else
                        tcg_gen_mov_i64(cpu_ir[rc], cpu_ir[rb]);
                }
            }
            break;
        case 0x29:
            /* SUBQ */
            if (likely(rc != 31)) {
                if (ra != 31) {
                    if (islit)
                        tcg_gen_subi_i64(cpu_ir[rc], cpu_ir[ra], lit);
                    else
                        tcg_gen_sub_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
                } else {
                    if (islit)
                        tcg_gen_movi_i64(cpu_ir[rc], -lit);
                    else
                        tcg_gen_neg_i64(cpu_ir[rc], cpu_ir[rb]);
                }
            }
            break;
        case 0x2B:
            /* S4SUBQ */
            if (likely(rc != 31)) {
                if (ra != 31) {
                    TCGv tmp = tcg_temp_new();
                    tcg_gen_shli_i64(tmp, cpu_ir[ra], 2);
                    if (islit)
                        tcg_gen_subi_i64(cpu_ir[rc], tmp, lit);
                    else
                        tcg_gen_sub_i64(cpu_ir[rc], tmp, cpu_ir[rb]);
                    tcg_temp_free(tmp);
                } else {
                    if (islit)
                        tcg_gen_movi_i64(cpu_ir[rc], -lit);
                    else
                        tcg_gen_neg_i64(cpu_ir[rc], cpu_ir[rb]);
                }
            }
            break;
        case 0x2D:
            /* CMPEQ */
            gen_cmp(TCG_COND_EQ, ra, rb, rc, islit, lit);
            break;
        case 0x32:
            /* S8ADDQ */
            if (likely(rc != 31)) {
                if (ra != 31) {
                    TCGv tmp = tcg_temp_new();
                    tcg_gen_shli_i64(tmp, cpu_ir[ra], 3);
                    if (islit)
                        tcg_gen_addi_i64(cpu_ir[rc], tmp, lit);
                    else
                        tcg_gen_add_i64(cpu_ir[rc], tmp, cpu_ir[rb]);
                    tcg_temp_free(tmp);
                } else {
                    if (islit)
                        tcg_gen_movi_i64(cpu_ir[rc], lit);
                    else
                        tcg_gen_mov_i64(cpu_ir[rc], cpu_ir[rb]);
                }
            }
            break;
        case 0x3B:
            /* S8SUBQ */
            if (likely(rc != 31)) {
                if (ra != 31) {
                    TCGv tmp = tcg_temp_new();
                    tcg_gen_shli_i64(tmp, cpu_ir[ra], 3);
                    if (islit)
                        tcg_gen_subi_i64(cpu_ir[rc], tmp, lit);
                    else
                        tcg_gen_sub_i64(cpu_ir[rc], tmp, cpu_ir[rb]);
                    tcg_temp_free(tmp);
                } else {
                    if (islit)
                        tcg_gen_movi_i64(cpu_ir[rc], -lit);
                    else
                        tcg_gen_neg_i64(cpu_ir[rc], cpu_ir[rb]);
                }
            }
            break;
        case 0x3D:
            /* CMPULE */
            gen_cmp(TCG_COND_LEU, ra, rb, rc, islit, lit);
            break;
        case 0x40:
            /* ADDL/V */
            gen_addlv(ra, rb, rc, islit, lit);
            break;
        case 0x49:
            /* SUBL/V */
            gen_sublv(ra, rb, rc, islit, lit);
            break;
        case 0x4D:
            /* CMPLT */
            gen_cmp(TCG_COND_LT, ra, rb, rc, islit, lit);
            break;
        case 0x60:
            /* ADDQ/V */
            gen_addqv(ra, rb, rc, islit, lit);
            break;
        case 0x69:
            /* SUBQ/V */
            gen_subqv(ra, rb, rc, islit, lit);
            break;
        case 0x6D:
            /* CMPLE */
            gen_cmp(TCG_COND_LE, ra, rb, rc, islit, lit);
            break;
        default:
            goto invalid_opc;
        }
        break;
    case 0x11:
        switch (fn7) {
        case 0x00:
            /* AND */
            if (likely(rc != 31)) {
                if (ra == 31)
                    tcg_gen_movi_i64(cpu_ir[rc], 0);
                else if (islit)
                    tcg_gen_andi_i64(cpu_ir[rc], cpu_ir[ra], lit);
                else
                    tcg_gen_and_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
            }
            break;
        case 0x08:
            /* BIC */
            if (likely(rc != 31)) {
                if (ra != 31) {
                    if (islit)
                        tcg_gen_andi_i64(cpu_ir[rc], cpu_ir[ra], ~lit);
                    else
                        tcg_gen_andc_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
                } else
                    tcg_gen_movi_i64(cpu_ir[rc], 0);
            }
            break;
        case 0x14:
            /* CMOVLBS */
            gen_cmov(TCG_COND_NE, ra, rb, rc, islit, lit, 1);
            break;
        case 0x16:
            /* CMOVLBC */
            gen_cmov(TCG_COND_EQ, ra, rb, rc, islit, lit, 1);
            break;
        case 0x20:
            /* BIS */
            if (likely(rc != 31)) {
                if (ra != 31) {
                    if (islit)
                        tcg_gen_ori_i64(cpu_ir[rc], cpu_ir[ra], lit);
                    else
                        tcg_gen_or_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
                } else {
                    if (islit)
                        tcg_gen_movi_i64(cpu_ir[rc], lit);
                    else
                        tcg_gen_mov_i64(cpu_ir[rc], cpu_ir[rb]);
                }
            }
            break;
        case 0x24:
            /* CMOVEQ */
            gen_cmov(TCG_COND_EQ, ra, rb, rc, islit, lit, 0);
            break;
        case 0x26:
            /* CMOVNE */
            gen_cmov(TCG_COND_NE, ra, rb, rc, islit, lit, 0);
            break;
        case 0x28:
            /* ORNOT */
            if (likely(rc != 31)) {
                if (ra != 31) {
                    if (islit)
                        tcg_gen_ori_i64(cpu_ir[rc], cpu_ir[ra], ~lit);
                    else
                        tcg_gen_orc_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
                } else {
                    if (islit)
                        tcg_gen_movi_i64(cpu_ir[rc], ~lit);
                    else
                        tcg_gen_not_i64(cpu_ir[rc], cpu_ir[rb]);
                }
            }
            break;
        case 0x40:
            /* XOR */
            if (likely(rc != 31)) {
                if (ra != 31) {
                    if (islit)
                        tcg_gen_xori_i64(cpu_ir[rc], cpu_ir[ra], lit);
                    else
                        tcg_gen_xor_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
                } else {
                    if (islit)
                        tcg_gen_movi_i64(cpu_ir[rc], lit);
                    else
                        tcg_gen_mov_i64(cpu_ir[rc], cpu_ir[rb]);
                }
            }
            break;
        case 0x44:
            /* CMOVLT */
            gen_cmov(TCG_COND_LT, ra, rb, rc, islit, lit, 0);
            break;
        case 0x46:
            /* CMOVGE */
            gen_cmov(TCG_COND_GE, ra, rb, rc, islit, lit, 0);
            break;
        case 0x48:
            /* EQV */
            if (likely(rc != 31)) {
                if (ra != 31) {
                    if (islit)
                        tcg_gen_xori_i64(cpu_ir[rc], cpu_ir[ra], ~lit);
                    else
                        tcg_gen_eqv_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
                } else {
                    if (islit)
                        tcg_gen_movi_i64(cpu_ir[rc], ~lit);
                    else
                        tcg_gen_not_i64(cpu_ir[rc], cpu_ir[rb]);
                }
            }
            break;
        case 0x61:
            /* AMASK */
            if (likely(rc != 31)) {
                uint64_t amask = ctx->tb->flags >> TB_FLAGS_AMASK_SHIFT;

                if (islit) {
                    tcg_gen_movi_i64(cpu_ir[rc], lit & ~amask);
                } else {
                    tcg_gen_andi_i64(cpu_ir[rc], cpu_ir[rb], ~amask);
                }
            }
            break;
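        /* AMASK reports CPU feature support by clearing, in the operand,
           the bit of each implemented feature; amask here is the per-CPU
           feature mask recovered from the TB flags.  */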
        case 0x64:
            /* CMOVLE */
            gen_cmov(TCG_COND_LE, ra, rb, rc, islit, lit, 0);
            break;
        case 0x66:
            /* CMOVGT */
            gen_cmov(TCG_COND_GT, ra, rb, rc, islit, lit, 0);
            break;
        case 0x6C:
            /* IMPLVER */
            if (rc != 31) {
                tcg_gen_movi_i64(cpu_ir[rc], ctx->implver);
            }
            break;
        default:
            goto invalid_opc;
        }
        break;
2293 case 0x12:
2294 switch (fn7) {
2295 case 0x02:
2296 /* MSKBL */
2297 gen_msk_l(ra, rb, rc, islit, lit, 0x01);
2298 break;
2299 case 0x06:
2300 /* EXTBL */
2301 gen_ext_l(ra, rb, rc, islit, lit, 0x01);
2302 break;
2303 case 0x0B:
2304 /* INSBL */
2305 gen_ins_l(ra, rb, rc, islit, lit, 0x01);
2306 break;
2307 case 0x12:
2308 /* MSKWL */
2309 gen_msk_l(ra, rb, rc, islit, lit, 0x03);
2310 break;
2311 case 0x16:
2312 /* EXTWL */
2313 gen_ext_l(ra, rb, rc, islit, lit, 0x03);
2314 break;
2315 case 0x1B:
2316 /* INSWL */
2317 gen_ins_l(ra, rb, rc, islit, lit, 0x03);
2318 break;
2319 case 0x22:
2320 /* MSKLL */
2321 gen_msk_l(ra, rb, rc, islit, lit, 0x0f);
2322 break;
2323 case 0x26:
2324 /* EXTLL */
2325 gen_ext_l(ra, rb, rc, islit, lit, 0x0f);
2326 break;
2327 case 0x2B:
2328 /* INSLL */
2329 gen_ins_l(ra, rb, rc, islit, lit, 0x0f);
2330 break;
2331 case 0x30:
2332 /* ZAP */
2333 gen_zap(ra, rb, rc, islit, lit);
2334 break;
2335 case 0x31:
2336 /* ZAPNOT */
2337 gen_zapnot(ra, rb, rc, islit, lit);
2338 break;
2339 case 0x32:
2340 /* MSKQL */
2341 gen_msk_l(ra, rb, rc, islit, lit, 0xff);
2342 break;
2343 case 0x34:
2344 /* SRL */
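/* Alpha shifts use only the low 6 bits of the count, hence the
   masking with 0x3f in the register forms below.  */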
2345 if (likely(rc != 31)) {
2346 if (ra != 31) {
2347 if (islit)
2348 tcg_gen_shri_i64(cpu_ir[rc], cpu_ir[ra], lit & 0x3f);
2349 else {
2350 TCGv shift = tcg_temp_new();
2351 tcg_gen_andi_i64(shift, cpu_ir[rb], 0x3f);
2352 tcg_gen_shr_i64(cpu_ir[rc], cpu_ir[ra], shift);
2353 tcg_temp_free(shift);
2355 } else
2356 tcg_gen_movi_i64(cpu_ir[rc], 0);
2358 break;
2359 case 0x36:
2360 /* EXTQL */
2361 gen_ext_l(ra, rb, rc, islit, lit, 0xff);
2362 break;
2363 case 0x39:
2364 /* SLL */
2365 if (likely(rc != 31)) {
2366 if (ra != 31) {
2367 if (islit)
2368 tcg_gen_shli_i64(cpu_ir[rc], cpu_ir[ra], lit & 0x3f);
2369 else {
2370 TCGv shift = tcg_temp_new();
2371 tcg_gen_andi_i64(shift, cpu_ir[rb], 0x3f);
2372 tcg_gen_shl_i64(cpu_ir[rc], cpu_ir[ra], shift);
2373 tcg_temp_free(shift);
2375 } else
2376 tcg_gen_movi_i64(cpu_ir[rc], 0);
2378 break;
2379 case 0x3B:
2380 /* INSQL */
2381 gen_ins_l(ra, rb, rc, islit, lit, 0xff);
2382 break;
2383 case 0x3C:
2384 /* SRA */
2385 if (likely(rc != 31)) {
2386 if (ra != 31) {
2387 if (islit)
2388 tcg_gen_sari_i64(cpu_ir[rc], cpu_ir[ra], lit & 0x3f);
2389 else {
2390 TCGv shift = tcg_temp_new();
2391 tcg_gen_andi_i64(shift, cpu_ir[rb], 0x3f);
2392 tcg_gen_sar_i64(cpu_ir[rc], cpu_ir[ra], shift);
2393 tcg_temp_free(shift);
2395 } else
2396 tcg_gen_movi_i64(cpu_ir[rc], 0);
2398 break;
2399 case 0x52:
2400 /* MSKWH */
2401 gen_msk_h(ra, rb, rc, islit, lit, 0x03);
2402 break;
2403 case 0x57:
2404 /* INSWH */
2405 gen_ins_h(ra, rb, rc, islit, lit, 0x03);
2406 break;
2407 case 0x5A:
2408 /* EXTWH */
2409 gen_ext_h(ra, rb, rc, islit, lit, 0x03);
2410 break;
2411 case 0x62:
2412 /* MSKLH */
2413 gen_msk_h(ra, rb, rc, islit, lit, 0x0f);
2414 break;
2415 case 0x67:
2416 /* INSLH */
2417 gen_ins_h(ra, rb, rc, islit, lit, 0x0f);
2418 break;
2419 case 0x6A:
2420 /* EXTLH */
2421 gen_ext_h(ra, rb, rc, islit, lit, 0x0f);
2422 break;
2423 case 0x72:
2424 /* MSKQH */
2425 gen_msk_h(ra, rb, rc, islit, lit, 0xff);
2426 break;
2427 case 0x77:
2428 /* INSQH */
2429 gen_ins_h(ra, rb, rc, islit, lit, 0xff);
2430 break;
2431 case 0x7A:
2432 /* EXTQH */
2433 gen_ext_h(ra, rb, rc, islit, lit, 0xff);
2434 break;
2435 default:
2436 goto invalid_opc;
2438 break;
2439 case 0x13:
2440 switch (fn7) {
2441 case 0x00:
2442 /* MULL */
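/* MULL yields a 32-bit product that is sign-extended to 64 bits,
   hence the ext32s on the result.  */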
2443 if (likely(rc != 31)) {
2444 if (ra == 31)
2445 tcg_gen_movi_i64(cpu_ir[rc], 0);
2446 else {
2447 if (islit)
2448 tcg_gen_muli_i64(cpu_ir[rc], cpu_ir[ra], lit);
2449 else
2450 tcg_gen_mul_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
2451 tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rc]);
2454 break;
2455 case 0x20:
2456 /* MULQ */
2457 if (likely(rc != 31)) {
2458 if (ra == 31)
2459 tcg_gen_movi_i64(cpu_ir[rc], 0);
2460 else if (islit)
2461 tcg_gen_muli_i64(cpu_ir[rc], cpu_ir[ra], lit);
2462 else
2463 tcg_gen_mul_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
2465 break;
2466 case 0x30:
2467 /* UMULH */
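/* Only the high 64 bits of the 128-bit product are wanted, but
   tcg_gen_mulu2_i64 computes both halves, so the low half lands
   in a scratch temp that is freed immediately.  */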
2469 TCGv low;
2470 if (unlikely(rc == 31)) {
2471 break;
2473 if (ra == 31) {
2474 tcg_gen_movi_i64(cpu_ir[rc], 0);
2475 break;
2477 low = tcg_temp_new();
2478 if (islit) {
2479 tcg_gen_movi_tl(low, lit);
2480 tcg_gen_mulu2_i64(low, cpu_ir[rc], cpu_ir[ra], low);
2481 } else {
2482 tcg_gen_mulu2_i64(low, cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
2484 tcg_temp_free(low);
2486 break;
2487 case 0x40:
2488 /* MULL/V */
2489 gen_mullv(ra, rb, rc, islit, lit);
2490 break;
2491 case 0x60:
2492 /* MULQ/V */
2493 gen_mulqv(ra, rb, rc, islit, lit);
2494 break;
2495 default:
2496 goto invalid_opc;
2498 break;
2499 case 0x14:
2500 switch (fpfn) { /* fn11 & 0x3F */
2501 case 0x04:
2502 /* ITOFS */
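/* The ITOFx insns move integer register bits to an FP register
   without arithmetic conversion; the helper merely expands the
   32-bit S-format memory layout to the register format.  */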
2503 if ((ctx->tb->flags & TB_FLAGS_AMASK_FIX) == 0) {
2504 goto invalid_opc;
2506 if (likely(rc != 31)) {
2507 if (ra != 31) {
2508 TCGv_i32 tmp = tcg_temp_new_i32();
2509 tcg_gen_trunc_i64_i32(tmp, cpu_ir[ra]);
2510 gen_helper_memory_to_s(cpu_fir[rc], tmp);
2511 tcg_temp_free_i32(tmp);
2512 } else
2513 tcg_gen_movi_i64(cpu_fir[rc], 0);
2515 break;
2516 case 0x0A:
2517 /* SQRTF */
2518 if (ctx->tb->flags & TB_FLAGS_AMASK_FIX) {
2519 gen_fsqrtf(rb, rc);
2520 break;
2522 goto invalid_opc;
2523 case 0x0B:
2524 /* SQRTS */
2525 if (ctx->tb->flags & TB_FLAGS_AMASK_FIX) {
2526 gen_fsqrts(ctx, rb, rc, fn11);
2527 break;
2529 goto invalid_opc;
2530 case 0x14:
2531 /* ITOFF */
2532 if ((ctx->tb->flags & TB_FLAGS_AMASK_FIX) == 0) {
2533 goto invalid_opc;
2535 if (likely(rc != 31)) {
2536 if (ra != 31) {
2537 TCGv_i32 tmp = tcg_temp_new_i32();
2538 tcg_gen_trunc_i64_i32(tmp, cpu_ir[ra]);
2539 gen_helper_memory_to_f(cpu_fir[rc], tmp);
2540 tcg_temp_free_i32(tmp);
2541 } else
2542 tcg_gen_movi_i64(cpu_fir[rc], 0);
2544 break;
2545 case 0x24:
2546 /* ITOFT */
2547 if ((ctx->tb->flags & TB_FLAGS_AMASK_FIX) == 0) {
2548 goto invalid_opc;
2550 if (likely(rc != 31)) {
2551 if (ra != 31)
2552 tcg_gen_mov_i64(cpu_fir[rc], cpu_ir[ra]);
2553 else
2554 tcg_gen_movi_i64(cpu_fir[rc], 0);
2556 break;
2557 case 0x2A:
2558 /* SQRTG */
2559 if (ctx->tb->flags & TB_FLAGS_AMASK_FIX) {
2560 gen_fsqrtg(rb, rc);
2561 break;
2563 goto invalid_opc;
2564 case 0x2B:
2565 /* SQRTT */
2566 if (ctx->tb->flags & TB_FLAGS_AMASK_FIX) {
2567 gen_fsqrtt(ctx, rb, rc, fn11);
2568 break;
2570 goto invalid_opc;
2571 default:
2572 goto invalid_opc;
2574 break;
2575 case 0x15:
2576 /* VAX floating point */
2577 /* XXX: rounding mode and traps are ignored (!) */
2578 switch (fpfn) { /* fn11 & 0x3F */
2579 case 0x00:
2580 /* ADDF */
2581 gen_faddf(ra, rb, rc);
2582 break;
2583 case 0x01:
2584 /* SUBF */
2585 gen_fsubf(ra, rb, rc);
2586 break;
2587 case 0x02:
2588 /* MULF */
2589 gen_fmulf(ra, rb, rc);
2590 break;
2591 case 0x03:
2592 /* DIVF */
2593 gen_fdivf(ra, rb, rc);
2594 break;
2595 case 0x1E:
2596 /* CVTDG */
2597 #if 0 /* TODO */
2598 gen_fcvtdg(rb, rc);
2599 #else
2600 goto invalid_opc;
2601 #endif
2602 break;
2603 case 0x20:
2604 /* ADDG */
2605 gen_faddg(ra, rb, rc);
2606 break;
2607 case 0x21:
2608 /* SUBG */
2609 gen_fsubg(ra, rb, rc);
2610 break;
2611 case 0x22:
2612 /* MULG */
2613 gen_fmulg(ra, rb, rc);
2614 break;
2615 case 0x23:
2616 /* DIVG */
2617 gen_fdivg(ra, rb, rc);
2618 break;
2619 case 0x25:
2620 /* CMPGEQ */
2621 gen_fcmpgeq(ra, rb, rc);
2622 break;
2623 case 0x26:
2624 /* CMPGLT */
2625 gen_fcmpglt(ra, rb, rc);
2626 break;
2627 case 0x27:
2628 /* CMPGLE */
2629 gen_fcmpgle(ra, rb, rc);
2630 break;
2631 case 0x2C:
2632 /* CVTGF */
2633 gen_fcvtgf(rb, rc);
2634 break;
2635 case 0x2D:
2636 /* CVTGD */
2637 #if 0 /* TODO */
2638 gen_fcvtgd(rb, rc);
2639 #else
2640 goto invalid_opc;
2641 #endif
2642 break;
2643 case 0x2F:
2644 /* CVTGQ */
2645 gen_fcvtgq(rb, rc);
2646 break;
2647 case 0x3C:
2648 /* CVTQF */
2649 gen_fcvtqf(rb, rc);
2650 break;
2651 case 0x3E:
2652 /* CVTQG */
2653 gen_fcvtqg(rb, rc);
2654 break;
2655 default:
2656 goto invalid_opc;
2658 break;
2659 case 0x16:
2660 /* IEEE floating-point */
2661 switch (fpfn) { /* fn11 & 0x3F */
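/* The low 6 bits of fn11 select the operation; the upper bits hold
   the rounding-mode and trapping qualifiers, which the helpers
   decode from the full fn11 value.  */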
2662 case 0x00:
2663 /* ADDS */
2664 gen_fadds(ctx, ra, rb, rc, fn11);
2665 break;
2666 case 0x01:
2667 /* SUBS */
2668 gen_fsubs(ctx, ra, rb, rc, fn11);
2669 break;
2670 case 0x02:
2671 /* MULS */
2672 gen_fmuls(ctx, ra, rb, rc, fn11);
2673 break;
2674 case 0x03:
2675 /* DIVS */
2676 gen_fdivs(ctx, ra, rb, rc, fn11);
2677 break;
2678 case 0x20:
2679 /* ADDT */
2680 gen_faddt(ctx, ra, rb, rc, fn11);
2681 break;
2682 case 0x21:
2683 /* SUBT */
2684 gen_fsubt(ctx, ra, rb, rc, fn11);
2685 break;
2686 case 0x22:
2687 /* MULT */
2688 gen_fmult(ctx, ra, rb, rc, fn11);
2689 break;
2690 case 0x23:
2691 /* DIVT */
2692 gen_fdivt(ctx, ra, rb, rc, fn11);
2693 break;
2694 case 0x24:
2695 /* CMPTUN */
2696 gen_fcmptun(ctx, ra, rb, rc, fn11);
2697 break;
2698 case 0x25:
2699 /* CMPTEQ */
2700 gen_fcmpteq(ctx, ra, rb, rc, fn11);
2701 break;
2702 case 0x26:
2703 /* CMPTLT */
2704 gen_fcmptlt(ctx, ra, rb, rc, fn11);
2705 break;
2706 case 0x27:
2707 /* CMPTLE */
2708 gen_fcmptle(ctx, ra, rb, rc, fn11);
2709 break;
2710 case 0x2C:
2711 if (fn11 == 0x2AC || fn11 == 0x6AC) {
2712 /* CVTST */
2713 gen_fcvtst(ctx, rb, rc, fn11);
2714 } else {
2715 /* CVTTS */
2716 gen_fcvtts(ctx, rb, rc, fn11);
2718 break;
2719 case 0x2F:
2720 /* CVTTQ */
2721 gen_fcvttq(ctx, rb, rc, fn11);
2722 break;
2723 case 0x3C:
2724 /* CVTQS */
2725 gen_fcvtqs(ctx, rb, rc, fn11);
2726 break;
2727 case 0x3E:
2728 /* CVTQT */
2729 gen_fcvtqt(ctx, rb, rc, fn11);
2730 break;
2731 default:
2732 goto invalid_opc;
2734 break;
2735 case 0x17:
2736 switch (fn11) {
2737 case 0x010:
2738 /* CVTLQ */
2739 gen_fcvtlq(rb, rc);
2740 break;
2741 case 0x020:
2742 if (likely(rc != 31)) {
2743 if (ra == rb) {
2744 /* FMOV */
2745 if (ra == 31)
2746 tcg_gen_movi_i64(cpu_fir[rc], 0);
2747 else
2748 tcg_gen_mov_i64(cpu_fir[rc], cpu_fir[ra]);
2749 } else {
2750 /* CPYS */
2751 gen_fcpys(ra, rb, rc);
2754 break;
2755 case 0x021:
2756 /* CPYSN */
2757 gen_fcpysn(ra, rb, rc);
2758 break;
2759 case 0x022:
2760 /* CPYSE */
2761 gen_fcpyse(ra, rb, rc);
2762 break;
2763 case 0x024:
2764 /* MT_FPCR */
2765 if (likely(ra != 31))
2766 gen_helper_store_fpcr(cpu_env, cpu_fir[ra]);
2767 else {
2768 TCGv tmp = tcg_const_i64(0);
2769 gen_helper_store_fpcr(cpu_env, tmp);
2770 tcg_temp_free(tmp);
2772 break;
2773 case 0x025:
2774 /* MF_FPCR */
2775 if (likely(ra != 31))
2776 gen_helper_load_fpcr(cpu_fir[ra], cpu_env);
2777 break;
2778 case 0x02A:
2779 /* FCMOVEQ */
2780 gen_fcmov(TCG_COND_EQ, ra, rb, rc);
2781 break;
2782 case 0x02B:
2783 /* FCMOVNE */
2784 gen_fcmov(TCG_COND_NE, ra, rb, rc);
2785 break;
2786 case 0x02C:
2787 /* FCMOVLT */
2788 gen_fcmov(TCG_COND_LT, ra, rb, rc);
2789 break;
2790 case 0x02D:
2791 /* FCMOVGE */
2792 gen_fcmov(TCG_COND_GE, ra, rb, rc);
2793 break;
2794 case 0x02E:
2795 /* FCMOVLE */
2796 gen_fcmov(TCG_COND_LE, ra, rb, rc);
2797 break;
2798 case 0x02F:
2799 /* FCMOVGT */
2800 gen_fcmov(TCG_COND_GT, ra, rb, rc);
2801 break;
2802 case 0x030:
2803 /* CVTQL */
2804 gen_fcvtql(rb, rc);
2805 break;
2806 case 0x130:
2807 /* CVTQL/V */
2808 case 0x530:
2809 /* CVTQL/SV */
2810 /* ??? I'm pretty sure there's nothing that /sv needs to do that
2811 /v doesn't do. The only thing I can think of is that /sv is a
2812 valid instruction merely for completeness in the ISA. */
2813 gen_fcvtql_v(ctx, rb, rc);
2814 break;
2815 default:
2816 goto invalid_opc;
2818 break;
2819 case 0x18:
2820 switch ((uint16_t)disp16) {
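/* Opcode 0x18 encodes its function in the displacement field.  The
   barrier and prefetch hints need no work in this serialized TCG
   execution model, so most cases are no-ops.  */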
2821 case 0x0000:
2822 /* TRAPB */
2823 /* No-op. */
2824 break;
2825 case 0x0400:
2826 /* EXCB */
2827 /* No-op. */
2828 break;
2829 case 0x4000:
2830 /* MB */
2831 /* No-op */
2832 break;
2833 case 0x4400:
2834 /* WMB */
2835 /* No-op */
2836 break;
2837 case 0x8000:
2838 /* FETCH */
2839 /* No-op */
2840 break;
2841 case 0xA000:
2842 /* FETCH_M */
2843 /* No-op */
2844 break;
2845 case 0xC000:
2846 /* RPCC */
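/* With icount enabled the cycle-counter read counts as I/O, so
   bracket it with gen_io_start/gen_io_end and end the TB via
   EXIT_PC_STALE to keep the instruction count exact.  */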
2847 if (ra != 31) {
2848 if (use_icount) {
2849 gen_io_start();
2850 gen_helper_load_pcc(cpu_ir[ra], cpu_env);
2851 gen_io_end();
2852 ret = EXIT_PC_STALE;
2853 } else {
2854 gen_helper_load_pcc(cpu_ir[ra], cpu_env);
2857 break;
2858 case 0xE000:
2859 /* RC */
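/* RC and RS read the interrupt flag into ra and then store the
   constant passed to gen_rx: 0 clears the flag (RC), 1 sets it
   (RS).  */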
2860 gen_rx(ra, 0);
2861 break;
2862 case 0xE800:
2863 /* ECB */
2864 break;
2865 case 0xF000:
2866 /* RS */
2867 gen_rx(ra, 1);
2868 break;
2869 case 0xF800:
2870 /* WH64 */
2871 /* No-op */
2872 break;
2873 default:
2874 goto invalid_opc;
2876 break;
2877 case 0x19:
2878 /* HW_MFPR (PALcode) */
2879 #ifndef CONFIG_USER_ONLY
2880 if (ctx->tb->flags & TB_FLAGS_PAL_MODE) {
2881 return gen_mfpr(ra, insn & 0xffff);
2883 #endif
2884 goto invalid_opc;
2885 case 0x1A:
2886 /* JMP, JSR, RET, JSR_COROUTINE. These only differ by the branch
2887 prediction stack action, which of course we don't implement. */
2888 if (rb != 31) {
2889 tcg_gen_andi_i64(cpu_pc, cpu_ir[rb], ~3);
2890 } else {
2891 tcg_gen_movi_i64(cpu_pc, 0);
2893 if (ra != 31) {
2894 tcg_gen_movi_i64(cpu_ir[ra], ctx->pc);
2896 ret = EXIT_PC_UPDATED;
2897 break;
2898 case 0x1B:
2899 /* HW_LD (PALcode) */
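/* The 12-bit displacement leaves insn bits 12..15 free; they select
   the access variant decoded by the switch below.  */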
2900 #ifndef CONFIG_USER_ONLY
2901 if (ctx->tb->flags & TB_FLAGS_PAL_MODE) {
2902 TCGv addr;
2904 if (ra == 31) {
2905 break;
2908 addr = tcg_temp_new();
2909 if (rb != 31)
2910 tcg_gen_addi_i64(addr, cpu_ir[rb], disp12);
2911 else
2912 tcg_gen_movi_i64(addr, disp12);
2913 switch ((insn >> 12) & 0xF) {
2914 case 0x0:
2915 /* Longword physical access (hw_ldl/p) */
2916 gen_helper_ldl_phys(cpu_ir[ra], cpu_env, addr);
2917 break;
2918 case 0x1:
2919 /* Quadword physical access (hw_ldq/p) */
2920 gen_helper_ldq_phys(cpu_ir[ra], cpu_env, addr);
2921 break;
2922 case 0x2:
2923 /* Longword physical access with lock (hw_ldl_l/p) */
2924 gen_helper_ldl_l_phys(cpu_ir[ra], cpu_env, addr);
2925 break;
2926 case 0x3:
2927 /* Quadword physical access with lock (hw_ldq_l/p) */
2928 gen_helper_ldq_l_phys(cpu_ir[ra], cpu_env, addr);
2929 break;
2930 case 0x4:
2931 /* Longword virtual PTE fetch (hw_ldl/v) */
2932 goto invalid_opc;
2933 case 0x5:
2934 /* Quadword virtual PTE fetch (hw_ldq/v) */
2935 goto invalid_opc;
2937 case 0x6:
2938 /* Invalid */
2939 goto invalid_opc;
2940 case 0x7:
2941 /* Invalid */
2942 goto invalid_opc;
2943 case 0x8:
2944 /* Longword virtual access (hw_ldl) */
2945 goto invalid_opc;
2946 case 0x9:
2947 /* Quadword virtual access (hw_ldq) */
2948 goto invalid_opc;
2949 case 0xA:
2950 /* Longword virtual access with protection check (hw_ldl/w) */
2951 tcg_gen_qemu_ld_i64(cpu_ir[ra], addr, MMU_KERNEL_IDX, MO_LESL);
2952 break;
2953 case 0xB:
2954 /* Quadword virtual access with protection check (hw_ldq/w) */
2955 tcg_gen_qemu_ld_i64(cpu_ir[ra], addr, MMU_KERNEL_IDX, MO_LEQ);
2956 break;
2957 case 0xC:
2958 /* Longword virtual access with alt access mode (hw_ldl/a) */
2959 goto invalid_opc;
2960 case 0xD:
2961 /* Quadword virtual access with alt access mode (hw_ldq/a) */
2962 goto invalid_opc;
2963 case 0xE:
2964 /* Longword virtual access with alternate access mode and
2965 protection checks (hw_ldl/wa) */
2966 tcg_gen_qemu_ld_i64(cpu_ir[ra], addr, MMU_USER_IDX, MO_LESL);
2967 break;
2968 case 0xF:
2969 /* Quadword virtual access with alternate access mode and
2970 protection checks (hw_ldq/wa) */
2971 tcg_gen_qemu_ld_i64(cpu_ir[ra], addr, MMU_USER_IDX, MO_LEQ);
2972 break;
2974 tcg_temp_free(addr);
2975 break;
2977 #endif
2978 goto invalid_opc;
2979 case 0x1C:
2980 switch (fn7) {
2981 case 0x00:
2982 /* SEXTB */
2983 if ((ctx->tb->flags & TB_FLAGS_AMASK_BWX) == 0) {
2984 goto invalid_opc;
2986 if (likely(rc != 31)) {
2987 if (islit)
2988 tcg_gen_movi_i64(cpu_ir[rc], (int64_t)((int8_t)lit));
2989 else
2990 tcg_gen_ext8s_i64(cpu_ir[rc], cpu_ir[rb]);
2992 break;
2993 case 0x01:
2994 /* SEXTW */
2995 if (ctx->tb->flags & TB_FLAGS_AMASK_BWX) {
2996 if (likely(rc != 31)) {
2997 if (islit) {
2998 tcg_gen_movi_i64(cpu_ir[rc], (int64_t)((int16_t)lit));
2999 } else {
3000 tcg_gen_ext16s_i64(cpu_ir[rc], cpu_ir[rb]);
3003 break;
3005 goto invalid_opc;
3006 case 0x30:
3007 /* CTPOP */
3008 if (ctx->tb->flags & TB_FLAGS_AMASK_CIX) {
3009 if (likely(rc != 31)) {
3010 if (islit) {
3011 tcg_gen_movi_i64(cpu_ir[rc], ctpop64(lit));
3012 } else {
3013 gen_helper_ctpop(cpu_ir[rc], cpu_ir[rb]);
3016 break;
3018 goto invalid_opc;
3019 case 0x31:
3020 /* PERR */
3021 if (ctx->tb->flags & TB_FLAGS_AMASK_MVI) {
3022 gen_perr(ra, rb, rc, islit, lit);
3023 break;
3025 goto invalid_opc;
3026 case 0x32:
3027 /* CTLZ */
3028 if (ctx->tb->flags & TB_FLAGS_AMASK_CIX) {
3029 if (likely(rc != 31)) {
3030 if (islit) {
3031 tcg_gen_movi_i64(cpu_ir[rc], clz64(lit));
3032 } else {
3033 gen_helper_ctlz(cpu_ir[rc], cpu_ir[rb]);
3036 break;
3038 goto invalid_opc;
3039 case 0x33:
3040 /* CTTZ */
3041 if (ctx->tb->flags & TB_FLAGS_AMASK_CIX) {
3042 if (likely(rc != 31)) {
3043 if (islit) {
3044 tcg_gen_movi_i64(cpu_ir[rc], ctz64(lit));
3045 } else {
3046 gen_helper_cttz(cpu_ir[rc], cpu_ir[rb]);
3049 break;
3051 goto invalid_opc;
3052 case 0x34:
3053 /* UNPKBW */
3054 if (ctx->tb->flags & TB_FLAGS_AMASK_MVI) {
3055 if (real_islit || ra != 31) {
3056 goto invalid_opc;
3058 gen_unpkbw(rb, rc);
3059 break;
3061 goto invalid_opc;
3062 case 0x35:
3063 /* UNPKBL */
3064 if (ctx->tb->flags & TB_FLAGS_AMASK_MVI) {
3065 if (real_islit || ra != 31) {
3066 goto invalid_opc;
3068 gen_unpkbl(rb, rc);
3069 break;
3071 goto invalid_opc;
3072 case 0x36:
3073 /* PKWB */
3074 if (ctx->tb->flags & TB_FLAGS_AMASK_MVI) {
3075 if (real_islit || ra != 31) {
3076 goto invalid_opc;
3078 gen_pkwb(rb, rc);
3079 break;
3081 goto invalid_opc;
3082 case 0x37:
3083 /* PKLB */
3084 if (ctx->tb->flags & TB_FLAGS_AMASK_MVI) {
3085 if (real_islit || ra != 31) {
3086 goto invalid_opc;
3088 gen_pklb(rb, rc);
3089 break;
3091 goto invalid_opc;
3092 case 0x38:
3093 /* MINSB8 */
3094 if (ctx->tb->flags & TB_FLAGS_AMASK_MVI) {
3095 gen_minsb8(ra, rb, rc, islit, lit);
3096 break;
3098 goto invalid_opc;
3099 case 0x39:
3100 /* MINSW4 */
3101 if (ctx->tb->flags & TB_FLAGS_AMASK_MVI) {
3102 gen_minsw4(ra, rb, rc, islit, lit);
3103 break;
3105 goto invalid_opc;
3106 case 0x3A:
3107 /* MINUB8 */
3108 if (ctx->tb->flags & TB_FLAGS_AMASK_MVI) {
3109 gen_minub8(ra, rb, rc, islit, lit);
3110 break;
3112 goto invalid_opc;
3113 case 0x3B:
3114 /* MINUW4 */
3115 if (ctx->tb->flags & TB_FLAGS_AMASK_MVI) {
3116 gen_minuw4(ra, rb, rc, islit, lit);
3117 break;
3119 goto invalid_opc;
3120 case 0x3C:
3121 /* MAXUB8 */
3122 if (ctx->tb->flags & TB_FLAGS_AMASK_MVI) {
3123 gen_maxub8(ra, rb, rc, islit, lit);
3124 break;
3126 goto invalid_opc;
3127 case 0x3D:
3128 /* MAXUW4 */
3129 if (ctx->tb->flags & TB_FLAGS_AMASK_MVI) {
3130 gen_maxuw4(ra, rb, rc, islit, lit);
3131 break;
3133 goto invalid_opc;
3134 case 0x3E:
3135 /* MAXSB8 */
3136 if (ctx->tb->flags & TB_FLAGS_AMASK_MVI) {
3137 gen_maxsb8(ra, rb, rc, islit, lit);
3138 break;
3140 goto invalid_opc;
3141 case 0x3F:
3142 /* MAXSW4 */
3143 if (ctx->tb->flags & TB_FLAGS_AMASK_MVI) {
3144 gen_maxsw4(ra, rb, rc, islit, lit);
3145 break;
3147 goto invalid_opc;
3148 case 0x70:
3149 /* FTOIT */
3150 if ((ctx->tb->flags & TB_FLAGS_AMASK_FIX) == 0) {
3151 goto invalid_opc;
3153 if (likely(rc != 31)) {
3154 if (ra != 31)
3155 tcg_gen_mov_i64(cpu_ir[rc], cpu_fir[ra]);
3156 else
3157 tcg_gen_movi_i64(cpu_ir[rc], 0);
3159 break;
3160 case 0x78:
3161 /* FTOIS */
3162 if ((ctx->tb->flags & TB_FLAGS_AMASK_FIX) == 0) {
3163 goto invalid_opc;
3165 if (rc != 31) {
3166 TCGv_i32 tmp1 = tcg_temp_new_i32();
3167 if (ra != 31)
3168 gen_helper_s_to_memory(tmp1, cpu_fir[ra]);
3169 else {
3170 TCGv tmp2 = tcg_const_i64(0);
3171 gen_helper_s_to_memory(tmp1, tmp2);
3172 tcg_temp_free(tmp2);
3174 tcg_gen_ext_i32_i64(cpu_ir[rc], tmp1);
3175 tcg_temp_free_i32(tmp1);
3177 break;
3178 default:
3179 goto invalid_opc;
3181 break;
3182 case 0x1D:
3183 /* HW_MTPR (PALcode) */
3184 #ifndef CONFIG_USER_ONLY
3185 if (ctx->tb->flags & TB_FLAGS_PAL_MODE) {
3186 return gen_mtpr(ctx, rb, insn & 0xffff);
3188 #endif
3189 goto invalid_opc;
3190 case 0x1E:
3191 /* HW_RET (PALcode) */
3192 #ifndef CONFIG_USER_ONLY
3193 if (ctx->tb->flags & TB_FLAGS_PAL_MODE) {
3194 if (rb == 31) {
3195 /* Pre-EV6 CPUs interpreted this as HW_REI, loading the return
3196 address from EXC_ADDR. This turns out to be useful for our
3197 emulation PALcode, so continue to accept it. */
3198 TCGv tmp = tcg_temp_new();
3199 tcg_gen_ld_i64(tmp, cpu_env, offsetof(CPUAlphaState, exc_addr));
3200 gen_helper_hw_ret(cpu_env, tmp);
3201 tcg_temp_free(tmp);
3202 } else {
3203 gen_helper_hw_ret(cpu_env, cpu_ir[rb]);
3205 ret = EXIT_PC_UPDATED;
3206 break;
3208 #endif
3209 goto invalid_opc;
3210 case 0x1F:
3211 /* HW_ST (PALcode) */
3212 #ifndef CONFIG_USER_ONLY
3213 if (ctx->tb->flags & TB_FLAGS_PAL_MODE) {
3214 TCGv addr, val;
3215 addr = tcg_temp_new();
3216 if (rb != 31)
3217 tcg_gen_addi_i64(addr, cpu_ir[rb], disp12);
3218 else
3219 tcg_gen_movi_i64(addr, disp12);
3220 if (ra != 31)
3221 val = cpu_ir[ra];
3222 else {
3223 val = tcg_temp_new();
3224 tcg_gen_movi_i64(val, 0);
3226 switch ((insn >> 12) & 0xF) {
3227 case 0x0:
3228 /* Longword physical access */
3229 gen_helper_stl_phys(cpu_env, addr, val);
3230 break;
3231 case 0x1:
3232 /* Quadword physical access */
3233 gen_helper_stq_phys(cpu_env, addr, val);
3234 break;
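/* For the locked variants the helper returns the success flag; it
   lands in ra because val aliases cpu_ir[ra] whenever ra != 31.  */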
3235 case 0x2:
3236 /* Longword physical access with lock */
3237 gen_helper_stl_c_phys(val, cpu_env, addr, val);
3238 break;
3239 case 0x3:
3240 /* Quadword physical access with lock */
3241 gen_helper_stq_c_phys(val, cpu_env, addr, val);
3242 break;
3243 case 0x4:
3244 /* Longword virtual access */
3245 goto invalid_opc;
3246 case 0x5:
3247 /* Quadword virtual access */
3248 goto invalid_opc;
3249 case 0x6:
3250 /* Invalid */
3251 goto invalid_opc;
3252 case 0x7:
3253 /* Invalid */
3254 goto invalid_opc;
3255 case 0x8:
3256 /* Invalid */
3257 goto invalid_opc;
3258 case 0x9:
3259 /* Invalid */
3260 goto invalid_opc;
3261 case 0xA:
3262 /* Invalid */
3263 goto invalid_opc;
3264 case 0xB:
3265 /* Invalid */
3266 goto invalid_opc;
3267 case 0xC:
3268 /* Longword virtual access with alternate access mode */
3269 goto invalid_opc;
3270 case 0xD:
3271 /* Quadword virtual access with alternate access mode */
3272 goto invalid_opc;
3273 case 0xE:
3274 /* Invalid */
3275 goto invalid_opc;
3276 case 0xF:
3277 /* Invalid */
3278 goto invalid_opc;
3280 if (ra == 31)
3281 tcg_temp_free(val);
3282 tcg_temp_free(addr);
3283 break;
3285 #endif
3286 goto invalid_opc;
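/* Opcodes 0x20-0x27 are the FP loads and stores.  The first flag to
   gen_load_mem/gen_store_mem selects an FP register target; the
   second (unused here) requests the low-address-bit masking done
   for LDQ_U.  */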
3287 case 0x20:
3288 /* LDF */
3289 gen_load_mem(ctx, &gen_qemu_ldf, ra, rb, disp16, 1, 0);
3290 break;
3291 case 0x21:
3292 /* LDG */
3293 gen_load_mem(ctx, &gen_qemu_ldg, ra, rb, disp16, 1, 0);
3294 break;
3295 case 0x22:
3296 /* LDS */
3297 gen_load_mem(ctx, &gen_qemu_lds, ra, rb, disp16, 1, 0);
3298 break;
3299 case 0x23:
3300 /* LDT */
3301 gen_load_mem(ctx, &tcg_gen_qemu_ld64, ra, rb, disp16, 1, 0);
3302 break;
3303 case 0x24:
3304 /* STF */
3305 gen_store_mem(ctx, &gen_qemu_stf, ra, rb, disp16, 1, 0);
3306 break;
3307 case 0x25:
3308 /* STG */
3309 gen_store_mem(ctx, &gen_qemu_stg, ra, rb, disp16, 1, 0);
3310 break;
3311 case 0x26:
3312 /* STS */
3313 gen_store_mem(ctx, &gen_qemu_sts, ra, rb, disp16, 1, 0);
3314 break;
3315 case 0x27:
3316 /* STT */
3317 gen_store_mem(ctx, &tcg_gen_qemu_st64, ra, rb, disp16, 1, 0);
3318 break;
3319 case 0x28:
3320 /* LDL */
3321 gen_load_mem(ctx, &tcg_gen_qemu_ld32s, ra, rb, disp16, 0, 0);
3322 break;
3323 case 0x29:
3324 /* LDQ */
3325 gen_load_mem(ctx, &tcg_gen_qemu_ld64, ra, rb, disp16, 0, 0);
3326 break;
3327 case 0x2A:
3328 /* LDL_L */
3329 gen_load_mem(ctx, &gen_qemu_ldl_l, ra, rb, disp16, 0, 0);
3330 break;
3331 case 0x2B:
3332 /* LDQ_L */
3333 gen_load_mem(ctx, &gen_qemu_ldq_l, ra, rb, disp16, 0, 0);
3334 break;
3335 case 0x2C:
3336 /* STL */
3337 gen_store_mem(ctx, &tcg_gen_qemu_st32, ra, rb, disp16, 0, 0);
3338 break;
3339 case 0x2D:
3340 /* STQ */
3341 gen_store_mem(ctx, &tcg_gen_qemu_st64, ra, rb, disp16, 0, 0);
3342 break;
3343 case 0x2E:
3344 /* STL_C */
3345 ret = gen_store_conditional(ctx, ra, rb, disp16, 0);
3346 break;
3347 case 0x2F:
3348 /* STQ_C */
3349 ret = gen_store_conditional(ctx, ra, rb, disp16, 1);
3350 break;
3351 case 0x30:
3352 /* BR */
3353 ret = gen_bdirect(ctx, ra, disp21);
3354 break;
3355 case 0x31: /* FBEQ */
3356 ret = gen_fbcond(ctx, TCG_COND_EQ, ra, disp21);
3357 break;
3358 case 0x32: /* FBLT */
3359 ret = gen_fbcond(ctx, TCG_COND_LT, ra, disp21);
3360 break;
3361 case 0x33: /* FBLE */
3362 ret = gen_fbcond(ctx, TCG_COND_LE, ra, disp21);
3363 break;
3364 case 0x34:
3365 /* BSR */
3366 ret = gen_bdirect(ctx, ra, disp21);
3367 break;
3368 case 0x35: /* FBNE */
3369 ret = gen_fbcond(ctx, TCG_COND_NE, ra, disp21);
3370 break;
3371 case 0x36: /* FBGE */
3372 ret = gen_fbcond(ctx, TCG_COND_GE, ra, disp21);
3373 break;
3374 case 0x37: /* FBGT */
3375 ret = gen_fbcond(ctx, TCG_COND_GT, ra, disp21);
3376 break;
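/* For gen_bcond the final argument selects testing only the low bit
   of ra (BLBC/BLBS) instead of the whole register.  */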
3377 case 0x38:
3378 /* BLBC */
3379 ret = gen_bcond(ctx, TCG_COND_EQ, ra, disp21, 1);
3380 break;
3381 case 0x39:
3382 /* BEQ */
3383 ret = gen_bcond(ctx, TCG_COND_EQ, ra, disp21, 0);
3384 break;
3385 case 0x3A:
3386 /* BLT */
3387 ret = gen_bcond(ctx, TCG_COND_LT, ra, disp21, 0);
3388 break;
3389 case 0x3B:
3390 /* BLE */
3391 ret = gen_bcond(ctx, TCG_COND_LE, ra, disp21, 0);
3392 break;
3393 case 0x3C:
3394 /* BLBS */
3395 ret = gen_bcond(ctx, TCG_COND_NE, ra, disp21, 1);
3396 break;
3397 case 0x3D:
3398 /* BNE */
3399 ret = gen_bcond(ctx, TCG_COND_NE, ra, disp21, 0);
3400 break;
3401 case 0x3E:
3402 /* BGE */
3403 ret = gen_bcond(ctx, TCG_COND_GE, ra, disp21, 0);
3404 break;
3405 case 0x3F:
3406 /* BGT */
3407 ret = gen_bcond(ctx, TCG_COND_GT, ra, disp21, 0);
3408 break;
3409 invalid_opc:
3410 ret = gen_invalid(ctx);
3411 break;
3414 return ret;
3417 static inline void gen_intermediate_code_internal(AlphaCPU *cpu,
3418 TranslationBlock *tb,
3419 bool search_pc)
3421 CPUState *cs = CPU(cpu);
3422 CPUAlphaState *env = &cpu->env;
3423 DisasContext ctx, *ctxp = &ctx;
3424 target_ulong pc_start;
3425 target_ulong pc_mask;
3426 uint32_t insn;
3427 uint16_t *gen_opc_end;
3428 CPUBreakpoint *bp;
3429 int j, lj = -1;
3430 ExitStatus ret;
3431 int num_insns;
3432 int max_insns;
3434 pc_start = tb->pc;
3435 gen_opc_end = tcg_ctx.gen_opc_buf + OPC_MAX_SIZE;
3437 ctx.tb = tb;
3438 ctx.pc = pc_start;
3439 ctx.mem_idx = cpu_mmu_index(env);
3440 ctx.implver = env->implver;
3441 ctx.singlestep_enabled = cs->singlestep_enabled;
3443 /* ??? Every TB begins with unset rounding mode, to be initialized on
3444 the first fp insn of the TB. Alternatively we could define a proper
3445 default for every TB (e.g. QUAL_RM_N or QUAL_RM_D) and make sure
3446 to reset the FP_STATUS to that default at the end of any TB that
3447 changes the default. We could even (gasp) dynamically figure out
3448 what default would be most efficient given the running program. */
3449 ctx.tb_rm = -1;
3450 /* Similarly for flush-to-zero. */
3451 ctx.tb_ftz = -1;
3453 num_insns = 0;
3454 max_insns = tb->cflags & CF_COUNT_MASK;
3455 if (max_insns == 0) {
3456 max_insns = CF_COUNT_MASK;
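/* A TB never crosses a guest page boundary; within a superpage the
   effective page for that check is 2^41 bytes.  */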
3459 if (in_superpage(&ctx, pc_start)) {
3460 pc_mask = (1ULL << 41) - 1;
3461 } else {
3462 pc_mask = ~TARGET_PAGE_MASK;
3465 gen_tb_start();
3466 do {
3467 if (unlikely(!QTAILQ_EMPTY(&cs->breakpoints))) {
3468 QTAILQ_FOREACH(bp, &cs->breakpoints, entry) {
3469 if (bp->pc == ctx.pc) {
3470 gen_excp(&ctx, EXCP_DEBUG, 0);
3471 break;
3475 if (search_pc) {
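/* Record the guest PC and insn count for each generated op so that
   restore_state_to_opc can map a host PC back to a guest
   instruction boundary after an exception.  */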
3476 j = tcg_ctx.gen_opc_ptr - tcg_ctx.gen_opc_buf;
3477 if (lj < j) {
3478 lj++;
3479 while (lj < j)
3480 tcg_ctx.gen_opc_instr_start[lj++] = 0;
3482 tcg_ctx.gen_opc_pc[lj] = ctx.pc;
3483 tcg_ctx.gen_opc_instr_start[lj] = 1;
3484 tcg_ctx.gen_opc_icount[lj] = num_insns;
3486 if (num_insns + 1 == max_insns && (tb->cflags & CF_LAST_IO))
3487 gen_io_start();
3488 insn = cpu_ldl_code(env, ctx.pc);
3489 num_insns++;
3491 if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP | CPU_LOG_TB_OP_OPT))) {
3492 tcg_gen_debug_insn_start(ctx.pc);
3495 ctx.pc += 4;
3496 ret = translate_one(ctxp, insn);
3498 /* If we reach a page boundary, are single stepping,
3499 or exhaust instruction count, stop generation. */
3500 if (ret == NO_EXIT
3501 && ((ctx.pc & pc_mask) == 0
3502 || tcg_ctx.gen_opc_ptr >= gen_opc_end
3503 || num_insns >= max_insns
3504 || singlestep
3505 || ctx.singlestep_enabled)) {
3506 ret = EXIT_PC_STALE;
3508 } while (ret == NO_EXIT);
3510 if (tb->cflags & CF_LAST_IO) {
3511 gen_io_end();
3514 switch (ret) {
3515 case EXIT_GOTO_TB:
3516 case EXIT_NORETURN:
3517 break;
3518 case EXIT_PC_STALE:
3519 tcg_gen_movi_i64(cpu_pc, ctx.pc);
3520 /* FALLTHRU */
3521 case EXIT_PC_UPDATED:
3522 if (ctx.singlestep_enabled) {
3523 gen_excp_1(EXCP_DEBUG, 0);
3524 } else {
3525 tcg_gen_exit_tb(0);
3527 break;
3528 default:
3529 abort();
3532 gen_tb_end(tb, num_insns);
3533 *tcg_ctx.gen_opc_ptr = INDEX_op_end;
3534 if (search_pc) {
3535 j = tcg_ctx.gen_opc_ptr - tcg_ctx.gen_opc_buf;
3536 lj++;
3537 while (lj <= j)
3538 tcg_ctx.gen_opc_instr_start[lj++] = 0;
3539 } else {
3540 tb->size = ctx.pc - pc_start;
3541 tb->icount = num_insns;
3544 #ifdef DEBUG_DISAS
3545 if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
3546 qemu_log("IN: %s\n", lookup_symbol(pc_start));
3547 log_target_disas(env, pc_start, ctx.pc - pc_start, 1);
3548 qemu_log("\n");
3550 #endif
3553 void gen_intermediate_code (CPUAlphaState *env, struct TranslationBlock *tb)
3555 gen_intermediate_code_internal(alpha_env_get_cpu(env), tb, false);
3558 void gen_intermediate_code_pc (CPUAlphaState *env, struct TranslationBlock *tb)
3560 gen_intermediate_code_internal(alpha_env_get_cpu(env), tb, true);
3563 void restore_state_to_opc(CPUAlphaState *env, TranslationBlock *tb, int pc_pos)
3565 env->pc = tcg_ctx.gen_opc_pc[pc_pos];