target-alpha: Emit goto_tb opcodes.
[qemu.git] / target-alpha / translate.c
1 /*
2 * Alpha emulation cpu translation for qemu.
4 * Copyright (c) 2007 Jocelyn Mayer
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
20 #include <stdint.h>
21 #include <stdlib.h>
22 #include <stdio.h>
24 #include "cpu.h"
25 #include "exec-all.h"
26 #include "disas.h"
27 #include "host-utils.h"
28 #include "tcg-op.h"
29 #include "qemu-common.h"
31 #include "helper.h"
32 #define GEN_HELPER 1
33 #include "helper.h"
35 #undef ALPHA_DEBUG_DISAS
36 #define CONFIG_SOFTFLOAT_INLINE
38 #ifdef ALPHA_DEBUG_DISAS
39 # define LOG_DISAS(...) qemu_log_mask(CPU_LOG_TB_IN_ASM, ## __VA_ARGS__)
40 #else
41 # define LOG_DISAS(...) do { } while (0)
42 #endif
44 typedef struct DisasContext DisasContext;
45 struct DisasContext {
46 struct TranslationBlock *tb;
47 CPUAlphaState *env;
48 uint64_t pc;
49 int mem_idx;
50 #if !defined (CONFIG_USER_ONLY)
51 int pal_mode;
52 #endif
53 uint32_t amask;
55 /* Current rounding mode for this TB. */
56 int tb_rm;
57 /* Current flush-to-zero setting for this TB. */
58 int tb_ftz;
61 /* Return values from translate_one, indicating the state of the TB.
62 Note that zero indicates that we are not exiting the TB. */
64 typedef enum {
65 NO_EXIT,
67 /* We have emitted one or more goto_tb. No fixup required. */
68 EXIT_GOTO_TB,
70 /* We are not using a goto_tb (for whatever reason), but have updated
71 the PC (for whatever reason), so there's no need to do it again on
72 exiting the TB. */
73 EXIT_PC_UPDATED,
75 /* We are exiting the TB, but have neither emitted a goto_tb, nor
76 updated the PC for the next instruction to be executed. */
77 EXIT_PC_STALE
78 } ExitStatus;
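/* Informally, the caller is expected to act on these as follows when the
   TB ends (derived from the meanings above; the actual translation loop
   appears later in this file):

     EXIT_GOTO_TB    -> nothing more to do, goto_tb/exit_tb already emitted
     EXIT_PC_UPDATED -> emit a plain exit_tb; cpu_pc already holds the next PC
     EXIT_PC_STALE   -> store ctx->pc into cpu_pc first, then exit_tb
     NO_EXIT         -> keep translating the next instruction  */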
80 /* global register indexes */
81 static TCGv_ptr cpu_env;
82 static TCGv cpu_ir[31];
83 static TCGv cpu_fir[31];
84 static TCGv cpu_pc;
85 static TCGv cpu_lock;
86 #ifdef CONFIG_USER_ONLY
87 static TCGv cpu_uniq;
88 #endif
90 /* register names */
91 static char cpu_reg_names[10*4+21*5 + 10*5+21*6];
93 #include "gen-icount.h"
95 static void alpha_translate_init(void)
97 int i;
98 char *p;
99 static int done_init = 0;
101 if (done_init)
102 return;
104 cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");
106 p = cpu_reg_names;
107 for (i = 0; i < 31; i++) {
108 sprintf(p, "ir%d", i);
109 cpu_ir[i] = tcg_global_mem_new_i64(TCG_AREG0,
110 offsetof(CPUState, ir[i]), p);
111 p += (i < 10) ? 4 : 5;
113 sprintf(p, "fir%d", i);
114 cpu_fir[i] = tcg_global_mem_new_i64(TCG_AREG0,
115 offsetof(CPUState, fir[i]), p);
116 p += (i < 10) ? 5 : 6;
119 cpu_pc = tcg_global_mem_new_i64(TCG_AREG0,
120 offsetof(CPUState, pc), "pc");
122 cpu_lock = tcg_global_mem_new_i64(TCG_AREG0,
123 offsetof(CPUState, lock), "lock");
125 #ifdef CONFIG_USER_ONLY
126 cpu_uniq = tcg_global_mem_new_i64(TCG_AREG0,
127 offsetof(CPUState, unique), "uniq");
128 #endif
130 /* register helpers */
131 #define GEN_HELPER 2
132 #include "helper.h"
134 done_init = 1;
137 static inline void gen_excp(DisasContext *ctx, int exception, int error_code)
139 TCGv_i32 tmp1, tmp2;
141 tcg_gen_movi_i64(cpu_pc, ctx->pc);
142 tmp1 = tcg_const_i32(exception);
143 tmp2 = tcg_const_i32(error_code);
144 gen_helper_excp(tmp1, tmp2);
145 tcg_temp_free_i32(tmp2);
146 tcg_temp_free_i32(tmp1);
149 static inline void gen_invalid(DisasContext *ctx)
151 gen_excp(ctx, EXCP_OPCDEC, 0);
154 static inline void gen_qemu_ldf(TCGv t0, TCGv t1, int flags)
156 TCGv tmp = tcg_temp_new();
157 TCGv_i32 tmp32 = tcg_temp_new_i32();
158 tcg_gen_qemu_ld32u(tmp, t1, flags);
159 tcg_gen_trunc_i64_i32(tmp32, tmp);
160 gen_helper_memory_to_f(t0, tmp32);
161 tcg_temp_free_i32(tmp32);
162 tcg_temp_free(tmp);
165 static inline void gen_qemu_ldg(TCGv t0, TCGv t1, int flags)
167 TCGv tmp = tcg_temp_new();
168 tcg_gen_qemu_ld64(tmp, t1, flags);
169 gen_helper_memory_to_g(t0, tmp);
170 tcg_temp_free(tmp);
173 static inline void gen_qemu_lds(TCGv t0, TCGv t1, int flags)
175 TCGv tmp = tcg_temp_new();
176 TCGv_i32 tmp32 = tcg_temp_new_i32();
177 tcg_gen_qemu_ld32u(tmp, t1, flags);
178 tcg_gen_trunc_i64_i32(tmp32, tmp);
179 gen_helper_memory_to_s(t0, tmp32);
180 tcg_temp_free_i32(tmp32);
181 tcg_temp_free(tmp);
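/* Load-locked / store-conditional are modelled very simply below: LDL_L and
   LDQ_L record the locked virtual address in cpu_lock, and STL_C/STQ_C
   succeed (performing the store and writing 1 back to Ra) only if the store
   address still matches cpu_lock, then reset cpu_lock to -1, presumably
   meaning "no lock held".  Only the address is rechecked; these helpers do
   not compare data or track stores made by other CPUs.  */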
184 static inline void gen_qemu_ldl_l(TCGv t0, TCGv t1, int flags)
186 tcg_gen_mov_i64(cpu_lock, t1);
187 tcg_gen_qemu_ld32s(t0, t1, flags);
190 static inline void gen_qemu_ldq_l(TCGv t0, TCGv t1, int flags)
192 tcg_gen_mov_i64(cpu_lock, t1);
193 tcg_gen_qemu_ld64(t0, t1, flags);
196 static inline void gen_load_mem(DisasContext *ctx,
197 void (*tcg_gen_qemu_load)(TCGv t0, TCGv t1,
198 int flags),
199 int ra, int rb, int32_t disp16, int fp,
200 int clear)
202 TCGv addr;
204 if (unlikely(ra == 31))
205 return;
207 addr = tcg_temp_new();
208 if (rb != 31) {
209 tcg_gen_addi_i64(addr, cpu_ir[rb], disp16);
210 if (clear)
211 tcg_gen_andi_i64(addr, addr, ~0x7);
212 } else {
213 if (clear)
214 disp16 &= ~0x7;
215 tcg_gen_movi_i64(addr, disp16);
217 if (fp)
218 tcg_gen_qemu_load(cpu_fir[ra], addr, ctx->mem_idx);
219 else
220 tcg_gen_qemu_load(cpu_ir[ra], addr, ctx->mem_idx);
221 tcg_temp_free(addr);
224 static inline void gen_qemu_stf(TCGv t0, TCGv t1, int flags)
226 TCGv_i32 tmp32 = tcg_temp_new_i32();
227 TCGv tmp = tcg_temp_new();
228 gen_helper_f_to_memory(tmp32, t0);
229 tcg_gen_extu_i32_i64(tmp, tmp32);
230 tcg_gen_qemu_st32(tmp, t1, flags);
231 tcg_temp_free(tmp);
232 tcg_temp_free_i32(tmp32);
235 static inline void gen_qemu_stg(TCGv t0, TCGv t1, int flags)
237 TCGv tmp = tcg_temp_new();
238 gen_helper_g_to_memory(tmp, t0);
239 tcg_gen_qemu_st64(tmp, t1, flags);
240 tcg_temp_free(tmp);
243 static inline void gen_qemu_sts(TCGv t0, TCGv t1, int flags)
245 TCGv_i32 tmp32 = tcg_temp_new_i32();
246 TCGv tmp = tcg_temp_new();
247 gen_helper_s_to_memory(tmp32, t0);
248 tcg_gen_extu_i32_i64(tmp, tmp32);
249 tcg_gen_qemu_st32(tmp, t1, flags);
250 tcg_temp_free(tmp);
251 tcg_temp_free_i32(tmp32);
254 static inline void gen_qemu_stl_c(TCGv t0, TCGv t1, int flags)
256 int l1, l2;
258 l1 = gen_new_label();
259 l2 = gen_new_label();
260 tcg_gen_brcond_i64(TCG_COND_NE, cpu_lock, t1, l1);
261 tcg_gen_qemu_st32(t0, t1, flags);
262 tcg_gen_movi_i64(t0, 1);
263 tcg_gen_br(l2);
264 gen_set_label(l1);
265 tcg_gen_movi_i64(t0, 0);
266 gen_set_label(l2);
267 tcg_gen_movi_i64(cpu_lock, -1);
270 static inline void gen_qemu_stq_c(TCGv t0, TCGv t1, int flags)
272 int l1, l2;
274 l1 = gen_new_label();
275 l2 = gen_new_label();
276 tcg_gen_brcond_i64(TCG_COND_NE, cpu_lock, t1, l1);
277 tcg_gen_qemu_st64(t0, t1, flags);
278 tcg_gen_movi_i64(t0, 1);
279 tcg_gen_br(l2);
280 gen_set_label(l1);
281 tcg_gen_movi_i64(t0, 0);
282 gen_set_label(l2);
283 tcg_gen_movi_i64(cpu_lock, -1);
286 static inline void gen_store_mem(DisasContext *ctx,
287 void (*tcg_gen_qemu_store)(TCGv t0, TCGv t1,
288 int flags),
289 int ra, int rb, int32_t disp16, int fp,
290 int clear, int local)
292 TCGv addr;
293 if (local)
294 addr = tcg_temp_local_new();
295 else
296 addr = tcg_temp_new();
297 if (rb != 31) {
298 tcg_gen_addi_i64(addr, cpu_ir[rb], disp16);
299 if (clear)
300 tcg_gen_andi_i64(addr, addr, ~0x7);
301 } else {
302 if (clear)
303 disp16 &= ~0x7;
304 tcg_gen_movi_i64(addr, disp16);
306 if (ra != 31) {
307 if (fp)
308 tcg_gen_qemu_store(cpu_fir[ra], addr, ctx->mem_idx);
309 else
310 tcg_gen_qemu_store(cpu_ir[ra], addr, ctx->mem_idx);
311 } else {
312 TCGv zero;
313 if (local)
314 zero = tcg_const_local_i64(0);
315 else
316 zero = tcg_const_i64(0);
317 tcg_gen_qemu_store(zero, addr, ctx->mem_idx);
318 tcg_temp_free(zero);
320 tcg_temp_free(addr);
323 static int use_goto_tb(DisasContext *ctx, uint64_t dest)
325 /* Check for the dest on the same page as the start of the TB. We
326 also want to suppress goto_tb in the case of single-stepping and IO. */
327 return (((ctx->tb->pc ^ dest) & TARGET_PAGE_MASK) == 0
328 && !ctx->env->singlestep_enabled
329 && !(ctx->tb->cflags & CF_LAST_IO));
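/* When use_goto_tb allows it, the branch generators below emit a
   goto_tb/exit_tb pair per destination: gen_bcond_internal uses slot 0 for
   the fall-through path and slot 1 for the taken path, and passes
   (long)ctx->tb plus the slot index to exit_tb so that the execution loop
   can later chain this TB directly to the destination TB.  */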
332 static ExitStatus gen_bdirect(DisasContext *ctx, int ra, int32_t disp)
334 uint64_t dest = ctx->pc + (disp << 2);
336 if (ra != 31) {
337 tcg_gen_movi_i64(cpu_ir[ra], ctx->pc);
340 /* Notice branch-to-next; used to initialize RA with the PC. */
341 if (disp == 0) {
342 return 0;
343 } else if (use_goto_tb(ctx, dest)) {
344 tcg_gen_goto_tb(0);
345 tcg_gen_movi_i64(cpu_pc, dest);
346 tcg_gen_exit_tb((long)ctx->tb);
347 return EXIT_GOTO_TB;
348 } else {
349 tcg_gen_movi_i64(cpu_pc, dest);
350 return EXIT_PC_UPDATED;
354 static ExitStatus gen_bcond_internal(DisasContext *ctx, TCGCond cond,
355 TCGv cmp, int32_t disp)
357 uint64_t dest = ctx->pc + (disp << 2);
358 int lab_true = gen_new_label();
360 if (use_goto_tb(ctx, dest)) {
361 tcg_gen_brcondi_i64(cond, cmp, 0, lab_true);
363 tcg_gen_goto_tb(0);
364 tcg_gen_movi_i64(cpu_pc, ctx->pc);
365 tcg_gen_exit_tb((long)ctx->tb);
367 gen_set_label(lab_true);
368 tcg_gen_goto_tb(1);
369 tcg_gen_movi_i64(cpu_pc, dest);
370 tcg_gen_exit_tb((long)ctx->tb + 1);
372 return EXIT_GOTO_TB;
373 } else {
374 int lab_over = gen_new_label();
376 /* ??? Consider using either
377 movi pc, next
378 addi tmp, pc, disp
379 movcond pc, cond, 0, tmp, pc
381 setcond tmp, cond, 0
382 movi pc, next
383 neg tmp, tmp
384 andi tmp, tmp, disp
385 add pc, pc, tmp
386 The current diamond subgraph surely isn't efficient. */
388 tcg_gen_brcondi_i64(cond, cmp, 0, lab_true);
389 tcg_gen_movi_i64(cpu_pc, ctx->pc);
390 tcg_gen_br(lab_over);
391 gen_set_label(lab_true);
392 tcg_gen_movi_i64(cpu_pc, dest);
393 gen_set_label(lab_over);
395 return EXIT_PC_UPDATED;
399 static ExitStatus gen_bcond(DisasContext *ctx, TCGCond cond, int ra,
400 int32_t disp, int mask)
402 TCGv cmp_tmp;
404 if (unlikely(ra == 31)) {
405 cmp_tmp = tcg_const_i64(0);
406 } else {
407 cmp_tmp = tcg_temp_new();
408 if (mask) {
409 tcg_gen_andi_i64(cmp_tmp, cpu_ir[ra], 1);
410 } else {
411 tcg_gen_mov_i64(cmp_tmp, cpu_ir[ra]);
415 return gen_bcond_internal(ctx, cond, cmp_tmp, disp);
418 /* Fold -0.0 for comparison with COND. */
420 static void gen_fold_mzero(TCGCond cond, TCGv dest, TCGv src)
422 uint64_t mzero = 1ull << 63;
424 switch (cond) {
425 case TCG_COND_LE:
426 case TCG_COND_GT:
427 /* For <= or >, the -0.0 value directly compares the way we want. */
428 tcg_gen_mov_i64(dest, src);
429 break;
431 case TCG_COND_EQ:
432 case TCG_COND_NE:
433 /* For == or !=, we can simply mask off the sign bit and compare. */
434 tcg_gen_andi_i64(dest, src, mzero - 1);
435 break;
437 case TCG_COND_GE:
438 case TCG_COND_LT:
439 /* For >= or <, map -0.0 to +0.0 via comparison and mask. */
440 tcg_gen_setcondi_i64(TCG_COND_NE, dest, src, mzero);
441 tcg_gen_neg_i64(dest, dest);
442 tcg_gen_and_i64(dest, dest, src);
443 break;
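        /* E.g. for src == 0x8000000000000000 (-0.0) the setcond above
           produces 0, the negation leaves 0 and the AND yields +0.0; for
           any other value the setcond/neg pair produces all-ones and the
           AND passes src through unchanged.  */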
445 default:
446 abort();
450 static ExitStatus gen_fbcond(DisasContext *ctx, TCGCond cond, int ra,
451 int32_t disp)
453 TCGv cmp_tmp;
455 if (unlikely(ra == 31)) {
456 /* Very uncommon case, but it is easier to optimize this to an integer
457 comparison than to continue with the floating-point comparison. */
458 return gen_bcond(ctx, cond, ra, disp, 0);
461 cmp_tmp = tcg_temp_new();
462 gen_fold_mzero(cond, cmp_tmp, cpu_fir[ra]);
463 return gen_bcond_internal(ctx, cond, cmp_tmp, disp);
466 static void gen_cmov(TCGCond cond, int ra, int rb, int rc,
467 int islit, uint8_t lit, int mask)
469 TCGCond inv_cond = tcg_invert_cond(cond);
470 int l1;
472 if (unlikely(rc == 31))
473 return;
475 l1 = gen_new_label();
477 if (ra != 31) {
478 if (mask) {
479 TCGv tmp = tcg_temp_new();
480 tcg_gen_andi_i64(tmp, cpu_ir[ra], 1);
481 tcg_gen_brcondi_i64(inv_cond, tmp, 0, l1);
482 tcg_temp_free(tmp);
483 } else
484 tcg_gen_brcondi_i64(inv_cond, cpu_ir[ra], 0, l1);
485 } else {
486 /* Very uncommon case - Do not bother to optimize. */
487 TCGv tmp = tcg_const_i64(0);
488 tcg_gen_brcondi_i64(inv_cond, tmp, 0, l1);
489 tcg_temp_free(tmp);
492 if (islit)
493 tcg_gen_movi_i64(cpu_ir[rc], lit);
494 else
495 tcg_gen_mov_i64(cpu_ir[rc], cpu_ir[rb]);
496 gen_set_label(l1);
499 static void gen_fcmov(TCGCond cond, int ra, int rb, int rc)
501 TCGv cmp_tmp;
502 int l1;
504 if (unlikely(rc == 31)) {
505 return;
508 cmp_tmp = tcg_temp_new();
509 if (unlikely(ra == 31)) {
510 tcg_gen_movi_i64(cmp_tmp, 0);
511 } else {
512 gen_fold_mzero(cond, cmp_tmp, cpu_fir[ra]);
515 l1 = gen_new_label();
516 tcg_gen_brcondi_i64(tcg_invert_cond(cond), cmp_tmp, 0, l1);
517 tcg_temp_free(cmp_tmp);
519 if (rb != 31)
520 tcg_gen_mov_i64(cpu_fir[rc], cpu_fir[rb]);
521 else
522 tcg_gen_movi_i64(cpu_fir[rc], 0);
523 gen_set_label(l1);
526 #define QUAL_RM_N 0x080 /* Round mode nearest even */
527 #define QUAL_RM_C 0x000 /* Round mode chopped */
528 #define QUAL_RM_M 0x040 /* Round mode minus infinity */
529 #define QUAL_RM_D 0x0c0 /* Round mode dynamic */
530 #define QUAL_RM_MASK 0x0c0
532 #define QUAL_U 0x100 /* Underflow enable (fp output) */
533 #define QUAL_V 0x100 /* Overflow enable (int output) */
534 #define QUAL_S 0x400 /* Software completion enable */
535 #define QUAL_I 0x200 /* Inexact detection enable */
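/* The low six bits of the 11-bit function field (fpfn below) select the base
   operation; the qualifiers above occupy fn11<10:6>: rounding mode in <7:6>,
   /U or /V in <8>, /I in <9> and /S in <10>.  */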
537 static void gen_qual_roundmode(DisasContext *ctx, int fn11)
539 TCGv_i32 tmp;
541 fn11 &= QUAL_RM_MASK;
542 if (fn11 == ctx->tb_rm) {
543 return;
545 ctx->tb_rm = fn11;
547 tmp = tcg_temp_new_i32();
548 switch (fn11) {
549 case QUAL_RM_N:
550 tcg_gen_movi_i32(tmp, float_round_nearest_even);
551 break;
552 case QUAL_RM_C:
553 tcg_gen_movi_i32(tmp, float_round_to_zero);
554 break;
555 case QUAL_RM_M:
556 tcg_gen_movi_i32(tmp, float_round_down);
557 break;
558 case QUAL_RM_D:
559 tcg_gen_ld8u_i32(tmp, cpu_env, offsetof(CPUState, fpcr_dyn_round));
560 break;
563 #if defined(CONFIG_SOFTFLOAT_INLINE)
564 /* ??? The "softfloat.h" interface is to call set_float_rounding_mode.
565 With CONFIG_SOFTFLOAT that expands to an out-of-line call that just
566 sets the one field. */
567 tcg_gen_st8_i32(tmp, cpu_env,
568 offsetof(CPUState, fp_status.float_rounding_mode));
569 #else
570 gen_helper_setroundmode(tmp);
571 #endif
573 tcg_temp_free_i32(tmp);
576 static void gen_qual_flushzero(DisasContext *ctx, int fn11)
578 TCGv_i32 tmp;
580 fn11 &= QUAL_U;
581 if (fn11 == ctx->tb_ftz) {
582 return;
584 ctx->tb_ftz = fn11;
586 tmp = tcg_temp_new_i32();
587 if (fn11) {
588 /* Underflow is enabled, use the FPCR setting. */
589 tcg_gen_ld8u_i32(tmp, cpu_env, offsetof(CPUState, fpcr_flush_to_zero));
590 } else {
591 /* Underflow is disabled, force flush-to-zero. */
592 tcg_gen_movi_i32(tmp, 1);
595 #if defined(CONFIG_SOFTFLOAT_INLINE)
596 tcg_gen_st8_i32(tmp, cpu_env,
597 offsetof(CPUState, fp_status.flush_to_zero));
598 #else
599 gen_helper_setflushzero(tmp);
600 #endif
602 tcg_temp_free_i32(tmp);
605 static TCGv gen_ieee_input(int reg, int fn11, int is_cmp)
607 TCGv val = tcg_temp_new();
608 if (reg == 31) {
609 tcg_gen_movi_i64(val, 0);
610 } else if (fn11 & QUAL_S) {
611 gen_helper_ieee_input_s(val, cpu_fir[reg]);
612 } else if (is_cmp) {
613 gen_helper_ieee_input_cmp(val, cpu_fir[reg]);
614 } else {
615 gen_helper_ieee_input(val, cpu_fir[reg]);
617 return val;
620 static void gen_fp_exc_clear(void)
622 #if defined(CONFIG_SOFTFLOAT_INLINE)
623 TCGv_i32 zero = tcg_const_i32(0);
624 tcg_gen_st8_i32(zero, cpu_env,
625 offsetof(CPUState, fp_status.float_exception_flags));
626 tcg_temp_free_i32(zero);
627 #else
628 gen_helper_fp_exc_clear();
629 #endif
632 static void gen_fp_exc_raise_ignore(int rc, int fn11, int ignore)
634 /* ??? We ought to be able to do something with imprecise exceptions.
635 E.g. notice we're still in the trap shadow of something within the
636 TB and do not generate the code to signal the exception; end the TB
637 when an exception is forced to arrive, either by consumption of a
638 register value or TRAPB or EXCB. */
639 TCGv_i32 exc = tcg_temp_new_i32();
640 TCGv_i32 reg;
642 #if defined(CONFIG_SOFTFLOAT_INLINE)
643 tcg_gen_ld8u_i32(exc, cpu_env,
644 offsetof(CPUState, fp_status.float_exception_flags));
645 #else
646 gen_helper_fp_exc_get(exc);
647 #endif
649 if (ignore) {
650 tcg_gen_andi_i32(exc, exc, ~ignore);
653 /* ??? Pass in the regno of the destination so that the helper can
654 set EXC_MASK, which contains a bitmask of destination registers
655 that have caused arithmetic traps. A simple userspace emulation
656 does not require this. We do need it for a guest kernel's entArith,
657 or if we were to do something clever with imprecise exceptions. */
658 reg = tcg_const_i32(rc + 32);
660 if (fn11 & QUAL_S) {
661 gen_helper_fp_exc_raise_s(exc, reg);
662 } else {
663 gen_helper_fp_exc_raise(exc, reg);
666 tcg_temp_free_i32(reg);
667 tcg_temp_free_i32(exc);
670 static inline void gen_fp_exc_raise(int rc, int fn11)
672 gen_fp_exc_raise_ignore(rc, fn11, fn11 & QUAL_I ? 0 : float_flag_inexact);
675 static void gen_fcvtlq(int rb, int rc)
677 if (unlikely(rc == 31)) {
678 return;
680 if (unlikely(rb == 31)) {
681 tcg_gen_movi_i64(cpu_fir[rc], 0);
682 } else {
683 TCGv tmp = tcg_temp_new();
685 /* The arithmetic right shift here, plus the sign-extended mask below
686 yields a sign-extended result without an explicit ext32s_i64. */
687 tcg_gen_sari_i64(tmp, cpu_fir[rb], 32);
688 tcg_gen_shri_i64(cpu_fir[rc], cpu_fir[rb], 29);
689 tcg_gen_andi_i64(tmp, tmp, (int32_t)0xc0000000);
690 tcg_gen_andi_i64(cpu_fir[rc], cpu_fir[rc], 0x3fffffff);
691 tcg_gen_or_i64(cpu_fir[rc], cpu_fir[rc], tmp);
693 tcg_temp_free(tmp);
697 static void gen_fcvtql(int rb, int rc)
699 if (unlikely(rc == 31)) {
700 return;
702 if (unlikely(rb == 31)) {
703 tcg_gen_movi_i64(cpu_fir[rc], 0);
704 } else {
705 TCGv tmp = tcg_temp_new();
707 tcg_gen_andi_i64(tmp, cpu_fir[rb], 0xC0000000);
708 tcg_gen_andi_i64(cpu_fir[rc], cpu_fir[rb], 0x3FFFFFFF);
709 tcg_gen_shli_i64(tmp, tmp, 32);
710 tcg_gen_shli_i64(cpu_fir[rc], cpu_fir[rc], 29);
711 tcg_gen_or_i64(cpu_fir[rc], cpu_fir[rc], tmp);
713 tcg_temp_free(tmp);
717 static void gen_fcvtql_v(DisasContext *ctx, int rb, int rc)
719 if (rb != 31) {
720 int lab = gen_new_label();
721 TCGv tmp = tcg_temp_new();
723 tcg_gen_ext32s_i64(tmp, cpu_fir[rb]);
724 tcg_gen_brcond_i64(TCG_COND_EQ, tmp, cpu_fir[rb], lab);
725 gen_excp(ctx, EXCP_ARITH, EXC_M_IOV);
727 gen_set_label(lab);
729 gen_fcvtql(rb, rc);
732 #define FARITH2(name) \
733 static inline void glue(gen_f, name)(int rb, int rc) \
735 if (unlikely(rc == 31)) { \
736 return; \
738 if (rb != 31) { \
739 gen_helper_ ## name (cpu_fir[rc], cpu_fir[rb]); \
740 } else { \
741 TCGv tmp = tcg_const_i64(0); \
742 gen_helper_ ## name (cpu_fir[rc], tmp); \
743 tcg_temp_free(tmp); \
747 /* ??? VAX instruction qualifiers ignored. */
748 FARITH2(sqrtf)
749 FARITH2(sqrtg)
750 FARITH2(cvtgf)
751 FARITH2(cvtgq)
752 FARITH2(cvtqf)
753 FARITH2(cvtqg)
755 static void gen_ieee_arith2(DisasContext *ctx, void (*helper)(TCGv, TCGv),
756 int rb, int rc, int fn11)
758 TCGv vb;
760 /* ??? This is wrong: the instruction is not a nop, it still may
761 raise exceptions. */
762 if (unlikely(rc == 31)) {
763 return;
766 gen_qual_roundmode(ctx, fn11);
767 gen_qual_flushzero(ctx, fn11);
768 gen_fp_exc_clear();
770 vb = gen_ieee_input(rb, fn11, 0);
771 helper(cpu_fir[rc], vb);
772 tcg_temp_free(vb);
774 gen_fp_exc_raise(rc, fn11);
777 #define IEEE_ARITH2(name) \
778 static inline void glue(gen_f, name)(DisasContext *ctx, \
779 int rb, int rc, int fn11) \
781 gen_ieee_arith2(ctx, gen_helper_##name, rb, rc, fn11); \
783 IEEE_ARITH2(sqrts)
784 IEEE_ARITH2(sqrtt)
785 IEEE_ARITH2(cvtst)
786 IEEE_ARITH2(cvtts)
788 static void gen_fcvttq(DisasContext *ctx, int rb, int rc, int fn11)
790 TCGv vb;
791 int ignore = 0;
793 /* ??? This is wrong: the instruction is not a nop, it still may
794 raise exceptions. */
795 if (unlikely(rc == 31)) {
796 return;
799 /* No need to set flushzero, since we have an integer output. */
800 gen_fp_exc_clear();
801 vb = gen_ieee_input(rb, fn11, 0);
803 /* Almost all integer conversions use chopped rounding, and most
804 also do not have integer overflow enabled. Special case that. */
805 switch (fn11) {
806 case QUAL_RM_C:
807 gen_helper_cvttq_c(cpu_fir[rc], vb);
808 break;
809 case QUAL_V | QUAL_RM_C:
810 case QUAL_S | QUAL_V | QUAL_RM_C:
811 ignore = float_flag_inexact;
812 /* FALLTHRU */
813 case QUAL_S | QUAL_V | QUAL_I | QUAL_RM_C:
814 gen_helper_cvttq_svic(cpu_fir[rc], vb);
815 break;
816 default:
817 gen_qual_roundmode(ctx, fn11);
818 gen_helper_cvttq(cpu_fir[rc], vb);
819 ignore |= (fn11 & QUAL_V ? 0 : float_flag_overflow);
820 ignore |= (fn11 & QUAL_I ? 0 : float_flag_inexact);
821 break;
823 tcg_temp_free(vb);
825 gen_fp_exc_raise_ignore(rc, fn11, ignore);
828 static void gen_ieee_intcvt(DisasContext *ctx, void (*helper)(TCGv, TCGv),
829 int rb, int rc, int fn11)
831 TCGv vb;
833 /* ??? This is wrong: the instruction is not a nop, it still may
834 raise exceptions. */
835 if (unlikely(rc == 31)) {
836 return;
839 gen_qual_roundmode(ctx, fn11);
841 if (rb == 31) {
842 vb = tcg_const_i64(0);
843 } else {
844 vb = cpu_fir[rb];
847 /* The only exception that can be raised by integer conversion
848 is inexact. Thus we only need to worry about exceptions when
849 inexact handling is requested. */
850 if (fn11 & QUAL_I) {
851 gen_fp_exc_clear();
852 helper(cpu_fir[rc], vb);
853 gen_fp_exc_raise(rc, fn11);
854 } else {
855 helper(cpu_fir[rc], vb);
858 if (rb == 31) {
859 tcg_temp_free(vb);
863 #define IEEE_INTCVT(name) \
864 static inline void glue(gen_f, name)(DisasContext *ctx, \
865 int rb, int rc, int fn11) \
867 gen_ieee_intcvt(ctx, gen_helper_##name, rb, rc, fn11); \
869 IEEE_INTCVT(cvtqs)
870 IEEE_INTCVT(cvtqt)
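/* The CPYS family builds its result from the bits of Fa selected by MASK
   (complemented first when INV_A is set) together with the bits of Fb
   outside MASK: a mask of 0x8000000000000000 copies just the sign
   (CPYS/CPYSN), 0xFFF0000000000000 copies sign and exponent (CPYSE).  */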
872 static void gen_cpys_internal(int ra, int rb, int rc, int inv_a, uint64_t mask)
874 TCGv va, vb, vmask;
875 int za = 0, zb = 0;
877 if (unlikely(rc == 31)) {
878 return;
881 vmask = tcg_const_i64(mask);
883 TCGV_UNUSED_I64(va);
884 if (ra == 31) {
885 if (inv_a) {
886 va = vmask;
887 } else {
888 za = 1;
890 } else {
891 va = tcg_temp_new_i64();
892 tcg_gen_mov_i64(va, cpu_fir[ra]);
893 if (inv_a) {
894 tcg_gen_andc_i64(va, vmask, va);
895 } else {
896 tcg_gen_and_i64(va, va, vmask);
900 TCGV_UNUSED_I64(vb);
901 if (rb == 31) {
902 zb = 1;
903 } else {
904 vb = tcg_temp_new_i64();
905 tcg_gen_andc_i64(vb, cpu_fir[rb], vmask);
908 switch (za << 1 | zb) {
909 case 0 | 0:
910 tcg_gen_or_i64(cpu_fir[rc], va, vb);
911 break;
912 case 0 | 1:
913 tcg_gen_mov_i64(cpu_fir[rc], va);
914 break;
915 case 2 | 0:
916 tcg_gen_mov_i64(cpu_fir[rc], vb);
917 break;
918 case 2 | 1:
919 tcg_gen_movi_i64(cpu_fir[rc], 0);
920 break;
923 tcg_temp_free(vmask);
924 if (ra != 31) {
925 tcg_temp_free(va);
927 if (rb != 31) {
928 tcg_temp_free(vb);
932 static inline void gen_fcpys(int ra, int rb, int rc)
934 gen_cpys_internal(ra, rb, rc, 0, 0x8000000000000000ULL);
937 static inline void gen_fcpysn(int ra, int rb, int rc)
939 gen_cpys_internal(ra, rb, rc, 1, 0x8000000000000000ULL);
942 static inline void gen_fcpyse(int ra, int rb, int rc)
944 gen_cpys_internal(ra, rb, rc, 0, 0xFFF0000000000000ULL);
947 #define FARITH3(name) \
948 static inline void glue(gen_f, name)(int ra, int rb, int rc) \
950 TCGv va, vb; \
952 if (unlikely(rc == 31)) { \
953 return; \
955 if (ra == 31) { \
956 va = tcg_const_i64(0); \
957 } else { \
958 va = cpu_fir[ra]; \
960 if (rb == 31) { \
961 vb = tcg_const_i64(0); \
962 } else { \
963 vb = cpu_fir[rb]; \
966 gen_helper_ ## name (cpu_fir[rc], va, vb); \
968 if (ra == 31) { \
969 tcg_temp_free(va); \
971 if (rb == 31) { \
972 tcg_temp_free(vb); \
976 /* ??? VAX instruction qualifiers ignored. */
977 FARITH3(addf)
978 FARITH3(subf)
979 FARITH3(mulf)
980 FARITH3(divf)
981 FARITH3(addg)
982 FARITH3(subg)
983 FARITH3(mulg)
984 FARITH3(divg)
985 FARITH3(cmpgeq)
986 FARITH3(cmpglt)
987 FARITH3(cmpgle)
989 static void gen_ieee_arith3(DisasContext *ctx,
990 void (*helper)(TCGv, TCGv, TCGv),
991 int ra, int rb, int rc, int fn11)
993 TCGv va, vb;
995 /* ??? This is wrong: the instruction is not a nop, it still may
996 raise exceptions. */
997 if (unlikely(rc == 31)) {
998 return;
1001 gen_qual_roundmode(ctx, fn11);
1002 gen_qual_flushzero(ctx, fn11);
1003 gen_fp_exc_clear();
1005 va = gen_ieee_input(ra, fn11, 0);
1006 vb = gen_ieee_input(rb, fn11, 0);
1007 helper(cpu_fir[rc], va, vb);
1008 tcg_temp_free(va);
1009 tcg_temp_free(vb);
1011 gen_fp_exc_raise(rc, fn11);
1014 #define IEEE_ARITH3(name) \
1015 static inline void glue(gen_f, name)(DisasContext *ctx, \
1016 int ra, int rb, int rc, int fn11) \
1018 gen_ieee_arith3(ctx, gen_helper_##name, ra, rb, rc, fn11); \
1020 IEEE_ARITH3(adds)
1021 IEEE_ARITH3(subs)
1022 IEEE_ARITH3(muls)
1023 IEEE_ARITH3(divs)
1024 IEEE_ARITH3(addt)
1025 IEEE_ARITH3(subt)
1026 IEEE_ARITH3(mult)
1027 IEEE_ARITH3(divt)
1029 static void gen_ieee_compare(DisasContext *ctx,
1030 void (*helper)(TCGv, TCGv, TCGv),
1031 int ra, int rb, int rc, int fn11)
1033 TCGv va, vb;
1035 /* ??? This is wrong: the instruction is not a nop, it still may
1036 raise exceptions. */
1037 if (unlikely(rc == 31)) {
1038 return;
1041 gen_fp_exc_clear();
1043 va = gen_ieee_input(ra, fn11, 1);
1044 vb = gen_ieee_input(rb, fn11, 1);
1045 helper(cpu_fir[rc], va, vb);
1046 tcg_temp_free(va);
1047 tcg_temp_free(vb);
1049 gen_fp_exc_raise(rc, fn11);
1052 #define IEEE_CMP3(name) \
1053 static inline void glue(gen_f, name)(DisasContext *ctx, \
1054 int ra, int rb, int rc, int fn11) \
1056 gen_ieee_compare(ctx, gen_helper_##name, ra, rb, rc, fn11); \
1058 IEEE_CMP3(cmptun)
1059 IEEE_CMP3(cmpteq)
1060 IEEE_CMP3(cmptlt)
1061 IEEE_CMP3(cmptle)
1063 static inline uint64_t zapnot_mask(uint8_t lit)
1065 uint64_t mask = 0;
1066 int i;
1068 for (i = 0; i < 8; ++i) {
1069 if ((lit >> i) & 1)
1070 mask |= 0xffull << (i * 8);
1072 return mask;
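/* For example, zapnot_mask(0x0f) == 0x00000000ffffffffull, which is why the
   0x0f case below can use ext32u directly.  */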
1075 /* Implement zapnot with an immediate operand, which expands to some
1076 form of immediate AND. This is a basic building block in the
1077 definition of many of the other byte manipulation instructions. */
1078 static void gen_zapnoti(TCGv dest, TCGv src, uint8_t lit)
1080 switch (lit) {
1081 case 0x00:
1082 tcg_gen_movi_i64(dest, 0);
1083 break;
1084 case 0x01:
1085 tcg_gen_ext8u_i64(dest, src);
1086 break;
1087 case 0x03:
1088 tcg_gen_ext16u_i64(dest, src);
1089 break;
1090 case 0x0f:
1091 tcg_gen_ext32u_i64(dest, src);
1092 break;
1093 case 0xff:
1094 tcg_gen_mov_i64(dest, src);
1095 break;
1096 default:
1097 tcg_gen_andi_i64 (dest, src, zapnot_mask (lit));
1098 break;
1102 static inline void gen_zapnot(int ra, int rb, int rc, int islit, uint8_t lit)
1104 if (unlikely(rc == 31))
1105 return;
1106 else if (unlikely(ra == 31))
1107 tcg_gen_movi_i64(cpu_ir[rc], 0);
1108 else if (islit)
1109 gen_zapnoti(cpu_ir[rc], cpu_ir[ra], lit);
1110 else
1111 gen_helper_zapnot (cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
1114 static inline void gen_zap(int ra, int rb, int rc, int islit, uint8_t lit)
1116 if (unlikely(rc == 31))
1117 return;
1118 else if (unlikely(ra == 31))
1119 tcg_gen_movi_i64(cpu_ir[rc], 0);
1120 else if (islit)
1121 gen_zapnoti(cpu_ir[rc], cpu_ir[ra], ~lit);
1122 else
1123 gen_helper_zap (cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
1127 /* EXTWH, EXTLH, EXTQH */
1128 static void gen_ext_h(int ra, int rb, int rc, int islit,
1129 uint8_t lit, uint8_t byte_mask)
1131 if (unlikely(rc == 31))
1132 return;
1133 else if (unlikely(ra == 31))
1134 tcg_gen_movi_i64(cpu_ir[rc], 0);
1135 else {
1136 if (islit) {
1137 lit = (64 - (lit & 7) * 8) & 0x3f;
1138 tcg_gen_shli_i64(cpu_ir[rc], cpu_ir[ra], lit);
1139 } else {
1140 TCGv tmp1 = tcg_temp_new();
1141 tcg_gen_andi_i64(tmp1, cpu_ir[rb], 7);
1142 tcg_gen_shli_i64(tmp1, tmp1, 3);
1143 tcg_gen_neg_i64(tmp1, tmp1);
1144 tcg_gen_andi_i64(tmp1, tmp1, 0x3f);
1145 tcg_gen_shl_i64(cpu_ir[rc], cpu_ir[ra], tmp1);
1146 tcg_temp_free(tmp1);
1148 gen_zapnoti(cpu_ir[rc], cpu_ir[rc], byte_mask);
1152 /* EXTBL, EXTWL, EXTLL, EXTQL */
1153 static void gen_ext_l(int ra, int rb, int rc, int islit,
1154 uint8_t lit, uint8_t byte_mask)
1156 if (unlikely(rc == 31))
1157 return;
1158 else if (unlikely(ra == 31))
1159 tcg_gen_movi_i64(cpu_ir[rc], 0);
1160 else {
1161 if (islit) {
1162 tcg_gen_shri_i64(cpu_ir[rc], cpu_ir[ra], (lit & 7) * 8);
1163 } else {
1164 TCGv tmp = tcg_temp_new();
1165 tcg_gen_andi_i64(tmp, cpu_ir[rb], 7);
1166 tcg_gen_shli_i64(tmp, tmp, 3);
1167 tcg_gen_shr_i64(cpu_ir[rc], cpu_ir[ra], tmp);
1168 tcg_temp_free(tmp);
1170 gen_zapnoti(cpu_ir[rc], cpu_ir[rc], byte_mask);
1174 /* INSWH, INSLH, INSQH */
1175 static void gen_ins_h(int ra, int rb, int rc, int islit,
1176 uint8_t lit, uint8_t byte_mask)
1178 if (unlikely(rc == 31))
1179 return;
1180 else if (unlikely(ra == 31) || (islit && (lit & 7) == 0))
1181 tcg_gen_movi_i64(cpu_ir[rc], 0);
1182 else {
1183 TCGv tmp = tcg_temp_new();
1185 /* The instruction description has us left-shift the byte mask
1186 and extract bits <15:8> and apply that zap at the end. This
1187 is equivalent to simply performing the zap first and shifting
1188 afterward. */
1189 gen_zapnoti (tmp, cpu_ir[ra], byte_mask);
1191 if (islit) {
1192 /* Note that we have handled the lit==0 case above. */
1193 tcg_gen_shri_i64 (cpu_ir[rc], tmp, 64 - (lit & 7) * 8);
1194 } else {
1195 TCGv shift = tcg_temp_new();
1197 /* If (B & 7) == 0, we need to shift by 64 and leave a zero.
1198 Do this portably by splitting the shift into two parts:
1199 shift_count-1 and 1. Arrange for the -1 by using
1200 ones-complement instead of twos-complement in the negation:
1201 ~((B & 7) * 8) & 63. */
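            /* For example, (B & 7) == 3 gives ~(3 * 8) & 63 == 39; shifting
               right by 39 and then by 1 is the required 40 == 64 - 3*8,
               while (B & 7) == 0 makes the two shifts total 64 without ever
               needing a single shift count of 64.  */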
1203 tcg_gen_andi_i64(shift, cpu_ir[rb], 7);
1204 tcg_gen_shli_i64(shift, shift, 3);
1205 tcg_gen_not_i64(shift, shift);
1206 tcg_gen_andi_i64(shift, shift, 0x3f);
1208 tcg_gen_shr_i64(cpu_ir[rc], tmp, shift);
1209 tcg_gen_shri_i64(cpu_ir[rc], cpu_ir[rc], 1);
1210 tcg_temp_free(shift);
1212 tcg_temp_free(tmp);
1216 /* INSBL, INSWL, INSLL, INSQL */
1217 static void gen_ins_l(int ra, int rb, int rc, int islit,
1218 uint8_t lit, uint8_t byte_mask)
1220 if (unlikely(rc == 31))
1221 return;
1222 else if (unlikely(ra == 31))
1223 tcg_gen_movi_i64(cpu_ir[rc], 0);
1224 else {
1225 TCGv tmp = tcg_temp_new();
1227 /* The instruction description has us left-shift the byte mask
1228 the same number of byte slots as the data and apply the zap
1229 at the end. This is equivalent to simply performing the zap
1230 first and shifting afterward. */
1231 gen_zapnoti (tmp, cpu_ir[ra], byte_mask);
1233 if (islit) {
1234 tcg_gen_shli_i64(cpu_ir[rc], tmp, (lit & 7) * 8);
1235 } else {
1236 TCGv shift = tcg_temp_new();
1237 tcg_gen_andi_i64(shift, cpu_ir[rb], 7);
1238 tcg_gen_shli_i64(shift, shift, 3);
1239 tcg_gen_shl_i64(cpu_ir[rc], tmp, shift);
1240 tcg_temp_free(shift);
1242 tcg_temp_free(tmp);
1246 /* MSKWH, MSKLH, MSKQH */
1247 static void gen_msk_h(int ra, int rb, int rc, int islit,
1248 uint8_t lit, uint8_t byte_mask)
1250 if (unlikely(rc == 31))
1251 return;
1252 else if (unlikely(ra == 31))
1253 tcg_gen_movi_i64(cpu_ir[rc], 0);
1254 else if (islit) {
1255 gen_zapnoti (cpu_ir[rc], cpu_ir[ra], ~((byte_mask << (lit & 7)) >> 8));
1256 } else {
1257 TCGv shift = tcg_temp_new();
1258 TCGv mask = tcg_temp_new();
1260 /* The instruction description is as above, where the byte_mask
1261 is shifted left, and then we extract bits <15:8>. This can be
1262 emulated with a right-shift on the expanded byte mask. This
1263 requires extra care because for an input <2:0> == 0 we need a
1264 shift of 64 bits in order to generate a zero. This is done by
1265 splitting the shift into two parts, the variable shift - 1
1266 followed by a constant 1 shift. The code we expand below is
1267 equivalent to ~((B & 7) * 8) & 63. */
1269 tcg_gen_andi_i64(shift, cpu_ir[rb], 7);
1270 tcg_gen_shli_i64(shift, shift, 3);
1271 tcg_gen_not_i64(shift, shift);
1272 tcg_gen_andi_i64(shift, shift, 0x3f);
1273 tcg_gen_movi_i64(mask, zapnot_mask (byte_mask));
1274 tcg_gen_shr_i64(mask, mask, shift);
1275 tcg_gen_shri_i64(mask, mask, 1);
1277 tcg_gen_andc_i64(cpu_ir[rc], cpu_ir[ra], mask);
1279 tcg_temp_free(mask);
1280 tcg_temp_free(shift);
1284 /* MSKBL, MSKWL, MSKLL, MSKQL */
1285 static void gen_msk_l(int ra, int rb, int rc, int islit,
1286 uint8_t lit, uint8_t byte_mask)
1288 if (unlikely(rc == 31))
1289 return;
1290 else if (unlikely(ra == 31))
1291 tcg_gen_movi_i64(cpu_ir[rc], 0);
1292 else if (islit) {
1293 gen_zapnoti (cpu_ir[rc], cpu_ir[ra], ~(byte_mask << (lit & 7)));
1294 } else {
1295 TCGv shift = tcg_temp_new();
1296 TCGv mask = tcg_temp_new();
1298 tcg_gen_andi_i64(shift, cpu_ir[rb], 7);
1299 tcg_gen_shli_i64(shift, shift, 3);
1300 tcg_gen_movi_i64(mask, zapnot_mask (byte_mask));
1301 tcg_gen_shl_i64(mask, mask, shift);
1303 tcg_gen_andc_i64(cpu_ir[rc], cpu_ir[ra], mask);
1305 tcg_temp_free(mask);
1306 tcg_temp_free(shift);
1310 /* Code to call arith3 helpers */
1311 #define ARITH3(name) \
1312 static inline void glue(gen_, name)(int ra, int rb, int rc, int islit,\
1313 uint8_t lit) \
1315 if (unlikely(rc == 31)) \
1316 return; \
1318 if (ra != 31) { \
1319 if (islit) { \
1320 TCGv tmp = tcg_const_i64(lit); \
1321 gen_helper_ ## name(cpu_ir[rc], cpu_ir[ra], tmp); \
1322 tcg_temp_free(tmp); \
1323 } else \
1324 gen_helper_ ## name (cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]); \
1325 } else { \
1326 TCGv tmp1 = tcg_const_i64(0); \
1327 if (islit) { \
1328 TCGv tmp2 = tcg_const_i64(lit); \
1329 gen_helper_ ## name (cpu_ir[rc], tmp1, tmp2); \
1330 tcg_temp_free(tmp2); \
1331 } else \
1332 gen_helper_ ## name (cpu_ir[rc], tmp1, cpu_ir[rb]); \
1333 tcg_temp_free(tmp1); \
1336 ARITH3(cmpbge)
1337 ARITH3(addlv)
1338 ARITH3(sublv)
1339 ARITH3(addqv)
1340 ARITH3(subqv)
1341 ARITH3(umulh)
1342 ARITH3(mullv)
1343 ARITH3(mulqv)
1344 ARITH3(minub8)
1345 ARITH3(minsb8)
1346 ARITH3(minuw4)
1347 ARITH3(minsw4)
1348 ARITH3(maxub8)
1349 ARITH3(maxsb8)
1350 ARITH3(maxuw4)
1351 ARITH3(maxsw4)
1352 ARITH3(perr)
1354 #define MVIOP2(name) \
1355 static inline void glue(gen_, name)(int rb, int rc) \
1357 if (unlikely(rc == 31)) \
1358 return; \
1359 if (unlikely(rb == 31)) \
1360 tcg_gen_movi_i64(cpu_ir[rc], 0); \
1361 else \
1362 gen_helper_ ## name (cpu_ir[rc], cpu_ir[rb]); \
1364 MVIOP2(pklb)
1365 MVIOP2(pkwb)
1366 MVIOP2(unpkbl)
1367 MVIOP2(unpkbw)
1369 static void gen_cmp(TCGCond cond, int ra, int rb, int rc,
1370 int islit, uint8_t lit)
1372 TCGv va, vb;
1374 if (unlikely(rc == 31)) {
1375 return;
1378 if (ra == 31) {
1379 va = tcg_const_i64(0);
1380 } else {
1381 va = cpu_ir[ra];
1383 if (islit) {
1384 vb = tcg_const_i64(lit);
1385 } else {
1386 vb = cpu_ir[rb];
1389 tcg_gen_setcond_i64(cond, cpu_ir[rc], va, vb);
1391 if (ra == 31) {
1392 tcg_temp_free(va);
1394 if (islit) {
1395 tcg_temp_free(vb);
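/* RC and RS: copy the current intr_flag into Ra (unless Ra is R31), then
   unconditionally store SET back, so gen_rx(ra, 0) implements RC and
   gen_rx(ra, 1) implements RS.  */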
1399 static void gen_rx(int ra, int set)
1401 TCGv_i32 tmp;
1403 if (ra != 31) {
1404 tcg_gen_ld8u_i64(cpu_ir[ra], cpu_env, offsetof(CPUState, intr_flag));
1407 tmp = tcg_const_i32(set);
1408 tcg_gen_st8_i32(tmp, cpu_env, offsetof(CPUState, intr_flag));
1409 tcg_temp_free_i32(tmp);
1412 static ExitStatus translate_one(DisasContext *ctx, uint32_t insn)
1414 uint32_t palcode;
1415 int32_t disp21, disp16, disp12;
1416 uint16_t fn11;
1417 uint8_t opc, ra, rb, rc, fpfn, fn7, fn2, islit, real_islit;
1418 uint8_t lit;
1419 ExitStatus ret;
1421 /* Decode all instruction fields */
1422 opc = insn >> 26;
1423 ra = (insn >> 21) & 0x1F;
1424 rb = (insn >> 16) & 0x1F;
1425 rc = insn & 0x1F;
1426 real_islit = islit = (insn >> 12) & 1;
1427 if (rb == 31 && !islit) {
1428 islit = 1;
1429 lit = 0;
1430 } else
1431 lit = (insn >> 13) & 0xFF;
1432 palcode = insn & 0x03FFFFFF;
1433 disp21 = ((int32_t)((insn & 0x001FFFFF) << 11)) >> 11;
1434 disp16 = (int16_t)(insn & 0x0000FFFF);
1435 disp12 = (int32_t)((insn & 0x00000FFF) << 20) >> 20;
1436 fn11 = (insn >> 5) & 0x000007FF;
1437 fpfn = fn11 & 0x3F;
1438 fn7 = (insn >> 5) & 0x0000007F;
1439 fn2 = (insn >> 5) & 0x00000003;
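    /* In other words: the major opcode is insn<31:26>, Ra is <25:21>, Rb is
       <20:16> and Rc is <4:0>; bit 12 selects an 8-bit literal in <20:13> in
       place of Rb; the 21-bit branch and 16-bit memory displacements come
       from <20:0> and <15:0>, and the PALcode number from <25:0>.  */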
1440 LOG_DISAS("opc %02x ra %2d rb %2d rc %2d disp16 %6d\n",
1441 opc, ra, rb, rc, disp16);
1443 ret = NO_EXIT;
1444 switch (opc) {
1445 case 0x00:
1446 /* CALL_PAL */
1447 #ifdef CONFIG_USER_ONLY
1448 if (palcode == 0x9E) {
1449 /* RDUNIQUE */
1450 tcg_gen_mov_i64(cpu_ir[IR_V0], cpu_uniq);
1451 break;
1452 } else if (palcode == 0x9F) {
1453 /* WRUNIQUE */
1454 tcg_gen_mov_i64(cpu_uniq, cpu_ir[IR_A0]);
1455 break;
1457 #endif
1458 if (palcode >= 0x80 && palcode < 0xC0) {
1459 /* Unprivileged PAL call */
1460 gen_excp(ctx, EXCP_CALL_PAL + ((palcode & 0x3F) << 6), 0);
1461 /* PC updated by gen_excp. */
1462 ret = EXIT_PC_UPDATED;
1463 break;
1465 #ifndef CONFIG_USER_ONLY
1466 if (palcode < 0x40) {
1467 /* Privileged PAL code */
1468 if (ctx->mem_idx & 1)
1469 goto invalid_opc;
1470 gen_excp(ctx, EXCP_CALL_PALP + ((palcode & 0x3F) << 6), 0);
1472 #endif
1473 /* Invalid PAL call */
1474 goto invalid_opc;
1475 case 0x01:
1476 /* OPC01 */
1477 goto invalid_opc;
1478 case 0x02:
1479 /* OPC02 */
1480 goto invalid_opc;
1481 case 0x03:
1482 /* OPC03 */
1483 goto invalid_opc;
1484 case 0x04:
1485 /* OPC04 */
1486 goto invalid_opc;
1487 case 0x05:
1488 /* OPC05 */
1489 goto invalid_opc;
1490 case 0x06:
1491 /* OPC06 */
1492 goto invalid_opc;
1493 case 0x07:
1494 /* OPC07 */
1495 goto invalid_opc;
1496 case 0x08:
1497 /* LDA */
1498 if (likely(ra != 31)) {
1499 if (rb != 31)
1500 tcg_gen_addi_i64(cpu_ir[ra], cpu_ir[rb], disp16);
1501 else
1502 tcg_gen_movi_i64(cpu_ir[ra], disp16);
1504 break;
1505 case 0x09:
1506 /* LDAH */
1507 if (likely(ra != 31)) {
1508 if (rb != 31)
1509 tcg_gen_addi_i64(cpu_ir[ra], cpu_ir[rb], disp16 << 16);
1510 else
1511 tcg_gen_movi_i64(cpu_ir[ra], disp16 << 16);
1513 break;
1514 case 0x0A:
1515 /* LDBU */
1516 if (!(ctx->amask & AMASK_BWX))
1517 goto invalid_opc;
1518 gen_load_mem(ctx, &tcg_gen_qemu_ld8u, ra, rb, disp16, 0, 0);
1519 break;
1520 case 0x0B:
1521 /* LDQ_U */
1522 gen_load_mem(ctx, &tcg_gen_qemu_ld64, ra, rb, disp16, 0, 1);
1523 break;
1524 case 0x0C:
1525 /* LDWU */
1526 if (!(ctx->amask & AMASK_BWX))
1527 goto invalid_opc;
1528 gen_load_mem(ctx, &tcg_gen_qemu_ld16u, ra, rb, disp16, 0, 0);
1529 break;
1530 case 0x0D:
1531 /* STW */
1532 gen_store_mem(ctx, &tcg_gen_qemu_st16, ra, rb, disp16, 0, 0, 0);
1533 break;
1534 case 0x0E:
1535 /* STB */
1536 gen_store_mem(ctx, &tcg_gen_qemu_st8, ra, rb, disp16, 0, 0, 0);
1537 break;
1538 case 0x0F:
1539 /* STQ_U */
1540 gen_store_mem(ctx, &tcg_gen_qemu_st64, ra, rb, disp16, 0, 1, 0);
1541 break;
1542 case 0x10:
1543 switch (fn7) {
1544 case 0x00:
1545 /* ADDL */
1546 if (likely(rc != 31)) {
1547 if (ra != 31) {
1548 if (islit) {
1549 tcg_gen_addi_i64(cpu_ir[rc], cpu_ir[ra], lit);
1550 tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rc]);
1551 } else {
1552 tcg_gen_add_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
1553 tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rc]);
1555 } else {
1556 if (islit)
1557 tcg_gen_movi_i64(cpu_ir[rc], lit);
1558 else
1559 tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rb]);
1562 break;
1563 case 0x02:
1564 /* S4ADDL */
1565 if (likely(rc != 31)) {
1566 if (ra != 31) {
1567 TCGv tmp = tcg_temp_new();
1568 tcg_gen_shli_i64(tmp, cpu_ir[ra], 2);
1569 if (islit)
1570 tcg_gen_addi_i64(tmp, tmp, lit);
1571 else
1572 tcg_gen_add_i64(tmp, tmp, cpu_ir[rb]);
1573 tcg_gen_ext32s_i64(cpu_ir[rc], tmp);
1574 tcg_temp_free(tmp);
1575 } else {
1576 if (islit)
1577 tcg_gen_movi_i64(cpu_ir[rc], lit);
1578 else
1579 tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rb]);
1582 break;
1583 case 0x09:
1584 /* SUBL */
1585 if (likely(rc != 31)) {
1586 if (ra != 31) {
1587 if (islit)
1588 tcg_gen_subi_i64(cpu_ir[rc], cpu_ir[ra], lit);
1589 else
1590 tcg_gen_sub_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
1591 tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rc]);
1592 } else {
1593 if (islit)
1594 tcg_gen_movi_i64(cpu_ir[rc], -lit);
1595 else {
1596 tcg_gen_neg_i64(cpu_ir[rc], cpu_ir[rb]);
1597 tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rc]);
1600 break;
1601 case 0x0B:
1602 /* S4SUBL */
1603 if (likely(rc != 31)) {
1604 if (ra != 31) {
1605 TCGv tmp = tcg_temp_new();
1606 tcg_gen_shli_i64(tmp, cpu_ir[ra], 2);
1607 if (islit)
1608 tcg_gen_subi_i64(tmp, tmp, lit);
1609 else
1610 tcg_gen_sub_i64(tmp, tmp, cpu_ir[rb]);
1611 tcg_gen_ext32s_i64(cpu_ir[rc], tmp);
1612 tcg_temp_free(tmp);
1613 } else {
1614 if (islit)
1615 tcg_gen_movi_i64(cpu_ir[rc], -lit);
1616 else {
1617 tcg_gen_neg_i64(cpu_ir[rc], cpu_ir[rb]);
1618 tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rc]);
1622 break;
1623 case 0x0F:
1624 /* CMPBGE */
1625 gen_cmpbge(ra, rb, rc, islit, lit);
1626 break;
1627 case 0x12:
1628 /* S8ADDL */
1629 if (likely(rc != 31)) {
1630 if (ra != 31) {
1631 TCGv tmp = tcg_temp_new();
1632 tcg_gen_shli_i64(tmp, cpu_ir[ra], 3);
1633 if (islit)
1634 tcg_gen_addi_i64(tmp, tmp, lit);
1635 else
1636 tcg_gen_add_i64(tmp, tmp, cpu_ir[rb]);
1637 tcg_gen_ext32s_i64(cpu_ir[rc], tmp);
1638 tcg_temp_free(tmp);
1639 } else {
1640 if (islit)
1641 tcg_gen_movi_i64(cpu_ir[rc], lit);
1642 else
1643 tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rb]);
1646 break;
1647 case 0x1B:
1648 /* S8SUBL */
1649 if (likely(rc != 31)) {
1650 if (ra != 31) {
1651 TCGv tmp = tcg_temp_new();
1652 tcg_gen_shli_i64(tmp, cpu_ir[ra], 3);
1653 if (islit)
1654 tcg_gen_subi_i64(tmp, tmp, lit);
1655 else
1656 tcg_gen_sub_i64(tmp, tmp, cpu_ir[rb]);
1657 tcg_gen_ext32s_i64(cpu_ir[rc], tmp);
1658 tcg_temp_free(tmp);
1659 } else {
1660 if (islit)
1661 tcg_gen_movi_i64(cpu_ir[rc], -lit);
1662 else
1663 tcg_gen_neg_i64(cpu_ir[rc], cpu_ir[rb]);
1664 tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rc]);
1668 break;
1669 case 0x1D:
1670 /* CMPULT */
1671 gen_cmp(TCG_COND_LTU, ra, rb, rc, islit, lit);
1672 break;
1673 case 0x20:
1674 /* ADDQ */
1675 if (likely(rc != 31)) {
1676 if (ra != 31) {
1677 if (islit)
1678 tcg_gen_addi_i64(cpu_ir[rc], cpu_ir[ra], lit);
1679 else
1680 tcg_gen_add_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
1681 } else {
1682 if (islit)
1683 tcg_gen_movi_i64(cpu_ir[rc], lit);
1684 else
1685 tcg_gen_mov_i64(cpu_ir[rc], cpu_ir[rb]);
1688 break;
1689 case 0x22:
1690 /* S4ADDQ */
1691 if (likely(rc != 31)) {
1692 if (ra != 31) {
1693 TCGv tmp = tcg_temp_new();
1694 tcg_gen_shli_i64(tmp, cpu_ir[ra], 2);
1695 if (islit)
1696 tcg_gen_addi_i64(cpu_ir[rc], tmp, lit);
1697 else
1698 tcg_gen_add_i64(cpu_ir[rc], tmp, cpu_ir[rb]);
1699 tcg_temp_free(tmp);
1700 } else {
1701 if (islit)
1702 tcg_gen_movi_i64(cpu_ir[rc], lit);
1703 else
1704 tcg_gen_mov_i64(cpu_ir[rc], cpu_ir[rb]);
1707 break;
1708 case 0x29:
1709 /* SUBQ */
1710 if (likely(rc != 31)) {
1711 if (ra != 31) {
1712 if (islit)
1713 tcg_gen_subi_i64(cpu_ir[rc], cpu_ir[ra], lit);
1714 else
1715 tcg_gen_sub_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
1716 } else {
1717 if (islit)
1718 tcg_gen_movi_i64(cpu_ir[rc], -lit);
1719 else
1720 tcg_gen_neg_i64(cpu_ir[rc], cpu_ir[rb]);
1723 break;
1724 case 0x2B:
1725 /* S4SUBQ */
1726 if (likely(rc != 31)) {
1727 if (ra != 31) {
1728 TCGv tmp = tcg_temp_new();
1729 tcg_gen_shli_i64(tmp, cpu_ir[ra], 2);
1730 if (islit)
1731 tcg_gen_subi_i64(cpu_ir[rc], tmp, lit);
1732 else
1733 tcg_gen_sub_i64(cpu_ir[rc], tmp, cpu_ir[rb]);
1734 tcg_temp_free(tmp);
1735 } else {
1736 if (islit)
1737 tcg_gen_movi_i64(cpu_ir[rc], -lit);
1738 else
1739 tcg_gen_neg_i64(cpu_ir[rc], cpu_ir[rb]);
1742 break;
1743 case 0x2D:
1744 /* CMPEQ */
1745 gen_cmp(TCG_COND_EQ, ra, rb, rc, islit, lit);
1746 break;
1747 case 0x32:
1748 /* S8ADDQ */
1749 if (likely(rc != 31)) {
1750 if (ra != 31) {
1751 TCGv tmp = tcg_temp_new();
1752 tcg_gen_shli_i64(tmp, cpu_ir[ra], 3);
1753 if (islit)
1754 tcg_gen_addi_i64(cpu_ir[rc], tmp, lit);
1755 else
1756 tcg_gen_add_i64(cpu_ir[rc], tmp, cpu_ir[rb]);
1757 tcg_temp_free(tmp);
1758 } else {
1759 if (islit)
1760 tcg_gen_movi_i64(cpu_ir[rc], lit);
1761 else
1762 tcg_gen_mov_i64(cpu_ir[rc], cpu_ir[rb]);
1765 break;
1766 case 0x3B:
1767 /* S8SUBQ */
1768 if (likely(rc != 31)) {
1769 if (ra != 31) {
1770 TCGv tmp = tcg_temp_new();
1771 tcg_gen_shli_i64(tmp, cpu_ir[ra], 3);
1772 if (islit)
1773 tcg_gen_subi_i64(cpu_ir[rc], tmp, lit);
1774 else
1775 tcg_gen_sub_i64(cpu_ir[rc], tmp, cpu_ir[rb]);
1776 tcg_temp_free(tmp);
1777 } else {
1778 if (islit)
1779 tcg_gen_movi_i64(cpu_ir[rc], -lit);
1780 else
1781 tcg_gen_neg_i64(cpu_ir[rc], cpu_ir[rb]);
1784 break;
1785 case 0x3D:
1786 /* CMPULE */
1787 gen_cmp(TCG_COND_LEU, ra, rb, rc, islit, lit);
1788 break;
1789 case 0x40:
1790 /* ADDL/V */
1791 gen_addlv(ra, rb, rc, islit, lit);
1792 break;
1793 case 0x49:
1794 /* SUBL/V */
1795 gen_sublv(ra, rb, rc, islit, lit);
1796 break;
1797 case 0x4D:
1798 /* CMPLT */
1799 gen_cmp(TCG_COND_LT, ra, rb, rc, islit, lit);
1800 break;
1801 case 0x60:
1802 /* ADDQ/V */
1803 gen_addqv(ra, rb, rc, islit, lit);
1804 break;
1805 case 0x69:
1806 /* SUBQ/V */
1807 gen_subqv(ra, rb, rc, islit, lit);
1808 break;
1809 case 0x6D:
1810 /* CMPLE */
1811 gen_cmp(TCG_COND_LE, ra, rb, rc, islit, lit);
1812 break;
1813 default:
1814 goto invalid_opc;
1816 break;
1817 case 0x11:
1818 switch (fn7) {
1819 case 0x00:
1820 /* AND */
1821 if (likely(rc != 31)) {
1822 if (ra == 31)
1823 tcg_gen_movi_i64(cpu_ir[rc], 0);
1824 else if (islit)
1825 tcg_gen_andi_i64(cpu_ir[rc], cpu_ir[ra], lit);
1826 else
1827 tcg_gen_and_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
1829 break;
1830 case 0x08:
1831 /* BIC */
1832 if (likely(rc != 31)) {
1833 if (ra != 31) {
1834 if (islit)
1835 tcg_gen_andi_i64(cpu_ir[rc], cpu_ir[ra], ~lit);
1836 else
1837 tcg_gen_andc_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
1838 } else
1839 tcg_gen_movi_i64(cpu_ir[rc], 0);
1841 break;
1842 case 0x14:
1843 /* CMOVLBS */
1844 gen_cmov(TCG_COND_NE, ra, rb, rc, islit, lit, 1);
1845 break;
1846 case 0x16:
1847 /* CMOVLBC */
1848 gen_cmov(TCG_COND_EQ, ra, rb, rc, islit, lit, 1);
1849 break;
1850 case 0x20:
1851 /* BIS */
1852 if (likely(rc != 31)) {
1853 if (ra != 31) {
1854 if (islit)
1855 tcg_gen_ori_i64(cpu_ir[rc], cpu_ir[ra], lit);
1856 else
1857 tcg_gen_or_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
1858 } else {
1859 if (islit)
1860 tcg_gen_movi_i64(cpu_ir[rc], lit);
1861 else
1862 tcg_gen_mov_i64(cpu_ir[rc], cpu_ir[rb]);
1865 break;
1866 case 0x24:
1867 /* CMOVEQ */
1868 gen_cmov(TCG_COND_EQ, ra, rb, rc, islit, lit, 0);
1869 break;
1870 case 0x26:
1871 /* CMOVNE */
1872 gen_cmov(TCG_COND_NE, ra, rb, rc, islit, lit, 0);
1873 break;
1874 case 0x28:
1875 /* ORNOT */
1876 if (likely(rc != 31)) {
1877 if (ra != 31) {
1878 if (islit)
1879 tcg_gen_ori_i64(cpu_ir[rc], cpu_ir[ra], ~lit);
1880 else
1881 tcg_gen_orc_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
1882 } else {
1883 if (islit)
1884 tcg_gen_movi_i64(cpu_ir[rc], ~lit);
1885 else
1886 tcg_gen_not_i64(cpu_ir[rc], cpu_ir[rb]);
1889 break;
1890 case 0x40:
1891 /* XOR */
1892 if (likely(rc != 31)) {
1893 if (ra != 31) {
1894 if (islit)
1895 tcg_gen_xori_i64(cpu_ir[rc], cpu_ir[ra], lit);
1896 else
1897 tcg_gen_xor_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
1898 } else {
1899 if (islit)
1900 tcg_gen_movi_i64(cpu_ir[rc], lit);
1901 else
1902 tcg_gen_mov_i64(cpu_ir[rc], cpu_ir[rb]);
1905 break;
1906 case 0x44:
1907 /* CMOVLT */
1908 gen_cmov(TCG_COND_LT, ra, rb, rc, islit, lit, 0);
1909 break;
1910 case 0x46:
1911 /* CMOVGE */
1912 gen_cmov(TCG_COND_GE, ra, rb, rc, islit, lit, 0);
1913 break;
1914 case 0x48:
1915 /* EQV */
1916 if (likely(rc != 31)) {
1917 if (ra != 31) {
1918 if (islit)
1919 tcg_gen_xori_i64(cpu_ir[rc], cpu_ir[ra], ~lit);
1920 else
1921 tcg_gen_eqv_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
1922 } else {
1923 if (islit)
1924 tcg_gen_movi_i64(cpu_ir[rc], ~lit);
1925 else
1926 tcg_gen_not_i64(cpu_ir[rc], cpu_ir[rb]);
1929 break;
1930 case 0x61:
1931 /* AMASK */
1932 if (likely(rc != 31)) {
1933 if (islit)
1934 tcg_gen_movi_i64(cpu_ir[rc], lit);
1935 else
1936 tcg_gen_mov_i64(cpu_ir[rc], cpu_ir[rb]);
1937 switch (ctx->env->implver) {
1938 case IMPLVER_2106x:
1939 /* EV4, EV45, LCA, LCA45 & EV5 */
1940 break;
1941 case IMPLVER_21164:
1942 case IMPLVER_21264:
1943 case IMPLVER_21364:
1944 tcg_gen_andi_i64(cpu_ir[rc], cpu_ir[rc],
1945 ~(uint64_t)ctx->amask);
1946 break;
1949 break;
1950 case 0x64:
1951 /* CMOVLE */
1952 gen_cmov(TCG_COND_LE, ra, rb, rc, islit, lit, 0);
1953 break;
1954 case 0x66:
1955 /* CMOVGT */
1956 gen_cmov(TCG_COND_GT, ra, rb, rc, islit, lit, 0);
1957 break;
1958 case 0x6C:
1959 /* IMPLVER */
1960 if (rc != 31)
1961 tcg_gen_movi_i64(cpu_ir[rc], ctx->env->implver);
1962 break;
1963 default:
1964 goto invalid_opc;
1966 break;
1967 case 0x12:
1968 switch (fn7) {
1969 case 0x02:
1970 /* MSKBL */
1971 gen_msk_l(ra, rb, rc, islit, lit, 0x01);
1972 break;
1973 case 0x06:
1974 /* EXTBL */
1975 gen_ext_l(ra, rb, rc, islit, lit, 0x01);
1976 break;
1977 case 0x0B:
1978 /* INSBL */
1979 gen_ins_l(ra, rb, rc, islit, lit, 0x01);
1980 break;
1981 case 0x12:
1982 /* MSKWL */
1983 gen_msk_l(ra, rb, rc, islit, lit, 0x03);
1984 break;
1985 case 0x16:
1986 /* EXTWL */
1987 gen_ext_l(ra, rb, rc, islit, lit, 0x03);
1988 break;
1989 case 0x1B:
1990 /* INSWL */
1991 gen_ins_l(ra, rb, rc, islit, lit, 0x03);
1992 break;
1993 case 0x22:
1994 /* MSKLL */
1995 gen_msk_l(ra, rb, rc, islit, lit, 0x0f);
1996 break;
1997 case 0x26:
1998 /* EXTLL */
1999 gen_ext_l(ra, rb, rc, islit, lit, 0x0f);
2000 break;
2001 case 0x2B:
2002 /* INSLL */
2003 gen_ins_l(ra, rb, rc, islit, lit, 0x0f);
2004 break;
2005 case 0x30:
2006 /* ZAP */
2007 gen_zap(ra, rb, rc, islit, lit);
2008 break;
2009 case 0x31:
2010 /* ZAPNOT */
2011 gen_zapnot(ra, rb, rc, islit, lit);
2012 break;
2013 case 0x32:
2014 /* MSKQL */
2015 gen_msk_l(ra, rb, rc, islit, lit, 0xff);
2016 break;
2017 case 0x34:
2018 /* SRL */
2019 if (likely(rc != 31)) {
2020 if (ra != 31) {
2021 if (islit)
2022 tcg_gen_shri_i64(cpu_ir[rc], cpu_ir[ra], lit & 0x3f);
2023 else {
2024 TCGv shift = tcg_temp_new();
2025 tcg_gen_andi_i64(shift, cpu_ir[rb], 0x3f);
2026 tcg_gen_shr_i64(cpu_ir[rc], cpu_ir[ra], shift);
2027 tcg_temp_free(shift);
2029 } else
2030 tcg_gen_movi_i64(cpu_ir[rc], 0);
2032 break;
2033 case 0x36:
2034 /* EXTQL */
2035 gen_ext_l(ra, rb, rc, islit, lit, 0xff);
2036 break;
2037 case 0x39:
2038 /* SLL */
2039 if (likely(rc != 31)) {
2040 if (ra != 31) {
2041 if (islit)
2042 tcg_gen_shli_i64(cpu_ir[rc], cpu_ir[ra], lit & 0x3f);
2043 else {
2044 TCGv shift = tcg_temp_new();
2045 tcg_gen_andi_i64(shift, cpu_ir[rb], 0x3f);
2046 tcg_gen_shl_i64(cpu_ir[rc], cpu_ir[ra], shift);
2047 tcg_temp_free(shift);
2049 } else
2050 tcg_gen_movi_i64(cpu_ir[rc], 0);
2052 break;
2053 case 0x3B:
2054 /* INSQL */
2055 gen_ins_l(ra, rb, rc, islit, lit, 0xff);
2056 break;
2057 case 0x3C:
2058 /* SRA */
2059 if (likely(rc != 31)) {
2060 if (ra != 31) {
2061 if (islit)
2062 tcg_gen_sari_i64(cpu_ir[rc], cpu_ir[ra], lit & 0x3f);
2063 else {
2064 TCGv shift = tcg_temp_new();
2065 tcg_gen_andi_i64(shift, cpu_ir[rb], 0x3f);
2066 tcg_gen_sar_i64(cpu_ir[rc], cpu_ir[ra], shift);
2067 tcg_temp_free(shift);
2069 } else
2070 tcg_gen_movi_i64(cpu_ir[rc], 0);
2072 break;
2073 case 0x52:
2074 /* MSKWH */
2075 gen_msk_h(ra, rb, rc, islit, lit, 0x03);
2076 break;
2077 case 0x57:
2078 /* INSWH */
2079 gen_ins_h(ra, rb, rc, islit, lit, 0x03);
2080 break;
2081 case 0x5A:
2082 /* EXTWH */
2083 gen_ext_h(ra, rb, rc, islit, lit, 0x03);
2084 break;
2085 case 0x62:
2086 /* MSKLH */
2087 gen_msk_h(ra, rb, rc, islit, lit, 0x0f);
2088 break;
2089 case 0x67:
2090 /* INSLH */
2091 gen_ins_h(ra, rb, rc, islit, lit, 0x0f);
2092 break;
2093 case 0x6A:
2094 /* EXTLH */
2095 gen_ext_h(ra, rb, rc, islit, lit, 0x0f);
2096 break;
2097 case 0x72:
2098 /* MSKQH */
2099 gen_msk_h(ra, rb, rc, islit, lit, 0xff);
2100 break;
2101 case 0x77:
2102 /* INSQH */
2103 gen_ins_h(ra, rb, rc, islit, lit, 0xff);
2104 break;
2105 case 0x7A:
2106 /* EXTQH */
2107 gen_ext_h(ra, rb, rc, islit, lit, 0xff);
2108 break;
2109 default:
2110 goto invalid_opc;
2112 break;
2113 case 0x13:
2114 switch (fn7) {
2115 case 0x00:
2116 /* MULL */
2117 if (likely(rc != 31)) {
2118 if (ra == 31)
2119 tcg_gen_movi_i64(cpu_ir[rc], 0);
2120 else {
2121 if (islit)
2122 tcg_gen_muli_i64(cpu_ir[rc], cpu_ir[ra], lit);
2123 else
2124 tcg_gen_mul_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
2125 tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rc]);
2128 break;
2129 case 0x20:
2130 /* MULQ */
2131 if (likely(rc != 31)) {
2132 if (ra == 31)
2133 tcg_gen_movi_i64(cpu_ir[rc], 0);
2134 else if (islit)
2135 tcg_gen_muli_i64(cpu_ir[rc], cpu_ir[ra], lit);
2136 else
2137 tcg_gen_mul_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
2139 break;
2140 case 0x30:
2141 /* UMULH */
2142 gen_umulh(ra, rb, rc, islit, lit);
2143 break;
2144 case 0x40:
2145 /* MULL/V */
2146 gen_mullv(ra, rb, rc, islit, lit);
2147 break;
2148 case 0x60:
2149 /* MULQ/V */
2150 gen_mulqv(ra, rb, rc, islit, lit);
2151 break;
2152 default:
2153 goto invalid_opc;
2155 break;
2156 case 0x14:
2157 switch (fpfn) { /* fn11 & 0x3F */
2158 case 0x04:
2159 /* ITOFS */
2160 if (!(ctx->amask & AMASK_FIX))
2161 goto invalid_opc;
2162 if (likely(rc != 31)) {
2163 if (ra != 31) {
2164 TCGv_i32 tmp = tcg_temp_new_i32();
2165 tcg_gen_trunc_i64_i32(tmp, cpu_ir[ra]);
2166 gen_helper_memory_to_s(cpu_fir[rc], tmp);
2167 tcg_temp_free_i32(tmp);
2168 } else
2169 tcg_gen_movi_i64(cpu_fir[rc], 0);
2171 break;
2172 case 0x0A:
2173 /* SQRTF */
2174 if (!(ctx->amask & AMASK_FIX))
2175 goto invalid_opc;
2176 gen_fsqrtf(rb, rc);
2177 break;
2178 case 0x0B:
2179 /* SQRTS */
2180 if (!(ctx->amask & AMASK_FIX))
2181 goto invalid_opc;
2182 gen_fsqrts(ctx, rb, rc, fn11);
2183 break;
2184 case 0x14:
2185 /* ITOFF */
2186 if (!(ctx->amask & AMASK_FIX))
2187 goto invalid_opc;
2188 if (likely(rc != 31)) {
2189 if (ra != 31) {
2190 TCGv_i32 tmp = tcg_temp_new_i32();
2191 tcg_gen_trunc_i64_i32(tmp, cpu_ir[ra]);
2192 gen_helper_memory_to_f(cpu_fir[rc], tmp);
2193 tcg_temp_free_i32(tmp);
2194 } else
2195 tcg_gen_movi_i64(cpu_fir[rc], 0);
2197 break;
2198 case 0x24:
2199 /* ITOFT */
2200 if (!(ctx->amask & AMASK_FIX))
2201 goto invalid_opc;
2202 if (likely(rc != 31)) {
2203 if (ra != 31)
2204 tcg_gen_mov_i64(cpu_fir[rc], cpu_ir[ra]);
2205 else
2206 tcg_gen_movi_i64(cpu_fir[rc], 0);
2208 break;
2209 case 0x2A:
2210 /* SQRTG */
2211 if (!(ctx->amask & AMASK_FIX))
2212 goto invalid_opc;
2213 gen_fsqrtg(rb, rc);
2214 break;
2215 case 0x02B:
2216 /* SQRTT */
2217 if (!(ctx->amask & AMASK_FIX))
2218 goto invalid_opc;
2219 gen_fsqrtt(ctx, rb, rc, fn11);
2220 break;
2221 default:
2222 goto invalid_opc;
2224 break;
2225 case 0x15:
2226 /* VAX floating point */
2227 /* XXX: rounding mode and trap are ignored (!) */
2228 switch (fpfn) { /* fn11 & 0x3F */
2229 case 0x00:
2230 /* ADDF */
2231 gen_faddf(ra, rb, rc);
2232 break;
2233 case 0x01:
2234 /* SUBF */
2235 gen_fsubf(ra, rb, rc);
2236 break;
2237 case 0x02:
2238 /* MULF */
2239 gen_fmulf(ra, rb, rc);
2240 break;
2241 case 0x03:
2242 /* DIVF */
2243 gen_fdivf(ra, rb, rc);
2244 break;
2245 case 0x1E:
2246 /* CVTDG */
2247 #if 0 // TODO
2248 gen_fcvtdg(rb, rc);
2249 #else
2250 goto invalid_opc;
2251 #endif
2252 break;
2253 case 0x20:
2254 /* ADDG */
2255 gen_faddg(ra, rb, rc);
2256 break;
2257 case 0x21:
2258 /* SUBG */
2259 gen_fsubg(ra, rb, rc);
2260 break;
2261 case 0x22:
2262 /* MULG */
2263 gen_fmulg(ra, rb, rc);
2264 break;
2265 case 0x23:
2266 /* DIVG */
2267 gen_fdivg(ra, rb, rc);
2268 break;
2269 case 0x25:
2270 /* CMPGEQ */
2271 gen_fcmpgeq(ra, rb, rc);
2272 break;
2273 case 0x26:
2274 /* CMPGLT */
2275 gen_fcmpglt(ra, rb, rc);
2276 break;
2277 case 0x27:
2278 /* CMPGLE */
2279 gen_fcmpgle(ra, rb, rc);
2280 break;
2281 case 0x2C:
2282 /* CVTGF */
2283 gen_fcvtgf(rb, rc);
2284 break;
2285 case 0x2D:
2286 /* CVTGD */
2287 #if 0 // TODO
2288 gen_fcvtgd(rb, rc);
2289 #else
2290 goto invalid_opc;
2291 #endif
2292 break;
2293 case 0x2F:
2294 /* CVTGQ */
2295 gen_fcvtgq(rb, rc);
2296 break;
2297 case 0x3C:
2298 /* CVTQF */
2299 gen_fcvtqf(rb, rc);
2300 break;
2301 case 0x3E:
2302 /* CVTQG */
2303 gen_fcvtqg(rb, rc);
2304 break;
2305 default:
2306 goto invalid_opc;
2308 break;
2309 case 0x16:
2310 /* IEEE floating-point */
2311 switch (fpfn) { /* fn11 & 0x3F */
2312 case 0x00:
2313 /* ADDS */
2314 gen_fadds(ctx, ra, rb, rc, fn11);
2315 break;
2316 case 0x01:
2317 /* SUBS */
2318 gen_fsubs(ctx, ra, rb, rc, fn11);
2319 break;
2320 case 0x02:
2321 /* MULS */
2322 gen_fmuls(ctx, ra, rb, rc, fn11);
2323 break;
2324 case 0x03:
2325 /* DIVS */
2326 gen_fdivs(ctx, ra, rb, rc, fn11);
2327 break;
2328 case 0x20:
2329 /* ADDT */
2330 gen_faddt(ctx, ra, rb, rc, fn11);
2331 break;
2332 case 0x21:
2333 /* SUBT */
2334 gen_fsubt(ctx, ra, rb, rc, fn11);
2335 break;
2336 case 0x22:
2337 /* MULT */
2338 gen_fmult(ctx, ra, rb, rc, fn11);
2339 break;
2340 case 0x23:
2341 /* DIVT */
2342 gen_fdivt(ctx, ra, rb, rc, fn11);
2343 break;
2344 case 0x24:
2345 /* CMPTUN */
2346 gen_fcmptun(ctx, ra, rb, rc, fn11);
2347 break;
2348 case 0x25:
2349 /* CMPTEQ */
2350 gen_fcmpteq(ctx, ra, rb, rc, fn11);
2351 break;
2352 case 0x26:
2353 /* CMPTLT */
2354 gen_fcmptlt(ctx, ra, rb, rc, fn11);
2355 break;
2356 case 0x27:
2357 /* CMPTLE */
2358 gen_fcmptle(ctx, ra, rb, rc, fn11);
2359 break;
2360 case 0x2C:
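/* CVTST and CVTTS share the low function field 0x2C; presumably only the
   full 11-bit function code (0x2AC or 0x6AC for CVTST) distinguishes
   them, hence the explicit fn11 test here. */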
2361 if (fn11 == 0x2AC || fn11 == 0x6AC) {
2362 /* CVTST */
2363 gen_fcvtst(ctx, rb, rc, fn11);
2364 } else {
2365 /* CVTTS */
2366 gen_fcvtts(ctx, rb, rc, fn11);
2367 }
2368 break;
2369 case 0x2F:
2370 /* CVTTQ */
2371 gen_fcvttq(ctx, rb, rc, fn11);
2372 break;
2373 case 0x3C:
2374 /* CVTQS */
2375 gen_fcvtqs(ctx, rb, rc, fn11);
2376 break;
2377 case 0x3E:
2378 /* CVTQT */
2379 gen_fcvtqt(ctx, rb, rc, fn11);
2380 break;
2381 default:
2382 goto invalid_opc;
2383 }
2384 break;
2385 case 0x17:
2386 switch (fn11) {
2387 case 0x010:
2388 /* CVTLQ */
2389 gen_fcvtlq(rb, rc);
2390 break;
2391 case 0x020:
2392 if (likely(rc != 31)) {
2393 if (ra == rb) {
2394 /* FMOV */
2395 if (ra == 31)
2396 tcg_gen_movi_i64(cpu_fir[rc], 0);
2397 else
2398 tcg_gen_mov_i64(cpu_fir[rc], cpu_fir[ra]);
2399 } else {
2400 /* CPYS */
2401 gen_fcpys(ra, rb, rc);
2402 }
2403 }
2404 break;
2405 case 0x021:
2406 /* CPYSN */
2407 gen_fcpysn(ra, rb, rc);
2408 break;
2409 case 0x022:
2410 /* CPYSE */
2411 gen_fcpyse(ra, rb, rc);
2412 break;
2413 case 0x024:
2414 /* MT_FPCR */
2415 if (likely(ra != 31))
2416 gen_helper_store_fpcr(cpu_fir[ra]);
2417 else {
2418 TCGv tmp = tcg_const_i64(0);
2419 gen_helper_store_fpcr(tmp);
2420 tcg_temp_free(tmp);
2421 }
2422 break;
2423 case 0x025:
2424 /* MF_FPCR */
2425 if (likely(ra != 31))
2426 gen_helper_load_fpcr(cpu_fir[ra]);
2427 break;
2428 case 0x02A:
2429 /* FCMOVEQ */
2430 gen_fcmov(TCG_COND_EQ, ra, rb, rc);
2431 break;
2432 case 0x02B:
2433 /* FCMOVNE */
2434 gen_fcmov(TCG_COND_NE, ra, rb, rc);
2435 break;
2436 case 0x02C:
2437 /* FCMOVLT */
2438 gen_fcmov(TCG_COND_LT, ra, rb, rc);
2439 break;
2440 case 0x02D:
2441 /* FCMOVGE */
2442 gen_fcmov(TCG_COND_GE, ra, rb, rc);
2443 break;
2444 case 0x02E:
2445 /* FCMOVLE */
2446 gen_fcmov(TCG_COND_LE, ra, rb, rc);
2447 break;
2448 case 0x02F:
2449 /* FCMOVGT */
2450 gen_fcmov(TCG_COND_GT, ra, rb, rc);
2451 break;
2452 case 0x030:
2453 /* CVTQL */
2454 gen_fcvtql(rb, rc);
2455 break;
2456 case 0x130:
2457 /* CVTQL/V */
2458 case 0x530:
2459 /* CVTQL/SV */
2460 /* ??? I'm pretty sure there's nothing that /sv needs to do that
2461 /v doesn't do. The only thing I can think of is that /sv is a
2462 valid instruction merely for completeness in the ISA. */
2463 gen_fcvtql_v(ctx, rb, rc);
2464 break;
2465 default:
2466 goto invalid_opc;
2467 }
2468 break;
2469 case 0x18:
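/* Miscellaneous instruction group: the function code is taken from the
   displacement field. Barriers and cache/prefetch hints have no
   observable effect in this emulation, so most entries are no-ops. */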
2470 switch ((uint16_t)disp16) {
2471 case 0x0000:
2472 /* TRAPB */
2473 /* No-op. */
2474 break;
2475 case 0x0400:
2476 /* EXCB */
2477 /* No-op. */
2478 break;
2479 case 0x4000:
2480 /* MB */
2481 /* No-op */
2482 break;
2483 case 0x4400:
2484 /* WMB */
2485 /* No-op */
2486 break;
2487 case 0x8000:
2488 /* FETCH */
2489 /* No-op */
2490 break;
2491 case 0xA000:
2492 /* FETCH_M */
2493 /* No-op */
2494 break;
2495 case 0xC000:
2496 /* RPCC */
2497 if (ra != 31)
2498 gen_helper_load_pcc(cpu_ir[ra]);
2499 break;
2500 case 0xE000:
2501 /* RC */
2502 gen_rx(ra, 0);
2503 break;
2504 case 0xE800:
2505 /* ECB */
2506 break;
2507 case 0xF000:
2508 /* RS */
2509 gen_rx(ra, 1);
2510 break;
2511 case 0xF800:
2512 /* WH64 */
2513 /* No-op */
2514 break;
2515 default:
2516 goto invalid_opc;
2517 }
2518 break;
2519 case 0x19:
2520 /* HW_MFPR (PALcode) */
2521 #if defined (CONFIG_USER_ONLY)
2522 goto invalid_opc;
2523 #else
2524 if (!ctx->pal_mode)
2525 goto invalid_opc;
2526 if (ra != 31) {
2527 TCGv tmp = tcg_const_i32(insn & 0xFF);
2528 gen_helper_mfpr(cpu_ir[ra], tmp, cpu_ir[ra]);
2529 tcg_temp_free(tmp);
2530 }
2531 break;
2532 #endif
2533 case 0x1A:
2534 if (rb != 31)
2535 tcg_gen_andi_i64(cpu_pc, cpu_ir[rb], ~3);
2536 else
2537 tcg_gen_movi_i64(cpu_pc, 0);
2538 if (ra != 31)
2539 tcg_gen_movi_i64(cpu_ir[ra], ctx->pc);
2540 /* Those four jumps only differ by the branch prediction hint */
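/* (insn<15:14> selects JMP/JSR/RET/JSR_COROUTINE, but as no branch
   prediction is modelled here all four translate identically.) */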
2541 ret = EXIT_PC_UPDATED;
2542 break;
2543 case 0x1B:
2544 /* HW_LD (PALcode) */
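/* The 4-bit type field in insn<15:12>, decoded below, selects among the
   physical/virtual, locked, PTE-fetch, write-check and alternate-mode
   variants of the load. */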
2545 #if defined (CONFIG_USER_ONLY)
2546 goto invalid_opc;
2547 #else
2548 if (!ctx->pal_mode)
2549 goto invalid_opc;
2550 if (ra != 31) {
2551 TCGv addr = tcg_temp_new();
2552 if (rb != 31)
2553 tcg_gen_addi_i64(addr, cpu_ir[rb], disp12);
2554 else
2555 tcg_gen_movi_i64(addr, disp12);
2556 switch ((insn >> 12) & 0xF) {
2557 case 0x0:
2558 /* Longword physical access (hw_ldl/p) */
2559 gen_helper_ldl_raw(cpu_ir[ra], addr);
2560 break;
2561 case 0x1:
2562 /* Quadword physical access (hw_ldq/p) */
2563 gen_helper_ldq_raw(cpu_ir[ra], addr);
2564 break;
2565 case 0x2:
2566 /* Longword physical access with lock (hw_ldl_l/p) */
2567 gen_helper_ldl_l_raw(cpu_ir[ra], addr);
2568 break;
2569 case 0x3:
2570 /* Quadword physical access with lock (hw_ldq_l/p) */
2571 gen_helper_ldq_l_raw(cpu_ir[ra], addr);
2572 break;
2573 case 0x4:
2574 /* Longword virtual PTE fetch (hw_ldl/v) */
2575 tcg_gen_qemu_ld32s(cpu_ir[ra], addr, 0);
2576 break;
2577 case 0x5:
2578 /* Quadword virtual PTE fetch (hw_ldq/v) */
2579 tcg_gen_qemu_ld64(cpu_ir[ra], addr, 0);
2580 break;
2581 case 0x6:
2582 /* Invalid */
2583 goto invalid_opc;
2584 case 0x7:
2585 /* Invalid */
2586 goto invalid_opc;
2587 case 0x8:
2588 /* Longword virtual access (hw_ldl) */
2589 gen_helper_st_virt_to_phys(addr, addr);
2590 gen_helper_ldl_raw(cpu_ir[ra], addr);
2591 break;
2592 case 0x9:
2593 /* Quadword virtual access (hw_ldq) */
2594 gen_helper_st_virt_to_phys(addr, addr);
2595 gen_helper_ldq_raw(cpu_ir[ra], addr);
2596 break;
2597 case 0xA:
2598 /* Longword virtual access with protection check (hw_ldl/w) */
2599 tcg_gen_qemu_ld32s(cpu_ir[ra], addr, 0);
2600 break;
2601 case 0xB:
2602 /* Quadword virtual access with protection check (hw_ldq/w) */
2603 tcg_gen_qemu_ld64(cpu_ir[ra], addr, 0);
2604 break;
2605 case 0xC:
2606 /* Longword virtual access with alt access mode (hw_ldl/a)*/
2607 gen_helper_set_alt_mode();
2608 gen_helper_st_virt_to_phys(addr, addr);
2609 gen_helper_ldl_raw(cpu_ir[ra], addr);
2610 gen_helper_restore_mode();
2611 break;
2612 case 0xD:
2613 /* Quadword virtual access with alt access mode (hw_ldq/a) */
2614 gen_helper_set_alt_mode();
2615 gen_helper_st_virt_to_phys(addr, addr);
2616 gen_helper_ldq_raw(cpu_ir[ra], addr);
2617 gen_helper_restore_mode();
2618 break;
2619 case 0xE:
2620 /* Longword virtual access with alternate access mode and
2621 * protection checks (hw_ldl/wa)
2622 */
2623 gen_helper_set_alt_mode();
2624 gen_helper_ldl_data(cpu_ir[ra], addr);
2625 gen_helper_restore_mode();
2626 break;
2627 case 0xF:
2628 /* Quadword virtual access with alternate access mode and
2629 * protection checks (hw_ldq/wa)
2630 */
2631 gen_helper_set_alt_mode();
2632 gen_helper_ldq_data(cpu_ir[ra], addr);
2633 gen_helper_restore_mode();
2634 break;
2635 }
2636 tcg_temp_free(addr);
2637 }
2638 break;
2639 #endif
2640 case 0x1C:
2641 switch (fn7) {
2642 case 0x00:
2643 /* SEXTB */
2644 if (!(ctx->amask & AMASK_BWX))
2645 goto invalid_opc;
2646 if (likely(rc != 31)) {
2647 if (islit)
2648 tcg_gen_movi_i64(cpu_ir[rc], (int64_t)((int8_t)lit));
2649 else
2650 tcg_gen_ext8s_i64(cpu_ir[rc], cpu_ir[rb]);
2651 }
2652 break;
2653 case 0x01:
2654 /* SEXTW */
2655 if (!(ctx->amask & AMASK_BWX))
2656 goto invalid_opc;
2657 if (likely(rc != 31)) {
2658 if (islit)
2659 tcg_gen_movi_i64(cpu_ir[rc], (int64_t)((int16_t)lit));
2660 else
2661 tcg_gen_ext16s_i64(cpu_ir[rc], cpu_ir[rb]);
2662 }
2663 break;
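/* For the CIX count instructions, a literal operand is folded at
   translation time with the host-utils ctpop64/clz64/ctz64 routines;
   only the register forms call out to a helper. */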
2664 case 0x30:
2665 /* CTPOP */
2666 if (!(ctx->amask & AMASK_CIX))
2667 goto invalid_opc;
2668 if (likely(rc != 31)) {
2669 if (islit)
2670 tcg_gen_movi_i64(cpu_ir[rc], ctpop64(lit));
2671 else
2672 gen_helper_ctpop(cpu_ir[rc], cpu_ir[rb]);
2673 }
2674 break;
2675 case 0x31:
2676 /* PERR */
2677 if (!(ctx->amask & AMASK_MVI))
2678 goto invalid_opc;
2679 gen_perr(ra, rb, rc, islit, lit);
2680 break;
2681 case 0x32:
2682 /* CTLZ */
2683 if (!(ctx->amask & AMASK_CIX))
2684 goto invalid_opc;
2685 if (likely(rc != 31)) {
2686 if (islit)
2687 tcg_gen_movi_i64(cpu_ir[rc], clz64(lit));
2688 else
2689 gen_helper_ctlz(cpu_ir[rc], cpu_ir[rb]);
2690 }
2691 break;
2692 case 0x33:
2693 /* CTTZ */
2694 if (!(ctx->amask & AMASK_CIX))
2695 goto invalid_opc;
2696 if (likely(rc != 31)) {
2697 if (islit)
2698 tcg_gen_movi_i64(cpu_ir[rc], ctz64(lit));
2699 else
2700 gen_helper_cttz(cpu_ir[rc], cpu_ir[rb]);
2701 }
2702 break;
2703 case 0x34:
2704 /* UNPKBW */
2705 if (!(ctx->amask & AMASK_MVI))
2706 goto invalid_opc;
2707 if (real_islit || ra != 31)
2708 goto invalid_opc;
2709 gen_unpkbw (rb, rc);
2710 break;
2711 case 0x35:
2712 /* UNPKBL */
2713 if (!(ctx->amask & AMASK_MVI))
2714 goto invalid_opc;
2715 if (real_islit || ra != 31)
2716 goto invalid_opc;
2717 gen_unpkbl (rb, rc);
2718 break;
2719 case 0x36:
2720 /* PKWB */
2721 if (!(ctx->amask & AMASK_MVI))
2722 goto invalid_opc;
2723 if (real_islit || ra != 31)
2724 goto invalid_opc;
2725 gen_pkwb (rb, rc);
2726 break;
2727 case 0x37:
2728 /* PKLB */
2729 if (!(ctx->amask & AMASK_MVI))
2730 goto invalid_opc;
2731 if (real_islit || ra != 31)
2732 goto invalid_opc;
2733 gen_pklb (rb, rc);
2734 break;
2735 case 0x38:
2736 /* MINSB8 */
2737 if (!(ctx->amask & AMASK_MVI))
2738 goto invalid_opc;
2739 gen_minsb8 (ra, rb, rc, islit, lit);
2740 break;
2741 case 0x39:
2742 /* MINSW4 */
2743 if (!(ctx->amask & AMASK_MVI))
2744 goto invalid_opc;
2745 gen_minsw4 (ra, rb, rc, islit, lit);
2746 break;
2747 case 0x3A:
2748 /* MINUB8 */
2749 if (!(ctx->amask & AMASK_MVI))
2750 goto invalid_opc;
2751 gen_minub8 (ra, rb, rc, islit, lit);
2752 break;
2753 case 0x3B:
2754 /* MINUW4 */
2755 if (!(ctx->amask & AMASK_MVI))
2756 goto invalid_opc;
2757 gen_minuw4 (ra, rb, rc, islit, lit);
2758 break;
2759 case 0x3C:
2760 /* MAXUB8 */
2761 if (!(ctx->amask & AMASK_MVI))
2762 goto invalid_opc;
2763 gen_maxub8 (ra, rb, rc, islit, lit);
2764 break;
2765 case 0x3D:
2766 /* MAXUW4 */
2767 if (!(ctx->amask & AMASK_MVI))
2768 goto invalid_opc;
2769 gen_maxuw4 (ra, rb, rc, islit, lit);
2770 break;
2771 case 0x3E:
2772 /* MAXSB8 */
2773 if (!(ctx->amask & AMASK_MVI))
2774 goto invalid_opc;
2775 gen_maxsb8 (ra, rb, rc, islit, lit);
2776 break;
2777 case 0x3F:
2778 /* MAXSW4 */
2779 if (!(ctx->amask & AMASK_MVI))
2780 goto invalid_opc;
2781 gen_maxsw4 (ra, rb, rc, islit, lit);
2782 break;
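/* FTOIT/FTOIS move FP register bits into the integer file: FTOIT is a
   raw 64-bit copy, while FTOIS goes through the S-format memory
   encoding and then sign-extends the 32-bit result. */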
2783 case 0x70:
2784 /* FTOIT */
2785 if (!(ctx->amask & AMASK_FIX))
2786 goto invalid_opc;
2787 if (likely(rc != 31)) {
2788 if (ra != 31)
2789 tcg_gen_mov_i64(cpu_ir[rc], cpu_fir[ra]);
2790 else
2791 tcg_gen_movi_i64(cpu_ir[rc], 0);
2792 }
2793 break;
2794 case 0x78:
2795 /* FTOIS */
2796 if (!(ctx->amask & AMASK_FIX))
2797 goto invalid_opc;
2798 if (rc != 31) {
2799 TCGv_i32 tmp1 = tcg_temp_new_i32();
2800 if (ra != 31)
2801 gen_helper_s_to_memory(tmp1, cpu_fir[ra]);
2802 else {
2803 TCGv tmp2 = tcg_const_i64(0);
2804 gen_helper_s_to_memory(tmp1, tmp2);
2805 tcg_temp_free(tmp2);
2806 }
2807 tcg_gen_ext_i32_i64(cpu_ir[rc], tmp1);
2808 tcg_temp_free_i32(tmp1);
2809 }
2810 break;
2811 default:
2812 goto invalid_opc;
2813 }
2814 break;
2815 case 0x1D:
2816 /* HW_MTPR (PALcode) */
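/* As with HW_MFPR above, insn<7:0> names the internal processor
   register. The TB is ended afterwards (EXIT_PC_STALE), presumably so
   that any side effects of the IPR write take hold before further
   translation. */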
2817 #if defined (CONFIG_USER_ONLY)
2818 goto invalid_opc;
2819 #else
2820 if (!ctx->pal_mode)
2821 goto invalid_opc;
2822 else {
2823 TCGv tmp1 = tcg_const_i32(insn & 0xFF);
2824 if (ra != 31)
2825 gen_helper_mtpr(tmp1, cpu_ir[ra]);
2826 else {
2827 TCGv tmp2 = tcg_const_i64(0);
2828 gen_helper_mtpr(tmp1, tmp2);
2829 tcg_temp_free(tmp2);
2830 }
2831 tcg_temp_free(tmp1);
2832 ret = EXIT_PC_STALE;
2833 }
2834 break;
2835 #endif
2836 case 0x1E:
2837 /* HW_REI (PALcode) */
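/* The ((int64_t)insn << 51) >> 51 idiom below sign-extends the low
   13 bits of the instruction, i.e. the HW_RET displacement. */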
2838 #if defined (CONFIG_USER_ONLY)
2839 goto invalid_opc;
2840 #else
2841 if (!ctx->pal_mode)
2842 goto invalid_opc;
2843 if (rb == 31) {
2844 /* "Old" alpha */
2845 gen_helper_hw_rei();
2846 } else {
2847 TCGv tmp;
2849 if (ra != 31) {
2850 tmp = tcg_temp_new();
2851 tcg_gen_addi_i64(tmp, cpu_ir[rb], (((int64_t)insn << 51) >> 51));
2852 } else
2853 tmp = tcg_const_i64(((int64_t)insn << 51) >> 51);
2854 gen_helper_hw_ret(tmp);
2855 tcg_temp_free(tmp);
2856 }
2857 ret = EXIT_PC_UPDATED;
2858 break;
2859 #endif
2860 case 0x1F:
2861 /* HW_ST (PALcode) */
2862 #if defined (CONFIG_USER_ONLY)
2863 goto invalid_opc;
2864 #else
2865 if (!ctx->pal_mode)
2866 goto invalid_opc;
2867 else {
2868 TCGv addr, val;
2869 addr = tcg_temp_new();
2870 if (rb != 31)
2871 tcg_gen_addi_i64(addr, cpu_ir[rb], disp12);
2872 else
2873 tcg_gen_movi_i64(addr, disp12);
2874 if (ra != 31)
2875 val = cpu_ir[ra];
2876 else {
2877 val = tcg_temp_new();
2878 tcg_gen_movi_i64(val, 0);
2879 }
2880 switch ((insn >> 12) & 0xF) {
2881 case 0x0:
2882 /* Longword physical access */
2883 gen_helper_stl_raw(val, addr);
2884 break;
2885 case 0x1:
2886 /* Quadword physical access */
2887 gen_helper_stq_raw(val, addr);
2888 break;
2889 case 0x2:
2890 /* Longword physical access with lock */
2891 gen_helper_stl_c_raw(val, val, addr);
2892 break;
2893 case 0x3:
2894 /* Quadword physical access with lock */
2895 gen_helper_stq_c_raw(val, val, addr);
2896 break;
2897 case 0x4:
2898 /* Longword virtual access */
2899 gen_helper_st_virt_to_phys(addr, addr);
2900 gen_helper_stl_raw(val, addr);
2901 break;
2902 case 0x5:
2903 /* Quadword virtual access */
2904 gen_helper_st_virt_to_phys(addr, addr);
2905 gen_helper_stq_raw(val, addr);
2906 break;
2907 case 0x6:
2908 /* Invalid */
2909 goto invalid_opc;
2910 case 0x7:
2911 /* Invalid */
2912 goto invalid_opc;
2913 case 0x8:
2914 /* Invalid */
2915 goto invalid_opc;
2916 case 0x9:
2917 /* Invalid */
2918 goto invalid_opc;
2919 case 0xA:
2920 /* Invalid */
2921 goto invalid_opc;
2922 case 0xB:
2923 /* Invalid */
2924 goto invalid_opc;
2925 case 0xC:
2926 /* Longword virtual access with alternate access mode */
2927 gen_helper_set_alt_mode();
2928 gen_helper_st_virt_to_phys(addr, addr);
2929 gen_helper_stl_raw(val, addr);
2930 gen_helper_restore_mode();
2931 break;
2932 case 0xD:
2933 /* Quadword virtual access with alternate access mode */
2934 gen_helper_set_alt_mode();
2935 gen_helper_st_virt_to_phys(addr, addr);
2936 gen_helper_stq_raw(val, addr);
2937 gen_helper_restore_mode();
2938 break;
2939 case 0xE:
2940 /* Invalid */
2941 goto invalid_opc;
2942 case 0xF:
2943 /* Invalid */
2944 goto invalid_opc;
2945 }
2946 if (ra == 31)
2947 tcg_temp_free(val);
2948 tcg_temp_free(addr);
2949 }
2950 break;
2951 #endif
2952 case 0x20:
2953 /* LDF */
2954 gen_load_mem(ctx, &gen_qemu_ldf, ra, rb, disp16, 1, 0);
2955 break;
2956 case 0x21:
2957 /* LDG */
2958 gen_load_mem(ctx, &gen_qemu_ldg, ra, rb, disp16, 1, 0);
2959 break;
2960 case 0x22:
2961 /* LDS */
2962 gen_load_mem(ctx, &gen_qemu_lds, ra, rb, disp16, 1, 0);
2963 break;
2964 case 0x23:
2965 /* LDT */
2966 gen_load_mem(ctx, &tcg_gen_qemu_ld64, ra, rb, disp16, 1, 0);
2967 break;
2968 case 0x24:
2969 /* STF */
2970 gen_store_mem(ctx, &gen_qemu_stf, ra, rb, disp16, 1, 0, 0);
2971 break;
2972 case 0x25:
2973 /* STG */
2974 gen_store_mem(ctx, &gen_qemu_stg, ra, rb, disp16, 1, 0, 0);
2975 break;
2976 case 0x26:
2977 /* STS */
2978 gen_store_mem(ctx, &gen_qemu_sts, ra, rb, disp16, 1, 0, 0);
2979 break;
2980 case 0x27:
2981 /* STT */
2982 gen_store_mem(ctx, &tcg_gen_qemu_st64, ra, rb, disp16, 1, 0, 0);
2983 break;
2984 case 0x28:
2985 /* LDL */
2986 gen_load_mem(ctx, &tcg_gen_qemu_ld32s, ra, rb, disp16, 0, 0);
2987 break;
2988 case 0x29:
2989 /* LDQ */
2990 gen_load_mem(ctx, &tcg_gen_qemu_ld64, ra, rb, disp16, 0, 0);
2991 break;
2992 case 0x2A:
2993 /* LDL_L */
2994 gen_load_mem(ctx, &gen_qemu_ldl_l, ra, rb, disp16, 0, 0);
2995 break;
2996 case 0x2B:
2997 /* LDQ_L */
2998 gen_load_mem(ctx, &gen_qemu_ldq_l, ra, rb, disp16, 0, 0);
2999 break;
3000 case 0x2C:
3001 /* STL */
3002 gen_store_mem(ctx, &tcg_gen_qemu_st32, ra, rb, disp16, 0, 0, 0);
3003 break;
3004 case 0x2D:
3005 /* STQ */
3006 gen_store_mem(ctx, &tcg_gen_qemu_st64, ra, rb, disp16, 0, 0, 0);
3007 break;
3008 case 0x2E:
3009 /* STL_C */
3010 gen_store_mem(ctx, &gen_qemu_stl_c, ra, rb, disp16, 0, 0, 1);
3011 break;
3012 case 0x2F:
3013 /* STQ_C */
3014 gen_store_mem(ctx, &gen_qemu_stq_c, ra, rb, disp16, 0, 0, 1);
3015 break;
3016 case 0x30:
3017 /* BR */
3018 ret = gen_bdirect(ctx, ra, disp21);
3019 break;
3020 case 0x31: /* FBEQ */
3021 ret = gen_fbcond(ctx, TCG_COND_EQ, ra, disp21);
3022 break;
3023 case 0x32: /* FBLT */
3024 ret = gen_fbcond(ctx, TCG_COND_LT, ra, disp21);
3025 break;
3026 case 0x33: /* FBLE */
3027 ret = gen_fbcond(ctx, TCG_COND_LE, ra, disp21);
3028 break;
3029 case 0x34:
3030 /* BSR */
3031 ret = gen_bdirect(ctx, ra, disp21);
3032 break;
3033 case 0x35: /* FBNE */
3034 ret = gen_fbcond(ctx, TCG_COND_NE, ra, disp21);
3035 break;
3036 case 0x36: /* FBGE */
3037 ret = gen_fbcond(ctx, TCG_COND_GE, ra, disp21);
3038 break;
3039 case 0x37: /* FBGT */
3040 ret = gen_fbcond(ctx, TCG_COND_GT, ra, disp21);
3041 break;
3042 case 0x38:
3043 /* BLBC */
3044 ret = gen_bcond(ctx, TCG_COND_EQ, ra, disp21, 1);
3045 break;
3046 case 0x39:
3047 /* BEQ */
3048 ret = gen_bcond(ctx, TCG_COND_EQ, ra, disp21, 0);
3049 break;
3050 case 0x3A:
3051 /* BLT */
3052 ret = gen_bcond(ctx, TCG_COND_LT, ra, disp21, 0);
3053 break;
3054 case 0x3B:
3055 /* BLE */
3056 ret = gen_bcond(ctx, TCG_COND_LE, ra, disp21, 0);
3057 break;
3058 case 0x3C:
3059 /* BLBS */
3060 ret = gen_bcond(ctx, TCG_COND_NE, ra, disp21, 1);
3061 break;
3062 case 0x3D:
3063 /* BNE */
3064 ret = gen_bcond(ctx, TCG_COND_NE, ra, disp21, 0);
3065 break;
3066 case 0x3E:
3067 /* BGE */
3068 ret = gen_bcond(ctx, TCG_COND_GE, ra, disp21, 0);
3069 break;
3070 case 0x3F:
3071 /* BGT */
3072 ret = gen_bcond(ctx, TCG_COND_GT, ra, disp21, 0);
3073 break;
3074 invalid_opc:
3075 gen_invalid(ctx);
3076 /* PC updated by gen_excp. */
3077 ret = EXIT_PC_UPDATED;
3078 break;
3079 }
3081 return ret;
3082 }
3084 static inline void gen_intermediate_code_internal(CPUState *env,
3085 TranslationBlock *tb,
3086 int search_pc)
3087 {
3088 DisasContext ctx, *ctxp = &ctx;
3089 target_ulong pc_start;
3090 uint32_t insn;
3091 uint16_t *gen_opc_end;
3092 CPUBreakpoint *bp;
3093 int j, lj = -1;
3094 ExitStatus ret;
3095 int num_insns;
3096 int max_insns;
3098 pc_start = tb->pc;
3099 gen_opc_end = gen_opc_buf + OPC_MAX_SIZE;
3101 ctx.tb = tb;
3102 ctx.env = env;
3103 ctx.pc = pc_start;
3104 ctx.amask = env->amask;
3105 #if defined (CONFIG_USER_ONLY)
3106 ctx.mem_idx = 0;
3107 #else
3108 ctx.mem_idx = ((env->ps >> 3) & 3);
3109 ctx.pal_mode = env->ipr[IPR_EXC_ADDR] & 1;
3110 #endif
3112 /* ??? Every TB begins with unset rounding mode, to be initialized on
3113 the first fp insn of the TB. Alternatively we could define a proper
3114 default for every TB (e.g. QUAL_RM_N or QUAL_RM_D) and make sure
3115 to reset the FP_STATUS to that default at the end of any TB that
3116 changes the default. We could even (gasp) dynamically figure out
3117 what default would be most efficient given the running program. */
3118 ctx.tb_rm = -1;
3119 /* Similarly for flush-to-zero. */
3120 ctx.tb_ftz = -1;
3122 num_insns = 0;
3123 max_insns = tb->cflags & CF_COUNT_MASK;
3124 if (max_insns == 0)
3125 max_insns = CF_COUNT_MASK;
3127 gen_icount_start();
3128 do {
3129 if (unlikely(!QTAILQ_EMPTY(&env->breakpoints))) {
3130 QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
3131 if (bp->pc == ctx.pc) {
3132 gen_excp(&ctx, EXCP_DEBUG, 0);
3133 break;
3134 }
3135 }
3136 }
3137 if (search_pc) {
3138 j = gen_opc_ptr - gen_opc_buf;
3139 if (lj < j) {
3140 lj++;
3141 while (lj < j)
3142 gen_opc_instr_start[lj++] = 0;
3143 }
3144 gen_opc_pc[lj] = ctx.pc;
3145 gen_opc_instr_start[lj] = 1;
3146 gen_opc_icount[lj] = num_insns;
3147 }
3148 if (num_insns + 1 == max_insns && (tb->cflags & CF_LAST_IO))
3149 gen_io_start();
3150 insn = ldl_code(ctx.pc);
3151 num_insns++;
3153 if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP))) {
3154 tcg_gen_debug_insn_start(ctx.pc);
3155 }
3157 ctx.pc += 4;
3158 ret = translate_one(ctxp, insn);
3160 if (ret == NO_EXIT) {
3161 /* If we reach a page boundary, are single stepping,
3162 or exhaust instruction count, stop generation. */
3163 if (env->singlestep_enabled) {
3164 gen_excp(&ctx, EXCP_DEBUG, 0);
3165 ret = EXIT_PC_UPDATED;
3166 } else if ((ctx.pc & (TARGET_PAGE_SIZE - 1)) == 0
3167 || gen_opc_ptr >= gen_opc_end
3168 || num_insns >= max_insns
3169 || singlestep) {
3170 ret = EXIT_PC_STALE;
3171 }
3172 }
3173 } while (ret == NO_EXIT);
3175 if (tb->cflags & CF_LAST_IO) {
3176 gen_io_end();
3177 }
3179 switch (ret) {
3180 case EXIT_GOTO_TB:
3181 break;
3182 case EXIT_PC_STALE:
3183 tcg_gen_movi_i64(cpu_pc, ctx.pc);
3184 /* FALLTHRU */
3185 case EXIT_PC_UPDATED:
3186 tcg_gen_exit_tb(0);
3187 break;
3188 default:
3189 abort();
3190 }
3192 gen_icount_end(tb, num_insns);
3193 *gen_opc_ptr = INDEX_op_end;
3194 if (search_pc) {
3195 j = gen_opc_ptr - gen_opc_buf;
3196 lj++;
3197 while (lj <= j)
3198 gen_opc_instr_start[lj++] = 0;
3199 } else {
3200 tb->size = ctx.pc - pc_start;
3201 tb->icount = num_insns;
3202 }
3204 #ifdef DEBUG_DISAS
3205 if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
3206 qemu_log("IN: %s\n", lookup_symbol(pc_start));
3207 log_target_disas(pc_start, ctx.pc - pc_start, 1);
3208 qemu_log("\n");
3209 }
3210 #endif
3211 }
3213 void gen_intermediate_code (CPUState *env, struct TranslationBlock *tb)
3214 {
3215 gen_intermediate_code_internal(env, tb, 0);
3216 }
3218 void gen_intermediate_code_pc (CPUState *env, struct TranslationBlock *tb)
3219 {
3220 gen_intermediate_code_internal(env, tb, 1);
3221 }
3223 struct cpu_def_t {
3224 const char *name;
3225 int implver, amask;
3226 };
3228 static const struct cpu_def_t cpu_defs[] = {
3229 { "ev4", IMPLVER_2106x, 0 },
3230 { "ev5", IMPLVER_21164, 0 },
3231 { "ev56", IMPLVER_21164, AMASK_BWX },
3232 { "pca56", IMPLVER_21164, AMASK_BWX | AMASK_MVI },
3233 { "ev6", IMPLVER_21264, AMASK_BWX | AMASK_FIX | AMASK_MVI | AMASK_TRAP },
3234 { "ev67", IMPLVER_21264, (AMASK_BWX | AMASK_FIX | AMASK_CIX
3235 | AMASK_MVI | AMASK_TRAP | AMASK_PREFETCH), },
3236 { "ev68", IMPLVER_21264, (AMASK_BWX | AMASK_FIX | AMASK_CIX
3237 | AMASK_MVI | AMASK_TRAP | AMASK_PREFETCH), },
3238 { "21064", IMPLVER_2106x, 0 },
3239 { "21164", IMPLVER_21164, 0 },
3240 { "21164a", IMPLVER_21164, AMASK_BWX },
3241 { "21164pc", IMPLVER_21164, AMASK_BWX | AMASK_MVI },
3242 { "21264", IMPLVER_21264, AMASK_BWX | AMASK_FIX | AMASK_MVI | AMASK_TRAP },
3243 { "21264a", IMPLVER_21264, (AMASK_BWX | AMASK_FIX | AMASK_CIX
3244 | AMASK_MVI | AMASK_TRAP | AMASK_PREFETCH), }
3245 };
3247 CPUAlphaState * cpu_alpha_init (const char *cpu_model)
3248 {
3249 CPUAlphaState *env;
3250 int implver, amask, i, max;
3252 env = qemu_mallocz(sizeof(CPUAlphaState));
3253 cpu_exec_init(env);
3254 alpha_translate_init();
3255 tlb_flush(env, 1);
3257 /* Default to ev67; no reason not to emulate insns by default. */
3258 implver = IMPLVER_21264;
3259 amask = (AMASK_BWX | AMASK_FIX | AMASK_CIX | AMASK_MVI
3260 | AMASK_TRAP | AMASK_PREFETCH);
3262 max = ARRAY_SIZE(cpu_defs);
3263 for (i = 0; i < max; i++) {
3264 if (strcmp (cpu_model, cpu_defs[i].name) == 0) {
3265 implver = cpu_defs[i].implver;
3266 amask = cpu_defs[i].amask;
3267 break;
3268 }
3269 }
3270 env->implver = implver;
3271 env->amask = amask;
3273 env->ps = 0x1F00;
3274 #if defined (CONFIG_USER_ONLY)
3275 env->ps |= 1 << 3;
3276 cpu_alpha_store_fpcr(env, (FPCR_INVD | FPCR_DZED | FPCR_OVFD
3277 | FPCR_UNFD | FPCR_INED | FPCR_DNOD));
3278 #else
3279 pal_init(env);
3280 #endif
3282 /* Initialize IPR */
3283 #if defined (CONFIG_USER_ONLY)
3284 env->ipr[IPR_EXC_ADDR] = 0;
3285 env->ipr[IPR_EXC_SUM] = 0;
3286 env->ipr[IPR_EXC_MASK] = 0;
3287 #else
3288 {
3289 // uint64_t hwpcb;
3290 // hwpcb = env->ipr[IPR_PCBB];
3291 env->ipr[IPR_ASN] = 0;
3292 env->ipr[IPR_ASTEN] = 0;
3293 env->ipr[IPR_ASTSR] = 0;
3294 env->ipr[IPR_DATFX] = 0;
3295 /* XXX: fix this */
3296 // env->ipr[IPR_ESP] = ldq_raw(hwpcb + 8);
3297 // env->ipr[IPR_KSP] = ldq_raw(hwpcb + 0);
3298 // env->ipr[IPR_SSP] = ldq_raw(hwpcb + 16);
3299 // env->ipr[IPR_USP] = ldq_raw(hwpcb + 24);
3300 env->ipr[IPR_FEN] = 0;
3301 env->ipr[IPR_IPL] = 31;
3302 env->ipr[IPR_MCES] = 0;
3303 env->ipr[IPR_PERFMON] = 0; /* Implementation specific */
3304 // env->ipr[IPR_PTBR] = ldq_raw(hwpcb + 32);
3305 env->ipr[IPR_SISR] = 0;
3306 env->ipr[IPR_VIRBND] = -1ULL;
3307 }
3308 #endif
3310 qemu_init_vcpu(env);
3311 return env;
3312 }
3314 void gen_pc_load(CPUState *env, TranslationBlock *tb,
3315 unsigned long searched_pc, int pc_pos, void *puc)
3316 {
3317 env->pc = gen_opc_pc[pc_pos];
3318 }