/*
 *  Alpha emulation cpu translation for qemu.
 *
 *  Copyright (c) 2007 Jocelyn Mayer
 *
 *  This library is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU Lesser General Public
 *  License as published by the Free Software Foundation; either
 *  version 2 of the License, or (at your option) any later version.
 *
 *  This library is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 *  Lesser General Public License for more details.
 *
 *  You should have received a copy of the GNU Lesser General Public
 *  License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include <stdint.h>
#include <stdlib.h>
#include <stdio.h>

#include "cpu.h"
#include "exec-all.h"
#include "disas.h"
#include "host-utils.h"
#include "tcg-op.h"
#include "qemu-common.h"

#include "helper.h"
#define GEN_HELPER 1
#include "helper.h"

#undef ALPHA_DEBUG_DISAS
#define CONFIG_SOFTFLOAT_INLINE

#ifdef ALPHA_DEBUG_DISAS
#  define LOG_DISAS(...) qemu_log_mask(CPU_LOG_TB_IN_ASM, ## __VA_ARGS__)
#else
#  define LOG_DISAS(...) do { } while (0)
#endif

typedef struct DisasContext DisasContext;
struct DisasContext {
    uint64_t pc;
    int mem_idx;
#if !defined (CONFIG_USER_ONLY)
    int pal_mode;
#endif
    CPUAlphaState *env;
    uint32_t amask;

    /* Current rounding mode for this TB.  */
    int tb_rm;
    /* Current flush-to-zero setting for this TB.  */
    int tb_ftz;
};

/* global register indexes */
static TCGv_ptr cpu_env;
static TCGv cpu_ir[31];
static TCGv cpu_fir[31];
static TCGv cpu_pc;
static TCGv cpu_lock;
#ifdef CONFIG_USER_ONLY
static TCGv cpu_uniq;
#endif

/* register names */
static char cpu_reg_names[10*4+21*5 + 10*5+21*6];

#include "gen-icount.h"

static void alpha_translate_init(void)
{
    int i;
    char *p;
    static int done_init = 0;

    if (done_init)
        return;

    cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");

    p = cpu_reg_names;
    for (i = 0; i < 31; i++) {
        sprintf(p, "ir%d", i);
        cpu_ir[i] = tcg_global_mem_new_i64(TCG_AREG0,
                                           offsetof(CPUState, ir[i]), p);
        p += (i < 10) ? 4 : 5;

        sprintf(p, "fir%d", i);
        cpu_fir[i] = tcg_global_mem_new_i64(TCG_AREG0,
                                            offsetof(CPUState, fir[i]), p);
        p += (i < 10) ? 5 : 6;
    }

    cpu_pc = tcg_global_mem_new_i64(TCG_AREG0,
                                    offsetof(CPUState, pc), "pc");

    cpu_lock = tcg_global_mem_new_i64(TCG_AREG0,
                                      offsetof(CPUState, lock), "lock");

#ifdef CONFIG_USER_ONLY
    cpu_uniq = tcg_global_mem_new_i64(TCG_AREG0,
                                      offsetof(CPUState, unique), "uniq");
#endif

    /* register helpers */
#define GEN_HELPER 2
#include "helper.h"

    done_init = 1;
}

static inline void gen_excp(DisasContext *ctx, int exception, int error_code)
{
    TCGv_i32 tmp1, tmp2;

    tcg_gen_movi_i64(cpu_pc, ctx->pc);
    tmp1 = tcg_const_i32(exception);
    tmp2 = tcg_const_i32(error_code);
    gen_helper_excp(tmp1, tmp2);
    tcg_temp_free_i32(tmp2);
    tcg_temp_free_i32(tmp1);
}

static inline void gen_invalid(DisasContext *ctx)
{
    gen_excp(ctx, EXCP_OPCDEC, 0);
}

static inline void gen_qemu_ldf(TCGv t0, TCGv t1, int flags)
{
    TCGv tmp = tcg_temp_new();
    TCGv_i32 tmp32 = tcg_temp_new_i32();
    tcg_gen_qemu_ld32u(tmp, t1, flags);
    tcg_gen_trunc_i64_i32(tmp32, tmp);
    gen_helper_memory_to_f(t0, tmp32);
    tcg_temp_free_i32(tmp32);
    tcg_temp_free(tmp);
}

static inline void gen_qemu_ldg(TCGv t0, TCGv t1, int flags)
{
    TCGv tmp = tcg_temp_new();
    tcg_gen_qemu_ld64(tmp, t1, flags);
    gen_helper_memory_to_g(t0, tmp);
    tcg_temp_free(tmp);
}

static inline void gen_qemu_lds(TCGv t0, TCGv t1, int flags)
{
    TCGv tmp = tcg_temp_new();
    TCGv_i32 tmp32 = tcg_temp_new_i32();
    tcg_gen_qemu_ld32u(tmp, t1, flags);
    tcg_gen_trunc_i64_i32(tmp32, tmp);
    gen_helper_memory_to_s(t0, tmp32);
    tcg_temp_free_i32(tmp32);
    tcg_temp_free(tmp);
}

static inline void gen_qemu_ldl_l(TCGv t0, TCGv t1, int flags)
{
    tcg_gen_mov_i64(cpu_lock, t1);
    tcg_gen_qemu_ld32s(t0, t1, flags);
}

static inline void gen_qemu_ldq_l(TCGv t0, TCGv t1, int flags)
{
    tcg_gen_mov_i64(cpu_lock, t1);
    tcg_gen_qemu_ld64(t0, t1, flags);
}

static inline void gen_load_mem(DisasContext *ctx,
                                void (*tcg_gen_qemu_load)(TCGv t0, TCGv t1,
                                                          int flags),
                                int ra, int rb, int32_t disp16, int fp,
                                int clear)
{
    TCGv addr;

    if (unlikely(ra == 31))
        return;

    addr = tcg_temp_new();
    if (rb != 31) {
        tcg_gen_addi_i64(addr, cpu_ir[rb], disp16);
        if (clear)
            tcg_gen_andi_i64(addr, addr, ~0x7);
    } else {
        if (clear)
            disp16 &= ~0x7;
        tcg_gen_movi_i64(addr, disp16);
    }
    if (fp)
        tcg_gen_qemu_load(cpu_fir[ra], addr, ctx->mem_idx);
    else
        tcg_gen_qemu_load(cpu_ir[ra], addr, ctx->mem_idx);
    tcg_temp_free(addr);
}

static inline void gen_qemu_stf(TCGv t0, TCGv t1, int flags)
{
    TCGv_i32 tmp32 = tcg_temp_new_i32();
    TCGv tmp = tcg_temp_new();
    gen_helper_f_to_memory(tmp32, t0);
    tcg_gen_extu_i32_i64(tmp, tmp32);
    tcg_gen_qemu_st32(tmp, t1, flags);
    tcg_temp_free(tmp);
    tcg_temp_free_i32(tmp32);
}

static inline void gen_qemu_stg(TCGv t0, TCGv t1, int flags)
{
    TCGv tmp = tcg_temp_new();
    gen_helper_g_to_memory(tmp, t0);
    tcg_gen_qemu_st64(tmp, t1, flags);
    tcg_temp_free(tmp);
}

static inline void gen_qemu_sts(TCGv t0, TCGv t1, int flags)
{
    TCGv_i32 tmp32 = tcg_temp_new_i32();
    TCGv tmp = tcg_temp_new();
    gen_helper_s_to_memory(tmp32, t0);
    tcg_gen_extu_i32_i64(tmp, tmp32);
    tcg_gen_qemu_st32(tmp, t1, flags);
    tcg_temp_free(tmp);
    tcg_temp_free_i32(tmp32);
}

static inline void gen_qemu_stl_c(TCGv t0, TCGv t1, int flags)
{
    int l1, l2;

    l1 = gen_new_label();
    l2 = gen_new_label();
    tcg_gen_brcond_i64(TCG_COND_NE, cpu_lock, t1, l1);
    tcg_gen_qemu_st32(t0, t1, flags);
    tcg_gen_movi_i64(t0, 1);
    tcg_gen_br(l2);
    gen_set_label(l1);
    tcg_gen_movi_i64(t0, 0);
    gen_set_label(l2);
    tcg_gen_movi_i64(cpu_lock, -1);
}

static inline void gen_qemu_stq_c(TCGv t0, TCGv t1, int flags)
{
    int l1, l2;

    l1 = gen_new_label();
    l2 = gen_new_label();
    tcg_gen_brcond_i64(TCG_COND_NE, cpu_lock, t1, l1);
    tcg_gen_qemu_st64(t0, t1, flags);
    tcg_gen_movi_i64(t0, 1);
    tcg_gen_br(l2);
    gen_set_label(l1);
    tcg_gen_movi_i64(t0, 0);
    gen_set_label(l2);
    tcg_gen_movi_i64(cpu_lock, -1);
}

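/* Note on the load-locked/store-conditional pattern above: cpu_lock
   remembers the address of the last LDx_L.  Each store-conditional
   helper compares it against the store address, writes 1 into t0 on a
   successful store and 0 otherwise, and always invalidates the lock by
   resetting cpu_lock to -1, mirroring the architectural lock_flag
   behaviour.  */
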
static inline void gen_store_mem(DisasContext *ctx,
                                 void (*tcg_gen_qemu_store)(TCGv t0, TCGv t1,
                                                            int flags),
                                 int ra, int rb, int32_t disp16, int fp,
                                 int clear, int local)
{
    TCGv addr;
    if (local)
        addr = tcg_temp_local_new();
    else
        addr = tcg_temp_new();
    if (rb != 31) {
        tcg_gen_addi_i64(addr, cpu_ir[rb], disp16);
        if (clear)
            tcg_gen_andi_i64(addr, addr, ~0x7);
    } else {
        if (clear)
            disp16 &= ~0x7;
        tcg_gen_movi_i64(addr, disp16);
    }
    if (ra != 31) {
        if (fp)
            tcg_gen_qemu_store(cpu_fir[ra], addr, ctx->mem_idx);
        else
            tcg_gen_qemu_store(cpu_ir[ra], addr, ctx->mem_idx);
    } else {
        TCGv zero;
        if (local)
            zero = tcg_const_local_i64(0);
        else
            zero = tcg_const_i64(0);
        tcg_gen_qemu_store(zero, addr, ctx->mem_idx);
        tcg_temp_free(zero);
    }
    tcg_temp_free(addr);
}

static void gen_bcond_pcload(DisasContext *ctx, int32_t disp, int lab_true)
{
    int lab_over = gen_new_label();

    tcg_gen_movi_i64(cpu_pc, ctx->pc);
    tcg_gen_br(lab_over);
    gen_set_label(lab_true);
    tcg_gen_movi_i64(cpu_pc, ctx->pc + (int64_t)(disp << 2));
    gen_set_label(lab_over);
}

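/* The branch displacement is encoded in instruction words, hence the
   "disp << 2" above: ctx->pc has already been advanced past the branch,
   so e.g. disp == 1 targets ctx->pc + 4, and disp == 0 falls through to
   the next instruction.  */
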
static void gen_bcond(DisasContext *ctx, TCGCond cond, int ra,
                      int32_t disp, int mask)
{
    int lab_true = gen_new_label();

    if (likely(ra != 31)) {
        if (mask) {
            TCGv tmp = tcg_temp_new();
            tcg_gen_andi_i64(tmp, cpu_ir[ra], 1);
            tcg_gen_brcondi_i64(cond, tmp, 0, lab_true);
            tcg_temp_free(tmp);
        } else {
            tcg_gen_brcondi_i64(cond, cpu_ir[ra], 0, lab_true);
        }
    } else {
        /* Very uncommon case - Do not bother to optimize.  */
        TCGv tmp = tcg_const_i64(0);
        tcg_gen_brcondi_i64(cond, tmp, 0, lab_true);
        tcg_temp_free(tmp);
    }
    gen_bcond_pcload(ctx, disp, lab_true);
}

/* Generate a forward TCG branch to LAB_TRUE if RA cmp 0.0.
   This is complicated by the fact that -0.0 compares the same as +0.0.  */

static void gen_fbcond_internal(TCGCond cond, TCGv src, int lab_true)
{
    int lab_false = -1;
    uint64_t mzero = 1ull << 63;
    TCGv tmp;

    switch (cond) {
    case TCG_COND_LE:
    case TCG_COND_GT:
        /* For <= or >, the -0.0 value directly compares the way we want.  */
        tcg_gen_brcondi_i64(cond, src, 0, lab_true);
        break;

    case TCG_COND_EQ:
    case TCG_COND_NE:
        /* For == or !=, we can simply mask off the sign bit and compare.  */
        /* ??? Assume that the temporary is reclaimed at the branch.  */
        tmp = tcg_temp_new();
        tcg_gen_andi_i64(tmp, src, mzero - 1);
        tcg_gen_brcondi_i64(cond, tmp, 0, lab_true);
        break;

    case TCG_COND_GE:
        /* For >=, emit two branches to the destination.  */
        tcg_gen_brcondi_i64(cond, src, 0, lab_true);
        tcg_gen_brcondi_i64(TCG_COND_EQ, src, mzero, lab_true);
        break;

    case TCG_COND_LT:
        /* For <, first filter out -0.0 to what will be the fallthru.  */
        lab_false = gen_new_label();
        tcg_gen_brcondi_i64(TCG_COND_EQ, src, mzero, lab_false);
        tcg_gen_brcondi_i64(cond, src, 0, lab_true);
        gen_set_label(lab_false);
        break;

    default:
        abort();
    }
}

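/* mzero (1ull << 63) is exactly the IEEE-754 bit pattern of -0.0, so
   "src & (mzero - 1)" clears the sign bit and maps both zeroes to the
   integer value 0 for the == / != cases above, while the signed integer
   comparisons against 0 handle all other orderings.  */
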
static void gen_fbcond(DisasContext *ctx, TCGCond cond, int ra, int32_t disp)
{
    int lab_true;

    if (unlikely(ra == 31)) {
        /* Very uncommon case, but easier to optimize it to an integer
           comparison than continuing with the floating point comparison.  */
        gen_bcond(ctx, cond, ra, disp, 0);
        return;
    }

    lab_true = gen_new_label();
    gen_fbcond_internal(cond, cpu_fir[ra], lab_true);
    gen_bcond_pcload(ctx, disp, lab_true);
}

static inline void gen_cmov(TCGCond inv_cond, int ra, int rb, int rc,
                            int islit, uint8_t lit, int mask)
{
    int l1;

    if (unlikely(rc == 31))
        return;

    l1 = gen_new_label();

    if (ra != 31) {
        if (mask) {
            TCGv tmp = tcg_temp_new();
            tcg_gen_andi_i64(tmp, cpu_ir[ra], 1);
            tcg_gen_brcondi_i64(inv_cond, tmp, 0, l1);
            tcg_temp_free(tmp);
        } else
            tcg_gen_brcondi_i64(inv_cond, cpu_ir[ra], 0, l1);
    } else {
        /* Very uncommon case - Do not bother to optimize.  */
        TCGv tmp = tcg_const_i64(0);
        tcg_gen_brcondi_i64(inv_cond, tmp, 0, l1);
        tcg_temp_free(tmp);
    }

    if (islit)
        tcg_gen_movi_i64(cpu_ir[rc], lit);
    else
        tcg_gen_mov_i64(cpu_ir[rc], cpu_ir[rb]);
    gen_set_label(l1);
}

static void gen_fcmov(TCGCond inv_cond, int ra, int rb, int rc)
{
    TCGv va = cpu_fir[ra];
    int l1;

    if (unlikely(rc == 31))
        return;
    if (unlikely(ra == 31)) {
        /* ??? Assume that the temporary is reclaimed at the branch.  */
        va = tcg_const_i64(0);
    }

    l1 = gen_new_label();
    gen_fbcond_internal(inv_cond, va, l1);

    if (rb != 31)
        tcg_gen_mov_i64(cpu_fir[rc], cpu_fir[rb]);
    else
        tcg_gen_movi_i64(cpu_fir[rc], 0);
    gen_set_label(l1);
}

#define QUAL_RM_N       0x080   /* Round mode nearest even */
#define QUAL_RM_C       0x000   /* Round mode chopped */
#define QUAL_RM_M       0x040   /* Round mode minus infinity */
#define QUAL_RM_D       0x0c0   /* Round mode dynamic */
#define QUAL_RM_MASK    0x0c0

#define QUAL_U          0x100   /* Underflow enable (fp output) */
#define QUAL_V          0x100   /* Overflow enable (int output) */
#define QUAL_S          0x400   /* Software completion enable */
#define QUAL_I          0x200   /* Inexact detection enable */

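/* The qualifier bits combine by OR-ing into the high bits of fn11;
   e.g. an instruction written with the /SUI qualifiers has
   QUAL_S | QUAL_U | QUAL_I (0x700) set in addition to its round-mode
   bits and the low 6 function bits.  */
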
static void gen_qual_roundmode(DisasContext *ctx, int fn11)
{
    TCGv_i32 tmp;

    fn11 &= QUAL_RM_MASK;
    if (fn11 == ctx->tb_rm) {
        return;
    }
    ctx->tb_rm = fn11;

    tmp = tcg_temp_new_i32();
    switch (fn11) {
    case QUAL_RM_N:
        tcg_gen_movi_i32(tmp, float_round_nearest_even);
        break;
    case QUAL_RM_C:
        tcg_gen_movi_i32(tmp, float_round_to_zero);
        break;
    case QUAL_RM_M:
        tcg_gen_movi_i32(tmp, float_round_down);
        break;
    case QUAL_RM_D:
        tcg_gen_ld8u_i32(tmp, cpu_env, offsetof(CPUState, fpcr_dyn_round));
        break;
    }

#if defined(CONFIG_SOFTFLOAT_INLINE)
    /* ??? The "softfloat.h" interface is to call set_float_rounding_mode.
       With CONFIG_SOFTFLOAT that expands to an out-of-line call that just
       sets the one field.  */
    tcg_gen_st8_i32(tmp, cpu_env,
                    offsetof(CPUState, fp_status.float_rounding_mode));
#else
    gen_helper_setroundmode(tmp);
#endif

    tcg_temp_free_i32(tmp);
}

static void gen_qual_flushzero(DisasContext *ctx, int fn11)
{
    TCGv_i32 tmp;

    fn11 &= QUAL_U;
    if (fn11 == ctx->tb_ftz) {
        return;
    }
    ctx->tb_ftz = fn11;

    tmp = tcg_temp_new_i32();
    if (fn11) {
        /* Underflow is enabled, use the FPCR setting.  */
        tcg_gen_ld8u_i32(tmp, cpu_env, offsetof(CPUState, fpcr_flush_to_zero));
    } else {
        /* Underflow is disabled, force flush-to-zero.  */
        tcg_gen_movi_i32(tmp, 1);
    }

#if defined(CONFIG_SOFTFLOAT_INLINE)
    tcg_gen_st8_i32(tmp, cpu_env,
                    offsetof(CPUState, fp_status.flush_to_zero));
#else
    gen_helper_setflushzero(tmp);
#endif

    tcg_temp_free_i32(tmp);
}

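/* Both helpers above cache the last value written for the life of the
   translation block: when fn11 matches ctx->tb_rm / ctx->tb_ftz they
   emit no TCG code at all, so a run of FP operations with identical
   qualifiers pays for the fp_status update only once per TB.  */
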
static TCGv gen_ieee_input(int reg, int fn11, int is_cmp)
{
    TCGv val = tcg_temp_new();
    if (reg == 31) {
        tcg_gen_movi_i64(val, 0);
    } else if (fn11 & QUAL_S) {
        gen_helper_ieee_input_s(val, cpu_fir[reg]);
    } else if (is_cmp) {
        gen_helper_ieee_input_cmp(val, cpu_fir[reg]);
    } else {
        gen_helper_ieee_input(val, cpu_fir[reg]);
    }
    return val;
}

static void gen_fp_exc_clear(void)
{
#if defined(CONFIG_SOFTFLOAT_INLINE)
    TCGv_i32 zero = tcg_const_i32(0);
    tcg_gen_st8_i32(zero, cpu_env,
                    offsetof(CPUState, fp_status.float_exception_flags));
    tcg_temp_free_i32(zero);
#else
    gen_helper_fp_exc_clear();
#endif
}

static void gen_fp_exc_raise_ignore(int rc, int fn11, int ignore)
{
    /* ??? We ought to be able to do something with imprecise exceptions.
       E.g. notice we're still in the trap shadow of something within the
       TB and do not generate the code to signal the exception; end the TB
       when an exception is forced to arrive, either by consumption of a
       register value or TRAPB or EXCB.  */
    TCGv_i32 exc = tcg_temp_new_i32();
    TCGv_i32 reg;

#if defined(CONFIG_SOFTFLOAT_INLINE)
    tcg_gen_ld8u_i32(exc, cpu_env,
                     offsetof(CPUState, fp_status.float_exception_flags));
#else
    gen_helper_fp_exc_get(exc);
#endif

    if (ignore) {
        tcg_gen_andi_i32(exc, exc, ~ignore);
    }

    /* ??? Pass in the regno of the destination so that the helper can
       set EXC_MASK, which contains a bitmask of destination registers
       that have caused arithmetic traps.  A simple userspace emulation
       does not require this.  We do need it for a guest kernel's entArith,
       or if we were to do something clever with imprecise exceptions.  */
    reg = tcg_const_i32(rc + 32);

    if (fn11 & QUAL_S) {
        gen_helper_fp_exc_raise_s(exc, reg);
    } else {
        gen_helper_fp_exc_raise(exc, reg);
    }

    tcg_temp_free_i32(reg);
    tcg_temp_free_i32(exc);
}

static inline void gen_fp_exc_raise(int rc, int fn11)
{
    gen_fp_exc_raise_ignore(rc, fn11, fn11 & QUAL_I ? 0 : float_flag_inexact);
}

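/* Example of the gating above: for an instruction without the /I
   qualifier, float_flag_inexact is added to the ignore mask, so an
   inexact result never raises a trap; with /I the mask is 0 and all
   accrued exception flags are reported to the helper.  */
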
#define FARITH2(name)                                       \
static inline void glue(gen_f, name)(int rb, int rc)        \
{                                                           \
    if (unlikely(rc == 31)) {                               \
        return;                                             \
    }                                                       \
    if (rb != 31) {                                         \
        gen_helper_ ## name (cpu_fir[rc], cpu_fir[rb]);     \
    } else {                                                \
        TCGv tmp = tcg_const_i64(0);                        \
        gen_helper_ ## name (cpu_fir[rc], tmp);             \
        tcg_temp_free(tmp);                                 \
    }                                                       \
}
FARITH2(cvtlq)
FARITH2(cvtql)
FARITH2(cvtql_v)
FARITH2(cvtql_sv)

/* ??? VAX instruction qualifiers ignored.  */
FARITH2(sqrtf)
FARITH2(sqrtg)
FARITH2(cvtgf)
FARITH2(cvtgq)
FARITH2(cvtqf)
FARITH2(cvtqg)

static void gen_ieee_arith2(DisasContext *ctx, void (*helper)(TCGv, TCGv),
                            int rb, int rc, int fn11)
{
    TCGv vb;

    /* ??? This is wrong: the instruction is not a nop, it still may
       raise exceptions.  */
    if (unlikely(rc == 31)) {
        return;
    }

    gen_qual_roundmode(ctx, fn11);
    gen_qual_flushzero(ctx, fn11);
    gen_fp_exc_clear();

    vb = gen_ieee_input(rb, fn11, 0);
    helper(cpu_fir[rc], vb);
    tcg_temp_free(vb);

    gen_fp_exc_raise(rc, fn11);
}

#define IEEE_ARITH2(name)                                       \
static inline void glue(gen_f, name)(DisasContext *ctx,         \
                                     int rb, int rc, int fn11)  \
{                                                               \
    gen_ieee_arith2(ctx, gen_helper_##name, rb, rc, fn11);      \
}
IEEE_ARITH2(sqrts)
IEEE_ARITH2(sqrtt)
IEEE_ARITH2(cvtst)
IEEE_ARITH2(cvtts)

static void gen_fcvttq(DisasContext *ctx, int rb, int rc, int fn11)
{
    TCGv vb;
    int ignore = 0;

    /* ??? This is wrong: the instruction is not a nop, it still may
       raise exceptions.  */
    if (unlikely(rc == 31)) {
        return;
    }

    /* No need to set flushzero, since we have an integer output.  */
    gen_fp_exc_clear();
    vb = gen_ieee_input(rb, fn11, 0);

    /* Almost all integer conversions use cropped rounding, and most
       also do not have integer overflow enabled.  Special case that.  */
    switch (fn11) {
    case QUAL_RM_C:
        gen_helper_cvttq_c(cpu_fir[rc], vb);
        break;
    case QUAL_V | QUAL_RM_C:
    case QUAL_S | QUAL_V | QUAL_RM_C:
        ignore = float_flag_inexact;
        /* FALLTHRU */
    case QUAL_S | QUAL_V | QUAL_I | QUAL_RM_C:
        gen_helper_cvttq_svic(cpu_fir[rc], vb);
        break;
    default:
        gen_qual_roundmode(ctx, fn11);
        gen_helper_cvttq(cpu_fir[rc], vb);
        ignore |= (fn11 & QUAL_V ? 0 : float_flag_overflow);
        ignore |= (fn11 & QUAL_I ? 0 : float_flag_inexact);
        break;
    }
    tcg_temp_free(vb);

    gen_fp_exc_raise_ignore(rc, fn11, ignore);
}

static void gen_ieee_intcvt(DisasContext *ctx, void (*helper)(TCGv, TCGv),
                            int rb, int rc, int fn11)
{
    TCGv vb;

    /* ??? This is wrong: the instruction is not a nop, it still may
       raise exceptions.  */
    if (unlikely(rc == 31)) {
        return;
    }

    gen_qual_roundmode(ctx, fn11);

    if (rb == 31) {
        vb = tcg_const_i64(0);
    } else {
        vb = cpu_fir[rb];
    }

    /* The only exception that can be raised by integer conversion
       is inexact.  Thus we only need to worry about exceptions when
       inexact handling is requested.  */
    if (fn11 & QUAL_I) {
        gen_fp_exc_clear();
        helper(cpu_fir[rc], vb);
        gen_fp_exc_raise(rc, fn11);
    } else {
        helper(cpu_fir[rc], vb);
    }

    if (rb == 31) {
        tcg_temp_free(vb);
    }
}

#define IEEE_INTCVT(name)                                       \
static inline void glue(gen_f, name)(DisasContext *ctx,         \
                                     int rb, int rc, int fn11)  \
{                                                               \
    gen_ieee_intcvt(ctx, gen_helper_##name, rb, rc, fn11);      \
}
IEEE_INTCVT(cvtqs)
IEEE_INTCVT(cvtqt)

#define FARITH3(name)                                           \
static inline void glue(gen_f, name)(int ra, int rb, int rc)    \
{                                                               \
    TCGv va, vb;                                                \
                                                                \
    if (unlikely(rc == 31)) {                                   \
        return;                                                 \
    }                                                           \
    if (ra == 31) {                                             \
        va = tcg_const_i64(0);                                  \
    } else {                                                    \
        va = cpu_fir[ra];                                       \
    }                                                           \
    if (rb == 31) {                                             \
        vb = tcg_const_i64(0);                                  \
    } else {                                                    \
        vb = cpu_fir[rb];                                       \
    }                                                           \
                                                                \
    gen_helper_ ## name (cpu_fir[rc], va, vb);                  \
                                                                \
    if (ra == 31) {                                             \
        tcg_temp_free(va);                                      \
    }                                                           \
    if (rb == 31) {                                             \
        tcg_temp_free(vb);                                      \
    }                                                           \
}

/* ??? Ought to expand these inline; simple masking operations.  */
FARITH3(cpys)
FARITH3(cpysn)
FARITH3(cpyse)

/* ??? VAX instruction qualifiers ignored.  */
FARITH3(addf)
FARITH3(subf)
FARITH3(mulf)
FARITH3(divf)
FARITH3(addg)
FARITH3(subg)
FARITH3(mulg)
FARITH3(divg)
FARITH3(cmpgeq)
FARITH3(cmpglt)
FARITH3(cmpgle)

static void gen_ieee_arith3(DisasContext *ctx,
                            void (*helper)(TCGv, TCGv, TCGv),
                            int ra, int rb, int rc, int fn11)
{
    TCGv va, vb;

    /* ??? This is wrong: the instruction is not a nop, it still may
       raise exceptions.  */
    if (unlikely(rc == 31)) {
        return;
    }

    gen_qual_roundmode(ctx, fn11);
    gen_qual_flushzero(ctx, fn11);
    gen_fp_exc_clear();

    va = gen_ieee_input(ra, fn11, 0);
    vb = gen_ieee_input(rb, fn11, 0);
    helper(cpu_fir[rc], va, vb);
    tcg_temp_free(va);
    tcg_temp_free(vb);

    gen_fp_exc_raise(rc, fn11);
}

#define IEEE_ARITH3(name)                                               \
static inline void glue(gen_f, name)(DisasContext *ctx,                 \
                                     int ra, int rb, int rc, int fn11)  \
{                                                                       \
    gen_ieee_arith3(ctx, gen_helper_##name, ra, rb, rc, fn11);          \
}
IEEE_ARITH3(adds)
IEEE_ARITH3(subs)
IEEE_ARITH3(muls)
IEEE_ARITH3(divs)
IEEE_ARITH3(addt)
IEEE_ARITH3(subt)
IEEE_ARITH3(mult)
IEEE_ARITH3(divt)

static void gen_ieee_compare(DisasContext *ctx,
                             void (*helper)(TCGv, TCGv, TCGv),
                             int ra, int rb, int rc, int fn11)
{
    TCGv va, vb;

    /* ??? This is wrong: the instruction is not a nop, it still may
       raise exceptions.  */
    if (unlikely(rc == 31)) {
        return;
    }

    gen_fp_exc_clear();

    va = gen_ieee_input(ra, fn11, 1);
    vb = gen_ieee_input(rb, fn11, 1);
    helper(cpu_fir[rc], va, vb);
    tcg_temp_free(va);
    tcg_temp_free(vb);

    gen_fp_exc_raise(rc, fn11);
}

#define IEEE_CMP3(name)                                                 \
static inline void glue(gen_f, name)(DisasContext *ctx,                 \
                                     int ra, int rb, int rc, int fn11)  \
{                                                                       \
    gen_ieee_compare(ctx, gen_helper_##name, ra, rb, rc, fn11);         \
}
IEEE_CMP3(cmptun)
IEEE_CMP3(cmpteq)
IEEE_CMP3(cmptlt)
IEEE_CMP3(cmptle)

static inline uint64_t zapnot_mask(uint8_t lit)
{
    uint64_t mask = 0;
    int i;

    for (i = 0; i < 8; ++i) {
        if ((lit >> i) & 1)
            mask |= 0xffull << (i * 8);
    }
    return mask;
}

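/* For example, zapnot_mask(0x01) == 0x00000000000000ff and
   zapnot_mask(0x0f) == 0x00000000ffffffff: each set bit in LIT keeps
   the corresponding byte of the operand.  */
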
/* Implement zapnot with an immediate operand, which expands to some
   form of immediate AND.  This is a basic building block in the
   definition of many of the other byte manipulation instructions.  */
static void gen_zapnoti(TCGv dest, TCGv src, uint8_t lit)
{
    switch (lit) {
    case 0x00:
        tcg_gen_movi_i64(dest, 0);
        break;
    case 0x01:
        tcg_gen_ext8u_i64(dest, src);
        break;
    case 0x03:
        tcg_gen_ext16u_i64(dest, src);
        break;
    case 0x0f:
        tcg_gen_ext32u_i64(dest, src);
        break;
    case 0xff:
        tcg_gen_mov_i64(dest, src);
        break;
    default:
        tcg_gen_andi_i64 (dest, src, zapnot_mask (lit));
        break;
    }
}

static inline void gen_zapnot(int ra, int rb, int rc, int islit, uint8_t lit)
{
    if (unlikely(rc == 31))
        return;
    else if (unlikely(ra == 31))
        tcg_gen_movi_i64(cpu_ir[rc], 0);
    else if (islit)
        gen_zapnoti(cpu_ir[rc], cpu_ir[ra], lit);
    else
        gen_helper_zapnot (cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
}

static inline void gen_zap(int ra, int rb, int rc, int islit, uint8_t lit)
{
    if (unlikely(rc == 31))
        return;
    else if (unlikely(ra == 31))
        tcg_gen_movi_i64(cpu_ir[rc], 0);
    else if (islit)
        gen_zapnoti(cpu_ir[rc], cpu_ir[ra], ~lit);
    else
        gen_helper_zap (cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
}

/* EXTWH, EXTLH, EXTQH */
static void gen_ext_h(int ra, int rb, int rc, int islit,
                      uint8_t lit, uint8_t byte_mask)
{
    if (unlikely(rc == 31))
        return;
    else if (unlikely(ra == 31))
        tcg_gen_movi_i64(cpu_ir[rc], 0);
    else {
        if (islit) {
            lit = (64 - (lit & 7) * 8) & 0x3f;
            tcg_gen_shli_i64(cpu_ir[rc], cpu_ir[ra], lit);
        } else {
            TCGv tmp1 = tcg_temp_new();
            tcg_gen_andi_i64(tmp1, cpu_ir[rb], 7);
            tcg_gen_shli_i64(tmp1, tmp1, 3);
            tcg_gen_neg_i64(tmp1, tmp1);
            tcg_gen_andi_i64(tmp1, tmp1, 0x3f);
            tcg_gen_shl_i64(cpu_ir[rc], cpu_ir[ra], tmp1);
            tcg_temp_free(tmp1);
        }
        gen_zapnoti(cpu_ir[rc], cpu_ir[rc], byte_mask);
    }
}

/* EXTBL, EXTWL, EXTLL, EXTQL */
static void gen_ext_l(int ra, int rb, int rc, int islit,
                      uint8_t lit, uint8_t byte_mask)
{
    if (unlikely(rc == 31))
        return;
    else if (unlikely(ra == 31))
        tcg_gen_movi_i64(cpu_ir[rc], 0);
    else {
        if (islit) {
            tcg_gen_shri_i64(cpu_ir[rc], cpu_ir[ra], (lit & 7) * 8);
        } else {
            TCGv tmp = tcg_temp_new();
            tcg_gen_andi_i64(tmp, cpu_ir[rb], 7);
            tcg_gen_shli_i64(tmp, tmp, 3);
            tcg_gen_shr_i64(cpu_ir[rc], cpu_ir[ra], tmp);
            tcg_temp_free(tmp);
        }
        gen_zapnoti(cpu_ir[rc], cpu_ir[rc], byte_mask);
    }
}

/* INSWH, INSLH, INSQH */
static void gen_ins_h(int ra, int rb, int rc, int islit,
                      uint8_t lit, uint8_t byte_mask)
{
    if (unlikely(rc == 31))
        return;
    else if (unlikely(ra == 31) || (islit && (lit & 7) == 0))
        tcg_gen_movi_i64(cpu_ir[rc], 0);
    else {
        TCGv tmp = tcg_temp_new();

        /* The instruction description has us left-shift the byte mask
           and extract bits <15:8> and apply that zap at the end.  This
           is equivalent to simply performing the zap first and shifting
           afterward.  */
        gen_zapnoti (tmp, cpu_ir[ra], byte_mask);

        if (islit) {
            /* Note that we have handled the lit==0 case above.  */
            tcg_gen_shri_i64 (cpu_ir[rc], tmp, 64 - (lit & 7) * 8);
        } else {
            TCGv shift = tcg_temp_new();

            /* If (B & 7) == 0, we need to shift by 64 and leave a zero.
               Do this portably by splitting the shift into two parts:
               shift_count-1 and 1.  Arrange for the -1 by using
               ones-complement instead of twos-complement in the negation:
               ~((B & 7) * 8) & 63.  */

            tcg_gen_andi_i64(shift, cpu_ir[rb], 7);
            tcg_gen_shli_i64(shift, shift, 3);
            tcg_gen_not_i64(shift, shift);
            tcg_gen_andi_i64(shift, shift, 0x3f);

            tcg_gen_shr_i64(cpu_ir[rc], tmp, shift);
            tcg_gen_shri_i64(cpu_ir[rc], cpu_ir[rc], 1);
            tcg_temp_free(shift);
        }
        tcg_temp_free(tmp);
    }
}

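/* Worked example for the variable-shift path above: for (B & 7) == 5
   the byte shift is 40, ~40 & 63 == 23, and shifting right by 23 then
   by 1 gives the required 64 - 40 == 24; for (B & 7) == 0 it yields
   63 + 1 == 64, which correctly leaves zero without relying on an
   out-of-range 64-bit shift count.  */
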
/* INSBL, INSWL, INSLL, INSQL */
static void gen_ins_l(int ra, int rb, int rc, int islit,
                      uint8_t lit, uint8_t byte_mask)
{
    if (unlikely(rc == 31))
        return;
    else if (unlikely(ra == 31))
        tcg_gen_movi_i64(cpu_ir[rc], 0);
    else {
        TCGv tmp = tcg_temp_new();

        /* The instruction description has us left-shift the byte mask
           the same number of byte slots as the data and apply the zap
           at the end.  This is equivalent to simply performing the zap
           first and shifting afterward.  */
        gen_zapnoti (tmp, cpu_ir[ra], byte_mask);

        if (islit) {
            tcg_gen_shli_i64(cpu_ir[rc], tmp, (lit & 7) * 8);
        } else {
            TCGv shift = tcg_temp_new();
            tcg_gen_andi_i64(shift, cpu_ir[rb], 7);
            tcg_gen_shli_i64(shift, shift, 3);
            tcg_gen_shl_i64(cpu_ir[rc], tmp, shift);
            tcg_temp_free(shift);
        }
        tcg_temp_free(tmp);
    }
}

/* MSKWH, MSKLH, MSKQH */
static void gen_msk_h(int ra, int rb, int rc, int islit,
                      uint8_t lit, uint8_t byte_mask)
{
    if (unlikely(rc == 31))
        return;
    else if (unlikely(ra == 31))
        tcg_gen_movi_i64(cpu_ir[rc], 0);
    else if (islit) {
        gen_zapnoti (cpu_ir[rc], cpu_ir[ra], ~((byte_mask << (lit & 7)) >> 8));
    } else {
        TCGv shift = tcg_temp_new();
        TCGv mask = tcg_temp_new();

        /* The instruction description is as above, where the byte_mask
           is shifted left, and then we extract bits <15:8>.  This can be
           emulated with a right-shift on the expanded byte mask.  This
           requires extra care because for an input <2:0> == 0 we need a
           shift of 64 bits in order to generate a zero.  This is done by
           splitting the shift into two parts, the variable shift - 1
           followed by a constant 1 shift.  The code we expand below is
           equivalent to ~((B & 7) * 8) & 63.  */

        tcg_gen_andi_i64(shift, cpu_ir[rb], 7);
        tcg_gen_shli_i64(shift, shift, 3);
        tcg_gen_not_i64(shift, shift);
        tcg_gen_andi_i64(shift, shift, 0x3f);
        tcg_gen_movi_i64(mask, zapnot_mask (byte_mask));
        tcg_gen_shr_i64(mask, mask, shift);
        tcg_gen_shri_i64(mask, mask, 1);

        tcg_gen_andc_i64(cpu_ir[rc], cpu_ir[ra], mask);

        tcg_temp_free(mask);
        tcg_temp_free(shift);
    }
}

/* MSKBL, MSKWL, MSKLL, MSKQL */
static void gen_msk_l(int ra, int rb, int rc, int islit,
                      uint8_t lit, uint8_t byte_mask)
{
    if (unlikely(rc == 31))
        return;
    else if (unlikely(ra == 31))
        tcg_gen_movi_i64(cpu_ir[rc], 0);
    else if (islit) {
        gen_zapnoti (cpu_ir[rc], cpu_ir[ra], ~(byte_mask << (lit & 7)));
    } else {
        TCGv shift = tcg_temp_new();
        TCGv mask = tcg_temp_new();

        tcg_gen_andi_i64(shift, cpu_ir[rb], 7);
        tcg_gen_shli_i64(shift, shift, 3);
        tcg_gen_movi_i64(mask, zapnot_mask (byte_mask));
        tcg_gen_shl_i64(mask, mask, shift);

        tcg_gen_andc_i64(cpu_ir[rc], cpu_ir[ra], mask);

        tcg_temp_free(mask);
        tcg_temp_free(shift);
    }
}

/* Code to call arith3 helpers */
#define ARITH3(name)                                                  \
static inline void glue(gen_, name)(int ra, int rb, int rc, int islit,\
                                    uint8_t lit)                      \
{                                                                     \
    if (unlikely(rc == 31))                                           \
        return;                                                       \
                                                                      \
    if (ra != 31) {                                                   \
        if (islit) {                                                  \
            TCGv tmp = tcg_const_i64(lit);                            \
            gen_helper_ ## name(cpu_ir[rc], cpu_ir[ra], tmp);         \
            tcg_temp_free(tmp);                                       \
        } else                                                        \
            gen_helper_ ## name (cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]); \
    } else {                                                          \
        TCGv tmp1 = tcg_const_i64(0);                                 \
        if (islit) {                                                  \
            TCGv tmp2 = tcg_const_i64(lit);                           \
            gen_helper_ ## name (cpu_ir[rc], tmp1, tmp2);             \
            tcg_temp_free(tmp2);                                      \
        } else                                                        \
            gen_helper_ ## name (cpu_ir[rc], tmp1, cpu_ir[rb]);       \
        tcg_temp_free(tmp1);                                          \
    }                                                                 \
}
ARITH3(cmpbge)
ARITH3(addlv)
ARITH3(sublv)
ARITH3(addqv)
ARITH3(subqv)
ARITH3(umulh)
ARITH3(mullv)
ARITH3(mulqv)
ARITH3(minub8)
ARITH3(minsb8)
ARITH3(minuw4)
ARITH3(minsw4)
ARITH3(maxub8)
ARITH3(maxsb8)
ARITH3(maxuw4)
ARITH3(maxsw4)
ARITH3(perr)

#define MVIOP2(name)                                    \
static inline void glue(gen_, name)(int rb, int rc)     \
{                                                       \
    if (unlikely(rc == 31))                             \
        return;                                         \
    if (unlikely(rb == 31))                             \
        tcg_gen_movi_i64(cpu_ir[rc], 0);                \
    else                                                \
        gen_helper_ ## name (cpu_ir[rc], cpu_ir[rb]);   \
}
MVIOP2(pklb)
MVIOP2(pkwb)
MVIOP2(unpkbl)
MVIOP2(unpkbw)

static inline void gen_cmp(TCGCond cond, int ra, int rb, int rc, int islit,
                           uint8_t lit)
{
    int l1, l2;
    TCGv tmp;

    if (unlikely(rc == 31))
        return;

    l1 = gen_new_label();
    l2 = gen_new_label();

    if (ra != 31) {
        tmp = tcg_temp_new();
        tcg_gen_mov_i64(tmp, cpu_ir[ra]);
    } else
        tmp = tcg_const_i64(0);
    if (islit)
        tcg_gen_brcondi_i64(cond, tmp, lit, l1);
    else
        tcg_gen_brcond_i64(cond, tmp, cpu_ir[rb], l1);

    tcg_gen_movi_i64(cpu_ir[rc], 0);
    tcg_gen_br(l2);
    gen_set_label(l1);
    tcg_gen_movi_i64(cpu_ir[rc], 1);
    gen_set_label(l2);
}

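/* gen_cmp materializes the boolean with two labels rather than a
   setcond: e.g. the CMPULT case below reaches here with
   cond == TCG_COND_LTU and islit == 0, and leaves 0 or 1 in
   cpu_ir[rc].  */
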
static inline int translate_one(DisasContext *ctx, uint32_t insn)
{
    uint32_t palcode;
    int32_t disp21, disp16, disp12;
    uint16_t fn11, fn16;
    uint8_t opc, ra, rb, rc, sbz, fpfn, fn7, fn2, islit, real_islit;
    uint8_t lit;
    int ret;

    /* Decode all instruction fields */
    opc = insn >> 26;
    ra = (insn >> 21) & 0x1F;
    rb = (insn >> 16) & 0x1F;
    rc = insn & 0x1F;
    sbz = (insn >> 13) & 0x07;
    real_islit = islit = (insn >> 12) & 1;
    if (rb == 31 && !islit) {
        islit = 1;
        lit = 0;
    } else
        lit = (insn >> 13) & 0xFF;
    palcode = insn & 0x03FFFFFF;
    disp21 = ((int32_t)((insn & 0x001FFFFF) << 11)) >> 11;
    disp16 = (int16_t)(insn & 0x0000FFFF);
    disp12 = (int32_t)((insn & 0x00000FFF) << 20) >> 20;
    fn16 = insn & 0x0000FFFF;
    fn11 = (insn >> 5) & 0x000007FF;
    fpfn = fn11 & 0x3F;
    fn7 = (insn >> 5) & 0x0000007F;
    fn2 = (insn >> 5) & 0x00000003;
    ret = 0;
    LOG_DISAS("opc %02x ra %2d rb %2d rc %2d disp16 %6d\n",
              opc, ra, rb, rc, disp16);

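    /* Decode example: the canonical Alpha no-op BIS $31,$31,$31
       (insn 0x47ff041f) yields opc 0x11, ra = rb = rc = 31 and
       fn7 0x20; since rb == 31 and the literal bit is clear, islit is
       forced to 1 with lit = 0.  */
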
    switch (opc) {
    case 0x00:
        /* CALL_PAL */
#ifdef CONFIG_USER_ONLY
        if (palcode == 0x9E) {
            /* RDUNIQUE */
            tcg_gen_mov_i64(cpu_ir[IR_V0], cpu_uniq);
            break;
        } else if (palcode == 0x9F) {
            /* WRUNIQUE */
            tcg_gen_mov_i64(cpu_uniq, cpu_ir[IR_A0]);
            break;
        }
#endif
        if (palcode >= 0x80 && palcode < 0xC0) {
            /* Unprivileged PAL call */
            gen_excp(ctx, EXCP_CALL_PAL + ((palcode & 0x3F) << 6), 0);
            ret = 3;
            break;
        }
#ifndef CONFIG_USER_ONLY
        if (palcode < 0x40) {
            /* Privileged PAL code */
            if (ctx->mem_idx & 1)
                goto invalid_opc;
            gen_excp(ctx, EXCP_CALL_PALP + ((palcode & 0x3F) << 6), 0);
            ret = 3;
        }
#endif
        /* Invalid PAL call */
        goto invalid_opc;
    case 0x01:
        /* OPC01 */
        goto invalid_opc;
    case 0x02:
        /* OPC02 */
        goto invalid_opc;
    case 0x03:
        /* OPC03 */
        goto invalid_opc;
    case 0x04:
        /* OPC04 */
        goto invalid_opc;
    case 0x05:
        /* OPC05 */
        goto invalid_opc;
    case 0x06:
        /* OPC06 */
        goto invalid_opc;
    case 0x07:
        /* OPC07 */
        goto invalid_opc;
    case 0x08:
        /* LDA */
        if (likely(ra != 31)) {
            if (rb != 31)
                tcg_gen_addi_i64(cpu_ir[ra], cpu_ir[rb], disp16);
            else
                tcg_gen_movi_i64(cpu_ir[ra], disp16);
        }
        break;
    case 0x09:
        /* LDAH */
        if (likely(ra != 31)) {
            if (rb != 31)
                tcg_gen_addi_i64(cpu_ir[ra], cpu_ir[rb], disp16 << 16);
            else
                tcg_gen_movi_i64(cpu_ir[ra], disp16 << 16);
        }
        break;
    case 0x0A:
        /* LDBU */
        if (!(ctx->amask & AMASK_BWX))
            goto invalid_opc;
        gen_load_mem(ctx, &tcg_gen_qemu_ld8u, ra, rb, disp16, 0, 0);
        break;
    case 0x0B:
        /* LDQ_U */
        gen_load_mem(ctx, &tcg_gen_qemu_ld64, ra, rb, disp16, 0, 1);
        break;
    case 0x0C:
        /* LDWU */
        if (!(ctx->amask & AMASK_BWX))
            goto invalid_opc;
        gen_load_mem(ctx, &tcg_gen_qemu_ld16u, ra, rb, disp16, 0, 0);
        break;
    case 0x0D:
        /* STW */
        gen_store_mem(ctx, &tcg_gen_qemu_st16, ra, rb, disp16, 0, 0, 0);
        break;
    case 0x0E:
        /* STB */
        gen_store_mem(ctx, &tcg_gen_qemu_st8, ra, rb, disp16, 0, 0, 0);
        break;
    case 0x0F:
        /* STQ_U */
        gen_store_mem(ctx, &tcg_gen_qemu_st64, ra, rb, disp16, 0, 1, 0);
        break;
    case 0x10:
        switch (fn7) {
        case 0x00:
            /* ADDL */
            if (likely(rc != 31)) {
                if (ra != 31) {
                    if (islit) {
                        tcg_gen_addi_i64(cpu_ir[rc], cpu_ir[ra], lit);
                        tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rc]);
                    } else {
                        tcg_gen_add_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
                        tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rc]);
                    }
                } else {
                    if (islit)
                        tcg_gen_movi_i64(cpu_ir[rc], lit);
                    else
                        tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rb]);
                }
            }
            break;
        case 0x02:
            /* S4ADDL */
            if (likely(rc != 31)) {
                if (ra != 31) {
                    TCGv tmp = tcg_temp_new();
                    tcg_gen_shli_i64(tmp, cpu_ir[ra], 2);
                    if (islit)
                        tcg_gen_addi_i64(tmp, tmp, lit);
                    else
                        tcg_gen_add_i64(tmp, tmp, cpu_ir[rb]);
                    tcg_gen_ext32s_i64(cpu_ir[rc], tmp);
                    tcg_temp_free(tmp);
                } else {
                    if (islit)
                        tcg_gen_movi_i64(cpu_ir[rc], lit);
                    else
                        tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rb]);
                }
            }
            break;
        case 0x09:
            /* SUBL */
            if (likely(rc != 31)) {
                if (ra != 31) {
                    if (islit)
                        tcg_gen_subi_i64(cpu_ir[rc], cpu_ir[ra], lit);
                    else
                        tcg_gen_sub_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
                    tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rc]);
                } else {
                    if (islit)
                        tcg_gen_movi_i64(cpu_ir[rc], -lit);
                    else {
                        tcg_gen_neg_i64(cpu_ir[rc], cpu_ir[rb]);
                        tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rc]);
                    }
                }
            }
            break;
        case 0x0B:
            /* S4SUBL */
            if (likely(rc != 31)) {
                if (ra != 31) {
                    TCGv tmp = tcg_temp_new();
                    tcg_gen_shli_i64(tmp, cpu_ir[ra], 2);
                    if (islit)
                        tcg_gen_subi_i64(tmp, tmp, lit);
                    else
                        tcg_gen_sub_i64(tmp, tmp, cpu_ir[rb]);
                    tcg_gen_ext32s_i64(cpu_ir[rc], tmp);
                    tcg_temp_free(tmp);
                } else {
                    if (islit)
                        tcg_gen_movi_i64(cpu_ir[rc], -lit);
                    else {
                        tcg_gen_neg_i64(cpu_ir[rc], cpu_ir[rb]);
                        tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rc]);
                    }
                }
            }
            break;
        case 0x0F:
            /* CMPBGE */
            gen_cmpbge(ra, rb, rc, islit, lit);
            break;
        case 0x12:
            /* S8ADDL */
            if (likely(rc != 31)) {
                if (ra != 31) {
                    TCGv tmp = tcg_temp_new();
                    tcg_gen_shli_i64(tmp, cpu_ir[ra], 3);
                    if (islit)
                        tcg_gen_addi_i64(tmp, tmp, lit);
                    else
                        tcg_gen_add_i64(tmp, tmp, cpu_ir[rb]);
                    tcg_gen_ext32s_i64(cpu_ir[rc], tmp);
                    tcg_temp_free(tmp);
                } else {
                    if (islit)
                        tcg_gen_movi_i64(cpu_ir[rc], lit);
                    else
                        tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rb]);
                }
            }
            break;
        case 0x1B:
            /* S8SUBL */
            if (likely(rc != 31)) {
                if (ra != 31) {
                    TCGv tmp = tcg_temp_new();
                    tcg_gen_shli_i64(tmp, cpu_ir[ra], 3);
                    if (islit)
                        tcg_gen_subi_i64(tmp, tmp, lit);
                    else
                        tcg_gen_sub_i64(tmp, tmp, cpu_ir[rb]);
                    tcg_gen_ext32s_i64(cpu_ir[rc], tmp);
                    tcg_temp_free(tmp);
                } else {
                    if (islit)
                        tcg_gen_movi_i64(cpu_ir[rc], -lit);
                    else
                        tcg_gen_neg_i64(cpu_ir[rc], cpu_ir[rb]);
                    tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rc]);
                }
            }
            break;
        case 0x1D:
            /* CMPULT */
            gen_cmp(TCG_COND_LTU, ra, rb, rc, islit, lit);
            break;
        case 0x20:
            /* ADDQ */
            if (likely(rc != 31)) {
                if (ra != 31) {
                    if (islit)
                        tcg_gen_addi_i64(cpu_ir[rc], cpu_ir[ra], lit);
                    else
                        tcg_gen_add_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
                } else {
                    if (islit)
                        tcg_gen_movi_i64(cpu_ir[rc], lit);
                    else
                        tcg_gen_mov_i64(cpu_ir[rc], cpu_ir[rb]);
                }
            }
            break;
        case 0x22:
            /* S4ADDQ */
            if (likely(rc != 31)) {
                if (ra != 31) {
                    TCGv tmp = tcg_temp_new();
                    tcg_gen_shli_i64(tmp, cpu_ir[ra], 2);
                    if (islit)
                        tcg_gen_addi_i64(cpu_ir[rc], tmp, lit);
                    else
                        tcg_gen_add_i64(cpu_ir[rc], tmp, cpu_ir[rb]);
                    tcg_temp_free(tmp);
                } else {
                    if (islit)
                        tcg_gen_movi_i64(cpu_ir[rc], lit);
                    else
                        tcg_gen_mov_i64(cpu_ir[rc], cpu_ir[rb]);
                }
            }
            break;
        case 0x29:
            /* SUBQ */
            if (likely(rc != 31)) {
                if (ra != 31) {
                    if (islit)
                        tcg_gen_subi_i64(cpu_ir[rc], cpu_ir[ra], lit);
                    else
                        tcg_gen_sub_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
                } else {
                    if (islit)
                        tcg_gen_movi_i64(cpu_ir[rc], -lit);
                    else
                        tcg_gen_neg_i64(cpu_ir[rc], cpu_ir[rb]);
                }
            }
            break;
        case 0x2B:
            /* S4SUBQ */
            if (likely(rc != 31)) {
                if (ra != 31) {
                    TCGv tmp = tcg_temp_new();
                    tcg_gen_shli_i64(tmp, cpu_ir[ra], 2);
                    if (islit)
                        tcg_gen_subi_i64(cpu_ir[rc], tmp, lit);
                    else
                        tcg_gen_sub_i64(cpu_ir[rc], tmp, cpu_ir[rb]);
                    tcg_temp_free(tmp);
                } else {
                    if (islit)
                        tcg_gen_movi_i64(cpu_ir[rc], -lit);
                    else
                        tcg_gen_neg_i64(cpu_ir[rc], cpu_ir[rb]);
                }
            }
            break;
        case 0x2D:
            /* CMPEQ */
            gen_cmp(TCG_COND_EQ, ra, rb, rc, islit, lit);
            break;
        case 0x32:
            /* S8ADDQ */
            if (likely(rc != 31)) {
                if (ra != 31) {
                    TCGv tmp = tcg_temp_new();
                    tcg_gen_shli_i64(tmp, cpu_ir[ra], 3);
                    if (islit)
                        tcg_gen_addi_i64(cpu_ir[rc], tmp, lit);
                    else
                        tcg_gen_add_i64(cpu_ir[rc], tmp, cpu_ir[rb]);
                    tcg_temp_free(tmp);
                } else {
                    if (islit)
                        tcg_gen_movi_i64(cpu_ir[rc], lit);
                    else
                        tcg_gen_mov_i64(cpu_ir[rc], cpu_ir[rb]);
                }
            }
            break;
        case 0x3B:
            /* S8SUBQ */
            if (likely(rc != 31)) {
                if (ra != 31) {
                    TCGv tmp = tcg_temp_new();
                    tcg_gen_shli_i64(tmp, cpu_ir[ra], 3);
                    if (islit)
                        tcg_gen_subi_i64(cpu_ir[rc], tmp, lit);
                    else
                        tcg_gen_sub_i64(cpu_ir[rc], tmp, cpu_ir[rb]);
                    tcg_temp_free(tmp);
                } else {
                    if (islit)
                        tcg_gen_movi_i64(cpu_ir[rc], -lit);
                    else
                        tcg_gen_neg_i64(cpu_ir[rc], cpu_ir[rb]);
                }
            }
            break;
        case 0x3D:
            /* CMPULE */
            gen_cmp(TCG_COND_LEU, ra, rb, rc, islit, lit);
            break;
        case 0x40:
            /* ADDL/V */
            gen_addlv(ra, rb, rc, islit, lit);
            break;
        case 0x49:
            /* SUBL/V */
            gen_sublv(ra, rb, rc, islit, lit);
            break;
        case 0x4D:
            /* CMPLT */
            gen_cmp(TCG_COND_LT, ra, rb, rc, islit, lit);
            break;
        case 0x60:
            /* ADDQ/V */
            gen_addqv(ra, rb, rc, islit, lit);
            break;
        case 0x69:
            /* SUBQ/V */
            gen_subqv(ra, rb, rc, islit, lit);
            break;
        case 0x6D:
            /* CMPLE */
            gen_cmp(TCG_COND_LE, ra, rb, rc, islit, lit);
            break;
        default:
            goto invalid_opc;
        }
        break;
    case 0x11:
        switch (fn7) {
        case 0x00:
            /* AND */
            if (likely(rc != 31)) {
                if (ra == 31)
                    tcg_gen_movi_i64(cpu_ir[rc], 0);
                else if (islit)
                    tcg_gen_andi_i64(cpu_ir[rc], cpu_ir[ra], lit);
                else
                    tcg_gen_and_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
            }
            break;
        case 0x08:
            /* BIC */
            if (likely(rc != 31)) {
                if (ra != 31) {
                    if (islit)
                        tcg_gen_andi_i64(cpu_ir[rc], cpu_ir[ra], ~lit);
                    else
                        tcg_gen_andc_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
                } else
                    tcg_gen_movi_i64(cpu_ir[rc], 0);
            }
            break;
        case 0x14:
            /* CMOVLBS */
            gen_cmov(TCG_COND_EQ, ra, rb, rc, islit, lit, 1);
            break;
        case 0x16:
            /* CMOVLBC */
            gen_cmov(TCG_COND_NE, ra, rb, rc, islit, lit, 1);
            break;
        case 0x20:
            /* BIS */
            if (likely(rc != 31)) {
                if (ra != 31) {
                    if (islit)
                        tcg_gen_ori_i64(cpu_ir[rc], cpu_ir[ra], lit);
                    else
                        tcg_gen_or_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
                } else {
                    if (islit)
                        tcg_gen_movi_i64(cpu_ir[rc], lit);
                    else
                        tcg_gen_mov_i64(cpu_ir[rc], cpu_ir[rb]);
                }
            }
            break;
        case 0x24:
            /* CMOVEQ */
            gen_cmov(TCG_COND_NE, ra, rb, rc, islit, lit, 0);
            break;
        case 0x26:
            /* CMOVNE */
            gen_cmov(TCG_COND_EQ, ra, rb, rc, islit, lit, 0);
            break;
        case 0x28:
            /* ORNOT */
            if (likely(rc != 31)) {
                if (ra != 31) {
                    if (islit)
                        tcg_gen_ori_i64(cpu_ir[rc], cpu_ir[ra], ~lit);
                    else
                        tcg_gen_orc_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
                } else {
                    if (islit)
                        tcg_gen_movi_i64(cpu_ir[rc], ~lit);
                    else
                        tcg_gen_not_i64(cpu_ir[rc], cpu_ir[rb]);
                }
            }
            break;
        case 0x40:
            /* XOR */
            if (likely(rc != 31)) {
                if (ra != 31) {
                    if (islit)
                        tcg_gen_xori_i64(cpu_ir[rc], cpu_ir[ra], lit);
                    else
                        tcg_gen_xor_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
                } else {
                    if (islit)
                        tcg_gen_movi_i64(cpu_ir[rc], lit);
                    else
                        tcg_gen_mov_i64(cpu_ir[rc], cpu_ir[rb]);
                }
            }
            break;
        case 0x44:
            /* CMOVLT */
            gen_cmov(TCG_COND_GE, ra, rb, rc, islit, lit, 0);
            break;
        case 0x46:
            /* CMOVGE */
            gen_cmov(TCG_COND_LT, ra, rb, rc, islit, lit, 0);
            break;
        case 0x48:
            /* EQV */
            if (likely(rc != 31)) {
                if (ra != 31) {
                    if (islit)
                        tcg_gen_xori_i64(cpu_ir[rc], cpu_ir[ra], ~lit);
                    else
                        tcg_gen_eqv_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
                } else {
                    if (islit)
                        tcg_gen_movi_i64(cpu_ir[rc], ~lit);
                    else
                        tcg_gen_not_i64(cpu_ir[rc], cpu_ir[rb]);
                }
            }
            break;
        case 0x61:
            /* AMASK */
            if (likely(rc != 31)) {
                if (islit)
                    tcg_gen_movi_i64(cpu_ir[rc], lit);
                else
                    tcg_gen_mov_i64(cpu_ir[rc], cpu_ir[rb]);
                switch (ctx->env->implver) {
                case IMPLVER_2106x:
                    /* EV4, EV45, LCA, LCA45 & EV5 */
                    break;
                case IMPLVER_21164:
                case IMPLVER_21264:
                case IMPLVER_21364:
                    tcg_gen_andi_i64(cpu_ir[rc], cpu_ir[rc],
                                     ~(uint64_t)ctx->amask);
                    break;
                }
            }
            break;
        case 0x64:
            /* CMOVLE */
            gen_cmov(TCG_COND_GT, ra, rb, rc, islit, lit, 0);
            break;
        case 0x66:
            /* CMOVGT */
            gen_cmov(TCG_COND_LE, ra, rb, rc, islit, lit, 0);
            break;
        case 0x6C:
            /* IMPLVER */
            if (rc != 31)
                tcg_gen_movi_i64(cpu_ir[rc], ctx->env->implver);
            break;
        default:
            goto invalid_opc;
        }
        break;
    case 0x12:
        switch (fn7) {
        case 0x02:
            /* MSKBL */
            gen_msk_l(ra, rb, rc, islit, lit, 0x01);
            break;
        case 0x06:
            /* EXTBL */
            gen_ext_l(ra, rb, rc, islit, lit, 0x01);
            break;
        case 0x0B:
            /* INSBL */
            gen_ins_l(ra, rb, rc, islit, lit, 0x01);
            break;
        case 0x12:
            /* MSKWL */
            gen_msk_l(ra, rb, rc, islit, lit, 0x03);
            break;
        case 0x16:
            /* EXTWL */
            gen_ext_l(ra, rb, rc, islit, lit, 0x03);
            break;
        case 0x1B:
            /* INSWL */
            gen_ins_l(ra, rb, rc, islit, lit, 0x03);
            break;
        case 0x22:
            /* MSKLL */
            gen_msk_l(ra, rb, rc, islit, lit, 0x0f);
            break;
        case 0x26:
            /* EXTLL */
            gen_ext_l(ra, rb, rc, islit, lit, 0x0f);
            break;
        case 0x2B:
            /* INSLL */
            gen_ins_l(ra, rb, rc, islit, lit, 0x0f);
            break;
        case 0x30:
            /* ZAP */
            gen_zap(ra, rb, rc, islit, lit);
            break;
        case 0x31:
            /* ZAPNOT */
            gen_zapnot(ra, rb, rc, islit, lit);
            break;
        case 0x32:
            /* MSKQL */
            gen_msk_l(ra, rb, rc, islit, lit, 0xff);
            break;
        case 0x34:
            /* SRL */
            if (likely(rc != 31)) {
                if (ra != 31) {
                    if (islit)
                        tcg_gen_shri_i64(cpu_ir[rc], cpu_ir[ra], lit & 0x3f);
                    else {
                        TCGv shift = tcg_temp_new();
                        tcg_gen_andi_i64(shift, cpu_ir[rb], 0x3f);
                        tcg_gen_shr_i64(cpu_ir[rc], cpu_ir[ra], shift);
                        tcg_temp_free(shift);
                    }
                } else
                    tcg_gen_movi_i64(cpu_ir[rc], 0);
            }
            break;
        case 0x36:
            /* EXTQL */
            gen_ext_l(ra, rb, rc, islit, lit, 0xff);
            break;
        case 0x39:
            /* SLL */
            if (likely(rc != 31)) {
                if (ra != 31) {
                    if (islit)
                        tcg_gen_shli_i64(cpu_ir[rc], cpu_ir[ra], lit & 0x3f);
                    else {
                        TCGv shift = tcg_temp_new();
                        tcg_gen_andi_i64(shift, cpu_ir[rb], 0x3f);
                        tcg_gen_shl_i64(cpu_ir[rc], cpu_ir[ra], shift);
                        tcg_temp_free(shift);
                    }
                } else
                    tcg_gen_movi_i64(cpu_ir[rc], 0);
            }
            break;
        case 0x3B:
            /* INSQL */
            gen_ins_l(ra, rb, rc, islit, lit, 0xff);
            break;
        case 0x3C:
            /* SRA */
            if (likely(rc != 31)) {
                if (ra != 31) {
                    if (islit)
                        tcg_gen_sari_i64(cpu_ir[rc], cpu_ir[ra], lit & 0x3f);
                    else {
                        TCGv shift = tcg_temp_new();
                        tcg_gen_andi_i64(shift, cpu_ir[rb], 0x3f);
                        tcg_gen_sar_i64(cpu_ir[rc], cpu_ir[ra], shift);
                        tcg_temp_free(shift);
                    }
                } else
                    tcg_gen_movi_i64(cpu_ir[rc], 0);
            }
            break;
        case 0x52:
            /* MSKWH */
            gen_msk_h(ra, rb, rc, islit, lit, 0x03);
            break;
        case 0x57:
            /* INSWH */
            gen_ins_h(ra, rb, rc, islit, lit, 0x03);
            break;
        case 0x5A:
            /* EXTWH */
            gen_ext_h(ra, rb, rc, islit, lit, 0x03);
            break;
        case 0x62:
            /* MSKLH */
            gen_msk_h(ra, rb, rc, islit, lit, 0x0f);
            break;
        case 0x67:
            /* INSLH */
            gen_ins_h(ra, rb, rc, islit, lit, 0x0f);
            break;
        case 0x6A:
            /* EXTLH */
            gen_ext_h(ra, rb, rc, islit, lit, 0x0f);
            break;
        case 0x72:
            /* MSKQH */
            gen_msk_h(ra, rb, rc, islit, lit, 0xff);
            break;
        case 0x77:
            /* INSQH */
            gen_ins_h(ra, rb, rc, islit, lit, 0xff);
            break;
        case 0x7A:
            /* EXTQH */
            gen_ext_h(ra, rb, rc, islit, lit, 0xff);
            break;
        default:
            goto invalid_opc;
        }
        break;
    case 0x13:
        switch (fn7) {
        case 0x00:
            /* MULL */
            if (likely(rc != 31)) {
                if (ra == 31)
                    tcg_gen_movi_i64(cpu_ir[rc], 0);
                else {
                    if (islit)
                        tcg_gen_muli_i64(cpu_ir[rc], cpu_ir[ra], lit);
                    else
                        tcg_gen_mul_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
                    tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rc]);
                }
            }
            break;
        case 0x20:
            /* MULQ */
            if (likely(rc != 31)) {
                if (ra == 31)
                    tcg_gen_movi_i64(cpu_ir[rc], 0);
                else if (islit)
                    tcg_gen_muli_i64(cpu_ir[rc], cpu_ir[ra], lit);
                else
                    tcg_gen_mul_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
            }
            break;
        case 0x30:
            /* UMULH */
            gen_umulh(ra, rb, rc, islit, lit);
            break;
        case 0x40:
            /* MULL/V */
            gen_mullv(ra, rb, rc, islit, lit);
            break;
        case 0x60:
            /* MULQ/V */
            gen_mulqv(ra, rb, rc, islit, lit);
            break;
        default:
            goto invalid_opc;
        }
        break;
    case 0x14:
        switch (fpfn) { /* fn11 & 0x3F */
        case 0x04:
            /* ITOFS */
            if (!(ctx->amask & AMASK_FIX))
                goto invalid_opc;
            if (likely(rc != 31)) {
                if (ra != 31) {
                    TCGv_i32 tmp = tcg_temp_new_i32();
                    tcg_gen_trunc_i64_i32(tmp, cpu_ir[ra]);
                    gen_helper_memory_to_s(cpu_fir[rc], tmp);
                    tcg_temp_free_i32(tmp);
                } else
                    tcg_gen_movi_i64(cpu_fir[rc], 0);
            }
            break;
        case 0x0A:
            /* SQRTF */
            if (!(ctx->amask & AMASK_FIX))
                goto invalid_opc;
            gen_fsqrtf(rb, rc);
            break;
        case 0x0B:
            /* SQRTS */
            if (!(ctx->amask & AMASK_FIX))
                goto invalid_opc;
            gen_fsqrts(ctx, rb, rc, fn11);
            break;
        case 0x14:
            /* ITOFF */
            if (!(ctx->amask & AMASK_FIX))
                goto invalid_opc;
            if (likely(rc != 31)) {
                if (ra != 31) {
                    TCGv_i32 tmp = tcg_temp_new_i32();
                    tcg_gen_trunc_i64_i32(tmp, cpu_ir[ra]);
                    gen_helper_memory_to_f(cpu_fir[rc], tmp);
                    tcg_temp_free_i32(tmp);
                } else
                    tcg_gen_movi_i64(cpu_fir[rc], 0);
            }
            break;
        case 0x24:
            /* ITOFT */
            if (!(ctx->amask & AMASK_FIX))
                goto invalid_opc;
            if (likely(rc != 31)) {
                if (ra != 31)
                    tcg_gen_mov_i64(cpu_fir[rc], cpu_ir[ra]);
                else
                    tcg_gen_movi_i64(cpu_fir[rc], 0);
            }
            break;
        case 0x2A:
            /* SQRTG */
            if (!(ctx->amask & AMASK_FIX))
                goto invalid_opc;
            gen_fsqrtg(rb, rc);
            break;
        case 0x02B:
            /* SQRTT */
            if (!(ctx->amask & AMASK_FIX))
                goto invalid_opc;
            gen_fsqrtt(ctx, rb, rc, fn11);
            break;
        default:
            goto invalid_opc;
        }
        break;
    case 0x15:
        /* VAX floating point */
        /* XXX: rounding mode and trap are ignored (!) */
        switch (fpfn) { /* fn11 & 0x3F */
        case 0x00:
            /* ADDF */
            gen_faddf(ra, rb, rc);
            break;
        case 0x01:
            /* SUBF */
            gen_fsubf(ra, rb, rc);
            break;
        case 0x02:
            /* MULF */
            gen_fmulf(ra, rb, rc);
            break;
        case 0x03:
            /* DIVF */
            gen_fdivf(ra, rb, rc);
            break;
        case 0x1E:
            /* CVTDG */
#if 0 // TODO
            gen_fcvtdg(rb, rc);
#else
            goto invalid_opc;
#endif
            break;
        case 0x20:
            /* ADDG */
            gen_faddg(ra, rb, rc);
            break;
        case 0x21:
            /* SUBG */
            gen_fsubg(ra, rb, rc);
            break;
        case 0x22:
            /* MULG */
            gen_fmulg(ra, rb, rc);
            break;
        case 0x23:
            /* DIVG */
            gen_fdivg(ra, rb, rc);
            break;
        case 0x25:
            /* CMPGEQ */
            gen_fcmpgeq(ra, rb, rc);
            break;
        case 0x26:
            /* CMPGLT */
            gen_fcmpglt(ra, rb, rc);
            break;
        case 0x27:
            /* CMPGLE */
            gen_fcmpgle(ra, rb, rc);
            break;
        case 0x2C:
            /* CVTGF */
            gen_fcvtgf(rb, rc);
            break;
        case 0x2D:
            /* CVTGD */
#if 0 // TODO
            gen_fcvtgd(rb, rc);
#else
            goto invalid_opc;
#endif
            break;
        case 0x2F:
            /* CVTGQ */
            gen_fcvtgq(rb, rc);
            break;
        case 0x3C:
            /* CVTQF */
            gen_fcvtqf(rb, rc);
            break;
        case 0x3E:
            /* CVTQG */
            gen_fcvtqg(rb, rc);
            break;
        default:
            goto invalid_opc;
        }
        break;
    case 0x16:
        /* IEEE floating-point */
        switch (fpfn) { /* fn11 & 0x3F */
        case 0x00:
            /* ADDS */
            gen_fadds(ctx, ra, rb, rc, fn11);
            break;
        case 0x01:
            /* SUBS */
            gen_fsubs(ctx, ra, rb, rc, fn11);
            break;
        case 0x02:
            /* MULS */
            gen_fmuls(ctx, ra, rb, rc, fn11);
            break;
        case 0x03:
            /* DIVS */
            gen_fdivs(ctx, ra, rb, rc, fn11);
            break;
        case 0x20:
            /* ADDT */
            gen_faddt(ctx, ra, rb, rc, fn11);
            break;
        case 0x21:
            /* SUBT */
            gen_fsubt(ctx, ra, rb, rc, fn11);
            break;
        case 0x22:
            /* MULT */
            gen_fmult(ctx, ra, rb, rc, fn11);
            break;
        case 0x23:
            /* DIVT */
            gen_fdivt(ctx, ra, rb, rc, fn11);
            break;
        case 0x24:
            /* CMPTUN */
            gen_fcmptun(ctx, ra, rb, rc, fn11);
            break;
        case 0x25:
            /* CMPTEQ */
            gen_fcmpteq(ctx, ra, rb, rc, fn11);
            break;
        case 0x26:
            /* CMPTLT */
            gen_fcmptlt(ctx, ra, rb, rc, fn11);
            break;
        case 0x27:
            /* CMPTLE */
            gen_fcmptle(ctx, ra, rb, rc, fn11);
            break;
        case 0x2C:
            if (fn11 == 0x2AC || fn11 == 0x6AC) {
                /* CVTST */
                gen_fcvtst(ctx, rb, rc, fn11);
            } else {
                /* CVTTS */
                gen_fcvtts(ctx, rb, rc, fn11);
            }
            break;
        case 0x2F:
            /* CVTTQ */
            gen_fcvttq(ctx, rb, rc, fn11);
            break;
        case 0x3C:
            /* CVTQS */
            gen_fcvtqs(ctx, rb, rc, fn11);
            break;
        case 0x3E:
            /* CVTQT */
            gen_fcvtqt(ctx, rb, rc, fn11);
            break;
        default:
            goto invalid_opc;
        }
        break;
    case 0x17:
        switch (fn11) {
        case 0x010:
            /* CVTLQ */
            gen_fcvtlq(rb, rc);
            break;
        case 0x020:
            if (likely(rc != 31)) {
                if (ra == rb) {
                    /* FMOV */
                    if (ra == 31)
                        tcg_gen_movi_i64(cpu_fir[rc], 0);
                    else
                        tcg_gen_mov_i64(cpu_fir[rc], cpu_fir[ra]);
                } else {
                    /* CPYS */
                    gen_fcpys(ra, rb, rc);
                }
            }
            break;
        case 0x021:
            /* CPYSN */
            gen_fcpysn(ra, rb, rc);
            break;
        case 0x022:
            /* CPYSE */
            gen_fcpyse(ra, rb, rc);
            break;
        case 0x024:
            /* MT_FPCR */
            if (likely(ra != 31))
                gen_helper_store_fpcr(cpu_fir[ra]);
            else {
                TCGv tmp = tcg_const_i64(0);
                gen_helper_store_fpcr(tmp);
                tcg_temp_free(tmp);
            }
            break;
        case 0x025:
            /* MF_FPCR */
            if (likely(ra != 31))
                gen_helper_load_fpcr(cpu_fir[ra]);
            break;
        case 0x02A:
            /* FCMOVEQ */
            gen_fcmov(TCG_COND_NE, ra, rb, rc);
            break;
        case 0x02B:
            /* FCMOVNE */
            gen_fcmov(TCG_COND_EQ, ra, rb, rc);
            break;
        case 0x02C:
            /* FCMOVLT */
            gen_fcmov(TCG_COND_GE, ra, rb, rc);
            break;
        case 0x02D:
            /* FCMOVGE */
            gen_fcmov(TCG_COND_LT, ra, rb, rc);
            break;
        case 0x02E:
            /* FCMOVLE */
            gen_fcmov(TCG_COND_GT, ra, rb, rc);
            break;
        case 0x02F:
            /* FCMOVGT */
            gen_fcmov(TCG_COND_LE, ra, rb, rc);
            break;
        case 0x030:
            /* CVTQL */
            gen_fcvtql(rb, rc);
            break;
        case 0x130:
            /* CVTQL/V */
            gen_fcvtql_v(rb, rc);
            break;
        case 0x530:
            /* CVTQL/SV */
            gen_fcvtql_sv(rb, rc);
            break;
        default:
            goto invalid_opc;
        }
        break;
    case 0x18:
        switch ((uint16_t)disp16) {
        case 0x0000:
            /* TRAPB */
            /* No-op.  Just exit from the current tb */
            ret = 2;
            break;
        case 0x0400:
            /* EXCB */
            /* No-op.  Just exit from the current tb */
            ret = 2;
            break;
        case 0x4000:
            /* MB */
            /* No-op */
            break;
        case 0x4400:
            /* WMB */
            /* No-op */
            break;
        case 0x8000:
            /* FETCH */
            /* No-op */
            break;
        case 0xA000:
            /* FETCH_M */
            /* No-op */
            break;
        case 0xC000:
            /* RPCC */
            if (ra != 31)
                gen_helper_load_pcc(cpu_ir[ra]);
            break;
        case 0xE000:
            /* RC */
            if (ra != 31)
                gen_helper_rc(cpu_ir[ra]);
            break;
        case 0xE800:
            /* ECB */
            break;
        case 0xF000:
            /* RS */
            if (ra != 31)
                gen_helper_rs(cpu_ir[ra]);
            break;
        case 0xF800:
            /* WH64 */
            /* No-op */
            break;
        default:
            goto invalid_opc;
        }
        break;
    case 0x19:
        /* HW_MFPR (PALcode) */
#if defined (CONFIG_USER_ONLY)
        goto invalid_opc;
#else
        if (!ctx->pal_mode)
            goto invalid_opc;
        if (ra != 31) {
            TCGv tmp = tcg_const_i32(insn & 0xFF);
            gen_helper_mfpr(cpu_ir[ra], tmp, cpu_ir[ra]);
            tcg_temp_free(tmp);
        }
        break;
#endif
    case 0x1A:
        if (rb != 31)
            tcg_gen_andi_i64(cpu_pc, cpu_ir[rb], ~3);
        else
            tcg_gen_movi_i64(cpu_pc, 0);
        if (ra != 31)
            tcg_gen_movi_i64(cpu_ir[ra], ctx->pc);
        /* Those four jumps only differ by the branch prediction hint */
        switch (fn2) {
        case 0x0:
            /* JMP */
            break;
        case 0x1:
            /* JSR */
            break;
        case 0x2:
            /* RET */
            break;
        case 0x3:
            /* JSR_COROUTINE */
            break;
        }
        ret = 1;
        break;
    case 0x1B:
        /* HW_LD (PALcode) */
#if defined (CONFIG_USER_ONLY)
        goto invalid_opc;
#else
        if (!ctx->pal_mode)
            goto invalid_opc;
        if (ra != 31) {
            TCGv addr = tcg_temp_new();
            if (rb != 31)
                tcg_gen_addi_i64(addr, cpu_ir[rb], disp12);
            else
                tcg_gen_movi_i64(addr, disp12);
            switch ((insn >> 12) & 0xF) {
            case 0x0:
                /* Longword physical access (hw_ldl/p) */
                gen_helper_ldl_raw(cpu_ir[ra], addr);
                break;
            case 0x1:
                /* Quadword physical access (hw_ldq/p) */
                gen_helper_ldq_raw(cpu_ir[ra], addr);
                break;
            case 0x2:
                /* Longword physical access with lock (hw_ldl_l/p) */
                gen_helper_ldl_l_raw(cpu_ir[ra], addr);
                break;
            case 0x3:
                /* Quadword physical access with lock (hw_ldq_l/p) */
                gen_helper_ldq_l_raw(cpu_ir[ra], addr);
                break;
            case 0x4:
                /* Longword virtual PTE fetch (hw_ldl/v) */
                tcg_gen_qemu_ld32s(cpu_ir[ra], addr, 0);
                break;
            case 0x5:
                /* Quadword virtual PTE fetch (hw_ldq/v) */
                tcg_gen_qemu_ld64(cpu_ir[ra], addr, 0);
                break;
            case 0x6:
                /* Invalid */
                goto invalid_opc;
            case 0x7:
                /* Invalid */
                goto invalid_opc;
            case 0x8:
                /* Longword virtual access (hw_ldl) */
                gen_helper_st_virt_to_phys(addr, addr);
                gen_helper_ldl_raw(cpu_ir[ra], addr);
                break;
            case 0x9:
                /* Quadword virtual access (hw_ldq) */
                gen_helper_st_virt_to_phys(addr, addr);
                gen_helper_ldq_raw(cpu_ir[ra], addr);
                break;
            case 0xA:
                /* Longword virtual access with protection check (hw_ldl/w) */
                tcg_gen_qemu_ld32s(cpu_ir[ra], addr, 0);
                break;
            case 0xB:
                /* Quadword virtual access with protection check (hw_ldq/w) */
                tcg_gen_qemu_ld64(cpu_ir[ra], addr, 0);
                break;
            case 0xC:
                /* Longword virtual access with alt access mode (hw_ldl/a) */
                gen_helper_set_alt_mode();
                gen_helper_st_virt_to_phys(addr, addr);
                gen_helper_ldl_raw(cpu_ir[ra], addr);
                gen_helper_restore_mode();
                break;
            case 0xD:
                /* Quadword virtual access with alt access mode (hw_ldq/a) */
                gen_helper_set_alt_mode();
                gen_helper_st_virt_to_phys(addr, addr);
                gen_helper_ldq_raw(cpu_ir[ra], addr);
                gen_helper_restore_mode();
                break;
            case 0xE:
                /* Longword virtual access with alternate access mode and
                 * protection checks (hw_ldl/wa)
                 */
                gen_helper_set_alt_mode();
                gen_helper_ldl_data(cpu_ir[ra], addr);
                gen_helper_restore_mode();
                break;
            case 0xF:
                /* Quadword virtual access with alternate access mode and
                 * protection checks (hw_ldq/wa)
                 */
                gen_helper_set_alt_mode();
                gen_helper_ldq_data(cpu_ir[ra], addr);
                gen_helper_restore_mode();
                break;
            }
            tcg_temp_free(addr);
        }
        break;
#endif
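    /* Opcode 0x1C gathers the optional architecture extensions: SEXTB/SEXTW
       need BWX, the bit-count ops need CIX, the multimedia ops need MVI,
       and FTOIT/FTOIS need FIX. Each is gated on the corresponding bit of
       the emulated CPU's AMASK, so models lacking the extension fault. */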
    case 0x1C:
        switch (fn7) {
        case 0x00:
            /* SEXTB */
            if (!(ctx->amask & AMASK_BWX))
                goto invalid_opc;
            if (likely(rc != 31)) {
                if (islit)
                    tcg_gen_movi_i64(cpu_ir[rc], (int64_t)((int8_t)lit));
                else
                    tcg_gen_ext8s_i64(cpu_ir[rc], cpu_ir[rb]);
            }
            break;
        case 0x01:
            /* SEXTW */
            if (!(ctx->amask & AMASK_BWX))
                goto invalid_opc;
            if (likely(rc != 31)) {
                if (islit)
                    tcg_gen_movi_i64(cpu_ir[rc], (int64_t)((int16_t)lit));
                else
                    tcg_gen_ext16s_i64(cpu_ir[rc], cpu_ir[rb]);
            }
            break;
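        /* For literal operands the bit counts below are folded at translate
           time with the host-utils ctpop64/clz64/ctz64; only the register
           forms cost a helper call at run time. */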
        case 0x30:
            /* CTPOP */
            if (!(ctx->amask & AMASK_CIX))
                goto invalid_opc;
            if (likely(rc != 31)) {
                if (islit)
                    tcg_gen_movi_i64(cpu_ir[rc], ctpop64(lit));
                else
                    gen_helper_ctpop(cpu_ir[rc], cpu_ir[rb]);
            }
            break;
        case 0x31:
            /* PERR */
            if (!(ctx->amask & AMASK_MVI))
                goto invalid_opc;
            gen_perr(ra, rb, rc, islit, lit);
            break;
        case 0x32:
            /* CTLZ */
            if (!(ctx->amask & AMASK_CIX))
                goto invalid_opc;
            if (likely(rc != 31)) {
                if (islit)
                    tcg_gen_movi_i64(cpu_ir[rc], clz64(lit));
                else
                    gen_helper_ctlz(cpu_ir[rc], cpu_ir[rb]);
            }
            break;
        case 0x33:
            /* CTTZ */
            if (!(ctx->amask & AMASK_CIX))
                goto invalid_opc;
            if (likely(rc != 31)) {
                if (islit)
                    tcg_gen_movi_i64(cpu_ir[rc], ctz64(lit));
                else
                    gen_helper_cttz(cpu_ir[rc], cpu_ir[rb]);
            }
            break;
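        /* The pack/unpack forms are only defined with RA equal to R31 and
           a register (not literal) RB operand, so any other encoding is
           rejected as invalid. */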
        case 0x34:
            /* UNPKBW */
            if (!(ctx->amask & AMASK_MVI))
                goto invalid_opc;
            if (real_islit || ra != 31)
                goto invalid_opc;
            gen_unpkbw(rb, rc);
            break;
        case 0x35:
            /* UNPKBL */
            if (!(ctx->amask & AMASK_MVI))
                goto invalid_opc;
            if (real_islit || ra != 31)
                goto invalid_opc;
            gen_unpkbl(rb, rc);
            break;
        case 0x36:
            /* PKWB */
            if (!(ctx->amask & AMASK_MVI))
                goto invalid_opc;
            if (real_islit || ra != 31)
                goto invalid_opc;
            gen_pkwb(rb, rc);
            break;
        case 0x37:
            /* PKLB */
            if (!(ctx->amask & AMASK_MVI))
                goto invalid_opc;
            if (real_islit || ra != 31)
                goto invalid_opc;
            gen_pklb(rb, rc);
            break;
        case 0x38:
            /* MINSB8 */
            if (!(ctx->amask & AMASK_MVI))
                goto invalid_opc;
            gen_minsb8(ra, rb, rc, islit, lit);
            break;
        case 0x39:
            /* MINSW4 */
            if (!(ctx->amask & AMASK_MVI))
                goto invalid_opc;
            gen_minsw4(ra, rb, rc, islit, lit);
            break;
        case 0x3A:
            /* MINUB8 */
            if (!(ctx->amask & AMASK_MVI))
                goto invalid_opc;
            gen_minub8(ra, rb, rc, islit, lit);
            break;
        case 0x3B:
            /* MINUW4 */
            if (!(ctx->amask & AMASK_MVI))
                goto invalid_opc;
            gen_minuw4(ra, rb, rc, islit, lit);
            break;
        case 0x3C:
            /* MAXUB8 */
            if (!(ctx->amask & AMASK_MVI))
                goto invalid_opc;
            gen_maxub8(ra, rb, rc, islit, lit);
            break;
        case 0x3D:
            /* MAXUW4 */
            if (!(ctx->amask & AMASK_MVI))
                goto invalid_opc;
            gen_maxuw4(ra, rb, rc, islit, lit);
            break;
        case 0x3E:
            /* MAXSB8 */
            if (!(ctx->amask & AMASK_MVI))
                goto invalid_opc;
            gen_maxsb8(ra, rb, rc, islit, lit);
            break;
        case 0x3F:
            /* MAXSW4 */
            if (!(ctx->amask & AMASK_MVI))
                goto invalid_opc;
            gen_maxsw4(ra, rb, rc, islit, lit);
            break;
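        /* FTOIT/FTOIS move values from the FP register file to the integer
           one without conversion: FTOIT copies the raw 64-bit pattern,
           while FTOIS narrows an S-format value to its 32-bit memory image
           and sign-extends it (the architected STS+LDL equivalence). */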
        case 0x70:
            /* FTOIT */
            if (!(ctx->amask & AMASK_FIX))
                goto invalid_opc;
            if (likely(rc != 31)) {
                if (ra != 31)
                    tcg_gen_mov_i64(cpu_ir[rc], cpu_fir[ra]);
                else
                    tcg_gen_movi_i64(cpu_ir[rc], 0);
            }
            break;
        case 0x78:
            /* FTOIS */
            if (!(ctx->amask & AMASK_FIX))
                goto invalid_opc;
            if (rc != 31) {
                TCGv_i32 tmp1 = tcg_temp_new_i32();
                if (ra != 31)
                    gen_helper_s_to_memory(tmp1, cpu_fir[ra]);
                else {
                    TCGv tmp2 = tcg_const_i64(0);
                    gen_helper_s_to_memory(tmp1, tmp2);
                    tcg_temp_free(tmp2);
                }
                tcg_gen_ext_i32_i64(cpu_ir[rc], tmp1);
                tcg_temp_free_i32(tmp1);
            }
            break;
        default:
            goto invalid_opc;
        }
        break;
    case 0x1D:
        /* HW_MTPR (PALcode) */
#if defined (CONFIG_USER_ONLY)
        goto invalid_opc;
#else
        if (!ctx->pal_mode)
            goto invalid_opc;
        else {
            TCGv_i32 tmp1 = tcg_const_i32(insn & 0xFF);
            if (ra != 31)
                gen_helper_mtpr(tmp1, cpu_ir[ra]);
            else {
                TCGv tmp2 = tcg_const_i64(0);
                gen_helper_mtpr(tmp1, tmp2);
                tcg_temp_free(tmp2);
            }
            tcg_temp_free_i32(tmp1);
            ret = 2;
        }
        break;
#endif
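    /* HW_REI comes in two flavours: with RB = R31 the pre-EV6 form is
       handled entirely by the hw_rei helper, otherwise the return target
       is built from RB plus the instruction's low 13 bits, which the
       ((int64_t)insn << 51) >> 51 idiom sign-extends. */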
    case 0x1E:
        /* HW_REI (PALcode) */
#if defined (CONFIG_USER_ONLY)
        goto invalid_opc;
#else
        if (!ctx->pal_mode)
            goto invalid_opc;
        if (rb == 31) {
            /* "Old" alpha */
            gen_helper_hw_rei();
        } else {
            TCGv tmp;

            if (ra != 31) {
                tmp = tcg_temp_new();
                tcg_gen_addi_i64(tmp, cpu_ir[rb], (((int64_t)insn << 51) >> 51));
            } else
                tmp = tcg_const_i64(((int64_t)insn << 51) >> 51);
            gen_helper_hw_ret(tmp);
            tcg_temp_free(tmp);
        }
        ret = 2;
        break;
#endif
    case 0x1F:
        /* HW_ST (PALcode) */
#if defined (CONFIG_USER_ONLY)
        goto invalid_opc;
#else
        if (!ctx->pal_mode)
            goto invalid_opc;
        else {
            TCGv addr, val;
            addr = tcg_temp_new();
            if (rb != 31)
                tcg_gen_addi_i64(addr, cpu_ir[rb], disp12);
            else
                tcg_gen_movi_i64(addr, disp12);
            if (ra != 31)
                val = cpu_ir[ra];
            else {
                val = tcg_temp_new();
                tcg_gen_movi_i64(val, 0);
            }
            switch ((insn >> 12) & 0xF) {
            case 0x0:
                /* Longword physical access */
                gen_helper_stl_raw(val, addr);
                break;
            case 0x1:
                /* Quadword physical access */
                gen_helper_stq_raw(val, addr);
                break;
            case 0x2:
                /* Longword physical access with lock */
                gen_helper_stl_c_raw(val, val, addr);
                break;
            case 0x3:
                /* Quadword physical access with lock */
                gen_helper_stq_c_raw(val, val, addr);
                break;
            case 0x4:
                /* Longword virtual access */
                gen_helper_st_virt_to_phys(addr, addr);
                gen_helper_stl_raw(val, addr);
                break;
            case 0x5:
                /* Quadword virtual access */
                gen_helper_st_virt_to_phys(addr, addr);
                gen_helper_stq_raw(val, addr);
                break;
            case 0x6:
                /* Invalid */
                goto invalid_opc;
            case 0x7:
                /* Invalid */
                goto invalid_opc;
            case 0x8:
                /* Invalid */
                goto invalid_opc;
            case 0x9:
                /* Invalid */
                goto invalid_opc;
            case 0xA:
                /* Invalid */
                goto invalid_opc;
            case 0xB:
                /* Invalid */
                goto invalid_opc;
            case 0xC:
                /* Longword virtual access with alternate access mode */
                gen_helper_set_alt_mode();
                gen_helper_st_virt_to_phys(addr, addr);
                gen_helper_stl_raw(val, addr);
                gen_helper_restore_mode();
                break;
            case 0xD:
                /* Quadword virtual access with alternate access mode */
                gen_helper_set_alt_mode();
                gen_helper_st_virt_to_phys(addr, addr);
                gen_helper_stq_raw(val, addr);
                gen_helper_restore_mode();
                break;
            case 0xE:
                /* Invalid */
                goto invalid_opc;
            case 0xF:
                /* Invalid */
                goto invalid_opc;
            }
            if (ra == 31)
                tcg_temp_free(val);
            tcg_temp_free(addr);
        }
        break;
#endif
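    /* Opcodes 0x20-0x27 are the FP memory operations: LDF/LDG and STF/STG
       handle the VAX F/G formats, LDS/LDT and STS/STT the IEEE single and
       double formats. The 1 passed to gen_load_mem/gen_store_mem is the
       fp flag selecting the cpu_fir register file. */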
    case 0x20:
        /* LDF */
        gen_load_mem(ctx, &gen_qemu_ldf, ra, rb, disp16, 1, 0);
        break;
    case 0x21:
        /* LDG */
        gen_load_mem(ctx, &gen_qemu_ldg, ra, rb, disp16, 1, 0);
        break;
    case 0x22:
        /* LDS */
        gen_load_mem(ctx, &gen_qemu_lds, ra, rb, disp16, 1, 0);
        break;
    case 0x23:
        /* LDT */
        gen_load_mem(ctx, &tcg_gen_qemu_ld64, ra, rb, disp16, 1, 0);
        break;
    case 0x24:
        /* STF */
        gen_store_mem(ctx, &gen_qemu_stf, ra, rb, disp16, 1, 0, 0);
        break;
    case 0x25:
        /* STG */
        gen_store_mem(ctx, &gen_qemu_stg, ra, rb, disp16, 1, 0, 0);
        break;
    case 0x26:
        /* STS */
        gen_store_mem(ctx, &gen_qemu_sts, ra, rb, disp16, 1, 0, 0);
        break;
    case 0x27:
        /* STT */
        gen_store_mem(ctx, &tcg_gen_qemu_st64, ra, rb, disp16, 1, 0, 0);
        break;
    case 0x28:
        /* LDL */
        gen_load_mem(ctx, &tcg_gen_qemu_ld32s, ra, rb, disp16, 0, 0);
        break;
    case 0x29:
        /* LDQ */
        gen_load_mem(ctx, &tcg_gen_qemu_ld64, ra, rb, disp16, 0, 0);
        break;
    case 0x2A:
        /* LDL_L */
        gen_load_mem(ctx, &gen_qemu_ldl_l, ra, rb, disp16, 0, 0);
        break;
    case 0x2B:
        /* LDQ_L */
        gen_load_mem(ctx, &gen_qemu_ldq_l, ra, rb, disp16, 0, 0);
        break;
    case 0x2C:
        /* STL */
        gen_store_mem(ctx, &tcg_gen_qemu_st32, ra, rb, disp16, 0, 0, 0);
        break;
    case 0x2D:
        /* STQ */
        gen_store_mem(ctx, &tcg_gen_qemu_st64, ra, rb, disp16, 0, 0, 0);
        break;
    case 0x2E:
        /* STL_C */
        gen_store_mem(ctx, &gen_qemu_stl_c, ra, rb, disp16, 0, 0, 1);
        break;
    case 0x2F:
        /* STQ_C */
        gen_store_mem(ctx, &gen_qemu_stq_c, ra, rb, disp16, 0, 0, 1);
        break;
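    /* Branch displacements are in instruction units: disp21 is shifted
       left two bits and added to ctx->pc, which already points at the
       instruction after the branch. */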
    case 0x30:
        /* BR */
        if (ra != 31)
            tcg_gen_movi_i64(cpu_ir[ra], ctx->pc);
        tcg_gen_movi_i64(cpu_pc, ctx->pc + (int64_t)(disp21 << 2));
        ret = 1;
        break;
    case 0x31: /* FBEQ */
        gen_fbcond(ctx, TCG_COND_EQ, ra, disp21);
        ret = 1;
        break;
    case 0x32: /* FBLT */
        gen_fbcond(ctx, TCG_COND_LT, ra, disp21);
        ret = 1;
        break;
    case 0x33: /* FBLE */
        gen_fbcond(ctx, TCG_COND_LE, ra, disp21);
        ret = 1;
        break;
    case 0x34:
        /* BSR */
        if (ra != 31)
            tcg_gen_movi_i64(cpu_ir[ra], ctx->pc);
        tcg_gen_movi_i64(cpu_pc, ctx->pc + (int64_t)(disp21 << 2));
        ret = 1;
        break;
    case 0x35: /* FBNE */
        gen_fbcond(ctx, TCG_COND_NE, ra, disp21);
        ret = 1;
        break;
    case 0x36: /* FBGE */
        gen_fbcond(ctx, TCG_COND_GE, ra, disp21);
        ret = 1;
        break;
    case 0x37: /* FBGT */
        gen_fbcond(ctx, TCG_COND_GT, ra, disp21);
        ret = 1;
        break;
    case 0x38:
        /* BLBC */
        gen_bcond(ctx, TCG_COND_EQ, ra, disp21, 1);
        ret = 1;
        break;
    case 0x39:
        /* BEQ */
        gen_bcond(ctx, TCG_COND_EQ, ra, disp21, 0);
        ret = 1;
        break;
    case 0x3A:
        /* BLT */
        gen_bcond(ctx, TCG_COND_LT, ra, disp21, 0);
        ret = 1;
        break;
    case 0x3B:
        /* BLE */
        gen_bcond(ctx, TCG_COND_LE, ra, disp21, 0);
        ret = 1;
        break;
    case 0x3C:
        /* BLBS */
        gen_bcond(ctx, TCG_COND_NE, ra, disp21, 1);
        ret = 1;
        break;
    case 0x3D:
        /* BNE */
        gen_bcond(ctx, TCG_COND_NE, ra, disp21, 0);
        ret = 1;
        break;
    case 0x3E:
        /* BGE */
        gen_bcond(ctx, TCG_COND_GE, ra, disp21, 0);
        ret = 1;
        break;
    case 0x3F:
        /* BGT */
        gen_bcond(ctx, TCG_COND_GT, ra, disp21, 0);
        ret = 1;
        break;
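    /* Common landing point for every malformed or unimplemented encoding:
       raise the invalid-opcode exception and report ret = 3 so the
       translation loop neither stores a new PC nor keeps going. */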
    invalid_opc:
        gen_invalid(ctx);
        ret = 3;
        break;
    }

    return ret;
}

static inline void gen_intermediate_code_internal(CPUState *env,
                                                  TranslationBlock *tb,
                                                  int search_pc)
{
    DisasContext ctx, *ctxp = &ctx;
    target_ulong pc_start;
    uint32_t insn;
    uint16_t *gen_opc_end;
    CPUBreakpoint *bp;
    int j, lj = -1;
    int ret;
    int num_insns;
    int max_insns;

    pc_start = tb->pc;
    gen_opc_end = gen_opc_buf + OPC_MAX_SIZE;
    ctx.pc = pc_start;
    ctx.amask = env->amask;
    ctx.env = env;
#if defined (CONFIG_USER_ONLY)
    ctx.mem_idx = 0;
#else
    ctx.mem_idx = ((env->ps >> 3) & 3);
    ctx.pal_mode = env->ipr[IPR_EXC_ADDR] & 1;
#endif

    /* ??? Every TB begins with an unset rounding mode, to be initialized
       on the first fp insn of the TB.  Alternatively we could define a
       proper default for every TB (e.g. QUAL_RM_N or QUAL_RM_D) and make
       sure to reset the FP_STATUS to that default at the end of any TB
       that changes the default.  We could even (gasp) dynamically figure
       out what default would be most efficient given the running
       program.  */
    ctx.tb_rm = -1;
    /* Similarly for flush-to-zero.  */
    ctx.tb_ftz = -1;
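
    /* tb->cflags & CF_COUNT_MASK carries the instruction budget when
       icount is in use; zero means no externally imposed limit, so the
       mask value itself serves as the cap. */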
    num_insns = 0;
    max_insns = tb->cflags & CF_COUNT_MASK;
    if (max_insns == 0)
        max_insns = CF_COUNT_MASK;

    gen_icount_start();
    for (ret = 0; ret == 0;) {
        if (unlikely(!QTAILQ_EMPTY(&env->breakpoints))) {
            QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
                if (bp->pc == ctx.pc) {
                    gen_excp(&ctx, EXCP_DEBUG, 0);
                    break;
                }
            }
        }
        if (search_pc) {
            j = gen_opc_ptr - gen_opc_buf;
            if (lj < j) {
                lj++;
                while (lj < j)
                    gen_opc_instr_start[lj++] = 0;
            }
            gen_opc_pc[lj] = ctx.pc;
            gen_opc_instr_start[lj] = 1;
            gen_opc_icount[lj] = num_insns;
        }
        if (num_insns + 1 == max_insns && (tb->cflags & CF_LAST_IO))
            gen_io_start();
        insn = ldl_code(ctx.pc);
        num_insns++;

        if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP))) {
            tcg_gen_debug_insn_start(ctx.pc);
        }

        ctx.pc += 4;
        ret = translate_one(ctxp, insn);
        if (ret != 0)
            break;
        /* If we reach a page boundary or are single stepping, stop
         * generation.
         */
        if (env->singlestep_enabled) {
            gen_excp(&ctx, EXCP_DEBUG, 0);
            break;
        }

        if ((ctx.pc & (TARGET_PAGE_SIZE - 1)) == 0)
            break;

        if (gen_opc_ptr >= gen_opc_end)
            break;

        if (num_insns >= max_insns)
            break;

        if (singlestep) {
            break;
        }
    }
    if (ret != 1 && ret != 3) {
        tcg_gen_movi_i64(cpu_pc, ctx.pc);
    }
    if (tb->cflags & CF_LAST_IO)
        gen_io_end();
    /* Generate the return instruction */
    tcg_gen_exit_tb(0);
    gen_icount_end(tb, num_insns);
    *gen_opc_ptr = INDEX_op_end;
    if (search_pc) {
        j = gen_opc_ptr - gen_opc_buf;
        lj++;
        while (lj <= j)
            gen_opc_instr_start[lj++] = 0;
    } else {
        tb->size = ctx.pc - pc_start;
        tb->icount = num_insns;
    }

#ifdef DEBUG_DISAS
    if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
        qemu_log("IN: %s\n", lookup_symbol(pc_start));
        log_target_disas(pc_start, ctx.pc - pc_start, 1);
        qemu_log("\n");
    }
#endif
}
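
/* The two public entry points differ only in search_pc: the _pc variant
   records the opcode-buffer-to-guest-PC mapping (gen_opc_pc and friends)
   that gen_pc_load consults when state must be restored mid-TB. */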
void gen_intermediate_code (CPUState *env, struct TranslationBlock *tb)
{
    gen_intermediate_code_internal(env, tb, 0);
}

void gen_intermediate_code_pc (CPUState *env, struct TranslationBlock *tb)
{
    gen_intermediate_code_internal(env, tb, 1);
}

struct cpu_def_t {
    const char *name;
    int implver, amask;
};

static const struct cpu_def_t cpu_defs[] = {
    { "ev4", IMPLVER_2106x, 0 },
    { "ev5", IMPLVER_21164, 0 },
    { "ev56", IMPLVER_21164, AMASK_BWX },
    { "pca56", IMPLVER_21164, AMASK_BWX | AMASK_MVI },
    { "ev6", IMPLVER_21264, AMASK_BWX | AMASK_FIX | AMASK_MVI | AMASK_TRAP },
    { "ev67", IMPLVER_21264, (AMASK_BWX | AMASK_FIX | AMASK_CIX
                              | AMASK_MVI | AMASK_TRAP | AMASK_PREFETCH) },
    { "ev68", IMPLVER_21264, (AMASK_BWX | AMASK_FIX | AMASK_CIX
                              | AMASK_MVI | AMASK_TRAP | AMASK_PREFETCH) },
    { "21064", IMPLVER_2106x, 0 },
    { "21164", IMPLVER_21164, 0 },
    { "21164a", IMPLVER_21164, AMASK_BWX },
    { "21164pc", IMPLVER_21164, AMASK_BWX | AMASK_MVI },
    { "21264", IMPLVER_21264, AMASK_BWX | AMASK_FIX | AMASK_MVI | AMASK_TRAP },
    { "21264a", IMPLVER_21264, (AMASK_BWX | AMASK_FIX | AMASK_CIX
                                | AMASK_MVI | AMASK_TRAP | AMASK_PREFETCH) }
};

CPUAlphaState * cpu_alpha_init (const char *cpu_model)
{
    CPUAlphaState *env;
    int implver, amask, i, max;

    env = qemu_mallocz(sizeof(CPUAlphaState));
    cpu_exec_init(env);
    alpha_translate_init();
    tlb_flush(env, 1);

    /* Default to ev67; no reason not to emulate insns by default.  */
    implver = IMPLVER_21264;
    amask = (AMASK_BWX | AMASK_FIX | AMASK_CIX | AMASK_MVI
             | AMASK_TRAP | AMASK_PREFETCH);

    max = ARRAY_SIZE(cpu_defs);
    for (i = 0; i < max; i++) {
        if (strcmp (cpu_model, cpu_defs[i].name) == 0) {
            implver = cpu_defs[i].implver;
            amask = cpu_defs[i].amask;
            break;
        }
    }
    env->implver = implver;
    env->amask = amask;

    env->ps = 0x1F00;
#if defined (CONFIG_USER_ONLY)
    env->ps |= 1 << 3;
    cpu_alpha_store_fpcr(env, (FPCR_INVD | FPCR_DZED | FPCR_OVFD
                               | FPCR_UNFD | FPCR_INED | FPCR_DNOD));
#else
    pal_init(env);
#endif

    /* Initialize IPRs */
#if defined (CONFIG_USER_ONLY)
    env->ipr[IPR_EXC_ADDR] = 0;
    env->ipr[IPR_EXC_SUM] = 0;
    env->ipr[IPR_EXC_MASK] = 0;
#else
    {
        uint64_t hwpcb;
        hwpcb = env->ipr[IPR_PCBB];
        env->ipr[IPR_ASN] = 0;
        env->ipr[IPR_ASTEN] = 0;
        env->ipr[IPR_ASTSR] = 0;
        env->ipr[IPR_DATFX] = 0;
        /* XXX: fix this */
        // env->ipr[IPR_ESP] = ldq_raw(hwpcb + 8);
        // env->ipr[IPR_KSP] = ldq_raw(hwpcb + 0);
        // env->ipr[IPR_SSP] = ldq_raw(hwpcb + 16);
        // env->ipr[IPR_USP] = ldq_raw(hwpcb + 24);
        env->ipr[IPR_FEN] = 0;
        env->ipr[IPR_IPL] = 31;
        env->ipr[IPR_MCES] = 0;
        env->ipr[IPR_PERFMON] = 0; /* Implementation specific */
        // env->ipr[IPR_PTBR] = ldq_raw(hwpcb + 32);
        env->ipr[IPR_SISR] = 0;
        env->ipr[IPR_VIRBND] = -1ULL;
    }
#endif

    qemu_init_vcpu(env);
    return env;
}

void gen_pc_load(CPUState *env, TranslationBlock *tb,
                 unsigned long searched_pc, int pc_pos, void *puc)
{
    env->pc = gen_opc_pc[pc_pos];
}